Example no. 1
import argparse
import random

import numpy as np
import torch

# NOTE: project-specific helpers used below (setup_default_logging, set_model,
# get_train_loader, get_train_loader_mix, get_val_loader, LabelGuessor, EMA,
# WarmupCosineLrScheduler, train_one_epoch, evaluate, evaluate_linear_Clf) are
# assumed to be importable from this repository's own modules.


def main():
    parser = argparse.ArgumentParser(description='FixMatch Training')
    parser.add_argument('--wresnet-k',
                        default=2,
                        type=int,
                        help='width factor of wide resnet')
    parser.add_argument('--wresnet-n',
                        default=28,
                        type=int,
                        help='depth of wide resnet')
    parser.add_argument('--dataset',
                        type=str,
                        default='CIFAR10',
                        help='dataset to train on (e.g. CIFAR10)')
    # parser.add_argument('--n-classes', type=int, default=100,
    #                     help='number of classes in dataset')
    parser.add_argument('--n-labeled',
                        type=int,
                        default=40,
                        help='number of labeled samples for training')
    parser.add_argument('--n-epoches',
                        type=int,
                        default=1024,
                        help='number of training epochs')
    parser.add_argument('--batchsize',
                        type=int,
                        default=40,
                        help='train batch size of labeled samples')
    parser.add_argument('--mu',
                        type=int,
                        default=7,
                        help='factor of train batch size of unlabeled samples')
    parser.add_argument('--thr',
                        type=float,
                        default=0.95,
                        help='pseudo label threshold')
    parser.add_argument('--n-imgs-per-epoch',
                        type=int,
                        default=64 * 1024,
                        help='number of training images for each epoch')
    parser.add_argument('--lam-u',
                        type=float,
                        default=1.,
                        help='coefficient of unlabeled loss')
    parser.add_argument('--ema-alpha',
                        type=float,
                        default=0.999,
                        help='decay rate for ema module')
    parser.add_argument('--lr',
                        type=float,
                        default=0.03,
                        help='learning rate for training')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=5e-4,
                        help='weight decay')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        help='momentum for optimizer')
    parser.add_argument('--seed',
                        type=int,
                        default=-1,
                        help='seed for random behaviors, no seed if negative')
    parser.add_argument('--temperature',
                        type=float,
                        default=0.5,
                        help='temperature for loss function')
    args = parser.parse_args()

    # args.device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

    logger, writer = setup_default_logging(args)
    logger.info(dict(args._get_kwargs()))

    # global settings
    #  torch.multiprocessing.set_sharing_strategy('file_system')
    if args.seed >= 0:  # seed all RNGs when a non-negative seed is given
        torch.manual_seed(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)
        # torch.backends.cudnn.deterministic = True

    n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize  # iterations per epoch
    n_iters_all = n_iters_per_epoch * args.n_epoches  # total optimization steps

    logger.info("***** Running training *****")
    logger.info(f"  Task = {args.dataset}@{args.n_labeled}")
    logger.info(f"  Num Epochs = {n_iters_per_epoch}")
    logger.info(f"  Batch size per GPU = {args.batchsize}")
    # logger.info(f"  Total train batch size = {args.batch_size * args.world_size}")
    logger.info(f"  Total optimization steps = {n_iters_all}")

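    # build the model plus the loss criteria: criteria_x (labeled), criteria_u
    # (unlabeled / pseudo-label) and criteria_z (apparently the contrastive SimCLR term)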
    model, criteria_x, criteria_u, criteria_z = set_model(args)
    logger.info("Total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    dltrain_x, dltrain_u = get_train_loader(args.dataset,
                                            args.batchsize,
                                            args.mu,
                                            n_iters_per_epoch,
                                            L=args.n_labeled)
    dlval = get_val_loader(dataset=args.dataset, batch_size=64, num_workers=2)

    lb_guessor = LabelGuessor(thresh=args.thr)

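    # EMA shadow of the model parameters; the EMA copy is the one evaluated on the validation set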
    ema = EMA(model, args.ema_alpha)

    wd_params, non_wd_params = [], []
    for name, param in model.named_parameters():
        # if len(param.size()) == 1:  # alternative: treat every 1-D parameter (BN params, biases) as no-decay
        if 'bn' in name:
            # BatchNorm weights and biases are kept out of weight decay
            non_wd_params.append(param)
        else:
            wd_params.append(param)
    param_list = [{
        'params': wd_params
    }, {
        'params': non_wd_params,
        'weight_decay': 0
    }]
    optim = torch.optim.SGD(param_list,
                            lr=args.lr,
                            weight_decay=args.weight_decay,
                            momentum=args.momentum,
                            nesterov=True)
    lr_schdlr = WarmupCosineLrScheduler(optim,
                                        max_iter=n_iters_all,
                                        warmup_iter=0)

    train_args = dict(model=model,
                      criteria_x=criteria_x,
                      criteria_u=criteria_u,
                      criteria_z=criteria_z,
                      optim=optim,
                      lr_schdlr=lr_schdlr,
                      ema=ema,
                      dltrain_x=dltrain_x,
                      dltrain_u=dltrain_u,
                      lb_guessor=lb_guessor,
                      lambda_u=args.lam_u,
                      n_iters=n_iters_per_epoch,
                      logger=logger)
    best_acc = -1
    best_epoch = 0
    logger.info('-----------start training--------------')
    for epoch in range(args.n_epoches):
        train_loss, loss_x, loss_u, loss_u_real, loss_simclr, mask_mean = train_one_epoch(
            epoch, **train_args)
        # torch.cuda.empty_cache()

        top1, top5, valid_loss = evaluate(ema, dlval, criteria_x)

        writer.add_scalars('train/1.loss', {
            'train': train_loss,
            'test': valid_loss
        }, epoch)
        writer.add_scalar('train/2.train_loss_x', loss_x, epoch)
        writer.add_scalar('train/3.train_loss_u', loss_u, epoch)
        writer.add_scalar('train/4.train_loss_u_real', loss_u_real, epoch)
        writer.add_scalar('train/4.train_loss_simclr', loss_simclr, epoch)
        writer.add_scalar('train/5.mask_mean', mask_mean, epoch)
        writer.add_scalars('test/1.test_acc', {
            'top1': top1,
            'top5': top5
        }, epoch)
        # writer.add_scalar('test/2.test_loss', loss, epoch)

        # best_acc = top1 if best_acc < top1 else best_acc
        if best_acc < top1:
            best_acc = top1
            best_epoch = epoch

        logger.info(
            "Epoch {}. Top1: {:.4f}. Top5: {:.4f}. best_acc: {:.4f} in epoch{}"
            .format(epoch, top1, top5, best_acc, best_epoch))

    writer.close()
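

# Second variant of main(). If both definitions are kept in one module, this one
# shadows the first. It adds a SimCLR loss coefficient (--lam-s) and uses
# get_train_loader_mix, which also returns an extra unlabeled loader (dltrain_f).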
def main():
    parser = argparse.ArgumentParser(description='FixMatch Training')
    parser.add_argument('--wresnet-k',
                        default=2,
                        type=int,
                        help='width factor of wide resnet')
    parser.add_argument('--wresnet-n',
                        default=28,
                        type=int,
                        help='depth of wide resnet')
    parser.add_argument('--dataset',
                        type=str,
                        default='CIFAR10',
                        help='dataset to train on (e.g. CIFAR10)')
    # parser.add_argument('--n-classes', type=int, default=100,
    #                     help='number of classes in dataset')
    parser.add_argument('--n-labeled',
                        type=int,
                        default=40,
                        help='number of labeled samples for training')
    parser.add_argument('--n-epoches',
                        type=int,
                        default=1024,
                        help='number of training epochs')
    parser.add_argument('--batchsize',
                        type=int,
                        default=40,
                        help='train batch size of labeled samples')
    parser.add_argument('--mu',
                        type=int,
                        default=7,
                        help='factor of train batch size of unlabeled samples')
    parser.add_argument('--thr',
                        type=float,
                        default=0.95,
                        help='pseudo label threshold')
    parser.add_argument('--n-imgs-per-epoch',
                        type=int,
                        default=64 * 1024,
                        help='number of training images for each epoch')
    parser.add_argument('--lam-u',
                        type=float,
                        default=1.,
                        help='coefficient of unlabeled loss')
    parser.add_argument('--lam-s',
                        type=float,
                        default=0.2,
                        help='coefficient of the unlabeled SimCLR loss')
    parser.add_argument('--ema-alpha',
                        type=float,
                        default=0.999,
                        help='decay rate for ema module')
    parser.add_argument('--lr',
                        type=float,
                        default=0.03,
                        help='learning rate for training')
    parser.add_argument('--weight-decay',
                        type=float,
                        default=5e-4,
                        help='weight decay')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.9,
                        help='momentum for optimizer')
    parser.add_argument('--seed',
                        type=int,
                        default=-1,
                        help='seed for random behaviors, no seed if negative')
    parser.add_argument('--temperature',
                        type=float,
                        default=0.5,
                        help='temperature for loss function')
    args = parser.parse_args()

    # args.device = torch.device("cuda:2" if torch.cuda.is_available() else "cpu")

    logger, writer = setup_default_logging(args)
    logger.info(dict(args._get_kwargs()))

    # global settings
    #  torch.multiprocessing.set_sharing_strategy('file_system')
    if args.seed >= 0:  # seed all RNGs when a non-negative seed is given
        torch.manual_seed(args.seed)
        random.seed(args.seed)
        np.random.seed(args.seed)
        # torch.backends.cudnn.deterministic = True

    n_iters_per_epoch = args.n_imgs_per_epoch // args.batchsize  # iterations per epoch
    n_iters_all = n_iters_per_epoch * args.n_epoches  # total optimization steps

    logger.info("***** Running training *****")
    logger.info(f"  Task = {args.dataset}@{args.n_labeled}")
    logger.info(f"  Num Epochs = {n_iters_per_epoch}")
    logger.info(f"  Batch size per GPU = {args.batchsize}")
    # logger.info(f"  Total train batch size = {args.batch_size * args.world_size}")
    logger.info(f"  Total optimization steps = {n_iters_all}")

    model, criteria_x, criteria_u, criteria_z = set_model(args)

    logger.info("Total params: {:.2f}M".format(
        sum(p.numel() for p in model.parameters()) / 1e6))

    dltrain_x, dltrain_u, dltrain_f = get_train_loader_mix(args.dataset,
                                                           args.batchsize,
                                                           args.mu,
                                                           n_iters_per_epoch,
                                                           L=args.n_labeled)
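    # Compared with get_train_loader, get_train_loader_mix also returns dltrain_f,
    # an extra loader (presumably unlabeled data) used for the SimCLR term and the
    # commented-out SimCLR / IIC phases further down.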
    dlval = get_val_loader(dataset=args.dataset, batch_size=64, num_workers=2)

    lb_guessor = LabelGuessor(thresh=args.thr)

    ema = EMA(model, args.ema_alpha)

    wd_params, non_wd_params = [], []
    for name, param in model.named_parameters():
        # if len(param.size()) == 1:  # alternative: treat every 1-D parameter (BN params, biases) as no-decay
        if 'bn' in name:
            # BatchNorm weights and biases are kept out of weight decay
            non_wd_params.append(param)
        else:
            wd_params.append(param)

    param_list = [{
        'params': wd_params
    }, {
        'params': non_wd_params,
        'weight_decay': 0
    }]

    optim_fix = torch.optim.SGD(param_list,
                                lr=args.lr,
                                weight_decay=args.weight_decay,
                                momentum=args.momentum,
                                nesterov=True)
    lr_schdlr_fix = WarmupCosineLrScheduler(optim_fix,
                                            max_iter=n_iters_all,
                                            warmup_iter=0)

    train_args = dict(model=model,
                      criteria_x=criteria_x,
                      criteria_u=criteria_u,
                      criteria_z=criteria_z,
                      optim=optim_fix,
                      lr_schdlr=lr_schdlr_fix,
                      ema=ema,
                      dltrain_x=dltrain_x,
                      dltrain_u=dltrain_u,
                      dltrain_f=dltrain_f,
                      lb_guessor=lb_guessor,
                      lambda_u=args.lam_u,
                      lambda_s=args.lam_s,
                      n_iters=n_iters_per_epoch,
                      logger=logger,
                      bt=args.batchsize,
                      mu=args.mu)

    # # TRAINING PARAMETERS FOR SIMCLR
    # param_list = [
    #     {'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
    #
    # optim_simclr = torch.optim.SGD(param_list, lr=0.5, weight_decay=args.weight_decay,
    #                                momentum=args.momentum, nesterov=False)
    #
    # lr_schdlr_simclr = WarmupCosineLrScheduler(
    #     optim_simclr, max_iter=n_iters_all, warmup_iter=0
    # )
    #
    # train_args_simclr = dict(
    #     model=model,
    #     criteria_z=criteria_z,
    #     optim=optim_simclr,
    #     lr_schdlr=lr_schdlr_simclr,
    #     ema=ema,
    #     dltrain_f=dltrain_f,
    #     lambda_s=args.lam_s,
    #     n_iters=n_iters_per_epoch,
    #     logger=logger,
    #     bt=args.batchsize,
    #     mu=args.mu
    # )

    # # TRAINING PARAMETERS FOR IIC
    # param_list = [
    #     {'params': wd_params}, {'params': non_wd_params, 'weight_decay': 0}]
    #
    # optim_iic = torch.optim.Adam(param_list, lr=1e-4, weight_decay=args.weight_decay)
    #
    # lr_schdlr_iic = WarmupCosineLrScheduler(
    #     optim_iic, max_iter=n_iters_all, warmup_iter=0
    # )
    #
    # train_args_iic = dict(
    #     model=model,
    #     optim=optim_iic,
    #     lr_schdlr=lr_schdlr_iic,
    #     ema=ema,
    #     dltrain_f=dltrain_f,
    #     n_iters=n_iters_per_epoch,
    #     logger=logger,
    #     bt=args.batchsize,
    #     mu=args.mu
    # )
    #

    best_acc = -1
    best_epoch = 0
    logger.info('-----------start training--------------')

    for epoch in range(args.n_epoches):
        # record the accuracy of the pre-trained model up to the h space (BACKBONE OUTPUT)
        top1, top5, valid_loss = evaluate_linear_Clf(ema, dltrain_x, dlval,
                                                     criteria_x)
        writer.add_scalars('test/1.test_linear_acc', {
            'top1': top1,
            'top5': top5
        }, epoch)

        logger.info("Epoch {}. on h space Top1: {:.4f}. Top5: {:.4f}.".format(
            epoch, top1, top5))

        if epoch < -500:  # condition is never true, so this pre-training branch stays disabled
            # # UNSUPERVISED TRAINING PHASE
            # train the SimCLR feature representation
            # train_loss, loss_simclr, model_ = train_one_epoch_simclr(epoch, **train_args_simclr)
            # writer.add_scalar('train/4.train_loss_simclr', loss_simclr, epoch)

            # train IIC
            # train_loss, loss_iic, model_ = train_one_epoch_iic(epoch, **train_args_iic)
            # writer.add_scalar('train/4.train_loss_iic', loss_iic, epoch)
            # evaluate_Clf(model_, dltrain_f, dlval, criteria_x)

            top1, top5, valid_loss = evaluate_linear_Clf(
                ema, dltrain_x, dlval, criteria_x)
            # # SAVE THE MODEL TRAINED IN THE UNSUPERVISED PHASE
            # if epoch == 497:
            #     # save model
            #     name = 'simclr_trained_good_h2.pt'
            #     torch.save(model_.state_dict(), name)
            #     logger.info('model saved')

        else:
            # SEMI-SUPERVISED TRAINING
            train_loss, loss_x, loss_u, loss_u_real, mask_mean, loss_simclr = train_one_epoch(
                epoch, **train_args)
            top1, top5, valid_loss = evaluate(ema, dlval, criteria_x)

            writer.add_scalar('train/4.train_loss_simclr', loss_simclr, epoch)
            writer.add_scalar('train/2.train_loss_x', loss_x, epoch)
            writer.add_scalar('train/3.train_loss_u', loss_u, epoch)
            writer.add_scalar('train/4.train_loss_u_real', loss_u_real, epoch)
            writer.add_scalar('train/5.mask_mean', mask_mean, epoch)

        writer.add_scalars('train/1.loss', {
            'train': train_loss,
            'test': valid_loss
        }, epoch)
        writer.add_scalars('test/1.test_acc', {
            'top1': top1,
            'top5': top5
        }, epoch)
        # writer.add_scalar('test/2.test_loss', loss, epoch)

        # best_acc = top1 if best_acc < top1 else best_acc
        if best_acc < top1:
            best_acc = top1
            best_epoch = epoch

        logger.info(
            "Epoch {}. Top1: {:.4f}. Top5: {:.4f}. best_acc: {:.4f} in epoch{}"
            .format(epoch, top1, top5, best_acc, best_epoch))

    writer.close()
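

# Entry-point sketch (assumption): the script is run directly; with both main()
# definitions in one module, the second definition above is the one that executes.
if __name__ == '__main__':
    main()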