Example #1
logger.info(model)
logger.info('EMA: {}'.format(ema))


# Optimization
def tensor_in(t, a):
    for a_ in a:
        if t is a_:
            return True
    return False


scheduler = None

if args.optimizer == 'adam':
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.wd)
    if args.scheduler:
        scheduler = CosineAnnealingWarmRestarts(optimizer, 20, T_mult=2, last_epoch=args.begin_epoch - 1)
elif args.optimizer == 'adamax':
    optimizer = optim.Adamax(model.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.wd)
elif args.optimizer == 'rmsprop':
    optimizer = optim.RMSprop(model.parameters(), lr=args.lr, weight_decay=args.wd)
elif args.optimizer == 'sgd':
    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, momentum=0.9, weight_decay=args.wd)
    if args.scheduler:
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[60, 120, 160], gamma=0.2, last_epoch=args.begin_epoch - 1
        )
else:
    raise ValueError('Unknown optimizer {}'.format(args.optimizer))

best_test_bpd = math.inf
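
In the snippet above, model, args, logger and ema come from the surrounding training script. A minimal, hypothetical sketch of how the optimizer/scheduler block can be driven end to end, assuming an args namespace with the same field names (the toy model and the concrete values are placeholders, not the script's actual configuration):

from types import SimpleNamespace

import torch
import torch.nn as nn
import torch.optim as optim
from torch.optim.lr_scheduler import CosineAnnealingWarmRestarts

# Hypothetical stand-ins for the objects the snippet gets from the surrounding script.
args = SimpleNamespace(optimizer='adam', lr=1e-3, wd=0.0, scheduler=True, begin_epoch=0)
model = nn.Linear(10, 10)

scheduler = None
if args.optimizer == 'adam':
    optimizer = optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.99), weight_decay=args.wd)
    if args.scheduler:
        scheduler = CosineAnnealingWarmRestarts(optimizer, 20, T_mult=2, last_epoch=args.begin_epoch - 1)
else:
    raise ValueError('Unknown optimizer {}'.format(args.optimizer))

# One scheduler step per epoch, after the optimizer updates for that epoch.
for _ in range(3):
    optimizer.zero_grad()
    loss = model(torch.randn(4, 10)).pow(2).mean()
    loss.backward()
    optimizer.step()
    if scheduler is not None:
        scheduler.step()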
Example #2
# Optimization
def tensor_in(t, a):
    for a_ in a:
        if t is a_:
            return True
    return False


scheduler = None
# Optimize the flow model and the GMM jointly by handing both parameter sets to one optimizer.
params = list(model.parameters()) + list(gmm.parameters())

# params = [par for par in gmm.parameters()]
if args.optimizer == 'adam':
    optimizer = optim.Adam(params,
                           lr=args.lr,
                           betas=(0.9, 0.99),
                           weight_decay=args.wd)
    if args.scheduler:
        scheduler = CosineAnnealingWarmRestarts(
            optimizer, 20, T_mult=2, last_epoch=args.begin_epoch - 1
        )
elif args.optimizer == 'adamax':
    optimizer = optim.Adamax(params,
                             lr=args.lr,
                             betas=(0.9, 0.99),
                             weight_decay=args.wd)
elif args.optimizer == 'rmsprop':
    optimizer = optim.RMSprop(params, lr=args.lr, weight_decay=args.wd)
elif args.optimizer == 'sgd':
    optimizer = torch.optim.SGD(params, lr=args.lr, momentum=0.9, weight_decay=args.wd)
    if args.scheduler:
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer, milestones=[60, 120, 160], gamma=0.2, last_epoch=args.begin_epoch - 1
        )
else:
    raise ValueError('Unknown optimizer {}'.format(args.optimizer))
Example #3
            blocks.append(layers.CouplingLayer(2, swap=True))
            if args.actnorm: blocks.append(layers.ActNorm1d(2))
            if args.batchnorm: blocks.append(layers.MovingBatchNorm1d(2))
        model = layers.SequentialFlow(blocks).to(device)

    logger.info(model)
    logger.info("Number of trainable parameters: {}".format(count_parameters(model)))

    # Alternative generator sizes:
    # genGen = Generator(16)
    # genGen = Generator(128)
    genGen = Generator(8)

    optimizer = optim.Adam(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    # Alternative SGD optimizer for the generator:
    # optimizerGen = optim.SGD(genGen.parameters(), lr=0.1, momentum=0.9)
    optimizerGen = optim.Adam(genGen.parameters(), lr=args.lr, weight_decay=args.weight_decay)

    time_meter = utils.RunningAverageMeter(0.93)
    loss_meter = utils.RunningAverageMeter(0.93)

    logpz_meter = utils.RunningAverageMeter(0.93)
    delta_logp_meter = utils.RunningAverageMeter(0.93)

    end = time.time()
    best_loss = float('inf')
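
The utils.RunningAverageMeter(0.93) objects above smooth the reported time, loss and log-density values; their implementation lives in the repository's utils module and is not shown here. A minimal sketch of such a meter, assuming exponential smoothing with the given momentum and an update(val) / avg interface (an assumption about the interface, not necessarily the repository's exact API):

class RunningAverageMeter:
    """Tracks an exponentially decayed running average of a scalar."""

    def __init__(self, momentum=0.99):
        self.momentum = momentum
        self.val = None   # most recent raw value
        self.avg = 0.0    # smoothed value

    def update(self, val):
        if self.val is None:
            self.avg = val
        else:
            self.avg = self.avg * self.momentum + val * (1.0 - self.momentum)
        self.val = val


# Usage mirroring the snippet: smooth a per-iteration loss.
loss_meter = RunningAverageMeter(0.93)
for step in range(5):
    loss_meter.update(1.0 / (step + 1))
print(loss_meter.val, loss_meter.avg)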
Example #4
    #     )
    #     if args.actnorm: blocks.append(layers.ActNorm1d(2))
    #     if args.batchnorm: blocks.append(layers.MovingBatchNorm1d(2))
    # model = layers.SequentialFlow(blocks).to(device)

    model.to(device)
    ema = utils.ExponentialMovingAverage(model)
    logger.info(model)
    #logger.info('EMA: {}'.format(ema))

    ######################################
    #########  optimizer   ###############
    ######################################
    scheduler = None
    if args.optimizer == 'adam':
        optimizer = optim.Adam(model.parameters(),
                               lr=args.lr,
                               weight_decay=args.wd)
        #scheduler = CosineAnnealingLR(optimizer, args.epochs, eta_min=args.lr)
    elif args.optimizer == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=args.lr,
                                    momentum=0.9,
                                    weight_decay=args.wd)
        scheduler = torch.optim.lr_scheduler.MultiStepLR(
            optimizer,
            milestones=[60, 120, 160],
            gamma=0.2,
            last_epoch=args.begin_epoch - 1)
    else:
        raise ValueError('Unknown optimizer {}'.format(args.optimizer))
    logger.info(optimizer)
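
In this variant, passing last_epoch=args.begin_epoch - 1 to MultiStepLR is what lets a resumed run continue the learning-rate schedule; a fresh run with begin_epoch = 0 reduces to the default last_epoch=-1. A small, self-contained sketch of the schedule itself (the stand-in model, learning rate and epoch count are illustrative assumptions; the milestones and gamma mirror the snippet):

import torch
import torch.nn as nn

model = nn.Linear(4, 4)  # stand-in for the flow model
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=1e-4)
scheduler = torch.optim.lr_scheduler.MultiStepLR(
    optimizer, milestones=[60, 120, 160], gamma=0.2
)

lrs = []
for _ in range(200):
    optimizer.step()      # a real run would do a full training epoch here
    scheduler.step()
    lrs.append(optimizer.param_groups[0]['lr'])

# The learning rate is cut by a factor of 5 at epochs 60, 120 and 160:
# roughly 0.1 -> 0.02 -> 0.004 -> 0.0008.
print(lrs[59], lrs[119], lrs[159])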