    # (the top of this snippet is cut off on the source page: `args`, `train`,
    #  `test`, `num_examples` and `model` come from the omitted code, and
    #  `make_dataset` / `train_ds` / `test_ds` below are placeholder names
    #  for the truncated dataset-builder call)
    train_ds, test_ds = (make_dataset(
        ds,
        args.batch_size,
        height=args.height,
        width=args.width,
        implementation=args.aug_impl,
        channels=args.channels,
        color_jitter_strength=args.color_jitter_strength,
        do_shuffle=s,
        buffer_multiplier=args.shuffle_buffer_multiplier,
        use_blur=args.use_blur) if ds is not None else None
                                 for ds, s in [(train, True), (test, False)])

    # loss, learning rate, optimizer
    contrastive_loss = NTXentLoss(temperature=args.temperature)
    learning_rate_schedule = train_utils.learning_rate_schedule(
        args, num_examples=num_examples)
    optimizer = train_utils.get_optimizer(args, learning_rate_schedule)

    # make dir for logging results
    logdir = join(args.logdir, datetime.now().strftime("%Y%m%d-%H%M%S"))
    os.makedirs(logdir)
    train_utils.save_train_params(args, logdir)

    # Callbacks
    callbacks = train_utils.setup_logging_callbacks(
        logdir, save_model=not args.no_save, save_epochs=args.save_epochs)
    callbacks.append(
        LearningRateLogger(learning_rate_schedule,
                           num_examples,
                           batch_size=args.batch_size))
    model.summary()
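
The train_utils.get_optimizer(args, schedule) helper used above is not shown on this page. A minimal sketch of what such a helper typically looks like, assuming an args.optimizer string flag (the flag name and the supported choices here are illustrative, not the repository's actual code):

import tensorflow as tf

def get_optimizer(args, learning_rate):
    # Dispatch an --optimizer flag onto a tf.keras optimizer; a
    # LearningRateSchedule object can be passed straight through as
    # `learning_rate`.
    if args.optimizer == "adam":
        return tf.keras.optimizers.Adam(learning_rate=learning_rate)
    if args.optimizer == "sgd":
        return tf.keras.optimizers.SGD(learning_rate=learning_rate,
                                       momentum=getattr(args, "momentum", 0.9))
    raise ValueError("unknown optimizer: %s" % args.optimizer)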
Example #2
import torch
from main.config import cfg
from main.model import get_model, get_refiner
from data.dance_dataset import DanceDataset
from torch.utils.data import DataLoader
from utils.log_utils import global_logger
from utils.train_utils import save_model, get_optimizer
from main.loss import L1_loss, MSSSIM_loss

model = get_model(mode='test')       # pretrained generator, eval mode only
model.eval()
refiner = get_refiner(mode='train')  # only the refiner is optimized below
Dance_dataset = DanceDataset(mode='train')
Dance_dataloader = DataLoader(dataset=Dance_dataset,
                              batch_size=cfg.batch_size,
                              shuffle=True,
                              num_workers=cfg.num_thread)
R_optimizer = get_optimizer(refiner, mode='train', model_type='R')

for epoch in range(cfg.start_epoch, cfg.num_epoch):
    for i, (imgs, label_maps, bg_imgs) in enumerate(Dance_dataloader):
        real_imgs = imgs.cuda()
        label_maps = label_maps.cuda()
        bg_imgs = bg_imgs.cuda()
        input = torch.cat([label_maps, bg_imgs], dim=1)

        _, fake_imgs = model(input)
        refined_imgs = refiner(fake_imgs)

        loss_L1 = L1_loss(real_imgs, fake_imgs)
        loss_MSSSIM = MSSSIM_loss(real_imgs, fake_imgs)
        R_loss = loss_MSSSIM + loss_L1

        R_optimizer.zero_grad()
        R_loss.backward()   # assumed continuation: the source snippet
        R_optimizer.step()  # is cut off right after zero_grad()
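
In this example get_optimizer(net, mode=..., model_type=...) comes from utils.train_utils, whose body is likewise not shown. A plausible minimal sketch, assuming one Adam optimizer per sub-network and per-type learning rates in the config (the cfg.lr_* field names are invented for illustration):

import torch
from main.config import cfg

def get_optimizer(net, mode='train', model_type='G'):
    # One optimizer per sub-network: G = generator, D = discriminator,
    # R = refiner. The cfg.lr_* fields are assumptions.
    assert mode == 'train'
    lr = {'G': cfg.lr_G, 'D': cfg.lr_D, 'R': cfg.lr_R}[model_type]
    return torch.optim.Adam(net.parameters(), lr=lr, betas=(0.5, 0.999))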
Example #3
            # (excerpt from inside an evaluation loop; `path`, `out_layer`,
            #  `eval_results` and the other outer names are defined by the
            #  omitted surrounding code)
            model = tf.keras.models.load_model(path, compile=False)
            if out_layer:
                model = model_utils.remove_layers(model, out_layer)

            transformed_train, transformed_test = preprocess_and_batch(train=train, test=test,
                                                                       height=224, width=224,
                                                                       preprocess_implementation=args.preprocess,
                                                                       batch_size=None)

            if preprocess_method is not None:
                preprocess = get_preprocess_fn(preprocess_method)
                transformed_train = train.map(preprocess)
                transformed_test = test.map(preprocess)

            schedule = train_utils.learning_rate_schedule(args, num_examples)
            optimizer = train_utils.get_optimizer(args, schedule)

            if args.linear:
                # Freeze model
                model.trainable = False

            result = train_and_evaluate(embedding_model=model, train=transformed_train.batch(args.batch_size),
                                        test=transformed_test.batch(args.batch_size), optimizer=optimizer,
                                        epochs=args.epochs)
            print(result)
            for metric, value in result.items():
                eval_results[name][metric].append(value)
    indices.append(repetition_indices)

    data = []
            for name, results in eval_results.items():
                ...  # the source snippet is truncated here
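
train_and_evaluate is where the linear evaluation happens: with model.trainable = False, only a classification head on top of the embedding model is trained. A rough sketch under that assumption (the head shape, loss and metric names are illustrative):

import tensorflow as tf

def train_and_evaluate(embedding_model, train, test, optimizer, epochs,
                       num_classes=10):
    # Stack a single softmax head on the embedding model; when the
    # embedding model is frozen this amounts to linear evaluation.
    clf = tf.keras.Sequential([
        embedding_model,
        tf.keras.layers.Dense(num_classes, activation="softmax"),
    ])
    clf.compile(optimizer=optimizer,
                loss="sparse_categorical_crossentropy",
                metrics=["accuracy"])
    clf.fit(train, epochs=epochs, verbose=0)
    loss, acc = clf.evaluate(test, verbose=0)
    return {"test_loss": loss, "test_accuracy": acc}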
Example #4
import torch
from main.config import cfg
from main.model import get_model, get_discriminator, get_Dnet_2D
from data.dance_dataset import DanceDataset
from torch.utils.data import DataLoader
from utils.log_utils import global_logger
from utils.train_utils import update_lr, save_model, get_optimizer
from main.loss import *

model = get_model(mode='train')
discriminator = get_discriminator(mode='train')
Dance_dataset = DanceDataset(mode='train')
Dance_dataloader = DataLoader(dataset=Dance_dataset,
                              batch_size=cfg.batch_size,
                              shuffle=True,
                              num_workers=cfg.num_thread)
G_optimizer = get_optimizer(model, mode='train', model_type='G')
D_optimizer = get_optimizer(discriminator, mode='train', model_type='D')

for epoch in range(cfg.start_epoch, cfg.num_epoch):
    for i, (imgs, label_maps, bg_imgs) in enumerate(Dance_dataloader):
        real_imgs = imgs.cuda()
        label_maps = label_maps.cuda()
        bg_imgs = bg_imgs.cuda()
        input = torch.cat([label_maps, bg_imgs], dim=1)

        intermediates, fake_imgs = model(input)

        update_lr(epoch, G_optimizer)
        update_lr(epoch, D_optimizer)

        pred_fake = discriminator(torch.cat([label_maps, fake_imgs], dim=1))
        # the source snippet is cut off mid-call; the `.detach()` completion
        # below is the conventional one implied by the variable name
        pred_fake_detached = discriminator(
            torch.cat([label_maps, fake_imgs.detach()], dim=1))
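
The example stops mid-way through the adversarial update. For context, a conventional continuation inside the loop could look like the following; GAN_loss is an assumed helper pulled in by the star import from main.loss, not a confirmed name:

        # --- hypothetical continuation, not part of the source snippet ---
        pred_real = discriminator(torch.cat([label_maps, real_imgs], dim=1))

        # Generator step: fool the discriminator while matching the target.
        G_loss = GAN_loss(pred_fake, target_is_real=True) \
            + L1_loss(real_imgs, fake_imgs)
        G_optimizer.zero_grad()
        G_loss.backward()
        G_optimizer.step()

        # Discriminator step: the detached fake prediction keeps gradients
        # out of the generator.
        D_loss = GAN_loss(pred_real, target_is_real=True) \
            + GAN_loss(pred_fake_detached, target_is_real=False)
        D_optimizer.zero_grad()
        D_loss.backward()
        D_optimizer.step()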
Example #5
def train(cfg):

    batch_size = int(cfg['batch_size'])
    n_epochs = int(cfg['n_epochs'])
    sample_size = int(cfg['fixed_size'])

    #### DATA LOADING
    trans_train = []
    trans_val = []
    if cfg['rnd_sampling']:
        trans_train.append(RndSampling(sample_size, maintain_prop=False))
        #prop_vector=[1, 1]))
        trans_val.append(RndSampling(sample_size, maintain_prop=False))

    dataset, dataloader = get_dataset(cfg, trans=trans_train)
    val_dataset, val_dataloader = get_dataset(cfg,
                                              trans=trans_val,
                                              train=False)
    # summary for tensorboard
    writer = create_tb_logger(cfg)
    dump_code(cfg, writer.logdir)

    #### BUILD THE MODEL
    classifier = get_model(cfg)
    if cfg['verbose']:
        print(classifier)

    #### SET THE TRAINING
    optimizer = get_optimizer(cfg, classifier)

    lr_scheduler = get_lr_scheduler(cfg, optimizer)

    classifier.cuda()

    num_batch = len(dataset) // batch_size
    print('num of batches per epoch: %d' % num_batch)
    cfg['num_batch'] = num_batch

    n_iter = 0
    #best_pred = 0
    best_pred = 10
    best_epoch = 0
    current_lr = float(cfg['learning_rate'])
    for epoch in range(n_epochs + 1):

        # update bn decay
        if cfg['bn_decay'] and epoch != 0 and epoch % int(
                cfg['bn_decay_step']) == 0:
            update_bn_decay(cfg, classifier, epoch)

        loss, n_iter = train_ep(cfg, dataloader, classifier, optimizer, writer,
                                epoch, n_iter)

        ### validation during training
        if epoch % int(cfg['val_freq']) == 0 and cfg['val_in_train']:
            best_epoch, best_pred = val_ep(cfg, val_dataloader, classifier,
                                           writer, epoch, best_epoch,
                                           best_pred)

        # update lr
        if cfg['lr_type'] == 'step' and current_lr >= float(cfg['min_lr']):
            lr_scheduler.step()
        if cfg['lr_type'] == 'plateau':
            lr_scheduler.step(loss)

        current_lr = get_lr(optimizer)
        writer.add_scalar('train/lr', current_lr, epoch)

    writer.close()
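
Everything in train() is driven by a flat cfg mapping. An illustrative minimal configuration covering the keys the function reads directly (helpers such as get_dataset and get_model will need further entries of their own):

cfg = {
    'batch_size': 16,
    'n_epochs': 100,
    'fixed_size': 1024,     # sample size handed to RndSampling
    'rnd_sampling': True,
    'verbose': False,
    'bn_decay': False,
    'bn_decay_step': 20,
    'val_freq': 5,
    'val_in_train': True,
    'lr_type': 'step',
    'min_lr': 1e-5,
    'learning_rate': 1e-3,
}
train(cfg)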