def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config', type=str, default='configs/OT3+++R.yaml', help='Path to the config file.')
    parser.add_argument('--output_path', type=str, default='.', help="outputs path")
    parser.add_argument("--resume",default='True', action="store_true") #change to True is you need to retrain from pre-train model
    opts = parser.parse_args()

    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)

    # dataset set up
    dataset = My3DDataset(opts=config)
    train_loader = DataLoader(dataset=dataset, batch_size=config['batch_size'], shuffle=True, num_workers=config['nThreads'])


    config['vgg_model_path'] = opts.output_path

    trainer = Models(config)
    trainer.cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/outputs/logs", model_name))
    checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
    shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory, hyperparameters=config, need_opt=False) if opts.resume else 0
    max_iter = int(config['n_ep'] * len(dataset) / config['batch_size']) + 1

    while True:
        for it, out_data in enumerate(train_loader):
            for j in range(len(out_data)):
                out_data[j] = out_data[j].cuda().detach()
            if config['models_name'] == 'dynamic_human':
                Xa_out, Xb_out, Yb_out, Xb_prev_out, Xb_next_out, Xa_mask, Yb_mask, rand_y_out, rand_y_mask = out_data
            trainer.update_learning_rate()
            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dynamic_gen_update(Xa_out, Xb_out, Yb_out, Xb_prev_out, Xb_next_out,
                                           Xa_mask, Yb_mask, rand_y_out, rand_y_mask, config)
                #torch.cuda.synchronize()
            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            if iterations % config['image_display_iter'] == 0:
                write_image2display(iterations, trainer, train_writer)

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)
            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
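Every example in this listing gets its checkpoint and image folders from a prepare_sub_folder helper, which is never shown in the snippets themselves. The sketch below is an assumption based on how the helper is called in MUNIT-style utils.py files, not the exact implementation of any of these projects.

import os

def prepare_sub_folder(output_directory):
    # Assumed behaviour: create 'images' and 'checkpoints' sub-directories under
    # the output folder and return (checkpoint_directory, image_directory),
    # matching the unpacking used in the examples above and below.
    image_directory = os.path.join(output_directory, 'images')
    checkpoint_directory = os.path.join(output_directory, 'checkpoints')
    os.makedirs(image_directory, exist_ok=True)
    os.makedirs(checkpoint_directory, exist_ok=True)
    return checkpoint_directory, image_directory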
Example #2
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory,
                                hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a,
                 images_b) in enumerate(zip(train_loader_a, train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
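Several of these training loops wrap the update step in "with Timer(...)". A minimal sketch of such a context manager, assuming it simply prints the formatted elapsed time of the wrapped block (each project ships its own version in utils.py):

import time

class Timer:
    def __init__(self, msg):
        # msg is a printf-style format string, e.g. "Elapsed time in update: %f"
        self.msg = msg
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()

    def __exit__(self, exc_type, exc_value, exc_tb):
        print(self.msg % (time.time() - self.start_time))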
Example #3
def main(argv):
    (opts, args) = parser.parse_args(argv)
    cudnn.benchmark = True
    model_name = os.path.splitext(os.path.basename(opts.config))[0]

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']

    # Setup model and data loader
    trainer = MUNIT_Trainer(config)
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    test_display_images_a = Variable(torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda(),
                                     volatile=True)
    test_display_images_b = Variable(torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda(),
                                     volatile=True)
    train_display_images_a = Variable(torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda(),
                                      volatile=True)
    train_display_images_b = Variable(torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda(),
                                      volatile=True)

    # Setup logger and output folders
    train_writer = tensorboard.SummaryWriter(os.path.join(
        opts.log, model_name))
    output_directory = os.path.join(opts.outputs, model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory) if opts.resume else 0
    while True:
        for it, (images_a,
                 images_b) in enumerate(izip(train_loader_a, train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = Variable(images_a.cuda()), Variable(
                images_b.cuda())

            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                # Test set images
                image_outputs = trainer.sample(test_display_images_a,
                                               test_display_images_b)
                write_images(
                    image_outputs, display_size,
                    '%s/gen_test%08d.jpg' % (image_directory, iterations + 1))
                # Train set images
                image_outputs = trainer.sample(train_display_images_a,
                                               train_display_images_b)
                write_images(
                    image_outputs, display_size,
                    '%s/gen_train%08d.jpg' % (image_directory, iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')
            if (iterations + 1) % config['image_display_iter'] == 0:
                image_outputs = trainer.sample(test_display_images_a,
                                               test_display_images_b)
                write_images(image_outputs, display_size,
                             '%s/gen.jpg' % image_directory)

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)

            iterations += 1
            if iterations >= max_iter:
                return
Example #4
parser.add_argument('--w_recon_img', type=float, default=10)
parser.add_argument('--w_recon_field', type=float, default=10)
parser.add_argument('--w_tv', type=float, default=0.000005)

args = parser.parse_args()

if __name__ == '__main__':
    SEED = 0
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint_dir, image_dir = prepare_sub_folder(args.output_path,
                                                   delete_first=True)

    dataset = make_dataset(args)
    dataloader = DataLoader(dataset=dataset,
                            batch_size=args.batch_size,
                            shuffle=True,
                            drop_last=False,
                            num_workers=args.num_workers)

    warper = Warper(args)
    warper.to(device)
    warper.train()

    paras = list(warper.parameters())
    opt = optim.Adam([p for p in paras if p.requires_grad],
                     lr=args.lr,
Example #5
parser.add_argument('--lr', type=float, default=0.0001)
parser.add_argument('--style_dim', type=int, default=8)
parser.add_argument('--w_recon_img', type=float, default=10)
parser.add_argument('--w_cyc_s', type=float, default=1)
parser.add_argument('--w_cyc_c', type=float, default=1)
args = parser.parse_args()

if __name__ == '__main__':
    SEED = 0
    random.seed(SEED)
    np.random.seed(SEED)
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    checkpoint_dir, image_dir = prepare_sub_folder(args.output_path)

    dataset = make_dataset(args)
    dataloader = DataLoader(dataset=dataset,
                            batch_size=args.batch_size,
                            shuffle=True,
                            drop_last=False,
                            num_workers=args.num_workers)

    model = Styler(args)
    model.to(device)
    model.train()

    gen_para = list(model.gen.parameters())
    dis_para = list(model.dis.parameters())
    gen_opt = optim.Adam([p for p in gen_para if p.requires_grad],
Example #6
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        type=str,
                        default='configs/edges2handbags_folder.yaml',
                        help='Path to the config file.')
    parser.add_argument('--output_path',
                        type=str,
                        default='.',
                        help="outputs path")
    # resume option; any truthy default (e.g. '730000' or '150000') makes the
    # script resume from the latest checkpoint even without passing --resume
    parser.add_argument("--resume", default='150000', action="store_true")
    parser.add_argument('--trainer',
                        type=str,
                        default='MUNIT',
                        help="MUNIT|UNIT")
    opts = parser.parse_args()

    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    if opts.trainer == 'MUNIT':
        trainer = MUNIT_Trainer(config)
    elif opts.trainer == 'UNIT':
        trainer = UNIT_Trainer(config)
    else:
        sys.exit("Only support MUNIT|UNIT")
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory,
                                hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a,
                 images_b) in enumerate(zip(train_loader_a, train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                torch.cuda.synchronize()

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

            if (iterations + 1) % config['image_display_iter'] == 0:
                with torch.no_grad():
                    image_outputs = trainer.sample(train_display_images_a,
                                                   train_display_images_b)
                write_2images(image_outputs, display_size, image_directory,
                              'train_current')

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)

            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
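Each script restarts from "iterations = trainer.resume(checkpoint_directory, ...)". The resume logic lives inside the trainers and is not part of these snippets; below is a rough sketch of the usual MUNIT-style convention, assuming checkpoints are saved as '<prefix>_XXXXXXXX.pt' with the iteration count encoded in the filename. The helper name is hypothetical.

import os

def get_last_checkpoint(checkpoint_directory, prefix='gen'):
    # Hypothetical helper: pick the newest '<prefix>_XXXXXXXX.pt' file and
    # recover the iteration count from the 8-digit suffix of its name.
    candidates = sorted(f for f in os.listdir(checkpoint_directory)
                        if f.startswith(prefix) and f.endswith('.pt'))
    if not candidates:
        return None, 0
    last = candidates[-1]
    iterations = int(os.path.splitext(last)[0][-8:])
    return os.path.join(checkpoint_directory, last), iterations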
Example #7
def main():
    cudnn.benchmark = True
    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    trainer = UNIT_Trainer(config)
    if torch.cuda.is_available():
        trainer.cuda(config['gpuID'])
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    writer = SummaryWriter(os.path.join(opts.output_path + "/logs",
                                        model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    print('start training !!')
    # Start training
    iterations = trainer.resume(checkpoint_directory,
                                hyperparameters=config) if opts.resume else 0

    TraindataA = data_prefetcher(train_loader_a)
    TraindataB = data_prefetcher(train_loader_b)
    testdataA = data_prefetcher(test_loader_a)
    testdataB = data_prefetcher(test_loader_b)

    while True:
        dataA = TraindataA.next()
        dataB = TraindataB.next()
        if dataA is None or dataB is None:
            TraindataA = data_prefetcher(train_loader_a)
            TraindataB = data_prefetcher(train_loader_b)
            dataA = TraindataA.next()
            dataB = TraindataB.next()
        with Timer("Elapsed time in update: %f"):
            # Main training code
            for _ in range(3):
                trainer.content_update(dataA, dataB, config)
            trainer.dis_update(dataA, dataB, config)
            trainer.gen_update(dataA, dataB, config)
            # torch.cuda.synchronize()
        trainer.update_learning_rate()
        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, writer)
        if (iterations + 1) % config['image_save_iter'] == 0:
            testa = testdataA.next()
            testb = testdataB.next()
            if testa is None or testb is None or testa.size(
                    0) != display_size or testb.size(0) != display_size:
                testdataA = data_prefetcher(test_loader_a)
                testdataB = data_prefetcher(test_loader_b)
                testa = testdataA.next()
                testb = testdataB.next()
            with torch.no_grad():
                test_image_outputs = trainer.sample(testa, testb)
                train_image_outputs = trainer.sample(dataA, dataB)
            if test_image_outputs is not None and train_image_outputs is not None:
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

        if (iterations + 1) % config['image_display_iter'] == 0:
            with torch.no_grad():
                image_outputs = trainer.sample(dataA, dataB)
            if image_outputs is not None:
                write_2images(image_outputs, display_size, image_directory,
                              'train_current')

        # Save network weights
        if (iterations + 1) % config['snapshot_save_iter'] == 0:
            trainer.save(checkpoint_directory, iterations)

        iterations += 1
        if iterations >= max_iter:
            writer.close()
            sys.exit('Finish training')
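Example #7 feeds the trainer through a data_prefetcher instead of iterating the DataLoader directly. The class is not shown in the snippet; this sketch is an assumption based on the common CUDA-prefetching pattern and on how it is used above (next() returns a GPU batch, or None once the loader is exhausted):

import torch

class data_prefetcher:
    def __init__(self, loader):
        self.loader = iter(loader)
        self.stream = torch.cuda.Stream()
        self.next_batch = None
        self.preload()

    def preload(self):
        try:
            self.next_batch = next(self.loader)
        except StopIteration:
            self.next_batch = None
            return
        with torch.cuda.stream(self.stream):
            # Copy the upcoming batch to the GPU on a side stream so the copy
            # can overlap with the current training step.
            self.next_batch = self.next_batch.cuda(non_blocking=True)

    def next(self):
        torch.cuda.current_stream().wait_stream(self.stream)
        batch = self.next_batch
        if batch is not None:
            self.preload()
        return batch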
Example #8
    opts = set_mode("test", opts)
    opts.data.loaders.batch_size = 1
    val_loader = get_loader(opts)
    dataset_size = len(val_loader)

    print("#testing images = %d" % dataset_size)

    comet_exp = Experiment(workspace=opts.comet.workspace,
                           project_name=opts.comet.project_name)
    if comet_exp is not None:
        comet_exp.log_asset(file_data=str(root / opt_file),
                            file_name=root / opt_file)
        comet_exp.log_parameters(opts)

    checkpoint_directory, image_directory = prepare_sub_folder(
        opts.train.output_dir)

    opts.comet.exp = comet_exp

    model = create_model(opts)
    model.setup()

    total_steps = 0

    for i, data in enumerate(val_loader):
        with Timer("Elapsed time in update " + str(i) + ": %f"):
            total_steps += opts.data.loaders.batch_size
            model.set_input(Dict(data))
            model.save_test_images([Dict(data)], total_steps)
Example #9
cudnn.benchmark = True

# Load experiment setting
config = get_config(opts.config)

root_dir = config['test_root']
model_name = config['trainer']
logging.basicConfig(filename=os.path.join(
    opts.output_path + "/logs", model_name + '_' + opts.tag + '.log'),
                    level=logging.DEBUG)
logging.debug('This message should go to the log file')
logging.info('So should this')
output_directory = os.path.join(opts.output_path + "/outputs",
                                model_name + '_' + opts.tag)
checkpoint_directory, image_directory, landmark_directory = prepare_sub_folder(
    output_directory, is_test=True)
landmark_directory = 'datasets/face/test_keypoints/keypoints'
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml'))

f = open(os.path.join(root_dir, config['pca_path']), 'rb')
pca = pickle.load(f)

trainer = LipTrainer(config, is_train=False)
trainer.to(config['device'])
state_dict_lstm = torch.load(opts.checkpoint_lstm)
trainer.audio2exp.load_state_dict(state_dict_lstm['audio2exp'])

test_loader = get_data_loader_list(config, split='test')
iterations = trainer.resume(checkpoint_directory,
                            param=config) if opts.resume else 0
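get_config appears in almost every example; in these MUNIT-derived repositories it is typically nothing more than a thin YAML loader. A minimal sketch under that assumption:

import yaml

def get_config(config_path):
    # Read the experiment settings (batch_size, log_iter, ...) from a YAML file
    # and return them as a plain dict, which is how the examples index into it.
    with open(config_path, 'r') as stream:
        return yaml.safe_load(stream)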
Example #10
   
l1_w          = config['l1_w']
vgg_w         = config['vgg_w']
   
gen_ckpt      = config['gen_ckpt']
   
vgg_ckpt      = config['vgg_ckpt']

num_sample    = config['num_sample']
num_eval_iter = config['num_eval_iter']

dataset       = config['src_dataset_train']

model_name = os.path.splitext(os.path.basename(opts.config))[0]
output_directory = os.path.join("outputs", model_name)
checkpoint_directory, _, evaluation_directory = prepare_sub_folder(output_directory)
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml'))


def main():
    # Get data loader
    loader = get_loader(config, train=False)

    num_test = len(loader.dataset)

    # Generator network for target domain
    if img_size == 64:
        G = Generator64().cuda()
    else:
        G = Generator32().cuda()
    g_ckpt = torch.load('models/' + gen_ckpt)
Example #11
                    type=str,
                    default='.',
                    help='output path of the tensorboard file')
args = parser.parse_args()

os.environ['CUDA_VISIBLE_DEVICES'] = '1'
config = get_config(args.config)
torch.backends.cudnn.benchmark = True

# tensorboard
model_name = os.path.splitext(os.path.basename(args.config))[0].split('_')[0]
train_writer = SummaryWriter(
    os.path.join(args.output_path + '/logs', model_name))

output_dir = os.path.join(args.output_path + '/outputs', model_name)
checkpoint_dir, image_dir = prepare_sub_folder(output_dir)
config['checkpoint_dir'] = checkpoint_dir

# create train and val dataloader
for phase, dataset_opt in config['datasets'].items():
    if phase == 'train':
        train_set = create_dataset(dataset_opt)
        train_size = int(math.ceil(len(train_set) / dataset_opt['batch_size']))
        print('Number of training images: {:,d}, iters: {:,d}'.format(
            len(train_set), train_size))
        total_iters = int(dataset_opt['n_iter'])
        total_epochs = int(math.ceil(total_iters / train_size))
        print('Total epochs needed: {:d} for iters {:,d}'.format(
            total_epochs, total_iters))
        train_loader = create_dataloader(train_set, dataset_opt)
    elif phase == 'val':
Example #12
def main():
    from utils import get_all_data_loaders, prepare_sub_folder, write_html, write_loss, get_config, write_2images, Timer
    import argparse
    from torch.autograd import Variable
    from trainer import MUNIT_Trainer, UNIT_Trainer
    import torch.backends.cudnn as cudnn
    import torch

    # try:
    #     from itertools import izip as zip
    # except ImportError:  # will be 3.x series
    #     pass

    import os
    import sys
    import tensorboardX
    import shutil

    os.environ["CUDA_VISIBLE_DEVICES"] = str(0)

    parser = argparse.ArgumentParser()
    parser.add_argument('--config',
                        type=str,
                        default='configs/edges2handbags_folder.yaml',
                        help='Path to the config file.')
    parser.add_argument('--output_path',
                        type=str,
                        default='.',
                        help="outputs path")
    parser.add_argument("--resume", action="store_true")
    parser.add_argument('--trainer',
                        type=str,
                        default='MUNIT',
                        help="MUNIT|UNIT")
    opts = parser.parse_args()

    cudnn.benchmark = True
    '''
    Note: https://www.pytorchtutorial.com/when-should-we-set-cudnn-benchmark-to-true/
        In most cases, setting this flag lets cuDNN's built-in auto-tuner search for the
        most efficient algorithms for the current configuration, improving runtime
        performance.
        1.  If the input dimensions and types of the network change little between
            iterations, setting torch.backends.cudnn.benchmark = True can speed things up.
        2.  If the input changes at every iteration, cuDNN will search for the best
            configuration again each time, which can actually reduce efficiency.
    '''

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']
    config['vgg_model_path'] = opts.output_path

    # Setup model and data loader
    if opts.trainer == 'MUNIT':
        trainer = MUNIT_Trainer(config)
    elif opts.trainer == 'UNIT':
        trainer = UNIT_Trainer(config)
    else:
        sys.exit("Only support MUNIT|UNIT")
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    train_display_images_a = torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda()
    train_display_images_b = torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

    # Setup logger and output folders
    model_name = os.path.splitext(os.path.basename(opts.config))[0]
    train_writer = tensorboardX.SummaryWriter(
        os.path.join(opts.output_path + "/logs", model_name))
    output_directory = os.path.join(opts.output_path + "/outputs", model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory,
                                hyperparameters=config) if opts.resume else 0
    while True:
        for it, (images_a,
                 images_b) in enumerate(zip(train_loader_a, train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = images_a.cuda().detach(), images_b.cuda(
            ).detach()

            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                torch.cuda.synchronize()

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                with torch.no_grad():
                    test_image_outputs = trainer.sample(
                        test_display_images_a, test_display_images_b)
                    train_image_outputs = trainer.sample(
                        train_display_images_a, train_display_images_b)
                write_2images(test_image_outputs, display_size,
                              image_directory, 'test_%08d' % (iterations + 1))
                write_2images(train_image_outputs, display_size,
                              image_directory, 'train_%08d' % (iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')

            if (iterations + 1) % config['image_display_iter'] == 0:
                with torch.no_grad():
                    image_outputs = trainer.sample(train_display_images_a,
                                                   train_display_images_b)
                write_2images(image_outputs, display_size, image_directory,
                              'train_current')

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)

            iterations += 1
            if iterations >= max_iter:
                sys.exit('Finish training')
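Examples #6, #7 and #12 save their sampled translations through write_2images. The helper is not included in the snippets; the sketch below is an assumption about its usual MUNIT-style behaviour (tile the a->b and b->a outputs into grid images under the image directory), and _write_image_grid is a hypothetical internal helper.

import torch
import torchvision.utils as vutils

def write_2images(image_outputs, display_size, image_directory, postfix):
    # Assumed split: the first half of the outputs are a->b translations and
    # the second half are b->a, each saved as one tiled grid image.
    n = len(image_outputs) // 2
    _write_image_grid(image_outputs[:n], display_size,
                      '%s/gen_a2b_%s.jpg' % (image_directory, postfix))
    _write_image_grid(image_outputs[n:], display_size,
                      '%s/gen_b2a_%s.jpg' % (image_directory, postfix))

def _write_image_grid(image_outputs, display_size, file_name):
    image_tensor = torch.cat([images[:display_size] for images in image_outputs], 0)
    grid = vutils.make_grid(image_tensor, nrow=display_size, padding=0, normalize=True)
    vutils.save_image(grid, file_name)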
Example #13
File: train.py Project: phonx/MUNIT
elif opts.trainer == 'UNIT':
    trainer = UNIT_Trainer(config)
else:
    sys.exit("Only support MUNIT|UNIT")
trainer.cuda()
train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(config)
train_display_images_a = Variable(torch.stack([train_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True)
train_display_images_b = Variable(torch.stack([train_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True)
test_display_images_a = Variable(torch.stack([test_loader_a.dataset[i] for i in range(display_size)]).cuda(), volatile=True)
test_display_images_b = Variable(torch.stack([test_loader_b.dataset[i] for i in range(display_size)]).cuda(), volatile=True)

# Setup logger and output folders
model_name = os.path.splitext(os.path.basename(opts.config))[0]
train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder

# Start training
iterations = trainer.resume(checkpoint_directory, hyperparameters=config) if opts.resume else 0
while True:
    for it, (images_a, images_b) in enumerate(zip(train_loader_a, train_loader_b)):
        trainer.update_learning_rate()
        images_a, images_b = Variable(images_a.cuda()), Variable(images_b.cuda())

        # Main training code
        trainer.dis_update(images_a, images_b, config)
        trainer.gen_update(images_a, images_b, config)

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
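Example #13 is cut off at the logging check; in the full scripts shown earlier that branch calls write_loss(iterations, trainer, train_writer). A sketch of what that helper usually does in these MUNIT-style codebases (an assumption, not the verbatim utility): every scalar attribute on the trainer whose name looks like a loss is written to tensorboard.

def write_loss(iterations, trainer, train_writer):
    # Log every non-callable trainer attribute whose name suggests a loss term.
    members = [m for m in dir(trainer)
               if not callable(getattr(trainer, m)) and not m.startswith('__')
               and ('loss' in m or 'grad' in m or 'nwd' in m)]
    for m in members:
        train_writer.add_scalar(m, getattr(trainer, m), iterations + 1)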
Example #14
# Setup model and data loader
trainer = ClassifierTrainer(config)
trainer.cuda()

train_loader, test_loader = get_all_data_loaders(
    config,
    train_file_name=opts.train_file_name,
    test_file_name=opts.test_file_name)

# Setup logger and output folders
model_name = os.path.splitext(os.path.basename(opts.config))[0]
train_writer = tensorboardX.SummaryWriter(
    os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory = prepare_sub_folder(output_directory)
shutil.copy(opts.config,
            os.path.join(output_directory,
                         'config.yaml'))  # copy config file to output folder

to_pil = transforms.ToPILImage()

if opts.resume:
    epochs, min_loss, max_acc = trainer.resume(checkpoint_directory,
                                               device='cuda:{}'.format(
                                                   opts.gpu_id))
elif opts.fine_tune:
    epochs, min_loss, max_acc = trainer.resume(prepare_sub_folder(
        os.path.join(opts.pretrained_path + "/outputs", model_name)),
                                               device='cuda:{}'.format(
                                                   opts.gpu_id))
Example #15
trainer = LipTrainer(config)

trainer.to(config['device'])

train_loader = get_data_loader_list(config, split='train')
eval_loader = get_data_loader_list(config, split='eval')

model_name = config['trainer']
# train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
logging.basicConfig(filename=os.path.join(opts.output_path + "/logs",
                                          model_name + '.log'),
                    level=logging.DEBUG)
logging.debug('This message should go to the log file')
logging.info('So should this')
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory = os.path.join(prepare_sub_folder(output_directory), '..')
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml'))

iterations = trainer.resume(checkpoint_directory,
                            param=config) if opts.resume else 0

min_loss = 10000000

while True:
    for id, data in enumerate(train_loader):
        trainer.train()
        trainer.update_learning_rate()
        audio = data[0].to(config['device']).detach()
        parameter = data[1].to(config['device']).detach()

        # Main training code