Code Example #1
File: train.py  Project: aireveries/MUNIT
from PIL import Image
import torchvision as tv

def img_fn_to_tensor(fn):
    img = Image.open(fn).convert('RGB')
    return tv.transforms.ToTensor()(img)

# Start training
iterations = trainer.resume(checkpoint_directory, hyperparameters=config) if opts.resume else 0
while True:
    for it, (images_a, images_b) in enumerate(zip(train_loader_a, train_loader_b)):
        trainer.update_learning_rate()
        images_a, images_b = images_a.cuda().detach(), images_b.cuda().detach()


        # Time the update once every 1000 iterations; otherwise run it untimed
        if iterations % 1000 == 0:
            with Timer("Elapsed time in update: %f"):
                # Main training code
                trainer.dis_update(images_a, images_b, config)
                trainer.gen_update(images_a, images_b, config)
                torch.cuda.synchronize()
        else:
            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)
            torch.cuda.synchronize()
                
        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, train_writer)

        # Write images
        if (iterations + 1) % config['image_save_iter'] == 0:
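The excerpt breaks off at the image-writing step. Note that the Timer helper used in the timed branch is imported from the project's utilities and is not shown here; a minimal sketch of a context manager compatible with Timer("Elapsed time in update: %f") (an assumption, not necessarily the project's own code) could look like this:

import time

class Timer:
    """Context manager that prints msg % elapsed_seconds on exit."""
    def __init__(self, msg):
        self.msg = msg
        self.start_time = None

    def __enter__(self):
        self.start_time = time.time()
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        print(self.msg % (time.time() - self.start_time))

The torch.cuda.synchronize() call inside the with block is what makes the measurement meaningful: CUDA kernels run asynchronously, so without it the timer would mostly measure kernel launch overhead.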
Code Example #2
File: train.py  Project: zergey/MUNIT
def main(argv):
    (opts, args) = parser.parse_args(argv)
    cudnn.benchmark = True
    model_name = os.path.splitext(os.path.basename(opts.config))[0]

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']
    display_size = config['display_size']

    # Setup model and data loader
    trainer = MUNIT_Trainer(config)
    trainer.cuda()
    train_loader_a, train_loader_b, test_loader_a, test_loader_b = get_all_data_loaders(
        config)
    test_display_images_a = Variable(torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda(),
                                     volatile=True)
    test_display_images_b = Variable(torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda(),
                                     volatile=True)
    train_display_images_a = Variable(torch.stack(
        [train_loader_a.dataset[i] for i in range(display_size)]).cuda(),
                                      volatile=True)
    train_display_images_b = Variable(torch.stack(
        [train_loader_b.dataset[i] for i in range(display_size)]).cuda(),
                                      volatile=True)

    # Setup logger and output folders
    train_writer = SummaryWriter(os.path.join(opts.log, model_name))
    output_directory = os.path.join(opts.outputs, model_name)
    checkpoint_directory, image_directory = prepare_sub_folder(
        output_directory)
    shutil.copy(opts.config, os.path.join(
        output_directory, 'config.yaml'))  # copy config file to output folder

    # Start training
    iterations = trainer.resume(checkpoint_directory) if opts.resume else 0
    while True:
        for it, (images_a,
                 images_b) in enumerate(izip(train_loader_a, train_loader_b)):
            trainer.update_learning_rate()
            images_a, images_b = Variable(images_a.cuda()), Variable(
                images_b.cuda())

            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
                write_loss(iterations, trainer, train_writer)

            # Write images
            if (iterations + 1) % config['image_save_iter'] == 0:
                # Test set images
                image_outputs = trainer.sample(test_display_images_a,
                                               test_display_images_b)
                write_images(
                    image_outputs, display_size,
                    '%s/gen_test%08d.jpg' % (image_directory, iterations + 1))
                # Train set images
                image_outputs = trainer.sample(train_display_images_a,
                                               train_display_images_b)
                write_images(
                    image_outputs, display_size,
                    '%s/gen_train%08d.jpg' % (image_directory, iterations + 1))
                # HTML
                write_html(output_directory + "/index.html", iterations + 1,
                           config['image_save_iter'], 'images')
            if (iterations + 1) % config['image_save_iter'] == 0:
                image_outputs = trainer.sample(test_display_images_a,
                                               test_display_images_b)
                write_images(image_outputs, display_size,
                             '%s/gen.jpg' % image_directory)

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(checkpoint_directory, iterations)

            iterations += 1
            if iterations >= max_iter:
                return
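This variant still uses pre-0.4 PyTorch idioms (Variable(..., volatile=True)) and Python 2's izip. As a hedged sketch, the same fixed display batches could be built on current PyTorch and Python 3 like this:

# Python 3's built-in zip is already lazy, so izip is unnecessary.
# Tensor and Variable were merged in PyTorch 0.4; volatile=True is
# replaced by constructing the batch under torch.no_grad().
with torch.no_grad():
    test_display_images_a = torch.stack(
        [test_loader_a.dataset[i] for i in range(display_size)]).cuda()
    test_display_images_b = torch.stack(
        [test_loader_b.dataset[i] for i in range(display_size)]).cuda()

The later examples on this page, which call .cuda().detach() on the training batches, already follow the newer API.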
Code Example #3
File: train.py  Project: phonx/MUNIT
# Setup logger and output folders
model_name = os.path.splitext(os.path.basename(opts.config))[0]
train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder

# Start training
iterations = trainer.resume(checkpoint_directory, hyperparameters=config) if opts.resume else 0
while True:
    for it, (images_a, images_b) in enumerate(zip(train_loader_a, train_loader_b)):
        trainer.update_learning_rate()
        images_a, images_b = Variable(images_a.cuda()), Variable(images_b.cuda())

        # Main training code
        trainer.dis_update(images_a, images_b, config)
        trainer.gen_update(images_a, images_b, config)

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, train_writer)

        # Write images
        if (iterations + 1) % config['image_save_iter'] == 0:
            # Test set images
            image_outputs = trainer.sample(test_display_images_a, test_display_images_b)
            write_2images(image_outputs, display_size, image_directory, 'test_%08d' % (iterations + 1))
            # Train set images
            image_outputs = trainer.sample(train_display_images_a, train_display_images_b)
            write_2images(image_outputs, display_size, image_directory, 'train_%08d' % (iterations + 1))
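write_2images is imported from the project's utilities and is not shown in the excerpt. A rough sketch of an image-grid writer with the same call signature, built on torchvision (an assumption about its behaviour, not the project's exact code):

import torch
import torchvision.utils as vutils

def write_2images(image_outputs, display_size, image_directory, postfix):
    # image_outputs is a list of image batches; by convention the first
    # half holds a->b translations and the second half b->a translations.
    n = len(image_outputs)
    for name, outputs in (('a2b', image_outputs[:n // 2]),
                          ('b2a', image_outputs[n // 2:])):
        grid = vutils.make_grid(
            torch.cat([imgs[:display_size] for imgs in outputs], dim=0),
            nrow=display_size, padding=0, normalize=True)
        vutils.save_image(grid, '%s/gen_%s_%s.jpg' % (image_directory, name, postfix))

With postfix values such as 'test_%08d' % (iterations + 1), this writes files like gen_a2b_test_00010000.jpg into the image directory.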
Code Example #4
shutil.copy(opts.config,
            os.path.join(output_directory,
                         'config.yaml'))  # copy config file to output folder

# Start training
iterations = trainer.resume(checkpoint_directory,
                            hyperparameters=config) if opts.resume else 0
while True:
    for it, (images_a,
             images_b) in enumerate(zip(train_loader_a, train_loader_b)):
        trainer.update_learning_rate()
        images_a, images_b = images_a.cuda().detach(), images_b.cuda().detach()

        with Timer("update_time: %f"):
            # Main training code
            loss_dis_total = trainer.dis_update(images_a, images_b, config)
            loss_gen_total, loss_recon_x, loss_recon_s, loss_recon_c, loss_cycrecon, loss_vgg = trainer.gen_update(
                images_a, images_b, config)
            torch.cuda.synchronize()
        # loss_dis_total = trainer.dis_update(images_a, images_b, config)
        # loss_gen_total, loss_recon_x, loss_recon_s, loss_recon_c, loss_cycrecon, loss_vgg= trainer.gen_update(images_a, images_b, config)
        print(
            " | dis_los: %9f | gen_los_total: %9f | recon_x_los: %9f | recon_s_los: %9f | recon_c_los: %9f | cycle_los: %9f | vgg_los: %9f "
            % (loss_dis_total, loss_gen_total, loss_recon_x, loss_recon_s,
               loss_recon_c, loss_cycrecon, loss_vgg))

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, train_writer)
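write_loss is the common logging helper in these scripts; it pushes the trainer's scalar losses into the SummaryWriter. One plausible sketch, assuming the trainer stores its losses as plain attributes whose names contain 'loss':

def write_loss(iterations, trainer, train_writer):
    # Log every non-callable trainer attribute that looks like a loss value.
    members = [attr for attr in dir(trainer)
               if 'loss' in attr
               and not attr.startswith('__')
               and not callable(getattr(trainer, attr))]
    for m in members:
        train_writer.add_scalar(m, getattr(trainer, m), iterations + 1)

This keeps the training loop free of per-loss bookkeeping: any new loss attribute added to the trainer is picked up automatically.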
Code Example #5
File: train.py  Project: EdisonCCL/IOSUDA
        images_2 = images_list[index_2]
        labels_1 = labels_list[index_1]
        labels_2 = labels_list[index_2]

        use_1 = use_list[index_1]
        use_2 = use_list[index_2]
        images_1, images_2 = Variable(images_1.cuda()), Variable(images_2.cuda())

        # Main training code.
        if (ep + 1) <= int(0.75 * epochs):

            # If in Full Training mode.
            trainer.set_sup_trainable(True)
            trainer.set_gen_trainable(True)

            dis_loss += trainer.dis_update(images_1, images_2, index_1, index_2, config)
            gen_loss += trainer.gen_update(images_1, images_2, index_1, index_2, config)

        else:
            # If in Supervision Tuning mode.
            trainer.set_sup_trainable(True)
            trainer.set_gen_trainable(False)

        labels_1 = labels_1.to(dtype=torch.long)
        labels_1 = Variable(labels_1.cuda(), requires_grad=False)       
        labels_2 = labels_2.to(dtype=torch.long)
        labels_2 = Variable(labels_2.cuda(), requires_grad=False)

        if (ep + 1) <= 10:
            temp_loss = trainer.sup_update(images_1, images_2, labels_1, labels_2, index_1, index_2, use_1, use_2, ep, config)
            seg_loss += temp_loss[0]
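set_sup_trainable and set_gen_trainable are IOSUDA-specific switches that freeze or unfreeze parts of the model between the full-training and supervision-tuning phases. Assuming the trainer keeps the supervised head and the generators as separate modules (the attribute names sup_net and gen below are hypothetical), the toggles could be implemented as trainer methods along these lines:

def set_gen_trainable(self, trainable):
    # Freeze or unfreeze all generator parameters.
    for p in self.gen.parameters():
        p.requires_grad = trainable

def set_sup_trainable(self, trainable):
    # Same switch for the supervised segmentation head.
    for p in self.sup_net.parameters():
        p.requires_grad = trainable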
Code Example #6
File: train.py  Project: WangJerry95/MUNIT
model_name = os.path.splitext(os.path.basename(opts.config))[0]
train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder

# Start training
iterations = trainer.resume(checkpoint_directory, hyperparameters=config) if opts.resume else 0
while True:
    for it, (images_a, (images_b, label_b)) in enumerate(zip(train_loader_a, train_loader_b)):
        trainer.update_learning_rate()
        images_a, images_b, label_b = images_a.cuda().detach(), images_b.cuda().detach(), label_b.cuda().detach()

        with Timer("Elapsed time in update: %f"):
            # Main training code
            trainer.dis_update(images_a, images_b, config, label_b=label_b)
            trainer.gen_update(images_a, images_b, config, label_b=label_b)
            torch.cuda.synchronize()

        # Dump training stats in log file
        if (iterations + 1) % config['log_iter'] == 0:
            print("Iteration: %08d/%08d" % (iterations + 1, max_iter))
            write_loss(iterations, trainer, train_writer)

        # Write images
        if (iterations + 1) % config['image_save_iter'] == 0:
            with torch.no_grad():
                test_image_outputs = trainer.sample(test_display_images_a, test_display_images_b)
                train_image_outputs = trainer.sample(train_display_images_a, train_display_images_b)
            write_2images(test_image_outputs, display_size, image_directory, 'test_%08d' % (iterations + 1))
            write_2images(train_image_outputs, display_size, image_directory, 'train_%08d' % (iterations + 1))
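prepare_sub_folder only has to create the two output subdirectories and hand back their paths. A minimal sketch consistent with how it is used in these examples:

import os

def prepare_sub_folder(output_directory):
    # Create <output_directory>/images and <output_directory>/checkpoints.
    image_directory = os.path.join(output_directory, 'images')
    checkpoint_directory = os.path.join(output_directory, 'checkpoints')
    for d in (image_directory, checkpoint_directory):
        os.makedirs(d, exist_ok=True)
    return checkpoint_directory, image_directory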
Code Example #7
def main(argv):
    (opts, args) = parser.parse_args(argv)
    cudnn.benchmark = True

    # Load experiment setting
    config = get_config(opts.config)
    max_iter = config['max_iter']

    # Setup logger and output folders
    output_subfolders = prepare_logging_folders(config['output_root'],
                                                config['experiment_name'])
    logger = create_logger(
        os.path.join(output_subfolders['logs'], 'train_log.log'))
    shutil.copy(opts.config,
                os.path.join(
                    output_subfolders['logs'],
                    'config.yaml'))  # copy config file to output folder

    tb_logger = tensorboard_logger.Logger(output_subfolders['logs'])

    logger.info('============ Initialized logger ============')
    logger.info('Config File: {}'.format(opts.config))

    # Setup model and data loader
    trainer = MUNIT_Trainer(config, opts)
    trainer.cuda()
    loaders = get_all_data_loaders(config)
    val_display_images = next(iter(loaders['val']))
    logger.info('Test images: {}'.format(val_display_images['A_paths']))

    # Start training
    iterations = trainer.resume(opts.model_path,
                                hyperparameters=config) if opts.resume else 0

    while True:
        for it, images in enumerate(loaders['train']):
            trainer.update_learning_rate()
            images_a = images['A']
            images_b = images['B']

            images_a, images_b = Variable(images_a.cuda()), Variable(
                images_b.cuda())

            # Main training code
            trainer.dis_update(images_a, images_b, config)
            trainer.gen_update(images_a, images_b, config)

            # Dump training stats in log file
            if (iterations + 1) % config['log_iter'] == 0:
                for tag, value in trainer.loss.items():
                    tb_logger.scalar_summary(tag, value, iterations)

                val_output_imgs = trainer.sample(
                    Variable(val_display_images['A'].cuda()),
                    Variable(val_display_images['B'].cuda()))

                tb_imgs = []
                for imgs in val_output_imgs.values():
                    tb_imgs.append(torch.cat(torch.unbind(imgs, 0), dim=2))

                tb_logger.image_summary(list(val_output_imgs.keys()), tb_imgs,
                                        iterations)

            if (iterations + 1) % config['print_iter'] == 0:
                logger.info(
                    "Iteration: {:08}/{:08} Discriminator Loss: {:.4f} Generator Loss: {:.4f}"
                    .format(iterations + 1, max_iter, trainer.loss['D/total'],
                            trainer.loss['G/total']))

            # Write images
            # if (iterations + 1) % config['image_save_iter'] == 0:
            #     val_output_imgs = trainer.sample(
            #         Variable(val_display_images['A'].cuda()),
            #         Variable(val_display_images['B'].cuda()))
            #
            #     for key, imgs in val_output_imgs.items():
            #         key = key.replace('/', '_')
            #         write_images(imgs, config['display_size'], '{}/{}_{:08}.jpg'.format(output_subfolders['images'], key, iterations+1))
            #
            #     logger.info('Saved images to: {}'.format(output_subfolders['images']))

            # Save network weights
            if (iterations + 1) % config['snapshot_save_iter'] == 0:
                trainer.save(output_subfolders['models'], iterations)

            iterations += 1
            if iterations >= max_iter:
                return
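get_config, used at the top of every variant on this page, simply loads the experiment's YAML file into a dictionary. A minimal sketch, assuming a plain YAML config (the config.yaml copy in the setup code suggests exactly that):

import yaml

def get_config(config_path):
    # Parse the YAML experiment file into a plain dict of hyperparameters.
    with open(config_path, 'r') as stream:
        return yaml.safe_load(stream)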