Beispiel #1
0
 def build_model(self, inputs=None):
     """Build the forward graph: input -> linear light -> Generator -> gamma.

     Args:
         inputs: optional tensor to use as network input; if None a feed
             placeholder of self.input_shape is created instead.
     Returns:
         The gamma-encoded output tensor (also stored as self.outputs).
     """
     # inputs: create a placeholder, or alias the provided tensor
     if inputs is None:
         self.inputs = tf.placeholder(self.dtype, self.input_shape, name='Input')
     else:
         self.inputs = tf.identity(inputs, name='Input')
         self.inputs.set_shape(self.input_shape)
     inputs = self.inputs
     # convert gamma-encoded input to linear light
     inputs = layers.Gamma2Linear(inputs, self.transfer)
     # input_range == 2: rescale [0, 1] to [-1, 1] for the network
     if self.input_range == 2:
         inputs = inputs * 2 - 1
     # forward pass
     self.generator = Generator('Generator', self.config)
     outputs = self.generator(inputs, reuse=None)
     # outputs: map network range [-1, 1] back to [0, 1]
     if self.output_range == 2:
         # outputs = tf.tanh(outputs)
         outputs = tf.multiply(outputs + 1, 0.5)
     # convert back to gamma-encoded values
     self.outputs = layers.Linear2Gamma(outputs, self.transfer)
     self.outputs = tf.identity(self.outputs, name='Output')
     # separate gamma encoding for the loss when it uses a different transfer
     self.outputs_gamma = (self.outputs if self.transfer == self.loss_transfer
         else layers.Linear2Gamma(outputs, self.loss_transfer))
     # all the saver variables
     self.svars = self.generator.svars
     # all the restore variables
     self.rvars = self.generator.rvars
     # return outputs
     return self.outputs
Beispiel #2
0
def main():
    """Load the trained generator and save a grid of sampled fake images."""
    cudnn.benchmark = True
    global args
    args = parser.parse_args()
    print(args)

    # Make sure both output directories exist.
    for directory in (args.model_path, args.sample_path):
        if not os.path.exists(directory):
            os.makedirs(directory)

    # Restore the trained generator and switch it to inference mode.
    g_path = os.path.join(args.model_path,
                          'generator-%d.pkl' % (args.num_epochs))
    generator = Generator()
    generator.load_state_dict(torch.load(g_path))
    generator.eval()
    if torch.cuda.is_available():
        generator = generator.cuda()

    # Draw latent noise, run the generator, and save the denormalized grid.
    noise = to_variable(torch.randn(args.sample_size, args.z_dim))
    fake_images = generator(noise)
    sample_path = os.path.join(args.sample_path, 'fake_samples-final.png')
    torchvision.utils.save_image(denorm(fake_images.data), sample_path)

    print("Saved sampled images to '%s'" % sample_path)
Beispiel #3
0
 def build_model(self, inputs=None, target_domains=None):
     """Build the domain-translation graph: image + target-domain labels
     -> Generator -> named output tensor.

     Args:
         inputs: optional input tensor; a placeholder of self.input_shape
             is created when None.
         target_domains: optional int64 label tensor; a placeholder of
             self.domain_shape is created when None.
     Returns:
         The generator output tensor (also stored as self.outputs).
     """
     # inputs
     if inputs is None:
         self.inputs = tf.placeholder(self.dtype,
                                      self.input_shape,
                                      name='Input')
     else:
         self.inputs = tf.identity(inputs, name='Input')
         self.inputs.set_shape(self.input_shape)
     # target domains
     if target_domains is None:
         self.target_domains = tf.placeholder(tf.int64,
                                              self.domain_shape,
                                              name='Domain')
     else:
         self.target_domains = tf.identity(target_domains, name='Domain')
         self.target_domains.set_shape(self.domain_shape)
     # forward pass: the generator is conditioned on the target domains
     self.generator = Generator('Generator', self.config)
     self.outputs = self.generator(self.inputs,
                                   self.target_domains,
                                   reuse=None)
     # outputs
     self.outputs = tf.identity(self.outputs, name='Output')
     # all the saver variables
     self.svars = self.generator.svars
     # all the restore variables
     self.rvars = self.generator.rvars
     # return outputs
     return self.outputs
Beispiel #4
0
    def __init__(self, bsize):
        """Set up the GAN pair, Adam optimizers and step-wise LR schedulers."""
        self.bsize = bsize
        self.device = 'cuda:0'

        # Networks (single-GPU; DataParallel variants kept for reference).
        self.G = Generator().cuda()
        self.D = NLayerDiscriminator(3).cuda()
        init_weights(self.G)
        init_weights(self.D)
        #self.G = torch.nn.DataParallel(self.G).cuda()
        #self.D = torch.nn.DataParallel(self.D).cuda()
        #self.zmap = torch.nn.DataParallel(self.zmap).cuda()

        self.optimizer_G = torch.optim.Adam(self.G.parameters(),
                                            lr=2e-4, betas=(0.5, 0.999))
        self.optimizer_D = torch.optim.Adam(self.D.parameters(),
                                            lr=2e-4, betas=(0.5, 0.999))
        self.optimizers = [self.optimizer_G, self.optimizer_D]

        self.metric = None
        self.reg_param = 10.

        def lambda_rule(iteration):
            # Piecewise LR factor: 1.0 below 100k iterations, 0.5 up to
            # 500k, then linear decay towards 0 at 1M iterations.
            if iteration < 100000:
                return 1
            if iteration < 500000:
                return 0.5
            return 0.5 * (1 - (iteration - 500000) / float(1000000 - 500000 + 1))

        self.schedulers = [lr_scheduler.LambdaLR(opt, lr_lambda=lambda_rule)
                           for opt in self.optimizers]
def set_model(args):
    """Configure models and loss"""
    # Instantiate the three networks on GPU.
    encoder = Encoder(out_dim=args.z_dim).cuda()
    generator = Generator(z_dim=args.z_dim).cuda()
    critic = Critic().cuda()

    # Wrap in DataParallel when several GPUs are visible.
    if torch.cuda.device_count() > 1:
        print("Use device count: {}".format(torch.cuda.device_count()))
        encoder, generator, critic = (torch.nn.DataParallel(net)
                                      for net in (encoder, generator, critic))
        for net in (encoder, generator, critic):
            net.cuda()
        cudnn.benchmark = True

    models = Models(encoder, generator, critic)

    # One Adam optimizer per network, each with its own learning rate.
    optim_encoder = torch.optim.Adam(encoder.parameters(), lr=args.lr_encoder)
    optim_generator = torch.optim.Adam(generator.parameters(),
                                       lr=args.lr_generator)
    optim_critic = torch.optim.Adam(critic.parameters(), lr=args.lr_critic)

    # criterion
    l2_reconstruct_criterion = nn.MSELoss().cuda()

    return models, optim_encoder, optim_generator, optim_critic, l2_reconstruct_criterion
Beispiel #6
0
    def __init__(self, cfg):
        """Build the translator network and load its pretrained weights on CPU."""
        self.translator = Generator(cfg.in_ch, cfg.out_ch, cfg.ngf,
                                    cfg.n_blocks)
        self.transform_fn = self.get_transform_fn(cfg.img_size)

        # Weights are always loaded onto the CPU.
        state = torch.load(cfg.generator_weight_path, map_location="cpu")
        self.translator.load_state_dict(state)
Beispiel #7
0
def main():
    """Translate domain-A test images with the trained generator G and save
    side-by-side (real_A | fake_B | real_B) comparison strips."""
    test_image_dataset = image_preprocessing(args.dataset)
    data_loader = DataLoader(test_image_dataset,
                             batch_size=args.batch_size,
                             shuffle=False,
                             num_workers=args.num_workers)
    G = Generator()
    checkpoint = torch.load(args.model_path)

    # NOTE(review): G is wrapped in DataParallel before load_state_dict, so
    # the checkpoint keys presumably carry the 'module.' prefix — confirm
    # against the training script that saved 'G_model'.
    if torch.cuda.is_available():
        G = nn.DataParallel(G)

    G.load_state_dict(checkpoint['G_model'])
    G.eval()

    if not os.path.exists(args.save_path):
        os.mkdir(args.save_path)

    if torch.cuda.is_available():
        G = G.cuda()

    for step, data in enumerate(data_loader):
        real_A = to_variable(data['A'])
        real_B = to_variable(data['B'])
        fake_B = G(real_A)

        # Concatenate along width (dim 3): input | translation | target.
        batch_image = torch.cat((torch.cat((real_A, fake_B), 3), real_B), 3)
        for i in range(args.batch_size):
            # NOTE(review): the file name is string-concatenated onto
            # save_path — assumes save_path ends with a path separator.
            torchvision.utils.save_image(
                denorm(batch_image[i]),
                args.save_path + '{result_name}_{step}.jpg'.format(
                    result_name=args.result_name,
                    step=step * args.batch_size + i))
    def __init__(self, config):
        """Create GAN components: networks, BCE loss, optimizers, labels."""
        self.config = config

        # Prefer the GPU when one is available.
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        self.generator = Generator(config).to(self.device)
        self.discriminator = Discriminator(config).to(self.device)

        self.loss_func = nn.BCELoss()

        # One Adam optimizer per network, configured independently.
        self.generator_optimizer = optim.Adam(
            params=self.generator.parameters(),
            lr=config["generator_learning_rate"],
            betas=config["generator_betas"])
        self.discriminator_optimizer = optim.Adam(
            params=self.discriminator.parameters(),
            lr=config["discriminator_learning_rate"],
            betas=config["discriminator_betas"])

        # Constant per-batch target labels: 1.0 for real, 0.0 for fake.
        batch = config["batch_size"]
        self.true_labels = torch.ones(batch,
                                      dtype=torch.float32,
                                      device=self.device)
        self.fake_labels = torch.zeros(batch,
                                       dtype=torch.float32,
                                       device=self.device)
def get_network(LEARNING_RATE: float, device: str):
    """Build the GAN pair plus BCE loss and one Adam optimizer per network.

    Returns:
        (generator, discriminator, criterion, d_optimizer, g_optimizer)
    """
    generator = Generator().to(device)
    discriminator = Discriminator().to(device)

    criterion = nn.BCELoss()
    # Both networks share the same learning rate.
    d_optimizer = optim.Adam(discriminator.parameters(), lr=LEARNING_RATE)
    g_optimizer = optim.Adam(generator.parameters(), lr=LEARNING_RATE)

    return generator, discriminator, criterion, d_optimizer, g_optimizer
Beispiel #10
0
 def __init__(self):
     """Load the pretrained style/content generator for CPU inference."""
     # net = Generator(ch_style=3, ch_content=1).cuda()
     # device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
     # Forced CPU inference; the commented lines above show the GPU variant.
     device = torch.device("cpu")
     net = Generator(ch_style=3, ch_content=1).to(device)
     net.load_state_dict(
         torch.load('model_weights/latest_G.pth',
                    map_location=torch.device('cpu')))
     net.eval()  # inference mode
     self.net = net
Beispiel #11
0
def main():
    """Restore the trained encoder/generator and save one batch of images."""
    args = set_args()
    encoder = Encoder(out_dim=args.z_dim)
    generator = Generator(z_dim=args.z_dim)

    # Load pretrained weights for both networks.
    encoder = load_model(args, encoder, 'encoder')
    generator = load_model(args, generator, 'generator')

    loader = set_loader(args)
    save_one_batch_img(args, loader, generator, encoder)
Beispiel #12
0
def main():
    """Run both CycleGAN generators (G: A->B, F: B->A) over the validation
    split and save per-image comparison grids."""
    os.environ["CUDA_VISIBLE_DEVICES"] = opt.gpus
    test_image_dataset = image_preprocessing(opt.dataset, 'val')
    data_loader = DataLoader(test_image_dataset,
                             batch_size=opt.batch_size,
                             shuffle=False,
                             num_workers=opt.num_workers)

    G = Generator(ResidualBlock, layer_count=9)
    F = Generator(ResidualBlock, layer_count=9)

    if torch.cuda.is_available():
        G = nn.DataParallel(G)
        F = nn.DataParallel(F)

        G = G.cuda()
        F = F.cuda()

    # Restore both generators from the checkpoint; remaining slots
    # (discriminators/optimizers) are discarded for evaluation.
    G, F, _, _, _, _, _, _, _ = load_ckp(opt.model_path, G, F)
    G.eval()
    F.eval()

    if not os.path.exists(opt.save_path):
        os.mkdir(opt.save_path)

    for step, data in enumerate(tqdm(data_loader)):
        real_A = to_variable(data['A'])
        real_B = to_variable(data['B'])

        fake_B = G(real_A)
        fake_A = F(real_B)

        # 2x2 grid: top row (real_A | real_B), bottom row (fake_A | fake_B).
        batch_image = torch.cat((torch.cat(
            (real_A, real_B), 3), torch.cat((fake_A, fake_B), 3)), 2)
        for i in range(batch_image.shape[0]):
            # NOTE(review): file name is concatenated onto save_path —
            # assumes save_path ends with a path separator.
            torchvision.utils.save_image(
                denorm(batch_image[i]),
                opt.save_path + '{result_name}_{step}.jpg'.format(
                    result_name=opt.result_name,
                    step=step * opt.batch_size + i))
Beispiel #13
0
    def __init__(self):
        """Construct the GAN pair, loss, optimizers and fixed demo latents."""
        self.generator = Generator()
        self.generator.to(Params.Device)

        self.discriminator = Discriminator()
        self.discriminator.to(Params.Device)

        self.loss_fn = nn.BCELoss()

        # Adam with shared beta1 from Params and default beta2 = 0.999.
        self.optimizer_g = torch.optim.Adam(
            self.generator.parameters(), Params.LearningRateG,
            betas=(Params.Beta, 0.999))
        self.optimizer_d = torch.optim.Adam(
            self.discriminator.parameters(), Params.LearningRateD,
            betas=(Params.Beta, 0.999))

        # Fixed noise batch reused to visualize training progress.
        self.exemplar_latent_vectors = torch.randn(
            (64, Params.LatentVectorSize), device=Params.Device)
Beispiel #14
0
def evaluate_network(args):
    """Run the trained generator on a single image and save 'output.jpg'.

    Args:
        args: namespace with gpu_no, check_point, unet_flag, image, imsize,
            cropsize and cencrop attributes.
    """
    device = torch.device('cuda' if args.gpu_no >= 0 else 'cpu')
    check_point = torch.load(args.check_point)

    network = Generator(args.unet_flag).to(device).eval()
    network.load_state_dict(check_point['g_state_dict'])

    image = imload(args.image, args.imsize, args.cropsize,
                   args.cencrop).to(device)

    # FIX: inference only — disable autograd so no gradient buffers are
    # allocated for the forward pass.
    with torch.no_grad():
        output = network(image)

    imsave(output, 'output.jpg')
Beispiel #15
0
    def _initialise_networks(self):
        """Create generator/discriminator, their Adam optimizers, and the
        per-block upsamplers used to bring intermediate outputs to scale."""
        self.generator = Generator(final_size=self.final_size)
        self.generator.generate_network()
        self.g_optimizer = Adam(self.generator.parameters(), lr=0.001, betas=(0, 0.99))

        self.discriminator = Discriminator(final_size=self.final_size)
        self.discriminator.generate_network()
        self.d_optimizer = Adam(self.discriminator.parameters(), lr=0.001, betas=(0, 0.99))

        # Channel count is capped by the generator's maximum.
        self.num_channels = min(self.generator.num_channels,
                                self.generator.max_channels)
        # One upsampler per generator block, largest scale factor first.
        self.upsample = [Upsample(scale_factor=2**i)
                for i in reversed(range(self.generator.num_blocks))]
Beispiel #16
0
    def __init__(self, opt):
        """Build MUNIT: two generators, two discriminators, random style
        codes, and one Adam optimizer per network group.

        Args:
            opt: options namespace (ngf, style_dim, mlp_dim, ndf,
                display_size, lr, beta1, weight_delay).
        """
        super(MUNIT, self).__init__()

        # generators and discriminators
        self.gen_a = Generator(opt.ngf, opt.style_dim, opt.mlp_dim)
        self.gen_b = Generator(opt.ngf, opt.style_dim, opt.mlp_dim)
        self.dis_a = Discriminator(opt.ndf)
        self.dis_b = Discriminator(opt.ndf)
        # random style code
        self.s_a = torch.randn(opt.display_size,
                               opt.style_dim,
                               1,
                               1,
                               requires_grad=True).cuda()
        self.s_b = torch.randn(opt.display_size,
                               opt.style_dim,
                               1,
                               1,
                               requires_grad=True).cuda()

        # optimizers
        dis_params = list(self.dis_a.parameters()) + list(
            self.dis_b.parameters())
        gen_params = list(self.gen_a.parameters()) + list(
            self.gen_b.parameters())
        # BUG FIX: torch.optim.Adam takes `betas` (a 2-tuple) and
        # `weight_decay`; the original `beta=` / `weight_delay=` keywords
        # would raise TypeError at construction time.
        self.dis_opt = torch.optim.Adam(dis_params,
                                        lr=opt.lr,
                                        betas=(opt.beta1, 0.999),
                                        weight_decay=opt.weight_delay)
        self.gen_opt = torch.optim.Adam(gen_params,
                                        lr=opt.lr,
                                        betas=(opt.beta1, 0.999),
                                        weight_decay=opt.weight_delay)

        # network weight initialization
        self.apply(weight_init('kaiming'))
        self.dis_a.apply(weight_init('gaussian'))
        self.dis_b.apply(weight_init('gaussian'))
Beispiel #17
0
def main(_):
    """CLI entry point: parse the --option flag and dispatch it on Generator.

    Only the 'train' action is supported; any other value prints an error.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--option',
                        dest='option',
                        type=str,
                        default='train',
                        help='actions: train')
    args = parser.parse_args()
    if args.option not in ['train']:
        print('invalid option: ', args.option)
        # FIX: the message previously advertised 'test' and 'predict',
        # which are not accepted; list only the supported action.
        print("Please input a valid option: train")
    else:
        # Dispatch e.g. model.train() by attribute name.
        model = Generator(tf.Session(), configure())
        getattr(model, args.option)()
def main():
    """Run the trained generator over a dataset and save per-batch
    real_A | fake_B | real_B comparison images."""
    # Pre-settings
    cudnn.benchmark = True
    global args
    args = parser.parse_args()
    print(args)

    dataset = ImageFolder(args)
    data_loader = data.DataLoader(dataset=dataset,
                                  batch_size=args.batchSize,
                                  shuffle=True,
                                  num_workers=2)

    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    if not os.path.exists(args.sample_path):
        os.makedirs(args.sample_path)

    g_path = os.path.join(args.model_path,
                          'generator-%d.pkl' % (args.num_epochs))

    # Load pre-trained model
    generator = Generator(args.batchSize)
    generator.load_state_dict(torch.load(g_path))
    generator.eval()

    if torch.cuda.is_available():
        generator = generator.cuda()

    total_step = len(data_loader)  # For Print Log
    for i, sample in enumerate(data_loader):
        # Pick source/target according to the translation direction.
        AtoB = args.which_direction == 'AtoB'
        input_A = sample['A' if AtoB else 'B']
        input_B = sample['B' if AtoB else 'A']

        real_A = to_variable(input_A)
        fake_B = generator(real_A)
        real_B = to_variable(input_B)

        # print the log info
        print('Validation[%d/%d]' % (i + 1, total_step))
        # save the sampled images, concatenated along width (dim 3)
        res = torch.cat((torch.cat((real_A, fake_B), dim=3), real_B), dim=3)
        torchvision.utils.save_image(
            denorm(res.data),
            os.path.join(args.sample_path, 'Generated-%d.png' % (i + 1)))
def generate_image(attr):
    """Generate face image(s) conditioned on a 5-dim attribute vector.

    Args:
        attr: array-like of 5 attribute values; reshaped to (1, 5, 1, 1).
    Returns:
        Path of the last image written under 'static/image', or None when
        the generator produced no images.
    """
    attr = np.reshape(attr, (1, 5, 1, 1))
    gen = Generator(6)
    chainer.serializers.load_npz('./results/gen', gen)

    # Sample one latent vector and run the generator at full blend (alpha=1).
    z = gen.z(1)
    z = chainer.Variable(z)
    img = gen(z, attr, alpha=1.0)

    path = 'static/image'
    if not os.path.exists(path):
        os.makedirs(path)

    # BUG FIX: the original drew a single random id before the loop, so every
    # image in `img` overwrote the same file; it also left `filename` unbound
    # when `img` was empty. Draw a fresh id per image instead.
    filename = None
    for im in img:
        rand = np.random.randint(0, 100000)
        filename = os.path.join(path, 'fake{}.jpg'.format(rand))
        save_image(im, filename)
    return filename
Beispiel #20
0
def init_net(depth, dropout, window, cgan):
    """Build generator, (conditional) discriminator and autoencoder, moving
    them to the GPU when one is available.

    Returns:
        (gen, discr, ae, device)
    """
    # Conditional GANs get an extra input channel.
    input_shape = (2 if cgan else 1, window)

    # Create the 3 networks
    gen = Generator(depth, dropout, verbose=0)
    if cgan:
        discr = ConditionalDiscriminator(depth, dropout, input_shape,
                                         verbose=0)
    else:
        discr = Discriminator(depth, dropout, input_shape, verbose=0)
    ae = AutoEncoder(depth, dropout, verbose=0)

    # Put them on cuda if available
    cuda_ok = torch.cuda.is_available()
    if cuda_ok:
        gen.cuda()
        discr.cuda()
        ae.cuda()
    device = torch.device("cuda" if cuda_ok else "cpu")

    print("Using : " + str(device))
    print("Network initialized\n")

    return gen, discr, ae, device
Beispiel #21
0
def main():
    """InfoGAN entry point: build G/D, optionally resume from a checkpoint,
    then either train or generate depending on args.mode."""
    # Generator input is the concatenated discrete + continuous code size.
    G = Generator(args.dim_disc + args.dim_cont)
    D = Discriminator()

    # Resume from a saved [G_state, D_state] checkpoint if one exists.
    if os.path.isfile(args.model):
        model = torch.load(args.model)
        G.load_state_dict(model[0])
        D.load_state_dict(model[1])

    if use_cuda:
        G.cuda()
        D.cuda()

    if args.mode == "train":
        G, D = train(G, D)
        # Persist both state dicts together after training.
        if args.model:
            torch.save([G.state_dict(), D.state_dict()],
                       args.model,
                       pickle_protocol=4)
    elif args.mode == "gen":
        gen(G)
Beispiel #22
0
    def _init_network(self):
        """Create generator and discriminator, apply DCGAN weight init, and
        optionally wrap them in DataParallel for multi-GPU runs.

        Returns:
            (generator, discriminator) modules on self.device.
        """
        def init_weights(m):
            # DCGAN init (Radford et al. 2016): conv weights ~ N(0, 0.02),
            # BatchNorm scale ~ N(1, 0.02) with zero bias.
            if isinstance(m, nn.Conv2d):
                nn.init.normal_(m.weight.data, 0.0, 0.02)
            if isinstance(m, nn.BatchNorm2d):
                # BUG FIX: BatchNorm weights were drawn around 0.0, which
                # nearly zeroes all activations; DCGAN centers them at 1.0.
                nn.init.normal_(m.weight.data, 1.0, 0.02)
                nn.init.constant_(m.bias.data, 0.0)

        generator = Generator(DCGAN_Config).to(self.device)
        discriminator = Discriminator(DCGAN_Config).to(self.device)

        if self.device.type == 'cuda' and DCGAN_Config['ngpu'] > 1:
            generator = nn.DataParallel(generator, list(range(DCGAN_Config['ngpu'])))
            discriminator = nn.DataParallel(discriminator, list(range(DCGAN_Config['ngpu'])))

        generator.apply(init_weights)
        discriminator.apply(init_weights)

        print(generator)
        print(discriminator)

        return generator, discriminator
Beispiel #23
0
def test():
    """Run the trained generator over the test set and write PNG results."""
    # Load the network weights.
    Network = Generator()
    param = torch.load(opt.test_model_name)
    Network.load_state_dict(param)
    Network = Network.to(device)
    Network.eval()

    # Load the dataset (batch size 1, in file order).
    testloader = DataLoader(
    TestDataset(opt.test_dataset),
    batch_size=1,
    shuffle=False,
    num_workers=opt.n_cpu
    )

    with torch.no_grad():
        for i, imgs in enumerate(testloader):
            imgs = imgs.to(device)
            outputs = Network(imgs)
            # (C, H, W) -> (H, W, C) for OpenCV.
            print(outputs[0].cpu().numpy().transpose(1, 2, 0).shape)
            # Save the images
            # NOTE(review): assumes outputs are in [0, 1] (hence * 255) and
            # need a channel swap before cv2.imwrite — confirm the
            # generator's output range and channel order.
            cv2.imwrite("results/" + opt.network_name + "/" + str(i) + ".png", cv2.cvtColor(outputs[0].cpu().numpy().transpose(1, 2, 0), cv2.COLOR_BGR2RGB) * 255)
Beispiel #24
0
def generate(savedir, model_path, num):
    """Generate `num` images with a trained generator and save them as PNGs.

    Args:
        savedir: output directory; a '(n)' suffix is appended if it exists.
        model_path: path to the generator's saved state_dict.
        num: number of images to generate.
    """
    nz = 16
    width = 28
    height = 28
    channel = 1

    device = 'cuda'

    model = Generator(nz, width, height, channel)
    model = nn.DataParallel(model)

    model.module.load_state_dict(torch.load(model_path))

    model.eval()  # inference mode (affects Dropout etc.)

    # Pick a fresh output directory: append "(n)" until the name is unused.
    if os.path.exists(savedir):
        n = 1
        while 1:
            if os.path.exists('{}({})'.format(savedir, n)):
                n += 1
            else:
                savedir = '{}({})'.format(savedir, n)
                break
    os.makedirs(savedir, exist_ok=True)

    # FIX: generation is inference-only — disable autograd so no gradient
    # buffers are kept for the forward passes.
    with torch.no_grad():
        for i in range(num):
            # Draw the latent input.
            z = torch.randn(1, nz, 1, 1)
            z = z.to(device)

            gene_img = model(z)

            # Save the generator's output image.
            torchvision.utils.save_image(gene_img,
                                         "{}/{:04}.png".format(savedir, i + 1))
# Create directories for images, tensorboard results and saved models.
if not args.dry_run:
    if not os.path.exists(EXPERIMENT_DIR):
        os.makedirs(EXPERIMENT_DIR)  # Set up root experiment directory.
    os.makedirs(args.save_image_dir)
    os.makedirs(args.tensorboard_dir)
    os.makedirs(args.save_model_dir)
    WRITER = SummaryWriter(args.tensorboard_dir)  # Set up TensorBoard.
else:
    print('Dry run! Just for testing, data is not saved')

DEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Set up the GAN.
critic_model = Critic().to(DEVICE)
generator_model = Generator().to(DEVICE)

# Load pre-trained models if they are provided.
if args.load_critic_model_path:
    critic_model.load_state_dict(torch.load(args.load_critic_model_path))

if args.load_generator_model_path:
    generator_model.load_state_dict(torch.load(args.load_generator_model_path))

# Set up Adam optimizers for both models.
# NOTE(review): betas=(0, 0.9) matches the WGAN-GP recommendation —
# presumably intentional given the critic/generator pairing; confirm.
critic_optimizer = optim.Adam(critic_model.parameters(),
                              lr=args.learning_rate,
                              betas=(0, 0.9))
generator_optimizer = optim.Adam(generator_model.parameters(),
                                 lr=args.learning_rate,
                                 betas=(0, 0.9))
Beispiel #26
0
                            transforms.ToTensor(),
                            transforms.Normalize((0.5, 0.5, 0.5),
                                                 (0.5, 0.5, 0.5))
                        ]),
                        mode='train')
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchsize,
                                         shuffle=True,
                                         num_workers=opt.num_workers)

# print(dataloader.__len__())

# img=dataset.__getitem__(10)
# print(img['A'].size())

# CycleGAN networks: two generators (A->B, B->A) and two discriminators.
netG_A2B = Generator(opt.input_nc, opt.output_nc, opt.ngf)
netG_B2A = Generator(opt.input_nc, opt.output_nc, opt.ngf)
netD_A = Discriminator(opt.input_nc, opt.ndf)
netD_B = Discriminator(opt.input_nc, opt.ndf)

# Print model summaries (on CPU) for one generator and one discriminator.
summary(netG_A2B, input_size=(3, 256, 256), device='cpu')
summary(netD_A, input_size=(3, 256, 256), device='cpu')

# initialize weights
netG_A2B.apply(weights_init)
netG_B2A.apply(weights_init)
netD_A.apply(weights_init)
netD_B.apply(weights_init)

#print(netG_A2B)
Beispiel #27
0
                        np.concatenate([
                            in_resized[i] * 255, o * 255,
                            out_sample_images[i] * 255
                        ],
                                       axis=1)) for i, o in enumerate(preds)
                ]
            },
            commit=False)


from network import Generator, Discriminator, get_gan_network

# Input (low-res) and output (high-res) image shapes, channels last.
shape = (config.input_width, config.input_height, 3)
image_shape = (config.output_width, config.output_height, 3)

generator = Generator(shape).generator()
discriminator = Discriminator(image_shape).discriminator()

#adam = optimizers.Adam(lr=1E-4, beta_1=0.9, beta_2=0.999, epsilon=1e-08)
# Generator trains on MSE; discriminator on binary cross-entropy.
generator.compile(loss="mse", optimizer="adam")
discriminator.compile(loss="binary_crossentropy", optimizer="adam")

# Combined GAN model, tracked with a perceptual-distance metric.
gan = get_gan_network(discriminator,
                      shape,
                      generator,
                      "adam",
                      gan_metric=perceptual_distance)

for e in range(1, config.num_epochs + 1):
    print('-' * 15, 'Epoch %d' % e, '-' * 15)
    for _ in range(config.steps_per_epoch):
Beispiel #28
0
def train(savedir, train_list_A, train_list_B, test_list_A, test_list_B, root, epochs, batch_size):
    """Train a CycleGAN: generators G_A2B/G_B2A and discriminators D_A/D_B.

    Args:
        savedir: output root; a '(n)' suffix is appended when it exists.
        train_list_A, train_list_B: CSV files with a 'Path' column (train).
        test_list_A, test_list_B: CSV files with a 'Path' column (test).
        root: image root directory passed to LoadDataset.
        epochs: number of training epochs.
        batch_size: mini-batch size.
    """
    # number of image channels
    channel = 1
    
    # weight of the cycle-consistency loss term
    cycle_rate = 10.0
    # weight of the identity loss term (0 disables it)
    iden_rate = 0

    # Adam settings for the generators (defaults: lr=0.001, betas=(0.9, 0.999), weight_decay=0)
    G_opt_para = {'lr': 0.0002, 'betas': (0.5, 0.9), 'weight_decay': 0}
    # Adam settings for the discriminators
    D_A_opt_para = {'lr': 0.0002, 'betas': (0.5, 0.9), 'weight_decay': 0}
    D_B_opt_para = {'lr': 0.0002, 'betas': (0.5, 0.9), 'weight_decay': 0}

    device = 'cuda'

    myloss = MyLoss()

    # Pick a fresh output directory: append "(n)" until the name is unused.
    if os.path.exists(savedir):
        num = 1
        while 1:
            if os.path.exists('{}({})'.format(savedir, num)):
                num += 1
            else:
                savedir = '{}({})'.format(savedir, num)
                break
    os.makedirs(savedir, exist_ok=True)
    os.makedirs('{}/generating_image'.format(savedir), exist_ok=True)
    os.makedirs('{}/model'.format(savedir), exist_ok=True)
    os.makedirs('{}/loss'.format(savedir), exist_ok=True)

    G_A2B_model, G_B2A_model, D_A_model, D_B_model = Generator(channel), Generator(channel), Discriminator(channel), Discriminator(channel)
    G_A2B_model, G_B2A_model, D_A_model, D_B_model = nn.DataParallel(G_A2B_model), nn.DataParallel(G_B2A_model), nn.DataParallel(D_A_model), nn.DataParallel(D_B_model)
    G_A2B_model, G_B2A_model, D_A_model, D_B_model = G_A2B_model.to(device), G_B2A_model.to(device), D_A_model.to(device), D_B_model.to(device)

    # optimizer setup (both generators share one optimizer)
    G_para = torch.optim.Adam(chain(G_A2B_model.parameters(), G_B2A_model.parameters()), lr=G_opt_para['lr'], betas=G_opt_para['betas'], weight_decay=G_opt_para['weight_decay'])
    D_A_para = torch.optim.Adam(D_A_model.parameters(), lr=D_A_opt_para['lr'], betas=D_A_opt_para['betas'], weight_decay=D_A_opt_para['weight_decay'])
    D_B_para = torch.optim.Adam(D_B_model.parameters(), lr=D_B_opt_para['lr'], betas=D_B_opt_para['betas'], weight_decay=D_B_opt_para['weight_decay'])

    # containers for the loss history
    result = {}
    result['G_log_loss'] = []
    result['D_A_log_loss'] = []
    result['D_B_log_loss'] = []

    df_A = pd.read_csv(train_list_A, usecols=['Path'])
    df_B = pd.read_csv(train_list_B, usecols=['Path'])
    df_test_A = pd.read_csv(test_list_A, usecols=['Path'])
    df_test_A = df_test_A.sample(frac=1)
    df_test_B = pd.read_csv(test_list_B, usecols=['Path'])
    df_test_B = df_test_B.sample(frac=1)

    train_dataset = LoadDataset(df_A, df_B, root, transform=Trans())
    train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True, drop_last=True)
    test_dataset = LoadDataset(df_test_A[0:batch_size], df_test_B[0:batch_size], root, transform=Trans())
    test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=batch_size, shuffle=False, drop_last=True)

    output_env('{}/env.txt'.format(savedir), batch_size, G_opt_para, D_A_opt_para, D_B_opt_para, G_A2B_model, G_B2A_model, D_A_model, D_B_model)

    for epoch in range(epochs):
        print('########## epoch : {}/{} ##########'.format(epoch+1, epochs))

        G_log_loss, D_A_log_loss, D_B_log_loss = [], [], []

        for img_A, img_B in tqdm(train_loader):
            # move the batch to the GPU
            img_A = img_A.to(device)
            img_B = img_B.to(device)

            # translate real images with the generators
            fake_img_A = G_B2A_model(img_B)
            fake_img_B = G_A2B_model(img_A)

            # cycle: translate the generated images back
            rec_img_A = G_B2A_model(fake_img_B)
            rec_img_B = G_A2B_model(fake_img_A)

            # discriminator predictions on real and fake images
            real_pred_A = D_A_model(img_A)
            real_pred_B = D_B_model(img_B)
            fake_pred_A = D_A_model(fake_img_A)
            fake_pred_B = D_B_model(fake_img_B)
            
            # identity mapping: feed same-domain images (skipped when disabled)
            if iden_rate == 0:
                iden_img_A = None
                iden_img_B = None
            else:
                iden_img_A = G_B2A_model(img_A)
                iden_img_B = G_A2B_model(img_B)

            # generator loss
            G_loss = myloss.G_loss(fake_pred_A, fake_pred_B, torch.tensor(1.0).expand_as(fake_pred_A).to(device), img_A, img_B, rec_img_A, rec_img_B, iden_img_A, iden_img_B, alpha=cycle_rate, beta=iden_rate)
            G_log_loss.append(G_loss.item())
            # discriminator losses (real target 1.0, fake target 0.0)
            D_A_loss = myloss.D_A_loss(real_pred_A, torch.tensor(1.0).expand_as(real_pred_A).to(device), fake_pred_A, torch.tensor(0.0).expand_as(fake_pred_A).to(device))
            D_A_log_loss.append(D_A_loss.item())
            D_B_loss = myloss.D_B_loss(real_pred_B, torch.tensor(1.0).expand_as(real_pred_B).to(device), fake_pred_B, torch.tensor(0.0).expand_as(fake_pred_B).to(device))
            D_B_log_loss.append(D_B_loss.item())

            # update generator weights (graph retained for the D backwards)
            G_para.zero_grad()
            G_loss.backward(retain_graph=True)
            G_para.step()
            # update discriminator weights
            D_A_para.zero_grad()
            D_A_loss.backward(retain_graph=True)
            D_A_para.step()
            D_B_para.zero_grad()
            D_B_loss.backward()
            D_B_para.step()

        result['G_log_loss'].append(statistics.mean(G_log_loss))
        result['D_A_log_loss'].append(statistics.mean(D_A_log_loss))
        result['D_B_log_loss'].append(statistics.mean(D_B_log_loss))
        print('G_loss = {} , D_A_loss = {} , D_B_loss = {}'.format(result['G_log_loss'][-1], result['D_A_log_loss'][-1], result['D_B_log_loss'][-1]))

        # append the epoch's losses to the log file
        with open('{}/loss/log.txt'.format(savedir), mode='a') as f:
            f.write('##### Epoch {:03} #####\n'.format(epoch+1))
            f.write('G: {}, D_A: {}, D_B: {}\n'.format(result['G_log_loss'][-1], result['D_A_log_loss'][-1], result['D_B_log_loss'][-1]))
        
        # every 10 epochs: save models and sample output images
        if (epoch+1)%10 == 0:
            # save the models (unwrap DataParallel via .module)
            torch.save(G_A2B_model.module.state_dict(), '{}/model/G_A2B_model_{}.pth'.format(savedir, epoch+1))
            torch.save(G_B2A_model.module.state_dict(), '{}/model/G_B2A_model_{}.pth'.format(savedir, epoch+1))
            torch.save(D_A_model.module.state_dict(), '{}/model/D_A_model_{}.pth'.format(savedir, epoch+1))
            torch.save(D_B_model.module.state_dict(), '{}/model/D_B_model_{}.pth'.format(savedir, epoch+1))

            G_A2B_model.eval()
            G_B2A_model.eval()

            # no_grad: gradients are not needed at test time, saves memory
            with torch.no_grad():
                for test_img_A, test_img_B in test_loader:
                    fake_img_test_A = G_B2A_model(test_img_B)
                    fake_img_test_B = G_A2B_model(test_img_A)
            torchvision.utils.save_image(fake_img_test_A[:batch_size], "{}/generating_image/A_epoch_{:03}.png".format(savedir, epoch+1))
            torchvision.utils.save_image(fake_img_test_B[:batch_size], "{}/generating_image/B_epoch_{:03}.png".format(savedir, epoch+1))

            G_A2B_model.train()
            G_B2A_model.train()

        # every 50 epochs: plot the loss curves
        if (epoch+1)%50 == 0:
            x = np.linspace(1, epoch+1, epoch+1, dtype='int')
            plot(result['G_log_loss'], result['D_A_log_loss'], result['D_B_log_loss'], x, savedir)

    # if the final epoch was not on a save cycle, save everything now
    if (epoch+1)%10 != 0 and epoch+1 == epochs:
        torch.save(G_A2B_model.module.state_dict(), '{}/model/G_A2B_model_{}.pth'.format(savedir, epoch+1))
        torch.save(G_B2A_model.module.state_dict(), '{}/model/G_B2A_model_{}.pth'.format(savedir, epoch+1))
        torch.save(D_A_model.module.state_dict(), '{}/model/D_A_model_{}.pth'.format(savedir, epoch+1))
        torch.save(D_B_model.module.state_dict(), '{}/model/D_B_model_{}.pth'.format(savedir, epoch+1))

        G_A2B_model.eval()
        G_B2A_model.eval()

        # no_grad: gradients are not needed at test time, saves memory
        with torch.no_grad():
            for test_img_A, test_img_B in test_loader:
                fake_img_test_A = G_B2A_model(test_img_B)
                fake_img_test_B = G_A2B_model(test_img_A)
        torchvision.utils.save_image(fake_img_test_A[:batch_size], "{}/generating_image/A_epoch_{:03}.png".format(savedir, epoch+1))
        torchvision.utils.save_image(fake_img_test_B[:batch_size], "{}/generating_image/B_epoch_{:03}.png".format(savedir, epoch+1))

        G_A2B_model.train()
        G_B2A_model.train()

        x = np.linspace(1, epoch+1, epoch+1, dtype='int')
        plot(result['G_log_loss'], result['D_A_log_loss'], result['D_B_log_loss'], x, savedir)
Beispiel #29
0
if __name__ == "__main__":
    # Read the training image list; pop() drops the trailing empty entry
    # produced by the final newline.
    img_list = open(config.train['img_list'], 'r').read().split('\n')
    img_list.pop()

    #input
    dataloader = torch.utils.data.DataLoader(
        TrainDataset(img_list),
        batch_size=config.train['batch_size'],
        shuffle=True,
        num_workers=8,
        pin_memory=True)

    # Multi-GPU generator and discriminator.
    G = torch.nn.DataParallel(
        Generator(zdim=config.G['zdim'],
                  use_batchnorm=config.G['use_batchnorm'],
                  use_residual_block=config.G['use_residual_block'],
                  num_classes=config.G['num_classes'])).cuda()
    D = torch.nn.DataParallel(
        Discriminator(use_batchnorm=config.D['use_batchnorm'])).cuda()
    # Optimize only parameters that require gradients.
    optimizer_G = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                          G.parameters()),
                                   lr=1e-4)
    optimizer_D = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                          D.parameters()),
                                   lr=1e-4)
    last_epoch = -1
    # Optionally resume both networks; they must come from the same epoch.
    if config.train['resume_model'] is not None:
        e1 = resume_model(G, config.train['resume_model'])
        e2 = resume_model(D, config.train['resume_model'])
        assert e1 == e2
        last_epoch = e1
Beispiel #30
0
 def __init__(self, depth):
     """Load the generator at the given growth depth from ./pg_gen.npz."""
     self.depth = depth
     self.gen = Generator(self.depth)
     serializers.load_npz('./pg_gen.npz', self.gen)