def main():
    """CLI entry point: colorize a sketch with colors taken from a style image.

    Usage: <script> <sketch file> <style file>
    Reads the sketch from ./data/test/ and the style from ./data/styles/,
    extracts a per-region color palette from the style image, runs the
    generator, and writes a [sketch | style | result] comparison strip to
    ``out_root``.  Exits with status 1 if palette extraction yields fewer
    colors than expected.
    """
    if len(sys.argv) < 3:
        raise RuntimeError(
            'Command Line Argument Must be (sketch file, style file)')
    style_f = './data/styles/%s' % sys.argv[2]
    test_f = './data/test/%s' % sys.argv[1]
    # Fix: strip extensions with splitext instead of a hard-coded [:-4]
    # slice, which silently mangled names with non-3-character extensions
    # (e.g. 'foo.jpeg' -> 'foo.' instead of 'foo').
    filename = (os.path.splitext(sys.argv[1])[0]
                + os.path.splitext(sys.argv[2])[0] + '.png')

    style = Image.open(style_f).convert('RGB')
    style = transforms.Resize((512, 512))(style)
    style_pil = style

    test = Image.open(test_f).convert('RGB')
    test_pil = transforms.Resize((512, 512))(test)

    transform = transforms.Compose(
        [transforms.Resize((512, 512)), transforms.ToTensor()])
    test = transform(test)
    test = scale(test)
    test = test.unsqueeze(0).to(device)
    to_pil = transforms.ToPILImage()
    try:
        # Build {region -> {rank -> rgb}} palette from the style image.
        # Rank 0 of each extracted palette is skipped (the dominant /
        # background color); ranks 1..topk are kept.
        images = list(crop_region(style))
        result = {}
        # Fix: distinct names for the region index and the palette rank --
        # the original reused `i` for both (shadowed via comprehension
        # scope, so behavior is unchanged, but it read as a bug).
        for region, img in enumerate(images, 1):
            colors = cgm.extract(img, topk + 1)
            result[str(region)] = {
                '%d' % rank: get_rgb(colors[rank])
                for rank in range(1, topk + 1)
            }
        color_tensor = make_colorgram_tensor(result)
        color_tensor = color_tensor.unsqueeze(0).to(device)

        fakeB, _ = model(test, color_tensor)
        fakeB = fakeB.squeeze(0)
        fakeB = re_scale(fakeB.detach().cpu())
        fakeB = to_pil(fakeB)

        # Comparison strip: sketch | style | generated result.
        result_image = Image.new('RGB', (512 * 3, 512))
        result_image.paste(test_pil, (512 * 0, 0, 512 * 1, 512))
        result_image.paste(style_pil, (512 * 1, 0, 512 * 2, 512))
        result_image.paste(fakeB, (512 * 2, 0, 512 * 3, 512))
        save_image(result_image, os.path.join(out_root, filename))
    except IndexError:
        # cgm.extract returned fewer than topk+1 colors.
        exit(1)
def _build_colorgram_dict(stylelist):
    """Spread ``stylelist`` cyclically over a 4x4 {region -> {rank -> color}} grid.

    Colors are assigned round-robin; when the list is exhausted it wraps
    back to the first color.  Raises IndexError if ``stylelist`` is empty.
    """
    result = {}
    j = 0
    for region in range(1, 5):
        ranks = {}
        for rank in range(1, 5):
            if j >= len(stylelist):
                j = 0  # wrap around and reuse colors
            ranks[str(rank)] = stylelist[j]
            j += 1
        result[str(region)] = ranks
    return result


def actbycolor(stylelist, testfile):
    """Colorize the sketch ``testfile`` using an explicit list of colors.

    Builds a 4x4 colorgram dict from ``stylelist`` (see
    ``_build_colorgram_dict``), runs the generator on the sketch, and writes
    two files: ``media/result.jpg`` (the colorized image) and
    ``media/compareresult.jpg`` (a [sketch | style | result] strip using
    ``media/immm.jpg`` as the displayed style).  Exits with status 1 on
    IndexError during inference.
    """
    # Fix: removed the leftover `len(sys.argv) < 3` check copied from
    # main() -- this function takes its inputs as parameters, so CLI
    # argument count is irrelevant and the check broke programmatic calls.
    # Debug prints of internal objects were dropped as well.
    result = _build_colorgram_dict(stylelist)

    test = Image.open(testfile).convert('RGB')
    test_pil = transforms.Resize((256, 256))(test)

    style = Image.open('media/immm.jpg').convert('RGB')
    style = transforms.Resize((256, 256))(style)
    style_pil = style

    transform = transforms.Compose(
        [transforms.Resize((256, 256)), transforms.ToTensor()])
    test = transform(test)
    test = scale(test)
    test = test.unsqueeze(0).to(device)
    to_pil = transforms.ToPILImage()
    try:
        color_tensor = make_colorgram_tensor(result)
        color_tensor = color_tensor.unsqueeze(0).to(device)

        fakeB, _ = model(test, color_tensor)
        fakeB = fakeB.squeeze(0)
        fakeB = re_scale(fakeB.detach().cpu())
        fakeB = to_pil(fakeB)
        fakeB.save(os.path.expanduser('media/result.jpg'))

        # Comparison strip: sketch | style | generated result.
        result_image = Image.new('RGB', (256 * 3, 256))
        result_image.paste(test_pil, (256 * 0, 0, 256 * 1, 256))
        result_image.paste(style_pil, (256 * 1, 0, 256 * 2, 256))
        result_image.paste(fakeB, (256 * 2, 0, 256 * 3, 256))
        result_image.save(os.path.expanduser('media/compareresult.jpg'))
    except IndexError:
        exit(1)


# if __name__ == "__main__":
#     main()
image = skimage.transform.pyramid_expand( image, upscale=512 // (image.shape[-1]), multichannel=False, ) return to_pil(torch.Tensor(image).unsqueeze(0)) attentions = list(map(lambda img: interpolate(img), attentions)) result = Image.new('RGBA', (4 * 512, 512)) styleB = styleB.squeeze() fakeB = fakeB.squeeze() imageA = imageA.squeeze() imageB = imageB.squeeze() imageA = to_pil(re_scale(imageA).detach().cpu()) imageB = to_pil(re_scale(imageB).detach().cpu()) styleB = to_pil(re_scale(styleB).detach().cpu()) fakeB = to_pil(re_scale(fakeB).detach().cpu()) result.paste(imageA, (0, 0)) result.paste(styleB, (512, 0)) result.paste(fakeB, (512 * 2, 0)) result.paste(imageB, (512 * 3, 0)) figure = Image.new('RGB', (9 * 512, 512)) figure.paste(imageA, (0, 0)) figure.paste(styleB, (512 * 1, 0)) figure.paste(attentions[0], (512 * 2, 0)) figure.paste(attentions[1], (512 * 3, 0)) figure.paste(attentions[2], (512 * 4, 0))
def validate(self, dataset, epoch, samples=3):
    """Run validation rounds: report GAN/L1 losses and save comparison grids.

    For each of ``epoch`` rounds, draws ``samples`` (target, style) index
    pairs from ``dataset``, runs the generator on each target conditioned on
    the style's colors, prints the averaged discriminator/generator losses,
    and saves a grid image with one row per sample:
    [input | style | fake | ground truth | top-4 color maps].

    Args:
        dataset: indexable dataset yielding (imageA, imageB, colors) tuples
            of tensors (assumed CHW -- TODO confirm against the dataset).
        epoch: number of validation rounds; also embedded in the filename.
        samples: rows per grid; 2*samples indices are drawn per round.
    """
    # self.generator.eval()
    # self.discriminator.eval()
    length = len(dataset)
    # sample images
    # Fix: sample over the full index range [0, length).  The original
    # range(0, length - 1) had an off-by-one and could never select the
    # last item of the dataset.
    idxs_total = [
        random.sample(range(length), samples * 2) for _ in range(epoch)
    ]
    for j, idxs in enumerate(idxs_total):
        # First half of the draw are targets, second half are styles.
        styles = idxs[samples:]
        targets = idxs[0:samples]
        result = Image.new(
            'RGB', (5 * self.resolution, samples * self.resolution))
        toPIL = transforms.ToPILImage()
        G_loss_gan = []
        G_loss_l1 = []
        D_loss_real = []
        D_loss_fake = []
        l1_loss = self.losses['L1']
        gan_loss = self.losses['GAN']
        for i, (target, style) in enumerate(zip(targets, styles)):
            sub_result = Image.new(
                'RGB', (5 * self.resolution, self.resolution))
            imageA, imageB, _ = dataset[target]
            styleA, styleB, colors = dataset[style]
            # B2A mode swaps the translation direction.
            if self.args.mode == 'B2A':
                imageA, imageB = imageB, imageA
                styleA, styleB = styleB, styleA

            imageA = imageA.unsqueeze(0).to(self.device)
            imageB = imageB.unsqueeze(0).to(self.device)
            styleB = styleB.unsqueeze(0).to(self.device)
            colors = colors.unsqueeze(0).to(self.device)

            # Validation only -- no gradients needed for G or D.
            with torch.no_grad():
                fakeB, _ = self.generator(imageA, colors)
                fakeAB = torch.cat([imageA, fakeB], 1)
                realAB = torch.cat([imageA, imageB], 1)
                G_loss_l1.append(l1_loss(fakeB, imageB).item())
                G_loss_gan.append(
                    gan_loss(self.discriminator(fakeAB), True).item())
                D_loss_real.append(
                    gan_loss(self.discriminator(realAB), True).item())
                D_loss_fake.append(
                    gan_loss(self.discriminator(fakeAB), False).item())

            styleB = styleB.squeeze()
            fakeB = fakeB.squeeze()
            imageA = imageA.squeeze()
            imageB = imageB.squeeze()
            colors = colors.squeeze()
            imageA = toPIL(re_scale(imageA).detach().cpu())
            imageB = toPIL(re_scale(imageB).detach().cpu())
            styleB = toPIL(re_scale(styleB).detach().cpu())
            fakeB = toPIL(re_scale(fakeB).detach().cpu())

            # Synthesize the top-4 colors: each 3-channel slab of `colors`
            # is rendered, rotated, and stacked as a horizontal strip.
            strips = [
                toPIL(re_scale(colors[c:c + 3].detach().cpu())).rotate(90)
                for c in (0, 3, 6, 9)
            ]
            quarter = self.resolution // 4
            color_result = Image.new(
                'RGB', (self.resolution, self.resolution))
            for row, strip in enumerate(strips):
                color_result.paste(
                    strip.crop((0, 0, self.resolution, quarter)),
                    (0, quarter * row))

            # Row layout: input | style | fake | ground truth | colors.
            sub_result.paste(imageA, (0, 0))
            sub_result.paste(styleB, (self.resolution, 0))
            sub_result.paste(fakeB, (2 * self.resolution, 0))
            sub_result.paste(imageB, (3 * self.resolution, 0))
            sub_result.paste(color_result, (4 * self.resolution, 0))
            result.paste(sub_result, (0, 0 + self.resolution * i))
        print(
            'Validate D_loss_real = %f, D_loss_fake = %f, G_loss_l1 = %f, G_loss_gan = %f'
            % (
                sum(D_loss_real) / samples,
                sum(D_loss_fake) / samples,
                sum(G_loss_l1) / samples,
                sum(G_loss_gan) / samples,
            ))
        save_image(
            result,
            'deepunetpaint_%03d_%02d' % (epoch, j),
            self.save_path,
        )