def save_current_imgs(self, path):
    # splice the grayscale L channel with the predicted ab channels,
    # convert Lab -> RGB, and clamp into the valid [0, 1] range
    out_img = torch.clamp(
        util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       self.fake_B_reg.type(torch.cuda.FloatTensor)),
                      dim=1), self.opt), 0.0, 1.0)
    # CHW -> HWC for skimage, then write as an 8-bit image
    out_img = np.transpose(out_img.cpu().data.numpy()[0], (1, 2, 0))
    io.imsave(path, img_as_ubyte(out_img))
def save_current_imgs(self, path):
    # save instance images: pair each instance crop's L channel with its
    # predicted ab channels before converting Lab -> RGB
    b, c, h, w = self.instance_B_reg.size()
    for i in range(b):
        out_img = torch.clamp(
            util.lab2rgb(
                torch.cat(
                    (torch.unsqueeze(self.real_A[i, :, :, :], 0).type(
                        torch.cuda.FloatTensor),
                     torch.unsqueeze(self.instance_B_reg[i, :, :, :], 0).type(
                         torch.cuda.FloatTensor)),
                    dim=1), self.opt), 0.0, 1.0)
        out_img = np.transpose(out_img.cpu().data.numpy()[0], (1, 2, 0))
        io.imsave(path + '.instance_' + str(i) + '.png', img_as_ubyte(out_img))
    # save complete image before fusion
    out_img = torch.clamp(
        util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       self.comp_B_reg.type(torch.cuda.FloatTensor)),
                      dim=1), self.opt), 0.0, 1.0)
    out_img = np.transpose(out_img.cpu().data.numpy()[0], (1, 2, 0))
    io.imsave(path + '.complete.png', img_as_ubyte(out_img))
    # save complete image after fusion
    out_img = torch.clamp(
        util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       self.fake_B_reg.type(torch.cuda.FloatTensor)),
                      dim=1), self.opt), 0.0, 1.0)
    out_img = np.transpose(out_img.cpu().data.numpy()[0], (1, 2, 0))
    io.imsave(path, img_as_ubyte(out_img))
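# NOTE: both save paths above repeat one pattern: clamp to [0, 1], convert
# Lab -> RGB, move channels last, write an 8-bit file. A minimal standalone
# sketch of that pattern, substituting skimage's color.lab2rgb for the repo's
# util.lab2rgb (which takes opt, presumably to undo the model's ab
# normalization); the save_lab_tensor name and the unnormalized-Lab input
# are illustrative assumptions only.
import numpy as np
import torch
from skimage import color, io
from skimage.util import img_as_ubyte

def save_lab_tensor(lab, path):
    """Hypothetical helper: write a (1, 3, H, W) unnormalized Lab tensor as RGB."""
    lab_hwc = lab[0].cpu().numpy().transpose(1, 2, 0)  # CHW -> HWC
    rgb = np.clip(color.lab2rgb(lab_hwc), 0.0, 1.0)    # Lab -> RGB floats
    io.imsave(path, img_as_ubyte(rgb))                 # float [0, 1] -> uint8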
def get_current_visuals(self):
    from collections import OrderedDict
    visual_ret = OrderedDict()
    # 'gray' is the L channel with zeroed ab channels, i.e. the grayscale input
    visual_ret['gray'] = util.lab2rgb(
        torch.cat(
            (self.real_A.type(torch.cuda.FloatTensor),
             torch.zeros_like(self.real_B).type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['real'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.real_B.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['fake'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.fake_B.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['hint'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.hint_B.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    # the '*_ab' variants zero out L to show the chrominance alone
    visual_ret['real_ab'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.real_B.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['fake_ab'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.fake_B.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    # broadcast the single-channel hint mask to 3 channels for display
    visual_ret['mask'] = self.mask_B_nc.expand(-1, 3, -1, -1).type(
        torch.cuda.FloatTensor)
    visual_ret['hint_ab'] = visual_ret['mask'] * util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.hint_B.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    return visual_ret
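# NOTE: every key above is built the same way: concatenate the L channel (or
# zeros) with the ab channels (or zeros) along dim=1, then convert to RGB.
# A toy illustration of that channel splicing, with assumed (N, 1, H, W)
# luminance and (N, 2, H, W) chrominance shapes.
import torch

L = torch.full((1, 1, 4, 4), 50.0)    # mid-range luminance
ab = torch.randn(1, 2, 4, 4) * 40.0   # arbitrary chrominance

gray_lab = torch.cat((L, torch.zeros_like(ab)), dim=1)  # L only: grayscale view
ab_lab = torch.cat((torch.zeros_like(L), ab), dim=1)    # ab only: color residue
print(gray_lab.shape, ab_lab.shape)                     # both (1, 3, 4, 4)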
def get_current_visuals(self):
    from collections import OrderedDict
    visual_ret = OrderedDict()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    tempA = self.real_A.reshape(
        (-1, self.opt.n_frames, 1, self.opt.loadSize, self.opt.loadSize))
    tempB = self.real_B.reshape(
        (-1, self.opt.n_frames, 2, self.opt.loadSize, self.opt.loadSize))
    tempfake_B = self.fake_B_reg.reshape(
        (-1, self.opt.n_frames, 2, self.opt.loadSize, self.opt.loadSize))
    visual_ret['gray'] = []
    visual_ret['real'] = []
    visual_ret['fake_reg'] = []
    for f in range(tempA.shape[1]):
        visual_ret['gray'].append(
            util.lab2rgb(
                torch.cat(
                    (tempA[:, f].type(torch.FloatTensor).to(device),
                     torch.zeros_like(tempB[:, f]).type(
                         torch.FloatTensor).to(device)),
                    dim=1), self.opt))
        visual_ret['real'].append(
            util.lab2rgb(
                torch.cat(
                    (tempA[:, f].type(torch.FloatTensor).to(device),
                     tempB[:, f].type(torch.FloatTensor).to(device)),
                    dim=1), self.opt))
        visual_ret['fake_reg'].append(
            util.lab2rgb(
                torch.cat(
                    (tempA[:, f].type(torch.FloatTensor).to(device),
                     tempfake_B[:, f].type(torch.FloatTensor).to(device)),
                    dim=1), self.opt))
    visual_ret['real'] = torch.cat(visual_ret['real'], 1)
    visual_ret['gray'] = torch.cat(visual_ret['gray'], 1)
    visual_ret['fake_reg'] = torch.cat(visual_ret['fake_reg'], 1)
    visual_ret['fake_max'] = util.lab2rgb(
        torch.cat(
            (self.real_A.type(torch.FloatTensor).to(device),
             self.fake_B_dec_max.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    visual_ret['fake_mean'] = util.lab2rgb(
        torch.cat(
            (self.real_A.type(torch.FloatTensor).to(device),
             self.fake_B_dec_mean.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    visual_ret['hint'] = util.lab2rgb(
        torch.cat(
            (self.real_A.type(torch.FloatTensor).to(device),
             self.hint_B.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    visual_ret['real_ab'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.FloatTensor).to(device)),
             self.real_B.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    visual_ret['fake_ab_max'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.FloatTensor).to(device)),
             self.fake_B_dec_max.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    visual_ret['fake_ab_mean'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.FloatTensor).to(device)),
             self.fake_B_dec_mean.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    visual_ret['fake_ab_reg'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.FloatTensor).to(device)),
             self.fake_B_reg.type(torch.FloatTensor).to(device)),
            dim=1), self.opt)
    # visual_ret['mask'] = self.mask_B_nc.expand(-1, 3, -1, -1).type(
    #     torch.FloatTensor).to(device)
    # visual_ret['hint_ab'] = visual_ret['mask'] * util.lab2rgb(
    #     torch.cat(
    #         (torch.zeros_like(self.real_A.type(torch.FloatTensor).to(device)),
    #          self.hint_B.type(torch.FloatTensor).to(device)),
    #         dim=1), self.opt)
    # C = self.fake_B_distr.shape[1]
    # # scale to [-1, 2], then clamped to [-1, 1]
    # visual_ret['fake_entr'] = torch.clamp(
    #     3 * self.fake_B_entr.expand(-1, 3, -1, -1) / np.log(C) - 1, -1, 1)
    return visual_ret
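# NOTE: the video variant above unflattens the (batch x frames) axis before
# visualizing. A small sketch of that reshape and the per-frame channel
# concatenation, using made-up sizes in place of opt.n_frames and opt.loadSize.
import torch

n_frames, load_size = 5, 64                      # assumed option values
real_A = torch.randn(2 * n_frames, 1, load_size, load_size)

# (B*T, 1, H, W) -> (B, T, 1, H, W): frame f of clip b is tempA[b, f]
tempA = real_A.reshape(-1, n_frames, 1, load_size, load_size)
frames = [tempA[:, f] for f in range(n_frames)]  # each (B, 1, H, W)
strip = torch.cat(frames, dim=1)                 # frames stacked on channels
print(strip.shape)                               # torch.Size([2, 5, 64, 64])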
lossreg = L1oss(outputreg, data['B'].to(device))
# cross-entropy over the quantized ab bins; targets are the bin indices
# stored in channel 0 and must be LongTensor
lossclass = CEloss(
    outputclass.type(torch.cuda.FloatTensor),
    realclass[:, 0, :, :].type(torch.cuda.LongTensor))
if record:
    if index % loss_fre == 0:  # 100
        writer.add_scalars('train/loss:', {
            'reg': lossreg.item() * 10,
            'class': lossclass.item()
        }, epoch * lens + index * batch_size)
    if index % img_fre == 0:  # 2000
        image_fake = util.lab2rgb(
            torch.cat([
                data['A'].type(torch.cuda.FloatTensor),
                outputreg.type(torch.cuda.FloatTensor)
            ], dim=1), opt)
        image_hint = util.lab2rgb(
            torch.cat([
                data['A'].type(torch.cuda.FloatTensor),
                data['hint_B'].type(torch.cuda.FloatTensor)
            ], dim=1), opt)
        image_real = util.lab2rgb(
            torch.cat([
                data['A'].type(torch.cuda.FloatTensor),
                data['B'].type(torch.cuda.FloatTensor)
            ], dim=1), opt)
        image_fake = image_fake.clamp_(0, 1)
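# NOTE: the loss above pairs L1 regression on ab values with per-pixel
# cross-entropy over quantized color bins. A self-contained sketch of that
# combination; the 313-bin vocabulary, the tensor shapes, and the 10x
# weighting (echoing the scalar-logging factor above) are assumptions.
import torch
import torch.nn as nn

n_bins, B, H, W = 313, 2, 8, 8            # assumed quantized ab vocabulary
outputreg = torch.randn(B, 2, H, W)       # predicted ab channels
target_ab = torch.randn(B, 2, H, W)
outputclass = torch.randn(B, n_bins, H, W)           # per-pixel bin logits
realclass = torch.randint(0, n_bins, (B, 1, H, W))   # bin index per pixel

L1oss, CEloss = nn.L1Loss(), nn.CrossEntropyLoss()
lossreg = L1oss(outputreg, target_ab)
lossclass = CEloss(outputclass, realclass[:, 0, :, :])  # targets must be long
loss = 10 * lossreg + lossclass
print(loss.item())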
def get_current_visuals(self):
    from collections import OrderedDict
    visual_ret = OrderedDict()
    if self.opt.stage == 'full' or self.opt.stage == 'instance':
        visual_ret['gray'] = util.lab2rgb(
            torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                       torch.zeros_like(self.real_B).type(
                           torch.cuda.FloatTensor)), dim=1), self.opt)
        visual_ret['real'] = util.lab2rgb(
            torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                       self.real_B.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['fake_reg'] = util.lab2rgb(
            torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                       self.fake_B_reg.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['hint'] = util.lab2rgb(
            torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                       self.hint_B.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['real_ab'] = util.lab2rgb(
            torch.cat((torch.zeros_like(
                self.real_A.type(torch.cuda.FloatTensor)),
                       self.real_B.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['fake_ab_reg'] = util.lab2rgb(
            torch.cat((torch.zeros_like(
                self.real_A.type(torch.cuda.FloatTensor)),
                       self.fake_B_reg.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
    elif self.opt.stage == 'fusion':
        visual_ret['gray'] = util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       torch.zeros_like(self.full_real_B).type(
                           torch.cuda.FloatTensor)), dim=1), self.opt)
        visual_ret['real'] = util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       self.full_real_B.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['comp_reg'] = util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       self.comp_B_reg.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['fake_reg'] = util.lab2rgb(
            torch.cat((self.full_real_A.type(torch.cuda.FloatTensor),
                       self.fake_B_reg.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        # resize a zero 176x176 instance box mask to the full-image
        # resolution and tile it to 3 channels for display
        self.instance_mask = torch.nn.functional.interpolate(
            torch.zeros([1, 1, 176, 176]),
            size=visual_ret['gray'].shape[2:],
            mode='bilinear').type(torch.cuda.FloatTensor)
        visual_ret['box_mask'] = torch.cat(
            (self.instance_mask, self.instance_mask, self.instance_mask), 1)
        visual_ret['real_ab'] = util.lab2rgb(
            torch.cat((torch.zeros_like(
                self.full_real_A.type(torch.cuda.FloatTensor)),
                       self.full_real_B.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['comp_ab_reg'] = util.lab2rgb(
            torch.cat((torch.zeros_like(
                self.full_real_A.type(torch.cuda.FloatTensor)),
                       self.comp_B_reg.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
        visual_ret['fake_ab_reg'] = util.lab2rgb(
            torch.cat((torch.zeros_like(
                self.full_real_A.type(torch.cuda.FloatTensor)),
                       self.fake_B_reg.type(torch.cuda.FloatTensor)), dim=1),
            self.opt)
    else:
        print('Error! Wrong stage selection!')
        exit()
    return visual_ret
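# NOTE: the fusion branch above resizes a fixed 176x176 box mask to the
# display resolution and tiles it to three channels. A minimal sketch of
# that resize-and-tile step, with an assumed 256x256 target size.
import torch
import torch.nn.functional as F

mask = torch.zeros(1, 1, 176, 176)               # placeholder box mask
full = F.interpolate(mask, size=(256, 256),      # match the visual's H, W
                     mode='bilinear', align_corners=False)
rgb_mask = torch.cat((full, full, full), dim=1)  # tile to 3 display channels
print(rgb_mask.shape)                            # torch.Size([1, 3, 256, 256])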
def get_current_visuals(self):
    from collections import OrderedDict
    visual_ret = OrderedDict()
    visual_ret['gray'] = util.lab2rgb(
        torch.cat(
            (self.real_A.type(torch.cuda.FloatTensor),
             torch.zeros_like(self.real_B).type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['real'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.real_B.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['fake_max'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.fake_B_dec_max.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['fake_mean'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.fake_B_dec_mean.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['fake_reg'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.fake_B_reg.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['hint'] = util.lab2rgb(
        torch.cat((self.real_A.type(torch.cuda.FloatTensor),
                   self.hint_B.type(torch.cuda.FloatTensor)),
                  dim=1), self.opt)
    visual_ret['real_ab'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.real_B.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['fake_ab_max'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.fake_B_dec_max.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['fake_ab_mean'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.fake_B_dec_mean.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['fake_ab_reg'] = util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.fake_B_reg.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    visual_ret['mask'] = self.mask_B_nc.expand(-1, 3, -1, -1).type(
        torch.cuda.FloatTensor)
    visual_ret['hint_ab'] = visual_ret['mask'] * util.lab2rgb(
        torch.cat(
            (torch.zeros_like(self.real_A.type(torch.cuda.FloatTensor)),
             self.hint_B.type(torch.cuda.FloatTensor)),
            dim=1), self.opt)
    C = self.fake_B_distr.shape[1]
    # scale to [-1, 2], then clamp to [-1, 1]
    visual_ret['fake_entr'] = torch.clamp(
        3 * self.fake_B_entr.expand(-1, 3, -1, -1) / np.log(C) - 1, -1, 1)
    return visual_ret
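# NOTE: 'fake_entr' above rescales per-pixel entropy of the predicted bin
# distribution for display: entropy / log(C) lies in [0, 1], so 3x - 1 spans
# [-1, 2] and is then clamped to [-1, 1]. A sketch of deriving that map from
# softmax probabilities; the bin count and spatial size are assumptions.
import numpy as np
import torch

B, C, H, W = 1, 313, 8, 8                 # assumed bin count and size
logits = torch.randn(B, C, H, W)
p = torch.softmax(logits, dim=1)          # per-pixel distribution over bins

entr = -(p * torch.log(p.clamp_min(1e-10))).sum(dim=1, keepdim=True)
vis = torch.clamp(3 * entr.expand(-1, 3, -1, -1) / np.log(C) - 1, -1, 1)
print(vis.min().item() >= -1, vis.max().item() <= 1)  # True True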
model.model.set_requires_grad(model.model.netG)
# model(data)
# transforms.ToPILImage()(image[0]).show(command='fim')
# to_visualize = ['gray', 'hint', 'hint_ab', 'fake_entr',
#                 'real', 'fake_reg', 'real_ab', 'fake_ab_reg']
# visuals = util.get_subset_dict(
#     model.model.get_current_visuals(), to_visualize)
# for key, value in visuals.items():
#     print(key)
#     transforms.ToPILImage()(value[0]).show(command='fim')

# sanity-check the model output before tracing
output = model(img, hint)
output = util.lab2rgb(output, opt=opt)
transforms.ToPILImage()(output[0]).show(command='fim')

# trace the model and convert it to Core ML with flexible input sizes
traced_model = torch.jit.trace(model, (img, hint), check_trace=False)
mlmodel = ct.convert(
    model=traced_model,
    inputs=[
        ct.TensorType(name="image",
                      shape=ct.Shape(shape=(1, 3, ct.RangeDim(1, 4096),
                                            ct.RangeDim(1, 4096)))),
        ct.TensorType(name="hint",
                      shape=ct.Shape(shape=(1, 3, ct.RangeDim(1, 4096),
                                            ct.RangeDim(1, 4096)))),
    ])
# expand '~' explicitly: plain file APIs do not do shell-style tilde expansion
mlmodel.save(os.path.expanduser("~/color.mlmodel"))
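# NOTE: once saved, the converted Core ML model can be loaded back and
# exercised from Python. A hedged usage sketch: the input names match the
# TensorType declarations above, predict takes numpy arrays keyed by input
# name, and running predictions requires macOS.
import os
import numpy as np
import coremltools as ct

mlmodel = ct.models.MLModel(os.path.expanduser("~/color.mlmodel"))
img = np.zeros((1, 3, 512, 512), dtype=np.float32)   # any size in the RangeDim
hint = np.zeros((1, 3, 512, 512), dtype=np.float32)
out = mlmodel.predict({"image": img, "hint": hint})  # dict keyed by output name
print(list(out.keys()))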