Example 1
def generate_one_frame(gan, input_tensor, frame_shape, scale, geo_shifts,
                       center):
    with torch.no_grad():
        base_sz = input_tensor.shape
        in_size = base_sz[2:]
        out_pad = np.zeros([frame_shape[0], frame_shape[1], 3], dtype=np.uint8)

        if scale[0] == -1:
            output_tensor = [None, input_tensor]
            out_mask = torch.ones_like(output_tensor[1])
            out_size = in_size

        else:
            out_mask, out_size = prepare_geometric(base_sz, scale, geo_shifts)

            output_tensor, _, _ = gan.test(input_tensor=input_tensor,
                                           input_size=in_size,
                                           output_size=out_size,
                                           rand_affine=geo_shifts,
                                           run_d_pred=False,
                                           run_reconstruct=False)

        # where the mask is 0 this evaluates to -1 (black in [-1, 1] space);
        # where it is 1, the generator output passes through unchanged
        out = out_mask * output_tensor[1] - 1 + out_mask
        margin = np.uint16(
            (frame_shape - np.array(out_size)) / 2) if center else [0, 0]
        out_pad[margin[0]:margin[0] + out_size[0],
                margin[1]:margin[1] + out_size[1], :] = util.hist_match(
                    util.tensor2im(out), util.tensor2im(input_tensor),
                    util.tensor2im(out_mask))
        return out_pad
Example 2
    def get_current_visuals(self):
        real_A = util.tensor2im(self.real_A.data)
        # fake_B = util.tensor2im(self.fake_B.data)
        fake_C = util.tensor2im(self.fake_C.data)

        # return OrderedDict([('original', real_A), ('restyled', fake_B), ('depth', fake_C)])
        return OrderedDict([('original', real_A), ('depth', fake_C)])
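Every snippet on this page revolves around util.tensor2im. The exact helper varies by repository, but most follow the pytorch-CycleGAN convention of mapping a [-1, 1] float tensor to an HxWxC uint8 array. A minimal sketch of that convention, for orientation only (an assumption, not any one repo's verbatim code):

import numpy as np
import torch

def tensor2im(input_image, imtype=np.uint8):
    # Sketch only: map a 1xCxHxW (or CxHxW) float tensor in [-1, 1]
    # to an HxWxC uint8 array. Real repos differ in the details.
    if not isinstance(input_image, torch.Tensor):
        return np.asarray(input_image).astype(imtype)
    image_numpy = input_image.detach().cpu().float().numpy()
    if image_numpy.ndim == 4:       # drop the batch dimension
        image_numpy = image_numpy[0]
    if image_numpy.shape[0] == 1:   # tile grayscale to three channels
        image_numpy = np.tile(image_numpy, (3, 1, 1))
    # CHW -> HWC, then rescale [-1, 1] -> [0, 255]
    image_numpy = (np.transpose(image_numpy, (1, 2, 0)) + 1) / 2.0 * 255.0
    return image_numpy.astype(imtype)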
Example 3
def predict(net, data):
    real_A = data['A'].cuda()
    real_A_gray = data['A_gray'].cuda()
    fake_B, latent_real_A = net.forward(real_A, real_A_gray)
    latent_real_A = util.tensor2im(latent_real_A.data)
    real_A = util.tensor2im(real_A.data)
    fake_B = util.tensor2im(fake_B.data)
    A_gray = util.atten2im(real_A_gray.data)
    return OrderedDict([('real_A', real_A), ('fake_B', fake_B),
                        ('A_gray', A_gray),
                        ('latent_real_A', latent_real_A)])
Example 4
 def predict(self, data):
     self.real_A = data['A'].cuda()
     self.real_A_gray = data['A_gray'].cuda()
     self.fake_B, self.latent_real_A = self.netG.forward(
         self.real_A, self.real_A_gray)
     real_A = util.tensor2im(self.real_A.data)
     fake_B = util.tensor2im(self.fake_B.data)
     A_gray = util.atten2im(self.real_A_gray.data)
     return OrderedDict([('real_A', real_A), ('fake_B', fake_B),
                         ('A_gray', A_gray)])
Example 5
 def get_current_visuals(self):
     real_A = util.tensor2im(self.real_A.data)
     fake_map = util.tensor2im(self.fake_map.data)
     real_map = util.tensor2im(self.real_map.data)
     return OrderedDict(
         [
             ('real_A', real_A),
             ('fake_map', fake_map),
             ('real_map', real_map)
         ]
     )
Example 6
def plot_prediction(logger, image, marking_points, prediction):
    """Plot the ground truth and prediction of a random sample in a batch."""
    rand_sample = random.randint(0, image.size(0) - 1)
    sampled_image = util.tensor2im(image[rand_sample])
    logger.plot_marking_points(sampled_image,
                               marking_points[rand_sample],
                               win_name='gt_marking_points')
    # convert again so the prediction plot starts from an unannotated copy
    # (plot_marking_points presumably draws on the array in place)
    sampled_image = util.tensor2im(image[rand_sample])
    pred_points = data.get_predicted_points(prediction[rand_sample], 0.01)
    if pred_points:
        logger.plot_marking_points(sampled_image,
                                   list(list(zip(*pred_points))[1]),
                                   win_name='pred_marking_points')
Example 7
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
    """Save images to the disk.

    Parameters:
        webpage (the HTML class) -- the HTML webpage class that stores these images (see html.py for more details)
        visuals (OrderedDict)    -- an ordered dictionary that stores (name, images (either tensor or numpy) ) pairs
        image_path (str)         -- the string is used to create image paths
        aspect_ratio (float)     -- the aspect ratio of saved images
        width (int)              -- the images will be resized to width x width

    This function will save images stored in 'visuals' to the HTML file specified by 'webpage'.
    """
    image_dir = webpage.get_image_dir()
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]

    webpage.add_header(name)
    ims, txts, links = [], [], []

    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        save_path = os.path.join(image_dir, image_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
        ims.append(image_name)
        txts.append(label)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=width)
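For orientation, save_images is normally driven by a test loop along these lines. This is a hypothetical sketch: model.set_input / model.test / model.get_current_visuals / model.get_image_paths and the html.HTML constructor are assumed from the conventions visible here and in Example 26, not taken verbatim from any single repo.

# Hypothetical driver for save_images (names assumed, see note above)
webpage = html.HTML(web_dir, 'Experiment = %s' % name)
for i, data in enumerate(dataset):
    model.set_input(data)                  # unpack a batch from the loader
    model.test()                           # forward pass without gradients
    visuals = model.get_current_visuals()  # OrderedDict of output tensors
    img_path = model.get_image_paths()     # paths used to name the outputs
    save_images(webpage, visuals, img_path, width=256)
webpage.save()                             # write the HTML index to disk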
Example 8
def test_one_scale(gan,
                   input_tensor,
                   scale,
                   must_divide,
                   affine=None,
                   return_tensor=False,
                   size_instead_scale=False):
    with torch.no_grad():
        in_size = input_tensor.shape[2:]
        if size_instead_scale:
            out_size = scale
        else:
            out_size = (
                np.uint32(
                    np.floor(scale[0] * in_size[0] * 1.0 / must_divide) *
                    must_divide),
                np.uint32(
                    np.floor(scale[1] * in_size[1] * 1.0 / must_divide) *
                    must_divide))

        output_tensor, _, _ = gan.test(input_tensor=input_tensor,
                                       input_size=in_size,
                                       output_size=out_size,
                                       rand_affine=affine,
                                       run_d_pred=False,
                                       run_reconstruct=False)
        if return_tensor:
            return output_tensor[1]
        else:
            return util.tensor2im(output_tensor[1])
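The out_size arithmetic above snaps each scaled dimension down to the nearest multiple of must_divide, which fully convolutional generators typically require. A quick check with illustrative numbers:

import numpy as np

scale, in_dim, must_divide = 1.3, 100, 8
out_dim = np.uint32(np.floor(scale * in_dim / must_divide) * must_divide)
# floor(130 / 8) * 8 = 16 * 8 = 128, which is divisible by 8
assert out_dim == 128 and out_dim % must_divide == 0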
Example 9
    def get_current_visuals(self):
        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        real_B = util.tensor2im(self.real_B.data)

        latent_real_A = util.tensor2im(self.latent_real_A.data)
        latent_show = util.latent2im(self.latent_real_A.data)
        fake_patch = util.tensor2im(self.fake_patch.data)
        real_patch = util.tensor2im(self.real_patch.data)
        input_patch = util.tensor2im(self.input_patch.data)
        if not self.opt.self_attention:
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B),
                                ('latent_real_A', latent_real_A),
                                ('latent_show', latent_show),
                                ('real_B', real_B), ('real_patch', real_patch),
                                ('fake_patch', fake_patch),
                                ('input_patch', input_patch)])
        else:
            self_attention = util.atten2im(self.real_A_gray.data)
            return OrderedDict([('real_A', real_A), ('fake_B', fake_B),
                                ('latent_real_A', latent_real_A),
                                ('latent_show', latent_show),
                                ('real_B', real_B), ('real_patch', real_patch),
                                ('fake_patch', fake_patch),
                                ('input_patch', input_patch),
                                ('self_attention', self_attention)])
Example 10
    def finish(self, image):

        with torch.no_grad():
            image = im2tensor(image)

            sr = self.U(image)
            if self.conf.X4:
                # round-trip through uint8 to quantize the intermediate result
                # between the two upsampling passes
                sr = im2tensor(tensor2im(sr))
                sr = self.U(sr)
            sr = tensor2im(sr)

            def save_np_as_img(arr, path):
                Image.fromarray(np.uint8(arr)).save(path)

            save_np_as_img(
                sr, os.path.join(self.conf.output_dir_path, 'image sr.png'))
            print('FINISHED RUN (see --%s-- folder)\n' %
                  self.conf.output_dir_path + '*' * 60 + '\n\n')
Example 11
def display(img):
    # img = img.permute(1,2,0).squeeze()
    # print(img.shape)
    # plt.imshow(img)
    img = util.tensor2im(img, batch=False)
    plt.interactive(False)

    plt.imshow(img)
    plt.show()
Example 12
def save_image(result_dir, image, image_name, aspect_ratio=1.0):
    im = util.tensor2im(image)
    save_path = os.path.join(result_dir, image_name)
    h, w, _ = im.shape
    if aspect_ratio > 1.0:
        im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
    if aspect_ratio < 1.0:
        im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
    util.save_image(im, save_path)
    print('save in path: ', save_path)
Example 13
def draw_obj_eval(model: Scence2Image, obj):
    obj_tensor = processing.obj2tensor(obj)

    if torch.cuda.is_available():
        obj_tensor = obj_tensor.cuda()

    obj_im = model.obj2im(Variable(obj_tensor))[0]

    im = Image.fromarray(util.tensor2im(obj_im.data.cpu()))
    return im
Example 14
    def add_to_webpage(self, images, filenames, tile=1):
        converted_images = []
        for image in images:
            if isinstance(image, list):
                image = torch.stack(image, dim=0).flatten(0, 1)
            image = Image.fromarray(util.tensor2im(image, tile=min(image.size(0), tile)))
            converted_images.append(image)

        self.webpage.add_images(converted_images,
                                filenames)
        print("saved %s" % str(filenames))
Example 15
    def test(self, data):
        full_fakeB = torch.empty(1,3,320,320)
        with torch.no_grad():
            # tile the 320x320 input into a 10x10 grid of 32-pixel patches
            # and enhance each patch independently
            for i in range(10):
                for j in range(10):
                    # realA = data['A'][:, :, i*32:i*32+32, j*32:j*32+32].cuda()
                    # realB = data['B'][:, :, i*32:i*32+32, j*32:j*32+32].cuda()
                    realA_gray = data['A_gray'][:, :, i*32:i*32+32, j*32:j*32+32].cuda()
                    real_img = data['input_img'][:, :, i*32:i*32+32, j*32:j*32+32].cuda()
                    real_img = real_img.unsqueeze(0)
                    realA_gray = realA_gray.unsqueeze(0)
                    fakeB, latent_real_A = self.netG(real_img, realA_gray)
                    full_fakeB[0, :, i*32:i*32+32, j*32:j*32+32] = fakeB[0, :3, :, :]
        realA = tensor2im(data['input_img'].unsqueeze(0)[:, :3, :, :])
        full_fakeB = tensor2im(full_fakeB)

        fig, ax = plt.subplots(nrows=1, ncols=2)
        ax[0].axis('off')
        ax[0].imshow(realA)
        ax[1].axis('off')
        ax[1].imshow(full_fakeB)
        plt.show()
Example 16
def generate_collage_and_outputs(conf, gan, input_tensor):
    output_images = generate_images_for_collage(gan, input_tensor, conf.collage_scales, conf.must_divide)

    for i in range(len(output_images)):
        for j in range(len(output_images)):
            Image.fromarray(output_images[i][j], 'RGB').save(conf.output_dir_path + '/test_%d_%d.png' % (i, j))

    input_spot = conf.collage_input_spot
    output_images[input_spot[0]][input_spot[1]] = util.tensor2im(input_tensor)

    collage = concat_images(output_images, margin=10, input_spot=input_spot)

    Image.fromarray(np.uint8(collage), 'RGB').save(conf.output_dir_path + '/test_collage.png')
Example 17
    def generate_mix_grid(self, model, images):
        sps, gls = [], []
        for image in images:
            assert image.size(0) == 1
            sp, gl = model(image.expand(self.opt.num_gpus, -1, -1, -1),
                           command="encode")
            sp = sp[:1]
            gl = gl[:1]
            sps.append(sp)
            gls.append(gl)
        gl = torch.cat(gls, dim=0)

        def put_img(img, canvas, row, col):
            h, w = img.shape[0], img.shape[1]
            start_x = int(self.opt.load_size * col +
                          (self.opt.load_size - w) * 0.5)
            start_y = int(self.opt.load_size * row +
                          (self.opt.load_size - h) * 0.5)
            canvas[start_y:start_y + h, start_x:start_x + w] = img

        grid_w = self.opt.load_size * (gl.size(0) + 1)
        grid_h = self.opt.load_size * (gl.size(0) + 1)
        grid_img = np.ones((grid_h, grid_w, 3), dtype=np.uint8)
        #images_np = util.tensor2im(images, tile=False)
        for i, image in enumerate(images):
            image_np = util.tensor2im(image, tile=False)[0]
            put_img(image_np, grid_img, 0, i + 1)
            put_img(image_np, grid_img, i + 1, 0)

        for i, sp in enumerate(sps):
            sp_for_current_row = sp.repeat(gl.size(0), 1, 1, 1)
            mix_row = model(sp_for_current_row, gl, command="decode")
            mix_row = util.tensor2im(mix_row, tile=False)
            for j, mix in enumerate(mix_row):
                put_img(mix, grid_img, i + 1, j + 1)

        final_grid = Image.fromarray(grid_img)
        return final_grid
Example 18
    def predict(self):
        # volatile=True is the legacy (pre-0.4) PyTorch inference flag,
        # superseded by torch.no_grad()
        self.real_A = Variable(self.input_A, volatile=True)
        self.real_A_gray = Variable(self.input_A_gray, volatile=True)
        if self.opt.noise > 0:
            self.noise = Variable(
                torch.cuda.FloatTensor(self.real_A.size()).normal_(
                    mean=0, std=self.opt.noise / 255.))
            self.real_A = self.real_A + self.noise
        if self.opt.input_linear:
            self.real_A = (self.real_A - torch.min(self.real_A)) / (
                torch.max(self.real_A) - torch.min(self.real_A))
        # print(np.transpose(self.real_A.data[0].cpu().float().numpy(),(1,2,0))[:2][:2][:])
        if self.opt.skip == 1:
            self.fake_B, self.latent_real_A = self.netG_A.forward(
                self.real_A, self.real_A_gray)
        else:
            self.fake_B = self.netG_A.forward(self.real_A, self.real_A_gray)
        # self.rec_A = self.netG_B.forward(self.fake_B)

        real_A = util.tensor2im(self.real_A.data)
        fake_B = util.tensor2im(self.fake_B.data)
        A_gray = util.atten2im(self.real_A_gray.data)
        return OrderedDict([('real_A', real_A), ('fake_B', fake_B)])
Example 19
    def quick_eval(self):
        # Evaluate trained upsampler and downsampler on input data
        with torch.no_grad():
            downsampled_img_t = self.G_DN(self.in_img_cropped_t)
            upsampled_img_t = self.G_UP(self.in_img_t)

        self.downsampled_img = util.tensor2im(downsampled_img_t)
        self.upsampled_img = util.tensor2im(upsampled_img_t)

        if self.gt_kernel is not None:
            self.DN_psnrs += [
                util.cal_y_psnr(self.downsampled_img,
                                self.gt_downsampled_img,
                                border=self.conf.scale_factor)
            ]
        if self.gt_img is not None:
            self.UP_psnrs += [
                util.cal_y_psnr(self.upsampled_img,
                                self.gt_img,
                                border=self.conf.scale_factor)
            ]
        self.debug_steps += [self.iter]

        if self.conf.debug:
            # Save loss values for visualization
            self.loss_GANs += [util.move2cpu(self.loss_GAN)]
            self.loss_cycle_forwards += [
                util.move2cpu(self.loss_cycle_forward)
            ]
            self.loss_cycle_backwards += [
                util.move2cpu(self.loss_cycle_backward)
            ]
            self.loss_interps += [util.move2cpu(self.loss_interp)]
            self.loss_Discriminators += [
                util.move2cpu(self.loss_Discriminator)
            ]
Example 20
def test_homo(conf, gan, input_tensor, must_divide=8):
    shift_range = np.arange(conf.non_rect_shift_range[0], conf.non_rect_shift_range[1], conf.non_rect_shift_range[2])
    total = (len(conf.non_rect_scales)*len(shift_range))**2
    ind = 0
    for scale1 in conf.non_rect_scales:
        for scale2 in conf.non_rect_scales:
            scale = [scale1, scale2]
            for shift1 in shift_range:
                for shift2 in shift_range:
                    ind += 1
                    shifts = (shift1, shift2)
                    sz = input_tensor.shape
                    out_pad = np.uint8(255*np.ones([np.uint32(np.floor(sz[2]*scale[0])), np.uint32(np.floor(3*sz[3]*scale[1])), 3]))

                    pad_l = abs(int(np.ceil(sz[3] * shifts[0])))
                    pad_r = abs(int(np.ceil(sz[3] * shifts[1])))

                    in_mask = torch.zeros(sz[0], sz[1], sz[2], pad_l + sz[3] + pad_r).cuda()
                    input_for_regular = torch.zeros(sz[0], sz[1], sz[2], pad_l + sz[3] + pad_r).cuda()

                    in_size = in_mask.shape[2:]

                    out_size = (np.uint32(np.floor(scale[0] * in_size[0] * 1.0 / must_divide) * must_divide),
                                np.uint32(np.floor(scale[1] * in_size[1] * 1.0 / must_divide) * must_divide))

                    if pad_r > 0:
                        in_mask[:,:, :, pad_l:-pad_r] = torch.ones_like(input_tensor)
                        input_for_regular[:, :, :, pad_l:-pad_r] = input_tensor
                    else:
                        in_mask[:, :, :, pad_l:] = torch.ones_like(input_tensor)
                        input_for_regular[:, :, :, pad_l:] = input_tensor

                    out = test_one_scale(gan, input_tensor, out_size, conf.must_divide, affine=shifts, return_tensor=True, size_instead_scale=True)
                    # regular = transform(input_tensor, out_size, shifts)
                    out_mask = _make_homography_mask(in_mask, out_size, shifts)

                    out = util.tensor2im(out_mask * out + 1 - out_mask)
                    # regular_out = util.tensor2im(out_mask * regular + 1 - out_mask)
                    # out_pad[:, sz[3] - pad_l:  sz[3] - pad_l + out_size[1], :] = out
                    # encode the shifts as zero-padded tenths; negative values
                    # get one extra digit for the minus sign
                    shift_str = "{1:0{0}d}_{3:0{2}d}".format(2 if shift1 >= 0 else 3, int(10*shift1),
                                                             2 if shift2 >= 0 else 3, int(10*shift2))

                    # out = np.rot90(out, 3)
                    # regular_out = np.rot90(regular_out, 3)

                    Image.fromarray(out, 'RGB').save(conf.output_dir_path + '/scale_%02d_%02d_transform %s_ingan.png' % (int(10*scale1), int(10*scale2), shift_str))
                    # Image.fromarray(regular_out, 'RGB').save(conf.output_dir_path + '/scale_%02d_%02d_transform %s_ref.png' % (scale1, scale2, shift_str))
                    print(("{}/{}\tscale:{}\tshift:{}".format(ind, total, scale, shifts)))
Example 21
def random_eval():
    (ss, ls), ims = util.load_data()
    test_index = 50
    r_l = 10
    end_index = test_index + r_l
    sample_s, sample_l, sample_i = (ss[test_index:end_index],
                                    ls[test_index:end_index],
                                    ims[test_index:end_index])

    obj_vec_d = 300
    model = Scence2Image(encoding_d=9, obj_vec_d=obj_vec_d)
    # model = Scence2Image()

    if torch.cuda.is_available():
        model.cuda()

    SAVE_PATH = "/media/easonnie/Seagate Expansion Drive/RL_net/m_32999_4.3951619817199825"
    model.load_state_dict(torch.load(SAVE_PATH))

    model.eval()

    sample_s_v = Variable(sample_s)
    sample_l_v = Variable(sample_l)
    sample_i_v = Variable(sample_i)

    print(sample_s_v)

    if torch.cuda.is_available():
        sample_s_v = sample_s_v.cuda()
        sample_l_v = sample_l_v.cuda()
        sample_i_v = sample_i_v.cuda()
    vecs = model.scence2vec(sample_s_v, sample_l_v)

    vec1 = vecs[0]
    vec2 = vecs[2]

    ims = util.abstract_changing(vec1, vec2, model)
    # print(ims)

    g_im = vutil.make_grid(ims.data, nrow=8, padding=15)
    Image.fromarray(util.tensor2im(g_im)).save("g_im_changing_8_(0-2).png")
Example 22
 def save_current_results(self,
                          visuals,
                          epoch=None,
                          isTrain=True,
                          prefix=''):
     """ save visualization image to file """
     for label, image in visuals.items():
         image_numpy = util.tensor2im(image)
         # map labels to more descriptive file names
         if label == 'real_A':
             label_new = 'BW'
         elif label == 'real_B_rgb':
             label_new = 'original'
         elif label == 'fake_B_rgb':
             label_new = 'colorized'
         else:
             label_new = label  # fall back to the raw label to avoid a NameError
         if isTrain:
             img_path = os.path.join(
                 './train_result', 'epoch%.3d_%s.jpg' % (epoch, label_new))
         else:  # test mode, original file name as prefix
             img_path = os.path.join('./test_result',
                                     '%s_%s.jpg' % (prefix, label_new))
         util.save_image(image_numpy, img_path)
Example 23
def save_images(webpage, visuals, image_path, aspect_ratio=1.0, width=256):
    image_dir = webpage.get_image_dir()
    short_path = ntpath.basename(image_path[0])
    name = os.path.splitext(short_path)[0]

    webpage.add_header(name)
    ims, txts, links = [], [], []

    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        image_name = '%s_%s.png' % (name, label)
        save_path = os.path.join(image_dir, image_name)
        h, w, _ = im.shape
        if aspect_ratio > 1.0:
            im = imresize(im, (h, int(w * aspect_ratio)), interp='bicubic')
        if aspect_ratio < 1.0:
            im = imresize(im, (int(h / aspect_ratio), w), interp='bicubic')
        util.save_image(im, save_path)

        ims.append(image_name)
        txts.append(label)
        links.append(image_name)
    webpage.add_images(ims, txts, links, width=width)
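Note that imresize taking an interp= keyword is the long-removed scipy.misc.imresize. A rough drop-in substitute via Pillow (a sketch, assuming Pillow is available; scipy's size argument was (h, w) while PIL's resize takes (w, h)):

import numpy as np
from PIL import Image

def imresize(arr, size, interp='bicubic'):
    # size arrives as (h, w); PIL wants (w, h)
    resample = {'nearest': Image.NEAREST,
                'bilinear': Image.BILINEAR,
                'bicubic': Image.BICUBIC}[interp]
    h, w = size
    return np.array(Image.fromarray(arr).resize((w, h), resample))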
Example 24
def save_images(visuals, output_dir, file_name, aspect_ratio=1.0, width=256):
    for label, im_data in visuals.items():
        im = util.tensor2im(im_data)
        # note: every label writes to the same path, so the last visual wins
        save_path = os.path.join(output_dir, file_name)
        util.save_image(im, save_path, aspect_ratio=aspect_ratio)
Example 25
        s_depth = np.squeeze(data['sparse'].data.cpu().numpy())

        pred_depth[pred_depth <= 0.9] = 0.9
        pred_depth[pred_depth > 85] = 85
        mask = (gt_depth > 0) & (gt_depth <= 100)

        mae[ind], rmse[ind], imae[ind], irmse[ind], a1[ind], \
            a2[ind], a3[ind], a4[ind] = util.compute_errors(gt_depth[mask], pred_depth[mask])

        if opt.save:
            gt_depth = gt_depth[96:, :]
            s_depth = s_depth[96:, :]
            pred_depth = pred_depth[96:, :]
            gt_image = ToFalseColors(gt_depth,
                                     mask=(gt_depth > 0).astype(np.float32))
            pred_image = ToFalseColors(pred_depth)
            s_image = ToFalseColors(s_depth,
                                    mask=(s_depth > 0).astype(np.float32))

            gt_img = Image.fromarray(gt_image, 'RGB')
            pred_img = Image.fromarray(pred_image, 'RGB')
            s_img = Image.fromarray(s_image, 'RGB')
            gt_img.save('%s/%05d_gt.png' % (dirs, ind))
            pred_img.save('%s/%05d_pred.png' % (dirs, ind))
            s_img.save('%s/%05d_sparse.png' % (dirs, ind))
            im = util.tensor2im(visuals['img'])
            util.save_image(im, '%s/%05d_img.png' % (dirs, ind), 'RGB')

    print(mae.mean(), rmse.mean(), imae.mean(), irmse.mean(), a1.mean(),
          a2.mean(), a3.mean(), a4.mean())
Example 26
    def display_current_results(self, visuals, epoch, save_result):
        """Display current results on visdom; save current results to an HTML file.

        Parameters:
            visuals (OrderedDict) - - dictionary of images to display or save
            epoch (int) - - the current epoch
            save_result (bool) - - if save the current results to an HTML file
        """
        if self.display_id > 0:  # show images in the browser using visdom
            ncols = self.ncols
            if ncols > 0:  # show all the images in one visdom panel
                ncols = min(ncols, len(visuals))
                h, w = next(iter(visuals.values())).shape[:2]
                table_css = """<style>
                        table {border-collapse: separate; border-spacing: 4px; white-space: nowrap; text-align: center}
                        table td {width: %dpx; height: %dpx; padding: 4px; outline: 4px solid black}
                        </style>""" % (w, h)  # create a table css
                # create a table of images.
                title = self.name
                label_html = ''
                label_html_row = ''
                images = []
                idx = 0
                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    label_html_row += '<td>%s</td>' % label
                    images.append(image_numpy.transpose([2, 0, 1]))
                    idx += 1
                    if idx % ncols == 0:
                        label_html += '<tr>%s</tr>' % label_html_row
                        label_html_row = ''
                white_image = np.ones_like(
                    image_numpy.transpose([2, 0, 1])) * 255
                while idx % ncols != 0:
                    images.append(white_image)
                    label_html_row += '<td></td>'
                    idx += 1
                if label_html_row != '':
                    label_html += '<tr>%s</tr>' % label_html_row
                try:
                    self.vis.images(images,
                                    nrow=ncols,
                                    win=self.display_id + 1,
                                    padding=2,
                                    opts=dict(title=title + ' images'))
                    label_html = '<table>%s</table>' % label_html
                    self.vis.text(table_css + label_html,
                                  win=self.display_id + 2,
                                  opts=dict(title=title + ' labels'))
                except VisdomExceptionBase:
                    self.create_visdom_connections()

            else:  # show each image in a separate visdom panel;
                idx = 1
                try:
                    for label, image in visuals.items():
                        image_numpy = util.tensor2im(image)
                        self.vis.image(image_numpy.transpose([2, 0, 1]),
                                       opts=dict(title=label),
                                       win=self.display_id + idx)
                        idx += 1
                except VisdomExceptionBase:
                    self.create_visdom_connections()

        if self.use_html and (
                save_result or not self.saved
        ):  # save images to an HTML file if they haven't been saved.
            self.saved = True
            # save images to the disk
            for label, image in visuals.items():
                image_numpy = util.tensor2im(image)
                img_path = os.path.join(self.img_dir,
                                        'epoch%.3d_%s.png' % (epoch, label))
                util.save_image(image_numpy, img_path)

            # update website
            webpage = html.HTML(self.web_dir,
                                'Experiment name = %s' % self.name,
                                refresh=1)
            for n in range(epoch, 0, -1):
                webpage.add_header('epoch [%d]' % n)
                ims, txts, links = [], [], []

                for label, image in visuals.items():
                    image_numpy = util.tensor2im(image)
                    img_path = 'epoch%.3d_%s.png' % (n, label)
                    ims.append(img_path)
                    txts.append(label)
                    links.append(img_path)
                webpage.add_images(ims, txts, links, width=self.win_size)
            webpage.save()
Example 27
def produce_dev_images():
    import util
    (ss, ls), ims = util.load_data(mode='dev')
    test_index = 50
    r_l = 30
    end_index = test_index + r_l
    sample_s, sample_l, sample_i = ss[test_index:end_index], ls[test_index:end_index], ims[test_index:end_index]

    obj_vec_d = 2400
    model = AutoEncoder(obj_vec_d=obj_vec_d)
    # model = Scence2Image()

    if torch.cuda.is_available():
        model.cuda()

    SAVE_PATH = "/media/easonnie/Seagate Expansion Drive/RL_net/m_82999_0.2122452172755806_d:2400_auto_encoder"
    model.load_state_dict(torch.load(SAVE_PATH))

    model.eval()

    sample_s_v = Variable(sample_s)
    sample_l_v = Variable(sample_l)
    sample_i_v = Variable(sample_i)

    if torch.cuda.is_available():
        sample_s_v = sample_s_v.cuda()
        sample_l_v = sample_l_v.cuda()
        sample_i_v = sample_i_v.cuda()

    vecs = model.im2vec(sample_i_v)

    im_list = []

    vec1 = vecs[0]
    vec2 = vecs[2]
    ims = util.abstract_changing(vec1, vec2, model)
    im_list.append(ims)

    vec1 = vecs[1]
    vec2 = vecs[3]
    ims = util.abstract_changing(vec1, vec2, model)
    im_list.append(ims)

    vec1 = vecs[4]
    vec2 = vecs[5]
    ims = util.abstract_changing(vec1, vec2, model)
    im_list.append(ims)

    vec1 = vecs[12]
    vec2 = vecs[6]
    ims = util.abstract_changing(vec1, vec2, model)
    im_list.append(ims)

    vec1 = vecs[21]
    vec2 = vecs[9]
    ims = util.abstract_changing(vec1, vec2, model)
    im_list.append(ims)

    vec1 = vecs[14]
    vec2 = vecs[20]
    ims = util.abstract_changing(vec1, vec2, model)
    im_list.append(ims)

    ims = torch.cat(im_list, dim=0)
    # print(ims)

    g_im = vutil.make_grid(ims.data, nrow=8, padding=15)
    Image.fromarray(util.tensor2im(g_im)).save("vector_shifting_3_auto.png")
Example 28
    # (snippet begins mid-function: AB holds the A|B pair side by side,
    #  with w presumably set to AB.size(2) // 2 before this point)
    h = AB.size(1)
    w_offset = random.randint(0, max(0, w - fineSize - 1))
    h_offset = random.randint(0, max(0, h - fineSize - 1))
    # Extract the input mask with hair pixels are overlaid
    A = AB[:, h_offset:h_offset + fineSize, w_offset:w_offset + fineSize]
    B = AB[:, h_offset:h_offset + fineSize,
           w + w_offset:w + w_offset + fineSize]

    A = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(A)
    B = transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))(B)

    # Load the network and apply the trained model
    # (volatile=True is the legacy pre-0.4 PyTorch inference flag)
    A = Variable(A, volatile=True)
    model2 = load_checkpoint('checkpoint.pth')
    output2 = model2(A.unsqueeze(0))
    output2 = util.tensor2im(output2.data)

    # Save output
    image_pil = Image.fromarray(output2)
    image_pil.save(os.path.join('./dataset/simulated', image))

    image_h = imread(
        os.path.join('dataset/nohair_test',
                     image.split('.')[0].split('-')[0] + '.jpg'))
    image_m = imread(
        os.path.join('dataset/mask_test',
                     image.split('.')[0].split('-')[1] + '.jpg'))

    if len(image_m.shape) > 2:
        image_m = image_m[:, :, 1]
Example 29
def anomaly_test():
    print('Anomaly')

    category = 'hand'
    img_file = requestFolderName + '/image-16.jpg'

    print('saved ', img_file, ' category: ', category)
    count = 16

    files = []
    for i in range(65):
        files.append(img_file)

    data = {'0': np.array(files)}

    mura_valid_df = pd.DataFrame(data)
    print(mura_valid_df.head())
    transforms = transform(False, True, True, True, True, True, True, False)
    # note: the line below immediately overwrites the transforms built above
    transforms = inverse_transform(False, True, True, True, True, True, True,
                                   False)

    # resize image to 256 X 256 to construct the output image

    noresize_transform = transform(False, False, False, True, True, True, True,
                                   False)
    img = cv2.imread(img_file)
    print(img.shape)
    img = noresize_transform(img)
    print(img.shape)

    transforms1 = transform(False, True, False, False, False, False, True,
                            False)
    resized_input_img = transforms1(img)

    # transforms2 = transform(False, True, False, False, False, False, True, False)
    # resized_input_img = transforms2(img)

    # rotation, hflip, resize, totensor, normalize, centercrop, to_pil, gray

    # valid_dataset = MURA_dataset(mura_valid_df, '/content/drive/Shared drives/MeanSquare-Drive/Advanced-DeepLearning/', transforms)
    valid_dataset = MURA_dataset(mura_valid_df, '', transforms)
    valid_dataloader = torch.utils.data.DataLoader(dataset=valid_dataset,
                                                   batch_size=64,
                                                   shuffle=True,
                                                   num_workers=0,
                                                   drop_last=False)
    if category == 'hand':
        out = 'models/XR_HAND/'
    else:
        out = 'models/XR_ELBOW/'

    max_auc = 0
    latent_dim = 128
    channels = 3
    batch_size = 64

    generator = Generator(dim=64, zdim=latent_dim, nc=channels)
    discriminator = Discriminator(dim=64,
                                  zdim=latent_dim,
                                  nc=channels,
                                  out_feat=True)
    encoder = Encoder(dim=64, zdim=latent_dim, nc=channels)
    device = torch.device("cuda:0" if (torch.cuda.is_available()) else "cpu")
    device = 'cpu'  # note: forces CPU, overriding the device chosen above
    generator.load_state_dict(
        torch.load(out + 'G_epoch5000.pt', map_location=torch.device('cpu')))
    discriminator.load_state_dict(
        torch.load(out + 'D_epoch5000.pt', map_location=torch.device('cpu')))

    generator.to(device)
    encoder.to(device)
    discriminator.to(device)

    with torch.no_grad():
        labels = torch.zeros(size=(len(valid_dataloader.dataset), ),
                             dtype=torch.long,
                             device=device)

        scores = torch.empty(size=(len(valid_dataloader.dataset), ),
                             dtype=torch.float32,
                             device=device)
        for i, (imgs, lbls) in enumerate(valid_dataloader):
            print('imgs. shape ', imgs.shape)
            imgs = imgs.to(device)
            lbls = lbls.to(device)

            labels[i * batch_size:(i + 1) * batch_size].copy_(lbls)
            emb_query = encoder(imgs)
            print('emb_query. shape ', emb_query.shape)

            fake_imgs = generator(emb_query)
            emb_fake = encoder(fake_imgs)

            image_feats = discriminator(imgs)
            recon_feats = discriminator(fake_imgs)

            diff = imgs - fake_imgs

            image1_tensor = diff[0]
            im = tensor2im(imgs)

            im2 = tensor2im(fake_imgs)
            print(lbls)

            im3 = tensor2im(diff)
            # plt.figure(1)
            # plt.subplot(311)
            # plt.title('Real image')
            # plt.imshow(im)

            # plt.subplot(312)
            # plt.title('Fake img')
            # plt.imshow(im2)
            # plt.show()

            img = cv2.GaussianBlur(im3, (5, 5), 0)
            img_gray = rgb2gray(img)
            #plt.imshow(img_gray)
            thresh = threshold_otsu(img_gray)
            binary = img_gray > thresh

            #plt.imshow(binary)
            im_rgb = np.array(Image.fromarray(binary).convert('RGB'))
            mask = binary.copy()
            mask[mask > 0.5] = 1
            mask[mask <= 0.5] = 0

            mask3 = np.stack((mask, mask, mask), axis=2)

            all_labels = measure.label(mask)
            all_labels[all_labels >= 1] = 255
            all_labels[all_labels < 1] = 0
            all_labels3 = np.stack((all_labels, all_labels, all_labels),
                                   axis=2)

            #             kernel = np.ones((6, 6), np.uint8)

            #             # Using cv2.erode() method
            #             image = cv2.erode(Image.fromarray(mask3), kernel, cv2.BORDER_REFLECT)

            black_pixels_mask = np.all(mask3 == 1, axis=2)
            non_black_pixels_mask = np.any(mask3 > [0, 0, 0], axis=-1)

            all_labels3[non_black_pixels_mask] = [255, 0, 0]

            # plt.subplot(313)
            # plt.title('Difference')
            # plt.imshow(im3)
            # plt.show()
            #
            # plt.subplot(321)
            # plt.title('colored mask')
            # plt.imshow(all_labels3)
            # plt.show()

            gray = cv2.cvtColor(im3, cv2.COLOR_BGR2GRAY)

            # Find Canny edges
            edged = cv2.Canny(gray, 30, 200)

            # Finding Contours
            # Use a copy of the image e.g. edged.copy()
            # since findContours alters the image
            contours, hierarchy = cv2.findContours(edged, cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_NONE)

            # plt.subplot(322)
            # plt.imshow(edged)
            # plt.title('Edged')
            # plt.show()

            print("Number of Contours found = " + str(len(contours)))

            # Draw all contours
            # -1 signifies drawing all contours
            print('im3: ', im3.shape)
            backtorgb = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)
            print('contours: ', len(contours))
            img_contours = np.zeros(backtorgb.shape)

            cv2.drawContours(img_contours, contours, -1, (220, 0, 0), 1)
            resized_output_image = cv2.resize(img_contours, (256, 256))

            cv2.imshow('output blue', resized_output_image)
            cv2.waitKey(0)

            cv2.imwrite('output_files/output-image-' + str(count) + '.jpg',
                        resized_output_image)
            #Image.fromarray(resized_output_image).save('output_files/output-image-' + str(count) + '.jpg')
            print('resize: ', resized_output_image.shape,
                  np.asarray(resized_input_img).shape)

            mix_img = cv2.addWeighted(np.asarray(resized_input_img),
                                      0.3,
                                      resized_output_image,
                                      0.7,
                                      0,
                                      dtype=cv2.CV_32F)
            #Image.fromarray(mix_img).save('output_files/mix-image-' + str(count) + '.jpg')
            cv2.imwrite('output_files/mix-image-' + str(count) + '.jpg',
                        mix_img)

            # plt.subplot(323)
            # plt.title('contour')
            # plt.imshow(gray)

            # plt.show()

            thresh = 50
            ret, thresh_img = cv2.threshold(gray, thresh, 255,
                                            cv2.THRESH_BINARY)

            contours, hierarchy = cv2.findContours(thresh_img, cv2.RETR_TREE,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            print('contours second time : ', len(contours))

            backtorgb1 = cv2.cvtColor(gray, cv2.COLOR_GRAY2RGB)

            cv2.drawContours(backtorgb1, contours, -1, (0, 255, 0), 1)

            #backtorgb = cv2.cvtColor(gray,cv2.COLOR_GRAY2RGB)

            cv2.imshow('output', backtorgb1)
            cv2.waitKey(0)

            # Image.fromarray(backtorgb1).save('output_files/image-' + str(count) + '.jpg')
            # cv2.imwrite('output_files/cv-image-' + str(count) + '.jpg', backtorgb1)
            #break

            image_distance = torch.mean(torch.pow(imgs - fake_imgs, 2),
                                        dim=[1, 2, 3])
            feat_distance = torch.mean(torch.pow(image_feats - recon_feats, 2),
                                       dim=1)
            print(emb_query.shape, emb_fake.shape)
            z_distance = mse_loss(emb_query,
                                  emb_fake)  # mse_loss(emb_query, emb_fake)
            # print z_distance
            print('z_distance=', z_distance)
            # print('hiiiiiiiii')
            scores[i * batch_size:(i + 1) * batch_size].copy_(feat_distance)
            print('feat_distance ', feat_distance[0])
            break

    output = {}
    output['status'] = 'done'
    return 'done'
Example 30
def train():

    da1_real_loss_record = AvgMeter()
    da1_fake_loss_record = AvgMeter()
    da1_loss_record = AvgMeter()
    da2_real_loss_record = AvgMeter()
    da2_fake_loss_record = AvgMeter()
    da2_loss_record = AvgMeter()
    da3_real_loss_record = AvgMeter()
    da3_fake_loss_record = AvgMeter()
    da3_loss_record = AvgMeter()

    db1_real_loss_record = AvgMeter()
    db1_fake_loss_record = AvgMeter()
    db1_loss_record = AvgMeter()
    db2_real_loss_record = AvgMeter()
    db2_fake_loss_record = AvgMeter()
    db2_loss_record = AvgMeter()
    db3_real_loss_record = AvgMeter()
    db3_fake_loss_record = AvgMeter()
    db3_loss_record = AvgMeter()

    ga_ad1_loss_record = AvgMeter()
    ga_ad2_loss_record = AvgMeter()
    ga_ad3_loss_record = AvgMeter()
    gb_ad1_loss_record = AvgMeter()
    gb_ad2_loss_record = AvgMeter()
    gb_ad3_loss_record = AvgMeter()

    ga_loss_record = AvgMeter()
    gb_loss_record = AvgMeter()

    cyc_a1_loss_record = AvgMeter()
    cyc_a2_loss_record = AvgMeter()
    cyc_a3_loss_record = AvgMeter()
    cyc_b1_loss_record = AvgMeter()
    cyc_b2_loss_record = AvgMeter()
    cyc_b3_loss_record = AvgMeter()

    syn_a1_loss_record = AvgMeter()
    syn_a2_loss_record = AvgMeter()
    syn_a3_loss_record = AvgMeter()
    syn_b1_loss_record = AvgMeter()
    syn_b2_loss_record = AvgMeter()
    syn_b3_loss_record = AvgMeter()

    g_loss_record = AvgMeter()

    total_steps = 0
    for epoch in range(1, opt.niter + opt.niter_decay + 1):
        for i, data in enumerate(dataset):

            input_A = data['A'].float()
            input_B = data['B'].float()

            inputAimg = input_A
            inputBimg = input_B

            input_Aimg = util.tensor2im(inputAimg)
            input_Bimg = util.tensor2im(inputBimg)
            input_A128 = torch.unsqueeze(scale128_transform(input_Aimg), 0)

            input_A64 = torch.unsqueeze(scale64_transform(input_Aimg), 0)
            input_B128 = torch.unsqueeze(scale128_transform(input_Bimg), 0)
            input_B64 = torch.unsqueeze(scale64_transform(input_Bimg), 0)

            real_A = Variable(inputAimg).cuda()
            real_A128 = Variable(input_A128).cuda()
            real_A64 = Variable(input_A64).cuda()

            real_B = Variable(inputBimg).cuda()
            real_B128 = Variable(input_B128).cuda()
            real_B64 = Variable(input_B64).cuda()

            ### PHOTO-->SKETCH-->PHOTO
            fake_B64, fake_B128, fake_B = GA(real_A)
            rec_A64, rec_A128, rec_A = GB(fake_B)
            ### SKETCH-->PHOTO-->SKETCH
            fake_A64, fake_A128, fake_A = GB(real_B)
            rec_B64, rec_B128, rec_B = GA(fake_A)

            ### update D first
            DA1.zero_grad()
            DA2.zero_grad()
            DA3.zero_grad()
            DB1.zero_grad()
            DB2.zero_grad()
            DB3.zero_grad()

            fakeA = fake_A_pool.query(torch.cat((real_B, fake_A), 1).data)
            realA = torch.cat((real_B, real_A), 1)
            loss_DA256_real, loss_DA256_fake, loss_DA256 = update_d(
                DA1, realA, fakeA)
            optimizer_D_A1.step()
            #
            # print(real_A128)
            # print(fake_A128)
            fakeA128 = fake_A128_pool.query(
                torch.cat((real_B128, fake_A128), 1).data)
            realA128 = torch.cat((real_B128, real_A128), 1)
            loss_DA128_real, loss_DA128_fake, loss_DA128 = update_d(
                DA2, realA128, fakeA128)
            optimizer_D_A2.step()

            fakeA64 = fake_A64_pool.query(
                torch.cat((real_B64, fake_A64), 1).data)
            realA64 = torch.cat((real_B64, real_A64), 1)
            loss_DA64_real, loss_DA64_fake, loss_DA64 = update_d(
                DA3, realA64, fakeA64)
            optimizer_D_A3.step()

            fakeB = fake_B_pool.query(torch.cat((real_A, fake_B), 1).data)
            realB = torch.cat((real_A, real_B), 1)
            loss_DB256_real, loss_DB256_fake, loss_DB256 = update_d(
                DB1, realB, fakeB)
            optimizer_D_B1.step()

            fakeB128 = fake_B128_pool.query(
                torch.cat((real_A128, fake_B128), 1).data)
            realB128 = torch.cat((real_A128, real_B128), 1)
            loss_DB128_real, loss_DB128_fake, loss_DB128 = update_d(
                DB2, realB128, fakeB128)
            optimizer_D_B2.step()

            fakeB64 = fake_B64_pool.query(
                torch.cat((real_A64, fake_B64), 1).data)  # real_A64: matches the other D_B updates
            realB64 = torch.cat((real_A64, real_B64), 1)
            loss_DB64_real, loss_DB64_fake, loss_DB64 = update_d(
                DB3, realB64, fakeB64)
            optimizer_D_B3.step()

            ## update G
            GA.zero_grad()
            GB.zero_grad()

            # First, G(A) should fake the discriminator
            pred_fakeB = DA1(fakeB)
            loss_GA_GAN = criterionGAN(pred_fakeB, True)
            pred_fakeB128 = DA2(fakeB128)
            loss_GA_GAN128 = criterionGAN(pred_fakeB128, True)
            pred_fakeB64 = DA3(fakeB64)
            loss_GA_GAN64 = criterionGAN(pred_fakeB64, True)

            pred_fakeA = DB1(fakeA)
            loss_GB_GAN = criterionGAN(pred_fakeA, True)
            pred_fakeA128 = DB2(fakeA128)
            loss_GB_GAN128 = criterionGAN(pred_fakeA128, True)
            pred_fakeA64 = DB3(fakeA64)
            loss_GB_GAN64 = criterionGAN(pred_fakeA64, True)

            # Second, G(A) = B
            syn_A256 = criterionRec(fake_A, real_A)
            syn_A128 = criterionRec(fake_A128, real_A128)
            syn_A64 = criterionRec(fake_A64, real_A64)

            syn_B256 = criterionRec(fake_B, real_B)
            syn_B128 = criterionRec(fake_B128, real_B128)
            syn_B64 = criterionRec(fake_B64, real_B64)

            cyc_A256 = criterionRec(rec_A, real_A)
            cyc_A128 = criterionRec(rec_A128, real_A128)
            cyc_A64 = criterionRec(rec_A64, real_A64)

            cyc_B256 = criterionRec(rec_B, real_B)
            cyc_B128 = criterionRec(rec_B128, real_B128)
            cyc_B64 = criterionRec(rec_B64, real_B64)

            # loss weights: adversarial (eta), cycle (mu), synthesis (Lambda)
            eta = 1
            mu = 0.7
            Lambda = 10

            loss_G =  eta * loss_GA_GAN \
                    + eta * loss_GA_GAN128\
                    + eta * loss_GA_GAN64\
                    + eta * loss_GB_GAN\
                    + eta * loss_GB_GAN128\
                    + eta * loss_GB_GAN64\
                    + mu * cyc_A64 \
                    + mu * cyc_A128 \
                    + mu * cyc_A256 \
                    + mu * cyc_B64 \
                    + mu * cyc_B128 \
                    + mu * cyc_B256 \
                    + Lambda * syn_A64 \
                    + Lambda * syn_A128 \
                    + Lambda * syn_A256 \
                    + Lambda * syn_B64 \
                    + Lambda * syn_B128 \
                    + Lambda * syn_B256

            loss_G.backward()
            optimizer_G.step()

            da1_loss_record.update(loss_DA256.data[0])
            da1_real_loss_record.update(loss_DA256_real.data[0])
            da1_fake_loss_record.update(loss_DA256_fake.data[0])
            da2_loss_record.update(loss_DA128.data[0])
            da2_real_loss_record.update(loss_DA128_real.data[0])
            da2_fake_loss_record.update(loss_DA128_fake.data[0])
            da3_loss_record.update(loss_DA64.data[0])
            da3_real_loss_record.update(loss_DA64_real.data[0])
            da3_fake_loss_record.update(loss_DA64_fake.data[0])

            db1_loss_record.update(loss_DB256.data[0])
            db1_real_loss_record.update(loss_DB256_real.data[0])
            db1_fake_loss_record.update(loss_DB256_fake.data[0])
            db2_loss_record.update(loss_DB128.data[0])
            db2_real_loss_record.update(loss_DB128_real.data[0])
            db2_fake_loss_record.update(loss_DB128_fake.data[0])
            db3_loss_record.update(loss_DB64.data[0])
            db3_real_loss_record.update(loss_DB64_real.data[0])
            db3_fake_loss_record.update(loss_DB64_fake.data[0])

            ga_ad1_loss_record.update(loss_GA_GAN.data[0])
            ga_ad2_loss_record.update(loss_GA_GAN128.data[0])
            ga_ad3_loss_record.update(loss_GA_GAN64.data[0])
            gb_ad1_loss_record.update(loss_GB_GAN.data[0])
            gb_ad2_loss_record.update(loss_GB_GAN128.data[0])
            gb_ad3_loss_record.update(loss_GB_GAN64.data[0])

            cyc_a1_loss_record.update(cyc_A256.data[0])
            cyc_a2_loss_record.update(cyc_A128.data[0])
            cyc_a3_loss_record.update(cyc_A64.data[0])
            cyc_b1_loss_record.update(cyc_B256.data[0])
            cyc_b2_loss_record.update(cyc_B128.data[0])
            cyc_b3_loss_record.update(cyc_B64.data[0])

            syn_a1_loss_record.update(syn_A256.data[0])
            syn_a2_loss_record.update(syn_A128.data[0])
            syn_a3_loss_record.update(syn_A64.data[0])
            syn_b1_loss_record.update(syn_B256.data[0])
            syn_b2_loss_record.update(syn_B128.data[0])
            syn_b3_loss_record.update(syn_B64.data[0])
            g_loss_record.update(loss_G.data[0])
            # print(loss_G.data[0])

            if i % opt.print_iter == 0:
                print(
                '[train]: [epoch %d], [iter %d / %d],'
                '[da1_ad_loss %.5f],[da1_real_loss %.5f],[da1_fake_loss %.5f],'
                '[da2_ad_loss %.5f],[da2_real_loss %.5f],[da2_fake_loss %.5f],'
                '[da3_ad_loss %.5f],[da3_real_loss %.5f],[da3_fake_loss %.5f],'
                '[db1_ad_loss %.5f],[db1_real_loss %.5f],[db1_fake_loss %.5f],'
                '[db2_ad_loss %.5f],[db2_real_loss %.5f],[db2_fake_loss %.5f],'
                '[db3_ad_loss %.5f],[db3_real_loss %.5f],[db3_fake_loss %.5f],'
                '[ga_ad1_loss %.5f],[ga_ad2_loss %.5f],[ga_ad3_loss %.5f],'
                '[gb_ad1_loss %.5f],[gb_ad2_loss %.5f],[gb_ad3_loss %.5f],'
                '[syn_a1_loss %.5f],[syn_a2_loss %.5f],[syn_a3_loss %.5f],'
                '[syn_b1_loss %.5f],[syn_b2_loss %.5f],[syn_b3_loss %.5f],'
                '[cyc_a1_loss %.5f],[cyc_a2_loss %.5f],[cyc_a3_loss %.5f],'
                '[cyc_b1_loss %.5f],[cyc_b2_loss %.5f],[cyc_b3_loss %.5f],'
                '[g_loss %.5f]' % \
                (epoch, i + 1, dataset_size,
                 da1_loss_record.avg, da1_real_loss_record.avg, da1_fake_loss_record.avg,
                 da2_loss_record.avg, da2_real_loss_record.avg, da2_fake_loss_record.avg,
                 da3_loss_record.avg, da3_real_loss_record.avg, da3_fake_loss_record.avg,
                 db1_loss_record.avg, db1_real_loss_record.avg, db1_fake_loss_record.avg,
                 db2_loss_record.avg, db2_real_loss_record.avg, db2_fake_loss_record.avg,
                 db3_loss_record.avg, db3_real_loss_record.avg, db3_fake_loss_record.avg,
                 ga_ad1_loss_record.avg, ga_ad2_loss_record.avg, ga_ad3_loss_record.avg,
                 gb_ad1_loss_record.avg, gb_ad2_loss_record.avg, gb_ad3_loss_record.avg,
                 syn_a1_loss_record.avg, syn_a2_loss_record.avg, syn_a3_loss_record.avg,
                 syn_b1_loss_record.avg, syn_b2_loss_record.avg, syn_b3_loss_record.avg,
                 cyc_a1_loss_record.avg, cyc_a2_loss_record.avg, cyc_a3_loss_record.avg,
                 cyc_b1_loss_record.avg, cyc_b2_loss_record.avg, cyc_b3_loss_record.avg,
                 g_loss_record.avg)
                )

            iter = epoch * dataset_size + i  # global step (note: shadows the builtin iter)

            if i % opt.display_iter == 0:
                A256 = util.get_current_visuals(real_A, fake_B, rec_A, real_B)
                A128 = util.get_current_visuals(real_A128, fake_B128, rec_A128,
                                                real_B128)
                A64 = util.get_current_visuals(real_A64, fake_B64, rec_A64,
                                               real_B64)
                B256 = util.get_current_visuals(real_B, fake_A, rec_B, real_A)
                B128 = util.get_current_visuals(real_B128, fake_A128, rec_B128,
                                                real_A128)
                B64 = util.get_current_visuals(real_B64, fake_A64, rec_B64,
                                               real_A64)

                train_visual.display_current_results(A256, iter, winid=1)
                train_visual.display_current_results(A128, iter, winid=2)
                train_visual.display_current_results(A64, iter, winid=3)
                train_visual.display_current_results(B256, iter, winid=4)
                train_visual.display_current_results(B128, iter, winid=5)
                train_visual.display_current_results(B64, iter, winid=6)

                err1 = OrderedDict([
                    ('cyc_a1_loss', cyc_A256.data[0]),
                    ('cyc_b1_loss', cyc_B256.data[0]),
                    ('syn_a1_loss', syn_A256.data[0]),
                    ('syn_b1_loss', syn_B256.data[0]),
                    ('ga_ad1_loss', loss_GA_GAN.data[0]),
                    ('gb_ad1_loss', loss_GB_GAN.data[0]),
                    ('da1_loss', loss_DA256.data[0]),
                    ('da1_real_loss', loss_DA256_real.data[0]),
                    ('da1_fake_loss', loss_DA256_fake.data[0]),
                    ('db1_loss', loss_DB256.data[0]),
                    ('db1_real_loss', loss_DB256_real.data[0]),
                    ('db1_fake_loss', loss_DB256_fake.data[0]),
                ])
                # print iter
                # print err1
                train_visual.plot_current_errors(iter, err1, winid=10)
                test()

            if i % opt.save_iter == 0:
                snapshot_name = str(iter)
                torch.save(
                    GA.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_ga.pth'))
                torch.save(
                    GB.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_gb.pth'))
                torch.save(
                    DA1.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_da1.pth'))
                torch.save(
                    DA2.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_da2.pth'))
                torch.save(
                    DA3.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_da3.pth'))
                torch.save(
                    DB1.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_db1.pth'))
                torch.save(
                    DB2.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_db2.pth'))
                torch.save(
                    DB3.state_dict(),
                    os.path.join(opt.ckpt_path, snapshot_name + '_db3.pth'))
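A portability note on this example: loss.data[0] was the pre-0.4 PyTorch idiom for reading a scalar loss, and it raises an IndexError on current versions. The modern equivalent is .item(), e.g.:

# pre-0.4:  da1_loss_record.update(loss_DA256.data[0])
# 0.4+:     da1_loss_record.update(loss_DA256.item())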