Example #1
def draw_concat(Gs,Zs,reals,NoiseAmp,in_s,mode,m_noise,m_image,opt):
    G_z = in_s
    if len(Gs) > 0:
        if mode == 'rand':
            count = 0
            pad_noise = int(((opt.ker_size-1)*opt.num_layer)/2)
            if opt.mode == 'animation_train':
                pad_noise = 0
            for G,Z_opt,real_curr,real_next,noise_amp in zip(Gs,Zs,reals,reals[1:],NoiseAmp):
                if count == 0:
                    z = functions.generate_noise([1, Z_opt.shape[2] - 2 * pad_noise, Z_opt.shape[3] - 2 * pad_noise], device=opt.device)
                    z = z.expand(1, 3, z.shape[2], z.shape[3])
                else:
                    z = functions.generate_noise([opt.nc_z,Z_opt.shape[2] - 2 * pad_noise, Z_opt.shape[3] - 2 * pad_noise], device=opt.device)
                z = m_noise(z)
                G_z = G_z[:,:,0:real_curr.shape[2],0:real_curr.shape[3]]
                G_z = m_image(G_z)
                z_in = noise_amp*z+G_z
                G_z = G(z_in.detach(),G_z)
                G_z = imresize(G_z,1/opt.scale_factor,opt)
                G_z = G_z[:,:,0:real_next.shape[2],0:real_next.shape[3]]
                count += 1
        if mode == 'rec':
            count = 0
            for G,Z_opt,real_curr,real_next,noise_amp in zip(Gs,Zs,reals,reals[1:],NoiseAmp):
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                G_z = m_image(G_z)
                z_in = noise_amp*Z_opt+G_z
                G_z = G(z_in.detach(),G_z)
                G_z = imresize(G_z,1/opt.scale_factor,opt)
                G_z = G_z[:,:,0:real_next.shape[2],0:real_next.shape[3]]
                #if count != (len(Gs)-1):
                #    G_z = m_image(G_z)
                count += 1
    return G_z
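A minimal sketch of how this helper is typically consumed, assuming the padding layers and option names it already uses ('rand' builds the input for fake samples from fresh noise, 'rec' rebuilds the reconstruction input from the fixed Z_opt maps). This call site is an assumption modeled on SinGAN's per-scale training loop, not part of the example:

import torch.nn as nn

# assumed call site inside the per-scale training step
pad_image = int(((opt.ker_size - 1) * opt.num_layer) / 2)
m_noise = nn.ZeroPad2d(pad_image)
m_image = nn.ZeroPad2d(pad_image)
prev = draw_concat(Gs, Zs, reals, NoiseAmp, in_s, 'rand', m_noise, m_image, opt)    # input for fake samples
z_prev = draw_concat(Gs, Zs, reals, NoiseAmp, in_s, 'rec', m_noise, m_image, opt)   # input for the reconstruction loss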
Example #2
def draw_concat(Gs, Zs, reals, NoiseAmp, in_s, mode, m_noise, m_image, opt, noise_mode: NoiseMode):
    G_z = in_s
    if len(Gs) > 0:
        if mode == 'rand':
            count = 0
            pad_noise = int(((opt.ker_size-1)*opt.num_layer)/2)
            for G,(Z_opt1, Z_opt2),real_curr,real_next,(noise_amp1, noise_amp2) in zip(Gs,Zs,reals,reals[1:],NoiseAmp):

                noise_amp = None
                if noise_mode == NoiseMode.Z1:
                    z1 = _create_noise_for_draw_concat(opt, count, pad_noise, m_noise, Z_opt1, noise_mode)
                    z2 = torch.zeros(z1.shape, device=opt.device)
                    noise_amp = noise_amp1
                elif noise_mode == NoiseMode.Z2:
                    z2 = _create_noise_for_draw_concat(opt, count, pad_noise, m_noise, Z_opt2, noise_mode)
                    z1 = torch.zeros(z2.shape, device=opt.device)
                    noise_amp = noise_amp2
                elif noise_mode == NoiseMode.MIXED:
                    z1 = _create_noise_for_draw_concat(opt, count, pad_noise, m_noise, Z_opt1, noise_mode)
                    z2 = _create_noise_for_draw_concat(opt, count, pad_noise, m_noise, Z_opt2, noise_mode)
                else:
                    raise NotImplementedError

                z = functions.merge_noise_vectors(z1, z2, opt.noise_vectors_merge_method)

                G_z = G_z[:,:,0:real_curr.shape[2],0:real_curr.shape[3]]
                G_z = m_image(G_z)
                z_in = noise_amp*z+G_z
                G_z = G(z_in.detach(), G_z)[0]
                G_z = imresize(G_z,1/opt.scale_factor,opt)
                G_z = G_z[:,:,0:real_next.shape[2],0:real_next.shape[3]]
                count += 1
        if mode == 'rec':
            count = 0
            for G,(Z_opt1, Z_opt2),real_curr,real_next,(noise_amp1, noise_amp2) in zip(Gs,Zs,reals,reals[1:],NoiseAmp):
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                G_z = m_image(G_z)

                noise_amp = None
                if noise_mode == NoiseMode.Z1:
                    Z_opt2_zeros = torch.zeros(Z_opt2.shape, device=opt.device)
                    Z_opt = functions.merge_noise_vectors(Z_opt1, Z_opt2_zeros, opt.noise_vectors_merge_method)
                    noise_amp = noise_amp1
                elif noise_mode == NoiseMode.Z2:
                    Z_opt1_zeros = torch.zeros(Z_opt1.shape, device=opt.device)
                    Z_opt = functions.merge_noise_vectors(Z_opt1_zeros, Z_opt2, opt.noise_vectors_merge_method)
                    noise_amp = noise_amp2
                elif noise_mode == NoiseMode.MIXED:
                    Z_opt = functions.merge_noise_vectors(Z_opt1, Z_opt2, opt.noise_vectors_merge_method)
                else:
                    raise NotImplementedError

                z_in = noise_amp*Z_opt+G_z
                G_z = G(z_in.detach(),G_z)[0]
                G_z = imresize(G_z,1/opt.scale_factor,opt)
                G_z = G_z[:,:,0:real_next.shape[2],0:real_next.shape[3]]
                #if count != (len(Gs)-1):
                #    G_z = m_image(G_z)
                count += 1
    return G_z
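Example #2 relies on a NoiseMode enum and a functions.merge_noise_vectors helper that are not shown; note also that in the MIXED branches noise_amp is never assigned, so those paths would fail at noise_amp*z unless the amplitude is set there as well. Purely as an assumption about their shape, a minimal sketch consistent with how the two names are used above:

from enum import Enum

class NoiseMode(Enum):
    # assumed definition; only these three members are referenced in the example
    Z1 = 1
    Z2 = 2
    MIXED = 3

def merge_noise_vectors(z1, z2, method):
    # assumed helper: the example only requires that the merged result behaves
    # like a single noise map with the same broadcasting shape
    if method == 'sum':
        return z1 + z2
    if method == 'mean':
        return (z1 + z2) / 2
    raise NotImplementedError(method)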
def draw_concat(Gs,Zs,reals,NoiseAmp,in_s,mode,m_noise,m_image,opt):
    G_z = in_s
    if len(Gs) > 0:
        if mode == 'rand':
            count = 0
            pad_noise = int(((opt.ker_size-1)*opt.num_layer)/2)
            if opt.mode == 'animation_train':
                pad_noise = 0
            for G,Z_opt,real_curr,real_next,noise_amp in zip(Gs,Zs,reals,reals[1:],NoiseAmp):
                if count == 0:
                    z = functions.generate_noise([1, Z_opt.shape[1] - 2 * pad_noise, Z_opt.shape[2] - 2 * pad_noise])
                    z = tf.broadcast_to(z, [1, z.shape[1], z.shape[2], 3])
                else:
                    z = functions.generate_noise([opt.nc_z,Z_opt.shape[1] - 2 * pad_noise, Z_opt.shape[2] - 2 * pad_noise])
                
                z = m_noise(z)
                G_z = G_z[:,0:real_curr.shape[1],0:real_curr.shape[2],:] #PY: NCHW, TF: NHWC
                G_z = m_image(G_z)
                z_in = noise_amp*z+G_z
                G_z = G(z_in,G_z, training=True)
                G_z = imresize(G_z,1/opt.scale_factor,opt)
                G_z = G_z[:,0:real_next.shape[1],0:real_next.shape[2], :]
                count += 1
        if mode == 'rec':
            count = 0
            for G,Z_opt,real_curr,real_next,noise_amp in zip(Gs,Zs,reals,reals[1:],NoiseAmp):
                G_z = G_z[:, 0:real_curr.shape[1], 0:real_curr.shape[2], :]
                G_z = m_image(G_z)
                z_in = noise_amp*Z_opt+G_z
                G_z = G(z_in,G_z, training=True)
                G_z = imresize(G_z,1/opt.scale_factor,opt)
                G_z = G_z[:,0:real_next.shape[1],0:real_next.shape[2], :]
                count += 1
    return G_z
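The block above is a TensorFlow port, so tensors are laid out NHWC instead of PyTorch's NCHW, which is why the spatial slicing uses axes 1 and 2. A small standalone sketch of moving data between the two layouts, assuming ordinary torch/tf eager tensors:

import numpy as np
import tensorflow as tf
import torch

x_nchw = torch.randn(1, 3, 32, 32)                                   # PyTorch layout: N, C, H, W
x_nhwc = tf.convert_to_tensor(x_nchw.permute(0, 2, 3, 1).numpy())    # TensorFlow layout: N, H, W, C
x_back = np.transpose(x_nhwc.numpy(), (0, 3, 1, 2))                  # back to N, C, H, W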
Example #4
def draw_concat(Gs, Zs, reals, NoiseAmp, in_s, mode, m_noise, m_image, opt):
    G_z = in_s
    # if it's not the first scale, else do nothing
    if len(Gs) > 0:
        # if in random mode
        if mode == 'rand':
            count = 0
            pad_noise = int(((opt.ker_size - 1) * opt.num_layer) / 2)
            #from each scale
            for G, Z_opt, real_curr, real_next, noise_amp in zip(
                    Gs, Zs, reals, reals[1:], NoiseAmp):
                # for the first loop
                if count == 0:
                    #generate the noise
                    z = functions.generate_noise([
                        1, Z_opt.shape[2] - 2 * pad_noise,
                        Z_opt.shape[3] - 2 * pad_noise
                    ],
                                                 device=opt.device)
                    #broadcast it to correct shape
                    z = z.expand(1, 3, z.shape[2], z.shape[3])
                else:
                    #direct generate the noise
                    z = functions.generate_noise([
                        opt.nc_z, Z_opt.shape[2] - 2 * pad_noise,
                        Z_opt.shape[3] - 2 * pad_noise
                    ],
                                                 device=opt.device)
                #padding the noise
                z = m_noise(z)
                #------------------------------------------------------------
                #crop G_z (which starts as in_s) to the current real image's [height, width]
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                #pad it with the image zero-padding layer
                G_z = m_image(G_z)
                #amplify the generated noise, then add with the G_z
                z_in = noise_amp * z + G_z
                #generate a new output from generator
                G_z = G(z_in.detach(), G_z)
                #upsample the output by a factor of 1/opt.scale_factor
                G_z = imresize(G_z, 1 / opt.scale_factor, opt)
                #crop to the next (finer) real image's [height, width]
                G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]
                count += 1
        if mode == 'rec':
            count = 0
            #from each scale
            for G, Z_opt, real_curr, real_next, noise_amp in zip(
                    Gs, Zs, reals, reals[1:], NoiseAmp):
                # same steps as above, except the fixed Z_opt replaces the random noise
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                G_z = m_image(G_z)
                z_in = noise_amp * Z_opt + G_z  # for here we use Z_opt instead of generated noise
                G_z = G(z_in.detach(), G_z)
                G_z = imresize(G_z, 1 / opt.scale_factor, opt)
                G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]
                count += 1
    return G_z
Example #5
def draw_concat(Gs, Zs, reals, NoiseAmp, in_s, mode, m_noise, m_image, opt):
    """ Generate through all higher level Gs """
    G_z = in_s  # G_z is the current image output
    if len(Gs) > 0:  # skipped for the initial pyramid level, since there is no previous G to generate
        if mode == 'rand':  # using random noise map Z_opt
            count = 0
            pad_noise = int(((opt.ker_size - 1) * opt.num_layer) / 2)
            if opt.mode == 'animation_train':
                pad_noise = 0
            for G, Z_opt, real_curr, real_next, noise_amp in zip(
                    Gs, Zs, reals, reals[1:], NoiseAmp):
                if count == 0:  # Z_opt is not really used, except its size.
                    z = functions.generate_noise([
                        1, Z_opt.shape[2] - 2 * pad_noise,
                        Z_opt.shape[3] - 2 * pad_noise
                    ],
                                                 device=opt.device)
                    z = z.expand(1, 3, z.shape[2],
                                 z.shape[3])  # same value along color channel
                else:
                    z = functions.generate_noise(
                        [
                            opt.nc_z, Z_opt.shape[2] - 2 * pad_noise,
                            Z_opt.shape[3] - 2 * pad_noise
                        ],
                        device=opt.device)  # noise including color
                z = m_noise(z)
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]
                G_z = m_image(G_z)
                z_in = noise_amp * z + G_z
                G_z = G(z_in.detach(), G_z)
                G_z = imresize(G_z, 1 / opt.scale_factor,
                               opt)  # upsample it to current level
                G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]
                count += 1
        if mode == 'rec':  # using reconstruction vectors Z_opt
            count = 0
            for G, Z_opt, real_curr, real_next, noise_amp in zip(
                    Gs, Zs, reals, reals[1:], NoiseAmp):
                G_z = G_z[:, :, 0:real_curr.shape[2], 0:real_curr.shape[3]]  # make sure the size is the same as real pyr
                G_z = m_image(G_z)
                z_in = noise_amp * Z_opt + G_z  # use the loaded noise amplitude
                G_z = G(z_in.detach(), G_z)  # this is the recursive update for G_z
                G_z = imresize(G_z, 1 / opt.scale_factor,
                               opt)  # upsample it to current level
                G_z = G_z[:, :, 0:real_next.shape[2], 0:real_next.shape[3]]  # make sure the size is the same as real pyr
                #if count != (len(Gs)-1):
                #    G_z = m_image(G_z)
                count += 1
    return G_z
Example #6
def train(opt,Gs,Zs,reals1,reals2,NoiseAmp):
    real1_, real2_ = functions.read_image(opt)
    in_s = 0
    scale_num = 0
    real1 = imresize(real1_,opt.scale1,opt)
    real2 = imresize(real2_, opt.scale1, opt)
    reals1 = functions.creat_reals_pyramid(real1,reals1,opt)
    reals2 = functions.creat_reals_pyramid(real2, reals2, opt)
    nfc_prev = 0
    while scale_num<opt.stop_scale+1:
        opt.nfc = min(opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)
        opt.min_nfc = min(opt.min_nfc_init * pow(2, math.floor(scale_num / 4)), 128)

        opt.out_ = functions.generate_dir2save(opt)
        opt.outf = '%s/%d' % (opt.out_,scale_num)
        try:
            os.makedirs(opt.outf)
        except OSError:
                pass

        plt.imsave('%s/real1_scale.png' %  (opt.outf), functions.convert_image_np(reals1[scale_num]), vmin=0, vmax=1)
        plt.imsave('%s/real2_scale.png' % (opt.outf), functions.convert_image_np(reals2[scale_num]), vmin=0, vmax=1)

        D_curr,G_curr = init_models(opt)
        if (nfc_prev==opt.nfc):
            G_curr.load_state_dict(torch.load('%s/%d/netG.pth' % (opt.out_,scale_num-1)))
            D_curr.load_state_dict(torch.load('%s/%d/netD.pth' % (opt.out_,scale_num-1)))

        z_curr,in_s,G_curr = train_single_scale(D_curr,G_curr,reals1,reals2,Gs,Zs,in_s,NoiseAmp,opt)

        G_curr = functions.reset_grads(G_curr,False)
        G_curr.eval()
        D_curr = functions.reset_grads(D_curr,False)
        D_curr.eval()

        Gs.append(G_curr)
        Zs.append(z_curr)
        NoiseAmp.append(opt.noise_amp)

        torch.save(Zs, '%s/Zs.pth' % (opt.out_))
        torch.save(Gs, '%s/Gs.pth' % (opt.out_))
        torch.save(reals1, '%s/reals1.pth' % (opt.out_))
        torch.save(reals2, '%s/reals2.pth' % (opt.out_))
        torch.save(NoiseAmp, '%s/NoiseAmp.pth' % (opt.out_))

        scale_num+=1
        nfc_prev = opt.nfc
        del D_curr,G_curr
    return
Example #7
def SinGAN_generate(Gs,Zs,reals,NoiseAmp,opt,in_s=None,scale_v=1,scale_h=1,n=0,gen_start_scale=0,num_samples=50):
    #if torch.is_tensor(in_s) == False:
    if in_s is None:
        in_s = torch.full(reals[0].shape, 0, device=opt.device)
    images_cur = []
    for G,Z_opt,noise_amp in zip(Gs,Zs,NoiseAmp):
        pad1 = ((opt.ker_size-1)*opt.num_layer)/2
        m = nn.ZeroPad2d(int(pad1))
        nzx = (Z_opt.shape[2]-pad1*2)*scale_v
        nzy = (Z_opt.shape[3]-pad1*2)*scale_h

        images_prev = images_cur
        images_cur = []

        for i in range(0,num_samples,1):
            if n == 0:
                z_curr = functions.generate_noise([1,nzx,nzy], device=opt.device)
                z_curr = z_curr.expand(1,3,z_curr.shape[2],z_curr.shape[3])
                z_curr = m(z_curr)
            else:
                z_curr = functions.generate_noise([opt.nc_z,nzx,nzy], device=opt.device)
                z_curr = m(z_curr)

            if images_prev == []:
                I_prev = m(in_s)
                #I_prev = m(I_prev)
                #I_prev = I_prev[:,:,0:z_curr.shape[2],0:z_curr.shape[3]]
                #I_prev = functions.upsampling(I_prev,z_curr.shape[2],z_curr.shape[3])
            else:
                I_prev = images_prev[i]
                I_prev = imresize(I_prev,1/opt.scale_factor, opt)
                if opt.mode != "SR":
                    I_prev = I_prev[:, :, 0:round(scale_v * reals[n].shape[2]), 0:round(scale_h * reals[n].shape[3])]
                    I_prev = m(I_prev)
                    I_prev = I_prev[:,:,0:z_curr.shape[2],0:z_curr.shape[3]]
                    I_prev = functions.upsampling(I_prev,z_curr.shape[2],z_curr.shape[3])
                else:
                    I_prev = m(I_prev)

            if n < gen_start_scale:
                z_curr = Z_opt

            z_in = noise_amp*(z_curr)+I_prev
            I_curr = G(z_in.detach(),I_prev)

            if opt.mode == 'train':
                dir2save = '%s/RandomSamples/%s/gen_start_scale=%d' % (opt.out, opt.input_name[:-4], gen_start_scale)
            else:
                dir2save = functions.generate_dir2save(opt)
            try:
                os.makedirs(dir2save)
            except OSError:
                pass
            if (opt.mode != "harmonization") & (opt.mode != "editing") & (opt.mode != "SR") & (opt.mode != "paint2image"):
                plt.imsave('%s/%d.png' % (dir2save, i), functions.convert_image_np(I_curr.detach()), vmin=0,vmax=1)
                #plt.imsave('%s/%d_%d.png' % (dir2save,i,n),functions.convert_image_np(I_curr.detach()), vmin=0, vmax=1)
                #plt.imsave('%s/in_s.png' % (dir2save), functions.convert_image_np(in_s), vmin=0,vmax=1)
            images_cur.append(I_curr)
        n+=1
    return I_curr.detach()
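A sketch of how SinGAN_generate is typically driven for random sampling, assuming the usual entry points that other snippets in this list also use (functions.read_image, adjust_scales2image, load_trained_pyramid); this driver is an assumption, not part of the example:

real = functions.read_image(opt)
functions.adjust_scales2image(real, opt)
Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
out = SinGAN_generate(Gs, Zs, reals, NoiseAmp, opt, num_samples=10)  # in_s defaults to a zero image at the coarsest scale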
Example #8
def adjust_scales2image(real_, opt):
    # print(real_.shape)  #  [1, 3, 300, 300]
    # print(real_.shape[2])  # 300
    # print(real_.shape[3])  # 300
    opt.num_scales = int((math.log(math.pow(opt.min_size / (real_.shape[2]), 1), opt.scale_factor_init))) + 1
    # print(opt.num_scales)  # scale: 9
    scale2stop = int(
        math.log(min([opt.max_size, max([real_.shape[2], real_.shape[3]])]) / max([real_.shape[2], real_.shape[3]]),
                 opt.scale_factor_init))
    # print('scale2stop', scale2stop)  # scale2stop: 0
    opt.stop_scale = opt.num_scales - scale2stop
    # print('stop_scale', opt.stop_scale) # out: 9
    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),
                     1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)

    # print('scale1', opt.scale1)   # out: 1.0
    real = imresize(real_, opt.scale1, opt)
    # print('imresize_real', real.shape)   # [1, 3, 300, 300]
    opt.scale_factor = math.pow(opt.min_size / (real.shape[2]), 1 / (opt.stop_scale))
    # print(opt.scale_factor)  # 0.7587
    # opt.scale_factor = math.pow(opt.min_size/(min(real_.shape[0],real_.shape[1])),1/(opt.stop_scale))
    scale2stop = int(
        math.log(min([opt.max_size, max([real_.shape[2], real_.shape[3]])]) / max([real_.shape[2], real_.shape[3]]),
                 opt.scale_factor_init))
    # print(scale2stop) # 0
    opt.stop_scale = opt.num_scales - scale2stop
    # print(opt.stop_scale) # 9

    return real
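The commented values above (9 scales, scale_factor ≈ 0.7587 for a 300×300 input) line up with SinGAN-style defaults of min_size=25 and scale_factor_init=0.75, provided max_size is at least the image size so that scale1 stays 1.0. A quick standalone check of that arithmetic, under exactly those assumptions:

import math

min_size, scale_factor_init, H = 25, 0.75, 300   # assumed defaults; max_size >= 300 so scale1 stays 1.0
num_scales = int(math.log(min_size / H, scale_factor_init)) + 1
stop_scale = num_scales                          # scale2stop is 0 when max_size >= max(H, W)
scale_factor = math.pow(min_size / H, 1 / stop_scale)
print(num_scales, round(scale_factor, 4))        # -> 9 0.7587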
def preprocess_content_image(opt, reals,scale):
    real = functions.read_image(opt)
    functions.adjust_scales2image(real, opt)
    ref = functions.read_image_dir('%s/%s' % (opt.ref_dir, opt.ref_name), opt)
    if ref.shape[3] != real.shape[3]:
        ref = imresize_to_shape(ref, [real.shape[2], real.shape[3]], opt)
        ref = ref[:, :, :real.shape[2], :real.shape[3]]

    N = len(reals) - 1
    n = scale
    in_s = imresize(ref, pow(opt.scale_factor, (N - n + 1)), opt)
    in_s = in_s[:, :, :reals[n - 1].shape[2], :reals[n - 1].shape[3]]
    in_s = imresize(in_s, 1 / opt.scale_factor, opt)
    in_s = in_s[:, :, :reals[n].shape[2], :reals[n].shape[3]]

    return in_s
Example #10
def adjust_scales2image(real_, opt):
    # opt.num_scales = int((math.log(math.pow(opt.min_size / (real_.shape[2]), 1), opt.scale_factor_init))) + 1
    opt.num_scales = math.ceil((math.log(
        math.pow(opt.min_size / (min(real_.shape[2], real_.shape[3])), 1),
        opt.scale_factor_init))) + 1
    scale2stop = math.ceil(
        math.log(
            min([opt.max_size,
                 max([real_.shape[2], real_.shape[3]])]) /
            max([real_.shape[2], real_.shape[3]]), opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop
    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),
                     1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)
    real = imresize(real_, opt.scale1, opt)
    # opt.scale_factor = math.pow(opt.min_size / (real.shape[2]), 1 / (opt.stop_scale))
    opt.scale_factor = math.pow(
        opt.min_size / (min(real.shape[2], real.shape[3])),
        1 / (opt.stop_scale))
    scale2stop = math.ceil(
        math.log(
            min([opt.max_size,
                 max([real_.shape[2], real_.shape[3]])]) /
            max([real_.shape[2], real_.shape[3]]), opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop
    return real
def adjust_scales2image(real_, opt):
    #opt.num_scales = int((math.log(math.pow(opt.min_size / (real_.shape[2]), 1), opt.scale_factor_init))) + 1
    opt.num_scales = math.ceil((math.log(
        math.pow(opt.min_size / (min(real_.shape[2], real_.shape[3])), 1),
        opt.scale_factor_init))) + 1
    scale2stop = math.ceil(
        math.log(
            min([opt.max_size,
                 max([real_.shape[2], real_.shape[3]])]) /
            max([real_.shape[2], real_.shape[3]]), opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop
    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),
                     1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)
    #taking spare pixels for mask in case needed
    if opt.mode != 'random_samples':
        opt.mask_coords[::2] = np.floor(
            np.array(opt.mask_coords[::2]).astype(np.float) * opt.scale1)
        opt.mask_coords[1::2] = np.ceil(
            np.array(opt.mask_coords[1::2]).astype(np.float) * opt.scale1)
    real = imresize(real_, opt.scale1, opt)
    #opt.scale_factor = math.pow(opt.min_size / (real.shape[2]), 1 / (opt.stop_scale))
    opt.scale_factor = math.pow(
        opt.min_size / (min(real.shape[2], real.shape[3])),
        1 / (opt.stop_scale))
    scale2stop = math.ceil(
        math.log(
            min([opt.max_size,
                 max([real_.shape[2], real_.shape[3]])]) /
            max([real_.shape[2], real_.shape[3]]), opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop
    return real
Example #12
def adjust_scales2image_SR(real_, opt):
    opt.min_size = 18
    opt.num_scales = (int((math.log(
        opt.min_size / min(real_.shape[2], real_.shape[3]),
        opt.scale_factor_init,
    ))) + 1)
    scale2stop = int(
        math.log(
            min(opt.max_size, max(real_.shape[2], real_.shape[3])) /
            max(real_.shape[2], real_.shape[3]),
            opt.scale_factor_init,
        ))
    opt.stop_scale = opt.num_scales - scale2stop
    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),
                     1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)
    real = imresize(real_, opt.scale1, opt)
    # opt.scale_factor = math.pow(opt.min_size / (real.shape[2]), 1 / (opt.stop_scale))
    opt.scale_factor = math.pow(
        opt.min_size / (min(real.shape[2], real.shape[3])),
        1 / (opt.stop_scale))
    scale2stop = int(
        math.log(
            min(opt.max_size, max(real_.shape[2], real_.shape[3])) /
            max(real_.shape[2], real_.shape[3]),
            opt.scale_factor_init,
        ))
    opt.stop_scale = opt.num_scales - scale2stop
    return real
Example #13
def creat_reals_pyramid(real, reals, opt):
    real = real[:, 0:3, :, :]
    for i in range(0, opt.stop_scale + 1, 1):
        scale = math.pow(opt.scale_factor, opt.stop_scale - i)
        curr_real = imresize(real, scale, opt)
        reals.append(curr_real)
    return reals
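A short sketch of the usual order of operations around this pyramid builder, mirroring the train() examples elsewhere in this list (adjust the global scales first, resize to scale1, then build the real-image pyramid); the driver itself is an assumption:

real_ = functions.read_image(opt)
functions.adjust_scales2image(real_, opt)     # sets opt.scale1, opt.scale_factor, opt.stop_scale
real = imresize(real_, opt.scale1, opt)
reals = creat_reals_pyramid(real, [], opt)    # reals[0] is the coarsest scale, reals[-1] the finest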
Example #14
def create_reals_pyramid(real, reals, opt):
    real = real[:, 0:3, :, :]
    for i in range(0, opt.stop_scale + 1, 1):
        scale = math.pow(opt.scale_factor, opt.stop_scale - i)
        #resize the real image to correct scale
        curr_real = imresize(real, scale, opt)
        reals.append(curr_real)
    return reals  # the image list
Example #15
def creat_reals_pyramid(real, reals, opt):
    if opt.input_type == 'image':
        real = real[:, 0:3, :, :]
    for i in range(0, opt.stop_scale + 1, 1):
        scale = math.pow(opt.scale_factor, opt.stop_scale - i)
        curr_real = imresize(real, scale, opt)
        # print('@ creat_reals_pyramid: curr_real.shape = ', curr_real.shape)
        reals.append(curr_real)
    return reals
Example #16
def create_pyramid(im,pyr_list,opt, mode=None):

    for i in range(0,opt.stop_scale+1,1):
        scale = math.pow(opt.scale_factor,opt.stop_scale-i)
        if mode == "mask":
            curr_im = imresize_mask(im,scale,opt)  
        else:        
            curr_im = imresize(im,scale,opt)
        pyr_list.append(curr_im.to(opt.device))
    return pyr_list
def creat_reals_pyramid(real, reals, opt):
    if real.shape[1] == 4:
        real = real[:, 0:4, :, :]  # added by vajira
    else:
        real = real[:, 0:3, :, :]
    for i in range(0, opt.stop_scale + 1, 1):
        scale = math.pow(opt.scale_factor, opt.stop_scale - i)
        curr_real = imresize(real, scale, opt)
        reals.append(curr_real)
    return reals
Example #18
def train(opt, Gs, Zs, reals, NoiseAmp):
    real_ = functions.read_image(opt)
    in_s = 0
    scale_num = 0
    real = imresize(real_, opt.scale1, opt)
    reals = functions.creat_reals_pyramid(real, reals, opt)
    nfc_prev = 0
    netD_optimizer = tf.keras.optimizers.Adam(learning_rate=opt.lr_d,
                                              beta_1=opt.beta1,
                                              beta_2=0.999)
    netG_optimizer = tf.keras.optimizers.Adam(learning_rate=opt.lr_g,
                                              beta_1=opt.beta1,
                                              beta_2=0.999)

    while scale_num < opt.stop_scale + 1:

        opt.nfc = min(opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)
        opt.min_nfc = min(opt.min_nfc_init * pow(2, math.floor(scale_num / 4)),
                          128)
        opt.out_ = functions.generate_dir2save(opt)
        opt.outf = '%s/%d' % (opt.out_, scale_num)

        try:
            os.makedirs(opt.outf)
        except OSError:
            pass

        plt.imsave('%s/real_scale.png' % (opt.outf),
                   functions.convert_image_np(reals[scale_num]),
                   vmin=0,
                   vmax=1)
        D_curr, G_curr = init_models(opt)
        if nfc_prev == opt.nfc:
            D_curr.load_weights('%s/%d/netD' % (opt.out_, scale_num - 1))
            G_curr.load_weights('%s/%d/netG' % (opt.out_, scale_num - 1))

        z_curr, in_s, G_curr = train_single_scale(D_curr, G_curr, reals, Gs,
                                                  Zs, in_s, NoiseAmp, opt,
                                                  scale_num, netG_optimizer,
                                                  netD_optimizer)

        Gs.append(G_curr)
        Zs.append(z_curr)
        NoiseAmp.append(opt.noise_amp)
        with open('%s/Zs.pkl' % (opt.out_), 'wb') as f:
            pickle.dump(Zs, f)
        with open('%s/reals.pkl' % (opt.out_), 'wb') as f:
            pickle.dump(reals, f)
        with open('%s/NoiseAmp.pkl' % (opt.out_), 'wb') as f:
            pickle.dump(NoiseAmp, f)
        scale_num += 1
        nfc_prev = opt.nfc
        del D_curr, G_curr
    return None
Example #19
def cache_input_output(Gs, Zs, NoiseAmp, reals, scale_in=None, scale_out=None):
    """
    cache time-series input at scale i, and time-series output at scale j.
    both i and j start at 0 index

    :output:
        cache_dict -- dict, {
            'input': list of time-series np.array, 
            'output': list of time-series np.array
            }
    """
    cache_dict = defaultdict(list)
    # by default cache first scale input and final scale output
    scale_in = 0 if scale_in is None else scale_in
    scale_out = len(Gs) - 1 if scale_out is None else scale_out
    # create layer for boarder padding
    pad_image = int(((ker_size - 1) * num_layer) / 2)
    m_image = nn.ZeroPad2d(int(pad_image))
    in_s = torch.full(Zs[0].shape, 0, device=device)
    frames_curr = []
    # out loop is scale iteration
    for scale_n, (G, Z_opt, noise_amp,
                  real) in enumerate(zip(Gs, Zs, NoiseAmp, reals)):
        frames_prev = frames_curr
        frames_curr = []
        z_prev1, z_prev2 = compute_z_prev(scale_n, Z_opt, device)
        # inner loop is time iteration
        for t in range(0, 100, 1):
            z_diff = compute_z_diff(scale_n, Z_opt, z_prev1, z_prev2, beta,
                                    device)
            z_curr = compute_z_curr(Z_opt, z_prev1, z_diff, alpha)
            z_prev2 = z_prev1
            z_prev1 = z_curr
            # overwrite z_curr if init at higher scale
            if scale_n < scale_start:
                z_curr = Z_opt
            if frames_prev == []:
                I_prev = in_s
            else:
                I_prev = frames_prev[t]
                I_prev = imresize(I_prev, 1 / scale_factor, opt)  # edit
                I_prev = I_prev[:, :, 0:real.shape[2], 0:real.shape[3]]
                I_prev = m_image(I_prev)
            z_in = noise_amp * z_curr + I_prev
            I_curr = G(z_in.detach(), I_prev)
            frames_curr.append(I_curr)
            # cache results
            if scale_n == scale_in:
                z_in = tensor_to_np(z_in)
                cache_dict['input'].append(z_in)
            if scale_n == scale_out:
                I_curr = tensor_to_np(I_curr)
                cache_dict['output'].append(I_curr)
    return cache_dict
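cache_input_output leans on several module-level names the snippet does not define (ker_size, num_layer, device, alpha, beta, scale_start, scale_factor, opt, plus the compute_z_* helpers, which mirror the diff/curr updates in generate_gif below, and tensor_to_np). A sketch of the kind of module-level setup it assumes; every value here is a placeholder, not taken from the source:

import torch

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
ker_size, num_layer = 3, 5           # same meaning as opt.ker_size / opt.num_layer elsewhere
alpha, beta = 0.1, 0.95              # random-walk coefficients, as in generate_gif
scale_start, scale_factor = 2, 0.75  # scale at which Z_opt is injected, and the pyramid scale factor

def tensor_to_np(t):
    # assumed helper: detach to a CPU numpy array in HWC order
    return t.detach().cpu().numpy()[0].transpose(1, 2, 0)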
def create_masks_pyramid(real, masks, opt):
    real = real[:, 0:3, :, :]
    for i in range(0, opt.stop_scale + 1, 1):
        scale = math.pow(opt.scale_factor, opt.stop_scale - i)
        curr_real = imresize(real, scale, opt)
        curr_mask = torch.ones_like(curr_real)
        curr_coords = [
            int(np.ceil(coord * scale)) for coord in opt.mask_coords
        ]
        curr_mask[:, :, curr_coords[0]:curr_coords[1],
                  curr_coords[2]:curr_coords[3]] = 0
        masks.append(curr_mask)
    return masks
Example #21
def SinGAN_SR(opt, Gs, Zs, reals, NoiseAmp):
    mode = opt.mode
    in_scale, iter_num = functions.calc_init_scale(opt)
    opt.scale_factor = 1 / in_scale
    opt.scale_factor_init = 1 / in_scale
    opt.mode = 'SR_train'
    #opt.alpha = 100
    opt.stop_scale = 0
    dir2trained_model = functions.generate_dir2save(opt)
    if (os.path.exists(dir2trained_model)):
        #print('Trained model does not exist, training SinGAN for SR')
        Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
        opt.mode = mode
    else:
        SR_train(opt, Gs, Zs, reals, NoiseAmp)
        opt.mode = mode
    print('%f' % pow(in_scale, iter_num))
    Zs_sr = []
    reals_sr = []
    NoiseAmp_sr = []
    Gs_sr = []
    real = reals[-1]  #read_image(opt)
    for j in range(1, iter_num + 1, 1):
        real_ = imresize(real, pow(1 / opt.scale_factor, j), opt)
        real_ = real_[:, :,
                      0:int(pow(1 / opt.scale_factor, j) * real.shape[2]),
                      0:int(pow(1 / opt.scale_factor, j) * real.shape[3])]
        reals_sr.append(real_)
        Gs_sr.append(Gs[-1])
        NoiseAmp_sr.append(NoiseAmp[-1])
        z_opt = torch.full(real_.shape, 0, device=opt.device)
        m = nn.ZeroPad2d(5)
        z_opt = m(z_opt)
        Zs_sr.append(z_opt)
    out = SinGAN_generate(Gs_sr,
                          Zs_sr,
                          reals_sr,
                          NoiseAmp_sr,
                          opt,
                          in_s=reals_sr[0],
                          num_samples=1)
    dir2save = functions.generate_dir2save(opt)
    plt.imsave('%s.png' % (dir2save),
               functions.convert_image_np(out.detach()),
               vmin=0,
               vmax=1)
    return
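This SR routine only uses calc_init_scale through its outputs: pow(in_scale, iter_num) should equal the requested SR factor, and 1/in_scale becomes the pyramid scale factor. A sketch of what such a helper plausibly computes, with the exact constants treated as an assumption:

import math

def calc_init_scale(opt):
    # assumed: pick an integer number of refinement passes, then set the per-pass
    # upscale so that in_scale ** iter_num equals opt.sr_factor exactly
    step = math.pow(1 / 2, 1 / 3)                        # assumed base step (~0.794 per pass)
    iter_num = round(math.log(1 / opt.sr_factor, step))
    in_scale = math.pow(opt.sr_factor, 1 / iter_num)
    return in_scale, iter_num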
Example #22
def adjust_scales2image(real_,opt):
    #opt.num_scales = int((math.log(math.pow(opt.min_size / (real_.shape[2]), 1), opt.scale_factor_init))) + 1
    # num_scales: how many levels of pyramids
    opt.num_scales = int((math.log(math.pow(opt.min_size / (min(real_.shape[2], real_.shape[3])), 1), opt.scale_factor_init)))
    # scale2stop: for the largest patch size, what ratio wrt the image shape in terms of scaler_factor_init
    # 1:0; 1/2:1; etc
    scale2stop = math.ceil(math.log(min([opt.max_size, max([real_.shape[2], real_.shape[3]])]) / max([real_.shape[2], real_.shape[3]]),opt.scale_factor_init))
    # stop_scale: level to stop, since reach the maximum size.
    opt.stop_scale = opt.num_scales - scale2stop
    # scale1: for the largest patch size, what ratio wrt the image shape
    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)
    real = imresize(real_, opt.scale1, opt)
    # scale_factor:  evenly divide the scale_factor_init
    opt.scale_factor = opt.scale_factor_init
    # scale2stop = math.ceil(math.log(min([opt.max_size, max([real_.shape[2], real_.shape[3]])]) / max([real_.shape[2], real_.shape[3]]),opt.scale_factor_init))
    # opt.stop_scale = opt.num_scales - scale2stop
    return real
Example #23
def adjust_scales2image_SR(real_, opt):
    # fix the minimum size to 18
    opt.min_size = 18
    # number of scales
    opt.num_scales = int(
        (math.log(math.pow(opt.min_size / (min([real_.shape[2], real_.shape[3]])), 1), opt.scale_factor_init))) + 1
    print('num_scales:', opt.num_scales)
    scale2stop = int(
        math.log(min([opt.max_size, max([real_.shape[2], real_.shape[3]])]) / max([real_.shape[2], real_.shape[3]]),
                 opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop
    opt.scale1 = min(opt.max_size / max([real_.shape[2], real_.shape[3]]),
                     1)  # min(250/max([real_.shape[0],real_.shape[1]]),1)
    real = imresize(real_, opt.scale1, opt)
    opt.scale_factor = math.pow(opt.min_size / (min([real_.shape[2], real_.shape[3]])), 1 / (opt.stop_scale))
    # opt.scale_factor = math.pow(opt.min_size/(min(real_.shape[0],real_.shape[1])),1/(opt.stop_scale))
    scale2stop = int(
        math.log(min([opt.max_size, max([real_.shape[2], real_.shape[3]])]) / max([real_.shape[2], real_.shape[3]]),
                 opt.scale_factor_init))
    opt.stop_scale = opt.num_scales - scale2stop
    return real
Example #24
def test_pyramid(images):
    parser = get_arguments()
    parser.add_argument('--input_dir',
                        help='input image dir',
                        default='Input/Images')
    #parser.add_argument('--input_name', help='input image name', required=True)
    parser.add_argument('--mode', help='task to be done', default='train')
    opt = parser.parse_args("")
    opt.input_name = 'blank'
    opt = functions.post_config(opt)

    real = functions.np2torch(images[0], opt)
    functions.adjust_scales2image(real, opt)

    all_reals = []
    for image in images:
        reals = []
        real_ = functions.np2torch(image, opt)
        real = imresize(real_, opt.scale1, opt)
        reals = functions.creat_reals_pyramid(real, reals, opt)
        all_reals.append(reals)

    return np.array(all_reals).T
Example #25
def train(opt, Gs, Zs, reals, NoiseAmp):
    real_ = functions.read_image(opt)
    in_s = 0
    scale_num = 0
    real = imresize(real_, opt.scale1, opt)
    reals = functions.creat_reals_pyramid(real, reals, opt)
    nfc_prev = 0

    memory = []  ##storing memory
    time = []  ##storing time
    while scale_num < opt.stop_scale + 1:
        opt.nfc = min(opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)
        opt.min_nfc = min(opt.min_nfc_init * pow(2, math.floor(scale_num / 4)),
                          128)

        opt.out_ = functions.generate_dir2save(opt)
        opt.outf = '%s/%d' % (opt.out_, scale_num)
        try:
            os.makedirs(opt.outf)
        except OSError:
            pass

        #plt.imsave('%s/in.png' %  (opt.out_), functions.convert_image_np(real), vmin=0, vmax=1)
        #plt.imsave('%s/original.png' %  (opt.out_), functions.convert_image_np(real_), vmin=0, vmax=1)
        plt.imsave('%s/real_scale.png' % (opt.outf),
                   functions.convert_image_np(reals[scale_num]),
                   vmin=0,
                   vmax=1)

        D_curr, G_curr = init_models(opt)
        if (nfc_prev == opt.nfc):
            G_curr.load_state_dict(
                torch.load('%s/%d/netG.pth' % (opt.out_, scale_num - 1)))
            D_curr.load_state_dict(
                torch.load('%s/%d/netD.pth' % (opt.out_, scale_num - 1)))
        start = datetime.datetime.now()
        z_curr, in_s, G_curr, mbs, percent = train_single_scale(
            D_curr, G_curr, reals, Gs, Zs, in_s, NoiseAmp, opt)
        memory.append([mbs, percent])
        end = datetime.datetime.now()
        elapsed = end - start
        time.append(elapsed)
        print(f'time: {elapsed}')
        G_curr = functions.reset_grads(G_curr, False)
        G_curr.eval()
        D_curr = functions.reset_grads(D_curr, False)
        D_curr.eval()

        Gs.append(G_curr)
        Zs.append(z_curr)
        NoiseAmp.append(opt.noise_amp)

        torch.save(Zs, '%s/Zs.pth' % (opt.out_))
        torch.save(Gs, '%s/Gs.pth' % (opt.out_))
        torch.save(reals, '%s/reals.pth' % (opt.out_))
        torch.save(NoiseAmp, '%s/NoiseAmp.pth' % (opt.out_))
        torch.save(memory, '%s/full_memory.pth' % (opt.out_))
        torch.save(time, '%s/full_time.pth' % (opt.out_))

        scale_num += 1
        nfc_prev = opt.nfc
        del D_curr, G_curr
    #torch.save(full_memory, '%s/full_memory.pk' % (opt.out_))
    #torch.save(full_time, '%s/full_time.pk' % (opt.out_))
    print(memory)
    print(time)
    #pk.dump(full_memory, open('Full_memory', 'wb'))
    return
Example #26
                (opt.input_dir, opt.ref_name[:-4], opt.ref_name[-4:]), opt)
            mask = functions.read_image_dir(
                '%s/%s_mask%s' %
                (opt.ref_dir, opt.ref_name[:-4], opt.ref_name[-4:]), opt)
            if ref.shape[3] != real.shape[3]:
                mask = imresize_to_shape(mask, [real.shape[2], real.shape[3]],
                                         opt)
                mask = mask[:, :, :real.shape[2], :real.shape[3]]
                ref = imresize_to_shape(ref, [real.shape[2], real.shape[3]],
                                        opt)
                ref = ref[:, :, :real.shape[2], :real.shape[3]]
            mask = functions.dilate_mask(mask, opt)

            N = len(reals) - 1
            n = opt.inpainting_start_scale
            in_s = imresize(ref, pow(opt.scale_factor, (N - n + 1)), opt)
            in_s = in_s[:, :, :reals[n - 1].shape[2], :reals[n - 1].shape[3]]
            in_s = imresize(in_s, 1 / opt.scale_factor, opt)
            in_s = in_s[:, :, :reals[n].shape[2], :reals[n].shape[3]]
            out = SinGAN_generate(Gs[n:],
                                  Zs[n:],
                                  reals,
                                  NoiseAmp[n:],
                                  opt,
                                  in_s,
                                  n=n,
                                  num_samples=1)
            out = (1 - mask) * real + mask * out
            plt.imsave('%s/start_scale=%d.png' %
                       (dir2save, opt.inpainting_start_scale),
                       functions.convert_image_np(out.detach()),
Example #27
        except OSError:
            pass
        real = functions.read_image(opt)
        real = functions.adjust_scales2image(real, opt)
        Gs, Zs, reals, NoiseAmp = functions.load_trained_pyramid(opt)
        if (opt.editing_start_scale < 1) | (opt.editing_start_scale >
                                            (len(Gs) - 1)):
            print("injection scale should be between 1 and %d" % (len(Gs) - 1))
        else:
            ref = functions.read_image_dir(
                '%s/%s' % (opt.ref_dir, opt.ref_name), opt)
            mask = functions.read_image_dir(
                '%s/%s_mask%s' %
                (opt.ref_dir, opt.ref_name[:-4], opt.ref_name[-4:]), opt)
            if ref.shape[3] != real.shape[3]:
                mask = imresize(mask, real.shape[3] / ref.shape[3], opt)
                mask = mask[:, :, :real.shape[2], :real.shape[3]]
                ref = imresize(ref, real.shape[3] / ref.shape[3], opt)
                ref = ref[:, :, :real.shape[2], :real.shape[3]]
            mask = functions.dilate_mask(mask, opt)

            N = len(reals) - 1
            n = opt.editing_start_scale
            in_s = imresize(ref, pow(opt.scale_factor, (N - n + 1)), opt)
            in_s = in_s[:, :, :reals[n - 1].shape[2], :reals[n - 1].shape[3]]
            in_s = imresize(in_s, 1 / opt.scale_factor, opt)
            in_s = in_s[:, :, :reals[n].shape[2], :reals[n].shape[3]]
            out = SinGAN_generate(Gs[n:],
                                  Zs[n:],
                                  reals,
                                  NoiseAmp[n:],
Example #28
def generate_gif(Gs,Zs,reals,NoiseAmp,opt,alpha=0.1,beta=0.9,start_scale=2,fps=10):

    in_s = torch.full(Zs[0].shape, 0, device=opt.device)
    images_cur = []
    count = 0

    for G,Z_opt,noise_amp,real in zip(Gs,Zs,NoiseAmp,reals):
        pad_image = int(((opt.ker_size - 1) * opt.num_layer) / 2)
        nzx = Z_opt.shape[2]
        nzy = Z_opt.shape[3]
        #pad_noise = 0
        #m_noise = nn.ZeroPad2d(int(pad_noise))
        m_image = nn.ZeroPad2d(int(pad_image))
        images_prev = images_cur
        images_cur = []
        if count == 0:
            z_rand = functions.generate_noise([1,nzx,nzy], device=opt.device)
            z_rand = z_rand.expand(1,3,Z_opt.shape[2],Z_opt.shape[3])
            z_prev1 = 0.95*Z_opt +0.05*z_rand
            z_prev2 = Z_opt
        else:
            z_prev1 = 0.95*Z_opt +0.05*functions.generate_noise([opt.nc_z,nzx,nzy], device=opt.device)
            z_prev2 = Z_opt

        for i in range(0,100,1):
            if count == 0:
                z_rand = functions.generate_noise([1,nzx,nzy], device=opt.device)
                z_rand = z_rand.expand(1,3,Z_opt.shape[2],Z_opt.shape[3])
                diff_curr = beta*(z_prev1-z_prev2)+(1-beta)*z_rand
            else:
                diff_curr = beta*(z_prev1-z_prev2)+(1-beta)*(functions.generate_noise([opt.nc_z,nzx,nzy], device=opt.device))

            z_curr = alpha*Z_opt+(1-alpha)*(z_prev1+diff_curr)
            z_prev2 = z_prev1
            z_prev1 = z_curr

            if images_prev == []:
                I_prev = in_s
            else:
                I_prev = images_prev[i]
                I_prev = imresize(I_prev, 1 / opt.scale_factor, opt)
                I_prev = I_prev[:, :, 0:real.shape[2], 0:real.shape[3]]
                I_prev = m_image(I_prev)
            if count < start_scale:
                z_curr = Z_opt

            z_in = noise_amp*z_curr+I_prev
            I_curr = G(z_in.detach(),I_prev)

            if (count == len(Gs)-1):
                I_curr = functions.denorm(I_curr).detach()
                I_curr = I_curr[0,:,:,:].cpu().numpy()
                I_curr = I_curr.transpose(1, 2, 0)*255
                I_curr = I_curr.astype(np.uint8)

            images_cur.append(I_curr)
        count += 1
    dir2save = functions.generate_dir2save(opt)
    try:
        os.makedirs('%s/start_scale=%d' % (dir2save,start_scale) )
    except OSError:
        pass
    imageio.mimsave('%s/start_scale=%d/alpha=%f_beta=%f.gif' % (dir2save,start_scale,alpha,beta),images_cur,fps=fps)
    del images_cur
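The z update inside generate_gif is a damped random walk pulled back toward the fixed map Z_opt: diff = beta*(z_prev1 - z_prev2) + (1-beta)*noise, then z_curr = alpha*Z_opt + (1-alpha)*(z_prev1 + diff). A stripped-down sketch of just that recurrence, using made-up scalar stand-ins for the noise maps:

import random

alpha, beta, Z_opt = 0.1, 0.9, 0.0                       # illustrative values only
z_prev1 = 0.95 * Z_opt + 0.05 * random.gauss(0, 1)
z_prev2 = Z_opt
for _ in range(5):
    diff_curr = beta * (z_prev1 - z_prev2) + (1 - beta) * random.gauss(0, 1)
    z_curr = alpha * Z_opt + (1 - alpha) * (z_prev1 + diff_curr)
    z_prev2, z_prev1 = z_prev1, z_curr
    print(round(z_curr, 3))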
     real = functions.read_image(opt)
     opt.min_size = 18
     real = functions.adjust_scales2image_SR(real, opt)
     train(opt, Gs, Zs, reals, NoiseAmp)
     opt.mode = mode
 print('%f' % pow(in_scale, iter_num))
 Zs_sr = []
 reals_sr = []
 NoiseAmp_sr = []
 Gs_sr = []
 real = reals[-1]  # read_image(opt)
 real_ = real
 opt.scale_factor = 1 / in_scale
 opt.scale_factor_init = 1 / in_scale
 for j in range(1, iter_num + 1, 1):
     real_ = imresize(real_, pow(1 / opt.scale_factor, 1), opt)
     reals_sr.append(real_)
     Gs_sr.append(Gs[-1])
     NoiseAmp_sr.append(NoiseAmp[-1])
     z_opt = torch.full(real_.shape,
                        0,
                        dtype=torch.float32,
                        device=opt.device)
     m = nn.ZeroPad2d(5)
     z_opt = m(z_opt)
     Zs_sr.append(z_opt)
 out = SinGAN_generate(Gs_sr,
                       Zs_sr,
                       reals_sr,
                       NoiseAmp_sr,
                       opt,
Example #30
def train(opt, Gs, Zs, reals, NoiseAmp):
    real_ = functions.read_image(opt)
    in_s = 0
    scale_num = 0
    real = imresize(real_, opt.scale1, opt)
    reals = functions.creat_reals_pyramid(real, reals, opt)
    nfc_prev = 0

    while scale_num < opt.stop_scale + 1:
        opt.nfc = min(opt.nfc_init * pow(2, math.floor(scale_num / 4)), 128)
        opt.min_nfc = min(opt.min_nfc_init * pow(2, math.floor(scale_num / 4)),
                          128)
        if opt.fast_training:
            if (scale_num > 0) & (scale_num % 4 == 0):
                opt.niter = opt.niter // 2
        '''
        if (scale_num == opt.stop_scale):
            opt.nfc = 128
            opt.min_nfc = 128
        '''
        opt.out_ = functions.generate_dir2save(opt)
        opt.outf = '%s/%d' % (opt.out_, scale_num)
        try:
            os.makedirs(opt.outf)
        except OSError:
            pass

        #plt.imsave('%s/in.png' %  (opt.out_), functions.convert_image_np(real), vmin=0, vmax=1)
        #plt.imsave('%s/original.png' %  (opt.out_), functions.convert_image_np(real_), vmin=0, vmax=1)
        plt.imsave('%s/real_scale.png' % (opt.outf),
                   functions.convert_image_np(reals[scale_num]),
                   vmin=0,
                   vmax=1)

        D_curr, G_curr = init_models(opt)
        if (nfc_prev == opt.nfc):
            G_curr.load_state_dict(
                torch.load('%s/%d/netG.pth' % (opt.out_, scale_num - 1)))
            D_curr.load_state_dict(
                torch.load('%s/%d/netD.pth' % (opt.out_, scale_num - 1)))

        z_curr, in_s, G_curr = train_single_scale(D_curr, G_curr, reals, Gs,
                                                  Zs, in_s, NoiseAmp, opt)

        G_curr = functions.reset_grads(G_curr, False)
        G_curr.eval()
        D_curr = functions.reset_grads(D_curr, False)
        D_curr.eval()

        Gs.append(G_curr)
        Zs.append(z_curr)
        NoiseAmp.append(opt.noise_amp)

        torch.save(Zs, '%s/Zs.pth' % (opt.out_))
        torch.save(Gs, '%s/Gs.pth' % (opt.out_))
        torch.save(reals, '%s/reals.pth' % (opt.out_))
        torch.save(NoiseAmp, '%s/NoiseAmp.pth' % (opt.out_))

        scale_num += 1
        nfc_prev = opt.nfc
        del D_curr, G_curr
    return
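A sketch of the top-level driver that the train() variants above are normally invoked from, reusing the argument-parsing pattern shown in Example #24; this main-style wrapper is an assumption, not taken from any single example here:

parser = get_arguments()
parser.add_argument('--input_dir', help='input image dir', default='Input/Images')
parser.add_argument('--input_name', help='input image name', required=True)
parser.add_argument('--mode', help='task to be done', default='train')
opt = parser.parse_args()
opt = functions.post_config(opt)

Gs, Zs, reals, NoiseAmp = [], [], [], []
real = functions.read_image(opt)
functions.adjust_scales2image(real, opt)
train(opt, Gs, Zs, reals, NoiseAmp)
SinGAN_generate(Gs, Zs, reals, NoiseAmp, opt)   # draw random samples from the trained pyramid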