Example no. 1
    def buildHighfromMed(self, sess, graph_path, model_path, model_name,
                         meta_name):
        self.sess = sess
        self.model_name = model_name
        self.model_path = model_path
        self.graph_path = graph_path
        self.meta_name = meta_name
        self.loadMetaSmall()

        self.iterator = util.initDataset(self.dataset_path, self.batch_sz,
                                         self.shuffle_buff)
        self.getDataset = self.iterator.get_next()

        # Network
        self.x_ = self.graph.get_tensor_by_name('Pretrained/x_in:0')
        self.y_ = self.graph.get_tensor_by_name('Pretrained/y_in:0')
        self.fov_ = self.graph.get_tensor_by_name('Pretrained/fov_in:0')
        self.y = util.catResize(self.y_)
        output = self.graph.get_tensor_by_name(
            'Pretrained/Generator/im_h/conv2d_transpose:0')
        self.g = tf.nn.tanh(output)

        # D high
        self.d_real, self.dr = model.D(self.y)
        self.d_fake, self.df = model.D(self.g, reuse=True)

        self.lbl_real = tf.ones_like(self.d_real) * random.uniform(0.8, 1.1)
        self.lbl_fake = tf.zeros_like(self.d_fake) + random.uniform(0.0, 0.4)

        # Loss High
        self.loss_d_real, self.loss_d_fake, self.loss_d = model.dLossGan(
            self.d_real, self.d_fake, self.lbl_real, self.lbl_fake)
        self.loss_g_adv, self.loss_g_hole, self.loss_g_valid, self.loss_g = model.gLossGan(
            self.g, self.d_fake, self.y, self.lbl_real, 10, 10)

        self.setLearingRate()
        d_var = [
            v for v in tf.trainable_variables()
            if v.name.startswith('DiscriminatorH')
        ]
        g_var = [
            v for v in tf.trainable_variables()
            if v.name.startswith('Pretrained/Generator/g_')
            or v.name.startswith('Pretrained/Generator/im_h')
        ]

        with tf.variable_scope('MinimizerH'):
            self.opt_g = tf.train.AdamOptimizer(self.lr_g).minimize(
                self.loss_g, global_step=self.step, var_list=g_var)
            self.opt_d = tf.train.AdamOptimizer(self.lr_d).minimize(
                self.loss_d, global_step=self.step, var_list=d_var)
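A note on the label smoothing above: because random.uniform is a Python-level call, each smoothed label is sampled once when the graph is built and then stays constant for every training step. A minimal sketch of a per-step alternative, assuming TensorFlow 1.x as in the snippet (not part of the original code):

    self.lbl_real = tf.ones_like(self.d_real) * tf.random_uniform([], 0.8, 1.1)
    self.lbl_fake = tf.zeros_like(self.d_fake) + tf.random_uniform([], 0.0, 0.4)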
Example no. 2
    def forwardGAN(self):
        # G
        self.gs, self.gm, self.g = model.Gs3(self.x)
        # D small
        self.ds_real, self.dsr = model.Ds(model.dstack(self.xs, self.ys))
        self.ds_fake, self.dsf = model.Ds(model.dstack(self.xs, self.gs),
                                          reuse=True)
        # D med
        self.dm_real, self.dmr = model.Dm(model.dstack(self.xm, self.ym))
        self.dm_fake, self.dmf = model.Dm(model.dstack(self.xm, self.gm),
                                          reuse=True)
        # D high
        self.d_real, self.dr = model.D(model.dstack(self.x, self.y))
        self.d_fake, self.df = model.D(model.dstack(self.x, self.g),
                                       reuse=True)

        self.lbl_real = tf.ones_like(self.ds_real) * random.uniform(0.8, 1.1)
        self.lbl_fake = tf.zeros_like(self.ds_fake) + random.uniform(0.0, 0.4)
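model.dstack is not shown in this snippet; from how it is called, it presumably stacks the conditioning input with the real or generated image so each discriminator sees matched pairs, as in conditional GANs. A plausible one-line sketch, purely an assumption:

    def dstack(a, b):
        return tf.concat([a, b], axis=-1)  # stack condition and image along channels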
Example no. 3
c1 = np.hstack([c, np.zeros_like(c)])
c2 = np.hstack([np.zeros_like(c), c])

## Data
train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('./data',
                   train=True,
                   download=False,
                   transform=transforms.Compose([transforms.Resize(64),
                                                 transforms.ToTensor()])),
    batch_size=Batch,
    shuffle=True)

## main
model_G = model.G().to(device).apply(model.weights_init)
model_D = model.D(c_size).to(device).apply(model.weights_init)
model_Q = model.Q(c_size).to(device).apply(model.weights_init)

opt_D = torch.optim.Adam([{'params': model_D.parameters()}],
                         betas=(0.5, 0.99),
                         lr=Lr_d)
opt_G = torch.optim.Adam([{'params': model_G.parameters()},
                          {'params': model_Q.parameters()}],
                         lr=Lr_g,
                         betas=(0.5, 0.99))
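The optimizer grouping is the usual InfoGAN setup: opt_G updates both the generator and the auxiliary Q network, because the mutual-information term backpropagates through both, while opt_D only touches the discriminator. A sketch of how the fixed codes c1/c2 from the top are typically used for visualisation, assuming c is a column of evenly spaced code values, nz is the noise dimension, and model_G accepts a flat (batch, nz + c_size) latent vector (none of this is shown in the snippet):

    fixed_z = torch.randn(c1.shape[0], nz, device=device)
    vary_first = torch.from_numpy(c1).float().to(device)  # first code sweeps, second stays 0
    with torch.no_grad():
        samples = model_G(torch.cat([fixed_z, vary_first], dim=1))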
Example no. 4
LR = FLAGS.lr
MOMENTUM = FLAGS.momentum
ROOT_PATH = os.path.dirname(os.path.realpath(__file__))
LOG_PATH = os.path.join(ROOT_PATH, FLAGS.log_dir)
OUT_PATH = os.path.join(ROOT_PATH, FLAGS.output_dir)
if not os.path.exists(LOG_PATH): os.mkdir(LOG_PATH)
if not os.path.exists(OUT_PATH): os.mkdir(OUT_PATH)

(train_images,
 train_labels), train_iters = data.data_train(IMG_PATH, TRAIN_LABEL,
                                              BATCH_SIZE)
(valid_images,
 valid_labels), valid_iters = data.data_test(IMG_PATH, VALID_LABEL, N_SAMPLE)
Genc = model.Genc()
Gdec = model.Gdec()
D = model.D()
Adv = model.Adv_cls()

####################################################


def V_graph(sess, phv):
    real_labels = valid_labels * 2 - 1
    fake_labels = -real_labels

    u = Genc.build(valid_images, phv['is_training_v'])
    fake_images = Gdec.build(u, fake_labels, phv['is_training_v'])

    return fake_images
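V_graph builds the validation images by flipping every attribute: the binary labels are remapped from {0, 1} to {-1, +1} and then negated, so the decoder is asked to generate the opposite of each ground-truth attribute. A toy illustration with made-up values:

    import numpy as np
    valid = np.array([[1, 0, 1]])   # three binary attributes for one sample
    real = valid * 2 - 1            # [[ 1, -1,  1]]
    fake = -real                    # [[-1,  1, -1]]  -> every attribute flipped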

Example no. 5
parser.add_argument('--imsize', type=int, help='size of image', default=128)
parser.add_argument('--gamma', type=float, help='gamma', default=0.5)
parser.add_argument('--lr', type=float, help='learning rate', default=0.0001)
parser.add_argument('--comment', help='comments', default='')

opt = parser.parse_args()
print(opt)
opt.batchSize = opt.batchSize * torch.cuda.device_count()



dataset = ImageFolderSR(root=opt.dataroot, HRsize=opt.imsize,
                        is_crop=False)  # , transform=inputTF, target_transform=GTTF)
dataloader = torch.utils.data.DataLoader(dataset,
                                         batch_size=opt.batchSize,
                                         shuffle=True,
                                         num_workers=int(opt.workers),
                                         pin_memory=True,
                                         drop_last=True)
assert dataset
# Pick the architecture that matches the requested image size.
if opt.imsize == 128:
    netG = model128.G(n=128, h=opt.nz).cuda()
    netD = model128.D(n=128, h=opt.nz).cuda()
else:
    netG = model.G(n=128, h=opt.nz).cuda()
    netD = model.D(n=128, h=opt.nz).cuda()

netG = makeParallel(netG)
netD = makeParallel(netD)
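# makeParallel is not defined in this snippet; a plausible implementation (an
# assumption, not the original) wraps the model in nn.DataParallel when several
# GPUs are visible, which is also why batchSize was scaled by
# torch.cuda.device_count() above:
#
#     def makeParallel(net):
#         return torch.nn.DataParallel(net) if torch.cuda.device_count() > 1 else net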


optimG = torch.optim.Adam(netG.parameters(), lr=opt.lr, betas=(0.5, 0.999))
optimD = torch.optim.Adam(netD.parameters(), lr=opt.lr, betas=(0.5, 0.999))

if opt.workers == 0:
    expname = ''
else:
Example no. 6
    def __init__(self,
                 opt=None,
                 train_dt=None,
                 train_dt_warm=None,
                 dis_list=[],
                 val_dt_warm=None):
        self.device = torch.device(
            "cuda" if torch.cuda.is_available() else "cpu")

        self.opt = opt

        self.visualizer = Visualizer(opt)

        num_gpus = torch.cuda.device_count()
        print(dis_list)
        self.rank = dis_list[0]
        print(self.rank)

        #=====START: ADDED FOR DISTRIBUTED======
        if num_gpus > 1:
            #init_distributed(rank, num_gpus, group_name, **dist_config)
            dist_config = dis_list[3]
            init_distributed(dis_list[0], dis_list[1], dis_list[2],
                             **dist_config)
        #=====END:   ADDED FOR DISTRIBUTED======

        if opt.ge_net == "srfeat":
            self.netG = model.G()
        elif opt.ge_net == "carn":
            self.netG = model.G1()
        elif opt.ge_net == "carnm":
            self.netG = model.G2()
        else:
            raise Exception("unknow ")

        self.netD_vgg = model.D(input_c=512, input_width=18)

        self.netD = model.D()

        if opt.vgg_type == "style":
            self.vgg = load_vgg16(opt.vgg_model_path + '/models')
        elif opt.vgg_type == "classify":
            self.vgg = model.vgg19_withoutbn_customefinetune()

        self.vgg.eval()
        for param in self.vgg.parameters():
            param.requires_grad = False
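        # Freezing VGG here means the requires_grad filters used below for optim_G
        # and optim_D automatically leave the VGG weights out of both optimizers.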

        init_weights(self.netD, init_type=opt.init)
        init_weights(self.netD_vgg, init_type=opt.init)
        init_weights(self.netG, init_type=opt.init)

        self.vgg = self.vgg.to(self.device)
        self.netD = self.netD.to(self.device)
        self.netD_vgg = self.netD_vgg.to(self.device)
        self.netG = self.netG.to(self.device)

        #=====START: ADDED FOR DISTRIBUTED======
        if num_gpus > 1:
            #self.vgg = apply_gradient_allreduce(self.vgg)
            self.netD_vgg = apply_gradient_allreduce(self.netD_vgg)
            self.netD = apply_gradient_allreduce(self.netD)
            self.netG = apply_gradient_allreduce(self.netG)

        #=====END:   ADDED FOR DISTRIBUTED======

        print(opt)

        self.optim_G = torch.optim.Adam(
            filter(lambda p: p.requires_grad, self.netG.parameters()),
            lr=opt.warm_opt.lr, betas=opt.warm_opt.betas, weight_decay=0.0)

        # self.optim_G = torch.optim.Adam(
        #     filter(lambda p: p.requires_grad, self.netG.parameters()),
        #     lr=opt.gen.lr, betas=opt.gen.betas, weight_decay=0.0)

        if opt.dis.optim == "sgd":
            self.optim_D= torch.optim.SGD( filter(lambda p: p.requires_grad, \
                itertools.chain(self.netD_vgg.parameters(),self.netD.parameters() ) ),\
                lr=opt.dis.lr,
             )
        elif opt.dis.optim == "adam":
            self.optim_D= torch.optim.Adam( filter(lambda p: p.requires_grad, \
                itertools.chain(self.netD_vgg.parameters(),self.netD.parameters() ) ),\
                lr=opt.dis.lr,betas=opt.dis.betas, weight_decay=0.0
             )
        else:
            raise Exception("unknown")

        print("create schedule ")

        lr_sc_G = get_scheduler(self.optim_G, opt.gen)
        lr_sc_D = get_scheduler(self.optim_D, opt.dis)

        self.schedulers = []

        self.schedulers.append(lr_sc_G)
        self.schedulers.append(lr_sc_D)

        # =====START: ADDED FOR DISTRIBUTED======
        train_dt = torch.utils.data.ConcatDataset([train_dt, train_dt_warm])

        train_sampler = DistributedSampler(train_dt) if num_gpus > 1 else None
        val_sampler_warm = DistributedSampler(
            val_dt_warm) if num_gpus > 1 else None
        # =====END:   ADDED FOR DISTRIBUTED======

        kw = {
            "pin_memory": True,
            "num_workers": 8
        } if torch.cuda.is_available() else {}
        dl_c = t_data.DataLoader(train_dt,
                                 batch_size=opt.batch_size,
                                 sampler=train_sampler,
                                 drop_last=True,
                                 **kw)

        batch_size_warm = getattr(opt, "batch_size_warm", opt.batch_size)
        dl_val_warm = t_data.DataLoader(val_dt_warm,
                                        batch_size=batch_size_warm,
                                        sampler=val_sampler_warm,
                                        drop_last=True,
                                        **kw)

        self.dt_train = dl_c
        self.dt_val_warm = dl_val_warm

        if opt.warm_opt.loss_fn == "mse":
            self.critic_pixel = torch.nn.MSELoss()
        elif opt.warm_opt.loss_fn == "l1":
            self.critic_pixel = torch.nn.L1Loss()
        elif opt.warm_opt.loss_fn == "smooth_l1":
            self.critic_pixel = torch.nn.SmoothL1Loss()
        else:
            raise Exception("unknown warm_opt.loss_fn: %s" % opt.warm_opt.loss_fn)

        self.critic_pixel = self.critic_pixel.to(self.device)

        self.gan_loss = GANLoss(gan_mode=opt.gan_loss_fn).to(self.device)
        print("init ....")

        self.save_dir = os.path.dirname(self.visualizer.log_name)
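The two schedulers are stored but never stepped in this snippet; a minimal usage sketch, assuming get_scheduler returns standard torch.optim.lr_scheduler objects that are advanced once per epoch (the method name below is hypothetical):

    def update_learning_rate(self):
        # advance both LR schedulers once per epoch
        for sch in self.schedulers:
            sch.step()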