Example #1
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0
        self.preview_noise = helper.new_random_z(16, params['z_size'], seed=3)

        self.transform = load.NormDenorm([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])

        self.train_loader, self.data_len = load.data_load(
            f'data/{params["dataset"]}/{params["train_folder"]}/',
            self.transform,
            params["batch_size"],
            shuffle=True,
            perc=params["data_perc"],
            output_res=params["output_size"])

        print(f'Data Loader Initialized: {self.data_len} Images')

        self.model_dict["G"] = n.Generator(layers=params["gen_layers"],
                                           filts=params["gen_filters"],
                                           channels=params["in_channels"],
                                           z_size=params['z_size'])

        self.model_dict["D"] = n.Discriminator(layers=params["disc_layers"],
                                               filts=params["disc_filters"],
                                               channels=params["in_channels"])

        for model in self.model_dict.values():
            model.apply(helper.weights_init_normal)
            model.cuda()
            model.train()

        print('Networks Initialized')

        # setup optimizers #
        self.opt_dict["G"] = optim.RMSprop(self.model_dict["G"].parameters(),
                                           lr=params['lr_gen'])
        self.opt_dict["D"] = optim.RMSprop(self.model_dict["D"].parameters(),
                                           lr=params['lr_disc'])

        print('Optimizers Initialized')

        # setup history storage #
        self.losses = ['G_Loss', 'D_Loss']
        self.loss_batch_dict = {}
        self.loss_epoch_dict = {}
        self.train_hist_dict = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []
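
A minimal usage sketch for this constructor. The enclosing class name is not shown in the snippet, so Trainer and every value in params below are assumptions; only the keys are taken from the reads in __init__ above.

    # Hypothetical setup; the class name and all values are placeholders.
    params = {
        'dataset': 'faces', 'train_folder': 'train',   # expects data/faces/train/
        'batch_size': 64, 'data_perc': 1.0, 'output_size': 64,
        'in_channels': 3, 'z_size': 128,
        'gen_layers': 4, 'gen_filters': 512,
        'disc_layers': 4, 'disc_filters': 512,
        'lr_gen': 5e-5, 'lr_disc': 5e-5,
    }
    trainer = Trainer(params)  # runs the __init__ shown above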
Example #2
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0
        self.loop_iter = 0

        self.transform = load.NormDenorm([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])

        self.train_loader, self.data_len = load.data_load(
            self.transform,
            params["batch_size"],
            shuffle=True,
            output_res=params["render_res"],
            perc=params['data_perc'],
            workers=params['loader_workers'],
            generic=params['use_generic_dataset'],
            path_a=params['dataset'])

        print(f'Data Loader Initialized: {self.data_len} Images')

        self.model_dict["M"] = n.Model(params["grid_res"],
                                       f'dem/{params["dem_file"]}')

        self.model_dict["D"] = n.Discriminator(channels=3,
                                               filts=params["disc_filters"],
                                               kernel_size=4,
                                               layers=params["disc_layers"])
        self.v2t = n.Vert2Tri()
        self.t2v = n.Vert2Tri(conv=False)
        self.render = n.Render(res=params["render_res"])

        self.v2t.cuda()
        self.t2v.cuda()
        self.render.cuda()
        self.model_dict["D"].apply(helper.weights_init_normal)

        for model in self.model_dict.values():
            model.cuda()
            model.train()
        print('Networks Initialized')

        self.l1_loss = nn.L1Loss()

        self.dir_lgt_dir, self.dir_lgt_col, self.eye = helper.random_eye_and_light()

        # setup optimizers #
        opt_params = [
            {'params': self.model_dict["M"].textures, 'lr': params["lr_tex"]},
            {'params': self.model_dict["M"].vertices, 'lr': params["lr_mesh"]},
        ]

        self.opt_dict["M"] = optim.RMSprop(opt_params)

        print(f'Optimize Mesh:{params["opt_mesh"]}   Optimize Tex:{params["opt_tex"]}')
        if not params["opt_tex"]:
            self.model_dict["M"].textures.requires_grad = False
        if not params["opt_mesh"]:
            self.model_dict["M"].vertices.requires_grad = False

        self.opt_dict["D"] = optim.RMSprop(self.model_dict["D"].parameters(),
                                           lr=params['lr_disc'])
        print('Optimizers Initialized')

        # setup history storage #
        self.losses = ['M_Loss', 'D_Loss']
        self.loss_batch_dict = {}
        self.loss_epoch_dict = {}
        self.train_hist_dict = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []

        print(f'Camera Pausing: {params["camera_pausing"]}')
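
The mesh optimizer above uses PyTorch's per-parameter-group options so textures and vertices get separate learning rates. A self-contained sketch of the same pattern, with toy tensors standing in for the model's parameters and placeholder learning rates:

    import torch
    from torch import optim

    # Toy stand-ins for the textures and vertices parameters.
    textures = torch.randn(16, 3, requires_grad=True)
    vertices = torch.randn(100, 3, requires_grad=True)

    # One parameter group per dict, each with its own lr,
    # mirroring opt_params above (lr values are placeholders).
    opt = optim.RMSprop([
        {'params': [textures], 'lr': 1e-2},
        {'params': [vertices], 'lr': 1e-4},
    ])

    (textures.sum() + vertices.sum()).backward()
    opt.step()  # each group steps with its own learning rate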
Example #3
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0
        self.current_cycle = 0

        # Setup data loaders
        self.transform = load.NormDenorm([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])

        self.train_loader, data_len = load.data_load(f'/data/{params["dataset"]}/{params["train_folder"]}/',
                                                     self.transform,
                                                     params["batch_size"],
                                                     shuffle=True,
                                                     output_res=params["img_output_size"],
                                                     perc=params["test_perc"])

        self.test_loader, test_data_len = load.data_load(f'/data/{params["dataset"]}/{params["test_folder"]}/',
                                                         self.transform,
                                                         1,
                                                         shuffle=False,
                                                         perc=params["test_perc"],
                                                         output_res=params["img_output_size"],
                                                         train=False)
        # Set learning rate schedule
        self.set_lr_sched(params['train_epoch'],
                          math.ceil(data_len / params['batch_size']),
                          params['lr_cycle_mult'])

        # Setup models
        self.model_dict["G"] = n.Generator(layers=params["gen_layers"],
                                           filts=params["gen_filters"],
                                           channels=params["in_channels"])

        self.model_dict["D"] = n.Discriminator(layers=params["disc_layers"],
                                               filts=params["disc_filters"],
                                               channels=params["in_channels"] * 2)

        for model in self.model_dict.values():
            model.apply(helper.weights_init_normal)
            model.cuda()
            model.train()

        print('Networks Initialized')

        # Setup losses
        self.BCE_loss = nn.BCELoss()
        self.L1_loss = nn.L1Loss()

        # Setup optimizers
        self.opt_dict["G"] = optim.Adam(self.model_dict["G"].parameters(),
                                        lr=params['lr_gen'],
                                        betas=(params['beta1'], params['beta2']),
                                        weight_decay=.00001)

        self.opt_dict["D"] = optim.Adam(self.model_dict["D"].parameters(),
                                        lr=params['lr_disc'],
                                        betas=(params['beta1'], params['beta2']),
                                        weight_decay=.00001)

        print('Optimizers Initialized')

        # Setup history storage
        self.losses = ['D_loss', 'G_D_loss', 'G_L_loss']
        self.loss_batch_dict = {}
        self.loss_batch_dict_test = {}
        self.loss_epoch_dict = {}
        self.loss_epoch_dict_test = {}
        self.train_hist_dict = {}
        self.train_hist_dict_test = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []
            self.train_hist_dict_test[loss] = []
            self.loss_epoch_dict_test[loss] = []
            self.loss_batch_dict_test[loss] = []
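
The schedule call above sizes each learning-rate cycle from the number of iterations per epoch. A quick worked check of that computation with assumed numbers (ceil counts the final short batch, since this loader does not drop it):

    import math

    data_len = 10_000   # assumed dataset size
    batch_size = 64

    iters_per_epoch = math.ceil(data_len / batch_size)
    print(iters_per_epoch)  # 157: 156 full batches plus one short one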
Example #4
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0

        # Setup data loaders
        self.transform = load.NormDenorm([.485, .456, .406],
                                         [.229, .224, .225])

        self.train_data = load.GenericDataset(f'data/{params["dataset"]}',
                                              self.transform,
                                              output_res=params["res"],
                                              test_perc=params["test_perc"],
                                              data_perc=params["data_perc"])

        self.data_len = len(self.train_data)
        self.test_data = copy.deepcopy(self.train_data)
        self.test_data.train = False

        self.train_loader = torch.utils.data.DataLoader(
            self.train_data,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)

        self.test_loader = torch.utils.data.DataLoader(
            self.test_data,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)
        print('Data Loaders Initialized')
        # Setup models
        self.model_dict["G"] = n.Generator(layers=params["gen_layers"],
                                           filts=params["gen_filters"],
                                           channels=params["in_channels"],
                                           res_layers=params["res_blocks"])

        self.tensor_transform = n.TensorTransform(res=params["res"],
                                                  mean=[.485, .456, .406],
                                                  std=[.229, .224, .225])
        self.tensor_transform.cuda()

        for model in self.model_dict.values():
            model.apply(helper.weights_init_normal)
            model.cuda()
            model.train()

        print('Networks Initialized')
        # Setup loss
        self.style = load.open_style(f'style/{params["style_image"]}',
                                     self.transform,
                                     batch_size=params["batch_size"],
                                     size=params["res"]).cuda()

        self.vgg = n.make_vgg()
        self.vgg.cuda()

        self.cs_loss = n.ContStyleLoss(self.vgg, self.style,
                                       params['content_weight'],
                                       params['style_weight'],
                                       params['vgg_layers_s'],
                                       params['vgg_layers_c'])

        # Setup optimizers
        self.opt_dict["G"] = optim.Adam(self.model_dict["G"].parameters(),
                                        lr=params['lr'],
                                        betas=(params['beta1'],
                                               params['beta2']))

        print('Optimizers Initialized')

        # Setup history storage
        self.losses = ['S_Loss', 'C_Loss']
        self.loss_batch_dict = {}
        self.loss_batch_dict_test = {}
        self.loss_epoch_dict = {}
        self.loss_epoch_dict_test = {}
        self.train_hist_dict = {}
        self.train_hist_dict_test = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []
            self.train_hist_dict_test[loss] = []
            self.loss_epoch_dict_test[loss] = []
            self.loss_batch_dict_test[loss] = []
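
The deepcopy trick above reuses one dataset object for both splits by flipping its train flag, so the two loaders stay independent. A minimal sketch of the pattern with a toy Dataset (how GenericDataset interprets the flag internally is an assumption):

    import copy
    import torch
    from torch.utils.data import Dataset, DataLoader

    class ToyDataset(Dataset):
        # Serves the first 80% of items when train is True, the rest otherwise.
        def __init__(self, n=100):
            self.items = list(range(n))
            self.train = True

        def _split(self):
            cut = int(len(self.items) * 0.8)
            return self.items[:cut] if self.train else self.items[cut:]

        def __len__(self):
            return len(self._split())

        def __getitem__(self, idx):
            return torch.tensor(self._split()[idx])

    train_data = ToyDataset()
    test_data = copy.deepcopy(train_data)  # independent copy of the dataset
    test_data.train = False                # flip the split flag on the copy only

    train_loader = DataLoader(train_data, batch_size=8, shuffle=True)
    test_loader = DataLoader(test_data, batch_size=8)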
Example #5
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.perc_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0
        self.current_epoch_iter = 0

        # Setup data loaders
        self.transform = load.NormDenorm([.5, .5, .5], [.5, .5, .5])

        self.train_data_a = load.FaceDataset(f'./data/{params["dataset_a"]}/',
                                             f'./data/{params["dataset_b"]}/',
                                             self.transform,
                                             output_res=params["res"])
        self.train_data_b = load.FaceDataset(f'./data/{params["dataset_b"]}/',
                                             f'./data/{params["dataset_a"]}/',
                                             self.transform,
                                             output_res=params["res"])

        self.train_loader_a = torch.utils.data.DataLoader(
            self.train_data_a,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)

        self.train_loader_b = torch.utils.data.DataLoader(
            self.train_data_b,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)

        print(f'Data Loaders Initialized,  Data A Len:{len(self.train_data_a)} '
              f' Data B Len:{len(self.train_data_b)}')

        # Setup models
        self.res_tran = n.TensorTransform(res=params["res"],
                                          mean=[91.4953, 103.8827, 131.0912],
                                          std=[1, 1, 1])
        self.res_tran.cuda()

        self.model_dict['ENC'] = n.Encoder(
            layers=int(math.log(params["res"], 2) - 2),
            attention=params['enc_att'])

        self.model_dict['DEC_A'] = n.Decoder(
            layers=int(math.log(params["res"], 2) - 3),
            min_filts=64,
            attention=params['dec_att'])
        self.model_dict['DEC_B'] = n.Decoder(
            layers=int(math.log(params["res"], 2) - 3),
            min_filts=64,
            attention=params['dec_att'])

        self.model_dict['DISC_A'] = n.Discriminator(
            attention=params['disc_att'], channels=3)
        self.model_dict['DISC_B'] = n.Discriminator(
            attention=params['disc_att'], channels=3)

        self.res_face = n.resnet_face()
        for param in self.res_face.parameters():
            param.requires_grad = False

        self.res_face.cuda()

        for model in self.model_dict.values():
            model.apply(helper.weights_init_normal)
            model.cuda()
            model.train()

        self.model_dict['ENC'].apply(helper.weights_init_icnr)
        self.model_dict['DEC_A'].apply(helper.weights_init_icnr)
        self.model_dict['DEC_B'].apply(helper.weights_init_icnr)

        print('Networks Initialized')

        # Setup loss
        face_children = list(self.res_face.children())

        res_face_hooks = [
            n.SetHook(face_children[i]) for i in params['res_layers_p']
        ]

        self.perceptual_loss = n.PerceptualLoss(self.res_face,
                                                params['perceptual_weight'],
                                                params['res_layers_p'],
                                                params['res_layers_p_weight'],
                                                hooks=res_face_hooks,
                                                use_instance_norm=True)

        self.perceptual_loss.cuda()

        disc_a_body = list(self.model_dict['DISC_A'].children())[0]
        disc_a_convs = [disc_a_body[1]] + [
            list(disc_a_body[i].children())[0].conv for i in (2, 3, 4)
        ]

        disc_a_hooks = [n.SetHook(i) for i in disc_a_convs]

        self.perc_dict['DISC_A'] = n.PerceptualLoss(
            self.model_dict['DISC_A'],
            params['disc_perceptual_weight'], [], [1, 1, 1, 1],
            hooks=disc_a_hooks,
            use_instance_norm=True)
        self.perc_dict['DISC_A'].cuda()

        disc_b_body = list(self.model_dict['DISC_B'].children())[0]
        disc_b_convs = [disc_b_body[1]] + [
            list(disc_b_body[i].children())[0].conv for i in (2, 3, 4)
        ]

        disc_b_hooks = [n.SetHook(i) for i in disc_b_convs]

        self.perc_dict['DISC_B'] = n.PerceptualLoss(
            self.model_dict['DISC_B'],
            params['disc_perceptual_weight'], [], [1, 1, 1, 1],
            hooks=disc_b_hooks,
            use_instance_norm=True)
        self.perc_dict['DISC_B'].cuda()
        # Setup optimizers

        self.opt_dict["AE_A"] = optim.Adam(
            itertools.chain(self.model_dict["ENC"].parameters(),
                            self.model_dict["DEC_A"].parameters()),
            lr=params['lr'],
            betas=(params['beta1'], params['beta2']),
            weight_decay=0.0)

        self.opt_dict["AE_B"] = optim.Adam(
            itertools.chain(self.model_dict["ENC"].parameters(),
                            self.model_dict["DEC_B"].parameters()),
            lr=params['lr'],
            betas=(params['beta1'], params['beta2']),
            weight_decay=0.0)

        self.opt_dict["DISC_A"] = optim.Adam(
            self.model_dict["DISC_A"].parameters(),
            lr=params['lr'],
            betas=(params['beta1'], params['beta2']),
            weight_decay=0.0)

        self.opt_dict["DISC_B"] = optim.Adam(
            self.model_dict["DISC_B"].parameters(),
            lr=params['lr'],
            betas=(params['beta1'], params['beta2']),
            weight_decay=0.0)
        print('Optimizers Initialized')

        # Setup history storage
        self.losses = [
            'L1_A_Loss', 'L1_B_Loss', 'P_A_Loss', 'P_B_Loss', 'D_A_Loss',
            'D_B_Loss', 'DP_A_Loss', 'DP_B_Loss', 'AE_A_Loss', 'AE_B_Loss',
            'MV_A_Loss', 'MV_B_Loss', 'M_A_Loss', 'M_B_Loss'
        ]

        self.loss_batch_dict = {}
        self.loss_epoch_dict = {}
        self.train_hist_dict = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []
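
The AE_A and AE_B optimizers above thread the shared encoder's parameters into both via itertools.chain, so stepping either autoencoder also updates the encoder. A stripped-down sketch of that setup with nn.Linear stand-ins:

    import itertools
    import torch
    from torch import nn, optim

    enc = nn.Linear(8, 4)    # shared encoder stand-in
    dec_a = nn.Linear(4, 8)  # decoder for domain A
    dec_b = nn.Linear(4, 8)  # decoder for domain B

    # Each optimizer owns the shared encoder plus one decoder.
    opt_a = optim.Adam(itertools.chain(enc.parameters(), dec_a.parameters()), lr=1e-4)
    opt_b = optim.Adam(itertools.chain(enc.parameters(), dec_b.parameters()), lr=1e-4)

    x = torch.randn(2, 8)
    loss_a = (dec_a(enc(x)) - x).abs().mean()  # L1-style reconstruction for A
    opt_a.zero_grad()
    loss_a.backward()
    opt_a.step()  # updates enc and dec_a; dec_b is untouched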
Example #6
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0

        # Setup data loaders
        self.transform = load.NormDenorm([.485, .456, .406],
                                         [.229, .224, .225])

        self.train_data = load.SuperResDataset(
            f'/data/{params["dataset"]}/train',
            self.transform,
            in_res=params["in_res"],
            out_res=params["in_res"] * 2 ** params["zoom_count"],
            data_perc=params["data_perc"],
            test_perc=params["test_perc"],
            kernel=params["blur_kernel"])

        self.test_data = copy.deepcopy(self.train_data)
        self.test_data.train = False

        self.train_loader = torch.utils.data.DataLoader(
            self.train_data,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)
        self.test_loader = torch.utils.data.DataLoader(
            self.test_data,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)

        self.data_len = len(self.train_data)
        print(f'Data Loaders Initialized,  Data Len:{self.data_len}')

        # Setup models
        self.model_dict['G'] = n.SrResNet(
            params["gen_filters"],
            params["zoom_count"],
            switch_epoch=params["rnn_switch_epoch"])

        self.weights_init = helper.WeightsInit(params["gen_filters"] * 4)
        for model in self.model_dict.values():
            model.apply(self.weights_init)
            model.cuda()
            model.train()

        print('Networks Initialized')
        # Setup loss

        self.vgg = n.make_vgg()
        self.vgg.cuda()

        self.ct_loss = n.ContLoss(self.vgg, params['content_weight'],
                                  params['l1_weight'], params['vgg_layers_c'])

        self.ct_loss.cuda()

        # Setup optimizers
        self.opt_dict["G"] = optim.Adam(self.model_dict["G"].parameters(),
                                        lr=params['lr'],
                                        betas=(params['beta1'],
                                               params['beta2']))

        print('Optimizers Initialized')

        # Setup history storage
        self.losses = ['L1_Loss', 'C_Loss']
        self.loss_batch_dict = {}
        self.loss_batch_dict_test = {}
        self.loss_epoch_dict = {}
        self.loss_epoch_dict_test = {}
        self.train_hist_dict = {}
        self.train_hist_dict_test = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []
            self.train_hist_dict_test[loss] = []
            self.loss_epoch_dict_test[loss] = []
            self.loss_batch_dict_test[loss] = []
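
One arithmetic detail in the constructor above: the output resolution is the input resolution doubled zoom_count times. math.pow returns a float, which is why the integer power is used there; a quick check with assumed values:

    import math

    in_res, zoom_count = 64, 2  # assumed values

    print(in_res * math.pow(2, zoom_count))  # 256.0 -- a float
    print(in_res * 2 ** zoom_count)          # 256   -- an int, safe as an image size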
Example #7
    def __init__(self, params):
        self.params = params
        self.model_dict = {}
        self.opt_dict = {}
        self.current_epoch = 0
        self.current_iter = 0
        self.preview_noise = helper.new_random_z(16, params['z_size'], seed=3)

        self.transform = load.NormDenorm([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])

        self.train_data = load.MountainDataset(params['dataset'],
                                               self.transform,
                                               output_res=params["res"],
                                               perc=params['data_perc'])

        self.datalen = len(self.train_data)

        self.train_loader = torch.utils.data.DataLoader(
            self.train_data,
            batch_size=params["batch_size"],
            num_workers=params["workers"],
            shuffle=True,
            drop_last=True)

        print(f'Data Loader Initialized: {self.datalen} Images')

        self.model_dict["G"] = n.Generator(
            layers=int(math.log(params["res"], 2) - 3),
            filts=params["gen_stretch_z_filts"],
            max_filts=params["gen_max_filts"],
            min_filts=params["gen_min_filts"],
            attention=params["attention"])

        self.model_dict["D"] = n.Discriminator(
            channels=3,
            layers=params["disc_layers"],
            filts_min=params["disc_min_filts"],
            filts=params["disc_max_filts"],
            attention=params["attention"])

        for model in self.model_dict.values():
            model.apply(helper.weights_init_normal)
            model.cuda()
            model.train()

        print('Networks Initialized')

        self.opt_dict["G"] = optim.Adam(self.model_dict["G"].parameters(),
                                        lr=params['lr_gen'])
        self.opt_dict["D"] = optim.Adam(self.model_dict["D"].parameters(),
                                        lr=params['lr_disc'])

        print('Optimizers Initialized')

        # setup history storage #
        self.losses = ['G_Loss', 'D_Loss']
        self.loss_batch_dict = {}
        self.loss_epoch_dict = {}
        self.train_hist_dict = {}

        for loss in self.losses:
            self.train_hist_dict[loss] = []
            self.loss_epoch_dict[loss] = []
            self.loss_batch_dict[loss] = []
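
The generator's depth above is derived from the target resolution: each upsampling layer doubles spatial size, so the layer count is log2 of the resolution minus a fixed offset. A worked check of the formula; math.log2 is used here because math.log(res, 2) computes log(res)/log(2), and the resulting float error, truncated by int(), could drop a layer:

    import math

    for res in (64, 128, 256, 512):        # assumed target resolutions
        layers = int(math.log2(res) - 3)   # exact for powers of two
        print(res, layers)                 # 64->3, 128->4, 256->5, 512->6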