import logging

import torch
import torch.nn as nn

# Repo-local dependencies assumed importable here: the BigGAN module
# (generator definition), utils, and the ConditionedBigGAN /
# UnconditionalBigGAN wrappers.

logger = logging.getLogger(__name__)


def build(self):
    logger.info("loading model {} from cache at {}".format(
        self.model_name, self.weight_path))

    # Load config; make_unconditioned_big_gan is expected to return a kwargs
    # dict suitable for BigGAN.Generator.
    config = make_unconditioned_big_gan(self.config_path, self.weight_path)
    self.config = config
    logger.info("Model config {}".format(config))

    self.net = BigGAN.Generator(**config)
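# Usage sketch for build() above, assuming it belongs to a loader class
# (ModelLoader is a hypothetical name, used purely for illustration) whose
# constructor sets model_name, config_path, and weight_path:
#
#   loader = ModelLoader(model_name='biggan-128',
#                        config_path='/path/to/config',
#                        weight_path='/path/to/weights')
#   loader.build()
#   net = loader.net  # BigGAN.Generator built from the loaded config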
def make_big_gan(weights_root, target_class):
    config = make_biggan_config(weights_root)
    # Derive size, class count, and activations from the dataset the
    # checkpoint was trained on.
    config['resolution'] = utils.imsize_dict[config['dataset']]
    config['n_classes'] = utils.nclass_dict[config['dataset']]
    config['G_activation'] = utils.activation_dict[config['G_nl']]
    config['D_activation'] = utils.activation_dict[config['D_nl']]
    # Skip weight init and optimizer state: weights come from the checkpoint.
    config['skip_init'] = True
    config['no_optim'] = True

    G = BigGAN.Generator(**config)
    G.load_state_dict(torch.load(config['weights_root'], map_location='cpu'),
                      strict=True)
    return ConditionedBigGAN(G, target_class).cuda()
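# A minimal usage sketch for make_big_gan, assuming a local copy of the BigGAN
# checkpoint; the path and target class below are placeholders, and the
# ConditionedBigGAN wrapper is assumed to expose dim_z and map z -> images
# like the plain generator:
#
#   G = make_big_gan('/path/to/biggan/weights', target_class=239)
#   z = torch.randn(8, G.dim_z).cuda()
#   with torch.no_grad():
#       imgs = G(z)  # (8, 3, resolution, resolution), values roughly in [-1, 1]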
def make_big_bi_gan_as_big_gan(weights_root, resolution=128):
    # Per-resolution attention placement and latent dimensionality.
    attn_dict = {128: '64', 256: '128', 512: '64'}
    dim_z_dict = {128: 120, 256: 140, 512: 128}
    config = {
        'G_param': 'SN', 'D_param': 'SN',
        'G_ch': 96, 'D_ch': 96,
        'D_wide': True, 'G_shared': True,
        'shared_dim': 128, 'dim_z': dim_z_dict[resolution],
        'hier': True, 'cross_replica': False,
        'mybn': False, 'G_activation': nn.ReLU(inplace=True),
        'G_attn': attn_dict[resolution],
        'norm_style': 'bn',
        'G_init': 'ortho', 'skip_init': True, 'no_optim': True,
        'G_fp16': False, 'G_mixed_precision': False,
        'accumulate_stats': False, 'num_standing_accumulations': 16,
        'G_eval_mode': True,
        'BN_eps': 1e-04, 'SN_eps': 1e-04,
        'num_G_SVs': 1, 'num_G_SV_itrs': 1,
        'resolution': resolution,
        'n_classes': 1000,
    }

    G = BigGAN.Generator(**config)
    # strict=False: the BigBiGAN checkpoint does not exactly match the BigGAN
    # Generator's state dict, so missing/unexpected keys are tolerated.
    G.load_state_dict(torch.load(weights_root, map_location=torch.device('cpu')),
                      strict=False)
    return UnconditionalBigGAN(G).cuda()
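# For reference, a minimal sketch of what the UnconditionalBigGAN wrapper used
# above is assumed to do: drive the class-conditional generator with a single
# fixed class embedding so it can be sampled from z alone. The repo's actual
# wrapper may differ in details; this follows the BigGAN-PyTorch Generator
# interface, where forward(z, y) takes the shared class embedding as y.
class UnconditionalBigGAN(nn.Module):
    def __init__(self, big_gan):
        super().__init__()
        self.big_gan = big_gan
        self.dim_z = big_gan.dim_z

    def forward(self, z):
        # Use the same (class-0) shared embedding for every sample in the batch.
        classes = torch.zeros(z.shape[0], dtype=torch.int64, device=z.device)
        return self.big_gan(z, self.big_gan.shared(classes))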