def define_F(opt, use_bn=False):
    """Build a frozen feature-extractor network (netF) for perceptual loss.

    Selects the extractor from ``opt['network_F']['which_model_F']``; when the
    ``'network_F'`` section is absent, defaults to the pretrained VGG19
    extractor (conv5-4 features, taken before the ReLU).

    Args:
        opt (dict): options dict; must contain ``'gpu_ids'``, and may contain
            a ``'network_F'`` section with ``'which_model_F'`` in
            {'VGG', 'squeezenet', 'mobilenet', 'shufflenet'}.
        use_bn (bool): use the batch-norm VGG variant (changes the tapped
            layer index accordingly). Only meaningful for the VGG extractor.

    Returns:
        The feature extractor module, switched to eval mode (it is used as a
        fixed loss network and is never trained).

    Raises:
        NotImplementedError: if ``which_model_F`` names an unknown extractor.
    """
    gpu_ids = opt['gpu_ids']
    device = torch.device('cuda' if gpu_ids else 'cpu')

    if 'network_F' not in opt:
        which_model = 'VGG'  # default: PyTorch pretrained VGG19
    else:
        which_model = opt['network_F']['which_model_F']

    if which_model == 'VGG':
        # VGG19-54 features, tapped before the ReLU; the BN variant has
        # extra layers, so the tap index shifts from 34 to 49.
        feature_layer = 49 if use_bn else 34
        netF = SRGAN_arch.VGGFeatureExtractor(feature_layer=feature_layer,
                                              use_bn=use_bn,
                                              use_input_norm=True,
                                              device=device)
    elif which_model == 'squeezenet':
        netF = SRGAN_arch.SqueezeNetFeatureExtractor(use_input_norm=True,
                                                     device=device)
    elif which_model == 'mobilenet':
        netF = SRGAN_arch.MobileNetFeatureExtractor(use_input_norm=True,
                                                    device=device)
    elif which_model == 'shufflenet':
        netF = SRGAN_arch.ShuffleNetFeatureExtractor(use_input_norm=True,
                                                     device=device)
    else:
        raise NotImplementedError(
            'Feature network [{:s}] is not recognized'.format(which_model))

    netF.eval()  # fixed loss network; no need to train
    return netF
def define_F(opt, use_bn=False):
    """Build the fixed VGG feature extractor used for perceptual loss.

    Args:
        opt (dict): options dict; ``opt['gpu_ids']`` decides CUDA vs CPU.
        use_bn (bool): pick the batch-norm VGG variant (tap layer 49
            instead of 34).

    Returns:
        The VGG19-54 feature extractor (features before the ReLU), in eval
        mode since it is never trained.

    NOTE(review): this redefines ``define_F`` — an earlier definition of the
    same name exists in this file and is shadowed; confirm which variant is
    intended to survive.
    """
    gpu_ids = opt['gpu_ids']
    device = torch.device('cuda' if gpu_ids else 'cpu')
    # BN variant has extra layers, shifting the tapped layer index.
    feature_layer = 49 if use_bn else 34
    netF = SRGAN_arch.VGGFeatureExtractor(feature_layer=feature_layer,
                                          use_bn=use_bn,
                                          use_input_norm=True,
                                          device=device)
    netF.eval()  # fixed loss network; no gradients needed
    return netF