def test_session(self):
    from skimage import data
    from supermariopy.tfutils import image

    vgg = VGG19(include_top=False, weights="imagenet")
    vgg.trainable = False
    session = tf.compat.v1.Session()
    perceptual_vgg = smlosses.PerceptualVGG(vgg=vgg, eager=False, session=session)

    x = (
        data.astronaut()
        .astype(np.float32)
        .reshape((1, 512, 512, 3))[:, :224, :224, :]
    )
    y = (
        data.astronaut()
        .astype(np.float32)
        .reshape((1, 512, 512, 3))[:, :224, :224, :]
    )

    x_ph = tf.compat.v1.placeholder(shape=(1, 224, 224, 3), dtype=tf.float32)
    y_ph = tf.compat.v1.placeholder(shape=(1, 224, 224, 3), dtype=tf.float32)
    loss = perceptual_vgg.loss(x_ph, y_ph)

    session.run(tf.compat.v1.global_variables_initializer())
    loss_v = session.run(loss, {x_ph: x, y_ph: y})
    # identical inputs should give a zero loss for every feature layer
    assert all([np.allclose(l, np.array([0])) for l in loss_v])
def __init__(self, session, feature_layers=None, feature_weights=None, gram_weights=None):
    K.set_session(session)
    self.base_model = VGG19(include_top=False, weights="imagenet")
    if feature_layers is None:
        feature_layers = [
            "input_1",
            "block1_conv2",
            "block2_conv2",
            "block3_conv2",
            "block4_conv2",
            "block5_conv2",
        ]
    self.layer_names = [l.name for l in self.base_model.layers]
    for k in feature_layers:
        if k not in self.layer_names:
            raise KeyError(
                "Invalid layer {}. Available layers: {}".format(k, self.layer_names)
            )
    features = [self.base_model.get_layer(k).output for k in feature_layers]
    # sub-model mapping an input image to the selected VGG19 feature maps
    self.model = Model(inputs=self.base_model.input, outputs=features)
    if feature_weights is None:
        feature_weights = len(feature_layers) * [1.0]
    if gram_weights is None:
        gram_weights = len(feature_layers) * [0.1]
    self.feature_weights = feature_weights
    self.gram_weights = gram_weights
    assert len(self.feature_weights) == len(features)
    self.use_gram = np.max(self.gram_weights) > 0.0
    self.variables = self.base_model.weights
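# Hypothetical helper, not part of the original code: the `gram_weights` and
# `use_gram` attributes above suggest a Gatys-style Gram-matrix term on each
# feature map. A minimal sketch of such a computation in TF 1.x could look like
# this; the name `gram_matrix` and the normalization by H * W * C are assumptions.
def gram_matrix(features):
    """Compute batched Gram matrices G = F^T F / (H * W * C) for NHWC features."""
    shape = tf.shape(features)
    n, h, w, c = shape[0], shape[1], shape[2], shape[3]
    f = tf.reshape(features, (n, h * w, c))           # [N, H*W, C]
    gram = tf.matmul(f, f, transpose_a=True)          # [N, C, C]
    return gram / tf.cast(h * w * c, features.dtype)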
def test_eager(self):
    tf.compat.v1.enable_eager_execution()
    vgg = VGG19(include_top=False, weights="imagenet")
    vgg.trainable = False
    perceptual_vgg = smlosses.PerceptualVGG(vgg=vgg, eager=True)

    x = data.astronaut().astype(np.float32).reshape((1, 512, 512, 3))
    x = image.resize_bilinear(x, [224, 224])
    y = data.astronaut().astype(np.float32).reshape((1, 512, 512, 3))
    y = image.resize_bilinear(y, [224, 224])

    # identical inputs -> every per-layer loss is zero
    losses = perceptual_vgg.loss(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
    assert all([np.allclose(loss, np.array([0])) for loss in losses])

    # perturbing one input with noise -> every per-layer loss is non-zero
    y = image.resize_bilinear(y, [224, 224]) + tf.random.normal(y.shape)
    losses = perceptual_vgg.loss(tf.convert_to_tensor(x), tf.convert_to_tensor(y))
    assert all([not np.allclose(loss, np.array([0])) for loss in losses])
def __init__(
    self,
    session,
    feature_layers=None,
    feature_weights=None,
    gram_weights=None,
    default_gram=0.1,
    original_scale=False,
    eager=False,
):
    if not eager:
        # in graph mode, bind Keras to the provided session
        K.set_session(session)
    self.base_model = VGG19(include_top=False, weights="imagenet")
    if feature_layers is None:
        feature_layers = [
            "input_1",
            "block1_conv2",
            "block2_conv2",
            "block3_conv2",
            "block4_conv2",
            "block5_conv2",
        ]
    self.layer_names = [l.name for l in self.base_model.layers]
    for k in feature_layers:
        if k not in self.layer_names:
            raise KeyError(
                "Invalid layer {}. Available layers: {}".format(k, self.layer_names)
            )
    self.feature_layers = feature_layers
    features = [self.base_model.get_layer(k).output for k in feature_layers]
    self.model = Model(inputs=self.base_model.input, outputs=features)
    if feature_weights is None:
        feature_weights = len(feature_layers) * [1.0]
    if gram_weights is None:
        gram_weights = len(feature_layers) * [default_gram]
    elif isinstance(gram_weights, (int, float)):
        # broadcast a scalar gram weight to all feature layers
        gram_weights = len(feature_layers) * [gram_weights]
    self.feature_weights = feature_weights
    self.gram_weights = gram_weights
    assert len(self.feature_weights) == len(features)
    self.use_gram = np.max(self.gram_weights) > 0.0
    self.original_scale = original_scale
    self.variables = self.base_model.weights
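# Hypothetical sketch, not the repository's actual `loss` method: with the
# attributes set up above, a perceptual loss would typically push both images
# through `self.model`, take a per-layer feature distance weighted by
# `feature_weights`, and add a Gram-matrix term weighted by `gram_weights`
# when `use_gram` is True. The function name, the L1 distance, and the use of
# the `gram_matrix` sketch above are assumptions for illustration only.
def _example_perceptual_loss(perceptual_vgg, x, y):
    feats_x = perceptual_vgg.model(x)
    feats_y = perceptual_vgg.model(y)
    losses = []
    for fx, fy, fw, gw in zip(
        feats_x, feats_y, perceptual_vgg.feature_weights, perceptual_vgg.gram_weights
    ):
        # weighted feature distance for this layer
        layer_loss = fw * tf.reduce_mean(tf.abs(fx - fy))
        if perceptual_vgg.use_gram:
            # optional style (Gram) term for this layer
            layer_loss += gw * tf.reduce_mean(tf.abs(gram_matrix(fx) - gram_matrix(fy)))
        losses.append(layer_loss)
    return losses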
def __init__(self):
    # self.input = tf.placeholder(tf.float32, [None, 224, 224, 3], name="input_img")
    self.model = VGG19(include_top=False)