Example #1
 def __init__(self, conv_depth, name="encoder", **kwargs):
     super(Encoder, self).__init__(name=name, **kwargs)
     self.data_format = "channels_last"
     num_filters = 256
     self.sublayers = [
         tfc.SignalConv2D(
             num_filters,
             (9, 9),
             name="layer_0",
             corr=True,
             strides_down=2,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_0"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_1",
             corr=True,
             strides_down=2,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_1"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_2",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_2"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_3",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="gdn_3"),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             conv_depth,
             (5, 5),
             name="layer_out",
             corr=True,
             strides_down=1,
             padding="same_zeros",
             use_bias=True,
             activation=None,
         ),
     ]
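Only the constructor is shown here; the forward pass is not part of the original snippet. A minimal sketch, assuming a conventional Keras call method that applies the sublayers in order:
 def call(self, x):
     # Assumed behavior: run each SignalConv2D/GDN stage and PReLU in sequence
     # on a channels_last feature map.
     for sublayer in self.sublayers:
         x = sublayer(x)
     return x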
Example #2
def analysis_transform(tensor, num_filters):
    """Builds the analysis transform."""

    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(num_filters, (9, 9),
                                     corr=True,
                                     strides_down=4,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(num_filters, (5, 5),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(num_filters, (5, 5),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=False,
                                     activation=None)
            tensor = layer(tensor)

        return tensor
Example #3
def encoder_li(tensor):

    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                NUM_FILTERS, (9, 9), corr=True, strides_down=4, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                NUM_FILTERS, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                NUM_FILTERS, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=False, activation=tfc.GDN())
            tensor = layer(tensor)
            tensor2 = tensor

        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                NUM_FILTERS, (1, 1), corr=True, strides_down=1, padding="same_zeros",
                use_bias=False, activation=None)
            tensor = layer(tensor)

        return tensor, tensor2
Example #4
 def __init__(self, num_filters):
     super().__init__(name="analysis")
     self.add(tf.keras.layers.Lambda(lambda x: x / 255.))
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_0",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_0")))
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_1",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_1")))
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_2",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_2")))
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_3",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=None))
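Because the constructor adds its layers to a Sequential-style Keras model named "analysis", applying the transform is a single call. A usage sketch in which the class name AnalysisTransform, the filter count, and the input shape are all assumptions:
analysis = AnalysisTransform(num_filters=192)   # hypothetical class name and width
x = tf.zeros([1, 256, 256, 3])                  # batch of RGB images in [0, 255]
y = analysis(x)                                 # -> [1, 16, 16, 192] after four stride-2 layers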
Example #5
 def __init__(self, n_channels, name="decoder", **kwargs):
     super(Decoder, self).__init__(name=name, **kwargs)
     self.data_format = "channels_last"
     num_filters = 256
     self.sublayers = [
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_out",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_out", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_0",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_0", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_1",
             corr=False,
             strides_up=1,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_1", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             num_filters,
             (5, 5),
             name="layer_2",
             corr=False,
             strides_up=2,
             padding="same_zeros",
             use_bias=True,
             activation=tfc.GDN(name="igdn_2", inverse=True),
         ),
         layers.PReLU(shared_axes=[1, 2]),
         tfc.SignalConv2D(
             n_channels,
             (9, 9),
             name="layer_3",
             corr=False,
             strides_up=2,
             padding="same_zeros",
             use_bias=True,
             activation=tf.nn.sigmoid,
         ),
     ]
Example #6
def decoder_li(tensor):

  with tf.variable_scope("synthesis"):

    with tf.variable_scope("layer_0"):
         layer = tfc.SignalConv2D(
             NUM_FILTERS, (1, 1), corr=True, strides_down=1, padding="same_zeros",
             use_bias=True, activation=tfc.GDN(inverse=True))
         tensor = layer(tensor)

    with tf.variable_scope("layer_1"):
      layer = tfc.SignalConv2D(
          NUM_FILTERS, (5, 5), corr=False, strides_up=2, padding="same_zeros",
          use_bias=True, activation=tfc.GDN(inverse=True))
      tensor = layer(tensor)

    with tf.variable_scope("layer_2"):
      layer = tfc.SignalConv2D(
          NUM_FILTERS, (5, 5), corr=False, strides_up=2, padding="same_zeros",
          use_bias=True, activation=tfc.GDN(inverse=True))
      tensor = layer(tensor)

    with tf.variable_scope("layer_3"):
      layer = tfc.SignalConv2D(
          3, (9, 9), corr=False, strides_up=4, padding="same_zeros",
          use_bias=True, activation=None)
      tensor = layer(tensor)

  return tensor
Example #7
def analysis_transform_e4(tensor, conv_filters_num, num_filters):
    """Builds the analysis transform."""

    with tf.variable_scope("e4_pre256", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("e4_enc_layer_0", reuse=tf.AUTO_REUSE):
            layer = tfc.SignalConv2D(conv_filters_num, (9, 9),
                                     corr=True,
                                     strides_down=4,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("e4_enc_layer_1", reuse=tf.AUTO_REUSE):
            layer = tfc.SignalConv2D(conv_filters_num, (5, 5),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("e4_enc_layer_2", reuse=tf.AUTO_REUSE):
            layer = tfc.SignalConv2D(num_filters, (5, 5),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=False,
                                     activation=None)
            tensor = layer(tensor)

        return tensor
Example #8
 def build(self, input_shape):
   self._layers = [
       tfc.SignalConv2D(
           self.num_filters, (9, 9), name="layer_0", corr=True, strides_down=4,
           padding="same_zeros", use_bias=True,
           activation=tfc.GDN(name="gdn_0")),
       tfc.SignalConv2D(
           1, (5, 5), name="layer_1dw", corr=True, strides_down=2,
           padding="same_zeros", use_bias=True,
           channel_separable=True,
           activation=None),
       tfc.SignalConv2D(
           self.num_filters, (1, 1), name="layer_1pw", corr=True, strides_down=1,
           padding="same_zeros", use_bias=True,
           activation=tfc.GDN(name="gdn_1")),
       tfc.SignalConv2D(
           1, (5, 5), name="layer_2dw", corr=True, strides_down=2,
           padding="same_zeros", use_bias=True,
           channel_separable=True,
           activation=None),
       tfc.SignalConv2D(
           self.num_filters, (1, 1), name="layer_2pw", corr=True, strides_down=1,
           padding="same_zeros", use_bias=True,
           activation=None),
   ]
   super(AnalysisTransform, self).build(input_shape)
Example #9
 def build(self, input_shape):
     self._layers = [
         tfc.SignalConv2D(self.num_filters, (5, 5),
                          name="layer_0",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_0")),
         tfc.SignalConv2D(self.num_filters, (5, 5),
                          name="layer_1",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_1")),
         tfc.SignalConv2D(self.num_filters, (5, 5),
                          name="layer_2",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_2")),
         tfc.SignalConv2D(self.num_filters, (5, 5),
                          name="layer_3",
                          corr=True,
                          strides_down=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=None),
     ]
     super(AnalysisTransform, self).build(input_shape)
Example #10
 def build(self, input_shape):
     self._layers = [
         tfc.SignalConv2D(self.num_filters, (5, 5),
                          name="layer_0",
                          corr=False,
                          strides_up=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_0", inverse=True)),
         tfc.SignalConv2D(self.num_filters, (5, 5),
                          name="layer_1",
                          corr=False,
                          strides_up=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_1", inverse=True)),
         tfc.SignalConv2D(3, (9, 9),
                          name="layer_2",
                          corr=False,
                          strides_up=4,
                          padding="same_zeros",
                          use_bias=True,
                          activation=None),
     ]
     super(SynthesisTransform, self).build(input_shape)
Example #11
	def build(self, input_shape):
		self._layers = [
		    tfc.GDN(inverse=True),
		    tfc.SignalConv1D(self.num_filters // 8,
		                     3,
		                     strides_up=2,
		                     padding="same_zeros"),
		    tf.keras.layers.ReLU(),
		    tfc.SignalConv1D(self.num_filters, 3, padding="same_zeros"),
		    tfc.GDN(inverse=True),
		    tfc.SignalConv1D(self.num_filters,
		                     3,
		                     strides_up=2,
		                     padding="same_zeros"),
		    tf.keras.layers.ReLU(),
		    tfc.SignalConv1D(self.num_filters, 3, padding="same_zeros"),
		    tfc.GDN(inverse=True),
		    tfc.SignalConv1D(self.num_filters,
		                     3,
		                     strides_up=2,
		                     padding="same_zeros"),
		    tfc.SignalConv1D(2, 3, padding="same_zeros"),
		]

		super(SynthesisTransform, self).build(input_shape)
Example #12
 def __init__(self, num_filters):
     super().__init__(name="synthesis")
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_0",
                          corr=False,
                          strides_up=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_0", inverse=True)))
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_1",
                          corr=False,
                          strides_up=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_1", inverse=True)))
     self.add(
         tfc.SignalConv2D(num_filters, (5, 5),
                          name="layer_2",
                          corr=False,
                          strides_up=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_2", inverse=True)))
     self.add(
         tfc.SignalConv2D(3, (5, 5),
                          name="layer_3",
                          corr=False,
                          strides_up=2,
                          padding="same_zeros",
                          use_bias=True,
                          activation=None))
     self.add(tf.keras.layers.Lambda(lambda x: x * 255.))
Example #13
def synthesis_transform(tensor, num_filters):
  """Builds the synthesis transform."""

  with tf.variable_scope("synthesis"):
    with tf.variable_scope("layer_0"):
      layer = tfc.SignalConv2D(
          num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
          use_bias=True, activation=tfc.GDN(inverse=True))
      tensor = layer(tensor)

    with tf.variable_scope("layer_1"):
      layer = tfc.SignalConv2D(
          num_filters, (5, 5), corr=False, strides_up=1, padding="same_zeros",
          use_bias=True, activation=tfc.GDN(inverse=True))
      tensor = layer(tensor)

    with tf.variable_scope("layer_2"):
      layer = tfc.SignalConv2D(
          num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
          use_bias=True, activation=tfc.GDN(inverse=True))
      tensor = layer(tensor)

    with tf.variable_scope("layer_3"):
      layer = tfc.SignalConv2D(
          num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
          use_bias=True, activation=tfc.GDN(inverse=True))
      tensor = layer(tensor)

    with tf.variable_scope("layer_4"):
      layer = tfc.SignalConv2D(
          3, (9, 9), corr=False, strides_up=4, padding="same_zeros",
          use_bias=True, activation=None)
      tensor = layer(tensor)

    return tensor
Example #14
def synthesis_transform(tensor, conv_filters_num, num_filters):
    """Builds the synthesis transform."""

    with tf.variable_scope("pre256", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("dec_layer_0", reuse=tf.AUTO_REUSE):
            layer = tfc.SignalConv2D(conv_filters_num, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)
            tf.add_to_collection("pre256", tensor)

        with tf.variable_scope("dec_layer_1", reuse=tf.AUTO_REUSE):
            layer = tfc.SignalConv2D(conv_filters_num, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)
            tf.add_to_collection("pre256", tensor)

        with tf.variable_scope("dec_layer_2", reuse=tf.AUTO_REUSE):
            layer = tfc.SignalConv2D(3, (9, 9),
                                     corr=False,
                                     strides_up=4,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(tensor)
            tf.add_to_collection("pre256", tensor)

        return tensor
Example #15
def slimmable_analysis_transform(tensor_in, switch_list, total_filters_num):
    """Builds the slimmable analysis transform."""
    with tf.variable_scope("analysis"):
        tensor_encoder = list()
        for i, _switch in enumerate(switch_list):
            # the first conv and switchable gdn layers
            with tf.variable_scope("layer_0", reuse=(i > 0)):
                layer = tfc.SignalConv2D_slim(total_filters_num, (9, 9),
                                              corr=True,
                                              strides_down=4,
                                              padding="same_zeros",
                                              use_bias=True,
                                              activation=None)
                tensor = layer(tensor_in, 3, _switch)

            with tf.variable_scope("gdn_an_0_{:1d}".format(i)):
                tensor_gdn_0 = tfc.GDN()(tensor)
            tensor_gdn_0 = tf.pad(
                tensor_gdn_0,
                [[0, 0], [0, 0], [0, 0], [0, (total_filters_num - _switch)]],
                "CONSTANT")

            # the second conv and switchable gdn layers
            with tf.variable_scope("layer_1", reuse=(i > 0)):
                layer = tfc.SignalConv2D_slim(total_filters_num, (5, 5),
                                              corr=True,
                                              strides_down=2,
                                              padding="same_zeros",
                                              use_bias=True,
                                              activation=None)
                tensor = layer(tensor_gdn_0, _switch, _switch)

            with tf.variable_scope("gdn_an_1_{:1d}".format(i)):
                tensor_gdn_1 = tfc.GDN()(tensor)
            tensor_gdn_1 = tf.pad(
                tensor_gdn_1,
                [[0, 0], [0, 0], [0, 0], [0, (total_filters_num - _switch)]],
                "CONSTANT")

            # the third conv and switchable gdn layers
            with tf.variable_scope("layer_2", reuse=(i > 0)):
                layer = tfc.SignalConv2D_slim(total_filters_num, (5, 5),
                                              corr=True,
                                              strides_down=2,
                                              padding="same_zeros",
                                              use_bias=False,
                                              activation=None)
                tensor = layer(tensor_gdn_1, _switch, _switch)

            with tf.variable_scope("gdn_an_2_{:1d}".format(i)):
                tensor_gdn_2 = tfc.GDN()(tensor)

            # store the bottleneck features from the different widths
            tensor_encoder.append(tensor_gdn_2)

        return tensor_encoder
Example #16
def analysis_transform(tensor, num_filters):
    """Builds the analysis transform."""

    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (9, 9), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2, padding="same_zeros",
                use_bias=False, activation=None)
            tensor = layer(tensor)

        with tf.variable_scope('reshape'):
            tensor = tf.layers.flatten(tensor)
        if args.activation == 'sigmoid':
            with tf.variable_scope('encoder'):
                tensor = tf.nn.sigmoid(tf.layers.dense(tensor, args.dim1))
                tensor = tf.layers.dense(tensor, args.z)
                # No reparameterization in this branch; return the code directly.
                z, mean, sigma = tensor, None, None
        elif args.activation == 'softplus':
            with tf.variable_scope('encoder'):
                tensor = tf.nn.softplus(tf.layers.dense(tensor, args.dim1))
                # mean of z
                mean = tf.layers.dense(tensor, args.z)
                # log-variance of z
                sigma = tf.layers.dense(tensor, args.z)

                # Sampler: normal (Gaussian) random distribution
                eps = tf.random_normal(tf.shape(mean), dtype=tf.float32, mean=0., stddev=1.0,
                                       name='epsilon')
                # reparameterization trick
                z = mean + tf.exp(sigma / 2) * eps
                # x = tf.layers.dense(x, 128, tf.nn.tanh)
        elif args.activation == 'None':
            with tf.variable_scope('encoder'):
                tensor = tf.layers.dense(tensor, args.z)
                z, mean, sigma = tensor, None, None  # no reparameterization here either
        return z, mean, sigma
Example #17
 def __init__(self):
     super().__init__()
     conv = functools.partial(tfc.SignalConv3D, corr=True, strides_down=2,
                              padding="same_zeros", use_bias=True)
     layers = [
         conv(192, (3, 3, 3), name="layer_0", activation=tfc.GDN(name="gdn_0")),
         conv(192, (3, 3, 3), name="layer_1", activation=tfc.GDN(name="gdn_1")),
         conv(192, (3, 3, 3), name="layer_2", activation=tfc.GDN(name="gdn_2")),
         conv(320, (3, 3, 3), name="layer_3", activation=None),
     ]
     for layer in layers:
         self.add(layer)
Example #18
def MV_analysis(tensor, num_filters, out_filters, Height, Width, c_state,
                h_state, act):
    """Builds the analysis transform."""

    with tf.variable_scope("MV_analysis", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(num_filters, (3, 3),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(num_filters, (3, 3),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("recurrent"):
            tensor, c_state_out, h_state_out = one_step_rnn(tensor,
                                                            c_state,
                                                            h_state,
                                                            Height,
                                                            Width,
                                                            num_filters,
                                                            scale=4,
                                                            kernal=[3, 3],
                                                            act=act)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(num_filters, (3, 3),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN())
            tensor = layer(tensor)

        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(out_filters, (3, 3),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(tensor)

        return tensor, c_state_out, h_state_out
Example #19
def Res_synthesis(tensor, num_filters, Height, Width, c_state, h_state, act):
    """Builds the synthesis transform."""

    with tf.variable_scope("synthesis", reuse=tf.AUTO_REUSE):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(num_filters, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(num_filters, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)

        with tf.variable_scope("recurrent"):
            tensor, c_state_out, h_state_out = one_step_rnn(tensor,
                                                            c_state,
                                                            h_state,
                                                            Height,
                                                            Width,
                                                            num_filters,
                                                            scale=4,
                                                            kernal=[5, 5],
                                                            act=act)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(num_filters, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)

        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(3, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(tensor)

        return tensor, c_state_out, h_state_out
Example #20
 def __init__(self, latent_depth):
   super().__init__()
   conv = functools.partial(tfc.SignalConv2D, corr=True, strides_down=2,
                            padding="same_zeros", use_bias=True)
   layers = [
       tf.keras.layers.Lambda(lambda x: x / 255.),
       conv(192, (5, 5), name="layer_0", activation=tfc.GDN(name="gdn_0")),
       conv(192, (5, 5), name="layer_1", activation=tfc.GDN(name="gdn_1")),
       conv(192, (5, 5), name="layer_2", activation=tfc.GDN(name="gdn_2")),
       conv(latent_depth, (5, 5), name="layer_3", activation=None),
   ]
   for layer in layers:
     self.add(layer)
Example #21
 def build(self, input_shape):
     
     self.layers = [
         tfc.SignalConv2D(filters=self.num_filters, 
                          kernel_support=(5, 5), 
                          name="layer_0", 
                          corr=True, 
                          strides_down=2,
                          padding=self.padding, 
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_0")),
         tfc.SignalConv2D(filters=self.num_filters, 
                          kernel_support=(5, 5), 
                          name="layer_1", 
                          corr=True, 
                          strides_down=2,
                          padding=self.padding, 
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_1")),
         tfc.SignalConv2D(filters=self.num_filters, 
                          kernel_support=(5, 5), 
                          name="layer_2", 
                          corr=True, 
                          strides_down=2, 
                          padding=self.padding, 
                          use_bias=True,
                          activation=tfc.GDN(name="gdn_2"))]
     
     
     # Note the linear activation
     self.loc_head = tfc.SignalConv2D(filters=self.num_latent_channels, 
                                      kernel_support=(5, 5), 
                                      name="layer_loc", 
                                      corr=True, 
                                      strides_down=2,
                                      padding=self.padding, 
                                      use_bias=False,
                                      activation=None)
     
     # Note the exp activation
     self.scale_head = tfc.SignalConv2D(filters=self.num_latent_channels, 
                                        kernel_support=(5, 5), 
                                        name="layer_scale", 
                                        corr=True, 
                                        strides_down=2,
                                        padding=self.padding, 
                                        use_bias=False,
                                        activation=tf.math.exp)
     
     super(AnalysisTransform_1, self).build(input_shape)
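The call method is not shown; presumably the shared trunk feeds both heads to parameterize a Gaussian over the latents. A sketch under that assumption (not part of the original snippet):
 def call(self, x):
     # Assumed forward pass: shared convolutional trunk built above.
     for layer in self.layers:
         x = layer(x)
     # Linear head for the location, exp-activated head for the (positive) scale.
     return self.loc_head(x), self.scale_head(x)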
Example #22
 def __init__(self):
     super().__init__()
     conv = functools.partial(tfc.SignalConv3D, corr=False,
                              padding="same_zeros", use_bias=True)
     layers = [
         conv(8, (3, 3, 3), name="layer_0", strides_up=(1,4,4),
              activation=tfc.GDN(name="igdn_0", inverse=True)),
         conv(8, (3, 3, 3), name="layer_1", strides_up=(1,2,2),
              activation=tfc.GDN(name="igdn_1", inverse=True)),
         conv(2, (3, 3, 3), name="layer_2",
              activation=None),
     ]
     for layer in layers:
         self.add(layer)
Example #23
def get_BLS2017_neural_nets(num_filters, filter_dims, sampling_rates):
    """
    Create the inference and generative networks for a Gaussian VAE, using the GDN/IGDN architecture from Balle 2017.
    :param num_filters: iterable of ints indicating num filters per layer (original model used the same for all layers)
    :param filter_dims: iterable of ints indicating kernel widths in the forward (analysis) computation; the reverse is
        used for backward (synthesis) computation
    :param sampling_rates: iterable of ints indicating downsampling rates in the forward (analysis) computation; the
        reverse is used for backward (synthesis) upsampling computation
    :return: a tuple (inference_net, generative_net) of tf.keras.Sequential models
    """
    num_layers = len(num_filters)
    layers = []
    for i in range(num_layers):
        if i != num_layers - 1:
            layer = tfc.SignalConv2D(num_filters[i], (filter_dims[i], filter_dims[i]), name="layer_%d" % i,
                                     corr=True,
                                     strides_down=sampling_rates[i],
                                     padding="same_zeros", use_bias=True,
                                     activation=tfc.GDN(name="gdn_%d" % i))
        else:
            layer = tfc.SignalConv2D(num_filters[i] * 2, (filter_dims[i], filter_dims[i]), name="layer_%d" % i,
                                     corr=True,
                                     strides_down=sampling_rates[i],
                                     padding="same_zeros", use_bias=False,
                                     activation=None)  # twice the usual number of filters for combined output for mean and logvar
        layers.append(layer)

    inference_net = tf.keras.Sequential(layers)

    layers = []
    for i in reversed(range(num_layers)):
        j = num_layers - 1 - i
        if i != 0:
            layer = tfc.SignalConv2D(num_filters[i], (filter_dims[i], filter_dims[i]), name="layer_%d" % j,
                                     corr=False,
                                     strides_up=sampling_rates[i],
                                     padding="same_zeros", use_bias=True,
                                     activation=tfc.GDN(name="igdn_%d" % j, inverse=True))
        else:
            layer = tfc.SignalConv2D(3, (filter_dims[i], filter_dims[i]), name="layer_%d" % j,
                                     corr=False,
                                     strides_up=sampling_rates[i],
                                     padding="same_zeros", use_bias=True,
                                     activation=None)
        layers.append(layer)

    generative_net = tf.keras.Sequential(layers)

    return inference_net, generative_net
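A usage sketch for the factory above; the per-layer settings are illustrative values, not ones prescribed by the source:
# Hypothetical configuration: three analysis stages with 128 filters each,
# a 9x9 kernel followed by two 5x5 kernels, and downsampling rates of 4, 2, 2.
inference_net, generative_net = get_BLS2017_neural_nets(
    num_filters=[128, 128, 128],
    filter_dims=[9, 5, 5],
    sampling_rates=[4, 2, 2])
# The last analysis layer emits 2 * 128 channels (mean and logvar stacked);
# the synthesis network maps a 128-channel latent back to a 3-channel image.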
Example #24
def modulated_analysis_transform(tensor, conds, total_filters_num):
    """Builds the modulated analysis transform."""

    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(total_filters_num, (9, 9),
                                     corr=True,
                                     strides_down=4,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(tensor)
            vector = conds[0]
            modulated_tensor = tensor * vector

            with tf.variable_scope("gnd_an_0"):
                tensor_gdn_0 = tfc.GDN()(modulated_tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(total_filters_num, (5, 5),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(tensor_gdn_0)
            vector = conds[1]
            modulated_tensor = tensor * vector

            with tf.variable_scope("gnd_an_1"):
                tensor_gdn_1 = tfc.GDN()(modulated_tensor)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(total_filters_num, (5, 5),
                                     corr=True,
                                     strides_down=2,
                                     padding="same_zeros",
                                     use_bias=False,
                                     activation=None)
            tensor = layer(tensor_gdn_1)
            vector = conds[2]
            modulated_tensor = tensor * vector

            with tf.variable_scope("gnd_an_2"):
                tensor_gdn_2 = tfc.GDN()(modulated_tensor)

        return tensor_gdn_2
Example #25
def demodulated_synthesis_transform(tensor, conds, total_filters_num):
    """Builds the demodulated synthesis transform."""

    with tf.variable_scope("synthesis"):
        with tf.variable_scope("layer_0"):
            with tf.variable_scope("gnd_sy_0"):
                tensor_igdn_0 = tfc.GDN(inverse=True)(tensor)
            vector = conds[0]
            demodulated_tensor = tensor_igdn_0 * vector

            layer = tfc.SignalConv2D(total_filters_num, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(demodulated_tensor)

        with tf.variable_scope("layer_1"):
            with tf.variable_scope("gnd_sy_1"):
                tensor_igdn_1 = tfc.GDN(inverse=True)(tensor)
            vector = conds[1]
            demodulated_tensor = tensor_igdn_1 * vector

            layer = tfc.SignalConv2D(total_filters_num, (5, 5),
                                     corr=False,
                                     strides_up=2,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(demodulated_tensor)

        with tf.variable_scope("layer_2"):
            with tf.variable_scope("gnd_sy_2"):
                tensor_igdn_2 = tfc.GDN(inverse=True)(tensor)
            vector = conds[2]
            demodulated_tensor = tensor_igdn_2 * vector

            layer = tfc.SignalConv2D(3, (9, 9),
                                     corr=False,
                                     strides_up=4,
                                     padding="same_zeros",
                                     use_bias=True,
                                     activation=None)
            tensor = layer(demodulated_tensor)

        return tensor
Example #26
def synthesis_transform(tensor, num_filters):
    """Builds the synthesis transform."""

    with tf.variable_scope("synthesis", reuse=tf.AUTO_REUSE):
        if args.activation == 'sigmoid':
            with tf.variable_scope('decoder'):
                tensor = tf.nn.sigmoid(tf.layers.dense(tensor, args.dim1))
                tensor = tf.layers.dense(tensor, 4 * 4 * num_filters)
        elif args.activation == 'softplus':
            with tf.variable_scope('decoder'):
                tensor = tf.nn.softplus(tf.layers.dense(tensor, args.dim1))
                if args.ac2 == 'True':
                    tensor = tf.nn.softplus(tf.layers.dense(tensor, 4 * 4 * num_filters))
                else:
                    tensor = tf.layers.dense(tensor, 4 * 4 * num_filters)
        elif args.activation == 'None':
            with tf.variable_scope('decoder'):
                tensor = tf.layers.dense(tensor, 4 * 4 * num_filters)
        with tf.variable_scope('reshape'):
            # dense layer
            tensor = tf.reshape(tensor, [-1, 4, 4, num_filters])

        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)

        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)

        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters // 2, (5, 5), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=tfc.GDN(inverse=True))
            tensor = layer(tensor)

        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                3, (9, 9), corr=False, strides_up=2, padding="same_zeros",
                use_bias=True, activation=None)
            tensor = layer(tensor)

        return tensor
Example #27
def GFR_Decoder_Module(inputs, name_prefix, num_filter, kernel_size, stride, activation=None):
    conv = tfc.SignalConv2D(num_filter, kernel_size, corr=False, strides_up=stride, padding="same_zeros", use_bias=True,
                            activation=tfc.GDN(inverse=True), name=name_prefix + '_conv')(inputs)
    if activation == 'prelu':
        conv = PReLU(shared_axes=[1,2], name=name_prefix + '_prelu')(conv)
    elif activation == 'sigmoid':
        conv = Activation('sigmoid', name=name_prefix + '_sigmoid')(conv)
    return conv
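A call sketch for the block above; the input tensor, name prefix, filter count, and stride are illustrative assumptions:
# Hypothetical usage: upsample a feature map by 2x, apply IGDN inside the block,
# then a PReLU on top.
x = GFR_Decoder_Module(x, name_prefix='dec_0', num_filter=256,
                       kernel_size=(5, 5), stride=2, activation='prelu')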
Example #28
 def __init__(self):
   super().__init__()
   conv = functools.partial(tfc.SignalConv2D, corr=False, strides_up=2,
                            padding="same_zeros", use_bias=True)
   layers = [
       conv(192, (5, 5), name="layer_0",
            activation=tfc.GDN(name="igdn_0", inverse=True)),
       conv(192, (5, 5), name="layer_1",
            activation=tfc.GDN(name="igdn_1", inverse=True)),
       conv(192, (5, 5), name="layer_2",
            activation=tfc.GDN(name="igdn_2", inverse=True)),
       conv(3, (5, 5), name="layer_3",
            activation=None),
       tf.keras.layers.Lambda(lambda x: x * 255.),
   ]
   for layer in layers:
     self.add(layer)
Example #29
 def _run_gdn(self, x, shape, inverse, rectify, data_format):
     inputs = tf.placeholder(tf.float32, shape)
     layer = tfc.GDN(inverse=inverse,
                     rectify=rectify,
                     data_format=data_format)
     outputs = layer(inputs)
     with self.test_session() as sess:
         tf.global_variables_initializer().run()
         y, = sess.run([outputs], {inputs: x})
     return y
Example #30
 def build(self, input_shape):
     
     self.layers = [
         tfc.SignalConv2D(filters=self.num_filters, 
                          kernel_support=(5, 5), 
                          name="layer_0", 
                          corr=False, 
                          strides_up=2,
                          padding=self.padding, 
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_0", inverse=True)),
         
         tfc.SignalConv2D(filters=self.num_filters, 
                          kernel_support=(5, 5), 
                          name="layer_1", 
                          corr=False, 
                          strides_up=2,
                          padding=self.padding, 
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_1", inverse=True)),
         
         tfc.SignalConv2D(filters=self.num_filters, 
                          kernel_support=(5, 5), 
                          name="layer_2", 
                          corr=False, 
                          strides_up=2,
                          padding=self.padding, 
                          use_bias=True,
                          activation=tfc.GDN(name="igdn_2", inverse=True)),
         
         # The output always has 3 channels
         tfc.SignalConv2D(filters=3, 
                          kernel_support=(5, 5), 
                          name="layer_3", 
                          corr=False, 
                          strides_up=2,
                          padding=self.padding, 
                          use_bias=True,
                          activation=None),
        # tf.nn.sigmoid
     ]
     
     super(SynthesisTransform_1, self).build(input_shape)