def encoder_li(tensor):
    """Forward (analysis) transform under variable scope "analysis".

    Four convolution stages: a 9x9 stride-4 stem, two 5x5 stride-2 stages,
    and a linear 1x1 head.  Returns the head output together with the
    intermediate tensor captured right after layer_2 (before the 1x1 head).
    """
    # Per-stage spec: (filters, kernel, downsampling, use_bias, GDN?).
    stage_specs = [
        (NUM_FILTERS, (9, 9), 4, True, True),
        (NUM_FILTERS, (5, 5), 2, True, True),
        (NUM_FILTERS, (5, 5), 2, False, True),
        (NUM_FILTERS, (1, 1), 1, False, False),
    ]
    branch_point = None
    with tf.variable_scope("analysis"):
        for idx, (filters, kernel, down, bias, use_gdn) in enumerate(stage_specs):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    filters, kernel, corr=True, strides_down=down,
                    padding="same_zeros", use_bias=bias,
                    activation=tfc.GDN() if use_gdn else None)
                tensor = stage(tensor)
            if idx == 2:
                # Snapshot taken before the final 1x1 projection.
                branch_point = tensor
    return tensor, branch_point
def decoder_li(tensor):
    """Inverse (synthesis) transform under variable scope "synthesis".

    A resolution-preserving 1x1 IGDN stage, two 5x5 stride-2 upsampling
    stages, and a linear 9x9 stride-4 head mapping back to 3 channels.
    """
    with tf.variable_scope("synthesis"):
        with tf.variable_scope("layer_0"):
            # 1x1 stage: channel mixing only, no resolution change.
            stage = tfc.SignalConv2D(
                NUM_FILTERS, (1, 1), corr=True, strides_down=1,
                padding="same_zeros", use_bias=True,
                activation=tfc.GDN(inverse=True))
            tensor = stage(tensor)
        # Remaining stages: (filters, kernel, upsampling, IGDN?).
        up_specs = [
            (NUM_FILTERS, (5, 5), 2, True),
            (NUM_FILTERS, (5, 5), 2, True),
            (3, (9, 9), 4, False),
        ]
        for idx, (filters, kernel, up, use_igdn) in enumerate(up_specs, start=1):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    filters, kernel, corr=False, strides_up=up,
                    padding="same_zeros", use_bias=True,
                    activation=tfc.GDN(inverse=True) if use_igdn else None)
                tensor = stage(tensor)
    return tensor
def synthesis_transform_hs(tensor, num_filters, M):
    """Builds the (hyper-)synthesis transform.

    Two 5x5 stride-2 upsampling convolutions with ReLU, then a 3x3
    resolution-preserving projection to M channels.

    Fixes vs. the original:
      * layer_2's activation was the typo string 'rule';
      * string activations ('relu') are not valid for tfc.SignalConv2D,
        which expects a callable — all ReLU activations now use tf.nn.relu.
    """
    with tf.variable_scope("synthesis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=False, strides_up=2,
                padding="same_zeros", use_bias=True, activation=tf.nn.relu)
            tensor = layer(tensor)
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=False, strides_up=2,
                padding="same_zeros", use_bias=True, activation=tf.nn.relu)
            tensor = layer(tensor)
        with tf.variable_scope("layer_2"):
            # 'rule' was almost certainly intended as 'relu' — keep ReLU here.
            layer = tfc.SignalConv2D(
                M, (3, 3), corr=False, strides_up=1,
                padding="same_zeros", use_bias=True, activation=tf.nn.relu)
            tensor = layer(tensor)
    return tensor
def analysis_transform_ha(tensor, num_filters):
    """Builds the hyper-analysis transform.

    Operates on the absolute values of the input: a 3x3 stride-1 stage
    followed by two 5x5 stride-2 stages; the last stage is linear and
    bias-free.

    Fixes vs. the original:
      * layer_0's activation was the typo string 'rule';
      * string activations ('relu') are not valid for tfc.SignalConv2D,
        which expects a callable — ReLU activations now use tf.nn.relu.
    """
    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (3, 3), corr=True, strides_down=1,
                padding="same_zeros", use_bias=True, activation=tf.nn.relu)
            # Hyper-encoder conventionally consumes magnitudes of the latents.
            tensor = layer(tf.math.abs(tensor))
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=True, activation=tf.nn.relu)
            tensor = layer(tensor)
        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=False, activation=None)
            tensor = layer(tensor)
    return tensor
def build(self, input_shape):
    """Creates the four SignalConv2D sublayers, then completes Keras building.

    A 3x3 stride-1 stem, two 3x3 downsampling stages, and a linear 1x1
    head with twice the filter count.
    """
    nf = self.num_filters
    lrelu = tf.nn.leaky_relu
    stem = tfc.SignalConv2D(nf, (3, 3), name="layer_0", corr=False,
                            strides_up=1, padding="same_zeros",
                            use_bias=True, activation=lrelu)
    down_a = tfc.SignalConv2D(nf, (3, 3), name="layer_1", corr=False,
                              strides_down=2, padding="same_zeros",
                              use_bias=True, activation=lrelu)
    down_b = tfc.SignalConv2D(nf, (3, 3), name="layer_2", corr=False,
                              strides_down=2, padding="same_zeros",
                              use_bias=True, activation=None)
    head = tfc.SignalConv2D(nf * 2, (1, 1), name="layer_3", corr=False,
                            strides_down=1, padding="same_zeros",
                            use_bias=True, activation=None)
    self._layers = [stem, down_a, down_b, head]
    super(FTransform, self).build(input_shape)
def analysis_transform_e4(tensor, conv_filters_num, num_filters):
    """Builds the "e4" analysis transform (all scopes reusable via AUTO_REUSE).

    Two GDN downsampling stages followed by a linear, bias-free bottleneck
    stage with num_filters output channels.
    """
    # Per-stage spec: (filters, kernel, downsampling, use_bias, GDN?).
    stage_specs = [
        (conv_filters_num, (9, 9), 4, True, True),
        (conv_filters_num, (5, 5), 2, True, True),
        (num_filters, (5, 5), 2, False, False),
    ]
    with tf.variable_scope("e4_pre256", reuse=tf.AUTO_REUSE):
        for idx, (filters, kernel, down, bias, use_gdn) in enumerate(stage_specs):
            with tf.variable_scope("e4_enc_layer_%d" % idx, reuse=tf.AUTO_REUSE):
                stage = tfc.SignalConv2D(
                    filters, kernel, corr=True, strides_down=down,
                    padding="same_zeros", use_bias=bias,
                    activation=tfc.GDN() if use_gdn else None)
                tensor = stage(tensor)
    return tensor
def analysis_transform(tensor, num_filters):
    """Builds the analysis transform.

    A 9x9 stride-4 GDN stem, a 5x5 stride-2 GDN stage, and a final linear
    bias-free 5x5 stride-2 stage (16x total downsampling).
    """
    # Per-stage spec: (kernel, downsampling, use_bias, GDN?).
    stage_specs = [
        ((9, 9), 4, True, True),
        ((5, 5), 2, True, True),
        ((5, 5), 2, False, False),
    ]
    with tf.variable_scope("analysis"):
        for idx, (kernel, down, bias, use_gdn) in enumerate(stage_specs):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    num_filters, kernel, corr=True, strides_down=down,
                    padding="same_zeros", use_bias=bias,
                    activation=tfc.GDN() if use_gdn else None)
                tensor = stage(tensor)
    return tensor
def importance_map(tensor):
    """Builds the importance-map subnetwork: three identical 1x1 sigmoid
    convolutions under variable scope "importance" (resolution preserved)."""
    with tf.variable_scope("importance"):
        for idx in range(3):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    NUM_FILTERS, (1, 1), corr=True, strides_down=1,
                    padding="same_zeros", use_bias=True,
                    activation=tf.nn.sigmoid)
                tensor = stage(tensor)
    return tensor
def depth_analysis_transform_2(rgb_tensor, depth_tensor, num_filters):
    """Builds the RGB-D analysis transform with early fusion.

    RGB and depth are concatenated on the channel axis first, then passed
    through a 9x9 stride-4 ReLU stem, a 5x5 stride-2 ReLU stage, and a
    linear bias-free 5x5 stride-2 bottleneck stage.
    """
    # Per-stage spec: (kernel, downsampling, use_bias, activation).
    stage_specs = [
        ((9, 9), 4, True, tf.nn.relu),
        ((5, 5), 2, True, tf.nn.relu),
        ((5, 5), 2, False, None),
    ]
    with tf.variable_scope("analysis"):
        # --------------------------------------- fusion
        fused = tf.concat([rgb_tensor, depth_tensor], 3)
        for idx, (kernel, down, bias, act) in enumerate(stage_specs):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    num_filters, kernel, corr=True, strides_down=down,
                    padding="same_zeros", use_bias=bias, activation=act)
                fused = stage(fused)
    return fused
def synthesis_transform(tensor, conv_filters_num, num_filters):
    """Builds the synthesis transform (scope "pre256", AUTO_REUSE).

    Two IGDN upsampling stages and a linear 9x9 stride-4 head to 3
    channels; each stage's output is also recorded in the "pre256" graph
    collection.  Note: ``num_filters`` is accepted but unused, kept for
    signature compatibility with callers.
    """
    # Per-stage spec: (filters, kernel, upsampling, IGDN?).
    stage_specs = [
        (conv_filters_num, (5, 5), 2, True),
        (conv_filters_num, (5, 5), 2, True),
        (3, (9, 9), 4, False),
    ]
    with tf.variable_scope("pre256", reuse=tf.AUTO_REUSE):
        for idx, (filters, kernel, up, use_igdn) in enumerate(stage_specs):
            with tf.variable_scope("dec_layer_%d" % idx, reuse=tf.AUTO_REUSE):
                stage = tfc.SignalConv2D(
                    filters, kernel, corr=False, strides_up=up,
                    padding="same_zeros", use_bias=True,
                    activation=tfc.GDN(inverse=True) if use_igdn else None)
                tensor = stage(tensor)
                tf.add_to_collection("pre256", tensor)
    return tensor
def __init__(self, n_channels, name="decoder", **kwargs):
    """Five-stage decoder: four IGDN stages interleaved with PReLU, then a
    9x9 stride-2 head mapping to n_channels with a sigmoid output."""
    super(Decoder, self).__init__(name=name, **kwargs)
    self.data_format = "channels_last"
    num_filters = 256

    def up_conv(filters, kernel, conv_name, up, act):
        # Shared settings for every (transposed-style) stage.
        return tfc.SignalConv2D(
            filters, kernel, name=conv_name, corr=False, strides_up=up,
            padding="same_zeros", use_bias=True, activation=act)

    def prelu():
        # Fresh PReLU per stage; parameters shared over spatial axes.
        return layers.PReLU(shared_axes=[1, 2])

    self.sublayers = [
        up_conv(num_filters, (5, 5), "layer_out", 1,
                tfc.GDN(name="igdn_out", inverse=True)),
        prelu(),
        up_conv(num_filters, (5, 5), "layer_0", 1,
                tfc.GDN(name="igdn_0", inverse=True)),
        prelu(),
        up_conv(num_filters, (5, 5), "layer_1", 1,
                tfc.GDN(name="igdn_1", inverse=True)),
        prelu(),
        up_conv(num_filters, (5, 5), "layer_2", 2,
                tfc.GDN(name="igdn_2", inverse=True)),
        prelu(),
        up_conv(n_channels, (9, 9), "layer_3", 2, tf.nn.sigmoid),
    ]
def __init__(self, conv_depth, name="encoder", **kwargs):
    """Five-stage encoder: four GDN stages interleaved with PReLU (the
    first two downsample by 2), then a linear 5x5 head to conv_depth
    channels."""
    super(Encoder, self).__init__(name=name, **kwargs)
    self.data_format = "channels_last"
    num_filters = 256

    def down_conv(filters, kernel, conv_name, down, act):
        # Shared settings for every forward-correlation stage.
        return tfc.SignalConv2D(
            filters, kernel, name=conv_name, corr=True, strides_down=down,
            padding="same_zeros", use_bias=True, activation=act)

    def prelu():
        # Fresh PReLU per stage; parameters shared over spatial axes.
        return layers.PReLU(shared_axes=[1, 2])

    self.sublayers = [
        down_conv(num_filters, (9, 9), "layer_0", 2, tfc.GDN(name="gdn_0")),
        prelu(),
        down_conv(num_filters, (5, 5), "layer_1", 2, tfc.GDN(name="gdn_1")),
        prelu(),
        down_conv(num_filters, (5, 5), "layer_2", 1, tfc.GDN(name="gdn_2")),
        prelu(),
        down_conv(num_filters, (5, 5), "layer_3", 1, tfc.GDN(name="gdn_3")),
        prelu(),
        down_conv(conv_depth, (5, 5), "layer_out", 1, None),
    ]
def synthesis_transform(tensor, num_filters):
    """Builds the synthesis transform.

    Four IGDN stages (the second preserves resolution) followed by a
    linear 9x9 stride-4 head to 3 channels.
    """
    # Per-stage spec: (filters, kernel, upsampling, IGDN?).
    stage_specs = [
        (num_filters, (5, 5), 2, True),
        (num_filters, (5, 5), 1, True),  # resolution-preserving stage
        (num_filters, (5, 5), 2, True),
        (num_filters, (5, 5), 2, True),
        (3, (9, 9), 4, False),
    ]
    with tf.variable_scope("synthesis"):
        for idx, (filters, kernel, up, use_igdn) in enumerate(stage_specs):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    filters, kernel, corr=False, strides_up=up,
                    padding="same_zeros", use_bias=True,
                    activation=tfc.GDN(inverse=True) if use_igdn else None)
                tensor = stage(tensor)
    return tensor
def __init__(self, num_filters):
    """Hyper-analysis transform: a 3x3 stride-1 ReLU stage, a 5x5 stride-2
    ReLU stage, and a linear bias-free 5x5 stride-2 stage."""
    super().__init__(name="hyper_analysis")
    # Per-stage spec: (kernel, downsampling, use_bias, activation).
    stage_specs = [
        ((3, 3), 1, True, tf.nn.relu),
        ((5, 5), 2, True, tf.nn.relu),
        ((5, 5), 2, False, None),
    ]
    for idx, (kernel, down, bias, act) in enumerate(stage_specs):
        self.add(tfc.SignalConv2D(
            num_filters, kernel, name="layer_%d" % idx, corr=True,
            strides_down=down, padding="same_zeros", use_bias=bias,
            activation=act))
def build(self, input_shape):
    """Creates four 5x5 stride-2 sublayers (GDN on the first three, linear
    last), then completes Keras building."""
    convs = []
    for idx in range(4):
        # The final stage outputs the latent and has no activation.
        act = tfc.GDN(name="gdn_%d" % idx) if idx < 3 else None
        convs.append(tfc.SignalConv2D(
            self.num_filters, (5, 5), name="layer_%d" % idx, corr=True,
            strides_down=2, padding="same_zeros", use_bias=True,
            activation=act))
    self._layers = convs
    super(AnalysisTransform, self).build(input_shape)
def __init__(self, num_filters):
    """Analysis transform: scales input from [0, 255] to [0, 1], then four
    5x5 stride-2 stages (GDN on the first three, linear last)."""
    super().__init__(name="analysis")
    # Normalize 8-bit pixel values into [0, 1] before the convolutions.
    self.add(tf.keras.layers.Lambda(lambda x: x / 255.))
    for idx in range(4):
        act = tfc.GDN(name="gdn_%d" % idx) if idx < 3 else None
        self.add(tfc.SignalConv2D(
            num_filters, (5, 5), name="layer_%d" % idx, corr=True,
            strides_down=2, padding="same_zeros", use_bias=True,
            activation=act))
def _make_synthesis(syn_name):
    """Returns a hyper-synthesis Sequential whose layer names embed
    ``syn_name``: two 5x5 stride-2 ReLU stages and a linear 3x3 head to
    ``num_chan_bottleneck`` channels (plain-variable kernels)."""
    def up_conv(filters, kernel, suffix, **extra):
        # Common settings; kernel_parameterizer=None stores the kernel as
        # a plain variable rather than an RDFT-parameterized one.
        return tfc.SignalConv2D(
            filters, kernel, name=f"layer_{syn_name}_{suffix}", corr=False,
            padding="same_zeros", use_bias=True,
            kernel_parameterizer=None, **extra)

    return tf.keras.Sequential([
        up_conv(num_filters, (5, 5), 0, strides_up=2, activation=tf.nn.relu),
        up_conv(num_filters, (5, 5), 1, strides_up=2, activation=tf.nn.relu),
        up_conv(num_chan_bottleneck, (3, 3), 2, activation=None),
    ], name="HyperSynthesis")
def build(self, input_shape):
    """Creates the hyper-synthesis sublayers (two 5x5 stride-2 ReLU stages
    and a linear 3x3 head), then completes Keras building."""
    def up_conv(kernel, suffix, up, act):
        # kernel_parameterizer=None keeps the kernel as a plain variable.
        return tfc.SignalConv2D(
            self.num_filters, kernel, name="layer_%d" % suffix, corr=False,
            strides_up=up, padding="same_zeros", use_bias=True,
            kernel_parameterizer=None, activation=act)

    self._layers = [
        up_conv((5, 5), 0, 2, tf.nn.relu),
        up_conv((5, 5), 1, 2, tf.nn.relu),
        up_conv((3, 3), 2, 1, None),
    ]
    super(HyperSynthesisTransform, self).build(input_shape)
def build(self, input_shape):
    """Creates a depthwise-separable analysis transform, then completes
    Keras building: a 9x9 stride-4 GDN stem, followed by two pairs of
    (5x5 stride-2 depthwise conv + 1x1 pointwise conv)."""
    def depthwise(tag):
        # channel_separable with a single filter: per-channel spatial
        # filtering and downsampling only, no channel mixing, linear.
        return tfc.SignalConv2D(
            1, (5, 5), name="layer_%sdw" % tag, corr=True, strides_down=2,
            padding="same_zeros", use_bias=True, channel_separable=True,
            activation=None)

    def pointwise(tag, act):
        # 1x1 convolution mixes channels after the depthwise step.
        return tfc.SignalConv2D(
            self.num_filters, (1, 1), name="layer_%spw" % tag, corr=True,
            strides_down=1, padding="same_zeros", use_bias=True,
            activation=act)

    stem = tfc.SignalConv2D(
        self.num_filters, (9, 9), name="layer_0", corr=True, strides_down=4,
        padding="same_zeros", use_bias=True,
        activation=tfc.GDN(name="gdn_0"))
    self._layers = [
        stem,
        depthwise("1"), pointwise("1", tfc.GDN(name="gdn_1")),
        depthwise("2"), pointwise("2", None),  # linear latent output
    ]
    super(AnalysisTransform, self).build(input_shape)
def build(self, input_shape):
    """Creates the hyper-analysis sublayers (3x3 stride-1 ReLU, 5x5
    stride-2 ReLU, linear bias-free 5x5 stride-2), then completes Keras
    building."""
    # Per-stage spec: (kernel, downsampling, use_bias, activation).
    stage_specs = [
        ((3, 3), 1, True, tf.nn.relu),
        ((5, 5), 2, True, tf.nn.relu),
        ((5, 5), 2, False, None),
    ]
    self._layers = [
        tfc.SignalConv2D(self.num_filters, kernel, name="layer_%d" % idx,
                         corr=True, strides_down=down, padding="same_zeros",
                         use_bias=bias, activation=act)
        for idx, (kernel, down, bias, act) in enumerate(stage_specs)
    ]
    super(HyperAnalysisTransform, self).build(input_shape)
def __init__(self, num_filters):
    """Synthesis transform: four 5x5 stride-2 upsampling stages (IGDN on
    the first three, linear 3-channel head), then rescaling to [0, 255]."""
    super().__init__(name="synthesis")
    for idx in range(3):
        self.add(tfc.SignalConv2D(
            num_filters, (5, 5), name="layer_%d" % idx, corr=False,
            strides_up=2, padding="same_zeros", use_bias=True,
            activation=tfc.GDN(name="igdn_%d" % idx, inverse=True)))
    # Final stage maps back to 3 image channels with no activation.
    self.add(tfc.SignalConv2D(
        3, (5, 5), name="layer_3", corr=False, strides_up=2,
        padding="same_zeros", use_bias=True, activation=None))
    # Undo the encoder-side 1/255 normalization.
    self.add(tf.keras.layers.Lambda(lambda x: x * 255.))
def build(self, input_shape):
    """Creates three 1x1 convolutions (leaky-ReLU on the first two, linear
    last), then completes Keras building."""
    activations = [tf.nn.leaky_relu, tf.nn.leaky_relu, None]
    self.layers = [
        tfc.SignalConv2D(self.num_filters, (1, 1), name="layer_%d" % idx,
                         corr=True, strides_down=1, padding="same_zeros",
                         use_bias=True, activation=act)
        for idx, act in enumerate(activations)
    ]
    super().build(input_shape)
def __init__(self, num_filters):
    """Hyper-synthesis transform: two 5x5 stride-2 ReLU upsampling stages
    and a linear 3x3 head.

    Fix: the original passed ``kernel_parameter="variable"``, which is not
    a ``tfc.SignalConv2D`` argument and raises TypeError at construction.
    The intended setting appears to be ``kernel_parameterizer=None``
    (store the kernel as a plain variable), matching the other
    hyper-synthesis transforms in this file — TODO confirm against the
    original author's intent.
    """
    super().__init__(name="hyper_synthesis")
    self.add(
        tfc.SignalConv2D(num_filters, (5, 5), name="layer_0", corr=False,
                         strides_up=2, padding="same_zeros", use_bias=True,
                         kernel_parameterizer=None, activation=tf.nn.relu))
    self.add(
        tfc.SignalConv2D(num_filters, (5, 5), name="layer_1", corr=False,
                         strides_up=2, padding="same_zeros", use_bias=True,
                         kernel_parameterizer=None, activation=tf.nn.relu))
    self.add(
        tfc.SignalConv2D(num_filters, (3, 3), name="layer_2", corr=False,
                         strides_up=1, padding="same_zeros", use_bias=True,
                         kernel_parameterizer=None, activation=None))
def build(self, input_shape):
    """Creates the synthesis sublayers (two 5x5 stride-2 IGDN stages and a
    linear 9x9 stride-4 head to 3 channels), then completes Keras
    building."""
    igdn_a = tfc.SignalConv2D(
        self.num_filters, (5, 5), name="layer_0", corr=False, strides_up=2,
        padding="same_zeros", use_bias=True,
        activation=tfc.GDN(name="igdn_0", inverse=True))
    igdn_b = tfc.SignalConv2D(
        self.num_filters, (5, 5), name="layer_1", corr=False, strides_up=2,
        padding="same_zeros", use_bias=True,
        activation=tfc.GDN(name="igdn_1", inverse=True))
    rgb_head = tfc.SignalConv2D(
        3, (9, 9), name="layer_2", corr=False, strides_up=4,
        padding="same_zeros", use_bias=True, activation=None)
    self._layers = [igdn_a, igdn_b, rgb_head]
    super(SynthesisTransform, self).build(input_shape)
def hyper_synthesis(tensor, num_filters):
    """Builds the hyper synthesis transform.

    Each 5x5 layer of the reference architecture is factored into two 3x3
    layers; the channel count widens from num_filters to 1.5x and finally
    2x num_filters at the linear output stage.

    Fix: the widened stages computed their filter count as
    ``num_filters * 1.5`` — a float, which is not a valid filter count for
    tfc.SignalConv2D. Integer arithmetic (``num_filters * 3 // 2``) is
    used instead; identical value for even num_filters.
    """
    widened = num_filters * 3 // 2  # 1.5x width, as an int
    with tf.variable_scope("hyper_synthesis", reuse=tf.AUTO_REUSE):
        # One 5x5 is replaced by two 3x3 filters
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (3, 3), corr=False, strides_up=1,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.leaky_relu, name='signal_conv2d')
            tensor = layer(tensor)
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (3, 3), corr=False, strides_up=2,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.leaky_relu, name='signal_conv2d')
            tensor = layer(tensor)
        # One 5x5 is replaced by two 3x3 filters
        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                widened, (3, 3), corr=False, strides_up=1,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.leaky_relu, name='signal_conv2d')
            tensor = layer(tensor)
        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                widened, (3, 3), corr=False, strides_up=2,
                padding="same_zeros", use_bias=True,
                activation=tf.nn.leaky_relu, name='signal_conv2d')
            tensor = layer(tensor)
        with tf.variable_scope("layer_4"):
            # Linear output stage at double width (e.g. mean + scale).
            layer = tfc.SignalConv2D(
                num_filters * 2, (3, 3), corr=False, strides_up=1,
                padding="same_zeros", use_bias=True,
                activation=None, name='signal_conv2d')
            tensor = layer(tensor)
    return tensor
def hyper_analysis(tensor, num_filters):
    """Build the analysis transform in hyper.

    Five 3x3 stages (each reference 5x5 is factored into two 3x3 layers);
    stages 2 and 4 downsample by 2 and the final stage is linear.
    """
    # Per-stage spec: (downsampling, activation).
    stage_specs = [
        (1, tf.nn.leaky_relu),
        (1, tf.nn.leaky_relu),  # second half of the factored 5x5
        (2, tf.nn.leaky_relu),
        (1, tf.nn.leaky_relu),  # second half of the factored 5x5
        (2, None),
    ]
    with tf.variable_scope("hyper_analysis"):
        for idx, (down, act) in enumerate(stage_specs):
            with tf.variable_scope("layer_%d" % idx):
                stage = tfc.SignalConv2D(
                    num_filters, (3, 3), corr=True, strides_down=down,
                    padding="same_zeros", use_bias=True, activation=act,
                    name='signal_conv2d')
                tensor = stage(tensor)
    return tensor
def __init__(self, n_channels, name="decoder", **kwargs):
    """Compact decoder: three resolution-preserving 32-filter stages, a
    stride-2 16-filter stage, and a stride-2 sigmoid head to n_channels;
    PReLU between stages. Stages without an explicit activation use the
    SignalConv2D default."""
    super(Decoder, self).__init__(name=name, **kwargs)
    self.data_format = "channels_last"

    def up_stage(filters, conv_name, up, **extra):
        # Shared settings for every decoder stage.
        return tfc.SignalConv2D(
            filters, (5, 5), name=conv_name, corr=False, strides_up=up,
            padding="same_zeros", use_bias=True, **extra)

    def prelu():
        return layers.PReLU(shared_axes=[1, 2])

    self.sublayers = [
        up_stage(32, "conv_1", 1), prelu(),
        up_stage(32, "conv_2", 1), prelu(),
        up_stage(32, "conv_3", 1), prelu(),
        up_stage(16, "conv_4", 2), prelu(),
        up_stage(n_channels, "conv_5", 2, activation=tf.nn.sigmoid),
    ]
def __init__(self, conv_depth, name="encoder", **kwargs):
    """Compact encoder: two stride-2 stages (16 then 32 filters), two
    resolution-preserving 32-filter stages, and a linear head to
    conv_depth channels; PReLU between stages. Stages without an explicit
    activation use the SignalConv2D default."""
    super(Encoder, self).__init__(name=name, **kwargs)
    self.data_format = "channels_last"

    def down_stage(filters, conv_name, down, **extra):
        # Shared settings for every encoder stage.
        return tfc.SignalConv2D(
            filters, (5, 5), name=conv_name, corr=True, strides_down=down,
            padding="same_zeros", use_bias=True, **extra)

    def prelu():
        return layers.PReLU(shared_axes=[1, 2])

    self.sublayers = [
        down_stage(16, "conv_1", 2), prelu(),
        down_stage(32, "conv_2", 2), prelu(),
        down_stage(32, "conv_3", 1), prelu(),
        down_stage(32, "conv_4", 1), prelu(),
        down_stage(conv_depth, "conv_5", 1, activation=None),
    ]
def depth_analysis_transform_3(rgb_tensor, depth_tensor, num_filters):
    """Builds the analysis transform."""
    # Two-branch RGB-D encoder: each modality is downsampled 4x by its own
    # 9x9 stem, the branches are fused with SSMA, and the fused tensor is
    # downsampled a further 4x (16x total).
    with tf.variable_scope("analysis"):
        # --------------------------------------- rgb branch
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(num_filters, (9, 9), corr=True,
                                     strides_down=4, padding="same_zeros",
                                     use_bias=True, activation=tf.nn.relu)
            rgb_tensor = layer(rgb_tensor)
        # --------------------------------------- depth branch
        with tf.variable_scope("layer_d0"):
            layer = tfc.SignalConv2D(num_filters, (9, 9), corr=True,
                                     strides_down=4, padding="same_zeros",
                                     use_bias=True, activation=tf.nn.relu)
            depth_tensor = layer(depth_tensor)
        # --------------------------------------- fusion
        # Histograms of both branch outputs for TensorBoard inspection.
        tf.summary.histogram('rgb_tensor', rgb_tensor)
        tf.summary.histogram('depth_tensor', depth_tensor)
        # fuse_SSMA_noBatch is defined elsewhere in the project; presumably
        # an SSMA-style attention fusion of the two branches.
        # NOTE(review): training=True is hard-coded here — confirm this is
        # intended for evaluation/inference graphs as well.
        tensor = fuse_SSMA_noBatch('SSMA_fusion', rgb_tensor, depth_tensor,
                                   training=True, C=num_filters)
        # Alternative fusion strategies kept from earlier experiments:
        #tensor = rgb_tensor + depth_tensor
        #tensor = tf.concat([rgb_tensor, depth_tensor], 3)
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(num_filters, (5, 5), corr=True,
                                     strides_down=2, padding="same_zeros",
                                     use_bias=True, activation=tf.nn.relu)
            tensor = layer(tensor)
        with tf.variable_scope("layer_2"):
            # Final bottleneck stage: linear and bias-free.
            layer = tfc.SignalConv2D(num_filters, (5, 5), corr=True,
                                     strides_down=2, padding="same_zeros",
                                     use_bias=False, activation=None)
            tensor = layer(tensor)
    return tensor
def analysis_transform(tensor, num_filters):
    """Builds the analysis transform followed by a dense latent head.

    Four GDN downsampling stages, a flatten, and then — depending on the
    module-level ``args.activation`` — one of three dense heads:
      * 'sigmoid':  sigmoid dense layer of args.dim1, then a linear dense
                    layer of args.z (deterministic latent);
      * 'softplus': VAE head — softplus dense layer of args.dim1, dense
                    ``mean`` and ``sigma`` (log-variance) layers of args.z,
                    and a reparameterized sample z = mean + exp(sigma/2)*eps;
      * 'None':     single linear dense layer of args.z.

    Returns (z, mean, sigma); ``mean`` and ``sigma`` are None for the
    non-variational heads.

    Fixes vs. the original: ``z``, ``mean`` and ``sigma`` were assigned
    only in the 'softplus' branch, so the other branches crashed with
    UnboundLocalError at the return statement; an unrecognized
    args.activation now raises ValueError instead.
    """
    with tf.variable_scope("analysis"):
        with tf.variable_scope("layer_0"):
            layer = tfc.SignalConv2D(
                num_filters, (9, 9), corr=True, strides_down=2,
                padding="same_zeros", use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)
        with tf.variable_scope("layer_1"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)
        with tf.variable_scope("layer_2"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=True, activation=tfc.GDN())
            tensor = layer(tensor)
        with tf.variable_scope("layer_3"):
            layer = tfc.SignalConv2D(
                num_filters, (5, 5), corr=True, strides_down=2,
                padding="same_zeros", use_bias=False, activation=None)
            tensor = layer(tensor)
        with tf.variable_scope('reshape'):
            tensor = tf.layers.flatten(tensor)
        mean = None
        sigma = None
        if args.activation == 'sigmoid':
            with tf.variable_scope('encoder'):
                tensor = tf.nn.sigmoid(tf.layers.dense(tensor, args.dim1))
                tensor = tf.layers.dense(tensor, args.z)
            z = tensor  # deterministic latent
        elif args.activation == 'softplus':
            with tf.variable_scope('encoder'):
                tensor = tf.nn.softplus(tf.layers.dense(tensor, args.dim1))
                # mean of z
                mean = tf.layers.dense(tensor, args.z)
                # mean of sigma (log-variance); dense layer
                sigma = tf.layers.dense(tensor, args.z)
                # Sampler: Normal (gaussian) random distribution
                eps = tf.random_normal(tf.shape(mean), dtype=tf.float32,
                                       mean=0., stddev=1.0, name='epsilon')
                # reparameterization trick
                z = mean + tf.exp(sigma / 2) * eps
        elif args.activation == 'None':
            with tf.variable_scope('encoder'):
                tensor = tf.layers.dense(tensor, args.z)
            z = tensor  # deterministic latent
        else:
            raise ValueError(
                "Unsupported args.activation: %r" % (args.activation,))
    return z, mean, sigma