Example #1
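A Keras functional-API actor network, apparently from a DDPG agent for the TORCS driving task (note the Steering, Acceleration, and commented-out Brake/Gear heads); both output heads use variance_scaling with a very small scale (1e-4) so the initial actions start near zero.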
    def create_actor_network(self, state_size, action_dim):
        print("Now we build the model")
        with tf.name_scope('ActorNetwork') as scope:
            S = Input(shape=[state_size], name='state')
            h0 = Dense(HIDDEN1_UNITS, activation='relu', name='DenseLayer0')(S)
            h1 = Dense(HIDDEN2_UNITS, activation='relu', name='DenseLayer1')(h0)
            Steering = Dense(
                1,
                name='Steering',
                activation='tanh',
                kernel_initializer=variance_scaling(scale=1e-4,
                                                    distribution='normal'),
                bias_initializer=variance_scaling(scale=1e-4,
                                                  distribution='normal'))(h1)
            # Legacy Keras 1 form: init=lambda shape, name: normal(shape, scale=1e-4, name=name)
            Acceleration = Dense(
                1,
                name='Acceleration',
                activation='tanh',
                kernel_initializer=variance_scaling(scale=1e-4,
                                                    distribution='normal'),
                bias_initializer=variance_scaling(scale=1e-4,
                                                  distribution='normal'))(h1)
            # Legacy Keras 1 form: init=lambda shape, name: normal(shape, scale=1e-4, name=name)
            # Brake = Dense(1, activation='sigmoid',
            #               kernel_initializer=variance_scaling(scale=1e-4, distribution='normal'),
            #               bias_initializer=variance_scaling(scale=1e-4, distribution='normal'))(h1)
            # Gear = Dense(3, activation='softmax')(h1)
            V = concatenate([Steering, Acceleration], name='Action')  # , Brake])
            model = Model(inputs=S, outputs=V)
            return model, model.trainable_weights, S
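The listing omits the module header this snippet relies on. A plausible reconstruction, assuming a TF 1.x-era Keras setup (the variance_scaling alias and the HIDDEN*_UNITS values are assumptions, not part of the original project):

import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, concatenate
from tensorflow.keras.models import Model
# `variance_scaling` presumably aliases the Keras VarianceScaling initializer;
# note that newer TF releases spell the distribution 'truncated_normal' or
# 'untruncated_normal' rather than 'normal'.
from tensorflow.keras.initializers import VarianceScaling as variance_scaling

HIDDEN1_UNITS = 300  # assumed widths; the real constants live elsewhere in the project
HIDDEN2_UNITS = 600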
Example #2
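A two-branch tf.slim graph for fused hyperspectral/LiDAR input: the bands are split, each branch is processed separately, and the flattened features are concatenated before the fully connected head; He-style variance_scaling(scale=2.0) and L2 regularization are applied to every layer through arg_scope.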
    def create_tensor_graph(self, model_input_params, class_count, algorithm_params):
        with tf.device(model_input_params.device_id):
            with slim.arg_scope([slim.conv2d, slim.fully_connected],
                                weights_initializer=initializers.variance_scaling(scale=2.0),
                                weights_regularizer=slim.l2_regularizer(algorithm_params["l2regularizer_scale"])):
                band_size = model_input_params.x.get_shape()[3].value
                hs_lidar_groups = tf.split(axis=3, num_or_size_splits=[band_size - 1, 1],
                                           value=model_input_params.x)

                lrelu = lambda inp: slim.nn.leaky_relu(inp, alpha=algorithm_params["lrelu_alpha"])
                bn_training_params = {'is_training': model_input_params.is_training, 'decay': 0.95}

                hs_lidar_diff = algorithm_params["hs_lidar_diff"]
                hs_net = self._create_hs_tensor_branch(algorithm_params, bn_training_params,
                                                       hs_lidar_groups[0][:, hs_lidar_diff:-hs_lidar_diff,
                                                       hs_lidar_diff:-hs_lidar_diff, :], lrelu,
                                                       model_input_params)
                lidar_net = self._create_lidar_tensor_branch(bn_training_params, hs_lidar_groups[1], lrelu,
                                                             model_input_params)

                # net = tf.concat(axis=3, values=[hs_net, lidar_net])
                net = tf.concat(axis=1, values=[slim.flatten(hs_net), slim.flatten(lidar_net)])
                net = self._create_fc_tensor_branch(algorithm_params, bn_training_params, class_count, lrelu,
                                                    model_input_params, net)
        return ModelOutputTensors(y_conv=net, image_output=None, image_original=None, histogram_tensors=[])
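Nearly every example below repeats this pattern, so it is worth isolating: slim.arg_scope binds default keyword arguments once, and every listed op created inside the scope inherits them unless overridden per call. A minimal self-contained sketch, assuming TF 1.x with tf.contrib.slim available (the shapes and widths here are illustrative only):

import tensorflow as tf
from tensorflow import initializers
from tensorflow.contrib import slim

x = tf.placeholder(tf.float32, [None, 9, 9, 10])
with slim.arg_scope([slim.conv2d, slim.fully_connected],
                    weights_initializer=initializers.variance_scaling(scale=2.0),
                    weights_regularizer=slim.l2_regularizer(0.001)):
    net = slim.conv2d(x, 16, [3, 3])  # inherits initializer and regularizer
    net = slim.fully_connected(slim.flatten(net), 10,
                               weights_regularizer=None)  # per-call override, as in the examples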
Example #3
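A shadow-data GAN discriminator over spectral signatures: the (1, 1, bands) input is squeezed to a 1-D sequence and passed through two convolution1d layers, the second operating on the transposed band axis with normalization and activation disabled.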
def _shadowdata_discriminator_model(generated_data,
                                    generator_input,
                                    is_training=True):
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d, slim.convolution1d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.999},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.1))):
        band_size = generated_data.get_shape()[3].value

        net = generated_data
        net = tf.squeeze(net, axis=[1, 2])
        net = tf.expand_dims(net, axis=2)

        net1 = slim.convolution1d(net, band_size, band_size, padding='VALID')

        net2 = slim.convolution1d(tf.transpose(net1, perm=[0, 2, 1]),
                                  band_size,
                                  band_size,
                                  padding='VALID',
                                  normalizer_fn=None,
                                  normalizer_params=None,
                                  activation_fn=None)

    return tf.expand_dims(tf.expand_dims(slim.flatten(net2), axis=1), axis=1)
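All of the slim-based examples pass scale=2.0; with the default mode='fan_in' this is He initialization (weight variance 2/fan_in), the standard match for the ReLU and leaky-ReLU activations used throughout. A quick numeric check, ignoring the truncated-normal correction factor newer TF versions apply:

import numpy as np

fan_in, scale = 64, 2.0
stddev = np.sqrt(scale / fan_in)  # He init: Var(W) = scale / fan_in = 2 / fan_in
print(round(stddev, 4))           # -> 0.1768 for a layer with 64 inputs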
Example #4
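A generator variant that treats each of the 64 input bands independently: every band gets its own un-normalized, un-regularized 1x1 convolution, and the per-band outputs are concatenated back along the channel axis.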
def _shadowdata_generator_model_v2(netinput, is_training=True):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose, slim.separable_conv2d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_regularizer=slim.l2_regularizer(0.00001),
            normalizer_fn=slim.batch_norm,
            normalizer_params={
                'is_training': is_training,
                'decay': 0.95
            },
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp)),
            trainable=is_training):
        net_hats = []
        for index in range(0, 64):
            level0 = tf.expand_dims(netinput[:, :, :, index], axis=3)
            level1 = slim.conv2d(level0,
                                 1, [1, 1],
                                 normalizer_fn=None,
                                 normalizer_params=None,
                                 weights_regularizer=None)
            net_hats.append(level1)

        net = tf.concat(net_hats, axis=3)
        return net
Example #5
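A plain fully connected discriminator: generated data and generator input are concatenated along the channel axis, flattened, and pushed through a 192-128-96-64-48-32 stack; only the initializer and leaky-ReLU activation are set in arg_scope.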
def _shadowdata_discriminator_model(generated_data,
                                    generator_input,
                                    is_training=True):
    # bn_training_params = {'is_training': is_training, 'decay': 0.95}
    # normalizer_fn=slim.batch_norm,
    with slim.arg_scope(
        [slim.fully_connected],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            # weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp))):
        net = tf.concat(axis=3, values=[generated_data, generator_input])

        # net = tf.transpose(net, [0, 3, 1, 2])
        # filter_count = 16
        # net = slim.conv2d(net, filter_count, [1, 1], stride=2)
        # net = slim.conv2d(net, int(filter_count / 2), [1, 1], stride=2)
        # net = slim.conv2d(net, int(filter_count / 4), [1, 1], stride=2)
        # net = slim.conv2d(net, int(filter_count / 8), [1, 1], stride=2)
        # net = slim.conv2d(net, int(filter_count / 16), [1, 1], stride=2)

        net = slim.flatten(net)
        net = slim.fully_connected(net, 192, scope='fc1')
        net = slim.fully_connected(net, 128, scope='fc2')
        net = slim.fully_connected(net, 96, scope='fc3')
        net = slim.fully_connected(net, 64, scope='fc4')
        net = slim.fully_connected(net, 48, scope='fc5')
        net = slim.fully_connected(net, 32, scope='fc6')

    return net
Example #6
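A Keras-style build hook from an actor layer: it creates the kernel with a default variance_scaling() initializer and the bias with zeros, sized by the incoming feature dimension and the policy's logit count.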
    def build(self, input_shape: Union[list, tuple]):
        super().build(input_shape)

        logit_count = self.__policy__.num_inputs + 1
        k_init = init.variance_scaling()
        b_init = init.zeros()

        self.__create_weights__(k_init((input_shape[-1], logit_count)),
                                'actor_kernel')
        self.__create_weights__(b_init(logit_count), 'actor_bias')
Example #7
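A thin super-resolution generator wrapper: arg_scope fixes the initializer, trainability, and NHWC data format, then delegates the actual architecture to gen_net_method3 (defined elsewhere in the project).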
def _srdata_generator_model(netinput, is_training=True):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            # weights_regularizer=slim.l2_regularizer(0.00001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            # activation_fn=(lambda inp: slim.nn.leaky_relu(inp)),
            trainable=is_training,
            data_format="NHWC"):
        net = gen_net_method3(netinput)
        return net
Example #8
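A super-resolution discriminator built per band: each of the 144 channels of the generated data is run through its own small conv stack (5x5, then 3x3, then a linear 1x1) and the results are concatenated; the commented-out lines record fully connected variants that were tried and abandoned.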
def _srdata_discriminator_model(generated_data,
                                generator_input,
                                is_training=True):
    with slim.arg_scope(
        [slim.fully_connected],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            normalizer_fn=slim.instance_norm,
            normalizer_params={
                'center': True,
                'scale': True,
                'epsilon': 0.001
            },
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp))):
        # net = tf.concat(axis=3, values=[generated_data, generator_input])
        #
        # net = slim.flatten(net)
        # net = slim.fully_connected(net, 768 * 2, scope='fc2')
        # net = slim.fully_connected(net, 384 * 2, scope='fc2')
        # net = slim.fully_connected(net, 192 * 2, scope='fc3')
        # net = slim.fully_connected(net, 128, scope='fc4')
        # net = slim.fully_connected(net, 96, scope='fc5')
        # net = slim.fully_connected(net, 64, scope='fc6')
        net_hats = []
        for index in range(0, 144):
            level0 = tf.expand_dims(generated_data[:, :, :, index], axis=3)
            level1 = slim.conv2d(level0, 1, [5, 5], padding='VALID')
            level2 = slim.conv2d(level1, 1, [3, 3], padding='VALID')
            level3 = slim.conv2d(level2,
                                 1, [1, 1],
                                 padding='VALID',
                                 activation_fn=None,
                                 normalizer_fn=None,
                                 normalizer_params=None)
            net_hats.append(level3)
        net = tf.concat(net_hats, axis=3)
        # net = slim.flatten(net)
        # net = slim.fully_connected(net, 4 * 4 * 144, scope='fc1')
        # net = slim.dropout(net, is_training=is_training)
        # net = slim.fully_connected(net, 4 * 4 * 144, scope='fc2',
        #                            activation_fn=None, normalizer_fn=None, normalizer_params=None)
        # net = tf.reshape(net, [-1, 2, 2, 144])
    return net
Example #9
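A feature-level discriminator: the input is flattened and reduced by three fully connected layers at one half, one quarter, and one eighth of the band count.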
def _shadowdata_feature_discriminator_model(generated_data, is_training=True):
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d, slim.convolution1d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.999},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.1))):
        band_size = generated_data.get_shape()[3].value

        net = generated_data
        net = slim.flatten(net)

        net1 = slim.fully_connected(net, band_size // 2)
        net2 = slim.fully_connected(net1, band_size // 4)
        net3 = slim.fully_connected(net2, band_size // 8)
    return net3
Example #10
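A simplified discriminator: generated data and generator input are concatenated and squeezed to a 1-D signature, then a single wide linear convolution1d (kernel and output size both twice the band count) produces the score tensor.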
def _shadowdata_discriminator_model_simple(generated_data,
                                           generator_input,
                                           is_training=True):
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d, slim.convolution1d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp, alpha=0.01))):
        band_size = generated_data.get_shape()[3].value

        net = tf.concat(axis=3, values=[generated_data, generator_input])
        net = tf.squeeze(net, axis=[1, 2])
        net = tf.expand_dims(net, axis=2)
        size = band_size * 2
        net = slim.convolution1d(net,
                                 size,
                                 size,
                                 padding='VALID',
                                 normalizer_fn=None,
                                 normalizer_params=None,
                                 activation_fn=None)
        net = tf.expand_dims(tf.expand_dims(slim.flatten(net), axis=1), axis=1)
    return net
Example #11
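An hourglass-shaped generator made entirely of 1x1 convolutions (144 -> 72 -> 36 -> 36 -> 36 -> 72 -> 144 filters), in effect a per-pixel spectral autoencoder.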
def _shadowdata_generator_model(netinput, is_training=True):
    with slim.arg_scope(
        [slim.conv2d, slim.conv2d_transpose],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            # weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp)),
            trainable=is_training,
            data_format="NHWC"):
        # netinput = tf.transpose(netinput, [0, 3, 1, 2])
        net = slim.conv2d(netinput, 144, [1, 1])
        net = slim.conv2d(net, 72, [1, 1])
        net = slim.conv2d(net, 36, [1, 1])
        net = slim.conv2d(net, 36, [1, 1])
        net = slim.conv2d(net, 36, [1, 1])
        net = slim.conv2d(net, 72, [1, 1])
        net = slim.conv2d(net, 144, [1, 1])
        # net = tf.transpose(net, [0, 2, 3, 1])
        return net
Example #12
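A discriminator reduced to a single separable_conv2d (1x1 kernel, depth multiplier 64) with normalization and regularization switched off; the commented-out blocks document earlier fully connected and per-band attempts.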
def _shadowdata_discriminator_model(generated_data,
                                    generator_input,
                                    is_training=True):
    # bn_training_params = {'is_training': is_training, 'decay': 0.95}
    # normalizer_fn=slim.batch_norm,
    with slim.arg_scope(
        [slim.fully_connected, slim.separable_conv2d],
            weights_initializer=initializers.variance_scaling(scale=2.0),
            # weights_regularizer=slim.l2_regularizer(0.001),
            # normalizer_fn=slim.batch_norm,
            # normalizer_params={'is_training': is_training, 'decay': 0.95},
            # normalizer_fn=slim.instance_norm,
            # normalizer_params={'center': True, 'scale': True, 'epsilon': 0.001},
            activation_fn=(lambda inp: slim.nn.leaky_relu(inp))):
        # net = tf.concat(axis=3, values=[generated_data, generator_input])
        # net = slim.flatten(net)
        # net = slim.fully_connected(net, 192, scope='fc1')
        # net = slim.dropout(net, is_training=is_training)
        # net = slim.fully_connected(net, 128, scope='fc2')
        # net = slim.dropout(net, is_training=is_training)
        # net = slim.fully_connected(net, 96, scope='fc3')

        # net_hats = []
        # for index in range(0, 64):
        #     level0 = expand_dims(generated_data[:, :, :, index], axis=3)
        #     level1 = slim.conv2d(level0, 1, [1, 1], padding='VALID',
        #                          activation_fn=None, normalizer_fn=None, normalizer_params=None)
        #     net_hats.append(level1)
        # net = tf.concat(net_hats, axis=3)
        net = slim.separable_conv2d(generated_data,
                                    64, [1, 1],
                                    64,
                                    normalizer_fn=None,
                                    normalizer_params=None,
                                    weights_regularizer=None)
    return net
Example #13
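A residual spectral/spatial CNN (CNNModelv4): arg_scope sets He initialization, L2 regularization, batch norm, and leaky ReLU for every conv and fully connected layer; spectral and spatial blocks are stacked with skip connections, and in training mode an auxiliary fully connected head reconstructs the input image.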
    def create_tensor_graph(self, model_input_params, class_count,
                            algorithm_params):
        with tf.device(model_input_params.device_id):
            data_format = None  # 'NHWC'
            bn_training_params = {
                'is_training': model_input_params.is_training,
                'decay': algorithm_params["bn_decay"]
            }
            lrelu = lambda inp: slim.nn.leaky_relu(
                inp, alpha=algorithm_params["lrelu_alpha"])
            with slim.arg_scope(
                [slim.conv2d, slim.fully_connected],
                    weights_initializer=initializers.variance_scaling(
                        scale=2.0),
                    weights_regularizer=slim.l2_regularizer(
                        algorithm_params["l2regularizer_scale"]),
                    normalizer_fn=slim.batch_norm,
                    normalizer_params=bn_training_params,
                    activation_fn=lrelu):
                level_filter_count = algorithm_params["filter_count"]

                if data_format == 'NCHW':
                    net0 = tf.transpose(model_input_params.x,
                                        [0, 3, 1, 2])  # Convert input to NCHW
                else:
                    net0 = model_input_params.x

                spectral_hierarchy_level = algorithm_params[
                    "spectral_hierarchy_level"]
                net1 = self.__create_spectral_nn_layers(
                    data_format, level_filter_count, net0,
                    spectral_hierarchy_level, True)
                net1 = net1 + CNNModelv4.__scale_input_to_output(net0, net1)

                net2 = self.__create_spectral_nn_layers(
                    data_format, level_filter_count, net1,
                    spectral_hierarchy_level, False)
                net2 = net2 + CNNModelv4.__scale_input_to_output(net1, net2)

                spatial_hierarchy_level = algorithm_params[
                    "spatial_hierarchy_level"]
                net3 = self.__create_levels_as_blocks(
                    data_format, int(net2.get_shape()[3].value / 2), net2,
                    spatial_hierarchy_level)
                net3 = net3 + CNNModelv4.__scale_input_to_output(net2, net3)

                net4 = slim.flatten(net3)

                degradation_coeff = algorithm_params["degradation_coeff"]
                net5 = self.__create_fc_block(algorithm_params, class_count,
                                              degradation_coeff,
                                              model_input_params, net4)

                net6 = slim.fully_connected(net5,
                                            class_count,
                                            weights_regularizer=None,
                                            activation_fn=None,
                                            scope='fc_final')

                image_gen_net4 = None
                if model_input_params.is_training:
                    image_gen_net1 = slim.fully_connected(
                        net6,
                        class_count * 3,
                        weights_regularizer=None,
                        scope='image_gen_net_1')
                    image_gen_net2 = slim.fully_connected(
                        image_gen_net1,
                        class_count * 9,
                        weights_regularizer=None,
                        scope='image_gen_net_2')
                    image_gen_net3 = slim.fully_connected(
                        image_gen_net2,
                        class_count * 27,
                        weights_regularizer=None,
                        scope='image_gen_net_3')
                    image_size = (net0.get_shape()[1] * net0.get_shape()[2] *
                                  net0.get_shape()[3]).value
                    image_gen_net4 = slim.fully_connected(
                        image_gen_net3,
                        image_size,
                        weights_regularizer=None,
                        activation_fn=tf.sigmoid,
                        scope='image_gen_net_4')
        return ModelOutputTensors(y_conv=net6,
                                  image_output=image_gen_net4,
                                  image_original=net0)
Example #14
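A capsule network: a conv feature layer, a primary-capsule conv layer, digit capsules with iter_routing rounds of dynamic routing (softmaxed coupling coefficients, depthwise-conv weighted sums, and the squash nonlinearity), class scores taken as capsule norms, and an optional fully connected decoder used during training.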
    def create_tensor_graph(self, model_input_params, class_count,
                            algorithm_params):
        iter_routing = algorithm_params["iter_routing"]
        conv_layer_kernel_size = [
            algorithm_params["conv_layer_kernel_size"],
            algorithm_params["conv_layer_kernel_size"]
        ]
        primary_caps_kernel_size = [
            algorithm_params["primary_caps_kernel_size"],
            algorithm_params["primary_caps_kernel_size"]
        ]
        feature_count = algorithm_params["feature_count"]
        primary_capsule_count = algorithm_params["primary_capsule_count"]
        primary_capsule_output_space = algorithm_params[
            "digit_capsule_output_space"]  # (sic) both capsule spaces read the digit key
        digit_capsule_output_space = algorithm_params[
            "digit_capsule_output_space"]

        digit_capsule_count = class_count
        batch_size = -1
        enable_decoding = algorithm_params["enable_decoding"]

        lrelu_func = lambda inp: slim.nn.leaky_relu(
            inp, alpha=algorithm_params["lrelu_alpha"])
        with tf.device(model_input_params.device_id):
            with slim.arg_scope(
                [slim.conv2d],
                    trainable=model_input_params.is_training,
                    weights_initializer=initializers.variance_scaling(
                        scale=2.0)):
                with tf.variable_scope('Conv1_layer') as scope:
                    image_output = slim.conv2d(
                        model_input_params.x,
                        num_outputs=feature_count,
                        activation_fn=lrelu_func,
                        kernel_size=conv_layer_kernel_size,
                        padding='VALID',
                        scope=scope)

                with tf.variable_scope('PrimaryCaps_layer') as scope:
                    image_output = slim.conv2d(
                        image_output,
                        num_outputs=primary_capsule_count *
                        primary_capsule_output_space,
                        kernel_size=primary_caps_kernel_size,
                        stride=2,
                        padding='VALID',
                        scope=scope,
                        activation_fn=lrelu_func)
                    data_size = (image_output.get_shape()[1] *
                                 image_output.get_shape()[2] *
                                 image_output.get_shape()[3]).value
                    data_size = int(data_size / primary_capsule_output_space)
                    image_output = tf.reshape(image_output, [
                        batch_size, data_size, 1, primary_capsule_output_space
                    ])

                with tf.variable_scope('DigitCaps_layer'):
                    u_hats = []
                    image_output_groups = tf.split(
                        axis=1,
                        num_or_size_splits=data_size,
                        value=image_output)
                    for i in range(data_size):
                        u_hat = slim.conv2d(image_output_groups[i],
                                            num_outputs=digit_capsule_count *
                                            digit_capsule_output_space,
                                            kernel_size=[1, 1],
                                            padding='VALID',
                                            scope='DigitCaps_layer_w_' +
                                            str(i),
                                            activation_fn=lrelu_func)
                        u_hat = tf.reshape(u_hat, [
                            batch_size, 1, digit_capsule_count,
                            digit_capsule_output_space
                        ])
                        u_hats.append(u_hat)

                    image_output = tf.concat(u_hats, axis=1)

                    b_ijs = tf.constant(
                        numpy.zeros([data_size, digit_capsule_count],
                                    dtype=numpy.float32))
                    v_js = []
                    for r_iter in range(iter_routing):
                        with tf.variable_scope('iter_' + str(r_iter)):
                            b_ij_groups = tf.split(
                                axis=1,
                                num_or_size_splits=digit_capsule_count,
                                value=b_ijs)

                            c_ijs = tf.nn.softmax(b_ijs, axis=1)
                            c_ij_groups = tf.split(
                                axis=1,
                                num_or_size_splits=digit_capsule_count,
                                value=c_ijs)

                            image_output_groups = tf.split(
                                axis=2,
                                num_or_size_splits=digit_capsule_count,
                                value=image_output)

                            for i in range(digit_capsule_count):
                                c_ij = tf.reshape(
                                    tf.tile(c_ij_groups[i],
                                            [1, digit_capsule_output_space]),
                                    [
                                        c_ij_groups[i].get_shape()[0], 1,
                                        digit_capsule_output_space, 1
                                    ])
                                s_j = tf.nn.depthwise_conv2d(
                                    image_output_groups[i],
                                    c_ij,
                                    strides=[1, 1, 1, 1],
                                    padding='VALID')

                                # Squash function
                                s_j = tf.reshape(
                                    s_j,
                                    [batch_size, digit_capsule_output_space])
                                s_j_norm_square = tf.reduce_mean(
                                    tf.square(s_j), axis=1, keepdims=True)
                                v_j = s_j_norm_square * s_j / (
                                    (1 + s_j_norm_square) *
                                    tf.sqrt(s_j_norm_square + 1e-9))

                                b_ij_groups[i] = b_ij_groups[
                                    i] + tf.reduce_sum(tf.matmul(
                                        tf.reshape(image_output_groups[i], [
                                            batch_size,
                                            image_output_groups[i].get_shape()
                                            [1], digit_capsule_output_space
                                        ]),
                                        tf.reshape(v_j, [
                                            batch_size,
                                            digit_capsule_output_space, 1
                                        ])),
                                                       axis=0)

                                if r_iter == iter_routing - 1:
                                    v_js.append(
                                        tf.reshape(v_j, [
                                            batch_size, 1,
                                            digit_capsule_output_space
                                        ]))

                            b_ijs = tf.concat(b_ij_groups, axis=1)

                    image_output = tf.concat(v_js, axis=1)

                    with tf.variable_scope('Masking'):
                        y_conv = tf.norm(image_output, axis=2)

                    decoder_image_output = None
                    if model_input_params.is_training and enable_decoding:
                        y_as_float = tf.cast(model_input_params.y,
                                             dtype=tf.float32)
                        masked_v = tf.matmul(
                            image_output,
                            tf.reshape(y_as_float,
                                       [batch_size, digit_capsule_count, 1]),
                            transpose_a=True)
                        masked_v = tf.reshape(
                            masked_v, [batch_size, digit_capsule_output_space])

                        with tf.variable_scope('Decoder'):
                            size = (model_input_params.x.get_shape()[1] *
                                    model_input_params.x.get_shape()[2] *
                                    model_input_params.x.get_shape()[3]).value
                            image_output = slim.fully_connected(
                                masked_v,
                                512,
                                scope='fc1',
                                activation_fn=lrelu_func,
                                trainable=model_input_params.is_training)
                            image_output = slim.fully_connected(
                                image_output,
                                1024,
                                scope='fc2',
                                activation_fn=lrelu_func,
                                trainable=model_input_params.is_training)
                            decoder_image_output = slim.fully_connected(
                                image_output,
                                size,
                                scope='fc3',
                                activation_fn=tf.sigmoid,
                                trainable=model_input_params.is_training)

        return ModelOutputTensors(y_conv=y_conv,
                                  image_output=decoder_image_output,
                                  image_original=model_input_params.x)