Example 1
import tensorflow as tf
import tensorflow.contrib.slim as slim  # TF 1.x; the examples below all assume these imports

def discriminate(input_data, scope="discriminate", reuse=False, is_training=True):
    batch_norm_params = {
        "is_training": is_training,
        "decay": 0.9997,
        "epsilon": 0.001,
        "updates_collections": tf.GraphKeys.UPDATE_OPS,
        "variables_collections": {
            "beta": None,
            "gamma": None,
            "moving_mean": ['moving_vars'],
            'moving_variance': ['moving_vars'],
        }
    }

    with tf.variable_scope(scope, 'discriminate', [input_data]):
        with slim.arg_scope([slim.conv2d],
                            normalizer_fn=slim.batch_norm,
                            normalizer_params=batch_norm_params,
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.1),
                            weights_regularizer=slim.l1_l2_regularizer()):
            conv1 = slim.conv2d(input_data, 32, [3, 3], padding="SAME", scope="d_conv1", reuse=reuse)
            conv2 = slim.conv2d(conv1, 64, [5, 5], padding="SAME", scope="d_conv2", reuse=reuse)
            conv3 = slim.conv2d(conv2, 32, [3, 3], padding="SAME", scope="d_conv3", reuse=reuse)
            out = slim.conv2d(conv3, 1, [28, 28], padding="VALID", scope="d_conv4", reuse=reuse)
        return out
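
A minimal usage sketch (mine, not from the example's source repository): because batch_norm registers its moving-average updates under tf.GraphKeys.UPDATE_OPS and the regularizers report into the regularization-loss collection, a training step has to pick both up explicitly. The 28x28 single-channel input shape is an assumption matching the [28, 28] VALID kernel in d_conv4.

# Hedged sketch; the input shape, "all real" labels, and optimizer are illustrative choices.
images = tf.placeholder(tf.float32, [None, 28, 28, 1])
logits = discriminate(images, is_training=True)  # shape [None, 1, 1, 1]

reg_loss = tf.losses.get_regularization_loss()   # sums the l1_l2 penalties
data_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    labels=tf.ones_like(logits), logits=logits))
total_loss = data_loss + reg_loss

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)  # batch-norm moving averages
with tf.control_dependencies(update_ops):
    train_op = tf.train.AdamOptimizer(1e-4).minimize(total_loss)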
Example 2

def discriminate(input_data, reuse=False):
    with tf.variable_scope("discriminate"):
        with slim.arg_scope(
                [slim.fully_connected],
                weights_initializer=tf.truncated_normal_initializer(0.0, 0.1),
                weights_regularizer=slim.l1_l2_regularizer(),
                activation_fn=None):
            # `length` is assumed to be defined at module level in the source file.
            fc1 = slim.fully_connected(inputs=input_data,
                                       num_outputs=length,
                                       scope="d_fc1",
                                       reuse=reuse)
            fc1 = tf.tanh(fc1)
            fc2 = slim.fully_connected(inputs=fc1,
                                       num_outputs=length,
                                       scope="d_fc2",
                                       reuse=reuse)
            fc2 = tf.tanh(fc2)
            fc3 = slim.fully_connected(inputs=fc2,
                                       num_outputs=1,
                                       scope="d_fc3",
                                       reuse=reuse)
            # Note: the output is squashed twice (tanh, then sigmoid), so it is
            # confined to roughly (0.27, 0.73) rather than (0, 1); this looks like
            # an oversight in the original code and is preserved here as-is.
            fc3 = tf.tanh(fc3)
            fc3 = tf.sigmoid(fc3)
    return fc3
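
The reuse flag follows the usual TF1 GAN pattern: the first call creates the d_fc* variables, and a second call with reuse=True shares them. A hedged sketch, with length picked arbitrarily:

length = 784  # assumption; the source module defines this elsewhere
real_data = tf.placeholder(tf.float32, [None, length])
fake_data = tf.placeholder(tf.float32, [None, length])

d_real = discriminate(real_data, reuse=False)  # builds discriminate/d_fc1..d_fc3
d_fake = discriminate(fake_data, reuse=True)   # reuses the same variables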
Example 3
def Encode(self, input_img):
    # Use conv layers to improve performance.
    with tf.variable_scope('encoder', reuse=tf.AUTO_REUSE) as scope:
        conv_l = slim.conv2d(input_img,
                             32, [3, 3],
                             2,
                             weights_regularizer=slim.l1_l2_regularizer(.001),
                             scope='conv_1')
        conv_l = slim.conv2d(conv_l,
                             64, [3, 3],
                             2,
                             weights_regularizer=slim.l1_l2_regularizer(.005),
                             scope='conv_2')
        conv_l = slim.conv2d(conv_l,
                             128, [3, 3],
                             4,
                             weights_regularizer=slim.l1_regularizer(.005),
                             scope=scope)
    return conv_l
Example 4
def generate(input_data, reuse=False):
    with tf.variable_scope("generate"):
        with slim.arg_scope([slim.fully_connected],
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.1),
                            weights_regularizer=slim.l1_l2_regularizer(),
                            activation_fn=None):
            # `length` is again assumed to come from module scope in the source file.
            fc1 = slim.fully_connected(inputs=input_data, num_outputs=length, scope="g_fc1", reuse=reuse)
            fc1 = tf.nn.softplus(fc1, name="g_softplus")
            fc2 = slim.fully_connected(inputs=fc1, num_outputs=length, scope="g_fc2", reuse=reuse)
    return fc2
Example 5
def Decode(self, Decoded_img):
    with tf.variable_scope('decoder', reuse=tf.AUTO_REUSE) as scope:
        conv_d = slim.conv2d_transpose(
            Decoded_img,
            64, [3, 3],
            4,
            weights_regularizer=slim.l1_l2_regularizer(.001),
            scope='dconv_1')
        conv_d = slim.conv2d_transpose(
            conv_d,
            32, [3, 3],
            2,
            weights_regularizer=slim.l1_l2_regularizer(.005),
            scope='dconv_2')
        conv_d = slim.conv2d_transpose(
            conv_d,
            3, [3, 3],
            2,
            weights_regularizer=slim.l1_regularizer(.005),
            scope=scope)
    return conv_d
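
Examples 3 and 5 read as the two halves of a convolutional autoencoder: strides 2, 2, 4 going down, then 4, 2, 2 coming back up to 3 channels. A sketch of how they might be wired together, assuming both are methods of one model class (the class name and input size below are made up):

class ConvAutoEncoder(object):
    pass

ConvAutoEncoder.Encode = Encode  # attach the methods defined above (Python 3)
ConvAutoEncoder.Decode = Decode

model = ConvAutoEncoder()
images = tf.placeholder(tf.float32, [None, 64, 64, 3])
code = model.Encode(images)   # 64x64 -> 4x4 spatially (2 * 2 * 4 = 16x downsampling)
recon = model.Decode(code)    # 4x4 -> 64x64, back to 3 channels
loss = tf.reduce_mean(tf.square(recon - images)) + tf.losses.get_regularization_loss()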
Example 6
def ds_cnn_arg_scope(scale_l1, scale_l2):
    """Defines the default ds_cnn argument scope.
    Args:
        scale_l1: Scale of the L1 penalty applied to the weights.
        scale_l2: Scale of the L2 penalty applied to the weights.
    Returns:
        An `arg_scope` to use for the DS-CNN model.
    """
    with slim.arg_scope(
            [slim.convolution2d, slim.separable_convolution2d],
            weights_initializer=slim.initializers.xavier_initializer(),
            biases_initializer=slim.init_ops.zeros_initializer(),
            weights_regularizer=slim.l1_l2_regularizer(scale_l1=scale_l1,
                                                       scale_l2=scale_l2)) as sc:
        return sc
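
Usage follows the standard slim pattern: enter the returned scope, and every convolution2d / separable_convolution2d built inside it inherits the initializers and the combined L1/L2 regularizer. A minimal sketch with made-up scales and input shape:

inputs = tf.placeholder(tf.float32, [None, 49, 10, 1])  # illustrative feature map
with slim.arg_scope(ds_cnn_arg_scope(scale_l1=1e-5, scale_l2=1e-4)):
    net = slim.convolution2d(inputs, 64, [3, 3], scope='conv1')
    net = slim.separable_convolution2d(net, None, [3, 3],
                                       depth_multiplier=1,
                                       scope='dw_conv1')  # num_outputs=None => depthwise only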
Example 7
    def create_model(self,
                     model_input,
                     vocab_size,
                     num_mixtures=None,
                     l2_penalty=1e-6,
                     **unused_params):
        num_mixtures = num_mixtures or FLAGS.MoNN_num_experts

        gate_activations = slim.fully_connected(
            model_input,
            vocab_size * (num_mixtures + 1),
            activation_fn=None,
            biases_initializer=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="gates")

        h1Units = 4096
        A1 = slim.fully_connected(
            model_input,
            h1Units,
            activation_fn=tf.nn.relu,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope='FC_H1')
        h2Units = 4096
        A2 = slim.fully_connected(
            A1,
            h2Units,
            activation_fn=tf.nn.relu,
            # Note: the single positional argument sets scale_l1, not scale_l2;
            # the signature is l1_l2_regularizer(scale_l1=1.0, scale_l2=1.0).
            weights_regularizer=slim.l1_l2_regularizer(l2_penalty),
            scope='FC_H2')
        expert_activations = slim.fully_connected(
            A2,
            vocab_size * num_mixtures,
            activation_fn=None,
            weights_regularizer=slim.l2_regularizer(l2_penalty),
            scope="experts")

        gating_distribution = tf.nn.softmax(
            tf.reshape(gate_activations,
                       [-1, num_mixtures + 1
                        ]))  # (Batch * #Labels) x (num_mixtures + 1)
        expert_distribution = tf.nn.sigmoid(
            tf.reshape(expert_activations,
                       [-1, num_mixtures]))  # (Batch * #Labels) x num_mixtures

        final_probabilities_by_class_and_batch = tf.reduce_sum(
            gating_distribution[:, :num_mixtures] * expert_distribution, 1)
        final_probabilities = tf.reshape(
            final_probabilities_by_class_and_batch, [-1, vocab_size])
        return {"predictions": final_probabilities}
Example 8
def generate(inputData, reuse=False):
    """
    Generator.
    :param inputData: input tensor
    :param reuse: whether to reuse the variables in this scope
    :return: output of the second fully connected layer
    """
    with tf.variable_scope("generate"):
        # arg_scope lets all ops defined within it share these arguments: any
        # argument not specified at the call site falls back to the defaults
        # given here, and each can be overridden locally. It keeps the code concise.
        with slim.arg_scope([slim.fully_connected],
                            weights_initializer=tf.truncated_normal_initializer(0.0, 0.1),
                            weights_regularizer=slim.l1_l2_regularizer(),
                            activation_fn=None):
            fc1 = slim.fully_connected(inputs=inputData, num_outputs=length, scope="g_fc1", reuse=reuse)
            fc1 = tf.nn.softplus(features=fc1, name="g_softplus")
            fc2 = slim.fully_connected(inputs=fc1, num_outputs=length, scope="g_fc2", reuse=reuse)
    return fc2
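
To make the comment about arg_scope concrete: the values set in arg_scope are only defaults, and any individual call can override them. A hedged sketch (x and the layer sizes are made up):

x = tf.placeholder(tf.float32, [None, 64])
with slim.arg_scope([slim.fully_connected],
                    weights_regularizer=slim.l1_l2_regularizer(),
                    activation_fn=None):
    a = slim.fully_connected(x, 128, scope="a")  # linear: activation_fn=None from the scope
    b = slim.fully_connected(a, 128, activation_fn=tf.nn.relu,
                             scope="b")          # local override beats the scope default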