Code example #1
 def batch_norm_layer(self, x, train_phase, scope_bn):
     bn_train = batch_norm(x, decay=0.9, center=True, scale=True, updates_collections=None,
         is_training=True, reuse=None, trainable=True, scope=scope_bn)
     bn_inference = batch_norm(x, decay=0.9, center=True, scale=True, updates_collections=None,
         is_training=False, reuse=True, trainable=True, scope=scope_bn)
     z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
     return z
Code example #2
def BatchNorm_GitHub_Ver(inputT, is_training=True, scope=None):
    # Note: is_training is tf.placeholder(tf.bool) type
    return tf.cond(is_training,
                lambda: batch_norm(inputT, is_training=True,
                                   center=False, updates_collections=None, scope=scope),
                lambda: batch_norm(inputT, is_training=False,
                                   updates_collections=None, center=False, scope=scope, reuse=True))
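The comment above matters: because is_training is fed as a tf.placeholder(tf.bool), a single graph serves both phases. A minimal usage sketch (TF 1.x graph mode; the input shape and feeds are hypothetical, not from the original project):

inputs = tf.placeholder(tf.float32, [None, 64])
is_training = tf.placeholder(tf.bool, name='is_training')
net = BatchNorm_GitHub_Ver(inputs, is_training=is_training, scope='bn1')
# sess.run(..., feed_dict={is_training: True})   # training step: uses batch statistics
# sess.run(..., feed_dict={is_training: False})  # eval step: uses moving averages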
Code example #3
File: ops.py Project: stephan-who/DCGAN
def batch_norm_layer(value, is_train=True, name='batch_norm'):
    with tf.variable_scope(name) as scope:
        if is_train:
            return batch_norm(value, decay=0.9, epsilon=1e-5, scale=True, is_training=is_train,
                              updates_collections=None, scope=scope)
        else:
            return batch_norm(value, decay=0.9, epsilon=1e-5, scale=True, is_training=is_train, reuse=True,
                              updates_collections=None, scope=scope)
Code example #4
	def batchnorm(self):
		from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
		with tf.name_scope('batchnorm') as scope:
			input = self.last_layer
			# mean, var = tf.nn.moments(input, axes=[0, 1, 2])
			# self.batch_norm = tf.nn.batch_normalization(input, mean, var, offset=1, scale=1, variance_epsilon=1e-6)
			# self.last_layer=self.batch_norm
			train_op = batch_norm(input, is_training=True, center=False, updates_collections=None, scope=scope)
			test_op = batch_norm(input, is_training=False, updates_collections=None, center=False, scope=scope,
			                     reuse=True)
			self.add(tf.cond(self.train_phase, lambda: train_op, lambda: test_op))
Code example #5
def batch_norm_layer(x, train_phase, scope_bn):
    bn_train = batch_norm(x, decay=0.999, center=True, scale=True,
                          is_training=True,
                          reuse=None,  # is this right?
                          trainable=True,
                          scope=scope_bn)
    bn_inference = batch_norm(x, decay=0.999, center=True, scale=True,
                              is_training=False,
                              reuse=True,  # is this right?
                              trainable=True,
                              scope=scope_bn)
    z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
    return z
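The recurring "# is this right?" question can be answered yes: reuse=None on the first call creates the beta/gamma and moving-average variables under scope_bn, and reuse=True on the second call binds to those same variables, so both tf.cond branches share one set of parameters and statistics. A minimal sketch of that TF 1.x sharing idiom in isolation (names hypothetical):

with tf.variable_scope('demo') as vs:
    v1 = tf.get_variable('w', shape=[3])  # reuse=None: creates 'demo/w'
with tf.variable_scope(vs, reuse=True):
    v2 = tf.get_variable('w', shape=[3])  # reuse=True: returns the existing 'demo/w'
assert v1 is v2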
Code example #6
File: cifar10.py Project: yihui-he/context-cifar
def batch_norm_layer(x, train_phase, scope="BN"):
  bn_train = batch_norm(x, decay=0.8, center=True, scale=True,
                        updates_collections=None,
                        is_training=True,
                        reuse=None,  # is this right?
                        trainable=True,
                        scope=scope)
  bn_inference = batch_norm(x, decay=0.8, center=True, scale=True,
                            updates_collections=None,
                            is_training=False,
                            reuse=True,  # is this right?
                            trainable=True,
                            scope=scope)
  z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
  return z
Code example #7
def batch_norm_layer(x, phase_train, scope_bn, trainable=True):
    print('======> official BN')
    bn_train = batch_norm(x, decay=0.999, center=True, scale=True,
                          updates_collections=None,
                          is_training=True,
                          reuse=None,  # is this right?
                          trainable=trainable,
                          scope=scope_bn)
    bn_inference = batch_norm(x, decay=0.999, center=True, scale=True,
                              updates_collections=None,
                              is_training=False,
                              reuse=True,  # is this right?
                              trainable=trainable,
                              scope=scope_bn)
    z = tf.cond(phase_train, lambda: bn_train, lambda: bn_inference)
    return z
Code example #8
def batch_norm_layer(x, train_phase, scope_bn):
    return batch_norm(x, decay=0.9, center=True, scale=True,
                      updates_collections=None,
                      is_training=train_phase,
                      reuse=None,
                      trainable=True,
                      scope=scope_bn)
Code example #9
def batch_norm_layer(x, train_phase, scope_bn):
    """Adds a Batch Normalization layer from http://arxiv.org/abs/1502.03167.
        "Batch Normalization: Accelerating Deep Network Training by Reducing
        Internal Covariate Shift"
        Sergey Ioffe, Christian Szegedy
      Can be used as a normalizer function for conv2d and fully_connected.
    """
    bn_train = batch_norm(x, decay=0.999, center=True, scale=True, updates_collections=None,
                          is_training=True, reuse=None,  # is this right?
                          trainable=True, scope=scope_bn)
    bn_inference = batch_norm(x, decay=0.999, center=True, scale=True, updates_collections=None,
                              is_training=False, reuse=True,  # is this right?
                              trainable=True, scope=scope_bn)
    z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
    return z
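As the docstring notes, batch_norm can also be wired in as a normalizer function rather than called directly. A hedged sketch of that alternative (assuming tf.contrib.layers; the layer sizes and the x/train_phase tensors are hypothetical):

from tensorflow.contrib import layers

net = layers.conv2d(x, num_outputs=64, kernel_size=3,
                    normalizer_fn=batch_norm,  # applied to the conv output before the activation
                    normalizer_params={'decay': 0.999, 'center': True, 'scale': True,
                                       'updates_collections': None,
                                       'is_training': train_phase})  # train_phase: tf.bool tensor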
Code example #10
def normalizeBatch(x, is_training):
    return batch_norm(x,
                      decay=0.9,
                      center=True,
                      scale=True,
                      updates_collections=None,
                      is_training=is_training,
                      reuse=None,
                      trainable=True,
                      scope=None)
Code example #11
def batch_norm_layer(value, is_train=True, name='batch_norm'):
    with tf.variable_scope(name) as scope:
        if is_train:
            return batch_norm(value,
                              decay=0.9,
                              epsilon=1e-5,
                              scale=True,
                              is_training=is_train,
                              updates_collections=None,
                              scope=scope)
        else:
            return batch_norm(value,
                              decay=0.9,
                              epsilon=1e-5,
                              scale=True,
                              is_training=is_train,
                              reuse=True,
                              updates_collections=None,
                              scope=scope)
Code example #12
def deconv2d(x, W, B, out_shape, stride=2, padding='SAME', act_name='leaky_relu', train=True, is_bn=False):
    with tf.name_scope('deconv2d_bias'):
        y = tf.nn.conv2d_transpose(x, W, output_shape=out_shape, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, B)
    if is_bn:
        with tf.name_scope('BN'):
            y = batch_norm(inputs=y, decay=0.9, updates_collections=None, is_training=train)
    return y
Code example #13
File: icgan.py Project: zhenmao720/ML_project
def batch_normal(x, scope="bn", reuse=False, istraining=True):
    return batch_norm(x,
                      epsilon=1e-5,
                      decay=0.9,
                      scale=True,
                      scope=scope,
                      reuse=reuse,
                      is_training=istraining,
                      updates_collections=None,
                      center=True)
Code example #14
File: DeepFM.py Project: yongrl/ML_learning
    def batch_norm_layer(self, x, train_phase, scope_bn):
        '''
        The problem batch normalization addresses: as model parameters change during training,
        the distribution of each hidden layer's outputs shifts, so later layers have to keep
        adapting to those changes. To solve this, the BN2015 paper proposed batch normalization:
        at training time it acts on the input of each neuron's activation function (e.g. sigmoid
        or ReLU) so that, over each mini-batch of training samples, the activation input has
        zero mean and unit variance.
        :param x:
        :param train_phase:
        :param scope_bn:
        :return:
        '''
        bn_train = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                              is_training=True, reuse=None, trainable=True, scope=scope_bn)
        bn_inference = batch_norm(x, decay=self.batch_norm_decay, center=True, scale=True, updates_collections=None,
                                  is_training=False, reuse=True, trainable=True, scope=scope_bn)

        # tf.cond() is similar to if...else... in C and controls which way data flows, but the
        # resemblance is only loose and the differences are substantial. For the specifics of
        # tf.cond() I consulted the TF documentation.
        # format: tf.cond(pred, fn1, fn2, name=None)
        z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
        return z
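A minimal usage sketch to make the tf.cond flow concrete (TF 1.x graph mode; the shapes and the `model` instance are hypothetical):

x = tf.placeholder(tf.float32, [None, 128])
train_phase = tf.placeholder(tf.bool)
h = model.batch_norm_layer(x, train_phase, scope_bn='bn_h')
# tf.cond builds BOTH branches into the graph; the train_phase feed only selects
# which branch executes at session time:
# sess.run(h, feed_dict={x: batch, train_phase: True})   # batch statistics
# sess.run(h, feed_dict={x: batch, train_phase: False})  # moving averages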
Code example #15
def normalize(inp, activation, reuse, scope):
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        else:
            return inp
Code example #16
def batch_norm_layer(x, train_phase, scope_bn):
    return batch_norm(x,
                      decay=0.9,
                      center=True,
                      scale=True,
                      updates_collections=None,
                      is_training=train_phase,
                      reuse=None,
                      trainable=True,
                      scope=scope_bn)
Code example #17
File: utils.py Project: jjayaram7/icf-jag-cycleGAN
def bn(x, is_training, name):
    return batch_norm(x,
                      decay=0.9,
                      center=True,
                      scale=True,
                      updates_collections=None,
                      is_training=is_training,
                      reuse=None,
                      trainable=True,
                      scope=name)
Code example #18
def batchnorm(inputT, is_training=False, scope=None):
    #    is_training = tf.get_collection('istrainvar')[0]
    # Note: is_training is tf.placeholder(tf.bool) type
    return tf.cond(
        is_training, lambda: batch_norm(inputT,
                                        is_training=True,
                                        center=True,
                                        scale=True,
                                        decay=0.9,
                                        updates_collections=None,
                                        scope=scope),
        lambda: batch_norm(inputT,
                           is_training=False,
                           center=True,
                           scale=True,
                           decay=0.9,
                           updates_collections=None,
                           scope=scope,
                           reuse=True))
Code example #19
        def create_conv(input, in_channels, out_channels, weight_set=[]):
            # caution: the mutable default argument is shared across calls;
            # callers should pass their own list to collect weights
            input = batch_norm(input)
            w = weight_var([patch_size, patch_size, in_channels, out_channels])
            b = weight_var([out_channels])
            weight_set.append(w)
            weight_set.append(b)

            conv = tf.nn.conv2d(input, w, strides=[1, 2, 2, 1], padding='SAME')
            activation = leaky_relu(conv + b)
            return activation
Code example #20
File: utils.py Project: martinkersner/maml
def normalize(inp, activation, reuse, scope):
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=reuse, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        else:
            return inp
Code example #21
File: none_cond_DCGAN.py Project: VikingWong/GANs
def discriminator(image, reuse=False, name='discriminator'):

    with tf.name_scope(name):

        if reuse:
            tf.get_variable_scope().reuse_variables()

        h0 = lrelu(conv2d(image, DF, name='d_h0_conv'), name='d_h0_lrelu')
        h1 = lrelu(batch_norm(conv2d(h0, DF * 2, name='d_h1_conv'),
                              name='d_h1_bn'),
                   name='d_h1_lrelu')
        h2 = lrelu(batch_norm(conv2d(h1, DF * 4, name='d_h2_conv'),
                              name='d_h2_bn'),
                   name='d_h2_lrelu')
        h3 = lrelu(batch_norm(conv2d(h2, DF * 8, name='d_h3_conv'),
                              name='d_h3_bn'),
                   name='d_h3_lrelu')
        h4 = fully_connected(tf.reshape(h3, [BATCH_SIZE, -1]), 1, 'd_h4_fc')

        return tf.nn.sigmoid(h4), h4
Code example #22
File: deep_learn.py Project: wcs-ai/selfWeb
 def conv2d(self,
            IMG,
            FILTER,
            bas,
            training,
            STRIDE=[1, 1, 1, 1],
            PADDING='SAME'):
     cvd = tf.nn.conv2d(IMG, FILTER, strides=STRIDE, padding=PADDING) + bas
     norm_cvd = batch_norm(cvd, decay=0.9, is_training=training)
     elu_cvd = tf.nn.relu(norm_cvd)
     return elu_cvd
Code example #23
 def batch_norm_layer(self, x, train_phase, scope_bn):
     # note: train_phase is unused here; is_training is fixed to True and variable
     # sharing across calls is handled by reuse=tf.AUTO_REUSE
     with tf.variable_scope(scope_bn):
         return batch_norm(x,
                           decay=0.9,
                           center=True,
                           scale=True,
                           updates_collections=None,
                           is_training=True,
                           reuse=tf.AUTO_REUSE,
                           trainable=True,
                           scope=scope_bn)
Code example #24
 def batchnorm(self):
     from tensorflow.contrib.layers.python.layers import batch_norm as batch_norm
     with tf.name_scope('batchnorm') as scope:
         input = self.last_layer
         # mean, var = tf.nn.moments(input, axes=[0, 1, 2])
         # self.batch_norm = tf.nn.batch_normalization(input, mean, var, offset=1, scale=1, variance_epsilon=1e-6)
         # self.last_layer=self.batch_norm
         train_op = batch_norm(input,
                               is_training=True,
                               center=False,
                               updates_collections=None,
                               scope=scope)
         test_op = batch_norm(input,
                              is_training=False,
                              updates_collections=None,
                              center=False,
                              scope=scope,
                              reuse=True)
         self.add(
             tf.cond(self.train_phase, lambda: train_op, lambda: test_op))
Code example #25
def conv_block(x, weight, bias, reuse, scope):
    # conv
    x = tf.nn.conv2d(x, weight, [1, 1, 1, 1], 'SAME') + bias
    # batch norm
    x = tf_layers.batch_norm(x,
                             activation_fn=tf.nn.relu,
                             reuse=reuse,
                             scope=scope)
    # # pooling
    # x = tf.nn.max_pool(x, [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    return x
Code example #26
def normalize(inp, activation, reuse, scope):
    # BE CAUTIOUS: here reuse is set to tf.AUTO_REUSE instead of the passed-in reuse argument
    if FLAGS.norm == 'batch_norm':
        return tf_layers.batch_norm(inp, activation_fn=activation, reuse=tf.AUTO_REUSE, scope=scope)
    elif FLAGS.norm == 'layer_norm':
        return tf_layers.layer_norm(inp, activation_fn=activation, reuse=tf.AUTO_REUSE, scope=scope)
    elif FLAGS.norm == 'None':
        if activation is not None:
            return activation(inp)
        else:
            return inp
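With tf.AUTO_REUSE, the explicit create-then-reuse pairing seen in the other examples becomes unnecessary: the scope creates its variables on the first call and silently reuses them on every later call. A small sketch of the semantics (names hypothetical):

a = tf.placeholder(tf.float32, [None, 4])
def dense(x):
    with tf.variable_scope('shared', reuse=tf.AUTO_REUSE):
        w = tf.get_variable('w', shape=[4, 4])  # created on the first call, reused afterwards
    return tf.matmul(x, w)
y1 = dense(a)  # creates 'shared/w'
y2 = dense(a)  # reuses 'shared/w' without an explicit reuse=True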
Code example #27
def batch_norm_layer(x, train_phase):
    bn_train = batch_norm(x,
                          decay=0.99,
                          center=True,
                          scale=True,
                          updates_collections=None,
                          is_training=train_phase,
                          reuse=None,
                          trainable=True,
                          scope='bn')
    return bn_train
Code example #28
def conv_batch_norm(x, is_train, scope='bn', decay=0.9, reuse_var=False):
    out = batch_norm(x,
                     decay=decay,
                     center=True,
                     scale=True,
                     updates_collections=None,
                     is_training=is_train,
                     reuse=reuse_var,
                     trainable=True,
                     scope=scope)
    return out
Code example #29
 def batch_norm_layer(self, x, train_phase, scope_bn):
     bn_train = batch_norm(x,
                           decay=self.batch_norm_decay,
                           center=True,
                           scale=True,
                           updates_collections=None,
                           is_training=True,
                           reuse=None,
                           trainable=True,
                           scope=scope_bn)
     bn_predict = batch_norm(x,
                             decay=self.batch_norm_decay,
                             center=True,
                             scale=True,
                             updates_collections=None,
                             is_training=False,
                             reuse=True,
                             trainable=True,
                             scope=scope_bn)
     z = tf.cond(train_phase, lambda: bn_train, lambda: bn_predict)
     return z
Code example #30
 def _batch_norm(self, x, is_training, scope="bn"):
     z = tf.cond(
         is_training, lambda: batch_norm(x,
                                         decay=0.9,
                                         center=True,
                                         scale=True,
                                         updates_collections=None,
                                         is_training=True,
                                         reuse=None,
                                         trainable=True,
                                         scope=scope),
         lambda: batch_norm(x,
                            decay=0.9,
                            center=True,
                            scale=True,
                            updates_collections=None,
                            is_training=False,
                            reuse=True,
                            trainable=False,
                            scope=scope))
     return z
Code example #31
File: unet_model.py Project: a2564109/hyspetracal
def conv2d(x, W, B, stride=1, padding='SAME', act_name='leaky_relu', train=True):
    with tf.name_scope('conv2d_bias'):
        y = tf.nn.conv2d(x, W, strides=[1, stride, stride, 1], padding=padding)
        y = tf.nn.bias_add(y, B)
    with tf.name_scope('BN'):
        # note: with the default updates_collections, the UPDATE_OPS collection must be
        # run during training for the moving statistics to be updated
        y = batch_norm(inputs=y, decay=0.9, is_training=train)
    with tf.name_scope(act_name):
        y = activation(y, ratio=0.2)  # project-specific leaky-ReLU helper
    return y
Code example #32
def bn(x, is_training):
    with tf.variable_scope('bn') as scope:
        bn_train = batch_norm(x,
                              decay=0.99,
                              center=True,
                              scale=True,
                              updates_collections=None,
                              is_training=True,
                              reuse=None,
                              trainable=True,
                              scope=scope)
        bn_inf = batch_norm(x,
                            decay=0.99,
                            center=True,
                            scale=True,
                            updates_collections=None,
                            is_training=False,
                            reuse=True,
                            trainable=True,
                            scope=scope)
    return tf.cond(is_training, lambda: bn_train, lambda: bn_inf)
Code example #33
def bn_layer(in_tensor, train_phase, scope_bn):
    bn_train = batch_norm(in_tensor,
                          decay=0.9,
                          center=True,
                          scale=True,
                          updates_collections=None,
                          is_training=True,
                          reuse=None,
                          trainable=True,
                          scope=scope_bn)
    bn_test = batch_norm(in_tensor,
                         decay=0.9,
                         center=True,
                         scale=True,
                         updates_collections=None,
                         is_training=False,
                         reuse=True,
                         trainable=True,
                         scope=scope_bn)
    out_tensor = tf.cond(train_phase, lambda: bn_train, lambda: bn_test)
    return out_tensor
Code example #34
 def batch_norm_layer(self, x, train_phase, scope_bn):
     bn_train = batch_norm(x,
                           decay=0.9,
                           center=True,
                           scale=True,
                           updates_collections=None,
                           is_training=True,
                           reuse=None,
                           trainable=True,
                           scope=scope_bn)
     bn_inference = batch_norm(x,
                               decay=0.9,
                               center=True,
                               scale=True,
                               updates_collections=None,
                               is_training=False,
                               reuse=True,
                               trainable=True,
                               scope=scope_bn)
     z = tf.cond(train_phase, lambda: bn_train, lambda: bn_inference)
     return z
Code example #35
def batch_norm(train_phase, x, decay=0.9, center=True, scale=True, label=""):
    with tf.variable_scope(label + "bn") as scope:
        bn_train = tf_layers.batch_norm(x,
                                        decay=decay,
                                        center=center,
                                        scale=scale,
                                        updates_collections=None,
                                        is_training=True,
                                        reuse=None,
                                        trainable=True,
                                        scope=scope)
        bn_test = tf_layers.batch_norm(x,
                                       decay=decay,
                                       center=center,
                                       scale=scale,
                                       updates_collections=None,
                                       is_training=False,
                                       reuse=True,
                                       trainable=True,
                                       scope=scope)
    return tf.cond(train_phase, lambda: bn_train, lambda: bn_test)
Code example #36
def create_conv_layer(name, input_tensor, in_channels, out_channels, is_training=True, activation='relu', kx=3, ky=3, stride_x=2, stride_y=2, batchnorm=False, padding='VALID', add=None, deconv=False):
    if not deconv:
        input_tensor = tf.pad(input_tensor, [[0, 0], [2, 2], [2, 2], [0, 0]], mode="CONSTANT")

    weights = tf.get_variable(name + 'weights', shape=[kx, ky, in_channels, out_channels],
                              initializer=tf.truncated_normal_initializer(stddev=np.sqrt(0.02 / kx / ky / in_channels)),
                              dtype=tf.float32)
    biases = tf.get_variable(name + 'biases', shape=[out_channels], initializer=tf.constant_initializer(0.0), dtype=tf.float32)

    if not deconv:
        t = tf.nn.conv2d(input_tensor, weights, [1, stride_x, stride_y, 1], padding=padding)
        s = tf.nn.bias_add(t, biases)
    else:
        batch = tf.shape(input_tensor)[0]
        size = tf.shape(input_tensor)[1]
        # conv2d_transpose expects the kernel as [kx, ky, out_channels, in_channels]
        t = tf.nn.conv2d_transpose(input_tensor, tf.transpose(weights, perm=[0, 1, 3, 2]),
                                   [batch, size * stride_x, size * stride_y, out_channels],
                                   [1, stride_x, stride_y, 1], padding='SAME', data_format="NHWC")
        s = tf.nn.bias_add(t, biases)

    if add is not None:  # residual connection
        s = s + add

    if batchnorm:
        n = batch_norm(s, decay=0.99, center=True, scale=True, updates_collections=None, is_training=is_training)
    else:
        n = s

    if activation == 'relu':
        return tf.nn.relu(n), weights, biases
    elif activation == 'sigmoid':
        return tf.nn.sigmoid(n), weights, biases
    elif activation == 'tanh':
        return tf.nn.tanh(n), weights, biases
    elif activation == 'linear':
        return n, weights, biases
Code example #37
File: batchnorm.py Project: Dtananaev/DepthNet
# UPDATE_OPS_COLLECTION is defined elsewhere in the project (typically the standard
# tf.GraphKeys.UPDATE_OPS collection).
def batch_norm_layer(x, train_phase, scope_bn, reuse=None):
    with tf.variable_scope(scope_bn, 'BatchNorm', [x], reuse=reuse):
        z = batch_norm(x,
                       decay=0.999,
                       center=True,
                       scale=True,
                       is_training=train_phase,
                       reuse=reuse,
                       trainable=True,
                       scope=scope_bn,
                       updates_collections=UPDATE_OPS_COLLECTION)
        return z
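Unlike the updates_collections=None examples above, this variant routes the moving-average updates into a collection, so the training loop must run them explicitly. The canonical pattern, assuming UPDATE_OPS_COLLECTION is the standard tf.GraphKeys.UPDATE_OPS (the loss/optimizer below are hypothetical):

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    # minimizing now also triggers the moving-mean/variance update ops
    train_op = tf.train.AdamOptimizer(1e-3).minimize(loss)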
Code example #38
 def batch_norm_layer(self, x, train_phase, scope_bn):  # BN layer
     bn_train = batch_norm(x,
                           decay=self.batch_norm_decay,
                           center=True,
                           scale=True,
                           updates_collections=None,
                           is_training=True,
                           reuse=tf.compat.v1.AUTO_REUSE,
                           trainable=True,
                           scope=scope_bn)  # training phase
     bn_inference = batch_norm(x,
                               decay=self.batch_norm_decay,
                               center=True,
                               scale=True,
                               updates_collections=None,
                               is_training=False,
                               reuse=tf.compat.v1.AUTO_REUSE,
                               trainable=True,
                               scope=scope_bn)  # inference phase, forward pass only
     z = tf.cond(train_phase, lambda: bn_train,
                 lambda: bn_inference)  # if train_phase is True, the first branch is taken
     return z
Code example #39
def Batch_norm(value, is_training=False, name='batch_norm'):
    '''
        Batch normalization; returns the batch-normalized result.

        args:
            value: the input; its first dimension is batch_size
            is_training: True means the training phase, during which the moving mean and
                  variance of the sample set are continuously updated. At test time this
                  must be set to False so the mean and variance accumulated over the
                  training set are used instead. Defaults to test mode.
            name: the scope name.
    '''
    return batch_norm(inputs=value, decay=0.9, updates_collections=None,
                      is_training=is_training, scope=name)
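Because is_training here is a plain Python bool, the phase is fixed when the graph is built: a training script passes True so the moving statistics are updated on every forward pass (updates_collections=None makes the update part of the op itself), while an inference script rebuilds the graph with False and restores the accumulated statistics from a checkpoint. A sketch (conv_out is hypothetical):

y = Batch_norm(conv_out, is_training=True, name='bn1')    # in the training script
# y = Batch_norm(conv_out, is_training=False, name='bn1') # in the inference script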
Code example #40
 def batch_norm_layer(self, x, train_phase, scope_bn):
     z = tf.cond(
         train_phase,
         lambda: batch_norm(x,
                            decay=self.hparams.batch_norm_decay,
                            center=True,
                            scale=True,
                            updates_collections=None,
                            is_training=True,
                            reuse=None,
                            trainable=True,
                            scope=scope_bn),
         lambda: batch_norm(x,
                            decay=self.hparams.batch_norm_decay,
                            center=True,
                            scale=True,
                            updates_collections=None,
                            is_training=False,
                            reuse=True,
                            trainable=False,
                            scope=scope_bn))
     return z
Code example #41
File: cpu_utility.py Project: seogi1004/mathrookie
def batch_norm_layer(inputT, is_training=True, scope=None):
    # Note: is_training is tf.placeholder(tf.bool) type
    return tf.cond(is_training,
                   lambda: batch_norm(inputT, is_training=True, center=True, scale=True, activation_fn=tf.nn.relu, decay=0.9, scope=scope),
                   lambda: batch_norm(inputT, is_training=False, center=True, scale=True, activation_fn=tf.nn.relu, decay=0.9, scope=scope, reuse=True))
Code example #42
def batch_normal(input, scope="scope", reuse=False):
    return batch_norm(input, epsilon=1e-5, decay=0.9, scale=True, scope=scope, reuse=reuse, fused=True, updates_collections=None)