Example #1
def forward(self, X):
    # assumes `import tensorflow as tf` and the custom swish helper
    # sketched below
    conv_out = tf.nn.conv2d(X, self.W, strides=self.s, padding=self.p)
    # add a bias vector:
    conv_out = tf.nn.bias_add(conv_out, self.b)
    # downsample with non-overlapping max pooling:
    p1, p2 = self.poolsz
    pool_out = tf.nn.max_pool(conv_out,
                              ksize=[1, p1, p2, 1],
                              strides=[1, p1, p2, 1],
                              padding='SAME')
    return swish(pool_out, beta=10, tensorflow=True)
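All of these examples call a custom swish helper that is not shown. A minimal sketch of what it presumably looks like, assuming the standard definition swish(x) = x * sigmoid(beta * x) and a tensorflow flag that switches backends (the helper's real signature lives in an unshown util module, so this is an assumption; Example #2's variant evidently also accepts a name argument):

import numpy as np
import tensorflow as tf

def swish(x, beta=1.0, tensorflow=False):
    # Swish activation: x * sigmoid(beta * x).
    # NOTE: this helper is an assumption; the original util module
    # is not included in the examples.
    if tensorflow:
        return x * tf.nn.sigmoid(beta * x)
    # NumPy fallback; the Theano examples below presumably route
    # through theano.tensor.nnet.sigmoid instead
    return x * (1.0 / (1.0 + np.exp(-beta * x)))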
Example #2
def activate(self, linear, name):
    # dispatch on the activation name; assumes `import tensorflow as tf`
    # and a util module providing swish
    if name == 'sigmoid':
        return tf.nn.sigmoid(linear, name='encoded')
    elif name == 'softmax':
        return tf.nn.softmax(linear, name='encoded')
    elif name == 'linear':
        return linear
    elif name == 'tanh':
        return tf.nn.tanh(linear, name='encoded')
    elif name == 'relu':
        return tf.nn.relu(linear, name='encoded')
    elif name == 'swish':
        return util.swish(linear, name='encoded')
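A typical call site, assuming a fully connected autoencoder layer (the X, W, and b names here are hypothetical, not from the source):

encoded = self.activate(tf.matmul(X, self.W) + self.b, 'swish')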
Example #3
def forward(self, X):
    # Theano version; assumed imports:
    #   from theano.tensor.nnet import conv2d
    #   from theano.tensor.signal.pool import pool_2d
    conv_out = conv2d(
        input=X,
        filters=self.W,
        border_mode=self.border_mode,
        subsample=self.s,
    )
    pooled_out = pool_2d(
        input=conv_out,
        ws=self.poolsz,
        ignore_border=True,
        mode='max',
    )
    # dimshuffle('x', 0, 'x', 'x') reshapes the bias to (1, C, 1, 1)
    # so it broadcasts over batch, height, and width
    return swish(pooled_out + self.b.dimshuffle('x', 0, 'x', 'x'))
Example #4
    def forward(self, X, is_training, decay=0.9):
        Z = tf.matmul(X, self.W)
        if is_training:
            batch_mean, batch_var = tf.nn.moments(Z, [0])
            # update the running mean and running variance:
            update_rn_mean = tf.assign(
                self.rn_mean, self.rn_mean * decay + batch_mean * (1 - decay))

            update_rn_var = tf.assign(
                self.rn_var, self.rn_var * decay + batch_var * (1 - decay))
            # to ensure these updates run on every training step,
            # wrap the normalization in a control dependency:
            with tf.control_dependencies([update_rn_mean, update_rn_var]):
                Z = tf.nn.batch_normalization(Z, batch_mean, batch_var,
                                              self.betta, self.gamma, 1e-4)
        else:
            Z = tf.nn.batch_normalization(Z, self.rn_mean, self.rn_var,
                                          self.betta, self.gamma, 1e-4)

        return swish(Z, tensorflow=True)
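Example #4 assumes the layer owns several variables created elsewhere, presumably in __init__. A minimal sketch of that setup, matching the TF1-style API used above (M1 and M2 are hypothetical layer sizes; the running statistics must be non-trainable since forward() updates them manually via tf.assign, and the source spells the offset betta rather than beta):

def __init__(self, M1, M2):
    # hypothetical constructor; shapes inferred from forward() above
    self.W = tf.Variable(
        (np.random.randn(M1, M2) * np.sqrt(2.0 / M1)).astype(np.float32))
    # learnable scale and offset for batch normalization
    self.gamma = tf.Variable(np.ones(M2, dtype=np.float32))
    self.betta = tf.Variable(np.zeros(M2, dtype=np.float32))
    # running statistics, updated manually in forward(), so not trained
    self.rn_mean = tf.Variable(np.zeros(M2, dtype=np.float32),
                               trainable=False)
    self.rn_var = tf.Variable(np.ones(M2, dtype=np.float32),
                              trainable=False)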
Example #5
def forward(self, X):
    # Theano/NumPy version of a fully connected layer
    return swish(X.dot(self.W) + self.b)
Example #6
def forward(self, X):
    # TensorFlow version of the same fully connected layer
    return swish(tf.matmul(X, self.W) + self.b, tensorflow=True)
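Examples #5 and #6 are the same fully connected forward pass in Theano/NumPy and TensorFlow form. With the swish sketch from Example #1, the NumPy variant can be exercised directly (all names here are hypothetical):

import numpy as np

X = np.random.randn(4, 3).astype(np.float32)  # batch of 4, input dim 3
W = np.random.randn(3, 2).astype(np.float32)  # weights: 3 -> 2
b = np.zeros(2, dtype=np.float32)
out = swish(X.dot(W) + b)  # NumPy path; beta defaults to 1.0
print(out.shape)           # (4, 2)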