Example #1
def ResidualBlock(x,
                  stride,
                  filter_size,
                  i,
                  padding='VALID',
                  activation=activation.ReLU,
                  BatchNorm=False):
    block_name = "Residual_Block_" + str(i)
    with tf.variable_scope(block_name):
        # Reflection-pad by half the kernel width so the VALID convolutions
        # preserve the spatial dimensions of x.
        p = int((filter_size[0] - 1) / 2)
        y = tf.pad(x, [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")
        y = conv2d(
            y,
            stride,
            [filter_size[0], filter_size[1], filter_size[2], filter_size[2]],
            padding=padding,
            i="c1")
        y = tf.pad(activation(y), [[0, 0], [p, p], [p, p], [0, 0]], "REFLECT")
        y = conv2d(
            y,
            stride,
            [filter_size[0], filter_size[1], filter_size[2], filter_size[2]],
            padding=padding,
            i="c2")
        # Skip connection; assumes stride 1 so y and x have matching shapes.
        output = y + x
    return output
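A hypothetical call site, assuming a TF1-style graph and that the project's own conv2d and activation helpers are importable; the shapes and names below are illustrative, not taken from the original repository:

import tensorflow as tf  # TF 1.x-style API, matching the snippet

# Illustrative only: two residual blocks on a 128-channel feature map with
# 3x3 kernels. stride=1 is assumed here (the exact stride format depends on
# the project's conv2d helper) so the skip connection shapes line up.
x = tf.placeholder(tf.float32, [8, 64, 64, 128])
h = x
for i in range(2):
    h = ResidualBlock(h, stride=1, filter_size=[3, 3, 128], i=i)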
Example #2
def forward(X, weights, biases, activations):
    # Layer-by-layer forward pass through a fully connected network.
    Z_list = []  # pre-activation outputs, one per layer
    A_list = []  # post-activation outputs, one per layer

    for i in range(len(weights)):
        Z_list.append(
            np.dot(weights[i], X if len(A_list) == 0 else A_list[-1]) +
            biases[i])
        A_list.append(activation(activations[i], Z_list[-1]))

    return Z_list, A_list
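The activation(...) helper called above is not shown; a minimal sketch of the kind of name-based dispatcher it is assumed to be (the supported names are guesses), followed by an illustrative call to forward:

import numpy as np

def activation(name, Z):
    # Hypothetical dispatcher matching the call activation(activations[i], Z).
    if name == 'relu':
        return np.maximum(0.0, Z)
    if name == 'sigmoid':
        return 1.0 / (1.0 + np.exp(-Z))
    if name == 'tanh':
        return np.tanh(Z)
    raise ValueError("unknown activation: " + name)

# Example: a 4 -> 5 -> 3 network applied to a single column vector.
X = np.random.randn(4, 1)
weights = [np.random.randn(5, 4), np.random.randn(3, 5)]
biases = [np.random.randn(5, 1), np.random.randn(3, 1)]
Z_list, A_list = forward(X, weights, biases, ['relu', 'sigmoid'])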
Example #3
    def forward(self, x):
        """
        Argument:
            x (np.array): (batch size, input_size)
        Return:
            out (np.array): (batch size, output_size)
        """

        # Complete the forward pass through your entire MLP.
        self.batch_size = x.shape[0]
        self.out = x

        for linear, activation, bn in zip(self.linear_layers, self.activations,
                                          self.bn_layers):
            if bn:  # bn is falsy (e.g. None) for layers without batch norm
                self.out = activation(bn(linear(self.out)))
            else:
                self.out = activation(linear(self.out))

        return self.out
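The three zipped lists are assumed to hold one entry per layer, with a falsy placeholder (such as None) in bn_layers for layers that skip batch norm; a stand-alone sketch of that pattern using plain callables instead of the real layer objects (all names here are hypothetical):

import numpy as np

W1, W2 = np.random.randn(4, 8), np.random.randn(8, 2)
linear_layers = [lambda a: a @ W1, lambda a: a @ W2]
activations   = [lambda a: np.maximum(0.0, a),  # ReLU
                 lambda a: a]                   # identity on the output layer
bn_layers     = [lambda a: (a - a.mean(0)) / (a.std(0) + 1e-5),  # crude batch norm
                 None]                                           # no BN on layer 2

out = np.random.randn(16, 4)  # (batch_size, input_size)
for linear, act, bn in zip(linear_layers, activations, bn_layers):
    out = act(bn(linear(out))) if bn else act(linear(out))
print(out.shape)  # (16, 2)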
Example #4
def perceptron(x, w, activation, theta):
    '''Takes an input vector and a weight vector, applies the activation
    to their weighted sum, and returns 1 if the activation output is
    greater than or equal to theta, otherwise 0.'''

    linear_sum = sum(i * j for i, j in zip(x, w))

    if activation(linear_sum, theta) >= theta:
        return 1
    else:
        return 0
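A quick illustrative use: with a pass-through activation (which ignores theta and returns the weighted sum unchanged) and theta = 1.5, the perceptron computes a logical AND of two binary inputs:

def identity(s, theta):
    # Pass-through activation with the two-argument signature used above.
    return s

for x in [(0, 0), (0, 1), (1, 0), (1, 1)]:
    print(x, perceptron(x, [1, 1], identity, 1.5))
# (0, 0) 0, (0, 1) 0, (1, 0) 0, (1, 1) 1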
Example #5
def defc(input,
         zdim,
         output,
         i=None,
         activation=activation.ReLU,
         BatchNorm=False):
    # Fully connected layer mapping the latent vector to a flattened feature map.
    d_fc_w, d_fc_b = _fc_variable([zdim, output[1] * output[2] * output[3]],
                                  name="defc{0}".format(i))
    h_fc_r = tf.matmul(input, d_fc_w) + d_fc_b
    h_fc_a = activation(h_fc_r)
    # Reshape to the target [batch, height, width, channels] given by `output`.
    defc = tf.reshape(h_fc_a, [output[0], output[1], output[2], output[3]])
    if BatchNorm:
        defc = batchNorm(defc)
    tf.summary.histogram("defc{0}".format(i), defc)
    return defc
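A hypothetical call in a TF1-style decoder, assuming the project's _fc_variable, batchNorm, and activation helpers are in scope; the shapes are illustrative:

import tensorflow as tf  # TF 1.x-style API, as in the snippet

# Illustrative only: map a 100-dim latent code to an 8x8x256 feature map.
z = tf.placeholder(tf.float32, [16, 100])        # (batch, zdim)
h = defc(z, zdim=100, output=[16, 8, 8, 256], i=0)
# h has shape [16, 8, 8, 256] and can feed subsequent deconv/upsampling layers.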