Example #1
def ConvReluMaxPool(X, weights, bias):
    # Convolution -> bias -> ReLU -> 2x2 max-pool, all in NHWC layout.
    conv = nn.conv2d(X, weights, strides=[1, 1, 1, 1], padding="VALID", data_format="NHWC")
    conv_bias = nn.bias_add(conv, bias, data_format="NHWC")

    relu = nn.relu(conv_bias)
    maxpool = nn.max_pool2d(relu, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC")
    
    return maxpool
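A minimal usage sketch; the imports and tensor shapes below are assumptions, not part of the original example:

import tensorflow as tf
from tensorflow import nn

X = tf.random.normal([1, 28, 28, 3])       # hypothetical NHWC input
weights = tf.random.normal([3, 3, 3, 16])  # HWIO filter: 3x3, 3 channels in, 16 out
bias = tf.zeros([16])
out = ConvReluMaxPool(X, weights, bias)    # VALID conv -> 26x26, 2x2 pool -> [1, 13, 13, 16]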
Example #2
def ResizeConvReluMaxPool(X, weights, bias):
    resize = image.resize(X, [N + 2, N + 2])  # N (the input's spatial size) is defined in the enclosing scope

    conv = nn.conv2d(resize,
                     weights,
                     strides=[1, 1, 1, 1],
                     padding="VALID",
                     data_format="NHWC")
    conv_bias = nn.bias_add(conv, bias, data_format="NHWC")

    relu = nn.relu(conv_bias)
    maxpool = nn.max_pool2d(relu,
                            ksize=[1, 2, 2, 1],
                            strides=[1, 2, 2, 1],
                            padding="VALID",
                            data_format="NHWC",
                            name="output")

    return maxpool
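A usage sketch, assuming N is the input height/width that the enclosing scope defines (the shapes are illustrative):

import tensorflow as tf
from tensorflow import nn, image

N = 28                                         # assumption: spatial size from the enclosing scope
X = tf.random.normal([1, N, N, 3])
weights = tf.random.normal([3, 3, 3, 16])
bias = tf.zeros([16])
out = ResizeConvReluMaxPool(X, weights, bias)  # resize -> 30x30, conv -> 28x28, pool -> [1, 14, 14, 16]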
Example #3
    def _inception_layer(previous_layer, filters):
        # GoogLeNet-style Inception block: parallel 1x1, 3x3, 5x5 and pooled
        # branches whose outputs are concatenated along the channel axis.
        unfiltered_1_x_1_conv = nn.conv2d(previous_layer,
                                          filters[0],
                                          strides=(1, 1),
                                          padding="SAME")

        stacked_conv_1_x_1_conv_1 = nn.conv2d(previous_layer,
                                              filters=filters[1],
                                              strides=(1, 1),
                                              padding="SAME")
        stacked_conv_1_x_1_conv_2 = nn.conv2d(previous_layer,
                                              filters=filters[2],
                                              strides=(1, 1),
                                              padding="SAME")

        stacked_conv_3_x_3_conv = nn.conv2d(stacked_conv_1_x_1_conv_1,
                                            filters=filters[3],
                                            strides=(1, 1),
                                            padding="SAME")
        stacked_conv_5_x_5_conv = nn.conv2d(stacked_conv_1_x_1_conv_2,
                                            filters=filters[4],
                                            strides=(1, 1),
                                            padding="SAME")

        pooled_layer = nn.max_pool2d(previous_layer, [3, 3],
                                     strides=(1, 1),
                                     padding="SAME")
        pooled_layer_conv = nn.conv2d(pooled_layer,
                                      filters=filters[5],
                                      strides=(1, 1),
                                      padding="SAME")

        return tf.concat([
            unfiltered_1_x_1_conv,
            stacked_conv_3_x_3_conv,
            stacked_conv_5_x_5_conv,
            pooled_layer_conv,
        ], axis=3)
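A usage sketch, calling the layer as a free function; the filter shapes (HWIO) and channel counts are assumptions chosen only to make the branches line up:

import tensorflow as tf
from tensorflow import nn

x = tf.random.normal([1, 32, 32, 192])
filters = [
    tf.random.normal([1, 1, 192, 64]),   # direct 1x1 branch
    tf.random.normal([1, 1, 192, 96]),   # 1x1 reduction before the 3x3
    tf.random.normal([1, 1, 192, 16]),   # 1x1 reduction before the 5x5
    tf.random.normal([3, 3, 96, 128]),   # 3x3 branch
    tf.random.normal([5, 5, 16, 32]),    # 5x5 branch
    tf.random.normal([1, 1, 192, 32]),   # 1x1 after the 3x3 max-pool
]
out = _inception_layer(x, filters)       # -> [1, 32, 32, 64 + 128 + 32 + 32]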
Example #4
def vggBlock(X, weights1, bias1, weights2, bias2):
    # VGG-style block: two convolutions followed by 2x2 max pooling, in NCHW layout.
    conv1 = nn.conv2d(X,
                      weights1,
                      strides=[1, 1, 1, 1],
                      padding="VALID",
                      data_format="NCHW")
    conv1_bias = nn.bias_add(conv1, bias1, data_format="NCHW")
    relu1 = nn.relu(conv1_bias)

    conv2 = nn.conv2d(relu1,
                      weights2,
                      strides=[1, 1, 1, 1],
                      padding="SAME",
                      data_format="NCHW")
    conv2_bias = nn.bias_add(conv2, bias2, data_format="NCHW")

    relu2 = nn.relu(conv2_bias)
    # Pool in NCHW layout to match the convolutions; ksize/strides are ordered [N, C, H, W].
    # (The original passed NHWC-ordered arguments here, contradicting the NCHW convs.)
    maxpool = nn.max_pool2d(relu2,
                            ksize=[1, 1, 2, 2],
                            strides=[1, 1, 2, 2],
                            padding="VALID",
                            data_format="NCHW")

    return maxpool
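A usage sketch with assumed shapes (note that NCHW convolutions generally require a GPU build of TensorFlow):

import tensorflow as tf
from tensorflow import nn

X = tf.random.normal([1, 3, 34, 34])     # hypothetical NCHW input
w1 = tf.random.normal([3, 3, 3, 64])
b1 = tf.zeros([64])
w2 = tf.random.normal([3, 3, 64, 64])
b2 = tf.zeros([64])
out = vggBlock(X, w1, b1, w2, b2)        # VALID conv -> 32x32, SAME conv -> 32x32, pool -> [1, 64, 16, 16]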
Example #5
    def Net(self, input, prob=0.0):
        '''
        Define the network (an AlexNet-style architecture).
        Use the init_weight() and init_bias() helpers to initialize the parameters,
        for example:
            conv1_W = self.init_weight((3, 3, 1, 6))
            conv1_b = self.init_bias(6)
        '''
        # Define the parameters
        conv1_W = self.init_weight([11, 11, 3, 96])
        conv1_b = self.init_bias(96)

        conv2_W = self.init_weight([5, 5, 96, 256])
        conv2_b = self.init_bias(256)

        conv3_W = self.init_weight([3, 3, 256, 384])
        conv3_b = self.init_bias(384)

        conv4_W = self.init_weight([3, 3, 384, 384])
        conv4_b = self.init_bias(384)

        conv5_W = self.init_weight([3, 3, 384, 256])
        conv5_b = self.init_bias(256)

        fc1_W = self.init_weight([5 * 5 * 256, 4096])
        fc1_b = self.init_bias(4096)

        fc2_W = self.init_weight([4096, 4096])
        fc2_b = self.init_bias(4096)

        fc3_W = self.init_weight([4096, 10])
        fc3_b = self.init_bias(10)

        '''Define the architecture of the network'''
        # N=(W-F+2P)/S+1
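        # e.g. layer 1 below: floor((224 - 11 + 2*0) / 4) + 1 = 54, matching the commented shapes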
        # Layer 1: Convolutional. Input = 224x224x3. Output = 54x54x96.
        x = nn.conv2d(input, conv1_W, strides=[1, 4, 4, 1], padding='VALID') + conv1_b
        x = nn.relu(x, name="conv_layer_01/relu")
        # LRN. depth_radius = n/2 = 3, bias = k = 2. Following the paper.
        x = nn.lrn(x, 3, bias=2.0, alpha=1e-4, beta=0.75, name="conv_layer_01/lrn1")
        # Pooling. Input = 54x54x96. Output = 26x26x96.
        x = nn.max_pool2d(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                          padding='VALID', name="conv_layer_01/pooling")  # [batch, height, width, channels]

        # Layer 2: Convolutional. Input = 26x26x96. Output = 26x26x256.
        x = nn.conv2d(x, conv2_W, strides=[1, 1, 1, 1], padding='SAME') + conv2_b
        x = nn.relu(x, name="conv_layer_02/relu")
        # LRN. depth_radius = n/2 = 3, bias = k = 2. Following the paper.
        x = nn.lrn(x, 3, bias=2.0, alpha=1e-4, beta=0.75, name="conv_layer_02/lrn1")
        # Pooling. Input = 26x26x256. Output = 12x12x256.
        x = nn.max_pool2d(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name="conv_layer_02/pooling")

        # Layer 3: Convolutional. Input = 12x12x256. Output = 12x12x384.
        x = nn.conv2d(x, conv3_W, strides=[1, 1, 1, 1], padding='SAME') + conv3_b
        x = nn.relu(x, name="conv_layer_03/relu")

        # Layer 4: Convolutional. Input = 12x12x384. Output = 12x12x384.
        x = nn.conv2d(x, conv4_W, strides=[1, 1, 1, 1], padding='SAME') + conv4_b
        x = nn.relu(x, name="conv_layer_04/relu")

        # Layer 5: Convolutional. Input = 12x12x384. Output = 12x12x256.
        x = nn.conv2d(x, conv5_W, strides=[1, 1, 1, 1], padding='SAME') + conv5_b
        x = nn.relu(x, name="conv_layer_05/relu")
        # Pooling. Input = 12x12x256. Output = 5x5x256.
        x = nn.max_pool2d(x, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name="conv_layer_05/pooling")

        # Layer 6: Fully Connected. Input = 5x5x256=6400. Output = 4096.
        x = flatten(x)  # flatten() is assumed imported elsewhere (e.g. tf.compat.v1.layers.flatten)
        x = tf.matmul(x, fc1_W) + fc1_b
        x = nn.relu(x, name="full_layer_01/relu")
        # Dropout
        x = nn.dropout(x, rate=prob)

        # Layer 7: Fully Connected. Input = 4096. Output = 4096.
        x = tf.matmul(x, fc2_W) + fc2_b
        x = nn.relu(x, name="full_layer_02/relu")
        # Dropout
        x = nn.dropout(x, rate=prob)

        # Layer 8: Fully Connected. Input = 4096. Output = 10.
        x = tf.add(tf.matmul(x, fc3_W), fc3_b, name="full_layer_03/linear")

        logits = x  # logits.shape = (batch_size, 10)

        return logits
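The init_weight()/init_bias() helpers referenced in the docstring are not part of this snippet; a plausible minimal sketch, assuming truncated-normal weight initialization:

    # Hypothetical helpers matching the docstring's usage; the originals are not shown.
    def init_weight(self, shape):
        return tf.Variable(tf.random.truncated_normal(shape, stddev=0.1))

    def init_bias(self, size):
        return tf.Variable(tf.zeros([size]))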
Example #6
def _max_pool(input_layer, size, stride):
    return nn.max_pool2d(input_layer, size, strides=stride, padding="SAME")

def execute(self, inputs):  # method excerpt; self.size and self.stride are set on the owning class
    myInput = tf.concat(inputs, -1)
    return nn.max_pool2d(myInput, [self.size, self.size], self.stride,
                         "VALID")