Example #1
0
    def forward(self, x):
        """Performs forward propagation.

        This function can be called using ``__call__`` method.
        See following example of method usage.

        Args:
            x (ndarray): Input data as ndarray.

        Returns:
            (Node): Predicted values for the input ndarray.

        Example:
            >>> import renom as rm
            >>> import numpy as np
            >>> from renom_rg.api.regression.gcnn import GCNet
            >>> n, c, variables, neighbors = (2, 10, 20, 5)
            >>> x = rm.Variable(np.random.rand(n, c, variables, neighbors))
            >>> feature_graph = np.random.randint(0, variables - 1, (variables, neighbors))
            >>> model = GCNet(feature_graph)
            >>> t = model.forward(x)
            >>> t.shape
            (2, 1)

        """
        # Three graph-convolution stages, each followed by ReLU and dropout.
        h = rm.relu(self.gc1(x))
        h = self.dropout(h)
        h = rm.relu(self.gc2(h))
        h = self.dropout(h)
        h = rm.relu(self.gc3(h))
        h = self.dropout(h)
        # Move the channel axis behind the variable axis before flattening.
        h = rm.flatten(h.reshape(h.shape[0], -1, h.shape[1]))
        # Fully connected head producing a single regression output per sample.
        h = self.dropout(rm.relu(self.fc1(h)))
        h = self.dropout(rm.relu(self.fc2(h)))
        h = self.fc3(h)
        return h
Example #2
0
    def forward(self, x):
        """FCN-style forward pass that fuses the pool4 skip connection
        with the 2x-upsampled coarse score map, then upsamples 16x."""
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        pool4 = h  # keep the pool4 feature map for the skip connection
        h = self.block5(h)

        h = rm.relu(self.fc6(h))
        h = rm.relu(self.fc7(h))

        h = self.score_fr(h)
        upscore2 = self.upscore2(h)

        score_pool4 = self.score_pool4(pool4)

        # Fuse the coarse upsampled scores with the pool4 scores, then
        # upsample the fused map to full resolution.
        fused = upscore2 + score_pool4
        return self.upscore16(fused)
 def forward(self, x):
     """Encode *x* into Gaussian latent parameters ``(z_mean, z_log_var)``.

     Args:
         x: Input batch (presumably NCHW — TODO confirm with caller).

     Returns:
         tuple: ``(z_mean, z_log_var)`` from the last two FC layers.
     """
     hidden = self.input(x)
     #print(hidden.shape)
     hidden = rm.max_pool2d(hidden, stride=1, padding=1)
     #print(hidden.shape)
     layers = self.hidden._layers
     # Layer layout per block: `depth` pairs of (layer, layer) plus one
     # trailing transition layer, i.e. depth*2 + 1 layers per block.
     for i in range(self.blocks):
         offset = i * (self.depth * 2 + 1)
         for j in range(self.depth):
             sub = rm.relu(layers[offset + 2 * j](hidden))
             #print('{}.{} b {}'.format(i,j,sub.shape))
             sub = layers[offset + 2 * j + 1](sub)
             #print('{}.{} + {}'.format(i,j,sub.shape))
             if self.dropout:
                 sub = rm.dropout(sub)
             # Dense-style growth: concatenate new features onto the input.
             hidden = rm.concat(hidden, sub)
             #print('{}.{} = {}'.format(i,j,hidden.shape))
         # Transition layer at the end of the block, then downsample 2x.
         offset = (i + 1) * (self.depth * 2 + 1) - 1
         hidden = layers[offset](hidden)
         #print('{}.{} - {}'.format(i,j,hidden.shape))
         hidden = rm.average_pool2d(hidden, stride=2, padding=1)
         #print('{}.{} > {}'.format(i,j,hidden.shape))
     x = rm.flatten(hidden)
     layers = self.fcnn._layers
     # All FC layers except the final two get ReLU (+ optional dropout).
     for i in range(len(layers[:-2])):
         x = rm.relu(layers[i](x))
         #print(x.shape)
         if self.dropout:
             x = rm.dropout(x, dropout_ratio=0.5)
     z_mean = layers[-2](x)
     z_log_var = layers[-1](x)
     return z_mean, z_log_var
 def forward(self, x):
     """Encode *x* into Gaussian latent parameters ``(z_mean, z_log_var)``."""
     layers = self.hidden._layers
     for i in range(self.depth):
         # With batch normalization, the layer list interleaves four layers
         # per depth step (indices 4i..4i+3); otherwise two (2i, 2i+1).
         if self.batch_normal:
             x = layers[i * 4](x)
             x = rm.relu(layers[i * 4 + 1](x))
             x = layers[i * 4 + 2](x)
             x = rm.relu(layers[i * 4 + 3](x))
         else:
             x = rm.relu(layers[i * 2](x))
             #print(x.shape)
             x = rm.relu(layers[i * 2 + 1](x))
             #print(x.shape)
         # Last stage uses average pooling; earlier stages use max pooling.
         if i == self.depth - 1:
             x = rm.average_pool2d(x, stride=2, padding=(1, 1))
         else:
             x = rm.max_pool2d(x, stride=2, padding=(1, 1))
         #print(x.shape)
     x = rm.flatten(x)
     layers = self.fcnn._layers
     # All FC layers except the final two get ReLU (+ optional dropout).
     for i in range(len(layers[:-2])):
         x = rm.relu(layers[i](x))
         #print(x.shape)
         if self.dropout:
             x = rm.dropout(x, dropout_ratio=0.5)
     z_mean = layers[-2](x)
     z_log_var = layers[-1](x)
     return z_mean, z_log_var
Example #5
0
    def forward(self, x):
        """FCN-16s forward pass: fuse cropped pool4 scores with the 2x
        upsampled coarse scores, upsample 16x, and crop to the input size."""
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        pool4 = h  # skip-connection source
        h = self.block5(h)

        h = self.dr1(rm.relu(self.fc6(h)))
        h = self.dr2(rm.relu(self.fc7(h)))

        upscore2 = self.upscore2(self.score_fr(h))

        # Scale the skip features down before scoring.
        score_pool4 = self.score_pool4(0.01 * pool4)

        # Crop the pool4 scores so they align with the upsampled score map.
        cropped = score_pool4[:, :, 5:5 + upscore2.shape[2],
                              5:5 + upscore2.shape[3]]
        fused = self.upscore16(upscore2 + cropped)

        # Final crop back to the spatial size of the input image.
        return fused[:, :, 27:27 + x.shape[2], 27:27 + x.shape[3]]
Example #6
0
    def forward(self, x):
        """Reduction block: three parallel branches concatenated on channels."""
        # Branch 1: 1x1 reduction followed by two conv+BN+ReLU stages.
        branch1 = rm.relu(self.batch_norm1_reduced(self.conv1_reduced(x)))
        branch1 = rm.relu(self.batch_norm1_1(self.conv1_1(branch1)))
        branch1 = rm.relu(self.batch_norm1_2(self.conv1_2(branch1)))

        # Branch 2: single conv+BN+ReLU.
        branch2 = rm.relu(self.batch_norm2(self.conv2(x)))

        # Branch 3: strided max pooling of the raw input.
        branch3 = rm.max_pool2d(x, filter=3, stride=2)
        return rm.concat([branch1, branch2, branch3])
 def forward(self, x):
     """Forward pass: two conv/pool stages then a two-layer FC head.

     Bug fix: the original fed ``t5`` (the raw flattened features) into
     the final layer, silently discarding ``t6`` — the ``_l5`` + ReLU +
     dropout result that was computed and never used.
     """
     t1 = rm.relu(self._l1(x))
     t2 = self._sd(self._pool(rm.relu(self._l2(t1))))
     t3 = rm.relu(self._l3(t2))
     t4 = self._sd(self._pool(rm.relu(self._l4(t3))))
     t5 = rm.flatten(t4)
     t6 = rm.dropout(rm.relu(self._l5(t5)))
     # Feed the hidden representation (t6), not the flattened t5.
     t7 = self._l6(t6)
     return t7
Example #8
0
File: vgg.py  Project: clockfly/ReNomIMG
 def forward(self, x):
     """VGG-style forward pass: five conv blocks then a 3-layer FC head
     with dropout between the fully connected layers."""
     h = self.block1(x)
     h = self.block2(h)
     h = self.block3(h)
     h = self.block4(h)
     h = self.block5(h)
     h = rm.flatten(h)
     h = self.dropout1(rm.relu(self.fc1(h)))
     h = self.dropout2(rm.relu(self.fc2(h)))
     return self.fc3(h)
 def forward(self, x):
     """Decode *x* into an image-shaped output in [0, 1] (sigmoid)."""
     h = self.transform(x)
     #print(h.shape)
     # Reshape the flat transform output into (N, C, H, W) feature maps.
     h = rm.reshape(h, (len(x), self.channels, self.dim, self.dim))
     #print(h.shape)
     layers = self.hidden._layers
     # NOTE(review): when batch_normal is set, this indexes layers[2*i] and
     # layers[2*i+1] while looping over range(len(layers)) — for i in the
     # upper half that would read past the end of the list. Verify how
     # self.hidden is built; the bound may need to be len(layers) // 2.
     for i in range(len(layers)):
         if self.batch_normal:
             h = layers[2 * i](h)
             h = rm.relu(layers[2 * i + 1](h))
         else:
             h = rm.relu(layers[i](h))
         #print(h.shape)
     # Sigmoid keeps the reconstructed output in [0, 1].
     h = rm.sigmoid(self.output(h))
     return h
Example #10
0
    def forward(self, x):
        """Inception-style block: four parallel branches concatenated."""
        # Branch 1: average pooling followed by conv+BN+ReLU.
        pooled = rm.average_pool2d(x, filter=3, padding=1)
        branch1 = rm.relu(self.batch_norm1(self.conv1(pooled)))

        # Branch 2: single conv+BN+ReLU.
        branch2 = rm.relu(self.batch_norm2(self.conv2(x)))

        # Branch 3: three stacked conv+BN+ReLU stages.
        branch3 = rm.relu(self.batch_norm3_1(self.conv3_1(x)))
        branch3 = rm.relu(self.batch_norm3_2(self.conv3_2(branch3)))
        branch3 = rm.relu(self.batch_norm3_3(self.conv3_3(branch3)))

        # Branch 4: five stacked conv+BN+ReLU stages.
        branch4 = rm.relu(self.batch_norm4_1(self.conv4_1(x)))
        branch4 = rm.relu(self.batch_norm4_2(self.conv4_2(branch4)))
        branch4 = rm.relu(self.batch_norm4_3(self.conv4_3(branch4)))
        branch4 = rm.relu(self.batch_norm4_4(self.conv4_4(branch4)))
        branch4 = rm.relu(self.batch_norm4_5(self.conv4_5(branch4)))

        return rm.concat([branch1, branch2, branch3, branch4])
Example #11
0
    def forward(self, x):
        """FCN-8s forward pass.

        Fuses scores from the pool3 and pool4 skip connections with the
        coarse stream, upsampling in stages (2x, 2x, 8x) and cropping at
        each fusion so spatial sizes align; a final crop matches the input.
        """
        t = x
        t = self.block1(t)
        t = self.block2(t)
        t = self.block3(t)
        pool3 = t  # skip-connection source
        t = self.block4(t)
        pool4 = t  # skip-connection source
        t = self.block5(t)

        t = rm.relu(self.fc6(t))
        t = self.dr1(t)
        t = rm.relu(self.fc7(t))
        t = self.dr2(t)

        t = self.score_fr(t)
        t = self.upscore2(t)
        upscore2 = t

        # Scale the skip features down before scoring.
        pool4 = 0.01 * pool4
        t = self.score_pool4(pool4)
        score_pool4 = t

        # Crop pool4 scores to the upsampled map, then fuse by addition.
        score_pool4c = score_pool4[:, :, 5:5 + upscore2.shape[2],
                                   5:5 + upscore2.shape[3]]
        t = upscore2 + score_pool4c

        fuse_pool4 = t
        t = self.upscore_pool4(fuse_pool4)
        upscore_pool4 = t

        # pool3 skip gets an even smaller scale factor.
        pool3 = 0.0001 * pool3
        t = self.score_pool3(pool3)
        score_pool3 = t

        score_pool3c = score_pool3[:, :, 9:9 + upscore_pool4.shape[2],
                                   9:9 + upscore_pool4.shape[3]]
        t = upscore_pool4 + score_pool3c

        fuse_pool3 = t
        t = self.upscore8(fuse_pool3)
        upscore8 = t

        # Final crop back to the input's spatial extent.
        t = upscore8[:, :, 31:31 + x.shape[2], 31:31 + x.shape[3]]
        score = t

        return t
Example #12
0
def test_gpu_node_relu(a):
    """Check that relu and its gradient agree between GPU and CPU."""
    set_cuda_active(True)

    g1 = Variable(a)

    # GPU pass: forward sum(relu) and its gradient w.r.t. the input.
    gpu_loss = sum(rm.relu(g1))
    gpu_grad = gpu_loss.grad().get(g1)
    gpu_loss.to_cpu()

    # CPU pass over the same variable.
    set_cuda_active(False)
    cpu_loss = sum(rm.relu(g1))
    cpu_grad = cpu_loss.grad().get(g1)

    close(gpu_loss, cpu_loss)
    close(cpu_grad, gpu_grad)
Example #13
0
    def forward(self, x):
        """Coarse FCN forward pass: five conv blocks, FC scoring, and a
        single upsampling layer (no skip connections, no cropping)."""
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)

        h = rm.relu(self.fc6(h))
        h = rm.relu(self.fc7(h))

        h = self.score_fr(h)
        return self.upscore(h)
Example #14
0
    def forward(self, x):
        """Inception block: 1x1, 3-deep, 5-deep, and pooled branches."""
        branch1 = rm.relu(self.batch_norm1(self.conv1(x)))

        branch2 = rm.relu(self.batch_norm2_reduced(self.conv2_reduced(x)))
        branch2 = rm.relu(self.batch_norm2_1(self.conv2_1(branch2)))
        branch2 = rm.relu(self.batch_norm2_2(self.conv2_2(branch2)))

        branch3 = rm.relu(self.batch_norm3_reduced(self.conv3_reduced(x)))
        branch3 = rm.relu(self.batch_norm3_1(self.conv3_1(branch3)))
        branch3 = rm.relu(self.batch_norm3_2(self.conv3_2(branch3)))
        branch3 = rm.relu(self.batch_norm3_3(self.conv3_3(branch3)))
        branch3 = rm.relu(self.batch_norm3_4(self.conv3_4(branch3)))

        # Pooling branch keeps the spatial size (stride 1, padding 1).
        branch4 = rm.max_pool2d(x, filter=3, stride=1, padding=1)
        branch4 = rm.relu(self.batch_norm4(self.conv4(branch4)))

        return rm.concat([
            branch1, branch2, branch3, branch4
        ])
Example #15
0
    def forward(self, x):
        """FCN-32s forward pass: score, one upsample, crop to input size."""
        h = self.block1(x)
        h = self.block2(h)
        h = self.block3(h)
        h = self.block4(h)
        h = self.block5(h)

        h = self.dr1(rm.relu(self.fc6(h)))
        h = self.dr2(rm.relu(self.fc7(h)))

        h = self.upscore(self.score_fr(h))

        # Crop the upsampled score map back to the input's spatial size.
        return h[:, :, 19:19 + x.shape[2], 19:19 + x.shape[3]]
Example #16
0
    def forward(self, x):
        """FCN-8s style forward pass without cropping.

        Fuses pool3 and pool4 skip scores with the coarse stream by plain
        addition — assumes the upsampled maps already match the skip maps
        spatially (confirm the upsampling layer configuration).
        """
        t = x
        t = self.block1(t)
        t = self.block2(t)
        t = self.block3(t)
        pool3 = t  # skip-connection source
        t = self.block4(t)
        pool4 = t  # skip-connection source
        t = self.block5(t)

        t = rm.relu(self.fc6(t))
        t = self.dr1(t)
        fc6 = t

        t = rm.relu(self.fc7(t))
        t = self.dr2(t)
        fc7 = t

        t = self.score_fr(t)
        score_fr = t

        t = self.upscore2(t)
        upscore2 = t

        t = self.score_pool4(pool4)
        score_pool4 = t

        # Fuse 2x-upsampled coarse scores with the pool4 scores.
        t = upscore2 + score_pool4
        fuse_pool4 = t

        t = self.score_pool3(pool3)
        score_pool3 = t

        t = self.upscore_pool4(fuse_pool4)
        upscore_pool4 = t
        # Fuse again with the pool3 scores.
        t = upscore_pool4 + score_pool3

        # Final 8x upsampling.
        t = self.upscore8(t)
        return t
Example #17
0
    def forward(self, x):
        """Stem: three conv+BN+ReLU stages, a strided pool, three more."""
        h = rm.relu(self.batch_norm1(self.conv1(x)))
        h = rm.relu(self.batch_norm2(self.conv2(h)))
        h = rm.relu(self.batch_norm3(self.conv3(h)))

        h = rm.max_pool2d(h, filter=3, stride=2)
        h = rm.relu(self.batch_norm4(self.conv4(h)))
        h = rm.relu(self.batch_norm5(self.conv5(h)))
        h = rm.relu(self.batch_norm6(self.conv6(h)))
        return h
Example #18
0
    def forward(self, x):
        """Reduction block: pooling, short-conv, and deep-conv branches
        concatenated along the channel axis."""
        pool_branch = rm.max_pool2d(x, filter=3, stride=2)

        short_branch = rm.relu(self.batch_norm1_red(self.conv1_red(x)))
        short_branch = rm.relu(self.batch_norm1(self.conv1(short_branch)))

        deep_branch = rm.relu(self.batch_norm2_red(self.conv2_red(x)))
        deep_branch = rm.relu(self.batch_norm2_1(self.conv2_1(deep_branch)))
        deep_branch = rm.relu(self.batch_norm2_2(self.conv2_2(deep_branch)))
        deep_branch = rm.relu(self.batch_norm2_3(self.conv2_3(deep_branch)))

        return rm.concat([pool_branch, short_branch, deep_branch])
Example #19
0
 def forward(self, x):
     """DenseNet-style forward pass over a flat layer list (self.base).

     The index ``i`` walks self.base sequentially: two stem layers, then
     for each block `layer_per_block[k]` concat units followed by one
     transition layer (except after the last block), then the classifier.
     """
     i = 0
     t = self.base[i](x)  # first stem layer
     i += 1
     t = rm.relu(self.base[i](t))  # second stem layer + ReLU
     i += 1
     t = rm.max_pool2d(t, filter=3, stride=2, padding=1)
     for j in self.layer_per_block[:-1]:
         for k in range(j):
             tmp = t
             t = self.base[i](t)
             i += 1
             # Dense connectivity: concatenate unit input and output.
             t = rm.concat(tmp, t)
         # Transition layer between blocks.
         t = self.base[i](t)
         i += 1
     # Last block has no trailing transition layer.
     for j in range(self.layer_per_block[-1]):
         tmp = t
         t = self.base[i](t)
         i += 1
         t = rm.concat(tmp, t)
     t = rm.average_pool2d(t, filter=7, stride=1)
     t = rm.flatten(t)
     t = self.fc(t)
     return t
Example #20
0
 def forward(self, x):
     """Two-layer forward pass that prints every intermediate tensor.

     Debug/teaching helper: prints the input, both layers' weights and
     biases, and all activations on every call. Returns the raw output
     of layer2 (no final activation).
     """
     print("")
     print("input:\n{}".format(x))
     print("input shape:{}".format(x.shape))
     print("")
     t1 = self.layer1(x)
     print("input x hidden weight:\n{}".format(self.layer1.params.w))
     print("input x hidden bias:\n{}".format(self.layer1.params.b))
     print("")
     print("hidden:\n{}".format(t1))
     print("hidden shape:{}".format(t1.shape))
     t2 = rm.relu(t1)
     print("")
     print("relu:\n{}".format(t2))
     print("relu shape:{}".format(t2.shape))
     print("")
     t3 = self.layer2(t2)
     print("hidden x output weight:\n{}".format(self.layer2.params.w))
     print("hidden x output bias:\n{}".format(self.layer2.params.b))
     print("")
     print("output:\n{}".format(t3))
     print("output shape:{}".format(t3.shape))
     print("")
     return t3
Example #21
0
    def forward(self, x):
        """Inception-style stem: conv stack, then three branch-and-concat
        stages."""
        h = rm.relu(self.batch_norm1(self.conv1(x)))
        h = rm.relu(self.batch_norm2(self.conv2(h)))
        h = rm.relu(self.batch_norm3(self.conv3(h)))

        # Stage 1: strided pool vs. strided conv, concatenated.
        pooled = rm.max_pool2d(h, filter=3, stride=2)
        conved = rm.relu(self.batch_norm4(self.conv4(h)))
        h = rm.concat([pooled, conved])

        # Stage 2: short and long conv towers, concatenated.
        short = rm.relu(self.batch_norm5_1_1(self.conv5_1_1(h)))
        short = rm.relu(self.batch_norm5_1_2(self.conv5_1_2(short)))

        tall = rm.relu(self.batch_norm5_2_1(self.conv5_2_1(h)))
        tall = rm.relu(self.batch_norm5_2_2(self.conv5_2_2(tall)))
        tall = rm.relu(self.batch_norm5_2_3(self.conv5_2_3(tall)))
        tall = rm.relu(self.batch_norm5_2_4(self.conv5_2_4(tall)))
        h = rm.concat([short, tall])

        # Stage 3: conv vs. strided pool, concatenated.
        conv_out = rm.relu(self.batch_norm6(self.conv6(h)))
        pool_out = rm.max_pool2d(h, filter=3, stride=2)
        return rm.concat([conv_out, pool_out])
Example #22
0
    def forward(self, x):
        """SSD forward pass.

        Computes box locations, class confidences, and prior boxes at
        several feature scales and concatenates them into one prediction
        tensor along axis 1 (layout inferred from the reshapes at the end
        — confirm against the loss/decoder that consumes it).
        """
        n = x.shape[0]
        t = x
        t = self.pool3(
            rm.relu(
                self.conv3_3(rm.relu(self.conv3_2(rm.relu(self.conv3_1(t)))))))
        t = rm.relu(
            self.conv4_3(rm.relu(self.conv4_2(rm.relu(self.conv4_1(t))))))

        # Normalize and compute location, confidence and priorbox aspect ratio
        conv4_norm = self.norm(t)
        #conv4_norm = t
        conv4_norm_loc = self.conv4_3_mbox_loc(conv4_norm)
        conv4_norm_loc_flat = rm.flatten(conv4_norm_loc)
        conv4_norm_conf = self.conv4_3_mbox_conf(conv4_norm)
        conv4_norm_conf_flat = rm.flatten(conv4_norm_conf)
        conv4_priorbox = self.conv4_3_priorbox(conv4_norm)

        t = self.pool4(t)

        t = self.pool5(
            rm.relu(
                self.conv5_3(rm.relu(self.conv5_2(rm.relu(self.conv5_1(t)))))))

        t = rm.relu(self.fc6(t))
        t = rm.relu(self.fc7(t))

        # Normalize and compute location, confidence and priorbox aspect ratio
        fc7_mbox_loc = self.fc7_mbox_loc(t)
        fc7_mbox_loc_flat = rm.flatten(fc7_mbox_loc)

        fc7_mbox_conf = self.fc7_mbox_conf(t)
        fc7_mbox_conf_flat = rm.flatten(fc7_mbox_conf)
        fc7_priorbox = self.fc7_priorbox(t)

        t = rm.relu(self.conv8_2(rm.relu(self.conv8_1(t))))
        # Normalize and compute location, confidence and priorbox aspect ratio
        conv8_mbox_loc = self.conv8_2_mbox_loc(t)
        conv8_mbox_loc_flat = rm.flatten(conv8_mbox_loc)

        conv8_mbox_conf = self.conv8_2_mbox_conf(t)
        conv8_mbox_conf_flat = rm.flatten(conv8_mbox_conf)
        conv8_priorbox = self.conv8_2_priorbox(t)

        t = rm.relu(self.conv9_2(rm.relu(self.conv9_1(t))))
        # Normalize and compute location, confidence and priorbox aspect ratio
        conv9_mbox_loc = self.conv9_2_mbox_loc(t)
        conv9_mbox_loc_flat = rm.flatten(conv9_mbox_loc)

        conv9_mbox_conf = self.conv9_2_mbox_conf(t)
        conv9_mbox_conf_flat = rm.flatten(conv9_mbox_conf)
        conv9_priorbox = self.conv9_2_priorbox(t)

        t = rm.relu(self.conv10_2(rm.relu(self.conv10_1(t))))
        conv10_mbox_loc = self.conv10_2_mbox_loc(t)
        conv10_mbox_loc_flat = rm.flatten(conv10_mbox_loc)

        conv10_mbox_conf = self.conv10_2_mbox_conf(t)
        conv10_mbox_conf_flat = rm.flatten(conv10_mbox_conf)
        conv10_priorbox = self.conv10_2_priorbox(t)

        # Final pooled stage; reshape the flat vector back to NCHW for the
        # priorbox layer.
        t = rm.average_pool2d(t)
        t = rm.flatten(t)

        pool11_mbox_loc_flat = self.pool11_mbox_loc(t)

        pool11_mbox_conf_flat = self.pool11_mbox_conf(t)
        pool11_reshaped = t.reshape((t.shape[0], 256, 1, 1))
        pool11_priorbox = self.pool11_priorbox(pool11_reshaped)

        # Concatenate the flattened predictions from every scale.
        mbox_loc = rm.concat([
            conv4_norm_loc_flat, fc7_mbox_loc_flat, conv8_mbox_loc_flat,
            conv9_mbox_loc_flat, conv10_mbox_loc_flat, pool11_mbox_loc_flat
        ])
        mbox_conf = rm.concat([
            conv4_norm_conf_flat, fc7_mbox_conf_flat, conv8_mbox_conf_flat,
            conv9_mbox_conf_flat, conv10_mbox_conf_flat, pool11_mbox_conf_flat
        ])

        # Prior boxes are plain ndarrays, so use numpy concatenation.
        mbox_priorbox = np.concatenate([
            conv4_priorbox, fc7_priorbox, conv8_priorbox, conv9_priorbox,
            conv10_priorbox, pool11_priorbox
        ],
                                       axis=1)

        num_boxes = mbox_loc.shape[-1] // 4
        mbox_loc = mbox_loc.reshape((n, 4, num_boxes))
        mbox_conf = mbox_conf.reshape((n, self.num_class, num_boxes))

        predictions = rm.concat([
            mbox_loc, mbox_conf,
            np.broadcast_to(mbox_priorbox.transpose((0, 2, 1)),
                            (mbox_conf.shape[0], mbox_priorbox.shape[2],
                             mbox_priorbox.shape[1]))
        ])
        return predictions
Example #23
0
File: vgg.py  Project: clockfly/ReNomIMG
    def forward(self, x):
        """VGG-16 feature extractor: five conv stages, each closed by a
        2x2 max pool with stride 2."""
        stages = (
            ("conv1_1", "conv1_2"),
            ("conv2_1", "conv2_2"),
            ("conv3_1", "conv3_2", "conv3_3"),
            ("conv4_1", "conv4_2", "conv4_3"),
            ("conv5_1", "conv5_2", "conv5_3"),
        )
        h = x
        for stage in stages:
            for layer_name in stage:
                h = rm.relu(getattr(self, layer_name)(h))
            h = rm.max_pool2d(h, filter=2, stride=2)
        return h
 def forward(self, x):
     """Two-layer network: linear, ReLU, linear."""
     hidden = rm.relu(self.layer1(x))
     return self.layer2(hidden)
Example #25
0
 def forward(self, x):
     """Apply layer1, ReLU, then layer2."""
     activated = rm.relu(self.layer1(x))
     out = self.layer2(activated)
     return out
Example #26
0
File: unet.py  Project: clockfly/ReNomIMG
    def forward(self, x):
        """U-Net forward pass.

        Contracting path saves skip feature maps (c1..c4); the expanding
        path deconvolves, crops to the skip's spatial size, concatenates
        on channels, and convolves. Returns the raw conv9 output.
        """
        t = rm.relu(self.bn1_1(self.conv1_1(x)))
        c1 = rm.relu(self.bn1_2(self.conv1_2(t)))
        t = rm.max_pool2d(c1, filter=2, stride=2)
        t = rm.relu(self.bn2_1(self.conv2_1(t)))
        c2 = rm.relu(self.bn2_2(self.conv2_2(t)))
        t = rm.max_pool2d(c2, filter=2, stride=2)
        t = rm.relu(self.bn3_1(self.conv3_1(t)))
        c3 = rm.relu(self.bn3_2(self.conv3_2(t)))
        t = rm.max_pool2d(c3, filter=2, stride=2)
        t = rm.relu(self.bn4_1(self.conv4_1(t)))
        c4 = rm.relu(self.bn4_2(self.conv4_2(t)))
        t = rm.max_pool2d(c4, filter=2, stride=2)
        # Bottleneck.
        t = rm.relu(self.bn5_1(self.conv5_1(t)))
        t = rm.relu(self.bn5_2(self.conv5_2(t)))

        # Expanding path: each slice trims the deconv output so it matches
        # the corresponding skip feature map before concatenation.
        t = self.deconv1(t)[:, :, :c4.shape[2], :c4.shape[3]]
        t = rm.concat([c4, t])
        t = rm.relu(self.conv6_1(t))
        t = rm.relu(self.conv6_2(t))
        t = self.deconv2(t)[:, :, :c3.shape[2], :c3.shape[3]]
        t = rm.concat([c3, t])

        t = rm.relu(self.conv7_1(t))
        t = rm.relu(self.conv7_2(t))
        t = self.deconv3(t)[:, :, :c2.shape[2], :c2.shape[3]]
        t = rm.concat([c2, t])

        t = rm.relu(self.conv8_1(t))
        t = rm.relu(self.conv8_2(t))
        t = self.deconv4(t)[:, :, :c1.shape[2], :c1.shape[3]]
        t = rm.concat([c1, t])

        # Final projection convolution (no activation here).
        t = self.conv9(t)

        return t
Example #27
0
    def forward(self, x):
        """SSD forward pass producing per-prior-box predictions.

        Returns:
            Node: concat of mbox_loc (n, boxes, 4) and mbox_conf
            (n, boxes, num_class) along axis 2.

        Bug fix: the 11th feature stage originally re-applied conv10_1 /
        conv10_2 instead of conv11_1 / conv11_2, even though its outputs
        feed the conv11_2_mbox_loc / conv11_2_mbox_conf heads.
        """
        n = x.shape[0]
        t = x
        # Vgg 3rd Block
        t = rm.relu(self.conv3_1(t))
        t = rm.relu(self.conv3_2(t))
        t = rm.relu(self.conv3_3(t))
        t = self.pool3(t)

        # Vgg 4th Block
        t = rm.relu(self.conv4_1(t))
        t = rm.relu(self.conv4_2(t))
        t = rm.relu(self.conv4_3(t))

        # Normalize and compute location, confidence and priorbox aspect ratio
        conv4_norm = self.norm(t)

        conv4_norm_loc = self.conv4_3_mbox_loc(conv4_norm)
        conv4_norm_loc_flat = rm.flatten(conv4_norm_loc.transpose(0, 2, 3, 1))
        conv4_norm_conf = self.conv4_3_mbox_conf(conv4_norm)
        conv4_norm_conf_flat = rm.flatten(conv4_norm_conf.transpose(
            0, 2, 3, 1))

        t = self.pool4(t)

        # Vgg 5th Block
        t = rm.relu(self.conv5_1(t))
        t = rm.relu(self.conv5_2(t))
        t = rm.relu(self.conv5_3(t))
        t = self.pool5(t)

        # Vgg 6, 7th Block
        t = rm.relu(self.fc6(t))
        t = rm.relu(self.fc7(t))

        # Normalize and compute location, confidence and priorbox aspect ratio
        fc7_mbox_loc = self.fc7_mbox_loc(t)
        fc7_mbox_loc_flat = rm.flatten(fc7_mbox_loc.transpose(0, 2, 3, 1))

        fc7_mbox_conf = self.fc7_mbox_conf(t)
        fc7_mbox_conf_flat = rm.flatten(fc7_mbox_conf.transpose(0, 2, 3, 1))

        t = rm.relu(self.conv8_1(t))
        t = rm.relu(self.conv8_2(t))
        # Normalize and compute location, confidence and priorbox aspect ratio
        conv8_mbox_loc = self.conv8_2_mbox_loc(t)
        conv8_mbox_loc_flat = rm.flatten(conv8_mbox_loc.transpose(0, 2, 3, 1))

        conv8_mbox_conf = self.conv8_2_mbox_conf(t)
        conv8_mbox_conf_flat = rm.flatten(conv8_mbox_conf.transpose(
            0, 2, 3, 1))

        t = rm.relu(self.conv9_1(t))
        t = rm.relu(self.conv9_2(t))
        # Normalize and compute location, confidence and priorbox aspect ratio
        conv9_mbox_loc = self.conv9_2_mbox_loc(t)
        conv9_mbox_loc_flat = rm.flatten(conv9_mbox_loc.transpose(0, 2, 3, 1))

        conv9_mbox_conf = self.conv9_2_mbox_conf(t)
        conv9_mbox_conf_flat = rm.flatten(conv9_mbox_conf.transpose(
            0, 2, 3, 1))

        t = rm.relu(self.conv10_1(t))
        t = rm.relu(self.conv10_2(t))

        conv10_mbox_loc = self.conv10_2_mbox_loc(t)
        conv10_mbox_loc_flat = rm.flatten(conv10_mbox_loc.transpose(
            0, 2, 3, 1))

        conv10_mbox_conf = self.conv10_2_mbox_conf(t)
        conv10_mbox_conf_flat = rm.flatten(
            conv10_mbox_conf.transpose(0, 2, 3, 1))

        # Fixed: was conv10_1/conv10_2 a second time (copy-paste error);
        # this stage feeds the conv11_2_* heads below.
        t = rm.relu(self.conv11_1(t))
        t = rm.relu(self.conv11_2(t))

        conv11_mbox_loc = self.conv11_2_mbox_loc(t)
        conv11_mbox_loc_flat = rm.flatten(conv11_mbox_loc.transpose(
            0, 2, 3, 1))

        conv11_mbox_conf = self.conv11_2_mbox_conf(t)
        conv11_mbox_conf_flat = rm.flatten(
            conv11_mbox_conf.transpose(0, 2, 3, 1))

        # Concatenate the flattened predictions from every scale.
        mbox_loc = rm.concat([
            conv4_norm_loc_flat, fc7_mbox_loc_flat, conv8_mbox_loc_flat,
            conv9_mbox_loc_flat, conv10_mbox_loc_flat, conv11_mbox_loc_flat
        ])

        mbox_conf = rm.concat([
            conv4_norm_conf_flat, fc7_mbox_conf_flat, conv8_mbox_conf_flat,
            conv9_mbox_conf_flat, conv10_mbox_conf_flat, conv11_mbox_conf_flat
        ])

        mbox_loc = mbox_loc.reshape((n, -1, 4))
        mbox_conf = mbox_conf.reshape((n, -1, self.num_class))

        predictions = rm.concat([mbox_loc, mbox_conf], axis=2)
        return predictions