Code Example #1
    def __call__(self, x):
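        # VGG-style stack: 13 convolutions (conv1-conv13) in five blocks, each block ending with
        # ReLU -> LRN -> 2x2 max pooling, then fc14/fc15 with dropout and a final fc16 classifier.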
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 2, stride=2)

        h = F.relu(self.conv3(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv4(h))), 2, stride=2)

        h = F.relu(self.conv5(h))
        h = F.relu(self.conv6(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv7(h))), 2, stride=2)

        h = F.relu(self.conv8(h))
        h = F.relu(self.conv9(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv10(h))), 2, stride=2)

        h = F.relu(self.conv11(h))
        h = F.relu(self.conv12(h))
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv13(h))), 2, stride=2)

        h = F.dropout(F.relu(self.fc14(h)))
        h = F.dropout(F.relu(self.fc15(h)))
        h = self.fc16(h)

        return h
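
The snippet above assumes a chainer.Chain whose links (conv1-conv13, fc14-fc16) are declared in
__init__, as do most of the examples that follow. For reference only, here is a minimal,
self-contained sketch of that pattern using F.local_response_normalization; the layer sizes and
the class name (TinyLRNNet) are placeholders and are not taken from any of the listed projects.

import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np


class TinyLRNNet(chainer.Chain):
    """Minimal sketch (assumed): conv -> ReLU -> LRN -> max pooling, twice, then a linear head."""

    def __init__(self, n_out=10):
        super(TinyLRNNet, self).__init__()
        with self.init_scope():
            self.conv1 = L.Convolution2D(None, 16, ksize=3, pad=1)
            self.conv2 = L.Convolution2D(None, 32, ksize=3, pad=1)
            self.fc = L.Linear(None, n_out)

    def __call__(self, x):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 2, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 2, stride=2)
        return self.fc(h)


# Usage with a dummy batch of one 3-channel 32x32 image; y.shape is (1, 10).
model = TinyLRNNet()
x = np.zeros((1, 3, 32, 32), dtype=np.float32)
y = model(x)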
Code Example #2
File: models.py Project: Kansea/videosCompare
    def __call__(self, x, t=None):
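        # AlexNet-style net (conv1-conv5, fc6-fc8) with LRN + 2x2 pooling after conv1/conv2 and
        # dropout ratio 0.9 on fc6/fc7. In training mode the softmax loss and accuracy are reported
        # and the loss is returned; otherwise the fc7 features (h), not the fc8 logits, are returned.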
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))),
                             ksize=2,
                             stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))),
                             ksize=2,
                             stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), ksize=2, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), ratio=0.9)
        h = F.dropout(F.relu(self.fc7(h)), ratio=0.9)
        y = self.fc8(h)

        if self.train:
            self.loss = F.softmax_cross_entropy(y, t)
            self.accuracy = F.accuracy(y, t)
            chainer.report({
                'loss': self.loss,
                'accuracy': self.accuracy
            }, self)
            return self.loss
        else:
            return h
Code Example #3
File: model.py Project: knorth55/chainer-modelzoo
    def __call__(self, x, t=None):
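        # AlexNet-style forward pass; self.score keeps the fc8 logits. With a target t, the softmax
        # cross-entropy loss and accuracy are computed and reported, otherwise nothing is returned.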
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))),
                             3,
                             stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))),
                             3,
                             stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)
        self.score = h

        if t is None:
            assert not self.train
            return

        self.loss = F.softmax_cross_entropy(self.score, t)
        self.accuracy = F.accuracy(self.score, t)
        chainer.report({'loss': self.loss, 'accuracy': self.accuracy})

        return self.loss
Code Example #4
def extract_features(model, data1, data2):
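    # Feeds each pair (data1[i], data2[i]) through the same conv1/conv2 layers (shared weights),
    # concatenates the two feature maps along the channel axis, and stacks the fc3 activations
    # into the (N, 1024) feature_list.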
    feature_list = np.empty((0, 1024), dtype=float)
    # count = 0
    for i in range(len(data1)):
        # Forward pass
        root1_h1 = F.relu(model.conv1(data1[i]))
        root1_h2 = F.max_pooling_2d(root1_h1, stride=2, ksize=2)
        root1_h3 = F.local_response_normalization(root1_h2, k=1, n=5, alpha=0.00002, beta=0.75)
        root1_h4 = F.relu(model.conv2(root1_h3))
        root1_h5 = F.max_pooling_2d(root1_h4, stride=2, ksize=3)
        root1_h6 = F.local_response_normalization(root1_h5, k=1, n=5, alpha=0.00002, beta=0.75)

        root2_h1 = F.relu(model.conv1(data2[i]))
        root2_h2 = F.max_pooling_2d(root2_h1, stride=2, ksize=2)
        root2_h3 = F.local_response_normalization(root2_h2, k=1, n=5, alpha=0.00002, beta=0.75)
        root2_h4 = F.relu(model.conv2(root2_h3))
        root2_h5 = F.max_pooling_2d(root2_h4, stride=2, ksize=3)
        root2_h6 = F.local_response_normalization(root2_h5, k=1, n=5, alpha=0.00002, beta=0.75)

        h6 = F.concat((root1_h6, root2_h6), axis=1)

        feature = F.relu(model.fc3(h6))
        # h8 = model.fc4(h7)

        # print("fc3層の出力は:" + str(feature.data[0]))
        feature_list = np.append(feature_list, np.array([feature.data[0]]), axis=0)
    # feature_list = feature_list.flatten


    return feature_list
Code Example #5
    def __call__(self, x, y, t):
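        # Two parallel AlexNet-style streams, convR*/fcR* for x and convD*/fcD* for y; fc8 takes both
        # streams as input, and fc9 produces the scores used for the softmax cross-entropy loss.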
        self.clear()
        hR = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convR1(x))), 3, stride=2)
        hR = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convR2(hR))), 3, stride=2)
        hR = F.relu(self.convR3(hR))
        hR = F.relu(self.convR4(hR))
        hR = F.max_pooling_2d(F.relu(self.convR5(hR)), 3, stride=2)
        hR = F.dropout(F.relu(self.fcR6(hR)), train=self.train)
        hR = F.dropout(F.relu(self.fcR7(hR)), train=self.train)
        hD = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convD1(y))), 3, stride=2)
        hD = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.convD2(hD))), 3, stride=2)
        hD = F.relu(self.convD3(hD))
        hD = F.relu(self.convD4(hD))
        hD = F.max_pooling_2d(F.relu(self.convD5(hD)), 3, stride=2)
        hD = F.dropout(F.relu(self.fcD6(hD)), train=self.train)
        hD = F.dropout(F.relu(self.fcD7(hD)), train=self.train)
        h = F.dropout(F.relu(self.fc8(hR, hD)), train=self.train)
        h = self.fc9(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Code Example #6
    def __call__(self, x, subtract_mean=True):
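        # GoogLeNet-style embedding network: stem convolutions with LRN, inception blocks 3a-5b,
        # 7x7 average pooling, a batch-normalized fully connected layer, and optional L2 normalization.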
        if subtract_mean:
            x = x - self._image_mean
#        h = super(ModifiedGoogLeNet, self).__call__(
#            x, layers=['pool5'], train=train)['pool5']
#        h = self.bn_fc(h, test=not train)
#        y = self.fc(h)
#        return y
        h = F.relu(self.conv1(x))
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)
        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)
        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)
        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.bn_fc(h)
        y = self.fc(h)
        if self.normalize_output:
            y = F.normalize(y)
        return y
Code Example #7
File: Alex_cla.py Project: YIKU8800/w2v_laplacian
def forward(x_data, y_data, L=L, batchsize=batchsize):
    L = Variable(cuda.to_gpu(L))
    x, t = Variable(cuda.to_gpu(x_data)), Variable(cuda.to_gpu(y_data))
    #    x, t = Variable(x_data), Variable(y_data)
    h = F.max_pooling_2d(F.relu(F.local_response_normalization(
        model.conv1(x))),
                         3,
                         stride=2)
    h = F.max_pooling_2d(F.relu(F.local_response_normalization(
        model.conv2(h))),
                         3,
                         stride=2)
    h = F.relu(model.conv3(h))
    h = F.relu(model.conv4(h))
    h = F.max_pooling_2d(F.relu(model.conv5(h)), 3, stride=2)
    h = F.relu(model.fc6(h))
    y = model.fc7(h)

    y_f = F.sigmoid(y)
    y_ft = F.expand_dims(y_f, 2)
    #    print F.batch_matmul(y_f,L,transa=True).data.shape
    #    print F.batch_matmul(F.batch_matmul(y_f,L,transa=True),y_ft).data.shape
    term = (F.sum(F.batch_matmul(F.batch_matmul(y_f, L, transa=True),
                                 y_ft))) / batchsize

    sce = F.sigmoid_cross_entropy(y, t)
    #E=sce+(rate*term)
    E = sce
    return E, sce, term, y_f
Code Example #8
    def __call__(self, x):

        h = self.conv1(x)

        h = self.conv2(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.local_response_normalization(h)

        h = self.conv3(h)
        h = F.relu(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.local_response_normalization(h)

        h = self.conv4(h)
        h = F.relu(h)
        self.cam = h
        h = F.max_pooling_2d(h, 3, stride=2)
        h = F.local_response_normalization(h)

        h = F.dropout(h, ratio=0.5)
        h = self.fc1(h)
        h = F.relu(h)
        h = F.dropout(h, ratio=0.5)

        h = self.fc2(h)

        return h
Code Example #9
File: loadTrainedWeight.py Project: SvdWegen/DevRob
    def __call__(self, data, face, eyes_grid):
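        # Three inputs: the full image (data), a face crop (face), and a flattened eye-position grid.
        # Face and eye features produce a 13x13 sigmoid importance map that, after passing through
        # self.importance_map, is multiplied with the image-tower features (conv5_red) before the
        # five output heads (fc_0_0, fc_1_0, fc_0_1, fc_m1_0, fc_0_m1).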
        # the network that uses data as input
        pool1 = F.max_pooling_2d(F.relu(self.conv1(data)), ksize=3, stride=2)
        norm1 = F.local_response_normalization(pool1,
                                               n=5,
                                               alpha=0.0001,
                                               beta=0.75)
        pool2 = F.max_pooling_2d(F.relu(self.conv2(norm1)), ksize=3, stride=2)
        norm2 = F.local_response_normalization(
            pool2, n=5, alpha=0.0001, beta=0.75)
        conv3 = F.relu(self.conv3(norm2))
        conv4 = F.relu(self.conv4(conv3))
        conv5 = F.relu(self.conv5(conv4))
        conv5_red = F.relu(self.conv5_red(conv5))

        # the network that uses face as input
        pool1_face = F.max_pooling_2d(F.relu(self.conv1_face(face)),
                                      ksize=3,
                                      stride=2)
        norm1_face = F.local_response_normalization(pool1_face,
                                                    n=5,
                                                    alpha=0.0001,
                                                    beta=0.75)
        pool2_face = F.max_pooling_2d(F.relu(self.conv2_face(norm1_face)),
                                      ksize=3,
                                      stride=2)
        norm2_face = F.local_response_normalization(pool2_face,
                                                    n=5,
                                                    alpha=0.0001,
                                                    beta=0.75)
        conv3_face = F.relu(self.conv3_face(norm2_face))
        conv4_face = F.relu(self.conv4_face(conv3_face))
        pool5_face = F.max_pooling_2d(F.relu(self.conv5_face(conv4_face)),
                                      ksize=3,
                                      stride=2)
        fc6_face = F.relu(self.fc6_face(pool5_face))

        # now the eyes
        eyes_grid_flat = F.flatten(eyes_grid)
        eyes_grid_mult = 24 * eyes_grid_flat
        eyes_grid_reshaped = F.reshape(
            eyes_grid_mult,
            (1, eyes_grid_mult.size))  # give it same ndim as fc6

        # now bring everything together
        face_input = F.concat((fc6_face, eyes_grid_reshaped), axis=1)
        fc7_face = F.relu(self.fc7_face(face_input))
        fc8_face = F.relu(self.fc8_face(fc7_face))
        importance_map_reshape = F.reshape(
            F.sigmoid(self.importance_no_sigmoid(fc8_face)), (1, 1, 13, 13))
        fc_7 = conv5_red * self.importance_map(importance_map_reshape)
        fc_0_0 = self.fc_0_0(fc_7)
        fc_1_0 = self.fc_1_0(fc_7)
        fc_0_1 = self.fc_0_1(fc_7)
        fc_m1_0 = self.fc_m1_0(fc_7)
        fc_0_m1 = self.fc_0_m1(fc_7)

        return fc_0_0, fc_1_0, fc_0_1, fc_0_m1, fc_m1_0
Code Example #10
    def __call__(self, x):
        # for chainer v1.x.x
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))),
                             3,
                             stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))),
                             3,
                             stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.my_fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.my_fc7(h)), train=self.train)
        h = self.my_fc8(h)

        # for chainer v2.x.x
        # You don't need to use DelGradient hook.

        # with chainer.no_backprop_mode():
        #     h = F.max_pooling_2d(F.local_response_normalization(
        #         F.relu(self.conv1(x))), 3, stride=2)
        #     h = F.max_pooling_2d(F.local_response_normalization(
        #         F.relu(self.conv2(h))), 3, stride=2)
        #     h = F.relu(self.conv3(h))
        #     h = F.relu(self.conv4(h))
        #     h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        #     with chainer.force_backprop_mode():
        #         h = F.dropout(F.relu(self.my_fc6(h)), train=self.train)
        #         h = F.dropout(F.relu(self.my_fc7(h)), train=self.train)
        #         h = self.my_fc8(h)

        return h
Code Example #11
    def __call__(self, x):
        h_P1 = self.P1(x)
        if self.P1N_Normalize: h_P1 = F.local_response_normalization(h_P1)
        h_P1 = F.max_pooling_2d(F.relu(h_P1),
                                ksize=self.P1P_ksize,
                                cover_all=True)

        h_P2 = self.P2(h_P1)
        if self.P2N_Normalize: h_P2 = F.local_response_normalization(h_P2)
        h_P2 = F.max_pooling_2d(F.relu(h_P2),
                                ksize=self.P2P_ksize,
                                cover_all=True)

        h_P3 = self.P3(h_P2)
        if self.P3N_Normalize: h_P3 = F.local_response_normalization(h_P3)
        h_P3 = F.max_pooling_2d(F.relu(h_P3),
                                ksize=self.P3P_ksize,
                                cover_all=True)

        h_L1 = F.dropout(F.relu(self.L1(h_P3)),
                         ratio=self.L1_dropout,
                         train=self.IsTrain)
        h_L2 = F.dropout(F.relu(self.L2(h_L1)),
                         ratio=self.L2_dropout,
                         train=self.IsTrain)
        y = h_L2
        return y
Code Example #12
    def __call__(self, img_list):
        self.clear()
        xs = np.zeros((len(img_list), 3, self.ref_len, self.ref_len)).astype(np.float32)
        for i, img in enumerate(img_list):
            xs[i] = self.preprocess(img)

        # conv1->relu1->pool1->norm1
        h = F.local_response_normalization(
            F.max_pooling_2d(
                F.relu(self.conv1(xs)),
                ksize=3,
                stride=2
                # pad=0
            )
            # n(local_size)=5
            # alpha=0.0001
            # beta=0.75
        )
        # conv2->relu2->pool2->norm2
        h = F.local_response_normalization(F.max_pooling_2d(F.relu(self.conv2(h)), ksize=3, stride=2))
        # conv3->relu3
        h = F.relu(self.conv3(h))
        # conv4->relu4
        h = F.relu(self.conv4(h))
        # conv5->relu5->pooling5
        h = F.max_pooling_2d(F.relu(self.conv5(h)), ksize=3, stride=2)
        return self.fc6(h).data
Code Example #13
    def predictor(self, x):
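        # GoogLeNet main branch only (no auxiliary classifiers): stem + inception 3a-5b,
        # 7x7 average pooling, dropout(0.4), and the final loss3_fc layer.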
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
        return h
Code Example #14
File: RetweetNet_Org.py Project: demarche/twitterNet
    def forward(self, x_img, x_doc, y_data, train=True):
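        # Image/text model: an AlexNet-style image branch (conv1-fc7) and a two-layer document branch
        # (doc_fc1/doc_fc2) are merged by the two-input link bi1 before the fc8 classifier. Returns the
        # softmax cross-entropy loss in training mode, accuracy otherwise.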

        x_img = cuda.cupy.asarray(x_img)
        x_doc = cuda.cupy.asarray(x_doc)
        y_data = cuda.cupy.asarray(y_data)

        img, doc, t = Variable(x_img), Variable(x_doc), Variable(y_data)

        h = F.max_pooling_2d(F.relu(self.conv1(img)), ksize=3, stride=2, pad=0)
        h = F.local_response_normalization(h)
        h = F.max_pooling_2d(F.relu(self.conv2(h)), ksize=3, stride=2, pad=0)
        h = F.local_response_normalization(h)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), ksize=3, stride=2, pad=0)
        h = F.dropout(F.relu(self.fc6(h)), train=train, ratio=0.5)
        h = F.dropout(F.relu(self.fc7(h)), train=train, ratio=0.5)
        h2 = F.relu(self.doc_fc1(doc))
        h2 = F.relu(self.doc_fc2(h2))
        b = F.relu(self.bi1(h, h2))
        y = self.fc8(b)
        if train:
            return F.softmax_cross_entropy(y, t)
        else:
            return F.accuracy(y, t)
Code Example #16
 def __call__(self, x, subtract_mean=True):
     if subtract_mean:
         x = x - self._image_mean
     h = F.relu(self.conv1(x))
     h = F.max_pooling_2d(h, 3, stride=2)
     h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
     h = F.relu(self.conv2_reduce(h))
     h = F.relu(self.conv2(h))
     h = F.local_response_normalization(h, n=5, k=1, alpha=1e-4 / 5)
     h = F.max_pooling_2d(h, 3, stride=2)
     h = self.inc3a(h)
     h = self.inc3b(h)
     h = F.max_pooling_2d(h, 3, stride=2)
     h = self.inc4a(h)
     h = self.inc4b(h)
     h = self.inc4c(h)
     h = self.inc4d(h)
     h = self.inc4e(h)
     h = F.max_pooling_2d(h, 3, stride=2)
     h = self.inc5a(h)
     h = self.inc5b(h)
     h = F.average_pooling_2d(h, 7, stride=1)
     h = self.bn_fc(h)
     y = self.fc(h)
     if self.normalize_output:
         y = F.normalize(y, axis=1)
     return y
Code Example #17
    def predict(self, x):
        with chainer.function.no_backprop_mode(), chainer.using_config('train', False):
            h = F.relu(self.conv1(x))
            h = F.local_response_normalization(
                F.max_pooling_2d(h, 3, stride=2), n=5)
            h = F.relu(self.conv2_reduce(h))
            h = F.relu(self.conv2(h))
            h = F.max_pooling_2d(
                F.local_response_normalization(h, n=5), 3, stride=2)

            h = self.inc3a(h)
            h = self.inc3b(h)
            h = F.max_pooling_2d(h, 3, stride=2)
            h = self.inc4a(h)

            h = self.inc4b(h)
            h = self.inc4c(h)
            h = self.inc4d(h)

            h = self.inc4e(h)
            h = F.max_pooling_2d(h, 3, stride=2)
            h = self.inc5a(h)
            h = self.inc5b(h)

            h = F.average_pooling_2d(h, 7, stride=1)
            h = self.loss3_fc(F.dropout(h, 0.4))
        return F.softmax(h)
Code Example #18
File: my_models.py Project: kyo-ke/myhistory
    def forward(self, x):
        y1 = self.model['conv1/7x7_s2'](x)
        h = F.relu(y1)
        h = F.local_response_normalization(self.pool_func(h, 3, stride=2), n=5)
        h = F.relu(self.model['conv2/3x3_reduce'](h))
        y2 = self.model['conv2/3x3'](h)
        h = F.relu(y2)
        h = self.pool_func(F.local_response_normalization(h, n=5), 3, stride=2)
        out1 = self.model['inception_3a/1x1'](h)
        out3 = self.model['inception_3a/3x3'](F.relu(
            self.model['inception_3a/3x3_reduce'](h)))
        out5 = self.model['inception_3a/5x5'](F.relu(
            self.model['inception_3a/5x5_reduce'](h)))
        pool = self.model['inception_3a/pool_proj'](self.pool_func(h,
                                                                   3,
                                                                   stride=1,
                                                                   pad=1))
        y3 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y3)

        out1 = self.model['inception_3b/1x1'](h)
        out3 = self.model['inception_3b/3x3'](F.relu(
            self.model['inception_3b/3x3_reduce'](h)))
        out5 = self.model['inception_3b/5x5'](F.relu(
            self.model['inception_3b/5x5_reduce'](h)))
        pool = self.model['inception_3b/pool_proj'](self.pool_func(h,
                                                                   3,
                                                                   stride=1,
                                                                   pad=1))
        y4 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y4)

        h = self.pool_func(h, 3, stride=2)

        out1 = self.model['inception_4a/1x1'](h)
        out3 = self.model['inception_4a/3x3'](F.relu(
            self.model['inception_4a/3x3_reduce'](h)))
        out5 = self.model['inception_4a/5x5'](F.relu(
            self.model['inception_4a/5x5_reduce'](h)))
        pool = self.model['inception_4a/pool_proj'](self.pool_func(h,
                                                                   3,
                                                                   stride=1,
                                                                   pad=1))
        y5 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y5)

        out1 = self.model['inception_4b/1x1'](h)
        out3 = self.model['inception_4b/3x3'](F.relu(
            self.model['inception_4b/3x3_reduce'](h)))
        out5 = self.model['inception_4b/5x5'](F.relu(
            self.model['inception_4b/5x5_reduce'](h)))
        pool = self.model['inception_4b/pool_proj'](self.pool_func(h,
                                                                   3,
                                                                   stride=1,
                                                                   pad=1))
        y6 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y6)

        return [y1, y2, y3, y4, y5, y6]
Code Example #19
    def __call__(self, x, t, train):
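        # GoogLeNet with the two auxiliary classifiers; the total loss is
        # 0.3 * (loss1 + loss2) + loss3, and accuracy is computed from the main branch output.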
        #if train:
        #	self.train=True
        #else:
        #	self.train=False
        x = chainer.Variable(x, volatile=not train)
        t = chainer.Variable(t, volatile=not train)

        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(F.max_pooling_2d(h, 3, stride=2),
                                           n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(F.local_response_normalization(h, n=5),
                             3,
                             stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        self.loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        self.loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        #print h.data
        h = F.max_pooling_2d(h, 3, stride=2)
        #print h.data
        h = self.inc5a(h)
        #print h.data
        h = self.inc5b(h)
        #print h.data
        h = F.average_pooling_2d(h, 7, stride=1)
        #print h.data
        #h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
        h = self.loss3_fc(F.dropout(h, 0.4, train=train))
        #print h.data
        self.loss3 = F.softmax_cross_entropy(h, t)
        print h.data
        print t.data
        self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
        self.accuracy = F.accuracy(h, t)
        return self.loss, self.accuracy
Code Example #20
 def forward_fe(self, x):
     h1 = F.local_response_normalization(F.relu(self.conv1(x)))
     h1 = F.max_pooling_2d(h1, 3, stride=2)
     h2 = F.local_response_normalization(F.relu(self.conv2(h1)))
     h2 = F.max_pooling_2d(h2, 3, stride=2)
     h3 = F.relu(self.conv3(h2))
     h4 = F.relu(self.conv4(h3))
     h5 = F.max_pooling_2d(F.relu(self.conv5(h4)), 3, stride=2)
     return h5
Code Example #21
File: alex.py Project: ennnyo/LRCN
 def __forward(self, x):
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv1(x))), 3, stride=2)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv2(h))), 3, stride=2)
     h = F.relu(self.conv3(h))
     h = F.relu(self.conv4(h))
     h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
     return self.fc6(h)
 def __call__(self, x, train=True):
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv1(x))), 3, stride=2)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv2(h))), 3, stride=2)
      h = F.relu(self.conv3(h))
     h = F.dropout(F.relu(self.fc4(h)), train=train)
     h = self.fc5(h)
     return h
Code Example #23
    def __call__(self, x, t):
        h = F.relu(self['conv1/7x7_s2'](x))
        h = F.local_response_normalization(F.max_pooling_2d(h, 3, stride=2),
                                           n=5,
                                           alpha=(1e-4) / 5,
                                           k=1)
        h = F.relu(self['conv2/3x3_reduce'](h))
        h = F.relu(self['conv2/3x3'](h))
        h = F.max_pooling_2d(F.local_response_normalization(h,
                                                            n=5,
                                                            alpha=(1e-4) / 5,
                                                            k=1),
                             3,
                             stride=2)

        h = self.call_inception(h, 'inception_3a')
        h = self.call_inception(h, 'inception_3b')
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.call_inception(h, 'inception_4a')

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self['loss1/conv'](l))
        l = F.dropout(F.relu(self['loss1/fc'](l)), 0.7, train=self.train)
        l = self['loss1/classifier'](l)
        loss1 = F.softmax_cross_entropy(l, t)

        h = self.call_inception(h, 'inception_4b')
        h = self.call_inception(h, 'inception_4c')
        h = self.call_inception(h, 'inception_4d')

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self['loss2/conv'](l))
        l = F.dropout(F.relu(self['loss2/fc'](l)), 0.7, train=self.train)
        l = self['loss2/classifier'](l)
        loss2 = F.softmax_cross_entropy(l, t)

        h = self.call_inception(h, 'inception_4e')
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.call_inception(h, 'inception_5a')
        h = self.call_inception(h, 'inception_5b')

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self['fc8-20'](F.dropout(h, 0.4, train=self.train))
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report(
            {
                'loss': loss,
                'loss1': loss1,
                'loss2': loss2,
                'loss3': loss3,
                'accuracy': accuracy
            }, self)
        return loss
Code Example #24
    def forward(self, x_img, x_doc, y_data, train=True):
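        # GoogLeNet image branch (with auxiliary losses loss1/loss2) plus a document branch
        # (doc_fc1/doc_fc2); the two are merged by the two-input link bi1 before loss3_fc2.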
        x_img = cuda.cupy.asarray(x_img)
        x_doc = cuda.cupy.asarray(x_doc)
        y_data = cuda.cupy.asarray(y_data)

        img, doc, t = Variable(x_img), Variable(x_doc), Variable(y_data)

        h = F.relu(self.conv1(img))
        h = F.local_response_normalization(F.max_pooling_2d(h, 3, stride=2),
                                           n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(F.local_response_normalization(h, n=5),
                             3,
                             stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        self.loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        self.loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc1(F.dropout(h, 0.4, train=train))

        h2 = F.relu(self.doc_fc1(F.dropout(doc, train=train)))
        h2 = F.relu(self.doc_fc2(h2))
        b = F.relu(self.bi1(h, h2))
        h = self.loss3_fc2(b)

        self.loss3 = F.softmax_cross_entropy(h, t)

        if train:
            return 0.3 * (self.loss1 + self.loss2) + self.loss3
        else:
            return F.accuracy(h, t)
Code Example #25
    def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        #l = F.average_pooling_2d(h, 5, stride=3)
        l = F.average_pooling_2d(h, 5*(GoogLeNet.scale),
                                 stride=3*(GoogLeNet.scale))
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        #l = F.average_pooling_2d(h, 5, stride=3)
        l = F.average_pooling_2d(h, 5*(GoogLeNet.scale),
                                 stride=3*(GoogLeNet.scale))
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        #h = F.average_pooling_2d(h, 7, stride=1)
        h = F.average_pooling_2d(h, 7*(GoogLeNet.scale),
                                 stride=1*(GoogLeNet.scale))
        h = self.loss3_fc(F.dropout(h, 0.4))
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)

        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
Code Example #26
 def __call__(self, x, train=True):
     h = F.max_pooling_2d(F.relu(F.local_response_normalization(self.conv1(x))), 3, stride=2)
     h = F.max_pooling_2d(F.relu(F.local_response_normalization(self.conv2(h))), 3, stride=2)
     h = F.relu(self.conv3(h))
     h = F.relu(self.conv4(h))
     h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
     h = F.dropout(F.relu(self.fc6(h)), train=self.train)
     h = F.dropout(F.relu(self.fc7(h)), train=self.train)
     h = self.fc8(h)
     return h
Code Example #27
 def __call__(self, x):
     self.clear()
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv1(x))), 2, stride=2)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv2(h))), 2, stride=2)
     h = F.relu(self.fc6(h))
     h = F.relu(self.fc7(h))
     h = self.fc8(h)
     return h
Code Example #28
File: models.py Project: AIllIll/hyperface-new
    def __call__(self, x_img, t_detection, **others):
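        # AlexNet trunk up to fc7, followed by the fc8 detection head. The softmax cross-entropy loss,
        # the argmax predictions, and the conv1-conv5 weights are all passed to chainer.report.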
        # Alexnet
        h = F.relu(self.conv1(x_img))  # conv1
        h = F.max_pooling_2d(h, 3, stride=2, pad=0)  # max1
        h = F.local_response_normalization(h)  # norm1
        h = F.relu(self.conv2(h))  # conv2
        h = F.max_pooling_2d(h, 3, stride=2, pad=0)  # max2
        h = F.local_response_normalization(h)  # norm2
        h = F.relu(self.conv3(h))  # conv3
        h = F.relu(self.conv4(h))  # conv4
        h = F.relu(self.conv5(h))  # conv5
        h = F.max_pooling_2d(h, 3, stride=2, pad=0)  # pool5

        with chainer.using_config('train', True):
            h = F.dropout(F.relu(self.fc6(h)), ratio=0.0)  # fc6

        with chainer.using_config('train', True):
            h = F.dropout(F.relu(self.fc7(h)), ratio=0.0)  # fc7
        h_detection = self.fc8(h)  # fc8

        # Loss
        loss = F.softmax_cross_entropy(h_detection, t_detection)

        chainer.report({'loss': loss}, self)

        # Prediction
        h_detection = F.argmax(h_detection, axis=1)

        # Report results
        predict_data = {'img': x_img, 'detection': h_detection}
        teacher_data = {'img': x_img, 'detection': t_detection}
        chainer.report({'predict': predict_data}, self)
        chainer.report({'teacher': teacher_data}, self)

        # Report layer weights
        chainer.report(
            {
                'conv1_w': {
                    'weights': self.conv1.W
                },
                'conv2_w': {
                    'weights': self.conv2.W
                },
                'conv3_w': {
                    'weights': self.conv3.W
                },
                'conv4_w': {
                    'weights': self.conv4.W
                },
                'conv5_w': {
                    'weights': self.conv5.W
                }
            }, self)

        return loss
Code Example #29
File: GoogleNet.py Project: demarche/twitterNet
    def forward(self, x_img, x_doc, y_data, train=True):
        x_img = cuda.cupy.asarray(x_img)
        x_doc = cuda.cupy.asarray(x_doc)
        y_data = cuda.cupy.asarray(y_data)

        img, doc, t = Variable(x_img), Variable(x_doc), Variable(y_data)

        h = F.relu(self.conv1(img))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        self.loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        self.loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc1(F.dropout(h, 0.4, train=train))

        h2 = F.relu(self.doc_fc1(F.dropout(doc, train=train)))
        h2 = F.relu(self.doc_fc2(h2))
        b = F.relu(self.bi1(h, h2))
        h = self.loss3_fc2(b)

        self.loss3 = F.softmax_cross_entropy(h, t)

        if train:
            return 0.3 * (self.loss1 + self.loss2) + self.loss3
        else:
            return F.accuracy(h, t)
Code Example #30
 def reduct(self, x):
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv1(x))),
                          ksize=3,
                          stride=2)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv2(h))),
                          ksize=4,
                          stride=2)
     h = F.relu(self.conv3(h))
     return h
Code Example #31
File: alex.py Project: yizumi1012xxx/research-rcnn
 def __call__(self, x):
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv1(x))), 3, stride=2)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv2(h))), 3, stride=2)
     h = F.relu(self.conv3(h))
     h = F.relu(self.conv4(h))
     h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
     h = F.dropout(F.relu(self.fc6(h)), train=self.train)
     h = F.dropout(F.relu(self.fc7(h)), train=self.train)
     return self.fc8(h)
Code Example #32
    def __call__(self, x, layers):
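        # Returns a dict of intermediate activations for the requested layer names, stopping early
        # once the deepest requested layer (layers[-1]) has been computed.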
        ret = {}
        en = layers[-1]
        h = self.conv1(x)
        if 'conv1' in layers:
            ret.update({'conv1': h})
            if en == 'conv1':
                return ret
        h = F.max_pooling_2d(F.local_response_normalization(F.relu(h)),
                             3,
                             stride=2)
        h = F.dropout(self.conv2(h), ratio=self.dropout_rate)
        if 'conv2' in layers:
            ret.update({'conv2': h})
            if en == 'conv2':
                return ret
        h = F.max_pooling_2d(F.local_response_normalization(F.relu(h)),
                             3,
                             stride=2)
        h = F.dropout(self.conv3(h), ratio=self.dropout_rate)
        if 'conv3' in layers:
            ret.update({'conv3': h})
            if en == 'conv3':
                return ret
        h = F.relu(h)
        h = F.dropout(self.conv4(h), ratio=self.dropout_rate)
        if 'conv4' in layers:
            ret.update({'conv4': h})
            if en == 'conv4':
                return ret
        h = F.relu(h)
        h = F.dropout(self.conv5(h), ratio=self.dropout_rate)
        if 'conv5' in layers:
            ret.update({'conv5': h})
            if en == 'conv5':
                return ret
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.dropout(self.fc6(h), ratio=self.dropout_rate)
        if 'fc6' in layers:
            ret.update({'fc6': h})
            if en == 'fc6':
                return ret
        h = F.relu(h)
        h = F.dropout(self.fc7(h), ratio=self.dropout_rate)
        if 'fc7' in layers:
            ret.update({'fc7': h})
            if en == 'fc7':
                return ret
        h = F.relu(h)
        h = self.fc8(h)
        if 'fc8' in layers:
            ret.update({'fc8': h})

        return ret
Code Example #33
 def forward(self, x):
     h = F.max_pooling_2d(F.local_response_normalization(
         F.relu(self.conv1(x))), 3, stride=2)
     h = F.max_pooling_2d(F.local_response_normalization(
         F.relu(self.conv2(h))), 3, stride=2)
     h = F.relu(self.conv3(h))
     h = F.relu(self.conv4(h))
     h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
     h = F.dropout(F.relu(self.fc6(h)))
     h = F.dropout(F.relu(self.fc7(h)))
     h = self.fc8(h)
     return h
Code Example #34
    def forward(self, x, t=None):
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(F.max_pooling_2d(h, 3, stride=2),
                                           n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(F.local_response_normalization(h, n=5),
                             3,
                             stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        if t is not None:
            loss1 = F.softmax_cross_entropy(l, t)
            self.loss1 = loss1

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        if t is not None:
            loss2 = F.softmax_cross_entropy(l, t)
            self.loss2 = loss2

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc(F.dropout(h, 0.4))
        if t is not None:
            loss3 = F.softmax_cross_entropy(h, t)
            self.loss3 = loss3

        if t is not None:
            loss = 0.3 * (loss1 + loss2) + loss3
            accuracy = F.accuracy(h, t)
            self.loss = loss
            self.accuracy = accuracy
        else:
            self.out = h
Code Example #35
    def __call__(self, x, return_activations=False):

        # Activations for feature matching are returned before applying
        # any non-linearities.

        activations = []

        h = self.conv1(x)
        if return_activations:
            activations.append(h)  # [0]
        h = F.max_pooling_2d(F.local_response_normalization(F.relu(h)),
                             3,
                             stride=2)

        h = self.conv2(h)
        if return_activations:
            activations.append(h)  # [1]
        h = F.max_pooling_2d(F.local_response_normalization(F.relu(h)),
                             3,
                             stride=2)

        h = self.conv3(h)
        if return_activations:
            activations.append(h)  # [2]
        h = F.relu(h)

        h = self.conv4(h)
        if return_activations:
            activations.append(h)  # [3]
        h = F.relu(h)

        h = self.conv5(h)
        if return_activations:
            activations.append(h)  # [4]
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)

        h = self.fc6(h)
        if return_activations:
            activations.append(h)  # [5]
        h = F.dropout(F.relu(h))

        h = self.fc7(h)
        if return_activations:
            activations.append(h)  # [6]
        h = F.dropout(F.relu(h))

        h = self.fc8(h)
        if return_activations:
            activations.append(h)  # [7]
        if return_activations:
            return h, activations  # return non-softmax model output and activations

        return h  # only return model output (non-softmax)
Code Example #36
File: voxelchain.py Project: christie888/voxcelchain
 def fwd(self, x):
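      # Volumetric CNN: two conv -> ReLU -> LRN -> N-dimensional max pooling blocks (max_pooling_nd),
      # then fc3 with dropout and the fc4 output layer.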
     h = F.max_pooling_nd(F.local_response_normalization(
         F.relu(self.conv1(x))),
                          3,
                          stride=2)
     h = F.max_pooling_nd(F.local_response_normalization(
         F.relu(self.conv2(h))),
                          3,
                          stride=2)
     h = F.dropout(F.relu(self.fc3(h)), train=self.train)
     h = self.fc4(h)
     return h
Code Example #37
File: inception.py Project: nihohi0428/chainer
    def forward(self, x_data, y_data, train=True):
        x = Variable(x_data, volatile=not train)
        t = Variable(y_data, volatile=not train)

        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(F.max_pooling_2d(h, 3, stride=2),
                                           n=5)

        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(F.local_response_normalization(h, n=5),
                             3,
                             stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        if train:
            loss1 = F.average_pooling_2d(h, 5, stride=3)
            loss1 = F.relu(self.loss1_conv(loss1))
            loss1 = F.relu(self.loss1_fc1(loss1))
            loss1 = self.loss1_fc2(loss1)
            loss1 = F.softmax_cross_entropy(loss1, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        if train:
            loss2 = F.average_pooling_2d(h, 5, stride=3)
            loss2 = F.relu(self.loss2_conv(loss2))
            loss2 = F.relu(self.loss2_fc1(loss2))
            loss2 = self.loss2_fc2(loss2)
            loss2 = F.softmax_cross_entropy(loss2, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.dropout(F.average_pooling_2d(h, 7, stride=1), 0.4, train=train)
        h = self.loss3_fc(h)
        loss3 = F.softmax_cross_entropy(h, t)

        if train:
            loss = 0.3 * (loss1 + loss2) + loss3
        else:
            loss = loss3
        accuracy = F.accuracy(h, t)
        return loss, accuracy
Code Example #38
File: alex.py Project: philip30/chainer
    def __call__(self, x, t):
        h = F.max_pooling_2d(F.relu(F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = self.fc8(h)

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Code Example #39
File: alex.py Project: Mogmogu/deepstation
 def predict(self, x_data):
     x = chainer.Variable(x_data, volatile=True)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv1(x))), 3, stride=2)
     h = F.max_pooling_2d(F.relu(
         F.local_response_normalization(self.conv2(h))), 3, stride=2)
     h = F.relu(self.conv3(h))
     h = F.relu(self.conv4(h))
     h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
     h = F.dropout(F.relu(self.fc6(h)), train=self.train)
     h = F.dropout(F.relu(self.fc7(h)), train=self.train)
     h = self.fc8(h)
     return F.softmax(h)
Code Example #40
File: models.py Project: peace098beat/chainer-hikaru
    def forward(self, x):
        y1 = self.model['conv1/7x7_s2'](x)
        h = F.relu(y1)
        h = F.local_response_normalization(self.pool_func(h, 3, stride=2), n=5)
        h = F.relu(self.model['conv2/3x3_reduce'](h))
        y2 = self.model['conv2/3x3'](h)
        h = F.relu(y2)
        h = self.pool_func(F.local_response_normalization(h, n=5), 3, stride=2)
        out1 = self.model['inception_3a/1x1'](h)
        out3 = self.model[
            'inception_3a/3x3'](F.relu(self.model['inception_3a/3x3_reduce'](h)))
        out5 = self.model[
            'inception_3a/5x5'](F.relu(self.model['inception_3a/5x5_reduce'](h)))
        pool = self.model[
            'inception_3a/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
        y3 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y3)

        out1 = self.model['inception_3b/1x1'](h)
        out3 = self.model[
            'inception_3b/3x3'](F.relu(self.model['inception_3b/3x3_reduce'](h)))
        out5 = self.model[
            'inception_3b/5x5'](F.relu(self.model['inception_3b/5x5_reduce'](h)))
        pool = self.model[
            'inception_3b/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
        y4 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y4)

        h = self.pool_func(h, 3, stride=2)

        out1 = self.model['inception_4a/1x1'](h)
        out3 = self.model[
            'inception_4a/3x3'](F.relu(self.model['inception_4a/3x3_reduce'](h)))
        out5 = self.model[
            'inception_4a/5x5'](F.relu(self.model['inception_4a/5x5_reduce'](h)))
        pool = self.model[
            'inception_4a/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
        y5 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y5)

        out1 = self.model['inception_4b/1x1'](h)
        out3 = self.model[
            'inception_4b/3x3'](F.relu(self.model['inception_4b/3x3_reduce'](h)))
        out5 = self.model[
            'inception_4b/5x5'](F.relu(self.model['inception_4b/5x5_reduce'](h)))
        pool = self.model[
            'inception_4b/pool_proj'](self.pool_func(h, 3, stride=1, pad=1))
        y6 = F.concat((out1, out3, out5, pool), axis=1)
        h = F.relu(y6)

        return [y1, y2, y3, y4, y5, y6]
Code Example #41
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)

        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        if train:
            loss1 = F.average_pooling_2d(h, 5, stride=3)
            loss1 = F.relu(self.loss1_conv(loss1))
            loss1 = F.relu(self.loss1_fc1(loss1))
            loss1 = self.loss1_fc2(loss1)
            loss1 = F.softmax_cross_entropy(loss1, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        if train:
            loss2 = F.average_pooling_2d(h, 5, stride=3)
            loss2 = F.relu(self.loss2_conv(loss2))
            loss2 = F.relu(self.loss2_fc1(loss2))
            loss2 = self.loss2_fc2(loss2)
            loss2 = F.softmax_cross_entropy(loss2, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.dropout(F.average_pooling_2d(h, 7, stride=1), 0.4, train=train)
        h = self.loss3_fc(h)
        loss3 = F.softmax_cross_entropy(h, t)

        if train:
            loss = 0.3 * (loss1 + loss2) + loss3
        else:
            loss = loss3
        accuracy = F.accuracy(h, t)
        return loss, accuracy
Code Example #42
File: googlenet.py Project: Fhrozen/chainer
    def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc(F.dropout(h, 0.4))
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)

        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
Code Example #43
    def __call__(self, x, t):
        h = F.relu(self['conv1/7x7_s2'](x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5, alpha=(1e-4)/5, k=1)
        h = F.relu(self['conv2/3x3_reduce'](h))
        h = F.relu(self['conv2/3x3'](h))
        h = F.max_pooling_2d(F.local_response_normalization(
            h, n=5, alpha=(1e-4)/5, k=1), 3, stride=2)

        h = self.call_inception(h, 'inception_3a')
        h = self.call_inception(h, 'inception_3b')
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.call_inception(h, 'inception_4a')

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self['loss1/conv'](l))
        l = F.dropout(F.relu(self['loss1/fc'](l)), 0.7, train=self.train)
        l = self['loss1/classifier'](l)
        loss1 = F.softmax_cross_entropy(l, t)

        h = self.call_inception(h, 'inception_4b')
        h = self.call_inception(h, 'inception_4c')
        h = self.call_inception(h, 'inception_4d')

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self['loss2/conv'](l))
        l = F.dropout(F.relu(self['loss2/fc'](l)), 0.7, train=self.train)
        l = self['loss2/classifier'](l)
        loss2 = F.softmax_cross_entropy(l, t)

        h = self.call_inception(h, 'inception_4e')
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.call_inception(h, 'inception_5a')
        h = self.call_inception(h, 'inception_5b')

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self['loss3/classifier'](F.dropout(h, 0.4, train=self.train))
        loss3 = F.softmax_cross_entropy(h, t)

        loss = 0.3 * (loss1 + loss2) + loss3
        accuracy = F.accuracy(h, t)
        chainer.report({
            'loss': loss,
            'loss1': loss1,
            'loss2': loss2,
            'loss3': loss3,
            'accuracy': accuracy
        }, self)
        return loss
Code Example #44
File: train_FinalTask.py Project: tuananct/MyProject
 def forward(x_data, y_data, train=True):
     x, t = chainer.Variable(x_data), chainer.Variable(y_data)
     h = F.max_pooling_2d(F.relu(F.local_response_normalization(model.conv1(x))), 2, stride=2)
     h = F.max_pooling_2d(F.relu(F.local_response_normalization(model.conv2(h))), 2, stride=2)
     h = F.relu(model.conv3(h))
     h = F.relu(model.conv4(h))
     h = F.max_pooling_2d(F.relu(model.conv5(h)), 2, stride=2)
     h = F.dropout(F.relu(model.fc6(h)), train=train)
     h = F.dropout(F.relu(model.fc7(h)), train=train)
     y = model.fc8(h)
  
     if train:
         return F.softmax_cross_entropy(y, t)
     else:
         return F.accuracy(y, t)
Code Example #45
File: alex.py Project: ruo91/convnet-benchmarks
    def forward(self, x_data, y_data, train=True):
        x = chainer.Variable(x_data, volatile=not train)
        t = chainer.Variable(y_data, volatile=not train)

        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)
        return F.softmax_cross_entropy(h, t), F.accuracy(h, t)
Code Example #46
File: alex.py Project: Fhrozen/chainer
    def __call__(self, x, t):
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.local_response_normalization(
            F.relu(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        loss = F.softmax_cross_entropy(h, t)
        chainer.report({'loss': loss, 'accuracy': F.accuracy(h, t)}, self)
        return loss
Code Example #47
    def __call__(self, x, rois, t=None, train=False):
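        # Fast R-CNN style head: AlexNet-like convolutions, RoI pooling over the proposals (rois),
        # fc6/fc7 with dropout, then parallel classification (cls_score) and bounding-box regression
        # (bbox_pred) outputs; training combines softmax cross-entropy and smooth L1 losses.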
        h = self.conv1(x)
        h = F.relu(h)
        h = F.local_response_normalization(h, n=5, k=2, alpha=5e-4, beta=.75)
        h = F.max_pooling_2d(h, ksize=3, stride=2)

        h = self.conv2(h)
        h = F.relu(h)
        h = F.local_response_normalization(h, n=5, k=2, alpha=5e-4, beta=.75)
        h = F.max_pooling_2d(h, ksize=3, stride=2)

        h = self.conv3(h)
        h = F.relu(h)

        h = self.conv4(h)
        h = F.relu(h)

        h = self.conv5(h)
        h = F.relu(h)

        h = roi_pooling_2d(h, rois, 6, 6, spatial_scale=0.0625)

        h = self.fc6(h)
        h = F.relu(h)
        h = F.dropout(h, train=train, ratio=.5)

        h = self.fc7(h)
        h = F.relu(h)
        h = F.dropout(h, train=train, ratio=.5)

        h_cls_score = self.cls_score(h)
        cls_score = F.softmax(h_cls_score)
        bbox_pred = self.bbox_pred(h)

        if t is None:
            assert train is False
            return cls_score, bbox_pred

        assert train
        t_cls, t_bbox = t
        self.cls_loss = F.softmax_cross_entropy(h_cls_score, t_cls)
        self.bbox_loss = F.smooth_l1_loss(bbox_pred, t_bbox)

        xp = cuda.get_array_module(x.data)
        lambda_ = (0.5 * (t_cls.data != self.bg_label)).astype(xp.float32)
        lambda_ = Variable(lambda_, volatile=not train)
        L = self.cls_loss + F.sum(lambda_ * self.bbox_loss)
        return L
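The rois argument passed to roi_pooling_2d above is typically an (R, 5) float32 array whose columns are (batch index, x_min, y_min, x_max, y_max) in input-image coordinates; spatial_scale=0.0625 (1/16) then projects the boxes onto the conv5 feature map. A small illustrative array (the coordinates here are made up):

import numpy as np

rois = np.array([
    [0,  16.0,  16.0, 240.0, 240.0],   # a region in image 0 of the batch
    [0, 100.0,  60.0, 300.0, 200.0],   # a second region in the same image
], dtype=np.float32)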
Code example #48
    def forward(self, x, train=False):
        self.data = x
        self.conv1 = F.relu(self.caffe.conv1(self.data))
        self.pool1 = F.max_pooling_2d(self.conv1, ksize=3, stride=2)
        # Multiplying by 5 ** 0.75 effectively rewrites the k=5 denominator
        # as Caffe's (1 + (alpha / n) * sum x^2) ** beta parameterisation.
        self.norm1 = F.local_response_normalization(
            self.pool1, k=5, n=5, alpha=0.0001, beta=0.75) * np.power(5, 0.75)
        self.conv2 = F.relu(self.caffe.conv2(self.norm1))
        self.pool2 = F.max_pooling_2d(self.conv2, ksize=3, stride=2)
        self.norm2 = F.local_response_normalization(
            self.pool2, k=5, n=5, alpha=0.0001, beta=0.75) * np.power(5, 0.75)
        self.conv3 = F.relu(self.caffe.conv3(self.norm2))
        self.conv4 = F.relu(self.caffe.conv4(self.conv3))
        self.conv5 = F.relu(self.caffe.conv5(self.conv4))
        self.pool5 = F.max_pooling_2d(self.conv5, ksize=3, stride=2)
        self.fc6 = F.dropout(F.relu(self.caffe.fc6(self.pool5)), train=train)
        self.fc7 = F.dropout(F.relu(self.caffe.fc7(self.fc6)), train=train)
        self.fc8 = self.fine.fc8ft(self.fc7)
        return self.fc8
Code example #49
File: segnet_basic.py  Project: gwtnb/chainercv
    def __call__(self, x):
        """Compute an image-wise score from a batch of images

        Args:
            x (chainer.Variable): A variable with 4D image array.

        Returns:
            chainer.Variable:
            An image-wise score. Its channel size is :obj:`self.n_class`.

        """
        p1 = F.MaxPooling2D(2, 2)
        p2 = F.MaxPooling2D(2, 2)
        p3 = F.MaxPooling2D(2, 2)
        p4 = F.MaxPooling2D(2, 2)
        h = F.local_response_normalization(x, 5, 1, 1e-4 / 5., 0.75)
        h = _pool_without_cudnn(p1, F.relu(self.conv1_bn(self.conv1(h))))
        h = _pool_without_cudnn(p2, F.relu(self.conv2_bn(self.conv2(h))))
        h = _pool_without_cudnn(p3, F.relu(self.conv3_bn(self.conv3(h))))
        h = _pool_without_cudnn(p4, F.relu(self.conv4_bn(self.conv4(h))))
        h = self._upsampling_2d(h, p4)
        h = self.conv_decode4_bn(self.conv_decode4(h))
        h = self._upsampling_2d(h, p3)
        h = self.conv_decode3_bn(self.conv_decode3(h))
        h = self._upsampling_2d(h, p2)
        h = self.conv_decode2_bn(self.conv_decode2(h))
        h = self._upsampling_2d(h, p1)
        h = self.conv_decode1_bn(self.conv_decode1(h))
        score = self.conv_classifier(h)
        return score
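The _upsampling_2d helper is not shown in this snippet; in the SegNet pattern it unpools a decoder feature map using the argmax indices recorded by the matching MaxPooling2D object on the encoder side. A minimal sketch, assuming the pooling objects expose indexes, kh/kw and sy/sx the way Chainer's MaxPooling2D does:

    def _upsampling_2d(self, x, pool):
        # Scatter each value back to the position recorded by the
        # encoder-side max pooling; all other positions stay zero.
        outsize = (x.shape[2] * 2, x.shape[3] * 2)
        return F.upsampling_2d(
            x, pool.indexes, ksize=(pool.kh, pool.kw),
            stride=(pool.sy, pool.sx), outsize=outsize)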
Code example #50
File: alex.py  Project: syundo0730/deresta-cnn
    def forward(self, x):
        pool1 = lambda x: F.max_pooling_2d(F.relu(F.local_response_normalization(x)), 3, stride=2)  # (55 - 3)/2 + 1 = 27
        pool2 = lambda x: F.max_pooling_2d(F.relu(F.local_response_normalization(x)), 3, stride=2)  # (27 - 3)/2 + 1 = 13
        pool5 = lambda x: F.max_pooling_2d(F.relu(x), 3, stride=2)  # (13 - 3)/2 + 1 = 6

        h = pool1(self.conv1(x))
        h = pool2(self.conv2(h))
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = pool5(self.conv5(h))
        h = F.dropout(F.relu(self.fc6(h)), train=self.train)
        h = F.dropout(F.relu(self.fc7(h)), train=self.train)
        h = F.dropout(F.sigmoid(self.fc8(h)), train=self.train)
        y = self.fc9(h)

        return y
Code example #51
    def __call__(self, x):
        h_P1 = self.P1(x)
        if self.P1N_Normalize: h_P1 = F.local_response_normalization(h_P1)
        h_P1 = F.max_pooling_2d(F.relu(h_P1), ksize=self.P1P_ksize, cover_all=True)

        h_P2 = self.P2(h_P1)
        if self.P2N_Normalize: h_P2 = F.local_response_normalization(h_P2)
        h_P2 = F.max_pooling_2d(F.relu(h_P2), ksize=self.P2P_ksize, cover_all=True)

        h_P3 = self.P3(h_P2)
        if self.P3N_Normalize: h_P3 = F.local_response_normalization(h_P3)
        h_P3 = F.max_pooling_2d(F.relu(h_P3), ksize=self.P3P_ksize, cover_all=True)

        h_L1 = F.dropout(F.relu(self.L1(h_P3)), ratio=self.L1_dropout, train=self.IsTrain)
        h_L2 = F.dropout(F.relu(self.L2(h_L1)), ratio=self.L2_dropout, train=self.IsTrain)
        y = h_L2
        return y
Code example #52
File: caffe_function.py  Project: naokiiiii/chainer
    def _setup_lrn(self, layer):
        param = layer.lrn_param
        if param.norm_region != param.ACROSS_CHANNELS:
            raise RuntimeError("Within-channel LRN is not supported")

        fwd = lambda x: functions.local_response_normalization(
            x, n=param.local_size, k=param.k, alpha=param.alpha / param.local_size, beta=param.beta
        )
        self.forwards[layer.name] = fwd
        self._add_layer(layer)
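The alpha / local_size division above reflects a parameterisation difference: Caffe's LRN denominator is (k + (alpha / n) * sum x^2) ** beta over a window of n channels, while local_response_normalization takes alpha per element. As an illustration (parameter values chosen arbitrarily), a Caffe layer with local_size=5, alpha=1e-4, beta=0.75, k=1 would map to:

y = functions.local_response_normalization(x, n=5, k=1.0, alpha=1e-4 / 5, beta=0.75)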
Code example #53
File: googlenet.py  Project: BRETT71/chainer
    def __call__(self, x, t):
        h = F.relu(self.conv1(x))
        h = F.local_response_normalization(
            F.max_pooling_2d(h, 3, stride=2), n=5)
        h = F.relu(self.conv2_reduce(h))
        h = F.relu(self.conv2(h))
        h = F.max_pooling_2d(
            F.local_response_normalization(h, n=5), 3, stride=2)

        h = self.inc3a(h)
        h = self.inc3b(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc4a(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss1_conv(l))
        l = F.relu(self.loss1_fc1(l))
        l = self.loss1_fc2(l)
        self.loss1 = F.softmax_cross_entropy(l, t)

        h = self.inc4b(h)
        h = self.inc4c(h)
        h = self.inc4d(h)

        l = F.average_pooling_2d(h, 5, stride=3)
        l = F.relu(self.loss2_conv(l))
        l = F.relu(self.loss2_fc1(l))
        l = self.loss2_fc2(l)
        self.loss2 = F.softmax_cross_entropy(l, t)

        h = self.inc4e(h)
        h = F.max_pooling_2d(h, 3, stride=2)
        h = self.inc5a(h)
        h = self.inc5b(h)

        h = F.average_pooling_2d(h, 7, stride=1)
        h = self.loss3_fc(F.dropout(h, 0.4, train=self.train))
        self.loss3 = F.softmax_cross_entropy(h, t)

        self.loss = 0.3 * (self.loss1 + self.loss2) + self.loss3
        self.accuracy = F.accuracy(h, t)
        return self.loss
Code example #54
    def check_forward(self, inputs, backend_config):
        y_expect, = self.forward_cpu(inputs)

        if backend_config.use_cuda:
            inputs = cuda.to_gpu(inputs)

        with backend_config:
            y = functions.local_response_normalization(*inputs)

        assert y.data.dtype == self.dtype
        testing.assert_allclose(y_expect, y.data, **self.check_forward_options)
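forward_cpu is not shown in this snippet; a plain NumPy reference for across-channel LRN, the kind of baseline such a test would compare against, might look like the sketch below (window of n channels centred on each channel, y = x / (k + alpha * sum x^2) ** beta):

import numpy as np

def lrn_reference(x, n=5, k=2.0, alpha=1e-4, beta=0.75):
    # x has shape (N, C, H, W); normalise each channel by a window of
    # n neighbouring channels, clamped at the channel boundaries.
    half = n // 2
    sq = x ** 2
    y = np.empty_like(x)
    for c in range(x.shape[1]):
        lo, hi = max(0, c - half), min(x.shape[1], c + half + 1)
        denom = (k + alpha * sq[:, lo:hi].sum(axis=1)) ** beta
        y[:, c] = x[:, c] / denom
    return y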
Code example #55
    def __call__(self, x, t=None):
        h = F.local_response_normalization(self.conv1(x))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.local_response_normalization(self.conv2(h))
        h = F.max_pooling_2d(F.relu(h), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        h = self.fc8(h)

        self.pred = F.softmax(h)
        if t is None:
            assert not chainer.config.train
            return

        self.loss = F.softmax_cross_entropy(h, t)
        self.accuracy = F.accuracy(h, t)
        return self.loss
Code example #56
    def check_backward(self, x_data, y_grad):
        x = Variable(x_data)
        y = local_response_normalization(x)
        y.grad = y_grad
        y.backward()

        func = y.creator
        f = lambda: func.forward((x.data,))
        gx, = numerical_grad(f, (x.data,), (y.grad,), eps=1)

        assert_allclose(gx, x.grad, atol=1e-3)
Code example #57
    def __call__(self, x):
        h = x
        for iL in range(self.NPLayers):
            h = self.__dict__["P%d"%iL](h)
            h = F.local_response_normalization(h)
            h = F.max_pooling_2d(F.relu(h), ksize=self.NKsize[iL+1], cover_all=True)

        h = F.dropout(F.relu(self.L1(h)), ratio=self.L1_dropout, train=self.IsTrain)
        h = F.dropout(F.relu(self.L2(h)), ratio=self.L2_dropout, train=self.IsTrain)
        y = h
        return y
Code example #58
    def predict(self, x_test, gpu=-1):
        if gpu >= 0:
            x_test = cuda.to_gpu(x_test)
        x = Variable(x_test)

        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv1(x))), 3, stride=2)
        h = F.max_pooling_2d(F.relu(
            F.local_response_normalization(self.conv2(h))), 3, stride=2)
        h = F.relu(self.conv3(h))
        h = F.relu(self.conv4(h))
        h = F.max_pooling_2d(F.relu(self.conv5(h)), 3, stride=2)
        h = F.dropout(F.relu(self.fc6(h)))
        h = F.dropout(F.relu(self.fc7(h)))
        y = self.fc8(h)

        predictions = np.array([], np.float32)
        for o in y.data:
            predictions = np.append(predictions, np.array([np.argmax(o)], np.float32))
        return predictions
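As a usage note, the per-sample loop above can be written as a single vectorised call; cuda.to_cpu is a no-op for NumPy arrays, so this also covers the GPU path:

predictions = np.argmax(cuda.to_cpu(y.data), axis=1).astype(np.float32)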
Code example #59
    def __call__(self, x):
        c1 = F.relu(self.conv1(x))
        m1 = F.max_pooling_2d(c1, 3, stride=2, pad=0)
        m1_n = F.local_response_normalization(m1)
        c1a = F.relu(self.conv1a(m1_n))
        c2 = F.relu(self.conv2(m1_n))
        m2 = F.max_pooling_2d(c2, 3, stride=2, pad=0)
        m2_n = F.local_response_normalization(m2)
        c3 = F.relu(self.conv3(m2_n))
        c3a = F.relu(self.conv3a(c3))
        c4 = F.relu(self.conv4(c3))
        c5 = F.relu(self.conv5(c4))
        m5 = F.max_pooling_2d(c5, 3, stride=2, pad=0)

        c = F.concat((c1a, c3a, m5))

        c_all = F.relu(self.conv_all(c))
        fc = F.relu(self.fc_full(c_all))

        detection = F.relu(self.fc_detection1(fc))
        detection = self.fc_detection2(detection)
        landmark = F.relu(self.fc_landmarks1(fc))
        landmark = self.fc_landmarks2(landmark)
        visibility = F.relu(self.fc_visibility1(fc))
        visibility = self.fc_visibility2(visibility)
        pose = F.relu(self.fc_pose1(fc))
        pose = self.fc_pose2(pose)
        gender = F.relu(self.fc_gender1(fc))
        gender = self.fc_gender2(gender)

        detection = F.softmax(detection)[:, 1]
        gender = F.softmax(gender)[:, 1]

        return {'detection': detection,
                'landmark': landmark,
                'visibility': visibility,
                'gender': gender,
                'pose': pose}
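A hedged usage sketch of the returned dictionary, assuming a batch of preprocessed images in a variable named images (the 0.5 threshold is purely illustrative):

outputs = model(images)
is_face = outputs['detection'].data > 0.5   # probability of the positive class
landmarks = outputs['landmark'].data        # regressed landmark coordinates
pose = outputs['pose'].data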