Code Example #1
    def forward(self, x):
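        # backbone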
        out = self.body(x)

        # FPN
        fpn = self.fpn(out)

        # SSH
        feature1 = self.ssh1(fpn[0])
        feature2 = self.ssh2(fpn[1])
        feature3 = self.ssh3(fpn[2])
        features = [feature1, feature2, feature3]

        bbox_regressions = flops_counter.cat(
            [self.BboxHead[i](feature) for i, feature in enumerate(features)],
            dim=1)
        classifications = flops_counter.cat(
            [self.ClassHead[i](feature) for i, feature in enumerate(features)],
            dim=1)
        ldm_regressions = flops_counter.cat(
            [self.LandmarkHead[i](feature) for i, feature in enumerate(features)],
            dim=1)

        output = (bbox_regressions, self.softmax(classifications),
                  ldm_regressions)
        return output
Code Example #2
    def forward(self, x):
        detection_sources = list()
        loc = list()
        conf = list()

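        # backbone: conv and inception blocks; detection sources are collected at three scales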
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.maxpool2(x)
        x = self.inception1(x)
        x = self.inception2(x)
        x = self.inception3(x)
        detection_sources.append(x)

        x = self.conv3_1(x)
        x = self.conv3_2(x)
        detection_sources.append(x)

        x = self.conv4_1(x)
        x = self.conv4_2(x)
        detection_sources.append(x)

        for (x, l, c) in zip(detection_sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1))
            conf.append(c(x).permute(0, 2, 3, 1))

        loc = flops_counter.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = flops_counter.cat([o.view(o.size(0), -1) for o in conf], 1)

        output = (loc.view(loc.size(0), -1, 4),
                  self.softmax(conf.view(conf.size(0), -1, self.num_classes)))

        return output
Code Example #3
    def forward(self, x):
        confidences = []
        locations = []
        start_layer_index = 0
        header_index = 0
        end_layer_index = 0
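        # run the backbone in chunks; a classification/regression header is applied at each source layer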
        for end_layer_index in self.source_layer_indexes:
            for layer in self.backbone[start_layer_index: end_layer_index]:
                x = layer(x)
            y = x
            start_layer_index = end_layer_index
            confidence, location = self.compute_header(header_index, y)
            header_index += 1
            confidences.append(confidence)
            locations.append(location)

        for layer in self.backbone[end_layer_index:]:
            x = layer(x)

        layer = self.extras
        x = layer(x)
        confidence, location = self.compute_header(header_index, x)
        header_index += 1
        confidences.append(confidence)
        locations.append(location)

        confidences = flops_counter.cat(confidences, 1)
        locations = flops_counter.cat(locations, 1)
        confidences = self.softmax(confidences)
        return locations, confidences
Code Example #4
    def forward(self, x):
        loc = list()
        conf = list()

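        # backbone: six feature maps (conv1_x .. conv6_x)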
        conv1_x = self.inception1(self.conv1(x))
        conv2_x = self.inception2(self.maxpool1(conv1_x))
        conv3_x = self.inception3(self.conv3(conv2_x))
        conv4_x = self.inception4(self.maxpool2(conv3_x))
        conv5_x = self.conv5_2(self.conv5_1(conv4_x))
        conv6_x = self.conv6_2(self.conv6_1(conv5_x))

        # fpn
        conv6_x = self.latlayer6(conv6_x)
        conv5_x = self.latlayer5(conv5_x)
        conv4_x = self.latlayer4(conv4_x)
        conv3_x = self.latlayer3(conv3_x)
        conv2_x = self.latlayer2(conv2_x)
        conv1_x = self.latlayer1(conv1_x)

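        # top-down fusion: upsample, pad to match spatial size, element-wise multiply, then smooth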
        conv4_x_up = self.upsample(conv4_x)
        if conv4_x_up.value[2] != conv3_x.value[2] or conv4_x_up.value[3] != conv3_x.value[3]:
            pad = (0, conv3_x.value[3] - conv4_x_up.value[3], 0, conv3_x.value[2] - conv4_x_up.value[2])
            conv4_x_up = F.pad(conv4_x_up, pad)
        conv3_x = self.smooth3(self.eltmul(conv4_x_up, conv3_x))

        conv3_x_up = self.upsample(conv3_x)
        if conv3_x_up.value[2] != conv2_x.value[2] or conv3_x_up.value[3] != conv2_x.value[3]:
            pad = (0, conv2_x.value[3] - conv3_x_up.value[3], 0, conv2_x.value[2] - conv3_x_up.value[2])
            conv3_x_up = F.pad(conv3_x_up, pad)
        conv2_x = self.smooth2(self.eltmul(conv3_x_up, conv2_x))

        conv2_x_up = self.upsample(conv2_x)
        if conv2_x_up.value[2] != conv1_x.value[2] or conv2_x_up.value[3] != conv1_x.value[3]:
            pad = (0, conv1_x.value[3] - conv2_x_up.value[3], 0, conv1_x.value[2] - conv2_x_up.value[2])
            conv2_x_up = F.pad(conv2_x_up, pad)
        conv1_x = self.smooth1(self.eltmul(conv2_x_up, conv1_x))

        sources = [conv1_x, conv2_x, conv3_x, conv4_x, conv5_x, conv6_x]
        # cpm
        sources[0] = self.cpm1(sources[0])
        sources[1] = self.cpm2(sources[1])
        sources[2] = self.cpm3(sources[2])
        sources[3] = self.cpm4(sources[3])
        sources[4] = self.cpm5(sources[4])
        sources[5] = self.cpm6(sources[5])

        # head
        featuremap_size = []
        for (x, l, c) in zip(sources, self.loc, self.conf):
            featuremap_size.append([x.size(2), x.size(3)])
            loc.append(l(x).permute(0, 2, 3, 1))
            conf.append(c(x).permute(0, 2, 3, 1))

        face_loc = flops_counter.cat([o.view(o.size(0), -1) for o in loc], 1)
        face_conf = flops_counter.cat([o.view(o.size(0), -1) for o in conf], 1)

        return face_loc.view(face_loc.size(0), -1, 4), self.softmax(face_conf.view(face_conf.size(0), -1, 2))
Code Example #5
    def forward(self, x):
        x1_1 = self.relu1(self.cpm1(x))
        x1_2 = self.relu2(self.cpm2(x))
        x2_1 = self.relu3(self.cpm3(x1_2))
        x2_2 = self.relu4(self.cpm4(x1_2))
        x3_1 = self.relu5(self.cpm5(x2_2))
        return flops_counter.cat([x1_1, x2_1, x3_1], 1)
Code Example #6
    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = flops_counter.cat([x, -x], 1)
        # x = F.relu(x, inplace=True)
        x = self.relu(x)
        return x
Code Example #7
    def forward(self, x):

        detection_sources = list()
        loc_data = list()
        conf_data = list()

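        # backbone: four detection sources are collected after model3..model6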
        x = self.model1(x)
        # x = F.max_pool2d(x, 2)
        x = self.pool1(x)
        x = self.model2(x)
        # x = F.max_pool2d(x, 2)
        x = self.pool2(x)
        x = self.model3(x)
        detection_sources.append(x)

        # x = F.max_pool2d(x, 2)
        x = self.pool3(x)
        x = self.model4(x)
        detection_sources.append(x)

        # x = F.max_pool2d(x, 2)
        x = self.pool4(x)
        x = self.model5(x)
        detection_sources.append(x)

        # x = F.max_pool2d(x, 2)
        x = self.pool5(x)
        x = self.model6(x)
        detection_sources.append(x)

        for (x, l, c) in zip(detection_sources, self.loc, self.conf):
            # loc_data.append(l(x).permute(0, 2, 3, 1).contiguous())
            # conf_data.append(c(x).permute(0, 2, 3, 1).contiguous())
            loc_data.append(l(x).permute(0, 2, 3, 1))
            conf_data.append(c(x).permute(0, 2, 3, 1))

        loc_data = flops_counter.cat([o.view(o.size(0), -1) for o in loc_data], 1)
        conf_data = flops_counter.cat([o.view(o.size(0), -1) for o in conf_data], 1)

        output = (loc_data.view(loc_data.size(0), -1, 14),
                self.softmax(conf_data.view(conf_data.size(0), -1, self.num_classes)))

        return output
Code Example #8
File: sfa.py, Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x):
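        # SSH-style context module: the 5x5 and 7x7 branches share the ssh_dimred output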
        ssh_3x3_f = self.ssh_3x3(x)

        ssh_dimred_f = self.ssh_dimred(x)

        ssh_5x5_f = self.ssh_5x5(ssh_dimred_f)

        ssh_7x7_f = self.ssh_7x7(ssh_dimred_f)

        return self.out_relu(flops_counter.cat([ssh_3x3_f, ssh_5x5_f, ssh_7x7_f], 1))
Code Example #9
    def forward(self, x):
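        # four parallel branches, concatenated, fused by cated_conv, then added back to the input (residual)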
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)
        x3 = self.branch3(x)

        out = flops_counter.cat((x0, x1, x2, x3), 1)
        out = self.cated_conv(out)
        out = self.eltadd(out, x)

        return out
Code Example #10
    def forward(self, x):
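        # concatenate three branches, project with ConvLinear, scale, and add the shortcut path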
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        x2 = self.branch2(x)

        out = flops_counter.cat((x0, x1, x2), 1)
        out = self.ConvLinear(out)
        short = self.shortcut(x)
        # out = out * self.scale + short
        out = self.eltadd(out * self.scale, short)
        out = self.relu(out)

        return out
Code Example #11
    def forward(self, input):
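        # context module: 3x3, 5x5 and 7x7 receptive-field branches concatenated along channels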
        conv3X3 = self.conv3X3(input)

        conv5X5_1 = self.conv5X5_1(input)
        conv5X5 = self.conv5X5_2(conv5X5_1)

        conv7X7_2 = self.conv7X7_2(conv5X5_1)
        conv7X7 = self.conv7x7_3(conv7X7_2)

        out = flops_counter.cat([conv3X3, conv5X5, conv7X7], dim=1)
        # out = F.relu(out)
        out = self.relu(out)
        return out
Code Example #12
    def forward(self, x):
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        conv4_3 = self.conv4(x)

        # branch 1
        conv5_3 = self.conv5(self.pool4(conv4_3))
        conv5_256 = self.conv5_256(conv5_3)
        conv5_256_up = self.conv5_256_up(conv5_256)
        # branch 2
        conv4_256 = self.conv4_256(conv4_3)
        # fuse(conv5_256_up, conv4_256)
        if conv5_256_up.value[2] != conv4_256.value[2] or \
                conv5_256_up.value[3] != conv4_256.value[3]:
            pad = (0, conv4_256.value[3] - conv5_256_up.value[3], 0,
                   conv4_256.value[2] - conv5_256_up.value[2])
            conv5_256_up = F.pad(conv5_256_up, pad)
        conv4_fuse = flops_counter.cat([conv5_256_up, conv4_256], 1)

        conv4_fuse_final_dim_red = self.conv4_fuse_final_dim_red(
            self.conv4_fuse_final(conv4_fuse))

        head1_f = self.head1(conv4_fuse_final_dim_red)
        head2_f = self.head2(conv4_fuse_final_dim_red)
        head4_f = self.head4(conv4_fuse_final_dim_red)
        outs = [head1_f, head2_f, head4_f]

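        # per-head box and class predictions, concatenated across the three heads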
        loc = []
        conf = []
        for i, o in enumerate(outs):
            loc += [self.bbox_head[i](o)]
            conf += [self.cls_head[i](o)]

        loc_cat = flops_counter.cat(loc, 1)
        conf_cat = self.softmax(flops_counter.cat(conf, 2))

        return loc_cat, conf_cat
Code Example #13
    def forward(self, x):
        ssh_3x3 = self.ssh_3x3(x)

        ssh_dimred = self.ssh_dimred(x)
        ssh_dimred_relu = self.ssh_dimred_relu(ssh_dimred)
        ssh_5x5 = self.ssh_5x5(ssh_dimred_relu)

        ssh_7x7_1 = self.ssh_7x7_1(ssh_dimred_relu)
        ssh_7x7_1_relu = self.ssh_7x7_1_relu(ssh_7x7_1)
        ssh_7x7 = self.ssh_7x7(ssh_7x7_1_relu)

        # merge
        ssh_output = flops_counter.cat([ssh_3x3, ssh_5x5, ssh_7x7], 1)
        ssh_output_relu = self.ssh_output_relu(ssh_output)

        return ssh_output_relu
Code Example #14
    def forward(self, x):
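        # Inception-style block: 1x1, pooled 1x1, 3x3 and stacked-3x3 branches concatenated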
        branch1x1 = self.branch1x1(x)

        # branch1x1_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1)
        branch1x1_pool = self.branch1x1_pool(x)
        branch1x1_2 = self.branch1x1_2(branch1x1_pool)

        branch3x3_reduce = self.branch3x3_reduce(x)
        branch3x3 = self.branch3x3(branch3x3_reduce)

        branch3x3_reduce_2 = self.branch3x3_reduce_2(x)
        branch3x3_2 = self.branch3x3_2(branch3x3_reduce_2)
        branch3x3_3 = self.branch3x3_3(branch3x3_2)

        outputs = [branch1x1, branch1x1_2, branch3x3, branch3x3_3]
        return flops_counter.cat(outputs, 1)
Code Example #15
File: extd.py, Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x):
        # x_1 = x[:, :self.inp, :, :]
        # x_2 = x[:, self.inp:, :, :]
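        # the channel split is emulated on shape proxies: build TensorSize objects with the split channel counts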
        b, c, h, w = x.value
        x_1 = flops_counter.TensorSize([b, self.inp, h, w])
        x_2 = flops_counter.TensorSize([b, c - self.inp, h, w])

        a_1 = self.conv1x1_1(x_1)
        # g_1 = F.sigmoid(self.gate_1(x_1))
        g_1 = self.sigmoid1(self.gate_1(x_1))

        a_2 = self.conv1x1_2(x_2)
        # g_2 = F.sigmoid(self.gate_2(x_2))
        g_2 = self.sigmoid2(self.gate_2(x_2))

        ret = flops_counter.cat(
            (self.eltmul1(a_1, g_1), self.eltmul2(a_2, g_2)), 1)

        return ret
Code Example #16
    def forward(self, x):
        # residual
        branch1 = self.branch1(x)
        branch2a = self.branch2a(x)
        branch2b = self.branch2b(branch2a)
        branch2c = self.branch2c(branch2b)
        branch_sum = self.eltadd(branch1, branch2c)
        rescomb = self.rescomb_relu(branch_sum)

        # ssh
        ssh_1 = self.ssh_1_conv(rescomb)
        ssh_dimred = self.ssh_dimred_conv(rescomb)
        ssh_dimred = self.ssh_dimred_relu(ssh_dimred)
        ssh_2 = self.ssh_2_conv(ssh_dimred)
        ssh_3a = self.ssh_3a_conv(ssh_dimred)
        ssh_3a = self.ssh_3a_relu(ssh_3a)
        ssh_3b = self.ssh_3b_conv(ssh_3a)

        ssh_concat = flops_counter.cat([ssh_1, ssh_2, ssh_3b], 1)
        ssh_concat = self.concat_relu(ssh_concat)

        return ssh_concat
Code Example #17
File: csp.py, Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x):
        # base
        out = self.base_conv(x)
        out = self.base_bn(out)
        out = self.base_relu(out)
        out = self.base_maxpooling(out)

        # layer1
        out = self.layer1_bottleneck0(out)
        out = self.layer1_bottleneck1(out)
        out = self.layer1_bottleneck2(out)

        # layer2
        out = self.layer2_bottleneck0(out)
        out = self.layer2_bottleneck1(out)
        out = self.layer2_bottleneck2(out)
        stage3 = self.layer2_bottleneck3(out)

        # layer3
        out = self.layer3_bottleneck0(stage3)
        out = self.layer3_bottleneck1(out)
        out = self.layer3_bottleneck2(out)
        out = self.layer3_bottleneck3(out)
        out = self.layer3_bottleneck4(out)
        stage4 = self.layer3_bottleneck5(out)

        # layer4
        out = self.layer4_bottleneck0(stage4)
        out = self.layer4_bottleneck1(out)
        stage5 = self.layer4_bottleneck2(out)

        # pn_deconv
        p3up = self.p3up_trconv(stage3)
        p4up = self.p4up_trconv(stage4)
        p5up = self.p5up_trconv(stage5)

        # l2norm
        p3up = self.p3up_l2norm(p3up)
        p4up = self.p4up_l2norm(p4up)
        p5up = self.p5up_l2norm(p5up)

        # concat
        conc = flops_counter.cat((p3up, p4up, p5up), 1)

        # detection head - feat
        feat = self.feat_conv(conc)
        feat = self.feat_bn(feat)
        feat = self.feat_relu(feat)

        # detection head - class
        x_class = self.class_conv(feat)
        x_class = self.class_sigmoid(x_class)

        # detection head - regr
        x_regr = self.regr_conv(feat)
        x_regr = self.regr_relu(x_regr)

        # detection head - offset
        x_offset = self.offset_conv(feat)
        x_offset = self.offset_relu(x_offset)

        return x_class, x_regr, x_offset
Code Example #18
File: extd.py, Project: fengyuentau/PyTorch-FLOPs
    def forward(self, x):
        sources = list()
        loc = list()
        conf = list()

        end = 8 if self.model_type == 32 else 6
        for k in range(end):
            x = self.base[k](x)
        s1 = x  # (640, 640) -> (320, 320)

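        # the shared base layers self.base[2:end] are re-applied at each subsequent scale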
        for k in range(2, end):
            x = self.base[k](x)
        s2 = x  # (160, 160) -> (80, 80)

        for k in range(2, end):
            x = self.base[k](x)
        s3 = x  # (80, 80) -> (40, 40)

        for k in range(2, end):
            x = self.base[k](x)
        s4 = x  # (40, 40) -> (20, 20)

        for k in range(2, end):
            x = self.base[k](x)
        s5 = x  # (20, 20) -> (10, 10)

        for k in range(2, end):
            x = self.base[k](x)
        s6 = x  # (10, 10) -> (5, 5)

        sources.append(s6)

        # def upsample_add(seq, source, target, up_handle, add_handle):
        u1 = upsample_add(self.upfeat[0], s6, s5, self.upsample, self.eltadd)
        sources.append(u1)
        u2 = upsample_add(self.upfeat[0], u1, s4, self.upsample, self.eltadd)
        sources.append(u2)
        u3 = upsample_add(self.upfeat[0], u2, s3, self.upsample, self.eltadd)
        sources.append(u3)
        u4 = upsample_add(self.upfeat[0], u3, s2, self.upsample, self.eltadd)
        sources.append(u4)
        u5 = upsample_add(self.upfeat[0], u4, s1, self.upsample, self.eltadd)
        sources.append(u5)
        sources = sources[::-1]

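        # multibox heads; the first conf output is reduced to 2 channels via a TensorSize proxy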
        loc_x = self.loc[0](sources[0])
        conf_x = self.conf[0](sources[0])
        conf_x_b, conf_x_c, conf_x_h, conf_x_w = conf_x.value
        conf_x = flops_counter.TensorSize([conf_x_b, 2, conf_x_h, conf_x_w])

        loc.append(loc_x.permute(0, 2, 3, 1))
        conf.append(conf_x.permute(0, 2, 3, 1))

        for i in range(1, len(sources)):
            x = sources[i]
            conf.append(self.conf[i](x).permute(0, 2, 3, 1))
            loc.append(self.loc[i](x).permute(0, 2, 3, 1))

        loc = flops_counter.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = flops_counter.cat([o.view(o.size(0), -1) for o in conf], 1)

        return loc.view(loc.size(0), -1, 4), \
            self.softmax(conf.view(conf.size(0), -1, 2))
Code Example #19
    def forward(self, x):
        conv1, pool1 = self.layer1(x)
        conv2, pool2 = self.layer2(pool1)

        conv3, pool3 = self.layer3(pool2)
        conv4, pool4 = self.layer4(pool3)
        conv5, pool5 = self.layer5(pool4)

        conv6 = self.layer6(pool5)
        conv7 = self.layer7(conv6)
        conv8 = self.layer8(conv7)

        lfpn2_on_conv5 = self.lfpn2_on_conv5(conv6, conv5)
        lfpn1_on_conv4 = self.lfpn1_on_conv4(lfpn2_on_conv5, conv4)
        lfpn0_on_conv3 = self.lfpn0_on_conv3(lfpn1_on_conv4, conv3)

        ssh_conv3 = self.ssh_conv3(lfpn0_on_conv3)
        ssh_conv4 = self.ssh_conv4(lfpn1_on_conv4)
        ssh_conv5 = self.ssh_conv5(lfpn2_on_conv5)
        ssh_conv6 = self.ssh_conv6(conv6)
        ssh_conv7 = self.ssh_conv7(conv7)
        ssh_conv8 = self.ssh_conv8(conv8)

        ssh_conv3_l2norm = self.ssh_conv3_l2norm(ssh_conv3)
        ssh_conv4_l2norm = self.ssh_conv4_l2norm(ssh_conv4)
        ssh_conv5_l2norm = self.ssh_conv5_l2norm(ssh_conv5)

        inputs = [
            ssh_conv3_l2norm, ssh_conv4_l2norm, ssh_conv5_l2norm, ssh_conv6,
            ssh_conv7, ssh_conv8
        ]
        face_confs = []
        head_confs = []
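        # conf maps are split into face/head channel slices (emulated with TensorSize proxies); the 3-channel slices are max-reduced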
        for i, feat in enumerate(inputs):
            mbox_loc = self.mbox_loc[i](feat)
            # print(mbox_loc)
            if i == 0:
                temp_conf = self.mbox_conf[i](feat)
                # face_conf3 = temp_conf[:, 0:3, :, :]
                face_conf3 = [x for x in temp_conf.value]
                face_conf3[1] = 3
                face_conf3 = flops_counter.TensorSize(face_conf3)
                # face_conf1 = temp_conf[:, 3:4, :, :]
                face_conf1 = [x for x in temp_conf.value]
                face_conf1[1] = 1
                face_conf1 = flops_counter.TensorSize(face_conf1)
                # head_conf3 = temp_conf[:, 4:7, :, :]
                head_conf3 = [x for x in temp_conf.value]
                head_conf3[1] = 3
                head_conf3 = flops_counter.TensorSize(head_conf3)
                # head_conf1 = temp_conf[:, 7:, :, :]
                head_conf1 = [x for x in temp_conf.value]
                head_conf1[1] = 1
                head_conf1 = flops_counter.TensorSize(head_conf1)
                # face conf
                face_conf3_maxin = face_conf3.max(1, keepdim=True)
                face_confs.append(
                    flops_counter.cat([face_conf3_maxin, face_conf1],
                                      1).permute(0, 2, 3, 1))
                # head conf
                head_conf3_maxin = head_conf3.max(1, keepdim=True)
                head_confs.append(
                    flops_counter.cat([head_conf3_maxin, head_conf1],
                                      1).permute(0, 2, 3, 1))
            else:
                temp_conf = self.mbox_conf[i](feat)
                # face_conf1 = temp_conf[:, 0:1, :, :]
                face_conf1 = [x for x in temp_conf.value]
                face_conf1[1] = 1
                face_conf1 = flops_counter.TensorSize(face_conf1)
                # face_conf3 = temp_conf[:, 1:4, :, :]
                face_conf3 = [x for x in temp_conf.value]
                face_conf3[1] = 3
                face_conf3 = flops_counter.TensorSize(face_conf3)
                # head_conf = temp_conf[:, 4:, :, :]
                head_conf1 = [x for x in temp_conf.value]
                head_conf1[1] = 4
                head_conf1 = flops_counter.TensorSize(head_conf1)

                # face conf
                face_conf3_maxin = face_conf3.max(1, keepdim=True)
                face_confs.append(
                    flops_counter.cat([face_conf1, face_conf3_maxin],
                                      1).permute(0, 2, 3, 1))
                # head conf
                head_confs.append(head_conf1.permute(0, 2, 3, 1))
            # print(temp_conf)

        face_conf = flops_counter.cat(
            [o.view(o.value[0], -1) for o in face_confs], 1)

        head_conf = flops_counter.cat(
            [o.view(o.value[0], -1) for o in head_confs], 1)

        face_conf_softmax = self.softmax(
            face_conf.view(face_conf.value[0], -1, 2))

        return face_conf_softmax
Code Example #20
    def forward(self, x):
        loc = list()
        conf = list()

        ######
        # Backbone
        ######
        conv3_3_x = self.layer1(x)
        conv4_3_x = self.layer2(conv3_3_x)
        conv5_3_x = self.layer3(conv4_3_x)
        fc7_x = self.layer4(conv5_3_x)
        conv6_2_x = self.layer5(fc7_x)
        conv7_2_x = self.layer6(conv6_2_x)

        ######
        # dsfd specific layers
        ######
        # fpn
        lfpn3_fc7_x_up = self.upsample(self.latlayer3(fc7_x))
        lfpn3_conv5_3_x = self.smooth3(conv5_3_x)
        if lfpn3_fc7_x_up.value[2] != lfpn3_conv5_3_x.value[2] or \
                lfpn3_fc7_x_up.value[3] != lfpn3_conv5_3_x.value[3]:
            pad = (0, lfpn3_conv5_3_x.value[3] - lfpn3_fc7_x_up.value[3], 0,
                   lfpn3_conv5_3_x.value[2] - lfpn3_fc7_x_up.value[2])
            lfpn3_fc7_x_up = F.pad(lfpn3_fc7_x_up, pad)
        lfpn3 = self.eltmul(lfpn3_fc7_x_up, lfpn3_conv5_3_x)

        lfpn2_lfpn3_up = self.upsample(self.latlayer2(lfpn3))
        lfpn2_conv4_3_x = self.smooth2(conv4_3_x)
        if lfpn2_lfpn3_up.value[2] != lfpn2_conv4_3_x.value[2] or \
                lfpn2_lfpn3_up.value[3] != lfpn2_conv4_3_x.value[3]:
            pad = (0, lfpn2_conv4_3_x.value[3] - lfpn2_lfpn3_up.value[3], 0,
                   lfpn2_conv4_3_x.value[2] - lfpn2_lfpn3_up.value[2])
            lfpn2_lfpn3_up = F.pad(lfpn2_lfpn3_up, pad)
        lfpn2 = self.eltmul(lfpn2_lfpn3_up, lfpn2_conv4_3_x)

        lfpn1_lfpn2_up = self.upsample(self.latlayer1(lfpn2))
        lfpn1_conv3_3_x = self.smooth1(conv3_3_x)
        if lfpn1_lfpn2_up.value[2] != lfpn1_conv3_3_x.value[2] or \
                lfpn1_lfpn2_up.value[3] != lfpn1_conv3_3_x.value[3]:
            pad = (0, lfpn1_conv3_3_x.value[3] - lfpn1_lfpn2_up.value[3], 0,
                   lfpn1_conv3_3_x.value[2] - lfpn1_lfpn2_up.value[2])
            lfpn1_lfpn2_up = F.pad(lfpn1_lfpn2_up, pad)
        lfpn1 = self.eltmul(lfpn1_lfpn2_up, lfpn1_conv3_3_x)

        conv5_3_x = lfpn3
        conv4_3_x = lfpn2
        conv3_3_x = lfpn1

        # fem
        sources = [
            conv3_3_x, conv4_3_x, conv5_3_x, fc7_x, conv6_2_x, conv7_2_x
        ]
        sources[0] = self.cpm3_3(sources[0])
        sources[1] = self.cpm4_3(sources[1])
        sources[2] = self.cpm5_3(sources[2])
        sources[3] = self.cpm7(sources[3])
        sources[4] = self.cpm6_2(sources[4])
        sources[5] = self.cpm7_2(sources[5])

        # apply multibox head to source layers
        loc = list()
        conf = list()
        for x, l, c in zip(sources, self.loc, self.conf):
            # l(x)
            loc.append(l(x).permute(0, 2, 3, 1))
            # mio: max_in_out
            conf.append(c(x).permute(0, 2, 3, 1))
        # face_conf = flops_counter.cat([flops_counter.view([o[1], o[2], 2], (1, -1)) for o in conf], 1)
        # output = self.softmax(flops_counter.view(face_conf, (1, -1, 2)))
        face_confs = list()
        for o in conf:
            dst = [i for i in o.value]
            dst[-1] = 2
            face_confs.append(flops_counter.TensorSize(dst))
        face_conf = flops_counter.cat(
            [o.view(o.value[0], -1) for o in face_confs], 1)
        output = self.softmax(face_conf.view(face_conf.value[0], -1, 2))
        return output
Code Example #21
    def forward(self, x):
        out = x

        # get conv3_3
        for k in range(16):
            out = self.vgg16[k](out)
        conv3_3 = out  # channels = 256
        conv3_3_norm = self.l2norm_conv3_3(conv3_3)

        # get conv4_3
        for k in range(16, 23):
            out = self.vgg16[k](out)
        conv4_3 = out  # channels = 512
        conv4_3_norm = self.l2norm_conv4_3(conv4_3)

        # get conv5_3
        for k in range(23, 30):
            out = self.vgg16[k](out)
        conv5_3 = out  # channels = 512
        conv5_3_norm = self.l2norm_conv5_3(conv5_3)

        out = self.vgg16[30](out)

        # get conv_fc7
        out = self.conv_fc6(out)
        out = self.relu_fc6(out)
        out = self.conv_fc7(out)
        out = self.relu_fc7(out)
        conv_fc7 = out

        # get conv6_2
        out = self.conv6_1(out)
        out = self.relu_conv6_1(out)
        out = self.conv6_2(out)
        out = self.relu_conv6_2(out)
        conv6_2 = out

        # get conv7_2
        out = self.conv7_1(out)
        out = self.relu_conv7_1(out)
        out = self.conv7_2(out)
        out = self.relu_conv7_2(out)
        conv7_2 = out

        # Detection Head - mbox_loc
        mbox_loc_inputs = [
            self.mbox_loc_conv3_3_norm(conv3_3_norm),
            self.mbox_loc_conv4_3_norm(conv4_3_norm),
            self.mbox_loc_conv5_3_norm(conv5_3_norm),
            self.mbox_loc_conv_fc7(conv_fc7),
            self.mbox_loc_conv6_2(conv6_2),
            self.mbox_loc_conv7_2(conv7_2)
        ]
        mbox_loc = flops_counter.cat(
            [o.permute(0, 2, 3, 1).view(1, -1, 4) for o in mbox_loc_inputs], 1)
        # Detection Head - mbox_conf
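        # conv3_3 conf: the 3-channel slice is max-reduced and concatenated with the 1-channel slice (TensorSize shape proxies)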
        mbox_conf_conv3_3_norm = self.mbox_conf_conv3_3_norm(conv3_3_norm)

        conf1 = [i for i in mbox_conf_conv3_3_norm.value]
        conf1[1] = 1
        conf1 = flops_counter.TensorSize(conf1)

        conf234 = [i for i in mbox_conf_conv3_3_norm.value]
        conf234[1] = 3
        conf234 = flops_counter.TensorSize(conf234)
        conf234 = conf234.max(1, keepdim=True)

        mbox_conf_conv3_3_norm = flops_counter.cat([conf1, conf234], 1)

        mbox_conf_inputs = [
            mbox_conf_conv3_3_norm,
            self.mbox_conf_conv4_3_norm(conv4_3_norm),
            self.mbox_conf_conv5_3_norm(conv5_3_norm),
            self.mbox_conf_conv_fc7(conv_fc7),
            self.mbox_conf_conv6_2(conv6_2),
            self.mbox_conf_conv7_2(conv7_2)
        ]
        mbox_conf = flops_counter.cat(
            [o.permute(0, 2, 3, 1).view(1, -1, 2) for o in mbox_conf_inputs],
            1)
        mbox_conf = self.softmax(mbox_conf)

        return mbox_loc, mbox_conf