Example #1
    def forward(self, x):
        # x_1 = x[:, :self.inp, :, :]
        # x_2 = x[:, self.inp:, :, :]
        b, c, h, w = x.value
        x_1 = flops_counter.TensorSize([b, self.inp, h, w])
        x_2 = flops_counter.TensorSize([b, c - self.inp, h, w])

        a_1 = self.conv1x1_1(x_1)
        # g_1 = F.sigmoid(self.gate_1(x_1))
        g_1 = self.sigmoid1(self.gate_1(x_1))

        a_2 = self.conv1x1_2(x_2)
        # g_2 = F.sigmoid(self.gate_2(x_2))
        g_2 = self.sigmoid2(self.gate_2(x_2))

        ret = flops_counter.cat(
            (self.eltmul1(a_1, g_1), self.eltmul2(a_2, g_2)), 1)

        return ret
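
The forward above mirrors a real module on flops_counter.TensorSize objects: tensor slicing (the commented x[:, :self.inp, :, :] lines), which adds no FLOPs, is emulated by building a new TensorSize with the sliced shape. A minimal standalone sketch of that substitution, with an assumed input shape and split point:

import flops_counter

# assumed input shape and channel split point, for illustration only
x = flops_counter.TensorSize([1, 64, 56, 56])
inp = 32

b, c, h, w = x.value
# emulates x[:, :inp, :, :] and x[:, inp:, :, :] at the shape level
x_1 = flops_counter.TensorSize([b, inp, h, w])
x_2 = flops_counter.TensorSize([b, c - inp, h, w])
print(x_1, x_2)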
Example #2
    def _calc_flops(self, img, scale=1, flip=False):
        x = img

        h, w = x.shape[:2]
        scale_h, scale_w = scale, scale
        if self.max_downsample > 1:
            h_new = int(
                np.ceil(scale * h / self.max_downsample) * self.max_downsample)
            w_new = int(
                np.ceil(scale * w / self.max_downsample) * self.max_downsample)
            scale_h = h_new / h
            scale_w = w_new / w
            x = cv2.resize(img,
                           None,
                           None,
                           fx=scale_w,
                           fy=scale_h,
                           interpolation=cv2.INTER_LINEAR)
        elif scale != 1:
            x = cv2.resize(img,
                           None,
                           None,
                           fx=scale_w,
                           fy=scale_h,
                           interpolation=cv2.INTER_LINEAR)
        h, w, c = x.shape

        if 'hr' in self.model.name.lower():
            # for HR models, skip upscaled inputs larger than 5000 px and report zero FLOPs
            if scale > 1 and (h > 5000 or w > 5000):
                return 0

        x = flops_counter.TensorSize([1, c, h, w])
        y = self.model(x)  # the forward pass only accumulates self.model.flops; y itself is unused

        flops = self.model.flops * 2 if flip else self.model.flops  # flip testing processes the mirrored image too

        self.model.set_flops_zero()
        return flops
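
The resize logic above rounds the scaled height and width up to the nearest multiple of max_downsample and then recomputes the effective scale factors. A worked example with assumed numbers (h = 500, scale = 2, max_downsample = 16):

import numpy as np

h, scale, max_downsample = 500, 2, 16  # assumed values, for illustration only
h_new = int(np.ceil(scale * h / max_downsample) * max_downsample)
scale_h = h_new / h
print(h_new, scale_h)  # 1008 2.016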
Example #3
import sys
sys.path.append('.')
import flops_counter as fc
from models import YuFaceDetectNet

model = YuFaceDetectNet()
x = fc.TensorSize([1, 3, 224, 210])
y = model(x)
print(y, model.flops)
# print(model)
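
Example #2 suggests that model.flops accumulates across forward passes and is cleared with set_flops_zero(). Under that assumption, the script above could be extended to measure a second input size with the same model instance:

model.set_flops_zero()                # reset the counter, as in Example #2
x2 = fc.TensorSize([1, 3, 320, 320])  # assumed second input size
y2 = model(x2)
print(y2, model.flops)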
Example #4
    def forward(self, x):
        conv1, pool1 = self.layer1(x)
        conv2, pool2 = self.layer2(pool1)

        conv3, pool3 = self.layer3(pool2)
        conv4, pool4 = self.layer4(pool3)
        conv5, pool5 = self.layer5(pool4)

        conv6 = self.layer6(pool5)
        conv7 = self.layer7(conv6)
        conv8 = self.layer8(conv7)

        lfpn2_on_conv5 = self.lfpn2_on_conv5(conv6, conv5)
        lfpn1_on_conv4 = self.lfpn1_on_conv4(lfpn2_on_conv5, conv4)
        lfpn0_on_conv3 = self.lfpn0_on_conv3(lfpn1_on_conv4, conv3)

        ssh_conv3 = self.ssh_conv3(lfpn0_on_conv3)
        ssh_conv4 = self.ssh_conv4(lfpn1_on_conv4)
        ssh_conv5 = self.ssh_conv5(lfpn2_on_conv5)
        ssh_conv6 = self.ssh_conv6(conv6)
        ssh_conv7 = self.ssh_conv7(conv7)
        ssh_conv8 = self.ssh_conv8(conv8)

        ssh_conv3_l2norm = self.ssh_conv3_l2norm(ssh_conv3)
        ssh_conv4_l2norm = self.ssh_conv4_l2norm(ssh_conv4)
        ssh_conv5_l2norm = self.ssh_conv5_l2norm(ssh_conv5)

        inputs = [
            ssh_conv3_l2norm, ssh_conv4_l2norm, ssh_conv5_l2norm, ssh_conv6,
            ssh_conv7, ssh_conv8
        ]
        face_confs = []
        head_confs = []
        for i, feat in enumerate(inputs):
            mbox_loc = self.mbox_loc[i](feat)
            # print(mbox_loc)
            if i == 0:
                temp_conf = self.mbox_conf[i](feat)
                # face_conf3 = temp_conf[:, 0:3, :, :]
                face_conf3 = [x for x in temp_conf.value]
                face_conf3[1] = 3
                face_conf3 = flops_counter.TensorSize(face_conf3)
                # face_conf1 = temp_conf[:, 3:4, :, :]
                face_conf1 = [x for x in temp_conf.value]
                face_conf1[1] = 1
                face_conf1 = flops_counter.TensorSize(face_conf1)
                # head_conf3 = temp_conf[:, 4:7, :, :]
                head_conf3 = [x for x in temp_conf.value]
                head_conf3[1] = 3
                head_conf3 = flops_counter.TensorSize(head_conf3)
                # head_conf1 = temp_conf[:, 7:, :, :]
                head_conf1 = [x for x in temp_conf.value]
                head_conf1[1] = 1
                head_conf1 = flops_counter.TensorSize(head_conf1)
                # face conf
                face_conf3_maxin = face_conf3.max(1, keepdim=True)
                face_confs.append(
                    flops_counter.cat([face_conf3_maxin, face_conf1],
                                      1).permute(0, 2, 3, 1))
                # head conf
                head_conf3_maxin = head_conf3.max(1, keepdim=True)
                head_confs.append(
                    flops_counter.cat([head_conf3_maxin, head_conf1],
                                      1).permute(0, 2, 3, 1))
            else:
                temp_conf = self.mbox_conf[i](feat)
                # face_conf1 = temp_conf[:, 0:1, :, :]
                face_conf1 = [x for x in temp_conf.value]
                face_conf1[1] = 1
                face_conf1 = flops_counter.TensorSize(face_conf1)
                # face_conf3 = temp_conf[:, 1:4, :, :]
                face_conf3 = [x for x in temp_conf.value]
                face_conf3[1] = 3
                face_conf3 = flops_counter.TensorSize(face_conf3)
                # head_conf = temp_conf[:, 4:, :, :]
                head_conf1 = [x for x in temp_conf.value]
                head_conf1[1] = 4
                head_conf1 = flops_counter.TensorSize(head_conf1)

                # face conf
                face_conf3_maxin = face_conf3.max(1, keepdim=True)
                face_confs.append(
                    flops_counter.cat([face_conf1, face_conf3_maxin],
                                      1).permute(0, 2, 3, 1))
                # head conf
                head_confs.append(head_conf1.permute(0, 2, 3, 1))
            # print(temp_conf)

        face_conf = flops_counter.cat(
            [o.view(o.value[0], -1) for o in face_confs], 1)

        head_conf = flops_counter.cat(
            [o.view(o.value[0], -1) for o in head_confs], 1)

        face_conf_softmax = self.softmax(
            face_conf.view(face_conf.value[0], -1, 2))

        return face_conf_softmax
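
The same three-line pattern (copy .value, overwrite the channel entry, wrap in a new TensorSize) is repeated for every confidence split above. A hypothetical helper, not part of the original code, that captures it:

import flops_counter

def slice_channels(tensor_size, channels):
    """Emulate t[:, a:a + channels, :, :] on a flops_counter.TensorSize.

    Slicing adds no FLOPs, so only the resulting shape matters.
    """
    shape = [v for v in tensor_size.value]
    shape[1] = channels
    return flops_counter.TensorSize(shape)

# e.g. face_conf3 = slice_channels(temp_conf, 3)
#      face_conf1 = slice_channels(temp_conf, 1)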
Example #5
    def forward(self, x):
        loc = list()
        conf = list()

        ######
        # Backbone
        ######
        conv3_3_x = self.layer1(x)
        conv4_3_x = self.layer2(conv3_3_x)
        conv5_3_x = self.layer3(conv4_3_x)
        fc7_x = self.layer4(conv5_3_x)
        conv6_2_x = self.layer5(fc7_x)
        conv7_2_x = self.layer6(conv6_2_x)

        ######
        # dsfd specific layers
        ######
        # fpn
        lfpn3_fc7_x_up = self.upsample(self.latlayer3(fc7_x))
        lfpn3_conv5_3_x = self.smooth3(conv5_3_x)
        if lfpn3_fc7_x_up.value[2] != lfpn3_conv5_3_x.value[
                2] or lfpn3_fc7_x_up.value[3] != lfpn3_conv5_3_x.value[3]:
            pad = (0, lfpn3_conv5_3_x.value[3] - lfpn3_fc7_x_up.value[3], 0,
                   lfpn3_conv5_3_x.value[2] - lfpn3_fc7_x_up.value[2])
            lfpn3_fc7_x_up = F.pad(lfpn3_fc7_x_up, pad)
        lfpn3 = self.eltmul(lfpn3_fc7_x_up, lfpn3_conv5_3_x)

        lfpn2_lfpn3_up = self.upsample(self.latlayer2(lfpn3))
        lfpn2_conv4_3_x = self.smooth2(conv4_3_x)
        if lfpn2_lfpn3_up.value[2] != lfpn2_conv4_3_x.value[
                2] or lfpn2_lfpn3_up.value[3] != lfpn2_conv4_3_x.value[3]:
            pad = (0, lfpn2_conv4_3_x.value[3] - lfpn2_lfpn3_up.value[3], 0,
                   lfpn2_conv4_3_x.value[2] - lfpn2_lfpn3_up.value[2])
            lfpn2_lfpn3_up = F.pad(lfpn2_lfpn3_up, pad)
        lfpn2 = self.eltmul(lfpn2_lfpn3_up, lfpn2_conv4_3_x)

        lfpn1_lfpn2_up = self.upsample(self.latlayer1(lfpn2))
        lfpn1_conv3_3_x = self.smooth1(conv3_3_x)
        if lfpn1_lfpn2_up.value[2] != lfpn1_conv3_3_x.value[
                2] or lfpn1_lfpn2_up.value[3] != lfpn1_conv3_3_x.value[3]:
            pad = (0, lfpn1_conv3_3_x.value[3] - lfpn1_lfpn2_up.value[3], 0,
                   lfpn1_conv3_3_x.value[2] - lfpn1_lfpn2_up.value[2])
            lfpn1_lfpn2_up = F.pad(lfpn1_lfpn2_up, pad)
        lfpn1 = self.eltmul(lfpn1_lfpn2_up, lfpn1_conv3_3_x)

        conv5_3_x = lfpn3
        conv4_3_x = lfpn2
        conv3_3_x = lfpn1

        # fem
        sources = [
            conv3_3_x, conv4_3_x, conv5_3_x, fc7_x, conv6_2_x, conv7_2_x
        ]
        sources[0] = self.cpm3_3(sources[0])
        sources[1] = self.cpm4_3(sources[1])
        sources[2] = self.cpm5_3(sources[2])
        sources[3] = self.cpm7(sources[3])
        sources[4] = self.cpm6_2(sources[4])
        sources[5] = self.cpm7_2(sources[5])

        # apply multibox head to source layers
        loc = list()
        conf = list()
        for x, l, c in zip(sources, self.loc, self.conf):
            # l(x)
            loc.append(l(x).permute(0, 2, 3, 1))
            # mio: max_in_out
            conf.append(c(x).permute(0, 2, 3, 1))
        # face_conf = flops_counter.cat([flops_counter.view([o[1], o[2], 2], (1, -1)) for o in conf], 1)
        # output = self.softmax(flops_counter.view(face_conf, (1, -1, 2)))
        face_confs = list()
        for o in conf:
            dst = [i for i in o.value]
            dst[-1] = 2
            face_confs.append(flops_counter.TensorSize(dst))
        face_conf = flops_counter.cat(
            [o.view(o.value[0], -1) for o in face_confs], 1)
        output = self.softmax(face_conf.view(face_conf.value[0], -1, 2))
        return output
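
Assuming F.pad here follows the torch.nn.functional.pad convention, the 4-tuple addresses the last two dimensions as (left, right, top, bottom), so the upsampled map is padded on the right and at the bottom until it matches the lateral feature map. A shape-only illustration with assumed sizes:

# assumed shapes, for illustration: the upsampled map is one pixel short in H and W
up = [1, 256, 39, 39]    # e.g. lfpn3_fc7_x_up.value
skip = [1, 256, 40, 40]  # e.g. lfpn3_conv5_3_x.value

# same tuple as above: (pad_left, pad_right, pad_top, pad_bottom)
pad = (0, skip[3] - up[3], 0, skip[2] - up[2])
print(pad)  # (0, 1, 0, 1): one extra column on the right, one extra row at the bottom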
Example #6
    def forward(self, x):
        out = x

        # get conv3_3
        for k in range(16):
            out = self.vgg16[k](out)
        conv3_3 = out  # channels = 256
        conv3_3_norm = self.l2norm_conv3_3(conv3_3)

        # get conv4_3
        for k in range(16, 23):
            out = self.vgg16[k](out)
        conv4_3 = out  # channels = 512
        conv4_3_norm = self.l2norm_conv4_3(conv4_3)

        # get conv5_3
        for k in range(23, 30):
            out = self.vgg16[k](out)
        conv5_3 = out  # channels = 512
        conv5_3_norm = self.l2norm_conv5_3(conv5_3)

        out = self.vgg16[30](out)

        # get conv_fc7
        out = self.conv_fc6(out)
        out = self.relu_fc6(out)
        out = self.conv_fc7(out)
        out = self.relu_fc7(out)
        conv_fc7 = out

        # get conv6_2
        out = self.conv6_1(out)
        out = self.relu_conv6_1(out)
        out = self.conv6_2(out)
        out = self.relu_conv6_2(out)
        conv6_2 = out

        # get conv7_2
        out = self.conv7_1(out)
        out = self.relu_conv7_1(out)
        out = self.conv7_2(out)
        out = self.relu_conv7_2(out)
        conv7_2 = out

        # Detection Head - mbox_loc
        mbox_loc_inputs = [
            self.mbox_loc_conv3_3_norm(conv3_3_norm),
            self.mbox_loc_conv4_3_norm(conv4_3_norm),
            self.mbox_loc_conv5_3_norm(conv5_3_norm),
            self.mbox_loc_conv_fc7(conv_fc7),
            self.mbox_loc_conv6_2(conv6_2),
            self.mbox_loc_conv7_2(conv7_2)
        ]
        mbox_loc = flops_counter.cat(
            [o.permute(0, 2, 3, 1).view(1, -1, 4) for o in mbox_loc_inputs], 1)
        # Detection Head - mbox_conf
        mbox_conf_conv3_3_norm = self.mbox_conf_conv3_3_norm(conv3_3_norm)

        conf1 = [i for i in mbox_conf_conv3_3_norm.value]
        conf1[1] = 1
        conf1 = flops_counter.TensorSize(conf1)

        conf234 = [i for i in mbox_conf_conv3_3_norm.value]
        conf234[1] = 3
        conf234 = flops_counter.TensorSize(conf234)
        conf234 = conf234.max(1, keepdim=True)

        mbox_conf_conv3_3_norm = flops_counter.cat([conf1, conf234], 1)

        mbox_conf_inputs = [
            mbox_conf_conv3_3_norm,
            self.mbox_conf_conv4_3_norm(conv4_3_norm),
            self.mbox_conf_conv5_3_norm(conv5_3_norm),
            self.mbox_conf_conv_fc7(conv_fc7),
            self.mbox_conf_conv6_2(conv6_2),
            self.mbox_conf_conv7_2(conv7_2)
        ]
        mbox_conf = flops_counter.cat(
            [o.permute(0, 2, 3, 1).view(1, -1, 2) for o in mbox_conf_inputs],
            1)
        mbox_conf = self.softmax(mbox_conf)

        return mbox_loc, mbox_conf
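
The conv3_3 confidence branch above applies max-in-out (the "mio" trick referenced in Example #5) at the shape level: the 4 predicted channels are split into 1 + 3, the 3-channel part is reduced with max along the channel axis, and the two parts are concatenated back into 2 channels. A walkthrough with an assumed feature-map size:

import flops_counter

# assumed output shape of mbox_conf_conv3_3_norm, for illustration only
pred = flops_counter.TensorSize([1, 4, 160, 160])

b, _, h, w = pred.value
conf1 = flops_counter.TensorSize([b, 1, h, w])        # channel 0
conf234 = flops_counter.TensorSize([b, 3, h, w])      # channels 1..3
conf234 = conf234.max(1, keepdim=True)                # -> [1, 1, 160, 160]
merged = flops_counter.cat([conf1, conf234], 1)       # -> [1, 2, 160, 160]
print(merged)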
Example #7
    def forward(self, x):
        sources = list()
        loc = list()
        conf = list()

        end = 8 if self.model_type == 32 else 6
        for k in range(end):
            x = self.base[k](x)
        s1 = x  # (640, 640) -> (320, 320)

        for k in range(2, end):
            x = self.base[k](x)
        s2 = x  # (160, 160) -> (80, 80)

        for k in range(2, end):
            x = self.base[k](x)
        s3 = x  # (80, 80) -> (40, 40)

        for k in range(2, end):
            x = self.base[k](x)
        s4 = x  # (40, 40) -> (20, 20)

        for k in range(2, end):
            x = self.base[k](x)
        s5 = x  # (20, 20) -> (10, 10)

        for k in range(2, end):
            x = self.base[k](x)
        s6 = x  # (10, 10) -> (5, 5)

        sources.append(s6)

        # def upsample_add(seq, source, target, up_handle, add_handle):
        u1 = upsample_add(self.upfeat[0], s6, s5, self.upsample, self.eltadd)
        sources.append(u1)
        u2 = upsample_add(self.upfeat[0], u1, s4, self.upsample, self.eltadd)
        sources.append(u2)
        u3 = upsample_add(self.upfeat[0], u2, s3, self.upsample, self.eltadd)
        sources.append(u3)
        u4 = upsample_add(self.upfeat[0], u3, s2, self.upsample, self.eltadd)
        sources.append(u4)
        u5 = upsample_add(self.upfeat[0], u4, s1, self.upsample, self.eltadd)
        sources.append(u5)
        sources = sources[::-1]

        loc_x = self.loc[0](sources[0])
        conf_x = self.conf[0](sources[0])
        conf_x_b, conf_x_c, conf_x_h, conf_x_w = conf_x.value
        conf_x = flops_counter.TensorSize([conf_x_b, 2, conf_x_h, conf_x_w])

        loc.append(loc_x.permute(0, 2, 3, 1))
        conf.append(conf_x.permute(0, 2, 3, 1))

        for i in range(1, len(sources)):
            x = sources[i]
            conf.append(self.conf[i](x).permute(0, 2, 3, 1))
            loc.append(self.loc[i](x).permute(0, 2, 3, 1))

        loc = flops_counter.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = flops_counter.cat([o.view(o.size(0), -1) for o in conf], 1)

        return loc.view(loc.size(0), -1,
                        4), self.softmax(conf.view(conf.size(0), -1, 2))
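
upsample_add is only referenced above; its body is not part of this example, and the comment quotes just its signature. One plausible sketch, given purely as an assumption and modelled on the lfpn blocks of Example #5 (lateral conv on the coarser map, upsample, element-wise add with the finer map):

def upsample_add(seq, source, target, up_handle, add_handle):
    # hypothetical body, not the original implementation
    up = up_handle(seq(source))
    return add_handle(up, target)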
Example #8
import sys
sys.path.append('.')
import flops_counter as fc
from models import CSP

model = CSP()
# either the input height or the width must be divisible by 16;
# this is due to the model design
x = fc.TensorSize([1, 3, 224, 112])
y = model(x)
print(y, model.flops)
# print(model)
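
The divisibility constraint noted in the comment can be checked before building the TensorSize; a trivial guard matching the constraint as stated:

h, w = 224, 112
assert h % 16 == 0 or w % 16 == 0, 'height or width must be divisible by 16'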