Code example #1
    def yuNetDetection(self, frame):
        if self.init == 0:
            frameHeight, frameWidth = frame.shape[:2]
            self.pb = PriorBox(input_shape=(640, 480),
                               output_shape=(frameWidth, frameHeight))
            self.init = 1

        blob = cv2.dnn.blobFromImage(frame, size=(640, 480))
        outputNames = ['loc', 'conf', 'iou']
        self.detector.setInput(blob)
        loc, conf, iou = self.detector.forward(outputNames)
        dets = self.pb.decode(np.squeeze(loc, axis=0),
                              np.squeeze(conf, axis=0),
                              np.squeeze(iou, axis=0))
        idx = np.where(dets[:, -1] > self.confidence)[0]
        dets = dets[idx]

        if dets.shape[0] == 0:
            return ()
        facess = nms(dets, self.threshold)

        faces = np.array(facess[:, :4])
        faces = faces.astype(int)  # np.int was removed in NumPy 1.24
        faceStartXY = faces[:, :2]
        faceEndXY = faces[:, 2:4]
        faceWH = faceEndXY - faceStartXY
        faces = np.hstack((faceStartXY, faceWH))
        # scores = facess[:, -1]
        return faces
Code example #2
 def __init__(self,args):
     if args.ctx and torch.cuda.is_available():
         self.use_cuda = True
     else:
         self.use_cuda = False
     if self.use_cuda:
         torch.set_default_tensor_type('torch.cuda.FloatTensor')
     else:
         torch.set_default_tensor_type('torch.FloatTensor')
     self.loadmodel(args.headmodelpath)
     self.threshold = args.conf_thresh
     self.img_dir = args.img_dir
     
     self.detect = Detect(cfg)
     self.Prior = PriorBox(cfg)
     with torch.no_grad():
         self.priors = self.Prior.forward()
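
Note that torch.set_default_tensor_type, used above to switch between CPU and CUDA defaults, is deprecated in recent PyTorch releases. A minimal sketch of the modern equivalent, assuming PyTorch 2.x (variable names here are illustrative):

    import torch

    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    torch.set_default_dtype(torch.float32)  # replaces the dtype half of set_default_tensor_type
    torch.set_default_device(device)        # replaces the device half (PyTorch >= 2.0)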
Code example #3
    def __init__(self, num_classes, num_blocks, top_k, conf_thresh, nms_thresh,
                 variance):
        super(ASSD_ResNet101, self).__init__()
        self.num_classes = num_classes
        ############################################################################################
        self.inplanes = 64
        layers = [3, 4, 23, 3]
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, layers[0])
        self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(Bottleneck, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(Bottleneck, 512, layers[3], stride=2)
        #self.L2Norm = L2Norm(n_channels=512, scale=20)
        self.extra_layers = nn.ModuleList(
            add_extras(layer_cfg['extra'], batch_norm=True))
        self.conf_layers = nn.ModuleList(
            build_conf(layer_cfg['pred'], num_blocks, num_classes))
        self.locs_layers = nn.ModuleList(
            build_locs(layer_cfg['pred'], num_blocks))
        self.prior_boxes = PriorBox()
        self.prior_boxes = self.prior_boxes.forward()

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.prior_boxes = self.prior_boxes.to(device)

        self.fusion_layers = nn.ModuleList(fusionModule())
        self.fusion_bn = nn.BatchNorm2d(768)  #256*3
        self.fusion_conv = nn.Conv2d(768, 512, kernel_size=1)

        self.att_layers = nn.ModuleList(make_attention())

        self.softmax = nn.Softmax(dim=1)
        self.detect = Detect(num_classes=num_classes,
                             top_k=top_k,
                             conf_thresh=conf_thresh,
                             nms_thresh=nms_thresh,
                             variance=variance)
Code example #4
File: s3fd.py  Project: jimeffry/deep_sort_face
    def __init__(self, phase, base, extras, head, num_classes):
        super(S3FD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        '''
        self.priorbox = PriorBox(size,cfg)
        self.priors = Variable(self.priorbox.forward(), volatile=True)
        '''
        # SSD network
        self.vgg = nn.ModuleList(base)
        # Layer learns to scale the l2 normalized features from conv4_3
        self.L2Norm3_3 = L2Norm(256, 10)
        self.L2Norm4_3 = L2Norm(512, 8)
        self.L2Norm5_3 = L2Norm(512, 5)

        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.priorbox = PriorBox(cfg)
        with torch.no_grad():
            self.priors = self.priorbox.forward()
Code example #5
img_resize = cv2.resize(img,
                        dst=None,
                        dsize=input_shape,
                        interpolation=cv2.INTER_LINEAR)
hr, wr, _ = img_resize.shape
print('Network input size: h={}, w={}'.format(hr, wr))

blob = cv2.dnn.blobFromImage(img_resize, size=input_shape)

# run the net
output_names = ['loc', 'conf']
net.setInput(blob)
loc, conf = net.forward(output_names)

# Decode bboxes and landmarks
pb = PriorBox(input_shape=input_shape, output_shape=(w, h))
dets = pb.decode(np.squeeze(loc, axis=0), np.squeeze(conf, axis=0))

# Ignore low scores
idx = np.where(dets[:, -1] > args.conf_thresh)[0]
dets = dets[idx]

# NMS
if dets.shape[0] > 0:
    dets = nms(dets, args.nms_thresh)
    faces = dets[:args.keep_top_k, :]
    print('Detection results: {} faces found'.format(faces.shape[0]))
    print(faces)
else:
    print('No faces found.')
    exit()
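
The snippet above assumes img, input_shape, net, and args were prepared earlier. A hedged setup sketch (the image and model paths are hypothetical; input_shape follows the (width, height) convention that cv2.resize and blobFromImage expect):

    import cv2
    import numpy as np

    img = cv2.imread('selfie.jpg')                      # any BGR test image (hypothetical path)
    h, w, _ = img.shape
    input_shape = (320, 240)                            # (width, height) fed to the network
    net = cv2.dnn.readNet('YuFaceDetectNet_320.onnx')   # hypothetical ONNX model file

    class Args:                                         # stand-in for an argparse namespace
        conf_thresh = 0.6
        nms_thresh = 0.3
        keep_top_k = 750

    args = Args()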
Code example #6
class ASSD_ResNet101(nn.Module):
    def __init__(self, num_classes, num_blocks, top_k, conf_thresh, nms_thresh,
                 variance):
        super(ASSD_ResNet101, self).__init__()
        self.num_classes = num_classes
        ############################################################################################
        self.inplanes = 64
        layers = [3, 4, 23, 3]
        self.conv1 = nn.Conv2d(3,
                               64,
                               kernel_size=7,
                               stride=2,
                               padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, layers[0])
        self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(Bottleneck, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(Bottleneck, 512, layers[3], stride=2)
        #self.L2Norm = L2Norm(n_channels=512, scale=20)
        self.extra_layers = nn.ModuleList(
            add_extras(layer_cfg['extra'], batch_norm=True))
        self.conf_layers = nn.ModuleList(
            build_conf(layer_cfg['pred'], num_blocks, num_classes))
        self.locs_layers = nn.ModuleList(
            build_locs(layer_cfg['pred'], num_blocks))
        self.prior_boxes = PriorBox()
        self.prior_boxes = self.prior_boxes.forward()

        device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.prior_boxes = self.prior_boxes.to(device)

        self.fusion_layers = nn.ModuleList(fusionModule())
        self.fusion_bn = nn.BatchNorm2d(768)  #256*3
        self.fusion_conv = nn.Conv2d(768, 512, kernel_size=1)

        self.att_layers = nn.ModuleList(make_attention())

        self.softmax = nn.Softmax(dim=1)
        self.detect = Detect(num_classes=num_classes,
                             top_k=top_k,
                             conf_thresh=conf_thresh,
                             nms_thresh=nms_thresh,
                             variance=variance)

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes,
                          planes * block.expansion,
                          kernel_size=1,
                          stride=stride,
                          bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x, phase=None):
        feat = []
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)  #(2L, 256L, 129L, 129L)
        x = self.layer2(x)  #(2L, 512L, 65L, 65L)
        feat += [x]
        feat0 = x
        x = self.layer3(x)  #(2L, 1024L, 33L, 33L)
        feat += [x]
        feat1 = x
        x = self.layer4(x)
        feat += [x]
        feat2 = x
        for k, v in enumerate(self.extra_layers):
            x = v(x)
            if k in [5, 11, 17, 23]:
                feat += [x]

        ########## fusion #################################################
        feat0 = self.fusion_layers[0](feat0)
        # F.upsample_bilinear is deprecated; interpolate with align_corners=True
        # matches its behavior
        feat1 = F.interpolate(self.fusion_layers[1](feat1), size=(65, 65),
                              mode='bilinear', align_corners=True)
        feat2 = F.interpolate(self.fusion_layers[2](feat2), size=(65, 65),
                              mode='bilinear', align_corners=True)
        feat[0] = F.relu(
            self.fusion_conv(
                self.fusion_bn(torch.cat([feat0, feat1, feat2], dim=1))))
        ##################################################################
        feat_new = []
        for (x, l) in zip(feat, self.att_layers):
            feat_new.append(l(x))
        ########## PreEnd #################################################
        locs = []
        conf = []
        for (x, l, c) in zip(feat_new, self.locs_layers, self.conf_layers):
            locs += [l(x).permute(0, 2, 3, 1).contiguous()]
            conf += [c(x).permute(0, 2, 3, 1).contiguous()]

        locs = torch.cat([o.view(o.size(0), -1) for o in locs], dim=1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], dim=1)

        if phase == 'test':
            output = self.detect(locs.view(locs.size(0), -1, 4),
                                 self.softmax(conf.view(-1, self.num_classes)),
                                 self.prior_boxes.type(type(x.data)))
        else:
            output = (locs.view(locs.size(0), -1, 4),
                      conf.view(conf.size(0), -1, self.num_classes),
                      self.prior_boxes)
        return output
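
In the fusion block above, three feature maps are projected by fusion_layers, brought to a common 65x65 grid, concatenated along channels (256 * 3 = 768, matching fusion_bn), then squeezed back to 512 channels by the 1x1 fusion_conv. A shape-only sketch of the concatenation step, with tensor sizes taken from the shape comments in forward:

    import torch

    feat0 = torch.randn(2, 256, 65, 65)   # layer2 output after fusion_layers[0]
    feat1 = torch.randn(2, 256, 65, 65)   # layer3 output, upsampled to 65x65
    feat2 = torch.randn(2, 256, 65, 65)   # layer4 output, upsampled to 65x65
    fused = torch.cat([feat0, feat1, feat2], dim=1)
    print(fused.shape)                     # torch.Size([2, 768, 65, 65])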
Code example #7
             loc[:, 8:10] * self.variance[0] * self.priors[:, 2:4],
             self.priors[:, 0:2] +
             loc[:, 10:12] * self.variance[0] * self.priors[:, 2:4],
             self.priors[:, 0:2] +
             loc[:, 12:14] * self.variance[0] * self.priors[:, 2:4]))
        # scale recover
        landmark_scale = np.array([self.out_w, self.out_h] * 5)
        landmarks = landmarks * landmark_scale

        # get score
        cls_scores = conf[:, 1]
        iou_scores = iou[:, 0]
        scores = np.sqrt(cls_scores * iou_scores)
        scores = scores[:, np.newaxis]

        dets = np.hstack((bboxes, landmarks, scores))
        return dets


if __name__ == '__main__':
    from priorbox import PriorBox
    pb = PriorBox()
    print(pb.generate_priors().shape)

    loc = np.random.rand(1, 4385, 14)
    conf = np.random.rand(1, 4385, 2)
    iou = np.random.rand(1, 4385, 1)

    dets = pb.decode(np.squeeze(loc, axis=0), np.squeeze(conf, axis=0),
                     np.squeeze(iou, axis=0))
    print(dets.shape)
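
The decode step above fuses the classification confidence with the predicted box IoU by taking their geometric mean, np.sqrt(cls_scores * iou_scores), which penalizes boxes that score well on only one of the two signals. A tiny illustrative check in plain NumPy:

    import numpy as np

    cls_scores = np.array([0.9, 0.5])
    iou_scores = np.array([0.8, 0.9])
    # the geometric mean rewards boxes that are strong on both signals
    print(np.sqrt(cls_scores * iou_scores))  # [0.84852814 0.67082039]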
Code example #8
File: detect.py  Project: zysilence/libfacedetection
print('Image size: h={}, w={}'.format(h, w))
blob = cv2.dnn.blobFromImage(img)  # the 'size' param resizes the output to the given shape

# Load the net
net = cv2.dnn.readNet(args.model)
net.setPreferableBackend(args.backend)
net.setPreferableTarget(args.target)

# Run the net
output_names = ['loc', 'conf', 'iou']
net.setInput(blob)
loc, conf, iou = net.forward(output_names)

# Decode bboxes and landmarks
pb = PriorBox(input_shape=(w, h), output_shape=(w, h))
dets = pb.decode(np.squeeze(loc, axis=0), np.squeeze(conf, axis=0),
                 np.squeeze(iou, axis=0), args.conf_thresh)

# NMS
if dets.shape[0] > 0:
    dets = nms(dets, args.nms_thresh)
    faces = dets[:args.keep_top_k, :]
    print('Detection results: {} faces found'.format(faces.shape[0]))
    print(faces)
else:
    print('No faces found.')
    exit()

# Draw bounding boxes and landmarks on the original image
img_res = draw(cv2.cvtColor(img, cv2.COLOR_BGR2RGB), faces[:, :4],
Code example #9
class faceDetectorModel:
    def __init__(self,
                 method='haarCascades',
                 gpu=0,
                 confidence=0.7,
                 threshold=0.3):
        self.gpu = gpu
        self.method = method
        self.init = 0
        self.detector = None
        self.pb = None
        self.detectorInit()
        self.confidence = confidence
        self.threshold = threshold

    def detectorInit(self):
        if self.method == 'haarCascades':
            if self.gpu == 0:
                self.detector = cv2.CascadeClassifier(
                    'faceDetect/haarcascade_frontalface_default.xml')
            elif self.gpu == 1:
                self.detector = cv2.cuda.CascadeClassifier_create(
                    'faceDetect'
                    '/haarcascade_frontalface_default_cuda.xml')

        elif self.method == 'lbpCascades':
            if self.gpu == 0:
                self.detector = cv2.CascadeClassifier(
                    'faceDetect/lbpcascade_frontalface_improved.xml')
            elif self.gpu == 1:
                self.detector = cv2.cuda.CascadeClassifier_create(
                    'faceDetect/lbpcascade_frontalface_improved.xml')

        elif self.method == 'yuNet':
            self.detector = cv2.dnn.readNet(
                'faceDetect/YuFaceDetectNet_640.onnx')
            self.detector.setPreferableBackend(cv2.dnn.DNN_BACKEND_OPENCV)
            self.detector.setPreferableTarget(cv2.dnn.DNN_TARGET_CPU)

    def CascadesDetector(self, frame):
        if self.gpu == 1:
            faces = []
            gpuFrame = cv2.cuda_GpuMat()
            gpuFrame.upload(frame)
            gpuMat = cv2.cuda.cvtColor(gpuFrame, cv2.COLOR_BGR2GRAY)
            objbuff = self.detector.detectMultiScale(gpuMat)
            facess = objbuff.download()
            if facess is None:
                facess = ()
            facess = np.array(facess)
            for multipleFace in facess:
                for face in multipleFace:
                    faces.append(face)
            return faces
        elif self.gpu == 0:
            grayFrame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            faces = self.detector.detectMultiScale(grayFrame,
                                                   scaleFactor=1.2,
                                                   minNeighbors=5,
                                                   minSize=(20, 20))
            return faces

    def yuNetDetection(self, frame):
        if self.init == 0:
            frameHeight, frameWidth = frame.shape[:2]
            self.pb = PriorBox(input_shape=(640, 480),
                               output_shape=(frameWidth, frameHeight))
            self.init = 1

        blob = cv2.dnn.blobFromImage(frame, size=(640, 480))
        outputNames = ['loc', 'conf', 'iou']
        self.detector.setInput(blob)
        loc, conf, iou = self.detector.forward(outputNames)
        dets = self.pb.decode(np.squeeze(loc, axis=0),
                              np.squeeze(conf, axis=0),
                              np.squeeze(iou, axis=0))
        idx = np.where(dets[:, -1] > self.confidence)[0]
        dets = dets[idx]

        if dets.shape[0] == 0:
            return ()
        facess = nms(dets, self.threshold)

        faces = np.array(facess[:, :4])
        faces = faces.astype(int)  # np.int was removed in NumPy 1.24
        faceStartXY = faces[:, :2]
        faceEndXY = faces[:, 2:4]
        faceWH = faceEndXY - faceStartXY
        faces = np.hstack((faceStartXY, faceWH))
        # scores = facess[:, -1]
        return faces

    def predict(self, frame, painted=1):
        frameNew = frame.copy()
        faces = ()
        if self.method == 'haarCascades' or self.method == 'lbpCascades':
            faces = self.CascadesDetector(frameNew)
        elif self.method == 'yuNet':
            faces = self.yuNetDetection(frameNew)

        if painted:
            for (x, y, w, h) in faces:
                cv2.rectangle(frameNew, (x, y), (x + w, y + h), (0, 0, 255))

        return frameNew, faces
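
A minimal hedged usage sketch for this class, assuming the cascade XML and ONNX files referenced in detectorInit exist on disk (camera index 0 is illustrative):

    detector = faceDetectorModel(method='yuNet', confidence=0.7, threshold=0.3)
    cap = cv2.VideoCapture(0)
    ret, frame = cap.read()
    if ret:
        painted, faces = detector.predict(frame, painted=1)
        cv2.imshow('faces', painted)
        cv2.waitKey(0)
    cap.release()
    cv2.destroyAllWindows()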
Code example #10
File: s3fd.py  Project: jimeffry/deep_sort_face
class S3FD(nn.Module):
    """Single Shot Multibox Architecture
    The network is composed of a base VGG network followed by the
    added multibox conv layers.  Each multibox layer branches into
        1) conv2d for class conf scores
        2) conv2d for localization predictions
        3) associated priorbox layer to produce default bounding
           boxes specific to the layer's feature map size.
    See: https://arxiv.org/pdf/1512.02325.pdf for more details.

    Args:
        phase: (string) Can be "test" or "train"
        size: input image size
        base: VGG16 layers for input, size of either 300 or 500
        extras: extra layers that feed to multibox loc and conf layers
        head: "multibox head" consists of loc and conf conv layers
    """
    def __init__(self, phase, base, extras, head, num_classes):
        super(S3FD, self).__init__()
        self.phase = phase
        self.num_classes = num_classes
        '''
        self.priorbox = PriorBox(size,cfg)
        self.priors = Variable(self.priorbox.forward(), volatile=True)
        '''
        # SSD network
        self.vgg = nn.ModuleList(base)
        # Layer learns to scale the l2 normalized features from conv4_3
        self.L2Norm3_3 = L2Norm(256, 10)
        self.L2Norm4_3 = L2Norm(512, 8)
        self.L2Norm5_3 = L2Norm(512, 5)

        self.extras = nn.ModuleList(extras)

        self.loc = nn.ModuleList(head[0])
        self.conf = nn.ModuleList(head[1])
        self.priorbox = PriorBox(cfg)
        with torch.no_grad():
            self.priors = self.priorbox.forward()
        # if self.phase == 'test':
        #     self.softmax = nn.Softmax(dim=-1)
        #     self.detect = Detect(cfg)

    def forward(self, x):
        """Applies network layers and ops on input image(s) x.

        Args:
            x: input image or batch of images. Shape: [batch,3,300,300].

        Return:
            Depending on phase:
            test:
                Variable(tensor) of output class label predictions,
                confidence score, and corresponding location predictions for
                each object detected. Shape: [batch,topk,7]

            train:
                list of concat outputs from:
                    1: confidence layers, Shape: [batch*num_priors,num_classes]
                    2: localization layers, Shape: [batch,num_priors*4]
                    3: priorbox layers, Shape: [2,num_priors*4]
        """
        #size = x.size()[2:]
        sources = list()
        loc = list()
        conf = list()

        # apply vgg up to conv4_3 relu
        for k in range(16):
            x = self.vgg[k](x)

        s = self.L2Norm3_3(x)
        sources.append(s)
        #print('conv3:',s.size())
        # apply vgg up to fc7
        for k in range(16, 23):
            x = self.vgg[k](x)

        s = self.L2Norm4_3(x)
        sources.append(s)
        #print('conv4:',s.size())
        for k in range(23, 30):
            x = self.vgg[k](x)

        s = self.L2Norm5_3(x)
        sources.append(s)

        for k in range(30, len(self.vgg)):
            x = self.vgg[k](x)
        sources.append(x)

        # apply extra layers and cache source layer outputs
        for k, v in enumerate(self.extras):
            x = F.relu(v(x), inplace=True)
            if k % 2 == 1:
                sources.append(x)

        # apply multibox head to source layers

        loc_x = self.loc[0](sources[0])
        conf_x = self.conf[0](sources[0])

        max_conf, _ = torch.max(conf_x[:, 0:3, :, :], dim=1, keepdim=True)
        conf_x = torch.cat((max_conf, conf_x[:, 3:, :, :]), dim=1)

        loc.append(loc_x.permute(0, 2, 3, 1).contiguous())
        conf.append(conf_x.permute(0, 2, 3, 1).contiguous())

        for i in range(1, len(sources)):
            x = sources[i]
            conf.append(self.conf[i](x).permute(0, 2, 3, 1).contiguous())
            loc.append(self.loc[i](x).permute(0, 2, 3, 1).contiguous())
        '''
        for (x, l, c) in zip(sources, self.loc, self.conf):
            loc.append(l(x).permute(0, 2, 3, 1).contiguous())
            conf.append(c(x).permute(0, 2, 3, 1).contiguous())
        '''

        # features_maps = []
        # for i in range(len(loc)):
        #     feat = []
        #     feat += [loc[i].size(1), loc[i].size(2)]
        #     features_maps += [feat]
        #     print(i,loc[i].size(1), loc[i].size(2))

        #Variable(self.priorbox.forward(), volatile=True)

        loc = torch.cat([o.view(o.size(0), -1) for o in loc], 1)
        conf = torch.cat([o.view(o.size(0), -1) for o in conf], 1)

        # if self.phase == 'test':
        #     output = self.detect(
        #         loc.view(loc.size(0), -1, 4),
        #         self.softmax(conf.view(conf.size(0), -1,self.num_classes)),
        #         self.priors)

        # else:
        #     output = (
        #         loc.view(loc.size(0), -1, 4),
        #         conf.view(conf.size(0), -1,self.num_classes),
        #         self.priors
        #     )
        output = (loc.view(loc.size(0), -1, 4),
                  conf.view(conf.size(0), -1, self.num_classes),
                  self.priors)
        return output
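
As the docstring notes, outside of the 'test' phase the network returns the raw (loc, conf, priors) triple. A hedged smoke-test sketch, reusing the build_s3fd factory that appears in commented-out form in example #11 below (the factory and the 640x640 input size are assumptions, not confirmed by this excerpt):

    net = build_s3fd('test', cfg.NUM_CLASSES)   # assumed project factory, see example #11
    net.eval()
    with torch.no_grad():
        loc, conf, priors = net(torch.randn(1, 3, 640, 640))
    print(loc.shape, conf.shape, priors.shape)  # [1, N, 4], [1, N*num_classes flattened], [N, 4]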
Code example #11
class HeadDetect(object):
    def __init__(self,args):
        if args.ctx and torch.cuda.is_available():
            self.use_cuda = True
        else:
            self.use_cuda = False
        if self.use_cuda:
            torch.set_default_tensor_type('torch.cuda.FloatTensor')
        else:
            torch.set_default_tensor_type('torch.FloatTensor')
        self.loadmodel(args.headmodelpath)
        self.threshold = args.conf_thresh
        self.img_dir = args.img_dir
        
        self.detect = Detect(cfg)
        self.Prior = PriorBox(cfg)
        with torch.no_grad():
            self.priors = self.Prior.forward()

    def loadmodel(self,modelpath):
        if self.use_cuda:
            device = 'cuda'
        else:
            device = 'cpu'
        # self.net = build_s3fd('test', cfg.NUM_CLASSES)
        self.net = S3FD(cfg.NUM_CLASSES)
        self.net.load_state_dict(torch.load(modelpath,map_location=device))
        self.net.eval()
        # print(self.net)
        if self.use_cuda:
            self.net.cuda()
            cudnn.benchmark = True
    def propress(self,img):
        rgb_mean = np.array([123.,117.,104.])[np.newaxis, np.newaxis,:].astype('float32')
        img = cv2.resize(img,(cfg.resize_width,cfg.resize_height))
        img = cv2.cvtColor(img,cv2.COLOR_BGR2RGB)
        img = img.astype('float32')
        img -= rgb_mean
        #img = img[:,:,::-1]
        img = np.transpose(img,(2,0,1))
        return img
    def xyxy2xywh(self, bbox_score):
        bboxes = bbox_score[0]
        bbox = bboxes[0]
        score = bboxes[1]
        bbox[:, 2] = bbox[:, 2] - bbox[:, 0]
        bbox[:, 3] = bbox[:, 3] - bbox[:, 1]
        bbox_out=[]
        scores = []
        for j in range(bbox.shape[0]):
            dets = bbox[j] 
            sc = score[j]
            min_re = min(dets[2],dets[3])
            if min_re < 16:
                thresh = 0.2
            else:
                thresh = 0.8
            if sc >= thresh:
                bbox_out.append(dets)
                scores.append(sc)
        return np.array(bbox_out),np.array(scores)
    def nms_filter(self,bboxes,scale):
        boxes = bboxes[0][0] * scale
        scores = bboxes[0][1]
        ids, count = nms_py(boxes, scores, 0.3,1000)
        boxes = boxes[ids[:count]]
        scores = scores[ids[:count]]
        return [[boxes,scores]]
    def inference_img(self,imgorg):
        t1 = time.time()
        imgh,imgw = imgorg.shape[:2]
        scale = np.array([imgw,imgh,imgw,imgh])
        scale = np.expand_dims(scale,0)
        img = self.propress(imgorg.copy())
        bt_img = Variable(torch.from_numpy(img).unsqueeze(0))
        if self.use_cuda:
            bt_img = bt_img.cuda()
        output = self.net(bt_img)
        t2 = time.time()
        with torch.no_grad():
            bboxes = self.detect(output[0],output[1],self.priors)
        t3 = time.time()
        bboxes = self.nms_filter(bboxes,scale)
        print('consuming:',t2-t1,t3-t2)
        #showimg = self.label_show(bboxes,imgorg)
        bbox = []
        score = []
        if len(bboxes)>0:
            bbox,score = self.xyxy2xywh(bboxes)
        # showimg = self.label_show(bbox,score,imgorg)
        return bbox,score
        # return showimg,bbox
    def label_show(self,rectangles,scores,img):
        # imgh,imgw,_ = img.shape
        # scale = np.array([imgw,imgh,imgw,imgh])
        for j in range(rectangles.shape[0]):
            dets = rectangles[j]
            score = scores[j]
            x1,y1 = dets[:2]
            x2,y2 = dets[:2] +dets[2:]
            cv2.rectangle(img,(int(x1),int(y1)),(int(x2),int(y2)),(0,0,255),2)
            txt = "{:.3f}".format(score)
            point = (int(x1),int(y1-5))
            cv2.putText(img,txt,point,cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)
        return img

    def detectheads(self,imgpath):
        if os.path.isdir(imgpath):
            cnts = os.listdir(imgpath)
            for tmp in cnts:
                tmppath = os.path.join(imgpath,tmp.strip())
                img = cv2.imread(tmppath)
                if img is None:
                    continue
                # inference_img returns (bbox, score); draw them before display
                bbox, score = self.inference_img(img)
                showimg = self.label_show(bbox, score, img)
                cv2.imshow('demo', showimg)
                cv2.waitKey(0)
        elif os.path.isfile(imgpath) and imgpath.endswith('txt'):
            # if not os.path.exists(self.save_dir):
            #     os.makedirs(self.save_dir)
            f_r = open(imgpath,'r')
            file_cnts = f_r.readlines()
            for j in tqdm(range(len(file_cnts))):
                tmp_file = file_cnts[j].strip()
                if len(tmp_file.split(','))>0:
                    tmp_file = tmp_file.split(',')[0]
                if not tmp_file.endswith('jpg'):
                    tmp_file = tmp_file +'.jpeg'
                tmp_path = os.path.join(self.img_dir,tmp_file) 
                if not os.path.exists(tmp_path):
                    print(tmp_path)
                    continue
                img = cv2.imread(tmp_path) 
                if img is None:
                    print('None', tmp_path)
                    continue
                bbox, score = self.inference_img(img)
                frame = self.label_show(bbox, score, img)
                cv2.imshow('result', frame)
                #savepath = os.path.join(self.save_dir,save_name)
                #cv2.imwrite('test.jpg',frame)
                cv2.waitKey(0) 
        elif os.path.isfile(imgpath) and imgpath.endswith(('.mp4','.avi')) :
            cap = cv2.VideoCapture(imgpath)
            if not cap.isOpened():
                print("failed open camera")
                return 0
            else: 
                while cap.isOpened():
                    ret, img = cap.read()
                    if not ret:
                        break
                    bbox, score = self.inference_img(img)
                    frame = self.label_show(bbox, score, img)
                    cv2.imshow('result', frame)
                    q=cv2.waitKey(10) & 0xFF
                    if q == 27 or q ==ord('q'):
                        break
            cap.release()
            cv2.destroyAllWindows()
        elif os.path.isfile(imgpath):
            img = cv2.imread(imgpath)
            if img is not None:
                # grab next frame
                # update FPS counter
                bbox, score = self.inference_img(img)
                frame = self.label_show(bbox, score, img)
                # hotmaps = self.get_hotmaps(odm_maps)
                # self.display_hotmap(hotmaps)
                # keybindings for display
                cv2.imshow('result',frame)
                #cv2.imwrite('test30.jpg',frame)
                key = cv2.waitKey(0) 
        else:
            print('please input a valid image path')
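
A hedged driver sketch for HeadDetect; the flag names mirror the attributes read in __init__ above, while the default values and paths are illustrative:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--ctx', action='store_true', help='use CUDA when available')
    parser.add_argument('--headmodelpath', default='weights/s3fd_head.pth')  # hypothetical path
    parser.add_argument('--conf_thresh', type=float, default=0.5)
    parser.add_argument('--img_dir', default='images')
    args = parser.parse_args()

    detector = HeadDetect(args)
    detector.detectheads('test.jpg')  # accepts a file, directory, .txt list, or video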