Example No. 1
    def update(self):
        # keep looping infinitely
        i = 0
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # downscale the frame before further processing
                frame = cv2.resize(
                    frame,
                    (int(frame.shape[1] * 0.2), int(frame.shape[0] * 0.2)),
                    interpolation=cv2.INTER_AREA)
                inp_dim = int(opt.inp_dim)
                img, orig_img, dim = prep_frame(frame, inp_dim)

                im_name = str(i) + '.jpg'

                with torch.no_grad():
                    # Human Detection

                    im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i = i + 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()
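Every snippet on this page funnels frames through prep_frame(frame, inp_dim) and unpacks a (img, orig_img, dim) triple: a normalized (1, 3, inp_dim, inp_dim) float tensor, the untouched BGR frame, and the original (width, height). The helper's own source is not reproduced here, so the block below is only a minimal sketch, assuming the usual YOLO-style letterbox preprocessing used by AlphaPose-like loaders; the name prep_frame_sketch is hypothetical.

    import cv2
    import numpy as np
    import torch

    def prep_frame_sketch(frame, inp_dim):
        """Hypothetical sketch of prep_frame: letterbox a BGR frame into an
        (inp_dim, inp_dim) canvas and return a normalized CHW batch tensor."""
        h, w = frame.shape[:2]
        dim = (w, h)                                  # original (width, height)
        scale = min(inp_dim / w, inp_dim / h)
        new_w, new_h = int(w * scale), int(h * scale)
        resized = cv2.resize(frame, (new_w, new_h), interpolation=cv2.INTER_CUBIC)
        canvas = np.full((inp_dim, inp_dim, 3), 128, dtype=np.uint8)  # gray padding
        top, left = (inp_dim - new_h) // 2, (inp_dim - new_w) // 2
        canvas[top:top + new_h, left:left + new_w] = resized
        # BGR -> RGB, HWC -> CHW, [0, 255] -> [0, 1], add a batch dimension
        img = canvas[:, :, ::-1].transpose(2, 0, 1).copy()
        img = torch.from_numpy(img).float().div(255.0).unsqueeze(0)
        return img, frame, dim

The centered gray padding is what the (self.det_inp_dim - scaling_factor * im_dim_list) / 2 terms in the later examples subtract back out during the coordinate transfer.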
Example No. 2
    def getitem_yolo(self):
        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize, min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                image_k = self.imglist[k]
                img_k, orig_img_k, im_dim_list_k = prep_frame(image_k, inp_dim)

                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(self.img_names[k])
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

            while self.Q.full():
                time.sleep(2)

            self.Q.put((img, orig_img, im_name, im_dim_list))
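The i * self.batchSize / min((i + 1) * self.batchSize, self.datalen) slicing assumes num_batches was computed to cover datalen with one final partial batch; Example No. 18 spells the same arithmetic out for its pose batches. A self-contained sketch with made-up sizes:

    datalen = 10      # e.g. len(self.imglist)
    batchSize = 4
    leftover = 1 if datalen % batchSize else 0
    num_batches = datalen // batchSize + leftover   # 3 batches: 4 + 4 + 2 items
    for i in range(num_batches):
        lo, hi = i * batchSize, min((i + 1) * batchSize, datalen)
        print(f'batch {i}: items {lo}..{hi - 1}')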
Example No. 3
    def forward(self, Q_load):
        # keep looping infinitely
        i = 0
        while True:
            time.sleep(0.02)
            while Q_load.qsize() > 14:
                Q_load.get()
            img = []
            orig_img = []
            im_dim_list = []

            (grabbed, frame) = self.stream.read()

            # if the `grabbed` boolean is `False`, then we have
            # reached the end of the video file
            if not grabbed:
                return 0

            frame = frame[:, 250:]
            #frame = cv2.resize(frame,None,fx=0.5, fy=0.5, interpolation = cv2.INTER_CUBIC)

            inp_dim = int(opt.inp_dim)
            img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
            img.append(img_k)
            orig_img.append(orig_img_k)
            im_dim_list.append(im_dim_list_k)

            img = torch.cat(img)
            im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
            Q_load.put((img, orig_img, im_dim_list))
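The while Q_load.qsize() > 14: Q_load.get() loop is a drop-oldest backpressure scheme: rather than blocking the capture thread when the consumer lags, stale frames are discarded so the queue stays near real time. A standalone sketch of the same pattern (the threshold 14 comes from the example; integer payloads stand in for the frame tuples):

    import queue

    Q_load = queue.Queue()
    for frame_id in range(20):
        while Q_load.qsize() > 14:   # same threshold as the example
            Q_load.get()             # drop the oldest item
        Q_load.put(frame_id)
    print(Q_load.qsize())            # 15: bounded, with the newest frames kept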
Example No. 4
    def getitem_yolo(self):
        for i in range(self.num_batches):  # 1
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize,
                           min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                #im_name_k = self.imglist[k].rstrip('\n').rstrip('\r')
                #im_name_k = os.path.join(self.img_dir, im_name_k)   # Path
                #img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)

                img_k, orig_img_k, im_dim_list_k = prep_frame(
                    self.recv_image, inp_dim)

                img.append(img_k)
                orig_img.append(orig_img_k)
                #im_name.append(im_name_k)
                im_name.append("./")
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list_ = im_dim_list

            while self.Q.full():
                time.sleep(2)

            self.Q.put((img, orig_img, im_name, im_dim_list))
Example No. 5
    def update(self):
        # keep looping infinitely
        i = 0
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize):
                    (grabbed, frame) = self.stream.read()
                    # if the `grabbed` boolean is `False`, then we have
                    # reached the end of the video file
                    if not grabbed:
                        self.stop()
                        return
                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(
                        frame, inp_dim)

                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    im_name.append(str(i) + '.jpg')
                    im_dim_list.append(im_dim_list_k)

                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i = i + 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()
Example No. 6
    def getitem_yolo(self):  # main loop thread; keep feeding new images in from the node file's callback
        i = 0
        while True:
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize): # for k in range(0, 1):
                    ### fetch the latest image from the subscription
                    frame = self.img_sorce.getImage()
                    print("process new frame...")

                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                
                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    im_name.append(str(i) + '.jpg')
                    im_dim_list.append(im_dim_list_k)

                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                    im_dim_list_ = im_dim_list
                    
                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i += 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()
Example No. 7
    def update(self):
        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize,
                           min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    return
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(k)
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
            self.Q.append((img, orig_img, im_name, im_dim_list))
Example No. 8
    def update(self):
        print(f'WebcamLoader_update_thread: {threading.currentThread().name}')
        # keep looping infinitely
        i = 0
        is_disconnet = False
        while not self.stopped:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                img = []
                orig_img = []
                im_name = []
                im_dim_list = []
                for k in range(self.batchSize):
                    (grabbed, frame) = self.stream.read()
                    # if the `grabbed` boolean is `False`, then we have
                    # reached the end of the video file or the stream has disconnected

                    if not grabbed:
                        print(f'{get_time_now()} read frame,grabbed:{grabbed}')
                        is_disconnet = True
                        break
                    inp_dim = int(opt.inp_dim)
                    img_k, orig_img_k, im_dim_list_k = prep_frame(
                        frame, inp_dim)

                    img.append(img_k)
                    orig_img.append(orig_img_k)
                    im_name.append(str(i) + '.jpg')
                    im_dim_list.append(im_dim_list_k)

                if is_disconnet:
                    break

                with torch.no_grad():
                    # Human Detection
                    img = torch.cat(img)
                    im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

                    self.Q.put((img, orig_img, im_name, im_dim_list))
                    i = i + 1
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()
        # reconnect the stream after the video connection drops
        if is_disconnet:
            self.reLoadStream()
            self.update()
Example No. 9
    def image_preprocess(self, img_source):
        """
        Pre-process the image before feeding it to the object detection network.
        Input: an image name (str) or raw image data (ndarray or torch.Tensor, BGR channel order)
        Output: pre-processed image data (torch.FloatTensor, (1, 3, h, w))
        """
        if isinstance(img_source, str):
            img, orig_img, im_dim_list = prep_image(img_source, self.inp_dim)
        elif isinstance(img_source, torch.Tensor) or isinstance(img_source, np.ndarray):
            img, orig_img, im_dim_list = prep_frame(img_source, self.inp_dim)
        else:
            raise IOError('Unknown image source type: {}'.format(type(img_source)))

        return img
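This helper simply routes string paths to prep_image and decoded frames to prep_frame. A quick sketch of those two underlying calls (the import path is an assumption and may differ by AlphaPose version; person.jpg is a placeholder file):

    import cv2
    # Assumed import path; in AlphaPose these helpers conventionally live in
    # the YOLO sub-package, and the exact module may differ between versions.
    from yolo.preprocess import prep_frame, prep_image

    # str source -> prep_image reads and decodes the file itself
    img_a, orig_a, dim_a = prep_image('person.jpg', 608)
    # ndarray source -> prep_frame takes an already-decoded BGR frame
    img_b, orig_b, dim_b = prep_frame(cv2.imread('person.jpg'), 608)
    print(img_a.shape, img_b.shape)   # torch.Size([1, 3, 608, 608]) twice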
Example No. 10
    def getitem_yolo(self):
        img = []
        orig_img = []
        im_dim_list = []
        inp_dim = int(opt.inp_dim)
        img_k, orig_img_k, im_dim_list_k = prep_frame(self.frame, inp_dim)
        img.append(img_k)
        orig_img.append(orig_img_k)
        im_dim_list.append(im_dim_list_k)
        self.orig_img = orig_img
        with torch.no_grad():
            # Human Detection
            self.img = torch.cat(img)
            self.im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
            self.im_dim_list_ = im_dim_list
Example No. 11
    def update(self):
        while True:
            sys.stdout.flush()
            print("camera load len : " + str(self.Q.qsize()))

            img = []
            orig_img = []
            im_name = []
            im_dim_list = []

            inp_dim = int(opt.inp_dim)
            time_take = time.time()
            grabbed, frame = self.stream.read()

            # if the `grabbed` boolean is `False`, then we have
            # reached the end of the video file
            if not grabbed:
                self.Q.put((None, None, None, None))
                sys.stdout.flush()
                return

            # process and add the frame to the queue
            img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
            # cv2.imshow("camera", frame)
            # cv2.waitKey(10)

            img.append(img_k)
            orig_img.append(orig_img_k)
            im_name.append(str(time.time()) + '.jpg')
            im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

            sys.stdout.flush()
            print(img.shape)

            # while self.Q.full():
            #     time.sleep(2)

            self.Q.put((img, orig_img, im_name, im_dim_list))
            time_save = time.time()
Example No. 12
    def process(self, frame):
        img = []
        orig_img = []
        im_name = []
        im_dim_list = []
        img_k, orig_img_k, im_dim_list_k = prep_frame(frame, self.in_dim)

        img.append(img_k)
        orig_img.append(orig_img_k)
        im_name.append('0.jpg')
        im_dim_list.append(im_dim_list_k)

        with torch.no_grad():
            # Human Detection
            img = torch.cat(img)
            im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
            im_dim_list_ = im_dim_list

        return img, orig_img, im_name, im_dim_list
Example No. 13
    def update(self):
        stream = cv2.VideoCapture(self.path)
        assert stream.isOpened(), 'Cannot capture source'

        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize,
                           min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                (grabbed, frame) = stream.read()
                if frame is not None:
                    frame = frame[:, 250:]
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.Q.put((None, None, None, None))
                    print('===========================> This video got ' +
                          str(k) + ' frames in total.')
                    sys.stdout.flush()
                    return
                # process and add the frame to the queue
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)

                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(str(k) + '.jpg')
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list_ = im_dim_list

            while self.Q.full():
                time.sleep(2)

            self.Q.put((img, orig_img, im_name, im_dim_list))
Example No. 14
    def update(self):
        # keep looping infinitely
        while True:
            # otherwise, ensure the queue has room in it
            if not self.Q.full():
                # read the next frame from the file
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img, orig_img, dim = prep_frame(frame, inp_dim)
                inp = im_to_torch(orig_img)
                im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)

                self.Q.put((img, orig_img, inp, im_dim_list))
            else:
                with self.Q.mutex:
                    self.Q.queue.clear()
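This example (and Nos. 16 and 20) also runs the original frame through im_to_torch before queueing it, so the pose stage receives a full-resolution tensor alongside the letterboxed detector input. im_to_torch's source is not shown on this page; a minimal sketch of what such a helper conventionally does, under that assumption:

    import numpy as np
    import torch

    def im_to_torch_sketch(img):
        """Hypothetical stand-in for im_to_torch: HWC uint8 image ->
        CHW float tensor scaled to [0, 1]."""
        tensor = torch.from_numpy(np.ascontiguousarray(img.transpose(2, 0, 1)))
        return tensor.float().div(255.0)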
Example No. 15
    def update(self):
        stream = cv2.VideoCapture(self.path)
        assert stream.isOpened(), 'Cannot capture source'

        for i in range(self.num_batches):
            img = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize,
                           min((i + 1) * self.batchSize, self.datalen)):
                inp_dim = int(opt.inp_dim)
                (grabbed, frame) = stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    print(
                        "Video ended abnormally! Check that the video is in a standard format."
                    )
                    return
                # process and add the frame to the queue
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)

                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(str(k) + '.jpg')
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                # Human Detection
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list_ = im_dim_list

            while self.Q.full():
                time.sleep(2)

            self.Q.put((img, orig_img, im_name, im_dim_list))
Example No. 16
    def update(self):
        # keep looping the whole video
        for i in range(self.num_batches):
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(i * self.batchSize,
                           min((i + 1) * self.batchSize, self.datalen)):
                (grabbed, frame) = self.stream.read()
                # if the `grabbed` boolean is `False`, then we have
                # reached the end of the video file
                if not grabbed:
                    self.stop()
                    return
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)

                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)

            with torch.no_grad():
                ht = inp[0].size(1)
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list = im_dim_list.cuda()

                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction,
                                             opt.confidence,
                                             opt.num_classes,
                                             nms=True,
                                             nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    for k in range(len(inp)):
                        while self.Q.full():
                            time.sleep(0.2)
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue

                im_dim_list = torch.index_select(im_dim_list, 0,
                                                 dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list,
                                           1)[0].view(-1, 1)

                # coordinate transfer
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 1].view(-1, 1)) / 2

                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0,
                                                  im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0,
                                                  im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()

            for k in range(len(inp)):
                while self.Q.full():
                    time.sleep(0.2)
                self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k],
                            scores[dets[:, 0] == k]))
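The coordinate-transfer block above inverts the letterbox mapping: subtract the padding that was added on each axis, then divide by the scaling factor, and finally clamp to the frame. A hedged numeric check with an assumed 768x432 source frame and the 608-pixel detector input (here the box is a plain [x1, y1, x2, y2] row, whereas dets in the example carries the batch index in column 0, hence its [1, 3]/[2, 4] indexing):

    import torch

    det_inp_dim = 608.0
    im_dim = torch.tensor([[768.0, 432.0]])               # original (w, h)
    scaling = torch.min(det_inp_dim / im_dim, 1)[0].view(-1, 1)     # ~0.7917
    box = torch.tensor([[100.0, 200.0, 300.0, 400.0]])    # coords in 608-space
    pad_x = (det_inp_dim - scaling * im_dim[:, 0].view(-1, 1)) / 2  # 0: width fits
    pad_y = (det_inp_dim - scaling * im_dim[:, 1].view(-1, 1)) / 2  # 133 px bars
    box[:, [0, 2]] = (box[:, [0, 2]] - pad_x) / scaling
    box[:, [1, 3]] = (box[:, [1, 3]] - pad_y) / scaling
    print(box)   # ~[[126.3, 84.6, 378.9, 337.3]], now in original-frame pixels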
Example No. 17
def preprocess(frame_0, frame_1):
    frame = np.concatenate([frame_0, frame_1], 0)
    inp_dim = int(args.inp_dim)  # default=608
    img, orig_img, dim = prep_frame(frame, inp_dim)
    im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
    return img, orig_img, dim, im_dim_list
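Example No. 19 below uses this helper's logic inline and prints the resulting shapes; as a quick standalone check, with synthetic all-black frames standing in for the two camera streams and args.inp_dim assumed to be the 608 default (preprocess and prep_frame must be in scope):

    import numpy as np
    from types import SimpleNamespace

    args = SimpleNamespace(inp_dim=608)     # stand-in for the parsed CLI args

    frame_0 = np.zeros((432, 768, 3), dtype=np.uint8)   # fake camera 0 frame
    frame_1 = np.zeros((432, 768, 3), dtype=np.uint8)   # fake camera 1 frame
    img, orig_img, dim, im_dim_list = preprocess(frame_0, frame_1)
    print(img.shape)       # torch.Size([1, 3, 608, 608])
    print(orig_img.shape)  # (864, 768, 3): the two frames stacked vertically
    print(dim)             # (768, 864)
    print(im_dim_list)     # tensor([[768., 864., 768., 864.]])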
Example No. 18
    def update(self):

        time1 = time.time()

        _, frame = self.stream.read()
        # frame = cv2.resize(frame, (frame.shape[1]//2,frame.shape[0]//2))

        #TODO TESTING
        # frame[:,:200,:]=0
        # frame[:,450:,:]=0


        img_k, self.orig_img, im_dim_list_k = prep_frame(frame, self.inp_dim)
        
        img = [img_k]
        im_name = ["im_name"]
        im_dim_list = [im_dim_list_k] 

        img = torch.cat(img)
        im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

        time2 = time.time()


        with torch.no_grad():
            ### detector 
            #########################
            # Human Detection
            img = img.cuda()
            prediction = self.det_model(img, CUDA=True)
            # NMS process
            dets = dynamic_write_results(prediction, opt.confidence,
                                        opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
            if isinstance(dets, int) or dets.shape[0] == 0:   
                self.visualize2dnoperson()
                return None
                
            
            dets = dets.cpu()
            im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
            scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

            # coordinate transfer
            dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
            dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2

            dets[:, 1:5] /= scaling_factor
            for j in range(dets.shape[0]):
                dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
            boxes = dets[:, 1:5]
            scores = dets[:, 5:6]

            boxes_k = boxes[dets[:, 0] == 0]
            if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                self.visualize2dnoperson()
                return None
            inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
            pt1 = torch.zeros(boxes_k.size(0), 2)
            pt2 = torch.zeros(boxes_k.size(0), 2)

            time3 = time.time()


            ### processor 
            #########################
            inp = im_to_torch(cv2.cvtColor(self.orig_img, cv2.COLOR_BGR2RGB))
            inps, pt1, pt2 = self.crop_from_dets(inp, boxes, inps, pt1, pt2)

            ### generator
            #########################            
            self.orig_img = np.array(self.orig_img, dtype=np.uint8)
            # location prediction (n, kp, 2) | score prediction (n, kp, 1)

            datalen = inps.size(0)
            batchSize = 20  # args.posebatch
            leftover = 0
            if datalen % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []

            time4 = time.time()

            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = self.pose_model(inps_j)
                hm.append(hm_j)
            
            
            hm = torch.cat(hm)
            hm = hm.cpu().data

            preds_hm, preds_img, preds_scores = getPrediction(
                hm, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
            result = pose_nms(
                boxes, scores, preds_img, preds_scores)

            time5 = time.time()

            if not result:  # No people
                self.visualize2dnoperson()
                return None

            self.kpt = max(
                result,
                key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']),
            )['keypoints']
            self.visualize2d()

            time6 = time.time()
            print("process time : {} ".format(time6 - time5))
            return self.kpt
Example No. 19
    print('Starting webcam demo, press Ctrl + C to terminate...')
    sys.stdout.flush()
    im_names_desc = tqdm(loop())
    for i in im_names_desc:
        try:
            begin = time.time()
            start_time = getTime()
            frame_0 = fvs_0.read()
            frame_1 = fvs_1.read()
            single_height = frame_0.shape[0]
            print(frame_0.shape)  # (432, 768, 3)

            # pre-process
            frame = np.concatenate([frame_0, frame_1], 0)
            inp_dim = int(args.inp_dim)  # default=608
            img, orig_img, dim = prep_frame(frame, inp_dim)
            #print('img:',img.shape)  # torch.Size([1, 3, 608, 608])
            # print('orig_img:',orig_img.shape)  # (864, 768, 3)
            # print('dim',dim)    # (768, 864)

            inp = im_to_torch(orig_img)
            im_dim_list = torch.FloatTensor([dim]).repeat(1, 2)
            # print(im_dim_list) # tensor([[768., 864., 768., 864.]])

            ckpt_time, load_time = getTime(start_time)
            runtime_profile['ld'].append(load_time)
            with torch.no_grad():
                # human detection
                img = Variable(img).cuda()
                im_dim_list = im_dim_list.cuda()
Example No. 20
    def update(self):
        print(
            f'WebcamDetectionLoader_update_thread: {threading.currentThread().name}'
        )
        # keep looping
        while True:
            img = []
            inp = []
            orig_img = []
            im_name = []
            im_dim_list = []
            for k in range(self.batchSize):
                (grabbed, frame) = self.stream.read()
                if not grabbed:
                    continue
                h, w, c = frame.shape
                # frame = cv2.resize(frame, (int(w / 4), int(h / 4)), interpolation=cv2.INTER_CUBIC)
                # process and add the frame to the queue
                inp_dim = int(opt.inp_dim)
                img_k, orig_img_k, im_dim_list_k = prep_frame(frame, inp_dim)
                inp_k = im_to_torch(orig_img_k)

                img.append(img_k)
                inp.append(inp_k)
                orig_img.append(orig_img_k)
                im_dim_list.append(im_dim_list_k)

            if not inp:
                # every read in this batch failed; try again
                continue

            with torch.no_grad():
                ht = inp[0].size(1)
                wd = inp[0].size(2)
                # Human Detection
                img = Variable(torch.cat(img)).cuda()
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                im_dim_list = im_dim_list.cuda()

                prediction = self.det_model(img, CUDA=True)
                # NMS process
                dets = dynamic_write_results(prediction,
                                             opt.confidence,
                                             opt.num_classes,
                                             nms=True,
                                             nms_conf=opt.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    for k in range(len(inp)):
                        if self.Q.full():
                            with self.Q.mutex:
                                self.Q.queue.clear()
                        self.Q.put((inp[k], orig_img[k], None, None))
                    continue

                im_dim_list = torch.index_select(im_dim_list, 0,
                                                 dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list,
                                           1)[0].view(-1, 1)

                # coordinate transfer
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor *
                                    im_dim_list[:, 1].view(-1, 1)) / 2

                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0,
                                                  im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0,
                                                  im_dim_list[j, 1])
                boxes = dets[:, 1:5].cpu()
                scores = dets[:, 5:6].cpu()

            for k in range(len(inp)):
                if self.Q.full():
                    with self.Q.mutex:
                        self.Q.queue.clear()
                self.Q.put((inp[k], orig_img[k], boxes[dets[:, 0] == k],
                            scores[dets[:, 0] == k]))