Example no. 1
0
    def main_logic(self, *args, **kwargs):
        """Pull one frame from the input queue, run Haar-cascade face
        detection on it, and publish the detections to the output queue.

        Returns:
            True if a frame was processed and a result was queued,
            False if the input queue was empty (no work done).
        """
        try:
            frame = self.in_queue.get(block=False)
        except Empty:
            # No frame available: yield this time slice and report idle.
            time.sleep(0)
            return False

        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.face_cas.detectMultiScale(gray,
                                               scaleFactor=1.1,
                                               minNeighbors=5,
                                               minSize=(20, 20))

        new_instances = Instances(frame.shape[:2])
        if len(faces):
            faces = torch.from_numpy(faces)
            # detectMultiScale yields (x, y, w, h); convert to (x1, y1, x2, y2).
            faces[:, 2:] += faces[:, :2]
            new_instances.set("pred_boxes", Boxes(faces))
            new_instances.set("pred_classes",
                              torch.zeros(faces.size(0)).int())
        else:
            new_instances.set("pred_classes", [])

        # Keep only the freshest result: discard any unconsumed previous
        # output and count it as dropped.
        try:
            self.out_queue.get(block=False)
            self.state.dropped += 1
        except Empty:
            pass
        self.out_queue.put(new_instances)
        return True
Example no. 2
0
    def main_logic(self, *args, **kwargs):
        """Fetch one message from the input queue, run the YOLO model on its
        image payload, and forward the detections downstream.

        Returns:
            The output-queue put result (success flag) when a message was
            processed, or None when the input queue was empty.
        """
        msg = self.in_queue.non_blocking_get()
        if not msg:
            return None

        self.logger.info("Received the following message: %s", str(msg))
        im0 = msg.get_payload()
        img, *_ = letterbox(im0, new_shape=self.img_size)

        # BGR -> RGB, HWC -> CHW, then scale uint8 pixels to [0, 1].
        img = img[:, :, ::-1].transpose(2, 0, 1)
        img = np.ascontiguousarray(img,
                                   dtype=np.float16 if self.half else
                                   np.float32)  # uint8 to fp16/fp32
        img /= 255.0
        img = torch.from_numpy(img).to(self.device)
        if img.ndimension() == 3:
            # Add the batch dimension the model expects.
            img = img.unsqueeze(0)

        with torch.no_grad():
            pred, _ = self.model(img)
        det = non_max_suppression(pred, opt.conf_thres, opt.nms_thres)[0]

        res = Instances(im0.shape)
        if det is not None and len(det):
            # Rescale boxes from the letterboxed size back to the original
            # image size.
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4],
                                      im0.shape).round()
            res.set("pred_boxes", Boxes(det[:, :4]))
            res.set("scores", det[:, 4])
            res.set("class_scores", det[:, 5])
            res.set("pred_classes", det[:, 6].round().int())
        else:
            res.set("pred_boxes", [])

        msg.payload = PredictionPayload(res.to("cpu"))
        return self.out_queue.deque_non_blocking_put(msg)
Example no. 3
0
    def update_instances(self, instances: Instances):
        """Feed the detections in *instances* to the tracker and wrap the
        resulting tracks in a fresh Instances object.

        Returns an Instances carrying pred_boxes/scores (and, when any
        track exists, pred_classes and track_ids).
        """
        image_size = instances.image_size

        tracked = None
        if len(instances):
            # Build the (N, 6) detection matrix the tracker expects:
            # [x1, y1, x2, y2, score, class].
            box_arr = instances.get("pred_boxes").tensor.cpu().numpy()
            score_arr = instances.get("scores").cpu().unsqueeze(1).numpy()
            cls_arr = instances.get("pred_classes").cpu().unsqueeze(1).numpy()
            tracked = self.update(
                np.concatenate((box_arr, score_arr, cls_arr), axis=1))

        result = Instances(image_size)
        if tracked is not None:
            tracked = torch.tensor(tracked)
            result.set("pred_boxes", Boxes(tracked[:, :4]))
            result.set("scores", tracked[:, 4])
            if tracked.shape[0] != 0:
                # Class/track-id columns are only meaningful when the
                # tracker actually returned rows.
                result.set("pred_classes", tracked[:, 5].round().int())
                result.set("track_ids", tracked[:, -1].int())

        return result