Example #1
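A frame-streaming generator: it copies the latest frame from the ONNX runner, wraps the current predictions in Detection objects, feeds them to the active scenario, draws the scenario's counter, constraint and tracked objects on the frame, and yields the JPEG-encoded result as one part of a multipart (MJPEG) HTTP response.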
    def _gen():
        detected = {}  # FIXME need to gc
        counter = 0
        while True:
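            # Work on a copy of the latest frame so drawing does not mutate
            # the shared buffer.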
            img = onnx.last_img.copy()
            detections = []
            if inference:
                height, width = img.shape[:2]

                # Draw the area of interest once per frame.
                if onnx.has_aoi:
                    draw_aoi(img, onnx.aoi_info)

                predictions = onnx.last_prediction
                for prediction in predictions:
                    tag = prediction["tagName"]

                    # Map the prediction's bounding box to pixel coordinates.
                    (x1, y1), (x2, y2) = parse_bbox(prediction, width, height)

                    detections.append(
                        Detection(tag, x1, y1, x2, y2,
                                  prediction["probability"]))

                    # Per-detection drawing is disabled here; the scenario
                    # draws its own overlays further down.
                    if prediction["probability"] > onnx.threshold:
                        if onnx.has_aoi:
                            if not is_inside_aoi(x1, y1, x2, y2,
                                                 onnx.aoi_info):
                                continue

                        # img = cv2.rectangle(
                        #     img, (x1, y1), (x2, y2), (255, 255, 255), 1)
                        # img = draw_confidence_level(img, prediction)

            # Feed the detections to the scenario (tracking / counting) and
            # time each drawing step.
            t0 = time.time()
            scenario.update(detections)
            print("update", time.time() - t0)
            t0 = time.time()
            scenario.draw_counter(img)
            print("draw", time.time() - t0)
            t0 = time.time()
            scenario.draw_constraint(img)
            print("draw c", time.time() - t0)
            scenario.draw_objs(img)

            # Cap the loop at roughly 50 iterations per second.
            time.sleep(0.02)

            # Encode the annotated frame as JPEG and yield it as one part of
            # a multipart (MJPEG) response.
            yield (b"--frame\r\n"
                   b"Content-Type: image/jpeg\r\n\r\n" +
                   cv2.imencode(".jpg", img)[1].tobytes() + b"\r\n")
Example #2
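The predict() method of the inference runner, using the local model: it resizes the frame to fit the model's input size while preserving the aspect ratio, scores it under a lock, filters the predictions by part name, area of interest and confidence threshold, updates the per-tag counts and the image/prediction buffers, optionally stores retraining images and notifies IoT Hub, feeds the detections to the scenario for drawing, and keeps a moving average of the inference time.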
    def predict(self, image):

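        # Compute a target size that fits inside IMG_WIDTH x IMG_HEIGHT while
        # preserving the aspect ratio; the tiny epsilon guards against
        # floating-point truncation when converting to int.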
        width = self.IMG_WIDTH
        ratio = self.IMG_WIDTH / image.shape[1]
        height = int(image.shape[0] * ratio + 0.000001)
        if height >= self.IMG_HEIGHT:
            height = self.IMG_HEIGHT
            ratio = self.IMG_HEIGHT / image.shape[0]
            width = int(image.shape[1] * ratio + 0.000001)

        image = cv2.resize(image, (width, height))

        # run inference; the lock serializes access to the shared model
        with self.mutex:
            predictions, inf_time = self.model.Score(image)

        # check whether it's the tag we want
        predictions = list(
            p for p in predictions if p['tagName'] in self.model.parts)


        # check whether it's inside aoi (if has)
        if self.has_aoi:
            _predictions = []
            for p in predictions:
                (x1, y1), (x2, y2) = parse_bbox(p, width, height)
                if is_inside_aoi(x1, y1, x2, y2, self.aoi_info):
                    _predictions.append(p)
            predictions = _predictions


        # update detection status before filtering by threshold
        self.update_detection_status(predictions)

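        # Still before threshold filtering: optionally keep the frame for
        # retraining and forward the predictions to IoT Hub.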
        if self.is_retrain:
            self.process_retrain_image(predictions, image)

        if self.iothub_is_send:
            self.process_send_message_to_iothub(predictions)


        # keep only predictions at or above the confidence threshold
        predictions = list(
            p for p in predictions if p['probability'] >= self.threshold)


        # update last_prediction_count
        _last_prediction_count = {}
        for p in predictions:
            tag = p['tagName']
            if tag not in _last_prediction_count:
                _last_prediction_count[tag] = 1
            else:
                _last_prediction_count[tag] += 1
        self.last_prediction_count = _last_prediction_count

        # update the buffer
        # no need to copy since resize already did it
        self.last_img = image
        self.last_prediction = predictions

        # FIXME support more scenarios
        # Update Tracker / Scenario
        _detections = []
        for prediction in predictions:
            tag = prediction['tagName']
            #if prediction['probability'] > 0.5:
            (x1, y1), (x2, y2) = parse_bbox(prediction, width, height)
            _detections.append(
                Detection(tag, x1, y1, x2, y2, prediction['probability']))
        if self.scenario:
            self.scenario.update(_detections)


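        # Redraw the output frame, optionally signal LVA, then let the
        # scenario overlay its counter (and tracked objects in 'DD' mode).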
        self.draw_img()
        if self.model.send_video_to_cloud:
            self.precess_send_signal_to_lva()

        if self.scenario:
            #print('drawing...', flush=True)
            #print(self.scenario, flush=True)
            self.last_drawn_img = self.scenario.draw_counter(
                self.last_drawn_img)
            # FIXME close this
            # self.scenario.draw_constraint(self.last_drawn_img)
            if self.get_mode() == 'DD':
                self.scenario.draw_objs(self.last_drawn_img)

        # update avg inference time (moving avg)
        inf_time_ms = inf_time * 1000
        self.average_inference_time = (
            1 / 16 * inf_time_ms + 15 / 16 * self.average_inference_time
        )
Example #3
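A variant of predict() that delegates scoring to an HTTP endpoint instead of the local model: frames are posted either as raw bytes (port-7777 endpoint) or as a JPEG, the LVA-format response is converted to the Custom Vision prediction format, and the rest of the pipeline mirrors Example #2, with IoT Hub and LVA notifications gated on new scenario events in 'ES' mode.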
    def predict(self, image):

        width = self.IMG_WIDTH
        ratio = self.IMG_WIDTH / image.shape[1]
        height = int(image.shape[0] * ratio + 0.000001)
        if height >= self.IMG_HEIGHT:
            height = self.IMG_HEIGHT
            ratio = self.IMG_HEIGHT / image.shape[0]
            width = int(image.shape[1] * ratio + 0.000001)

        # Inference is delegated to an HTTP endpoint instead of calling
        # self.model.Score() locally: the port-7777 endpoint takes the raw
        # resized frame bytes, any other endpoint gets a JPEG-encoded frame.
        if ':7777/predict' in self.model.endpoint.lower():
            image = cv2.resize(image, (width, height))
            data = image.tobytes()
            res = requests.post(self.model.endpoint, data=data)
            if res.json()[1] == 200:
                lva_prediction = json.loads(res.json()[0])['inferences']
                inf_time = json.loads(res.json()[0])['inf_time']
                predictions = lva_to_customvision_format(lva_prediction)
            else:
                logger.warning('No inference result')
                predictions = []
                inf_time = 0
        else:
            image = cv2.resize(image, (416, 416))   # for yolo endpoint testing
            # Send the frame as a JPEG byte stream and time the request.
            str_encode = cv2.imencode('.jpg', image)[1].tobytes()
            f4 = BytesIO(str_encode)
            f5 = BufferedReader(f4)
            s = time.time()
            res = requests.post(self.model.endpoint, data=f5)
            inf_time = time.time() - s
            if res.status_code == 200:
                lva_prediction = res.json()['inferences']
                predictions = lva_to_customvision_format(lva_prediction)
            else:
                logger.warning('No inference result')
                predictions = []
            logger.warning('request prediction time: {}'.format(inf_time))

        # check whether it's the tag we want
        predictions = list(
            p for p in predictions if p["tagName"] in self.model.parts)

        # check whether it's inside aoi (if has)
        if self.has_aoi:
            _predictions = []
            for p in predictions:
                (x1, y1), (x2, y2) = parse_bbox(p, width, height)
                if is_inside_aoi(x1, y1, x2, y2, self.aoi_info):
                    _predictions.append(p)
            predictions = _predictions

        # update detection status before filtering by threshold
        self.update_detection_status(predictions)

        if self.is_retrain:
            self.process_retrain_image(predictions, image)

        # keep only predictions at or above the confidence threshold
        predictions = list(
            p for p in predictions if p["probability"] >= self.threshold)

        # update last_prediction_count
        _last_prediction_count = {}
        for p in predictions:
            tag = p["tagName"]
            if tag not in _last_prediction_count:
                _last_prediction_count[tag] = 1
            else:
                _last_prediction_count[tag] += 1
        self.last_prediction_count = _last_prediction_count

        # update the buffer
        # no need to copy since resize already did it
        self.last_img = image
        self.last_prediction = predictions

        # FIXME support more scenarios
        # Update Tracker / Scenario
        _detections = []
        for prediction in predictions:
            tag = prediction["tagName"]
            # if prediction['probability'] > 0.5:
            (x1, y1), (x2, y2) = parse_bbox(prediction, width, height)
            _detections.append(
                Detection(tag, x1, y1, x2, y2, prediction["probability"])
            )
        if self.scenario:
            self.scenario.update(_detections)

        self.draw_img()

        if self.scenario:
            # Counters are drawn only when the mode uses a zone ('ES') or a
            # counting line ('DD'/'PD'/'PC'); object overlays are drawn for
            # 'DD', and for 'PD' when the tracker is enabled.
            if (self.get_mode() == 'ES' and self.use_zone) or (
                    self.get_mode() in ['DD', 'PD', 'PC'] and self.use_line):
                self.scenario.draw_counter(self.last_drawn_img)
            if self.get_mode() == 'DD':
                self.scenario.draw_objs(self.last_drawn_img)
            if self.get_mode() == 'PD' and self.use_tracker:
                self.scenario.draw_objs(self.last_drawn_img)

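        # In event-based ('ES') mode, IoT Hub / LVA are only notified when the
        # scenario reports a new event; other modes send on every prediction.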
        if self.iothub_is_send:
            if self.get_mode() == 'ES':
                if self.scenario.has_new_event:
                    self.process_send_message_to_iothub(predictions)
            else:
                self.process_send_message_to_iothub(predictions)

        if self.send_video_to_cloud:
            if self.get_mode() == 'ES':
                if self.scenario.has_new_event:
                    self.precess_send_signal_to_lva()
            else:
                self.precess_send_signal_to_lva()

        # update avg inference time (moving avg)
        inf_time_ms = inf_time * 1000
        self.average_inference_time = (
            1 / 16 * inf_time_ms + 15 / 16 * self.average_inference_time
        )