Example 1
def detectVideo(path, output_path=""):
    yolo = YOLO()
    if path.isnumeric():
        cap = cv2.VideoCapture(int(path))
    else:
        cap = cv2.VideoCapture(path)
    if not cap.isOpened():
        raise IOError("Couldn't open camera or video")
    video_FourCC = int(cap.get(cv2.CAP_PROP_FOURCC))
    video_fps = cap.get(cv2.CAP_PROP_FPS)
    video_size = (int(cap.get(cv2.CAP_PROP_FRAME_WIDTH)),
                  int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT)))
    isOutput = output_path != ""
    if isOutput:
        print("!!! TYPE:", type(output_path), type(video_FourCC),
              type(video_fps), type(video_size))
        out = cv2.VideoWriter(output_path, video_FourCC, video_fps, video_size)
    while True:
        # get a frame
        ret, frame = cap.read()
        if not ret:
            # No more frames (or the stream ended); stop instead of passing None to the detector
            break
        # show a frame
        result = yolo.detect(frame)
        cv2.imshow("capture", result)
        if cv2.waitKey(1) > 0:
            break
        if isOutput:
            out.write(result)
    if isOutput:
        out.release()
    cap.release()
    cv2.destroyAllWindows()
    print("Finished reading capture")
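A minimal usage sketch for detectVideo, assuming it is run as a script with OpenCV installed; the file names below are placeholders, not from the original source.

# Hedged usage sketch: the paths below are placeholders.
if __name__ == "__main__":
    # A numeric string such as "0" opens the corresponding webcam via the
    # isnumeric() branch above; any other string is treated as a video file.
    detectVideo("0")
    detectVideo("input.mp4", output_path="annotated.mp4")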
Example 2
def get_result2json(cocoGt, data_path, log_dir, model_name, classes_path,
                    json_name):
    yolo = YOLO(model_path=os.path.join(log_dir, model_name),
                classes_path=classes_path)
    result_list = []
    for img_id in cocoGt.getImgIds():
        Img = cocoGt.loadImgs(img_id)[0]
        image = Image.open(os.path.join(data_path, Img['file_name']))
        box_result, score_result = yolo.detect(image)
        for i in range(box_result.shape[0]):
            x1, y1, x2, y2, cls = box_result[i]
            score = score_result[i]
            result_list.append({
                "image_id": img_id,
                "category_id": cls + 1,
                "bbox": [float(x1), float(y1), float(x2 - x1), float(y2 - y1)],
                "score": score
            })
    # Save the detection results to a JSON file
    json_str = json.dumps(result_list, cls=MyEncoder)
    with open('./eval/{}'.format(json_name), 'w') as json_file:
        json_file.write(json_str)
    return './eval/{}'.format(json_name)
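A hedged usage sketch for get_result2json; every path and file name below is a placeholder, and an ./eval directory is assumed to exist since the function writes there. The COCO/COCOeval calls are the standard pycocotools API.

# Hedged usage sketch: all paths below are placeholders, and ./eval must already exist.
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

cocoGt = COCO("annotations/instances_val2017.json")      # ground-truth annotations
result_json = get_result2json(cocoGt,
                              data_path="val2017",        # image directory
                              log_dir="logs",
                              model_name="best_weights.h5",
                              classes_path="model_data/coco_classes.txt",
                              json_name="detections.json")
cocoDt = cocoGt.loadRes(result_json)                      # load the detections written above
cocoEval = COCOeval(cocoGt, cocoDt, "bbox")
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()                                      # prints the COCO mAP table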
Example 3
def detectFolder(path, outputPath="output"):
    if not os.path.exists(outputPath):
        os.mkdir(outputPath)
    yolo = YOLO()
    names = os.listdir(path)
    for name in names:
        filepath = os.path.join(path, name)
        image = cv2.imread(filepath)
        if image is None:
            print("Failed to open file {}".format(filepath))
            continue  # skip entries that cannot be read as images
        result = yolo.detect(image)
        if result is not None:
            output = os.path.join(outputPath, name)
            print(output)
            cv2.imwrite(output, result)
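A hedged usage sketch for detectFolder; the directory names are placeholders.

# Hedged usage sketch: directory names are placeholders.
if __name__ == "__main__":
    detectFolder("test_images", outputPath="detections")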
Example 4
def detectImage():
    yolo = YOLO(configPath="yolo4/yolov4-tiny.cfg",
                weightPath="yolo4/yolov4-tiny_final.weights",
                classPath="yolo4/obj.names")
    while True:
        path = input("Input filename:")
        if path == "exit":
            break
        image = cv2.imread(path)
        if image is None:
            print('Open Error! Try again!')
            continue
        result = yolo.detect(image)
        if result is not None:
            cv2.imshow("Result", result)
            cv2.waitKey()
            cv2.destroyAllWindows()
    print("Input finished")
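A hedged usage sketch for detectImage; it assumes the yolo4/ config, weights, and class-name files referenced above are present.

# Hedged usage sketch: requires the yolo4/ files referenced above.
if __name__ == "__main__":
    detectImage()   # type "exit" at the prompt to stop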
Example 5
class ImageAnalysisService:
    """Image detection service that identifies the location and name of objects
       in an image. It uses YOLO for the object detection task. If requested, it
       also returns the original image with overlaid object bounding boxes and
       their labels.
    """
    def __init__(self):
        self.yolo = YOLO()
        self.CONFIDENCE_THRESHOLD = 0.3
        self.DEFAULT_DRAW_COLOR = (0, 255, 0)
        self.target_labels = [
            'car', 'person', 'bus', 'truck', 'bicycle', 'motorbike'
        ]

    def detect(self, request):
        """Detects the object in image using YOLO"""
        return self._run_detection(self._parse_params(request))

    def _parse_params(self, payload):
        """Parses the request params"""
        try:
            default_params = {}
            default_params['image'] = payload['image']
            default_params['createImage'] = payload.get('createImage', False)
            default_params['summerize'] = payload.get('summerize', False)
            default_params['confidenceThreshold'] = payload.get(
                'confidenceThreshold', self.CONFIDENCE_THRESHOLD)
            if 'drawRGBColor' in payload:
                default_params['drawRGBColor'] = make_tuple(
                    payload['drawRGBColor'])
            else:
                default_params['drawRGBColor'] = self.DEFAULT_DRAW_COLOR

            return default_params

        except Exception as err:
            raise BadRequest(f"Bad Request: {err}")

    def _run_detection(self, params):
        image = self._parse_image(params)
        detections = self.yolo.detect(image)
        return self._format_results(image, detections, params)

    def _parse_image(self, params):
        if params['image'].startswith('http'):
            return io.imread(params['image'])
        else:
            bgr_encoded_image = Image.open(
                BytesIO(base64.b64decode(params['image'])))
            rgb_encoded_image = cv2.cvtColor(np.array(bgr_encoded_image),
                                             cv2.COLOR_BGR2RGB)
            return rgb_encoded_image

    def _format_results(self, image, detections, params):

        if params['createImage']:
            image = self._draw_objects(image, detections, params)
            image = self.base64_encoded(image)
        else:
            image = None

        if params['summerize']:
            detections = self._group_detections(detections)
        else:
            for detection in detections:
                detection['confidence'] = str(detection['confidence'])

        return {'detections': detections, 'image': image}

    def _group_detections(self, detections):
        groups = defaultdict(int)
        for detection in detections:
            if detection['label'] in self.target_labels:
                groups[detection['label']] += 1
        return groups

    def _draw_objects(self, image, detections, params):

        for detection in detections:
            if (detection['label'] in self.target_labels and
                    detection['confidence'] >= params['confidenceThreshold']):
                point_top_left = (detection['topleft']['x'],
                                  detection['topleft']['y'])
                point_bottom_right = (detection['bottomright']['x'],
                                      detection['bottomright']['y'])
                image = cv2.rectangle(image, point_top_left,
                                      point_bottom_right,
                                      params['drawRGBColor'], 2)
        # Convert the channel order once, after drawing; doing it inside the loop
        # would flip the colors back and forth when several boxes are drawn.
        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
        image = image_resize(image, height=200, width=400)
        return image

    def base64_encoded(self, image):
        _, values = cv2.imencode('.jpg', image)
        return base64.b64encode(values)
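A hedged usage sketch for ImageAnalysisService; the image file and parameter values are illustrative, and the request keys mirror those read in _parse_params.

# Hedged usage sketch: the payload values are illustrative only.
import base64

with open("street.jpg", "rb") as f:                # placeholder image file
    encoded = base64.b64encode(f.read()).decode("utf-8")

service = ImageAnalysisService()
response = service.detect({
    "image": encoded,                  # or an http(s) URL, per _parse_image
    "createImage": True,               # return the annotated image, base64-encoded
    "summerize": True,                 # key spelling follows the service's own schema
    "confidenceThreshold": 0.5,
    "drawRGBColor": "(255, 0, 0)",     # parsed with make_tuple in _parse_params
})
print(response["detections"])          # per-label counts when summerize is True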
Example 6
video = Video(path)
video.read_frames()
(H, W) = video.shape

yolo = YOLO(conf_thresh=0.65, nms_thresh=0.5)
yolo.prepare_network()

if __name__ == "__main__":

    start_time = time.time()

    print("[INFO] Collecting detections...")
    trackers = []  # ensure the tracking branch below is defined even before the first detection
    for f in tqdm(range(video.total_frame)):
        if f % N == 0:
            # yolo detection
            detections = yolo.detect(video.frames_inp[f], H, W)
            if detections:
                trackers = []
                frame = video.frames_inp[f]  # pick the related frame
                for detection in detections:
                    if detection:
                        trackers.append(Tracker(tracker_type))
                        trackers[-1].start(f, frame, detection)
        else:
            detections = []
            for tracker in trackers:
                detection = tracker.update(video.frames_inp[f])
                if detection:
                    detections.append(detection)

            frame_detections = (f, detections)
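The loop above assumes a Tracker wrapper with start and update methods. A minimal sketch of such a wrapper, built on OpenCV's object trackers (opencv-contrib-python), is shown below; it illustrates the assumed interface only, it is not the project's own class, and the (x, y, w, h) box format is an assumption.

# Hedged sketch of the Tracker interface assumed above; not the project's own class.
import cv2

class Tracker:
    _FACTORIES = {
        "kcf": cv2.TrackerKCF_create,     # requires opencv-contrib-python
        "csrt": cv2.TrackerCSRT_create,
    }

    def __init__(self, tracker_type):
        self._tracker = self._FACTORIES[tracker_type]()

    def start(self, frame_idx, frame, detection):
        # detection is assumed to be an (x, y, w, h) box; adapt to the real format.
        self._tracker.init(frame, tuple(detection))

    def update(self, frame):
        ok, box = self._tracker.update(frame)
        return box if ok else None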