Example 1
def ObjectTracking(self):
    detector = Detector()
    myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                        recursive=True)
    # Resume ID numbering after the highest ID found in previously saved snapshots
    newdict = reduce(reduce_tracking, myiter, dict())
    startID = max(map(int, newdict.keys()), default=0) + 1
    ct = CentroidTracker(startID=startID)
    #camera = cv2.VideoCapture(0)
    camera = cv2.VideoCapture(
        'nvarguscamerasrc ! '
        'video/x-raw(memory:NVMM), width=3280, height=2464, '
        'format=(string)NV12, framerate=(fraction)20/1 ! '
        'nvvidconv ! video/x-raw, format=(string)BGRx ! '
        'videoconvert ! video/x-raw, format=(string)BGR ! appsink',
        cv2.CAP_GSTREAMER)
    if not camera.isOpened():
        raise RuntimeError('Could not start camera.')

    objects = {}  # last tracker state, reported on shutdown
    try:
        while True:
            grabbed, img = camera.read()
            if not grabbed:
                continue
            output = detector.prediction(img)
            df = detector.filter_prediction(output, img)
            img = detector.draw_boxes(img, df)
            boxes = df[['x1', 'y1', 'x2', 'y2']].values
            previous_object_ID = ct.nextObjectID
            #self.update_state(state='PROGRESS',
            #        meta={
            #            'object_id': previous_object_ID,
            #            })
            objects = ct.update(boxes)
            if (len(boxes) > 0
                    and df['class_name'].str.contains('person').any()
                    and previous_object_ID in objects):
                for (objectID, centroid) in objects.items():
                    text = "ID {}".format(objectID)
                    cv2.putText(img, text,
                                (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0),
                               -1)

                day = datetime.now().strftime("%Y%m%d")
                directory = os.path.join(IMAGE_FOLDER, 'pi', day)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                ids = "-".join(str(i) for i in objects.keys())
                hour = datetime.now().strftime("%H%M%S")
                filename_output = os.path.join(
                    directory, "{}_person_{}_.jpg".format(hour, ids))
                cv2.imwrite(filename_output, img)
            #time.sleep(0.100)
    except KeyboardInterrupt:
        print('interrupted!')
    except Exception as e:
        print('interrupted by:', e)
    finally:
        camera.release()
        print(type(objects))
        print(objects)
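
The helper reduce_tracking is not reproduced in any of these examples. Judging from the snapshot filenames the loop writes ("{hour}_person_{id-id-...}_.jpg") and from how startID is derived, a minimal sketch could look like the following; the regular expression and the dict layout are assumptions, not the project's actual implementation.

import os
import re

def reduce_tracking(acc, filepath):
    # Hypothetical reducer: collect the object IDs embedded in previously
    # saved snapshot filenames such as "143205_person_3-4_.jpg", so a new
    # CentroidTracker can continue numbering after the highest existing ID.
    match = re.search(r'_person_([\d-]+)_', os.path.basename(filepath))
    if match:
        for object_id in match.group(1).split('-'):
            acc[object_id] = filepath  # keys are ID strings, as max(map(int, ...)) expects
    return acc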
Example 2
def ObjectTracking(self):
    detector = Detector()
    myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                        recursive=True)
    # Resume ID numbering after the highest ID found in previously saved snapshots
    newdict = reduce(reduce_tracking, myiter, dict())
    startID = max(map(int, newdict.keys()), default=0) + 1
    ct = CentroidTracker(startID=startID)
    camera = cv2.VideoCapture(gstreamer_pipeline(flip_method=0),
                              cv2.CAP_GSTREAMER)
    if not camera.isOpened():
        raise RuntimeError('Could not start camera.')

    objects = {}  # last tracker state, reported on shutdown
    try:
        while True:
            grabbed, img = camera.read()
            if not grabbed:
                continue
            # Keep only high-confidence detections of class 1 (the "person" class here)
            boxes, confs, clss = detector.prediction(img,
                                                     conf_th=0.8,
                                                     conf_class=[1])
            img = detector.draw_boxes(img, boxes, confs, clss)
            previous_object_ID = ct.nextObjectID
            objects = ct.update(boxes)
            if len(boxes) > 0 and 1 in clss and previous_object_ID in objects:
                print("detected {} {} {} {}".format(ct.nextObjectID, confs,
                                                    objects, boxes))

                # loop over the tracked objects
                for (objectID, centroid) in objects.items():
                    text = "ID {}".format(objectID)
                    cv2.putText(img, text,
                                (centroid[0] - 10, centroid[1] - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                    cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0),
                               -1)

                day = datetime.now().strftime("%Y%m%d")
                directory = os.path.join(IMAGE_FOLDER, 'pi', day)
                if not os.path.exists(directory):
                    os.makedirs(directory)
                ids = "-".join(str(i) for i in objects.keys())
                hour = datetime.now().strftime("%H%M%S")
                filename_output = os.path.join(
                    directory, "{}_person_{}_.jpg".format(hour, ids))
                cv2.imwrite(filename_output, img)
            time.sleep(0.100)
    except KeyboardInterrupt:
        print('interrupted!')
    except Exception as e:
        print('interrupted by:', e)
    finally:
        camera.release()
        print(type(objects))
        print(objects)
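
Example 2 relies on a gstreamer_pipeline() helper that is not shown here. A plausible sketch, parameterizing the same nvarguscamerasrc pipeline that Example 1 hard-codes, follows; the default argument values are assumptions, and the helper requires a Jetson CSI camera plus OpenCV built with GStreamer support.

def gstreamer_pipeline(capture_width=3280, capture_height=2464,
                       display_width=1280, display_height=720,
                       framerate=20, flip_method=0):
    # Hypothetical helper: builds the same pipeline string Example 1 uses,
    # with sensor resolution, output size and flip method made configurable.
    return (
        'nvarguscamerasrc ! '
        'video/x-raw(memory:NVMM), width={}, height={}, format=(string)NV12, '
        'framerate=(fraction){}/1 ! '
        'nvvidconv flip-method={} ! '
        'video/x-raw, width={}, height={}, format=(string)BGRx ! '
        'videoconvert ! video/x-raw, format=(string)BGR ! appsink'.format(
            capture_width, capture_height, framerate, flip_method,
            display_width, display_height))

# Usage, as in Example 2:
# camera = cv2.VideoCapture(gstreamer_pipeline(flip_method=0), cv2.CAP_GSTREAMER)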
Example 3
class Predictor(object):
    """Docstring for Predictor. """
    def __init__(self):
        self.detector = Detector()
        self.ct = CentroidTracker(maxDisappeared=20)

    def prediction(self, img, conf_th=0.3, conf_class=[]):
        output = self.detector.prediction(img)
        boxes, confs, clss = self.detector.filter_prediction(
            output, img, conf_th=conf_th, conf_class=conf_class)
        img = self.detector.draw_boxes(img, boxes, confs, clss)
        return img

    def object_track(self, img, conf_th=0.3, conf_class=[]):
        output = self.detector.prediction(img)
        boxes, confs, clss = self.detector.filter_prediction(
            output, img, conf_th=conf_th, conf_class=conf_class)
        img = self.detector.draw_boxes(img, boxes, confs, clss)
        objects = self.ct.update(boxes)
        if len(boxes) > 0 and 1 in clss:
            for (objectID, centroid) in objects.items():
                text = "ID {}".format(objectID)
                cv2.putText(img, text, (centroid[0] - 10, centroid[1] - 10),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
                cv2.circle(img, (centroid[0], centroid[1]), 4, (0, 255, 0), -1)
        return img

    def img_to_base64(self, img):
        """encode as a jpeg image and return it"""
        buffer = cv2.imencode('.jpg', img)[1].tobytes()
        jpg_as_text = base64.b64encode(buffer)
        base64_string = jpg_as_text.decode('utf-8')
        return base64_string
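
A minimal sketch of how Predictor might be driven frame by frame, assuming a local OpenCV camera source; the confidence threshold and the use of class 1 for people are assumptions carried over from the other examples.

import cv2

predictor = Predictor()
camera = cv2.VideoCapture(0)  # any OpenCV-readable source
try:
    while True:
        grabbed, frame = camera.read()
        if not grabbed:
            break
        # Draw boxes plus tracked IDs, then encode the frame for e.g. a web client
        frame = predictor.object_track(frame, conf_th=0.5, conf_class=[1])
        jpeg_b64 = predictor.img_to_base64(frame)
finally:
    camera.release()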
Example 4
    def ObjectTracking(self):
        detector = Detector()
        myiter = glob.iglob(os.path.join(IMAGE_FOLDER, '**', '*.jpg'),
                            recursive=True)
        # Resume ID numbering after the highest ID found in previously saved snapshots
        newdict = reduce(reduce_tracking, myiter, dict())
        startID = max(map(int, newdict.keys()), default=0) + 1
        ct = CentroidTracker(startID=startID)
        with PiCamera() as camera:
            camera.resolution = (1280, 960)  # capture at twice the processing width and height
            camera.rotation = int(os.environ['CAMERA_ROTATION'])
            camera.framerate = 10
            with PiRGBArray(camera, size=(WIDTH, HEIGHT)) as output:
                while True:
                    camera.capture(output, 'bgr', resize=(WIDTH, HEIGHT))
                    img = output.array
                    result = detector.prediction(img)
                    df = detector.filter_prediction(result, img)
                    img = detector.draw_boxes(img, df)
                    boxes = df[['x1', 'y1', 'x2', 'y2']].values
                    previous_object_ID = ct.nextObjectID
                    objects = ct.update(boxes)
                    if (len(boxes) > 0
                            and df['class_name'].str.contains('person').any()
                            and previous_object_ID in objects):
                        for (objectID, centroid) in objects.items():
                            text = "ID {}".format(objectID)
                            cv2.putText(img, text,
                                        (centroid[0] - 10, centroid[1] - 10),
                                        cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                                        (0, 255, 0), 2)
                            cv2.circle(img, (centroid[0], centroid[1]), 4,
                                       (0, 255, 0), -1)

                        day = datetime.now().strftime("%Y%m%d")
                        directory = os.path.join(IMAGE_FOLDER, 'pi', day)
                        if not os.path.exists(directory):
                            os.makedirs(directory)
                        ids = "-".join(str(i) for i in objects.keys())
                        hour = datetime.now().strftime("%H%M%S")
                        filename_output = os.path.join(
                            directory, "{}_person_{}_.jpg".format(hour, ids))
                        cv2.imwrite(filename_output, img)
                    time.sleep(0.300)
                    # Reset the stream so the next capture() reuses a clean buffer
                    output.truncate(0)
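
All of the examples assume a CentroidTracker with a particular interface: it is constructed with startID and/or maxDisappeared, exposes nextObjectID, and update(boxes) returns a mapping from object ID to centroid. A compact sketch of that interface follows; the greedy nearest-centroid matching is an assumption, and the project's real tracker may match detections differently.

from collections import OrderedDict

import numpy as np


class CentroidTracker:
    def __init__(self, startID=0, maxDisappeared=50):
        self.nextObjectID = startID            # ID given to the next new object
        self.maxDisappeared = maxDisappeared   # frames an object may go unseen
        self.objects = OrderedDict()           # objectID -> centroid (x, y)
        self.disappeared = OrderedDict()       # objectID -> consecutive misses

    def register(self, centroid):
        self.objects[self.nextObjectID] = centroid
        self.disappeared[self.nextObjectID] = 0
        self.nextObjectID += 1

    def deregister(self, objectID):
        del self.objects[objectID]
        del self.disappeared[objectID]

    def update(self, boxes):
        # No detections: age every tracked object and drop the stale ones.
        if len(boxes) == 0:
            for objectID in list(self.disappeared):
                self.disappeared[objectID] += 1
                if self.disappeared[objectID] > self.maxDisappeared:
                    self.deregister(objectID)
            return self.objects
        centroids = np.array([((x1 + x2) // 2, (y1 + y2) // 2)
                              for (x1, y1, x2, y2) in boxes])
        if not self.objects:
            for centroid in centroids:
                self.register(centroid)
            return self.objects
        # Greedy nearest-centroid matching between tracked objects and detections.
        objectIDs = list(self.objects)
        previous = np.array(list(self.objects.values()))
        dist = np.linalg.norm(previous[:, None, :] - centroids[None, :, :], axis=2)
        used_rows, used_cols = set(), set()
        for row, col in zip(*np.unravel_index(np.argsort(dist, axis=None), dist.shape)):
            if row in used_rows or col in used_cols:
                continue
            self.objects[objectIDs[row]] = centroids[col]
            self.disappeared[objectIDs[row]] = 0
            used_rows.add(row)
            used_cols.add(col)
        for row in set(range(dist.shape[0])) - used_rows:   # unmatched tracks
            objectID = objectIDs[row]
            self.disappeared[objectID] += 1
            if self.disappeared[objectID] > self.maxDisappeared:
                self.deregister(objectID)
        for col in set(range(dist.shape[1])) - used_cols:   # unmatched detections
            self.register(centroids[col])
        return self.objects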
Example 5
    def __init__(self):
        self.detector = Detector()
        self.ct = CentroidTracker(maxDisappeared=20)
Example 6
def load_detector():
    """Create the detector and tracker once as module-level globals."""
    global detector, ct
    detector = Detector()
    ct = CentroidTracker(maxDisappeared=50)
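
load_detector() populates module-level globals so the model is built once per process (for example, once per web-server worker). A minimal sketch of how those globals might then be used per frame follows; track_frame, its defaults, and the prediction/filter_prediction call pattern (borrowed from Example 3) are assumptions.

load_detector()  # build the detector and tracker once, at process start-up

def track_frame(img, conf_th=0.5, conf_class=[1]):
    # Hypothetical per-frame helper reusing the module-level detector/ct globals.
    output = detector.prediction(img)
    boxes, confs, clss = detector.filter_prediction(
        output, img, conf_th=conf_th, conf_class=conf_class)
    return ct.update(boxes)  # objectID -> centroid for this frame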