Example #1
import paho.mqtt.client as mqtt  # assumed import; the snippet uses mqtt.Client
from camera import CAMERA        # assumed import, as in Example #3


class MQTTClient(object):
    def __init__(self, hostname, port, client_id):
        self.hostname = hostname
        self.port = port

        # Create MQTT client
        self.client = mqtt.Client(client_id=client_id,
                                  clean_session=True,
                                  userdata=None,
                                  protocol=mqtt.MQTTv31)

        # Register callback functions
        self.client.on_connect = self._on_connect
        self.client.on_message = self._on_message

        self.lastState = 0

        global camera
        self.camera = CAMERA()
        camera = self.camera
Example #2
import paho.mqtt.client as mqtt  # assumed import; the class uses mqtt.Client
from camera import CAMERA        # assumed import, as in Example #3


class MQTTClient(object):
    def __init__(self, hostname, port, client_id):
        self.hostname = hostname
        self.port = port

        # Create MQTT client
        self.client = mqtt.Client(client_id=client_id,
                                  clean_session=True,
                                  userdata=None,
                                  protocol=mqtt.MQTTv31)

        # Register callback functions
        self.client.on_connect = self._on_connect
        self.client.on_message = self._on_message

        self.lastState = 0

        global camera
        self.camera = CAMERA()
        camera = self.camera

    def _on_connect(self, client, userdata, flags, rc):
        print("Connected with result code " + str(rc))

    def _on_message(self, client, userdata, message):
        # Convert message payload to string (the decoded value is not used yet)
        message_string = message.payload.decode(encoding='UTF-8')

        self.cameraAction()

    def subscribe(self, topic):
        self.client.subscribe(topic)

    def cameraAction(self):
        image = 'image.jpg'
        self.camera.takePicture(image)
        encoded = self.camera.convertImageToBase64(image)
        # Note: the broker port is hard-coded here instead of using self.port
        self.camera.publishEncodedImage(encoded, self.hostname, 1883)

    def start(self):
        # Start MQTT client
        self.client.connect(self.hostname, self.port, 60)
        self.client.loop_start()
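A minimal sketch of how the class above might be driven (the broker address, client id, and topic below are illustrative placeholders, not values from the source):

import time

# Hypothetical usage of the MQTTClient class above
client = MQTTClient("localhost", 1883, "camera-client")
client.start()                      # connect and start the background network loop
client.subscribe("camera/trigger")  # placeholder topic; real code often subscribes in on_connect
while True:                         # keep the process alive so messages arrive
    time.sleep(1)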
Example #3
"""
Created on Thu Apr 15 10:23:53 2021

@author: loa
"""

from PyQt5.QtWidgets import QApplication
import sys
import os
import pathlib
import qdarkstyle
from camera import CAMERA

if __name__ == "__main__":

    appli = QApplication(sys.argv)
    appli.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
    p = pathlib.Path(__file__)
    sepa = os.sep
    confCameraPath = str(p.parent) + sepa + 'confCamera.ini'

    e = CAMERA(cam="cam1",
               fft='off',
               meas='on',
               affLight=False,
               aff='right',
               separate=False,
               multi=False)  #,confPath=confCameraPath)
    e.show()

    sys.exit(appli.exec_())
Example #4
File: frame.py Project: willvin313/copilot
class FRAME:
    fps: float
    camera: CAMERA
    yolo: classmethod
    PERSP_PERIOD = 100000
    YOLO_PERIOD = 2  # SECONDS
    verbose = 3

    yellow_lower = np.uint8([20, 50, 50])
    yellow_upper = np.uint8([35, 255, 255])
    white_lower = np.uint8([0, 200, 0])
    white_upper = np.uint8([180, 255, 100])
    lum_factor = 150
    max_gap_th = 2 / 5
    lane_start = [0.35, 0.75]

    ego_vehicle_offset = 0
    time = datetime.utcnow().timestamp()
    l_gap_skipped = 0
    l_breached = 0
    l_reset = 0
    l_appended = 0

    n_gap_skipped = 0
    n_breached = 0
    n_reset = 0
    n_appended = 0
    _defaults = {
        "id": 0,
        "first": True,
        "speed": 0,
        "n_objects": 0,
        "camera": CAMERA(),
        "image": [],
        "LANE_WIDTH": 3.66,
        "fps": 22,
        "ego_vehicle_offset": 0,
        'verbose': 3,
        'YOLO_PERIOD': 2,
        "yellow_lower": np.uint8([20, 50, 50]),
        "yellow_upper": np.uint8([35, 255, 255]),
        "white_lower": np.uint8([0, 200, 0]),
        "white_upper": np.uint8([180, 255, 100]),
        "lum_factor": 150,
        "max_gap_th": 2 / 5,
        "lane_start": [0.35, 0.75],
        "verbose": 3
    }

    @classmethod
    def get_defaults(cls, n):
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    def __init__(self, **kwargs):
        # calc pers => detect cars and dist > detect lanes

        self.__dict__.update(self._defaults)  # set up default values
        self.__dict__.update(kwargs)  # and update with user overrides
        self.speed = self.get_speed()
        ### IMAGE PROPERTIES
        self.image: np.ndarray
        # if  self.image.size ==0 :
        #     raise ValueError("No Image")
        self.img_shp: (int, int)
        self.area: int

        self.temp_dir = './images/detection/'
        self.perspective_done_at = datetime.utcnow().timestamp()

        # self.image =  self.camera.undistort(self.image)
        ### OBJECT DETECTION AND TRACKING
        self.yolo = YOLO()
        self.first_detect = True
        self.obstacles: [OBSTACLE] = []
        self.__yp = int(self.YOLO_PERIOD * self.fps)
        ### LANE FINDER
        self.count = 0
        self.lane: LANE_DETECTION = None

    def perspective_tfm(self, pos):
        now = datetime.utcnow().timestamp()
        if now - self.perspective_done_at > self.PERSP_PERIOD:
            self.lane = LANE_DETECTION(self.image,
                                       self.fps,
                                       verbose=self.verbose)
        return cv2.perspectiveTransform(pos, self.lane.trans_mat)

    def determine_stats(self):
        n = 30
        t = datetime.utcnow().timestamp()
        dt = int(t - self.time)
        if self.count % (self.fps * n) == 0:

            self.n_gap_skipped = int(
                (self.lane.n_gap_skip - self.l_gap_skipped) * 100 /
                (self.fps * n))
            self.n_appended = int((self.lane.lane.appended - self.l_appended) *
                                  100 / (self.fps * n))
            self.n_breached = int((self.lane.lane.breached - self.l_breached) *
                                  100 / (self.fps * n))
            self.n_reset = int(
                (self.lane.lane.reset - self.l_reset) * 100 / (self.fps * n))

            self.l_gap_skipped = self.lane.n_gap_skip
            self.l_appended = self.lane.lane.appended
            self.l_breached = self.lane.lane.breached
            self.l_reset = self.lane.lane.reset
            print("SKIPPED {:d}% BREACHED {:d}% RESET {:d}% APPENDED {:d}% | Time {:d}s , Processing FPS {:.2f} vs Desired FPS {:.2f}  "\
                .format(self.n_gap_skipped, self.n_breached, self.n_reset, self.n_appended,\
                    dt, self.fps * n / dt, self.fps ))
            self.time = t

    def get_speed(self):
        return 30

    def process_and_plot(self, image):
        self.update_trackers(image)
        lane_img = self.lane.process_image(image, self.obstacles)
        self.determine_stats()
        return lane_img

    @staticmethod
    def corwh2box(corwh):
        box = BoundBox(int(corwh[0]), int(corwh[1]), int(corwh[0] + corwh[2]),
                       int(corwh[1] + corwh[3]))
        return box

    def tracker2object(self, boxes: [OBSTACLE], th=0.5):
        n_b = len(boxes)
        n_o = len(self.obstacles)
        iou_mat = np.zeros((n_o, n_b))
        for i in range(n_o):
            for j in range(n_b):
                iou_mat[i, j] = bbox_iou(self.obstacles[i], boxes[j])
        count = min(n_b, n_o)
        used = []
        idmax = 0
        obstacles = []
        while count > 0:
            r, k = np.unravel_index(np.argmax(iou_mat, axis=None),
                                    iou_mat.shape)
            if iou_mat[r, k] > th:
                used.append(k)
                obstacle = self.obstacles[r]
                box = boxes[k]
                if idmax < obstacle._id:
                    idmax = obstacle._id
                obstacle.update_box(box)
                obstacles.append(obstacle)
            iou_mat[r, :] = -99
            iou_mat[:, k] = -99
            count = count - 1
        idx = range(n_b)
        idx = [elem for elem in idx if elem not in used]
        self.obstacles = obstacles
        for i, c in enumerate(idx):
            # dst  =  self.calculate_position(boxes[c])
            obstacle = OBSTACLE(boxes[c], i + idmax + 1)
            self.obstacles.append(obstacle)
        return

    def update_trackers(self, img):
        image = img.copy()
        # Rebuild the list of surviving obstacles; deleting from the list
        # while enumerating it would skip the following element
        survivors = []
        for obs in self.obstacles:
            success, corwh = obs.tracker.update(image)
            if not success:
                continue
            box = self.corwh2box(corwh)
            # dst = self.calculate_position(box)
            obs.update_coord(box)
            survivors.append(obs)
        self.obstacles = survivors

        if self.count % self.__yp == 0:
            boxes = self.yolo.make_predictions(image,
                                               obstructions=obstructions,
                                               plot=True)
            self.tracker2object(boxes)
            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            n_obs = len(self.obstacles)
            for i in range(n_obs):
                tracker = cv2.TrackerKCF_create()
                # tracker = cv2.TrackerMIL_create()  # Note: Try comparing KCF with MIL
                box = self.obstacles[i]
                bbox = (box.xmin, box.ymin, box.xmax - box.xmin,
                        box.ymax - box.ymin)
                # print(bbox)
                success = tracker.init(image, bbox)
                if success:
                    self.obstacles[i].tracker = tracker

        self.count += 1

        return

    def warp(self, img):
        now = datetime.utcnow().timestamp()
        if now - self.perspective_done_at > self.PERSP_PERIOD:
            self.lane = LANE_DETECTION(self.image, self.fps)
        return cv2.warpPerspective(img,
                                   self.lane.trans_mat,
                                   self.lane.UNWARPED_SIZE,
                                   flags=cv2.WARP_FILL_OUTLIERS +
                                   cv2.INTER_CUBIC)

    def unwarp(self, img):
        now = datetime.utcnow().timestamp()
        if now - self.perspective_done_at > self.PERSP_PERIOD:
            self.lane = LANE_DETECTION(self.image, self.fps)
        return cv2.warpPerspective(img,
                                   self.lane.trans_mat,
                                   self.img_shp,
                                   flags=cv2.WARP_FILL_OUTLIERS +
                                   cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)

    def process_video(self, file_path, fps_factor,
                      video_out="videos/output11.mov", pers_frame_time=14,
                      t0=None, t1=None):

        video_reader = cv2.VideoCapture(file_path)
        fps_actual = video_reader.get(cv2.CAP_PROP_FPS)

        self.fps = fps_actual // fps_factor
        nb_frames = int(video_reader.get(cv2.CAP_PROP_FRAME_COUNT))
        frame_h = int(video_reader.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_w = int(video_reader.get(cv2.CAP_PROP_FRAME_WIDTH))
        frame_h_out = int(frame_h * (1 - self.ego_vehicle_offset))
        print("{:s} WIDTH {:d} HEIGHT {:d} FPS {:.2f} DUR {:.1f} s".format(\
            file_path,frame_w,frame_h,fps_actual,nb_frames//fps_actual
            ))

        video_writer = cv2.VideoWriter(
            video_out, cv2.VideoWriter_fourcc('m', 'p', '4', 'v'), self.fps,
            (frame_w, frame_h_out))
        # Seek to the frame used to fit the perspective transform
        pers_frame = int(pers_frame_time * fps_actual)
        video_reader.set(cv2.CAP_PROP_POS_FRAMES, pers_frame)
        _, self.image = video_reader.read()
        self.image = self.image[:frame_h_out, :, :]
        self.img_shp = (self.image.shape[1], self.image.shape[0])
        # self.ego_vehicle_offset = self.img_shp[0]*int(1-self.ego_vehicle_offset)
        self.area = self.img_shp[0] * self.img_shp[1]
        self.lane = LANE_DETECTION(self.image, self.fps,\
            verbose=self.verbose,
            yellow_lower =self.yellow_lower,
            yellow_upper = self.yellow_upper,
            white_lower = self.white_lower,
            white_upper = self.white_upper,
            lum_factor = self.lum_factor,
            max_gap_th = self.max_gap_th,
            lane_start=self.lane_start ,
        )
        t1 = t1 if t1 is not None else nb_frames / fps_actual
        t0 = t0 if t0 is not None else pers_frame_time
        video_reader.set(cv2.CAP_PROP_POS_FRAMES, t0 * fps_actual)
        for i in tqdm(range(int(t0 * fps_actual), int(t1 * fps_actual)),
                      mininterval=3):
            status, image = video_reader.read()

            if status and (i % fps_factor == 0):
                image = image[:frame_h_out, :, :]
                try:
                    procs_img = self.process_and_plot(image)
                    video_writer.write(procs_img)
                except Exception:
                    print("\n\rGOT EXCEPTION PROCESSING THE IMAGE\033[F",
                          self.count)
                #  l1 =  self.lane.white_lower[1]
                #  self.lane.compute_bounds(image)
                #  print(l1,"->",self.lane.white_lower[1])
        print("SKIPPED {:d} BREACHED {:d} RESET {:d} APPENDED {:d} | Total {:d} ".\
            format(self.lane.n_gap_skip, self.lane.lane.breached,\
                self.lane.lane.reset,self.lane.lane.appended, self.count))
        print("SAVED TO ", video_out)
        video_reader.release()
        video_writer.release()
        cv2.destroyAllWindows()

    def vehicle_speed(self):
        return
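A hedged usage sketch for the class above (the clip path, output path, and timestamps are illustrative assumptions, not values from the project):

# Hypothetical driver for FRAME.process_video; assumes the YOLO weights,
# LANE_DETECTION, and OBSTACLE dependencies the class relies on are available.
frame = FRAME(verbose=1, YOLO_PERIOD=2)
frame.process_video("videos/input.mov",   # placeholder source clip
                    fps_factor=2,         # process every 2nd frame
                    video_out="videos/output11.mov",
                    pers_frame_time=14,   # second used to fit the perspective
                    t0=14, t1=60)         # process seconds 14..60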
Example #5
class FRAME:
    fps: float
    UNWARPED_SIZE: (int, int)
    LANE_WIDTH: int
    WRAPPED_WIDTH: int
    camera: CAMERA
    yolo: classmethod
    PERSP_PERIOD = 100000
    YOLO_PERIOD = 0.5  # SECONDS
    _defaults = {
        "id": 0,
        "first": True,
        "speed": 0,
        "n_objects": 0,
        "camera": CAMERA(),
        "image": [],
        "LANE_WIDTH": 3.66,
        "fps": 22
    }

    @classmethod
    def get_defaults(cls, n):
        if n in cls._defaults:
            return cls._defaults[n]
        else:
            return "Unrecognized attribute name '" + n + "'"

    def __init__(self, **kwargs):
        # calc pers => detect cars and dist > detect lanes
        self.__dict__.update(self._defaults)  # set up default values
        self.__dict__.update(kwargs)  # and update with user overrides
        self.speed = self.get_speed()
        ### IMAGE PROPERTIES
        self.image: np.ndarray
        if self.image.size == 0:
            raise ValueError("No Image")
        self.temp_dir = './images/detection/'
        self.size: (int, int) = (self.image.shape[0], self.image.shape[1])
        self.UNWARPED_SIZE = (int(self.size[0]), int(self.size[1] * .4))
        self.WRAPPED_WIDTH = int(self.UNWARPED_SIZE[0] * 1.25)
        self.trans_mat = None
        self.inv_trans_mat = None
        self.pixels_per_meter = [0, 0]
        self.perspective_done_at = 0
        self.img_shp = (self.image.shape[1], self.image.shape[0])
        self.area = self.img_shp[0] * self.img_shp[1]
        # self.image =  self.camera.undistort(self.image)
        ### OBJECT DETECTION AND TRACKING
        self.yolo = YOLO()
        self.first_detect = True
        self.obstacles: [OBSTACLE] = []
        self.__yp = int(self.YOLO_PERIOD * self.fps)
        ### LANE FINDER
        self.lane_found = False
        self.count = 0
        self.mask = np.zeros((self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0], 3),
                             dtype=np.uint8)
        self.roi_mask = np.ones(
            (self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0], 3), dtype=np.uint8)
        self.total_mask = np.zeros_like(self.roi_mask)
        self.warped_mask = np.zeros(
            (self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0]), dtype=np.uint8)
        self.lane_count = 0

        self.left_line = LaneLineFinder(self.UNWARPED_SIZE,
                                        self.pixels_per_meter,
                                        -1.8288)  # 6 feet in meters
        self.right_line = LaneLineFinder(self.UNWARPED_SIZE,
                                         self.pixels_per_meter, 1.8288)

    def perspective_tfm(self, pos):
        now = datetime.utcnow().timestamp()
        if now - self.perspective_done_at > self.PERSP_PERIOD:
            self.calc_perspective()

        return cv2.perspectiveTransform(pos, self.trans_mat)
        #cv2.warpPerspective(image, self.trans_mat, self.UNWARPED_SIZE)

    def calc_perspective(self, verbose=True):
        roi = np.zeros((self.size[0], self.size[1]),
                       dtype=np.uint8)  # 720 , 1280
        roi_points = np.array([[0, self.size[0]], [self.size[1], self.size[0]],
                               [self.size[1] // 2 + 100, -0 * self.size[0]],
                               [self.size[1] // 2 - 100, -0 * self.size[0]]],
                              dtype=np.int32)
        cv2.fillPoly(roi, [roi_points], 1)

        Lhs = np.zeros((2, 2), dtype=np.float32)
        Rhs = np.zeros((2, 1), dtype=np.float32)
        grey = cv2.cvtColor(self.image, cv2.COLOR_BGR2GRAY)
        mn_hsl = np.median(grey)  #grey.median()
        edges = cv2.Canny(grey, int(mn_hsl * 4), int(mn_hsl * 3))
        # edges = cv2.Canny(grey[:, :, 1], 500, 400)
        edges2 = edges * roi
        cv2.imwrite(self.temp_dir + "mask.jpg", edges2)
        lines = cv2.HoughLinesP(edges * roi,
                                rho=4,
                                theta=np.pi / 180,
                                threshold=4,
                                minLineLength=80,
                                maxLineGap=40)

        # print(lines)
        for line in lines:

            for x1, y1, x2, y2 in line:
                normal = np.array([[-(y2 - y1)], [x2 - x1]], dtype=np.float32)
                normal /= np.linalg.norm(normal)
                point = np.array([[x1], [y1]], dtype=np.float32)
                outer = np.matmul(normal, normal.T)
                Lhs += outer
                Rhs += np.matmul(outer, point)
        vanishing_point = np.matmul(np.linalg.inv(Lhs), Rhs)
        top = vanishing_point[1] + 50
        bottom = self.size[1] - 100

        def on_line(p1, p2, ycoord):
            return [
                p1[0] + (p2[0] - p1[0]) / float(p2[1] - p1[1]) *
                (ycoord - p1[1]), ycoord
            ]

        #define source and destination targets
        p1 = [vanishing_point[0] - self.WRAPPED_WIDTH / 2, top]
        p2 = [vanishing_point[0] + self.WRAPPED_WIDTH / 2, top]
        p3 = on_line(p2, vanishing_point, bottom)
        p4 = on_line(p1, vanishing_point, bottom)
        src_points = np.array([p1, p2, p3, p4], dtype=np.float32)
        # print(src_points,vanishing_point)
        dst_points = np.array([[0, 0], [self.UNWARPED_SIZE[0], 0],
                               [self.UNWARPED_SIZE[0], self.UNWARPED_SIZE[1]],
                               [0, self.UNWARPED_SIZE[1]]],
                              dtype=np.float32)

        self.trans_mat = cv2.getPerspectiveTransform(src_points, dst_points)
        self.inv_trans_mat = cv2.getPerspectiveTransform(
            dst_points, src_points)
        min_wid = 1000
        img = cv2.warpPerspective(self.image, self.trans_mat,
                                  self.UNWARPED_SIZE)
        grey = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
        mask = grey[:, :, 1] > 128
        mask[:, :50] = 0
        mask[:, -50:] = 0
        cv2.imshow("grey", grey)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        mom = cv2.moments(mask[:, :self.UNWARPED_SIZE[0] // 2].astype(
            np.uint8))
        x1 = mom["m10"] / mom["m00"]
        mom = cv2.moments(mask[:,
                               self.UNWARPED_SIZE[0] // 2:].astype(np.uint8))
        x2 = self.UNWARPED_SIZE[0] // 2 + mom["m10"] / mom["m00"]

        if (x2 - x1 < min_wid):
            min_wid = x2 - x1
        self.pixels_per_meter[0] = min_wid / self.LANE_WIDTH
        if False:  #self.camera.callibration_done :
            Lh = np.linalg.inv(
                np.matmul(self.trans_mat, self.camera.cam_matrix))
        else:
            Lh = np.linalg.inv(self.trans_mat)
        self.pixels_per_meter[1] = self.pixels_per_meter[0] * np.linalg.norm(
            Lh[:, 0]) / np.linalg.norm(Lh[:, 1])
        self.perspective_done_at = datetime.utcnow().timestamp()
        if verbose:
            img_orig = cv2.polylines(self.image, [src_points.astype(np.int32)],
                                     True, (0, 0, 255),
                                     thickness=5)
            cv2.line(img, (int(x1), 0), (int(x1), self.UNWARPED_SIZE[1]),
                     (255, 0, 0), 3)
            cv2.line(img, (int(x2), 0), (int(x2), self.UNWARPED_SIZE[1]),
                     (0, 0, 255), 3)

            # vanishing_point is a 2x1 float array; cv2.circle needs an int tuple
            cv2.circle(img_orig,
                       (int(vanishing_point[0]), int(vanishing_point[1])),
                       10,
                       color=(0, 0, 255),
                       thickness=5)

            cv2.imwrite(self.temp_dir + "perspective1.jpg", img_orig)
            cv2.imwrite(self.temp_dir + "perspective2.jpg", img)
            # cv2.imshow(cv2.hconcat((img_orig, cv2.resize(img, img_orig.shape))))
        return

    def get_speed(self):
        return 30

    def determine_lane(self, box: OBSTACLE):
        points = np.array([box.xmid, box.ymid],
                          dtype='float32').reshape(1, 1, 2)
        new_points = cv2.perspectiveTransform(points, self.inv_trans_mat)
        new_points = new_points.reshape(2)
        left = np.polyval(self.left_line.poly_coeffs,
                          new_points[0]) - new_points[1]
        right = np.polyval(self.right_line.poly_coeffs,
                           new_points[0]) - new_points[1]

        left2 = np.polyval(self.left_line.poly_coeffs,
                           new_points[1]) - new_points[0]
        right2 = np.polyval(self.right_line.poly_coeffs,
                            new_points[1]) - new_points[0]
        status = "my"
        if left < 0 and right < 0:
            status = "left"
        elif right > 0 and left > 0:
            status = "right"
        print(box._id, status, left, right, "|", left2, right2)
        return status

    def calculate_position(self, box: BoundBox):
        if (self.perspective_done_at > 0):
            pos = np.array(
                (box.xmax / 2 + box.xmin / 2, box.ymax)).reshape(1, 1, -1)
            dst = self.perspective_tfm(pos).reshape(2)
            dst = np.array([
                dst[0] / self.pixels_per_meter[0],
                (self.UNWARPED_SIZE[1] - dst[1]) / self.pixels_per_meter[1]
            ])
            return dst
        else:

            return np.array([0, 0])

    @staticmethod
    def corwh2box(corwh):
        box = BoundBox(int(corwh[0]), int(corwh[1]), int(corwh[0] + corwh[2]),
                       int(corwh[1] + corwh[3]))
        return box

    def tracker2object(self, boxes: [OBSTACLE], th=0.5):
        n_b = len(boxes)
        n_o = len(self.obstacles)
        iou_mat = np.zeros((n_o, n_b))
        for i in range(n_o):
            for j in range(n_b):
                iou_mat[i, j] = bbox_iou(self.obstacles[i], boxes[j])
        count = min(n_b, n_o)
        used = []
        while count > 0:
            r, k = np.unravel_index(np.argmax(iou_mat, axis=None),
                                    iou_mat.shape)
            if iou_mat[r, k] > th:
                used.append(k)
                obstacle = self.obstacles[r]
                box = boxes[k]
                obstacle.update_box(box)
                self.obstacles[r] = obstacle
            iou_mat[r, :] = -99
            iou_mat[:, k] = -99
            count = count - 1
        idx = range(n_b)
        idx = [elem for elem in idx if elem not in used]
        for i, c in enumerate(idx):
            dst = self.calculate_position(boxes[c])
            obstacle = OBSTACLE(boxes[c], dst, i + n_o)
            self.obstacles.append(obstacle)
        return

    def update_trackers(self, img, plot=False):
        image = img.copy()
        self.find_lane(img, distorted=False, reset=False)
        # Rebuild the list of surviving obstacles; deleting from the list
        # while enumerating it would skip the following element
        survivors = []
        for obs in self.obstacles:
            success, corwh = obs.tracker.update(image)
            if not success:
                continue
            box = self.corwh2box(corwh)
            dst = self.calculate_position(box)
            obs.update_obstacle(box, dst, self.fps)
            survivors.append(obs)
        self.obstacles = survivors

        if self.count % self.__yp == 0:
            boxes = self.yolo.make_predictions(image,
                                               obstructions=obstructions,
                                               plot=True)
            self.tracker2object(boxes)
            image = cv2.cvtColor(np.array(image), cv2.COLOR_RGB2BGR)
            n_obs = len(self.obstacles)
            for i in range(n_obs):
                tracker = cv2.TrackerKCF_create()
                # tracker = cv2.TrackerMIL_create()  # Note: Try comparing KCF with MIL
                box = self.obstacles[i]
                bbox = (box.xmin, box.ymin, box.xmax - box.xmin,
                        box.ymax - box.ymin)
                # print(bbox)
                success = tracker.init(image, bbox)
                if success:
                    self.obstacles[i].tracker = tracker

        self.count += 1
        for i in range(len(self.obstacles)):
            lane = self.determine_lane(self.obstacles[i])
            self.obstacles[i].lane = lane

        if plot and self.count > 1:
            self.draw_lane_weighted(img)
        return

    def warp(self, img):
        now = datetime.utcnow().timestamp()
        if now - self.perspective_done_at > self.PERSP_PERIOD:
            self.calc_perspective()
        return cv2.warpPerspective(img,
                                   self.trans_mat,
                                   self.UNWARPED_SIZE,
                                   flags=cv2.WARP_FILL_OUTLIERS +
                                   cv2.INTER_CUBIC)

    def unwarp(self, img):
        now = datetime.utcnow().timestamp()
        if now - self.perspective_done_at > self.PERSP_PERIOD:
            self.calc_perspective()
        return cv2.warpPerspective(img,
                                   self.trans_mat,
                                   self.img_shp,
                                   flags=cv2.WARP_FILL_OUTLIERS +
                                   cv2.INTER_CUBIC + cv2.WARP_INVERSE_MAP)

    def equalize_lines(self, alpha=0.9):
        mean = 0.5 * (self.left_line.coeff_history[:, 0] +
                      self.right_line.coeff_history[:, 0])
        self.left_line.coeff_history[:, 0] = alpha * self.left_line.coeff_history[:, 0] + \
                                             (1 - alpha) * (mean - np.array([0, 0, 1.8288], dtype=np.float32))
        self.right_line.coeff_history[:, 0] = alpha * self.right_line.coeff_history[:, 0] + \
                                              (1 - alpha) * (mean + np.array([0, 0, 1.8288], dtype=np.float32))

    def find_lane(self, img, distorted=False, reset=False):
        # undistort, warp, change space, filter
        # save =  "detecetion.jpg"
        image = img.copy()
        if distorted:
            image = self.camera.undistort(image)
        if reset:
            self.left_line.reset_lane_line()
            self.right_line.reset_lane_line()

        image = self.warp(image)

        # cv2.imwrite(self.temp_dir+save,image)
        img_hls = cv2.cvtColor(image, cv2.COLOR_RGB2HLS)
        img_hls = cv2.medianBlur(img_hls, 5)
        img_lab = cv2.cvtColor(image, cv2.COLOR_RGB2LAB)
        img_lab = cv2.medianBlur(img_lab, 5)

        big_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (31, 31))
        small_kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (7, 7))

        greenery = (img_lab[:, :, 2].astype(np.uint8) > 130) & cv2.inRange(
            img_hls, (0, 0, 50), (35, 190, 255))

        road_mask = np.logical_not(greenery).astype(
            np.uint8) & (img_hls[:, :, 1] < 250)
        road_mask = cv2.morphologyEx(road_mask, cv2.MORPH_OPEN, small_kernel)
        road_mask = cv2.dilate(road_mask, big_kernel)

        # findContours returns 3 values on OpenCV 3.x and 2 on 4.x; taking
        # the last two keeps this version-agnostic
        contours, hierarchy = cv2.findContours(road_mask, cv2.RETR_LIST,
                                               cv2.CHAIN_APPROX_NONE)[-2:]

        biggest_area = 0
        for contour in contours:
            area = cv2.contourArea(contour)
            if area > biggest_area:
                biggest_area = area
                biggest_contour = contour
        road_mask = np.zeros_like(road_mask)
        cv2.fillPoly(road_mask, [biggest_contour], 1)

        self.roi_mask[:, :, 0] = (self.left_line.line_mask
                                  | self.right_line.line_mask) & road_mask
        self.roi_mask[:, :, 1] = self.roi_mask[:, :, 0]
        self.roi_mask[:, :, 2] = self.roi_mask[:, :, 0]

        kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 3))
        black = cv2.morphologyEx(img_lab[:, :, 0], cv2.MORPH_TOPHAT, kernel)
        lanes = cv2.morphologyEx(img_hls[:, :, 1], cv2.MORPH_TOPHAT, kernel)

        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (13, 13))
        lanes_yellow = cv2.morphologyEx(img_lab[:, :, 2], cv2.MORPH_TOPHAT,
                                        kernel)

        self.mask[:, :, 0] = cv2.adaptiveThreshold(black, 1,
                                                   cv2.ADAPTIVE_THRESH_MEAN_C,
                                                   cv2.THRESH_BINARY, 13, -6)
        self.mask[:, :, 1] = cv2.adaptiveThreshold(lanes, 1,
                                                   cv2.ADAPTIVE_THRESH_MEAN_C,
                                                   cv2.THRESH_BINARY, 13, -4)
        self.mask[:, :, 2] = cv2.adaptiveThreshold(lanes_yellow, 1,
                                                   cv2.ADAPTIVE_THRESH_MEAN_C,
                                                   cv2.THRESH_BINARY, 13, -1.5)
        self.mask *= self.roi_mask
        small_kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.total_mask = np.any(self.mask, axis=2).astype(np.uint8)
        self.total_mask = cv2.morphologyEx(self.total_mask.astype(np.uint8),
                                           cv2.MORPH_ERODE, small_kernel)

        left_mask = np.copy(self.total_mask)
        right_mask = np.copy(self.total_mask)
        if self.right_line.lane_line_found:
            left_mask = left_mask & np.logical_not(
                self.right_line.line_mask) & self.right_line.other_line_mask
        if self.left_line.lane_line_found:
            right_mask = right_mask & np.logical_not(
                self.left_line.line_mask) & self.left_line.other_line_mask
        print("LEFT")
        self.left_line.find_lane_line(left_mask, reset)
        print("RIGHT")
        self.right_line.find_lane_line(right_mask, reset)
        self.lane_found = self.left_line.lane_line_found and self.right_line.lane_line_found

        if self.lane_found:
            self.equalize_lines(0.875)

    @staticmethod
    def put_text(overlay, text, coord, color=WHITE):
        ft_sz = 5e-4 * overlay.shape[0]
        sz = ft_sz * 25
        font = cv2.FONT_HERSHEY_SIMPLEX
        rect_ht = int(sz * 1.2)
        rect_wd = int(len(text) * sz * 0.8)
        p1 = (coord[0], coord[1])
        p2 = (coord[0] + rect_wd, coord[1] - rect_ht)
        cv2.rectangle(overlay, p1, p2, (0, 0, 0), -1)
        # cv2.putText(overlay, text,   coord,  font, ft_sz, (0, 0, 0), 5)
        cv2.putText(overlay, text, coord, font, ft_sz, color, 1)

        return

    def draw_lane_weighted(self,
                           image,
                           thickness=5,
                           alpha=1,
                           beta=0.8,
                           gamma=0):
        overlay = image.copy()
        font = cv2.FONT_HERSHEY_COMPLEX_SMALL
        for i, box in enumerate(self.obstacles):
            past = [box.xmin, box.ymin, box.xmax, box.ymax]

            t1 = classes[obstructions[box.label]] + " [" + str(
                int(box.position[1])) + "m]"
            t2 = "(" + str(int(box.score * 100)) + "%) ID: " + str(box._id)
            b1 = "Lane " + box.lane
            b2 = str(int(box.velocity[1])) + "m/s"
            b3 = "Col " + str(int(box.col_time)) + "s"
            pt1 = (box.xmin, box.ymin - 10)
            pt2 = (box.xmin, box.ymin)
            pb1 = (box.xmin, box.ymax + 10)
            pb2 = (box.xmin, box.ymax + 20)
            pb3 = (box.xmin, box.ymax + 30)
            self.put_text(overlay, t1, pt1)
            self.put_text(overlay, t2, pt2)
            self.put_text(overlay, b1, pb1)
            self.put_text(overlay, b2, pb2)
            self.put_text(overlay, b3, pb3)

            # centr_lbl = classes[obstructions[box.label]] +"|"+ str(int(box.score*100))+"%"
            # top_lbl = str(box._id)+"|"+box.lane
            # bot_lbl = str(int(box.col_time)) +"s | " + str(int(box.position[0])) + " m | " +str(int(box.velocity[0])) + " m/s"
            past_center = (int(past[0] / 2 + past[2] / 2), past[3])
            # font_thk =1

            color = ORANGE if box.velocity[1] > 0 else GREEN
            cv2.rectangle(overlay, (box.xmin, box.ymin), (box.xmax, box.ymax),
                          color, 2)
            cv2.circle(overlay, past_center, 1, GRAY, 2)
            # self.put_text(overlay, top_lbl,   (box.xmin+5, box.ymin))
            # self.put_text(overlay, centr_lbl,   (box.xmin, box.ymid))
            # self.put_text(overlay, bot_lbl,   (box.xmin, box.ymax),)
            # cv2.putText(overlay, top_lbl,
            #             (box.xmin+5, box.ymin),  font,
            #             6e-4 * image.shape[0],   YELLOW, font_thk)
            # cv2.putText(overlay, centr_lbl,
            #             (box.xmin, box.ymid),  font,
            #             6e-4 * image.shape[0], LIGHT_CYAN  , font_thk)
            # cv2.putText(overlay, bot_lbl,
            #             (box.xmin, box.ymax),  font,
            #             6e-4 * image.shape[0], LIGHT_CYAN  , font_thk)
        image = cv2.addWeighted(image, alpha, overlay, beta, gamma)
        left_line = self.left_line.get_line_points()
        right_line = self.right_line.get_line_points()
        both_lines = np.concatenate((left_line, np.flipud(right_line)), axis=0)
        lanes = np.zeros((self.UNWARPED_SIZE[1], self.UNWARPED_SIZE[0], 3),
                         dtype=np.uint8)
        center_line = (left_line + right_line) // 2
        if self.lane_found:
            cv2.fillPoly(lanes, [both_lines.astype(np.int32)], LIGHT_CYAN)
            cv2.polylines(lanes, [left_line.astype(np.int32)],
                          False,
                          RED,
                          thickness=5)
            cv2.polylines(lanes, [right_line.astype(np.int32)],
                          False,
                          DARK_BLUE,
                          thickness=5)
            cv2.polylines(lanes, [center_line.astype(np.int32)],
                          False,
                          ORANGE,
                          thickness=5)
            # mid_coef = 0.5 * (self.left_line.poly_coeffs + self.right_line.poly_coeffs)
            # curve = get_curvature(mid_coef, img_size=self.UNWARPED_SIZE, pixels_per_meter=self.left_line.pixels_per_meter)
            # shift = get_center_shift(mid_coef, img_size=self.UNWARPED_SIZE,
            #                          pixels_per_meter=self.left_line.pixels_per_meter)
            # cv2.putText(image, "Road curvature: {:6.2f}m".format(curve), (420, 50), font, fontScale=2.5,
            #             thickness=5, color=(255, 255, 255))
            # cv2.putText(image, "Road curvature: {:6.2f}m".format(curve), (420, 50), font, fontScale=2.5,
            #             thickness=3, color=(0, 0, 0))
            # cv2.putText(image, "Car position: {:4.2f}m".format(shift), (460, 100), font, fontScale=2.5,
            #             thickness=5, color=(255, 255, 255))
            # cv2.putText(image, "Car position: {:4.2f}m".format(shift), (460, 100), font, fontScale=2.5,
            #             thickness=3, color=(0, 0, 0))
            lanes_unwarped = self.unwarp(lanes)
            overlay = cv2.addWeighted(image, alpha, lanes_unwarped, beta,
                                      gamma)
            cv2.imwrite(self.temp_dir + "detection.jpg", overlay)
        else:
            # # warning_shape = self.warning_icon.shape
            # corner = (10, (image.shape[1]-warning_shape[1])//2)
            # patch = image[corner[0]:corner[0]+warning_shape[0], corner[1]:corner[1]+warning_shape[1]]
            # # patch[self.warning_icon[:, :, 3] > 0] = self.warning_icon[self.warning_icon[:, :, 3] > 0, 0:3]
            # image[corner[0]:corner[0]+warning_shape[0], corner[1]:corner[1]+warning_shape[1]]=patch
            cv2.putText(image,
                        "Lane lost!", (550, 170),
                        font,
                        fontScale=2.5,
                        thickness=5,
                        color=(255, 255, 255))
            cv2.putText(image,
                        "Lane lost!", (550, 170),
                        font,
                        fontScale=2.5,
                        thickness=3,
                        color=(0, 0, 0))
            cv2.imwrite(self.temp_dir + "detection.jpg", image)

        return

    def vehicle_speed(self):
        return
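A minimal single-frame sketch for this FRAME variant (the image path is a placeholder, and the YOLO and lane-finder dependencies are assumed to be configured):

import cv2

# Hypothetical single-frame run of the class above
img = cv2.imread("images/test_frame.jpg")   # placeholder path
frame = FRAME(image=img, fps=22)
# note: calc_perspective opens a debug window and waits for a key press
frame.calc_perspective(verbose=False)  # fit trans_mat from the vanishing point
frame.update_trackers(img, plot=True)  # detect, track, assign lanes, draw overlay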
Example #6
def subroutine():
    time.sleep(2)
    picam = CAMERA()
    picam.take_multiple_pics()
    email = EMAIL()
    email.send_email_with_images(picam.filenames)