Example #1
    #AWC=cap.get(cv2.CAP_PROP_AUTOFOCUS)

    #AWC=cap.get(cv2.CAP_PROP_IOS_DEVICE_WHITEBALANCE)
    # print(AWC)
    return cap


if __name__ == '__main__':
    from fps import FPS
    fps = FPS().start()
    cap = cv2.VideoCapture(1)
    #cap.set(cv2.CAP_PROP_FRAME_WIDTH, 640);
    #cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 480);
    time.sleep(2)
    # print(cap.set(cv2.CAP_PROP_WHITE_BALANCE_BLUE_U, 4000))

    #AWC=cap.set(cv2.CAP_PROP_FPS,0)
    while True:
        _, frame = cap.read()
        # print(cap.get(cv2.CAP_PROP_XI_AUTO_WB))
        f = fps.approx_compute()
        cv2.putText(frame, 'FPS {:.3f}'.format(f), (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                    cv2.LINE_AA)
        cv2.imshow('test', frame)
        ch = cv2.waitKey(2)
        if ch == 27:
            break
        if ch == ord('r'):
            fps.reset()
    cap.release()
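
A note on the fps helper imported by these examples: its source is not shown on this page. Judging only from the calls the examples make (start(), reset(), approx_compute(), the NumFrame counter in Example #4, and fps() in Example #5), a minimal sketch might look like the following; the class name and method names come from the examples, everything else is an assumption.

import time

class FPS(object):
    """Hypothetical reconstruction of the fps helper module."""
    def __init__(self):
        self._start = None
        self.NumFrame = 0  # frames counted since start()/reset()

    def start(self):
        self._start = time.time()
        return self

    def reset(self):
        # Restart the averaging window, e.g. after re-selecting a target.
        self._start = time.time()
        self.NumFrame = 0

    def approx_compute(self):
        # Approximate throughput: frames seen divided by elapsed seconds.
        self.NumFrame += 1
        elapsed = time.time() - self._start
        return self.NumFrame / float(elapsed) if elapsed > 0 else 0.0

    def fps(self):
        # Alias used by Example #5.
        return self.approx_compute()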
Example #2
class App(object):
    def __init__(self, video_src):
        # Raspberry Pi IP
        #self.server_address='rtmp://localhost/dji/stream.h264'
        #self.server_address='rtmp://127.0.0.1:1935/dji'
        self.server_address = 'http://192.168.40.146:8000/stream.mjpg'
        #self.server_address='rtsp://:192.168.40.118/1'
        #self.server_address=0
        #self.server_address='udp://@:8000 --demux=h264'
        #self.cam = video.create_capture(self.server_address)
        self.cam = WebcamVideoStream(self.server_address).start()
        ret, self.frame = self.cam.read()
        self.fish_cali = fish_calibration(self.frame)
        self.drag_start = None
        self.list_camshift = []
        self.show_backproj = False
        self.newcamshift = None
        self.selection = None
        self.lock = False
        self.mdp = MyUdp()
        #self.count=0
        self.light = self.get_light()

        self.swicht = False
        #self.list_camshift.append(self.get_car('red.jpg',0))
        #self.list_camshift.append(self.get_car('green.jpg',1))

        self.fps = FPS().start()

        # WiFi module IP
        self.mdp.client_address = ('192.168.40.31', 8899)
        cv2.namedWindow('TUCanshift')
        cv2.setMouseCallback('TUCanshift', self.onmouse)

    def onmouse(self, event, x, y, flags, param):
        if self.lock:
            if event == cv2.EVENT_RBUTTONDOWN:
                self.pop_camshift()
                return
            if event == cv2.EVENT_LBUTTONDOWN:
                self.drag_start = (x, y)
                self.newcamshift = mycamshift()
            if self.drag_start:
                xmin = min(x, self.drag_start[0])
                ymin = min(y, self.drag_start[1])
                xmax = max(x, self.drag_start[0])
                ymax = max(y, self.drag_start[1])
                self.selection = (xmin, ymin, xmax, ymax)
            if event == cv2.EVENT_LBUTTONUP:
                self.fps.reset()
                self.drag_start = None
                if self.newcamshift is not None and self.newcamshift.getHist(
                ) is not None:
                    self.newcamshift.ID = len(self.list_camshift)
                    self.list_camshift.append(self.newcamshift)
                self.newcamshift = None
                self.selection = None

    def pop_camshift(self):
        if (len(self.list_camshift) < 1):
            return True
        cv2.destroyWindow(str(len(self.list_camshift) - 1))
        cv2.destroyWindow('%s%s' % ('cam', str(len(self.list_camshift) - 1)))
        self.list_camshift.pop()
        return False

    @staticmethod
    def creat_camshift_from_img(hsv):
        camshift = mycamshift()
        mask = np.ones((hsv.shape[0], hsv.shape[1]), dtype=np.uint8)
        camshift.preProcess(hsv, mask, (0, 0, hsv.shape[1], hsv.shape[0]), 32)
        return camshift

    def get_light(self):
        temp = mycamshift()
        temp.prProcess_light(self.frame)
        temp.ID = 99
        return temp

    def get_car(self, file, ID):
        img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
        img = cv2.resize(img, (self.frame.shape[1], self.frame.shape[0]))
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        temp = App.creat_camshift_from_img(hsv)
        cv2.imshow(str(ID), temp.getHist())
        temp.ID = ID

        return temp

    def run(self):
        while True:
            if not (self.cam.renew and self.cam.grabbed):
                continue

            ret, self.frame = self.cam.read()
            #self.frame=cv2.GaussianBlur(self.frame,(5,5),2)
            self.frame = cv2.medianBlur(self.frame, 5)
            #self.frame=self.fish_cali.cali(self.frame)

            imshow_vis = self.frame.copy()

            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)
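            # Mask out the dominant background colour so back projection only
            # responds to the tracked markers.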
            mask = mycamshift.filte_background_color(hsv,
                                                     offset1=16,
                                                     offset2=40,
                                                     iterations=3)

            ll = len(self.list_camshift)
            if self.newcamshift is not None:
                if self.newcamshift.preProcess(hsv, mask, self.selection, 16):
                    cv2.imshow(str(ll), self.newcamshift.getHist())

            self.lock = False
            if ll > 0:
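                # Derive a bright-spot threshold from Otsu's level: go 80% of
                # the way from the Otsu threshold toward white, capped at 230,
                # so only the brightest blob (the guide light) survives.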
                light_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
                mean, temp = cv2.threshold(light_gray, 0, 255,
                                           cv2.THRESH_BINARY + cv2.THRESH_OTSU)
                thresh = (255 - mean) * 0.8 + mean
                if thresh > 230:
                    thresh = 230
                _, light_gray = cv2.threshold(light_gray, thresh, 255,
                                              cv2.THRESH_BINARY)
                light_gray = cv2.morphologyEx(light_gray,
                                              cv2.MORPH_OPEN,
                                              cv2.getStructuringElement(
                                                  cv2.MORPH_ELLIPSE, (5, 5)),
                                              iterations=2,
                                              borderType=cv2.BORDER_REPLICATE)

                cv2.imshow('light', light_gray)

                track_box = [self.light.go_once_gray(light_gray)]

                for x in self.list_camshift:
                    track_box.append(x.go_once(hsv, mask))

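                # track_box[0] is the guide light; the last two entries are
                # the two tracked car markers.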
                n = len(track_box)
                if n > 2:
                    p3 = track_box[0]
                    p1, p2 = track_box[n - 2:]
                    try:
                        p1 = p1[0]
                    except:
                        p1 = None
                    try:
                        p2 = p2[0]
                    except:
                        p2 = None
                    try:
                        p3 = p3[0]
                    except:
                        p3 = None
                    if p1 and p2:
                        try:
                            # snap(img, p1, p2, obstacle detection range, obstacle detection width, fine-tuning to avoid reading the car's nose as an obstacle)
                            theta, D, dst = snap(mask, p1, p2, 5.0, 2.3, 2.1,
                                                 2.9)
                            dst = cv2.resize(dst, (400, 200))
                            cv2.imshow('snap', dst)
                            if theta is not None:
                                mes = (int(theta), int(D))
                                self.mdp.send_message('avoid', mes)
                                print('Block ahead')
                                cv2.putText(imshow_vis,
                                            'Block ahead:%s,%s' % mes,
                                            (10, 230),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (255, 255, 255), 1, cv2.LINE_AA)
                                print(mes)

                            elif p3:
                                t, d = get_direction(p1, p2, p3)
                                mes = (int(t), int(d))
                                self.mdp.send_message('guidance', mes)
                                print('guidance')
                                cv2.putText(imshow_vis, 'Guidance:%s,%s' % mes,
                                            (10, 230),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (255, 255, 255), 1, cv2.LINE_AA)
                                print(mes)
                            else:
                                cv2.putText(imshow_vis, 'Target LOST',
                                            (10, 230),
                                            cv2.FONT_HERSHEY_SIMPLEX, 1,
                                            (255, 255, 255), 1, cv2.LINE_AA)
                                self.mdp.send_message('lost')
                        except:
                            cv2.putText(imshow_vis, 'divide-by-zero error', (10, 230),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (255, 255, 255), 1, cv2.LINE_AA)
                            self.mdp.send_message('lost')
                    else:
                        cv2.putText(imshow_vis, 'Wait for START', (10, 230),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 255, 255), 1, cv2.LINE_AA)
                        self.mdp.send_message('lost')

                else:
                    cv2.putText(imshow_vis, 'Wait for START', (10, 230),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                1, cv2.LINE_AA)
                    #self.mdp.send_message('lost')

                prob = self.list_camshift[ll - 1].prob
                if self.show_backproj and prob is not None:
                    self.frame = prob[..., np.newaxis]

                for x in track_box:
                    try:
                        cv2.ellipse(imshow_vis, x, (0, 0, 255), 2)
                        pts = cv2.boxPoints(x)
                        pts = np.int0(pts)
                        cv2.polylines(imshow_vis, [pts], True, 255, 2)
                    except:
                        pass

            else:
                cv2.putText(imshow_vis, 'Wait for START', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,
                            cv2.LINE_AA)
                #self.mdp.send_message('lost')
            self.lock = True

            if self.selection is not None:
                x0, y0, x1, y1 = self.selection
                vis_roi = self.frame[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)

            fps = self.fps.approx_compute()
            # print("FPS: {:.3f}".format(fps))
            cv2.putText(imshow_vis, 'FPS {:.3f}'.format(fps), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                        cv2.LINE_AA)
            cv2.imshow('TUCanshift', imshow_vis)

            #print (str(self.count))
            #self.count=self.count+1
            ch = cv2.waitKey(2)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj

            #data, addr = self.mdp.recv_message()
            #data=MyUdp.getdata(data)
            #if data:
            #    print("received:", data, "from", addr)

        cv2.destroyAllWindows()
        self.cam.release()
        self.mdp.close()
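
A note on WebcamVideoStream, which several of these examples use instead of a bare cv2.VideoCapture: its source is not included here. From its use (constructed from a device index, URL, or path, optionally with a resolution, started with start(), read with read() returning a (grabbed, frame) pair, plus the grabbed/renew flags polled in the run loops), a minimal threaded sketch under those assumptions:

import threading
import cv2

class WebcamVideoStream(object):
    """Hypothetical sketch of a threaded capture wrapper."""
    def __init__(self, src=0, resolution=None):
        self.stream = cv2.VideoCapture(src)
        if resolution is not None:
            self.stream.set(cv2.CAP_PROP_FRAME_WIDTH, resolution[0])
            self.stream.set(cv2.CAP_PROP_FRAME_HEIGHT, resolution[1])
        self.grabbed, self.frame = self.stream.read()
        self.renew = True    # a frame newer than the last read() is available
        self.stopped = False

    def start(self):
        t = threading.Thread(target=self._update)
        t.daemon = True
        t.start()
        return self

    def _update(self):
        # Grab frames in the background so read() never blocks on camera I/O.
        while not self.stopped:
            self.grabbed, self.frame = self.stream.read()
            self.renew = True

    def read(self):
        self.renew = False
        return self.grabbed, self.frame

    def release(self):
        self.stopped = True
        self.stream.release()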
Example #3
class App(object):
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

        self.suspend_tracking = SuspendTracking(teta=3)

        self.height, self.width = self.frame.shape[:2]
        self.kernel_erode = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                      (3, 3))
        self.kernel_dilate = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,
                                                       (7, 7))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        radius = 3
        n_point = 8 * radius
        self.lbpDesc = LBP(n_point, radius)

        self.HSV_CHANNELS = (
            (24, [0, 180], "hue"),  # Hue
            (8, [0, 256], "sat"),  # Saturation
            (8, [0, 256], "val")  # Value
        )

        self.show_backproj = False
        self.track_window = None
        self.histHSV = []
        self.track_box = None

    def onmouse(self, rect):
        xmin, ymin, xmax, ymax = rect
        hsvRoi = self.hsv[ymin:ymax, xmin:xmax]

        self.calcHSVhist(hsvRoi)
        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
        self.init_suspend(hsvRoi)
        self.fps.reset()

    def init_suspend(self, hsvRoi):
        track_window_condition = self.track_window and self.track_window[
            2] > 0 and self.track_window[3] > 0
        if track_window_condition:
            self.camshift_algorithm()
            self.suspend_tracking.init(hsvRoi)

    def calcHSVhist(self, hsvRoi):
        self.histHSV = []
        for channel, param in enumerate(self.HSV_CHANNELS):
            # Init HSV histogram
            hist = cv2.calcHist([hsvRoi], [channel], None, [param[0]],
                                param[1])
            hist = cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.histHSV.append(hist)
            # Show hist of each channel separately
            self.show_hist(hist, param[2])

    def calcBackProjection(self):
        ch_prob = []
        ch_back_proj_prob = []
        # back_proj_prob = np.ones(shape=(self.height, self.width), dtype=np.uint8) * 255
        # back_proj_prob = np.zeros(shape=(self.height, self.width), dtype=np.uint8)

        for channel, param in enumerate(self.HSV_CHANNELS):
            prob = cv2.calcBackProject([self.hsv], [channel],
                                       self.histHSV[channel], param[1], 1)
            cv2.imshow('Back projection ' + str(param[2]), prob)
            # ret, prob = cv2.threshold(prob, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            ret, prob = cv2.threshold(prob, 70, 255, cv2.THRESH_BINARY)
            cv2.imshow('Back projection thresh ' + str(param[2]), prob)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_ERODE, self.kernel_erode, iterations=2)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_DILATE, self.kernel_dilate, iterations=3)
            # back_proj_prob = cv2.bitwise_and(back_proj_prob, prob)
            # back_proj_prob = cv2.addWeighted(back_proj_prob, 0.4, prob, 0.6, 0)
            ch_prob.append(prob)

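        # Fuse the single-channel masks: combine hue with saturation and hue
        # with value, then AND the two so a pixel must be supported by both.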
        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], 0.6, ch_prob[1], 0.4, 0))

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], 0.6, ch_prob[2], 0.4, 0))

        back_proj_prob = cv2.bitwise_and(ch_back_proj_prob[0],
                                         ch_back_proj_prob[1])
        ret, back_proj_prob = cv2.threshold(back_proj_prob, 150, 255,
                                            cv2.THRESH_BINARY)

        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_ERODE,
                                          self.kernel_erode,
                                          iterations=1)
        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_DILATE,
                                          self.kernel_erode,
                                          iterations=2)

        return back_proj_prob

    @staticmethod
    def show_hist(hist, channel='None'):
        bin_count = hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
        for i in range(bin_count):
            h = int(hist[i])
            if str(channel) == 'hue':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (int(180.0 * i / bin_count), 255, 255), -1)
            elif str(channel) == 'sat':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (180, int(255.0 * i / bin_count), 255), -1)
            elif str(channel) == 'val':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (180, 255, int(255.0 * i / bin_count)), -1)
            else:
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h), (180, 255, 255),
                              -1)
        img = cv2.cvtColor(img, cv2.COLOR_HSV2BGR)
        cv2.imshow('hist ' + str(channel), img)

    def camshift_algorithm(self):
        prob = self.calcBackProjection()
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        self.track_box, self.track_window = cv2.CamShift(
            prob, self.track_window, term_crit)

        if self.show_backproj:
            cv2.imshow("Back Projection", prob[..., np.newaxis])
        else:
            cv2.destroyWindow("Back Projection")

    def run(self):
        scaling_factor = 0.5
        while True:
            if not self.obj_select.dragging:
                ret, self.frame = self.cam.read()
                self.frame = cv2.resize(self.frame,
                                        None,
                                        fx=scaling_factor,
                                        fy=scaling_factor,
                                        interpolation=cv2.INTER_AREA)
                # blur_frame = cv2.GaussianBlur(self.frame, (21,21), 0)
                self.hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

            if ret:
                vis = self.frame.copy()

            track_window_condition = self.track_window and self.track_window[
                2] > 0 and self.track_window[3] > 0

            if track_window_condition and not self.suspend_tracking.is_suspend(
                    self.hsv, self.track_box):
                self.camshift_algorithm()

                try:
                    cv2.ellipse(vis, self.track_box, (0, 0, 255), 2)
                    pts = cv2.boxPoints(self.track_box)
                    pts = np.int0(pts)
                    cv2.polylines(vis, [pts], True, 255, 2)
                except:
                    print(self.track_box)
            else:
                cv2.putText(vis, 'Target Lost', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,
                            cv2.LINE_AA)

            # frame processing throughput rate
            fps = self.fps.approx_compute()
            # print("FPS: {:.3f}".format(fps))
            cv2.putText(vis, 'FPS {:.3f}'.format(fps), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                        cv2.LINE_AA)

            self.obj_select.draw(vis)
            cv2.imshow('camshift', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()
Example #4
class App(object):
    def __init__(self):
        # self.cam = cv2.VideoCapture(0)
        # self.cam.set(3, 320)
        # self.cam.set(4, 240)
        self.cam = WebcamVideoStream(src=0, resolution=(640, 480)).start()
        self.fps = FPS().start()

        ret, self.frame = self.cam.read()

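        # Tracker tuning: the *FrameNum values are check intervals in frames,
        # MaxLBPFrameUpdate is the recovery window before a lost flag is
        # cleared, and the *_Weight entries blend the L, a and b channel back
        # projections.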
        self.conf = {
            'ColorFrameNum': 7,
            'LBPFrameNum': 7,
            'MaxFrameDiffClr': 15,
            'MaxLBPFrameUpdate': 30,
            'L_Weight': 0.3,
            'A_Weight': 0.7,
            'B_Weight': 0.7
        }

        self.ColorCheck = AdaptiveThreshold(teta=3, max_lost_cnt=1)
        self.LBPCheck = AdaptiveThreshold(teta=2, max_lost_cnt=1)

        self.ColorDistance = LABDistance()
        self.LBPDistance = LocalBinaryPatterns(
            numPoints=8,
            radius=2,
            update_prev_hist=self.conf['MaxLBPFrameUpdate'])

        self.isLost = False
        self.isLBPLost = False

        self.height, self.width = self.frame.shape[:2]

        self.kernel_e = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (3, 3))
        self.kernel_d = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7))
        self.kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

        cv2.namedWindow('camshift')
        self.obj_select = RectSelector('camshift', self.onmouse)

        self.LAB_CHANNELS = (
            (24, [0, 256], "light"),  # L
            (24, [0, 256], "a"),  # a
            (24, [0, 256], "b")  # b
        )

        self.show_backproj = False
        self.track_window = None
        self.histLAB = []
        self.track_box = None

    def onmouse(self, rect):
        xmin, ymin, xmax, ymax = rect
        labRoi = self.lab[ymin:ymax, xmin:xmax]
        bgrRoi = self.frame[ymin:ymax, xmin:xmax]

        self.calcLABhist(labRoi)
        self.ColorDistance.init(bgrRoi)

        self.track_window = (xmin, ymin, xmax - xmin, ymax - ymin)
        # self.init_suspend(labRoi)
        self.isLost = False
        self.isLBPLost = False
        self.fps.reset()

    def calcLABhist(self, labRoi):
        self.histLAB = []
        for channel, param in enumerate(self.LAB_CHANNELS):
            # Init LAB histogram
            hist = cv2.calcHist([labRoi], [channel], None, [param[0]],
                                param[1])
            hist = cv2.normalize(hist, hist, 0, 255, cv2.NORM_MINMAX)
            self.histLAB.append(hist)
            # Show hist of each channel separately
            # self.show_hist(hist, param[2])

    def calcBackProjection(self):
        ch_prob = []
        ch_back_proj_prob = []

        for channel, param in enumerate(self.LAB_CHANNELS):
            prob = cv2.calcBackProject([self.lab], [channel],
                                       self.histLAB[channel], param[1], 1)
            cv2.imshow('Back projection ' + str(param[2]), prob)
            ret, prob = cv2.threshold(prob, 70, 255, cv2.THRESH_BINARY)
            cv2.imshow('Back projection thresh ' + str(param[2]), prob)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_ERODE, self.kernel_e, iterations=1)
            # prob = cv2.morphologyEx(prob, cv2.MORPH_DILATE, self.kernel, iterations=1)
            prob = cv2.morphologyEx(prob,
                                    cv2.MORPH_CLOSE,
                                    self.kernel_e,
                                    iterations=2)
            ch_prob.append(prob)

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], self.conf['L_Weight'], ch_prob[1],
                            self.conf['A_Weight'], 0))

        ch_back_proj_prob.append(
            cv2.addWeighted(ch_prob[0], self.conf['L_Weight'], ch_prob[2],
                            self.conf['B_Weight'], 0))

        back_proj_prob = cv2.bitwise_and(ch_back_proj_prob[0],
                                         ch_back_proj_prob[1])
        ret, back_proj_prob = cv2.threshold(back_proj_prob, 150, 255,
                                            cv2.THRESH_BINARY)

        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_ERODE,
                                          self.kernel_e,
                                          iterations=1)
        back_proj_prob = cv2.morphologyEx(back_proj_prob,
                                          cv2.MORPH_DILATE,
                                          self.kernel_e,
                                          iterations=2)

        return back_proj_prob

    @staticmethod
    def show_hist(hist, channel='None'):
        bin_count = hist.shape[0]
        bin_w = 24
        img = np.zeros((256, bin_count * bin_w, 3), np.uint8)
        for i in range(bin_count):
            h = int(hist[i])
            if str(channel) == 'light':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (int(255.0 * i / bin_count), 255, 255), -1)
            elif str(channel) == 'a':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (255, int(255.0 * i / bin_count), 255), -1)
            elif str(channel) == 'b':
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h),
                              (255, 255, int(255.0 * i / bin_count)), -1)
            else:
                cv2.rectangle(img, (i * bin_w + 2, 255),
                              ((i + 1) * bin_w - 2, 255 - h), (255, 255, 255),
                              -1)
        img = cv2.cvtColor(img, cv2.COLOR_LAB2BGR)
        cv2.imshow('hist ' + str(channel), img)

    def camshift_algorithm(self):
        prob = self.calcBackProjection()
        term_crit = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 1)
        self.track_box, self.track_window = cv2.CamShift(
            prob, self.track_window, term_crit)

        if self.show_backproj:
            cv2.imshow("Back Projection", prob[..., np.newaxis])
        else:
            cv2.destroyWindow("Back Projection")

    def run(self):
        scaling_factor = 0.5
        last_frame_number = 0

        while True:
            if not self.obj_select.dragging:
                ret, self.frame = self.cam.read()
                self.frame = cv2.resize(self.frame,
                                        None,
                                        fx=scaling_factor,
                                        fy=scaling_factor,
                                        interpolation=cv2.INTER_AREA)
                self.lab = cv2.cvtColor(self.frame, cv2.COLOR_BGR2LAB)
                # self.lab = cv2.GaussianBlur(self.lab, (3,3), 0)
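                # 5x5 normalized box filter: a cheap alternative to the
                # Gaussian blur above.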
                kernel = np.ones((5, 5), np.float32) / 25
                self.lab = cv2.filter2D(self.lab, -1, kernel)
                self.gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)

            if ret:
                vis = self.frame.copy()

            track_window_condition = ((self.track_window)
                                      and (self.track_window[2] > 0)
                                      and (self.track_window[3] > 0))

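            # The target counts as lost only when both cues (color distance
            # and LBP texture) report a loss.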
            target_lost = self.isLost and self.isLBPLost

            # Main proccess flow
            if track_window_condition:
                if not target_lost:
                    # Apply CamShift algorithm and get new track_box
                    self.camshift_algorithm()

                    if self.fps.NumFrame % self.conf[
                            'ColorFrameNum'] == 0 and not self.isLost:
                        color_distance = self.ColorDistance.update(
                            self.frame, self.track_box)
                        self.isLost = self.ColorCheck.target_lost(
                            color_distance)
                        print("[INFO] Color track is lost:  '{}'\n".format(
                            self.isLost))
                        if self.isLost:
                            last_frame_number = self.fps.NumFrame

                    if self.fps.NumFrame % self.conf[
                            'LBPFrameNum'] == 0 and not self.isLBPLost:
                        LBP_distance = self.LBPDistance.update(
                            self.gray, self.track_window)
                        self.isLBPLost = self.LBPCheck.target_lost(
                            LBP_distance)
                        print("[INFO] LBP track is lost:  '{}'\n".format(
                            self.isLBPLost))
                        if self.isLBPLost:
                            last_frame_number = self.fps.NumFrame

                    if self.fps.NumFrame - last_frame_number >= self.conf[
                            'MaxLBPFrameUpdate']:
                        self.isLBPLost = False
                        self.isLost = False

                    try:
                        cv2.ellipse(vis, self.track_box, (0, 0, 255), 2)
                        pts = cv2.boxPoints(self.track_box)
                        pts = np.int0(pts)
                        cv2.polylines(vis, [pts], True, 255, 2)
                    except:
                        print(self.track_box)
                else:
                    cv2.putText(vis, 'Target Lost', (10, 230),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 1,
                                cv2.LINE_AA)
                    # print("[INFO] Starting recovery proccess")

            elif not track_window_condition:
                cv2.putText(vis, 'Mark area of the object', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1,
                            cv2.LINE_AA)

            # frame processing throughput rate
            fps = self.fps.approx_compute()
            cv2.putText(vis, 'FPS {:.3f}'.format(fps), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                        cv2.LINE_AA)

            self.obj_select.draw(vis)
            cv2.imshow('camshift', vis)

            ch = 0xFF & cv2.waitKey(1)
            if ch == 27:
                break
            if ch == ord('b'):
                self.show_backproj = not self.show_backproj
        cv2.destroyAllWindows()
Example #5
class VideoCapture(object):
    def __init__(self,
                 videoPath="",
                 verbose=True,
                 displayW=1920,
                 displayH=1080,
                 fontScale=1.0,
                 inference=True,
                 confidenceLevel=0.5):

        self.verbose = verbose
        self._debug = False

        self.videoPath = videoPath
        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown
        self.videoStream = None

        self._videoReadyEvent = Event()

        self._capture_in_progress = False

        # Display Resolution
        # Will try to set camera's resolution to the specified resolution
        self._displayW = displayW
        self._displayH = displayH

        self._cameraW = 0
        self._cameraH = 0

        # Camera's FPS
        self._cameraFPS = 30

        # Font Scale for putText
        self._fontScale = float(fontScale)

        # turn inference on/off
        self.runInference = inference

        # confidence level threshold
        self.confidenceLevel = confidenceLevel

        # various frame data

        # frame data for UI
        self._displayFrame = None

        # wallpapers for UI
        self._frame_wp_init_system = cv2.imread(
            "./www/WP-InitializingSystem.png")
        self._frame_wp_no_video = cv2.imread("./www/WP-NoVideoData.png")
        self._frame_wp_init_iothub = cv2.imread(
            "./www/WP-InitializeIotHub.png")

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        logging.info(
            '===============================================================')
        logging.info(
            'Initializing Video Capture with the following parameters:')
        logging.info('   - OpenCV Version     : {}'.format(cv2.__version__))
        logging.info('   - Video path         : {}'.format(self.videoPath))
        logging.info('   - Display Resolution : {} x {}'.format(
            self._displayW, self._displayH))
        logging.info('   - Font Scale         : {}'.format(self._fontScale))
        logging.info('   - Inference?         : {}'.format(self.runInference))
        logging.info('   - ConfidenceLevel    : {}'.format(
            self.confidenceLevel))
        logging.info(
            '===============================================================')

        # set wallpaper
        self.set_Wallpaper(self._frame_wp_init_system)

        # set FPS
        self.fps = FPS()

        self.imageStreamHandler = None

        # Start Web Server for View
        self.imageServer = ImageServer(80, self)
        self.imageServer.start()

        # Set Video Source
        self.set_Video_Source(self.videoPath)

        self.set_Wallpaper(cv2.imread("./www/WP-InitializeAIEngine.png"))
        # logging.info('Yolo Inference Initializing\r\n')
        self.yoloInference = YoloInference(self._fontScale, sendMessage=False)
        # logging.info('Yolo Inference Initialized\r\n')

    def __enter__(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # self.set_Video_Source(self.videoPath)

        return self

    def videoStreamReadTimeoutHandler(self, signum, frame):
        raise Exception("VideoStream Read Timeout")

    #
    # Video Source Management
    #
    def _set_Video_Source_Type(self, videoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        self._reset_Video_Source()

        if '/dev/video' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            self._videoSourceType = CaptureDevice.Hololens

        if self.verbose:
            logging.info('<< ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(self._videoSourceType))

    def _get_Video_Source_Type(self, videoPath):

        videoType = CaptureDevice.Unknown

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name +
                         '() : {}'.format(videoPath))

        if '/dev/video' in videoPath.lower():
            videoType = CaptureDevice.Webcam

        elif 'rtsp:' in videoPath.lower():
            videoType = CaptureDevice.Rtsp

        elif '/api/holographic/stream' in videoPath.lower():
            videoType = CaptureDevice.Hololens

        return videoType

    #
    # Resets video capture/stream settings
    #
    def _reset_Video_Source(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        if self.videoStream:
            self.videoStream.stop()
        #    self.videoStream.close()
        #     self.videoStream = None

        self._videoSourceType = CaptureDevice.Unknown
        self._videoSourceState = CaptureDeviceState.Unknown

    def set_Video_Source(self, newVideoPath):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        retVal = False
        realVideoPath = newVideoPath

        if self.videoPath == newVideoPath and self._videoSourceState == CaptureDeviceState.Running:
            return True

        if self.imageStreamHandler != None:
            statusMsg = '{{\"DeviceStatus\":\"Connecting to {}\",\"isSuccess\":{}}}'.format(
                self._remove_credential(newVideoPath), 1)
            self.imageStreamHandler.submit_write(statusMsg)

        self._videoSourceState = CaptureDeviceState.Stop

        if self._capture_in_progress:
            # wait for queue to drain and loop to exit
            time.sleep(1.0)

        self._capture_in_progress = False

        self._set_Video_Source_Type(realVideoPath)

        if self._videoSourceType == CaptureDevice.Unknown:
            self._videoSourceState = CaptureDeviceState.ErrorNotSupported
            logging.error('>> ' + self.__class__.__name__ + "." +
                          sys._getframe().f_code.co_name +
                          '() : Unsupported Video Source {}'.format(
                              self._videoSourceType))
        else:
            self._videoSourceState = CaptureDeviceState.Init

            if self._videoSourceType == CaptureDevice.Hololens:
                strHololens = realVideoPath.split('?')
                # disable audio
                realVideoPath = '{}?holo=true&pv=true&mic=false&loopback=false'.format(
                    strHololens[0])

            self.videoStream = VideoStream(videoCapture=self,
                                           path=realVideoPath)

            fps_override = 30

            if not self.videoStream.videoCapture == None:

                # get resolution
                cameraH1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                cameraW1 = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                cameraFPS1 = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                if self._videoSourceType == CaptureDevice.Webcam:

                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Rtsp:

                    if not cameraH1 == self._displayH:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_HEIGHT, self._displayH)
                    if not cameraW1 == self._displayW:
                        self.videoStream.videoCapture.set(
                            cv2.CAP_PROP_FRAME_WIDTH, self._displayW)

                elif self._videoSourceType == CaptureDevice.Hololens:

                    holo_w = 1280
                    holo_h = 720

                    if 'live_med.mp4' in realVideoPath:
                        holo_w = 854
                        holo_h = 480
                    elif 'live_low.mp4' in realVideoPath:
                        holo_w = 428
                        holo_h = 240
                        fps_override = 15

                    self.videoStream.videoCapture.set(
                        cv2.CAP_PROP_FRAME_HEIGHT, holo_h)
                    self.videoStream.videoCapture.set(cv2.CAP_PROP_FRAME_WIDTH,
                                                      holo_w)

                self.videoStream.videoCapture.set(cv2.CAP_PROP_FPS,
                                                  fps_override)

                self._cameraH = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_HEIGHT))
                self._cameraW = int(
                    self.videoStream.videoCapture.get(
                        cv2.CAP_PROP_FRAME_WIDTH))
                self._cameraFPS = int(
                    self.videoStream.videoCapture.get(cv2.CAP_PROP_FPS))

                logging.info(
                    '==============================================================='
                )
                logging.info(
                    'Setting Video Capture with the following parameters:')
                logging.info('   - Video Source Type  : {}'.format(
                    self._videoSourceType))
                logging.info('   - Display Resolution : {} x {}'.format(
                    self._displayW, self._displayH))
                logging.info('   Original             : {} x {} @ {}'.format(
                    cameraW1, cameraH1, cameraFPS1))
                logging.info('   New                  : {} x {} @ {}'.format(
                    self._cameraW, self._cameraH, self._cameraFPS))
                logging.info(
                    '==============================================================='
                )

                if self.videoStream.start():
                    self._videoSourceState = CaptureDeviceState.Running
                    retVal = True
                else:
                    self._videoSourceState = CaptureDeviceState.ErrorRead
            else:

                if self._videoSourceType == CaptureDevice.Hololens or self._videoSourceType == CaptureDevice.Rtsp:
                    url_parsed = urlparse(realVideoPath)

                    if url_parsed.password != None or url_parsed.username != None:
                        url_parsed = url_parsed._replace(
                            netloc="{}".format(url_parsed.hostname))

                    ipAddress = url_parsed.netloc

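                    # Ping the host to tell "camera unreachable" apart from
                    # "stream reachable but refused to open".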
                    ping_ret = subprocess.call(
                        ['ping', '-c', '5', '-W', '3', ipAddress],
                        stdout=open(os.devnull, 'w'),
                        stderr=open(os.devnull, 'w'))

                    if ping_ret == 0:
                        self._videoSourceState = CaptureDeviceState.ErrorOpen
                    else:
                        self._videoSourceState = CaptureDeviceState.ErrorNoNetwork

                logging.error('>> ' + self.__class__.__name__ + "." +
                              sys._getframe().f_code.co_name +
                              '() : Failed to open Video Capture')

        self.videoPath = realVideoPath

        if retVal == False:
            self.set_Wallpaper(self._frame_wp_no_video)
        else:
            self._videoReadyEvent.set()

        self.sendCurrentVideoPath(realVideoPath)

        return retVal

    def get_display_frame(self):
        return self.displayFrame

    def set_status(self, device_status):
        self._videoSourceState = device_status

        if self._videoSourceState != CaptureDeviceState.Running:
            self.sendCurrentVideoPath("")

    def sendCurrentVideoPath(self, videoPath):

        if videoPath == "":
            video_path = self._remove_credential(self.videoPath)
        else:
            video_path = self._remove_credential(videoPath)

        logging.info('>> Current Video Status {}'.format(
            self._videoSourceState))

        if self.imageStreamHandler != None:
            if self._videoSourceState == CaptureDeviceState.Running:
                strUserName = ""
                strPassword = ""

                videoType = self._get_Video_Source_Type(videoPath)

                if videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
                    url_parsed = urlparse(videoPath)

                    if url_parsed.password != None:
                        strPassword = url_parsed.password
                    if url_parsed.username != None:
                        strUserName = url_parsed.username

                statusMsg = '{{\"DevicePath\":\"{}\",\"isSuccess\":{},\"UserName\":\"{}\",\"Password\":\"{}\"}}'.format(
                    video_path, 1, strUserName, strPassword)
            else:
                statusMsg = '{{\"DeviceStatus\":\"Error ({}): {}\",\"isSuccess\":{},\"UserName\":\"\",\"Password\":\"\"}}'.format(
                    self._videoSourceState, video_path, 0)
            self.imageStreamHandler.submit_write(statusMsg)

    def setVideoPathFromUI(self, json_Data):

        videoPath = ""
        json_Data = json.loads(json_Data)
        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name +
                     '() : {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["VideoPath"]))
        logging.info('>> {}'.format(json_Data["UserName"]))
        logging.info('>> {}'.format(json_Data["Password"]))

        videoType = self._get_Video_Source_Type(json_Data["VideoPath"])

        if videoType == CaptureDevice.Webcam:
            videoPath = json_Data["VideoPath"].strip()
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:
            url_parsed = urlparse(json_Data["VideoPath"].strip())

            if '@' in url_parsed.netloc or len(json_Data["UserName"]) == 0:
                # already contains password or user name not specified
                videoPath = json_Data["VideoPath"]
            else:
                url_parsed = url_parsed._replace(netloc='{}:{}@{}'.format(
                    json_Data["UserName"], json_Data["Password"],
                    url_parsed.netloc))
                videoPath = url_parsed.geturl()

        self.set_Video_Source(videoPath)

    def _remove_credential(self, videoPath):

        logging.info('>> ' + self.__class__.__name__ + "." +
                     sys._getframe().f_code.co_name + '()')

        ret_Path = ""
        videoType = self._get_Video_Source_Type(videoPath)

        if videoType == CaptureDevice.Webcam:
            ret_Path = videoPath
        elif videoType == CaptureDevice.Rtsp or videoType == CaptureDevice.Hololens:

            url_parsed = urlparse(videoPath)

            if url_parsed.password != None or url_parsed.username != None:
                url_parsed = url_parsed._replace(
                    netloc="{}".format(url_parsed.hostname))

            ret_Path = url_parsed.geturl()

        return ret_Path

    def set_Wallpaper(self, image):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        self.displayFrame = cv2.imencode('.jpg', image)[1].tobytes()

    def start(self):

        if self.verbose:
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        while True:
            if self._videoSourceState == CaptureDeviceState.Running:
                self._capture_in_progress = True
                self.__Run__()
                self._capture_in_progress = False
            else:

                if self._videoSourceState == CaptureDeviceState.ErrorOpen or self._videoSourceState == CaptureDeviceState.ErrorRead:
                    self.set_Wallpaper(self._frame_wp_no_video)

                if self._videoSourceType == CaptureDevice.Unknown:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Unknown Device')
                    time.sleep(1.0)
                else:
                    if self._debug:
                        logging.info('>> ' + self.__class__.__name__ + "." +
                                     sys._getframe().f_code.co_name +
                                     '() : Device Not Running')
                    # time.sleep(1.0)
                    logging.info('>> Video Ready Event Enter ---------------')
                    self._videoReadyEvent.wait()
                    logging.info('<< Video Ready Event Exit  ---------------')
                    self._videoReadyEvent.clear()

    def __Run__(self):

        if self.verbose:
            logging.info(
                '==============================================================='
            )
            logging.info('>> ' + self.__class__.__name__ + "." +
                         sys._getframe().f_code.co_name + '()')

        # Check camera's FPS
        if self._cameraFPS == 0:
            logging.error('Error : Could not read FPS')
            # raise Exception("Unable to acquire FPS for Video Source")
            return

        logging.info('>> Frame rate (FPS)     : {}'.format(self._cameraFPS))
        logging.info('>> Run Inference {}'.format(self.runInference))

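        # Per-frame time budget in milliseconds; used below to sleep off any
        # time left over after inference so we do not outrun the camera.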
        perFrameTimeInMs = 1000 / self._cameraFPS

        self.fps.start()
        self.fps.reset()

        while True:

            # Get current time before we capture a frame
            tFrameStart = time.time()
            frame = np.array([])
            captureRet = False

            if not self._videoSourceState == CaptureDeviceState.Running:
                break

            captureRet, frame = self.videoStream.read()

            if captureRet == False:
                self._videoSourceState = CaptureDeviceState.ErrorRead
                logging.error("ERROR : Failed to read from video source")
                break

            if frame.size > 0:

                # Run Object Detection
                if self.runInference:
                    self.yoloInference.runInference(frame, self._cameraW,
                                                    self._cameraH,
                                                    self.confidenceLevel)

                # Calculate FPS
                currentFPS = self.fps.fps()

                if (currentFPS > self._cameraFPS):
                    # Cannot go faster than Camera's FPS
                    currentFPS = self._cameraFPS

                # Add FPS Text to the frame
                cv2.putText(frame, "FPS " + str(currentFPS),
                            (10, int(30 * self._fontScale)),
                            cv2.FONT_HERSHEY_SIMPLEX, self._fontScale,
                            (0, 0, 255), 2)

                self.displayFrame = cv2.imencode('.jpg', frame)[1].tobytes()

            timeElapsedInMs = (time.time() - tFrameStart) * 1000

            if perFrameTimeInMs > timeElapsedInMs:
                # This is faster than image source (e.g. camera) can feed.
                waitTimeBetweenFrames = perFrameTimeInMs - timeElapsedInMs
                time.sleep(waitTimeBetweenFrames / 1000.0)

    def __exit__(self, exception_type, exception_value, traceback):

        self.imageServer.close()
        cv2.destroyAllWindows()
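
Example #5 also references CaptureDevice and CaptureDeviceState enumerations defined elsewhere in its project. A minimal sketch covering just the members the code above touches (member names are taken from the code; their values and the Enum base are assumptions):

from enum import Enum

class CaptureDevice(Enum):
    Unknown = 0
    Webcam = 1
    Rtsp = 2
    Hololens = 3

class CaptureDeviceState(Enum):
    Unknown = 0
    Init = 1
    Running = 2
    Stop = 3
    ErrorOpen = 4
    ErrorRead = 5
    ErrorNoNetwork = 6
    ErrorNotSupported = 7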
Example #6
class App(object):
    def __init__(self, video_src):
        # Raspberry Pi IP
        self.mdp = MyUdp()
        #self.server_address='http://%s:8000/stream.mjpg' % MyUdp.get_piIP('raspberrypi')
        #self.server_address='http://192.168.56.146:8080/?action=stream'
        #self.server_address='rtmp://127.0.0.1/live/stream'
        #self.server_address='rtmp://127.0.0.1:1935/dji'
        #self.server_address='http://192.168.56.240:8000/stream.mjpg'

        #self.server_address='http://192.168.56.240:8000/?action=stream'
        #self.server_address='http://192.168.191.3:8000/stream.mjpg'

        #self.server_address='rtsp://:192.168.40.118/1'
        self.server_address = 0
        #self.server_address='C:\\Users\\Carole\\Desktop\\as.mp4'
        #self.cam = video.create_capture(self.server_address)
        self.cam = WebcamVideoStream(self.server_address).start()
        ret, self.frame = self.cam.read()
        self.fish_cali = fish_calibration((560, 420))
        self.drag_start = None
        try:
            self.list_camshift = pickle.load(open("color_instant.p", "rb"))
        except:
            self.list_camshift = []
            print('Load color_instant.p FAIL...')
        else:
            print('Load color_instant.p Success!!!')

        #self.list_camshift=[]
        #self.show_backproj = False
        self.newcamshift = None
        self.selection = None
        self.lock = False
        self.lastorder = None
        self.track_box = []
        self.first_start = True
        self.car_found_first = False
        self.lastime = time.time()
        self.lastime_car = time.time()
        self.sumtime = 0
        self.car_lost_time = 0
        self.car_lost = False
        #self.count=0
        self.light = self.get_light()
        self.JK_flag = False
        self.swicht = False
        #self.list_camshift.append(self.get_car('red.jpg',0))
        #self.list_camshift.append(self.get_car('yellow.jpg',1))
        #H,S
        #self.mask_avoid=cv2.cvtColor(cv2.imread('C:\\Users\\nuc\\Desktop\\src\\mask_avoid_1.bmp'),cv2.COLOR_BGR2GRAY)
        self.mask_avoid = cv2.cvtColor(cv2.imread('mask_avoid_1.bmp'),
                                       cv2.COLOR_BGR2GRAY)

        self.BACKGROUND_PARAM = App.calc_HS(
            cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV))

        self.miste = True

        self.fps = FPS().start()

        # WiFi module IP

        while True:
            try:
                self.mdp.client_address = (socket.gethostbyname('Doit_WiFi'),
                                           8899)
                break
            except:
                print('Doit_WiFi NOT FOUND')

        # new car
        #self.mdp.client_address=('192.168.56.207', 8899)
        cv2.namedWindow('TUCanshift')
        cv2.setMouseCallback('TUCanshift', self.onmouse)
        hm = pyHook.HookManager()
        hm.KeyDown = self.onKeyBoard
        hm.HookKeyboard()

    def onKeyBoard(self, event):
        if event.Key == 'K':
            self.JK_flag = False
        if event.Key == 'J':
            self.JK_flag = True
        if event.Key == 'Escape':
            cv2.destroyAllWindows()
            self.cam.release()
            self.mdp.close()
            import os
            os._exit(0)
        if event.Key == 'W':
            print('W')
            self.mdp.send_message('guidance', (0, 5))
        if event.Key == 'S':
            print('S')
            self.mdp.send_message('back_car', (0, 0))
        #if event.Key=='B':
        #    if len(self.list_camshift)==2:
        #        pickle.dump(self.list_camshift,open("color_instant.p","wb"))
        #        print 'Dump Success!'

        return True

    def onmouse(self, event, x, y, flags, param):
        if self.lock:
            if event == cv2.EVENT_RBUTTONDOWN:
                self.pop_camshift()
                return
            if event == cv2.EVENT_LBUTTONDOWN and len(self.list_camshift) <= 1:
                self.drag_start = (x, y)
                self.newcamshift = mycamshift()
            if self.drag_start:
                xmin = min(x, self.drag_start[0])
                ymin = min(y, self.drag_start[1])
                xmax = max(x, self.drag_start[0])
                ymax = max(y, self.drag_start[1])
                self.selection = (xmin, ymin, xmax, ymax)
            if event == cv2.EVENT_LBUTTONUP:
                self.fps.reset()
                self.drag_start = None
                if self.newcamshift is not None and self.newcamshift.getHist(
                ) is not None:
                    self.newcamshift.ID = len(self.list_camshift)
                    self.list_camshift.append(self.newcamshift)
                self.newcamshift = None
                self.selection = None
        return True

    def pop_camshift(self):
        if (len(self.list_camshift) < 1):
            return True
        cv2.destroyWindow(str(len(self.list_camshift) - 1))
        cv2.destroyWindow('%s%s' % ('cam', str(len(self.list_camshift) - 1)))
        self.list_camshift.pop()
        return False

    @staticmethod
    def creat_camshift_from_img(hsv):
        camshift = mycamshift()
        mask = np.ones((hsv.shape[0], hsv.shape[1]), dtype=np.uint8)
        camshift.preProcess(hsv, mask, (0, 0, hsv.shape[1], hsv.shape[0]), 32)
        return camshift

    def get_light(self):
        temp = mycamshift()
        temp.prProcess_light(self.frame)
        temp.ID = 99
        return temp

    def get_car(self, file, ID):
        img = cv2.imread(file, cv2.IMREAD_UNCHANGED)
        img = cv2.resize(img, (self.frame.shape[1], self.frame.shape[0]))
        hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
        temp = App.creat_camshift_from_img(hsv)
        cv2.imshow(str(ID), temp.getHist())
        temp.ID = ID

        return temp

    @staticmethod
    def calc_HS(hsv):
        H_hist = cv2.calcHist([hsv], [0], None, [180], [0, 180])
        H = H_hist.argmax(axis=None, out=None)
        S_hist = cv2.calcHist([hsv], [1], None, [255], [0, 255])
        S = S_hist.argmax(axis=None, out=None)
        return (H, S)

    def run(self):
        while True:
            if not (self.cam.renew and self.cam.grabbed):
                if not self.cam.grabbed:
                    self.mdp.send_message('lost')
                continue

            ret, self.frame = self.cam.read()
            #self.frame=cv2.resize(self.frame,(560,420),interpolation=cv2.INTER_AREA)

            #self.frame=cv2.GaussianBlur(self.frame,(5,5),2)

            #self.frame=cv2.medianBlur(self.frame,5)

            self.frame = self.fish_cali.cali(self.frame)

            imshow_vis = self.frame.copy()

            hsv = cv2.cvtColor(self.frame, cv2.COLOR_BGR2HSV)

            # Re-estimate the background colour every frame; comment this out
            # to keep a static background parameter instead.
            self.BACKGROUND_PARAM = App.calc_HS(hsv)

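            # Mask out pixels near the dominant background colour; the offsets
            # are presumably the H and S tolerances around BACKGROUND_PARAM,
            # with 3 morphology iterations to clean the result.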
            mask = mycamshift.filte_background_color(hsv,
                                                     self.BACKGROUND_PARAM,
                                                     offset1=16.,
                                                     offset2=90.,
                                                     iterations=3)
            if self.miste:
                cv2.imshow('fore_ground', mask)

            if self.newcamshift is not None:
                if self.newcamshift.preProcess(hsv, mask, self.selection, 24):
                    # str(ll) was a NameError on the first frame; name the
                    # window by the next tracker index instead.
                    cv2.imshow(str(len(self.list_camshift)),
                               self.newcamshift.getHist())

            light_gray = cv2.cvtColor(self.frame, cv2.COLOR_BGR2GRAY)
            #cv2.imshow('gray',light_gray)
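            # Otsu picks the global bright/dark split; the blend below then
            # pushes the cut 70% of the way from that split towards pure
            # white so only the brightest blob (the guide light) survives.
            # E.g. an Otsu level of 100 gives 100 + 0.7 * 155 = 208.5; the
            # 230 cap keeps over-exposed frames from masking everything out.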
            otsu, _ = cv2.threshold(light_gray, 0, 255,
                                    cv2.THRESH_BINARY + cv2.THRESH_OTSU)
            thresh = (255 - otsu) * 0.7 + otsu
            if thresh > 230:
                thresh = 230

            #_,light_gray=cv2.threshold(light_gray,thresh,255,cv2.THRESH_BINARY)

            _, light_gray_ths = cv2.threshold(light_gray, thresh, 255,
                                              cv2.THRESH_BINARY)
            light_gray = cv2.bitwise_and(light_gray,
                                         light_gray,
                                         mask=cv2.bitwise_and(
                                             mask, light_gray_ths))
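            # Morphological opening with a 3x3 ellipse (2 iterations) erases
            # isolated specular speckles while the larger light blob survives.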
            light_gray = cv2.morphologyEx(light_gray,
                                          cv2.MORPH_OPEN,
                                          cv2.getStructuringElement(
                                              cv2.MORPH_ELLIPSE, (3, 3)),
                                          iterations=2,
                                          borderType=cv2.BORDER_REPLICATE)

            if self.miste:
                cv2.imshow('light', light_gray)

            self.track_box = [self.light.go_once_gray(light_gray)]

            if self.track_box[0] is None:
                self.sumtime += time.time() - self.lastime
            else:
                self.sumtime = 0
                self.first_start = True
            self.lastime = time.time()
            #print self.sumtime

            if self.sumtime > 0.5 and self.first_start:
                print 'lost light GUIDANCE'
                self.track_box = [(self.frame.shape[1] / 2,
                                   self.frame.shape[0] / 2)]
            if self.sumtime > 3600:
                self.sumtime = 36

            self.lock = False
            ll = len(self.list_camshift)
            if ll > 0:
                #mask_hsv=cv2.bitwise_and(hsv,hsv,mask=mask)
                #cv2.imshow('mask_hsv',mask_hsv)
                for x in self.list_camshift:
                    self.track_box.append(x.go_once(hsv, mask))
                #prob=self.list_camshift[ll-1].prob
                #if self.show_backproj and prob is not None:
                #    self.frame=prob[...,np.newaxis]
            else:
                cv2.putText(imshow_vis, 'Wait for START', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,
                            cv2.LINE_AA)
                #self.mdp.send_message('lost')
            self.lock = True

            n = len(self.track_box)
            #if n>2:
            if n == 3:
                p3 = self.track_box[0]
                p1, p2 = self.track_box[n - 2:]
                # Each tracker returns a rotated rect ((cx, cy), (w, h), angle)
                # or None on failure; keep only the centre points.
                p1 = p1[0] if p1 is not None else None
                p2 = p2[0] if p2 is not None else None
                p3 = p3[0] if p3 is not None else None
                if p1 and p2:
                    self.car_found_first = True
                    try:
                        # snap(img, p1, p2, obstacle scan range, obstacle scan
                        # width, trim: keeps the car's own head from being
                        # detected as an obstacle)
                        #theta,D,dst=snap(mask,p1,p2,4.5,0.75,2.0,2.2)
                        #theta,D,dst=snap_c(mask,p1,p2,4.5,1.7,1.7,1.65)

                        # new car
                        #theta,D,dst=snap(mask,p1,p2,8.0,0.9,2.2,2.2)
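                        # During the final approach, swap the obstacle mask for
                        # the light-only mask, presumably so the guide light
                        # itself is not flagged as an obstacle; the two
                        # angle/distance gates below look like hand-tuned
                        # tolerances.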
                        if p3:
                            try:
                                t_guidance, d_guidance = get_direction(
                                    p1, p2, p3)
                            except:
                                t_guidance = None
                                d_guidance = None
                            if t_guidance is not None and d_guidance is not None:
                                if abs(t_guidance) < 70 and d_guidance < 4:
                                    mask = light_gray
                                if abs(t_guidance) < 20 and d_guidance < 7:
                                    mask = light_gray

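                        # snap_test scans the mask along the p1->p2 heading;
                        # by analogy with the commented snap() call above,
                        # 5.0/0.9 would be the scan range and width and
                        # 2.2/2.2 the trims. It returns the obstacle bearing
                        # theta, distance D and a debug view dst.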
                        theta, D, dst = snap_test(mask, self.mask_avoid, p1,
                                                  p2, 5.0, 0.9, 2.2, 2.2)
                        dst = cv2.resize(dst, (400, 200))
                        if self.miste:
                            cv2.imshow('snap', dst)
                        if theta is not None:
                            mes = (int(theta), int(D))
                            self.mdp.send_message('avoid', mes)

                            self.lastorder = ('avoid', mes)

                            #print('Block ahead')
                            cv2.putText(imshow_vis, 'Block ahead:%s,%s' % mes,
                                        (10, 230), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (255, 255, 255), 1, cv2.LINE_AA)
                            #print(mes)

                        elif p3:
                            t, d = get_direction(p1, p2, p3)
                            mes = (int(t), int(d))
                            self.mdp.send_message('guidance', mes)

                            self.lastorder = ('guidance', mes)

                            #print('guidance')
                            cv2.putText(imshow_vis, 'Guidance:%s,%s' % mes,
                                        (10, 230), cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (255, 255, 255), 1, cv2.LINE_AA)
                            #print mes
                        else:
                            cv2.putText(imshow_vis, 'Target LOST', (10, 230),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1,
                                        (255, 255, 255), 1, cv2.LINE_AA)
                            #self.mdp.send_message('lost')
                    except:
                        cv2.putText(imshow_vis, '0/0 error', (10, 230),
                                    cv2.FONT_HERSHEY_SIMPLEX, 1,
                                    (255, 255, 255), 1, cv2.LINE_AA)
                        self.mdp.send_message('lost')
                else:
                    cv2.putText(imshow_vis, 'Car LOST', (10, 230),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                1, cv2.LINE_AA)
                    self.car_lost = True
                    #self.mdp.send_message('lost')


            #elif n > 1:
            #    cv2.putText(imshow_vis, 'Wait for START', (10, 230),
            #                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,
            #                cv2.LINE_AA)
            #    self.mdp.send_message('lost')
            else:
                cv2.putText(imshow_vis, 'Wait for START', (10, 230),
                            cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 1,
                            cv2.LINE_AA)

                self.mdp.send_message('lost')

            if self.car_lost and self.first_start:
                self.car_lost = False
                self.car_lost_time += time.time() - self.lastime_car
            else:
                self.car_lost_time = 0
            self.lastime_car = time.time()

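            # If the car has been invisible for more than 0.5 s, re-send the
            # last order with the turn component pushed to at least |50| so
            # the car keeps swinging towards where it was last seen.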
            if self.car_lost_time > 0.5 and self.car_found_first and self.first_start and self.lastorder is not None:
                o, m = self.lastorder
                if 0 <= m[0] < 50:
                    m = (50, m[1])
                elif -50 < m[0] < 0:
                    m = (-50, m[1])
                self.mdp.send_message(o, m)
                print 'car LOST guidance'

            # Draw every live track as a red ellipse plus its rotated box;
            # None entries (lost trackers) raise and are simply skipped.
            for x in self.track_box:
                try:
                    cv2.ellipse(imshow_vis, x, (0, 0, 255), 2)
                    pts = np.int0(cv2.boxPoints(x))
                    cv2.polylines(imshow_vis, [pts], True, 255, 2)
                except:
                    pass

            fps = self.fps.approx_compute()
            # print("FPS: {:.3f}".format(fps))
            cv2.putText(imshow_vis, 'FPS {:.3f}'.format(fps), (10, 10),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1,
                        cv2.LINE_AA)

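            # Live drag feedback: invert the pixels inside the current mouse
            # selection in place (vis_roi is a view into imshow_vis).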
            if self.selection is not None:
                x0, y0, x1, y1 = self.selection
                vis_roi = imshow_vis[y0:y1, x0:x1]
                cv2.bitwise_not(vis_roi, vis_roi)

            if self.miste:
                cv2.imshow('TUCanshift', imshow_vis)

            #if not self.first_start:
            #    print 'WAIT...'
            #else:
            #    print 'GO'

            ch = cv2.waitKey(1)

            if ch == ord('b'):
                if len(self.list_camshift) == 2:
                    pickle.dump(self.list_camshift,
                                open('color_instant.p', 'wb'))
                    print 'Dump Success!'
                else:
                    print 'Not all targets picked -_-'

            #if ch == 27:
            #    break
            #if ch==ord('r'):
            #    self.BACKGROUND_PARAM=App.calc_HS(hsv)
            #    self.first_start=False
            #if ch==ord('w'):
            #    self.mdp.send_message('guidance',(0,10))
            #if ch==ord('s'):
            #    self.mdp.send_message('back_car',(0,0))
            #if ch==ord('['):
            #    self.miste=not self.miste
            while self.JK_flag:
                try:
                    self.mdp.client_address = (
                        socket.gethostbyname('Doit_WiFi'), 8899)
                    self.mdp.send_message('lost')
                except socket.error:
                    print 'Doit_WiFi NOT FOUND'
                else:
                    print 'NEW IP:%s' % self.mdp.client_address[0]
                cv2.waitKey(30)

        cv2.destroyAllWindows()
        self.cam.release()
        self.mdp.close()