Example #1
def init_cameras():
    # capture thermal frames
    cap_thermal = VideoCaptureAsync(thermal=True)
    cap_thermal.start()
    # capture normal (visible-light) frames
    cap_normal = VideoCaptureAsync(thermal=False)
    cap_normal.start()
    return cap_normal, cap_thermal
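
Note: the VideoCaptureAsync class itself is not shown in any of these examples. A minimal sketch of such a threaded wrapper, assuming only the interface used here (start(), read(), stop(), set()), could look like the following; the thermal= keyword in Example #1 is project-specific and omitted.

import threading
import cv2

class VideoCaptureAsync:
    """Reads frames on a background thread so read() never blocks."""

    def __init__(self, src=0):
        self.cap = cv2.VideoCapture(src)
        self.grabbed, self.frame = self.cap.read()
        self.started = False
        self.lock = threading.Lock()

    def set(self, prop, value):
        self.cap.set(prop, value)

    def start(self):
        if self.started:
            return self
        self.started = True
        self.thread = threading.Thread(target=self._update, daemon=True)
        self.thread.start()
        return self

    def _update(self):
        # keep overwriting the latest frame until stop() is called
        while self.started:
            grabbed, frame = self.cap.read()
            with self.lock:
                self.grabbed, self.frame = grabbed, frame

    def read(self):
        with self.lock:
            frame = self.frame.copy() if self.frame is not None else None
            return self.grabbed, frame

    def stop(self):
        self.started = False
        if hasattr(self, 'thread'):
            self.thread.join()
        self.cap.release()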
Example #2
def main():
    input_video_filepath = sys.argv[1]
    write_video_flag = False
    output_video_filepath = ""
    if len(sys.argv) > 2:
        output_video_filepath = sys.argv[2]
        write_video_flag = True
    async_video_flag = False

    config = OmegaConf.load("config.yaml")
    detector = LineCrossingDetector(config)
    counters = [PeopleCounter(**c) for c in config.people_counter]

    if async_video_flag:
        video_capture = VideoCaptureAsync(input_video_filepath)
    else:
        video_capture = cv2.VideoCapture(input_video_filepath)

    if async_video_flag:
        video_capture.start()

    if write_video_flag:
        w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
        h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))

        fourcc = cv2.VideoWriter_fourcc(*"XVID")
        output_writer = cv2.VideoWriter(output_video_filepath, fourcc, 30,
                                        (w, h))

    frame_index = 0
    pbar = tqdm(total=int(video_capture.get(cv2.CAP_PROP_FRAME_COUNT)))
    while frame_index < 12000:
        ret, frame = video_capture.read()
        frame_index += 1
        if not ret:
            break
        # skip ahead; only frames 10000-11999 are processed
        if frame_index < 10000:
            continue

        detections = detector.detect(frame, visualize=True)
        for counter in counters:
            counter.update(detections, frame_index)
            counter.visualize(frame)
        for d in detections:
            print(
                f"Frame: {frame_index}. Track id: {d.track_id}. Line id: {d.line_id}"
            )
        if write_video_flag:
            output_writer.write(frame)
        pbar.update()

    if async_video_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if write_video_flag:
        output_writer.release()
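
Note: the config.yaml consumed above is not included. A hypothetical equivalent built in code with OmegaConf, assuming people_counter is a list of keyword-argument mappings for PeopleCounter, might be:

from omegaconf import OmegaConf

# hypothetical keys; the real LineCrossingDetector/PeopleCounter
# parameters are project-specific and assumed here
config = OmegaConf.create({
    "people_counter": [
        {"line_id": 0, "direction": "up"},
        {"line_id": 1, "direction": "down"},
    ],
})
OmegaConf.save(config, "config.yaml")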
Example #3
def main(width=640, height=360, k=False):
    last_detected = datetime(1990, 1, 1)
    if k:
        cap = VideoCaptureAsync(0)
    else:
        cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if k:
        cap.start()
    t0 = time.time()
    i = 0
    net = Detector(bytes("cfg/yolo-lite.cfg", encoding="utf-8"),
                   bytes("moirai.weights", encoding="utf-8"), 0,
                   bytes("obj.data", encoding="utf-8"))
    while True:
        r, frame = cap.read()
        if r:
            dark_frame = Image(frame)
            results = net.detect(dark_frame)
            del dark_frame

            for cat, score, bounds in results:
                x, y, w, h = bounds
                cv2.rectangle(frame, (int(x - w / 2), int(y - h / 2)),
                              (int(x + w / 2), int(y + h / 2)), (255, 0, 255))
            if len(results) > 0:
                if datetime.now() > last_detected + timedelta(seconds=6):
                    last_detected = datetime.now()
                    prob = results[0][1]
                    requests.post('http://192.168.6.219:8080/area/alert',
                                  data={
                                      "cam_id": 1,
                                      "prob": prob
                                  })
        cv2.imshow('Frame', frame)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    if k:
        cap.stop()
    cv2.destroyAllWindows()
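
Note: requests.post blocks the capture loop and raises on network errors. A sketch of a safer fire-and-forget helper for the alert endpoint used above:

import requests

ALERT_URL = 'http://192.168.6.219:8080/area/alert'  # endpoint from the example

def send_alert(cam_id, prob, timeout=1.0):
    """Send an alert; a failed POST must not kill the capture loop."""
    try:
        requests.post(ALERT_URL, data={"cam_id": cam_id, "prob": prob},
                      timeout=timeout)
    except requests.RequestException as e:
        print("alert failed:", e)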
Example #4
def test(n_frames=500, width=1280, height=720, asyncr=False):
    if asyncr:
        cap = VideoCaptureAsync(0)
        print("Using asynchronous capture")
    else:
        cap = cv2.VideoCapture(0)
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        print("Using synchronous capture")
    if asyncr:
        cap.start()
    t0 = time.time()
    i = 0
    while i < n_frames:
        _, frame = cap.read()
        cv2.imshow('Frame', frame)
        cv2.waitKey(1) & 0xFF
        i += 1
        print('[i] Frames per second: {:.2f}, asyncr={}'.format(
            i / (time.time() - t0), asyncr))
    if asyncr:
        cap.stop()
    cv2.destroyAllWindows()
Example #5
def test(n_frames=500, width=1280, height=720, async_flag=False):
    if async_flag:
        cap = VideoCaptureAsync(0)
        # does not support using captured video
        # cap = VideoCaptureAsync('407156.avi')
    else:
        cap = cv2.VideoCapture(0)
    cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    if async_flag:
        cap.start()
    t0 = time.time()
    i = 0
    while i < n_frames:
        _, frame = cap.read()
        cv2.imshow('Frame', frame)
        cv2.waitKey(1) & 0xFF
        i += 1
    print('[i] Frames per second: {:.2f}, async_flag={}'.format(
        n_frames / (time.time() - t0), async_flag))
    if async_flag:
        cap.stop()
    cv2.destroyAllWindows()
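
Note: a typical way to run this benchmark and compare the threaded and plain capture paths:

if __name__ == '__main__':
    test(n_frames=500, width=1280, height=720, async_flag=False)
    test(n_frames=500, width=1280, height=720, async_flag=True)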
Example #6
def test(n_frames=500, width=1280, height=720):

    cap1 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch1")
    cap2 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch2")
    cap3 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch3")
    cap4 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch4")
    caps = (cap1, cap2, cap3, cap4)
    for cap in caps:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    for cap in caps:
        cap.start()
    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()

        cv2.imshow('Frame1', frame1)
        cv2.imshow('Frame2', frame2)
        cv2.imshow('Frame3', frame3)
        cv2.imshow('Frame4', frame4)

        if cv2.waitKey(1) == 27:  # press Esc to quit
            break

    for cap in caps:
        cap.stop()
    cv2.destroyAllWindows()
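
Note: four separate windows are awkward to watch. A sketch that tiles the streams into one 2x2 mosaic with NumPy, assuming all four frames share the same size:

import numpy as np

def mosaic_2x2(f1, f2, f3, f4):
    """Tile four equally sized frames into a single 2x2 grid image."""
    top = np.hstack((f1, f2))
    bottom = np.hstack((f3, f4))
    return np.vstack((top, bottom))

# cv2.imshow('Quad', mosaic_2x2(frame1, frame2, frame3, frame4))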
Example #7
class Recognition():

    # global parameters for the second window
    def distance(self, accuracy, name):
        # given the match accuracy and a name, render a loading-bar image
        load = accuracy * 270
        color = (0, 0, 255)
        image = np.zeros((40, 300, 3), np.uint8)
        cv2.rectangle(image, (0, 0), (300, 50), (255, 255, 255), cv2.FILLED)
        cv2.putText(image, name, (10, 15), cv2.FONT_HERSHEY_DUPLEX, 0.5,
                    (125, 125, 0), 1)
        cv2.rectangle(image, (10, 20), (int(load) + 15, 30), color, cv2.FILLED)
        return image

    def record_date_hour(self, name):
        # insert name, date and hour into the detection log
        date = strftime("%Y/%m/%d", gmtime())
        hour = strftime("%H:%M:%S", gmtime())
        try:
            connection = psycopg2.connect(
                "dbname=registros user=reddy password=123456 host=localhost port=5432"
            )
        except psycopg2.Error as e:
            print("connection failed:", e)
            return
        cursor = connection.cursor()
        postgres_insert_query = """ INSERT INTO deteccion (name, fecha, hora) VALUES (%s, %s, %s)"""

        # psycopg2 quotes the parameters itself; no manual quoting needed
        record_to_insert = (name, date, hour)
        cursor.execute(postgres_insert_query, record_to_insert)
        connection.commit()
        cursor.close()
        connection.close()

    def dahua(self, name, actual_img, accuracy):
        path = "knowFaces/" + name.lower() + ".png"
        db_img = cv2.imread(path)
        db_img = cv2.resize(db_img, (150, 150), interpolation=cv2.INTER_CUBIC)
        un_img = np.concatenate((db_img, actual_img), axis=1)
        un_img = np.concatenate((un_img, self.distance(accuracy, name)),
                                axis=0)
        self.record_date_hour(name)
        if (self.first):
            self.first = False
            cv2.imshow("Board", un_img)
        else:
            final = np.concatenate((un_img, self.pastUnion), axis=0)
            cv2.imshow("Board", final)
        self.pastUnion = un_img

        return

    def face_enc(self, face_image, known_face_locations=None, num_jitters=1):
        """
        Given an image, return the 128-dimension face encoding for each face in the image.
        :param face_image: The image that contains one or more faces
        :param known_face_locations: Optional - the bounding boxes of each face if you already know them.
        :param num_jitters: How many times to re-sample the face when calculating encoding. Higher is more accurate, but slower (i.e. 100 is 100x slower)
        :return: A list of 128-dimensional face encodings (one for each face in the image)
        """
        raw_landmarks = face_recognition.api._raw_face_landmarks(
            face_image, known_face_locations)
        return [
            np.array(
                face_encoder.compute_face_descriptor(face_image,
                                                     raw_landmark_set,
                                                     num_jitters))
            for raw_landmark_set in raw_landmarks
        ]

    def getEncodingFaces(self, know_persons):
        i = 1
        for imag in know_persons:
            image = face_recognition.load_image_file(imag.getImgSrc())
            #self.faces_encoding.append(face_recognition.face_encodings(image, num_jitters=100)[0])
            self.faces_encoding.append(
                self.face_enc(image, num_jitters=100)[0])

            self.face_names.append(imag.getNombre())
            i = i + 1
        return self.faces_encoding, self.face_names

    def __init__(self):
        self.pastUnion = 2
        self.first = True
        self.path = "knowFaces/reddy.png"
        self.db_img = cv2.imread(self.path)
        self.db_img = cv2.resize(self.db_img, (150, 150),
                                 interpolation=cv2.INTER_CUBIC)

        self.cap = VideoCaptureAsync()
        self.cap.set(cv2.CAP_PROP_FRAME_WIDTH, 1280)
        self.cap.set(cv2.CAP_PROP_FRAME_HEIGHT, 720)
        self.cap.start()

        frame_width = 1280
        frame_height = 720

        # Define the codec and create VideoWriter object.The output is stored in 'outpy.avi' file.
        self.out = cv2.VideoWriter('outpy.avi',
                                   cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                   10, (1280, 720))

        self.face_record1 = "nadies"
        self.nombres = {}
        self.first = True
        self.accuracy = 2

        self.know_persons = getKnowPersonsFromDB()
        self.faces_encoding = []
        self.face_names = []
        self.known_face_encodings, self.known_face_names = self.getEncodingFaces(
            self.know_persons)
        self.face_locations = []
        self.face_encodings = []
        self.face_names = []
        self.process_this_frame = True

        while True:
            ret, frame = self.cap.read()
            small_frame = cv2.resize(frame, (0, 0), fx=1, fy=1)
            # fx=fy=1 keeps full resolution; use fx=fy=0.5 to halve it
            rgb_small_frame = small_frame[:, :, ::-1]
            if self.process_this_frame:
                self.face_locations = face_recognition.face_locations(
                    rgb_small_frame, model="cnn")
                self.face_encodings = self.face_enc(rgb_small_frame,
                                                    self.face_locations,
                                                    num_jitters=100)
                self.face_names = []
                self.face_values = []

                for face_encoding in self.face_encodings:
                    self.matches = face_recognition.compare_faces(
                        self.known_face_encodings,
                        face_encoding,
                        tolerance=0.30)
                    self.name = "Unknown"
                    self.values = np.linalg.norm(self.known_face_encodings -
                                                 face_encoding,
                                                 axis=1)

                    if True in self.matches:
                        first_match_index = self.matches.index(True)
                        self.accuracy = self.values[
                            first_match_index]  #get the accuracy
                        self.name = self.known_face_names[first_match_index]
                    self.face_names.append(self.name)
                    self.face_values.append(self.accuracy)  #gui

            self.process_this_frame = not self.process_this_frame

            tratar = False
            for (top, right, bottom,
                 left), name, acc in zip(self.face_locations, self.face_names,
                                         self.face_values):
                """top *= 2
                right *= 2
                bottom *= 2
                left *= 2"""
                collight = (123, 123, 123)
                actual_img = cv2.resize(frame[top:bottom, left:right],
                                        (150, 150),
                                        interpolation=cv2.INTER_CUBIC)  #gui
                cv2.rectangle(frame, (left, top), (right, bottom), collight,
                              1)  #face bordes
                # compute the box height and width
                vertical = bottom - top
                horizontal = right - left
                # draw corner marks
                r = right
                l = left

                t = top
                b = bottom

                alpha = vertical / 4
                alpha = int(alpha)
                betha = 2 * alpha
                if (name == "Unknown"):
                    col = (255, 255, 255)

                else:
                    col = (241, 175, 14)
                cv2.line(frame, (l, t), (l, t + alpha), col, 2)
                cv2.line(frame, (l, t), (l + alpha, t), col, 2)

                cv2.line(frame, (r, t), (r - alpha, t), col, 2)
                cv2.line(frame, (r, t), (r, t + alpha), col, 2)

                cv2.line(frame, (l, b), (l + alpha, b), col, 2)
                cv2.line(frame, (l, b), (l, b - alpha), col, 2)

                cv2.line(frame, (r, b), (r - alpha, b), col, 2)
                cv2.line(frame, (r, b), (r, b - alpha), col, 2)

                alpha = 10
                cv2.line(frame, (l - alpha, t + betha), (l + alpha, t + betha),
                         collight, 1)
                cv2.line(frame, (r - alpha, t + betha), (r + alpha, t + betha),
                         collight, 1)
                cv2.line(frame, (l + betha, t - alpha), (l + betha, t + alpha),
                         collight, 1)
                cv2.line(frame, (l + betha, b - alpha), (l + betha, b + alpha),
                         collight, 1)

                #print("vertical: ", vertical)
                #print("horizontal: ", horizontal)

                #cv2.rectangle(frame, (left, bottom - 35), (right, bottom), (123, 123, 123), cv2.FILLED) #space for name
                cv2.putText(frame, name, (left + 6, bottom - 6),
                            cv2.FONT_HERSHEY_DUPLEX, 0.70, (255, 255, 0), 1)
                print("nombres: ", self.nombres)

                if self.name in self.nombres:
                    self.nombres[self.name] += 1
                else:
                    # first sighting of this name
                    self.nombres[self.name] = 1

                if (name != "Unknown" and self.nombres[self.name] == 7):
                    if (self.face_record1 != self.name):
                        self.dahua(
                            self.name, actual_img,
                            acc)  #causa 50fps con 0.02 y el mas bajo 0.001
                        self.face_record1 = name
                    self.nombres[self.name] = 1
            #self.out.write(frame)
            cv2.imshow('Video', frame)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        self.cap.stop()
        print(self.out.release())
        #out.release()
        cv2.destroyAllWindows()
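
Note: psycopg2 connections and cursors support context managers, which makes the logging helper in record_date_hour shorter and less leak-prone. A sketch with the same hypothetical credentials; the connection's with-block commits the transaction but does not close the connection, so that is still done explicitly:

import psycopg2
from time import gmtime, strftime

def record_detection(name):
    connection = psycopg2.connect(dbname="registros", user="reddy",
                                  password="123456", host="localhost",
                                  port=5432)
    try:
        # commits on success, rolls back on error
        with connection:
            with connection.cursor() as cursor:
                cursor.execute(
                    "INSERT INTO deteccion (name, fecha, hora) VALUES (%s, %s, %s)",
                    (name, strftime("%Y/%m/%d", gmtime()),
                     strftime("%H:%M:%S", gmtime())))
    finally:
        connection.close()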
Example #8
class ListScreen(Screen):
    def __init__(self, **kwargs):
        super(ListScreen, self).__init__(**kwargs)
        # run clock
        Clock.schedule_interval(self.init_gui, 0.2)
        Clock.schedule_interval(self.show_status, 0.03)

    def init_gui(self, dt):
        self.new_file()
        Clock.unschedule(self.init_gui)
        Clock.schedule_interval(self.cam_update, 0.03)

    def cam_update(self, dt):
        try:
            _, frame = self.capture.read()
            buf1 = cv2.flip(frame, 0)
            buf = buf1.tobytes()  # tostring() is removed in recent NumPy
            texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='rgb')
            texture1.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
            self.ids['img_cam'].texture = texture1
        except Exception:
            # camera not connected or frame not ready yet; skip this tick
            pass

    #### File menu
    def exit_app(self):
        self.camera_disconnect()
        self.printer_disconnect()
        App.get_running_app().stop()
    def new_file(self):
        ## File menu / New Project
        self.init_project()

    def init_project(self):
        # init data
        self.project_file_path = ""
        self.project_data=data.init_project_data()
        self.project_data['CADMode']="None"
        self.ids["img_cad_origin"].set_cad_view(self.project_data)
        self.ids["img_cad_origin"].redraw_cad_view()
        self.capture = None
        self.print = None
        self.paneldisselection=[]
        try:
            self.camera_disconnect()
            self.camera_connect()
            self.printer_disconnect()
            self.printer_connect()
        except Exception as e:
            print(e, "cam or printer start problem")
            pass

    def load_file(self):
        ### File Menu / Load project
        content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
        self._popup = Popup(title="Load file", content=content,
                            size_hint=(0.9, 0.9))
        self._popup.open()
        self.project_data['CADMode']="None"

    def load(self, path, filename):
        ### click load button of Loading Dialog
        ### load all info from pre saved project file
        try:
            ### if proper project file
            self.project_file_path =  os.path.expanduser(filename[0])
            self.project_data=data.read_project_data(self.project_file_path)
            self.paneldisselection=[]
            try:
                self.camera_disconnect()
                self.camera_connect()
                self.printer_disconnect()
                self.printer_connect()
            except Exception as e:
                print(e, "cam or printer start problem")
                pass

            self.ids["img_cad_origin"].set_cad_view(self.project_data)
            self.ids["img_cad_origin"].redraw_cad_view()
        except Exception:
            ### if not proper project file
            print("Problem loading file")
            self.dismiss_popup()
            return
        self.dismiss_popup()

    def save_file(self):
        ### File Menu / Save Project
        if self.project_file_path == "":
            self.save_as_file()
        else:
            data.write_project_data(self.project_file_path, self.project_data)

    def save_as_file(self):
        ### File Menu / Save as Project
        content = SaveDialog(save=self.save, cancel=self.dismiss_popup)
        self._popup = Popup(title="Save file", content=content,
                            size_hint=(0.9, 0.9))
        self._popup.open()
        self.project_data['CADMode']="None"

    def save(self, path, filename):
        ### after click Save button of Save Dialog
        self.project_file_path = os.path.expanduser(os.path.join(path, filename))
        print(path, filename, self.project_file_path)
        data.write_project_data(self.project_file_path, self.project_data)
        self.dismiss_popup()


    #### program menu ####
    def import_file(self):
        ### Program menu / import NC drills
        content = ImportDialog(load=self.import_ncdrill, cancel=self.dismiss_popup)
        self._popup = Popup(title="Import file", content=content,
                            size_hint=(0.9, 0.9))
        self._popup.open()
        self.project_data['CADMode']="None"

    def import_ncdrill(self, path, filename):

        ### after click load button of Loading button
        try:
            ### if proper project file
            nc_file_path  = os.path.expanduser(filename[0])
            ncdata=excellon.load_nc_drill(nc_file_path)
            print("ncdata", ncdata)
            # convert tool list for selection
            self.project_data['NCTool']=excellon.convert_to_tools(ncdata)
            # convert soldering tool path
            self.project_data['SolderToolpath']=excellon.convert_to_json(ncdata)
            # redraw
            self.ids["img_cad_origin"].redraw_cad_view()

        except Exception:
            ### if not proper project file
            self.dismiss_popup()
            return

        self.dismiss_popup()


    def select_side(self):
        ### Program menu / Select Soldering Side
        side = [  { "text" : "Top", "is_selected" : self.project_data['SolderSide']=="Top" },
                { "text" : "Bottom", "is_selected" : self.project_data['SolderSide']=="Bottom" }  ]
        self.ignore_first=not side[0]['is_selected']

        content = ListPopup()
        args_converter = lambda row_index, rec: {'text': rec['text'], 'is_selected': rec['is_selected'], 'size_hint_y': None, 'height': 50}
        list_adapter = ListAdapter(data=side, args_converter=args_converter, propagate_selection_to_data=True, cls=ListItemButton, selection_mode='single', allow_empty_selection=False)
        list_view = ListView(adapter=list_adapter)

        content.ids.profile_list.add_widget(list_view)

        list_view.adapter.bind(on_selection_change=self.selected_side)

        self._popup = Popup(title="Select Soldering Side", content=content, size_hint=(0.5, 0.6))
        self._popup.open()
        self.project_data['CADMode']="None"

    def selected_side(self, adapter):
        if self.ignore_first:
            self.ignore_first=False
            return
        self.project_data['SolderSide']=adapter.selection[0].text
        self.dismiss_popup()
        self.ids["img_cad_origin"].redraw_cad_view()

    def select_profile(self):
        ### Program menu / Select Soldering Profile
        profile = excellon.convert_to_solderingprofile(self.project_data)
        if len(profile) < 1:
            return
        self.ignore_first=not profile[0]['is_selected']
        content = ListPopup()
        args_converter = lambda row_index, rec: {'text': rec['text'], 'is_selected': rec['is_selected'], 'size_hint_y': None, 'height': 50}
        list_adapter = ListAdapter(data=profile, args_converter=args_converter, propagate_selection_to_data=True, cls=ListItemButton, selection_mode='single', allow_empty_selection=False)
        list_view = ListView(adapter=list_adapter)

        content.ids.profile_list.add_widget(list_view)

        list_view.adapter.bind(on_selection_change=self.selected_profile)

        self._popup = Popup(title="Select Soldering Profile", content=content, size_hint=(0.5, 0.6))
        self._popup.open()
        self.project_data['CADMode']="None"

    def selected_profile(self, adapter):
        ### select profile on soldering profile list
        if self.ignore_first:
            self.ignore_first=False
            return
        num=excellon.get_solderingprofile_index_by_id(self.project_data['SolderingProfile']['SolderingProfile'], adapter.selection[0].text)
        self.project_data['SelectedSolderingProfile']=num
        self.dismiss_popup()
        self.ids["img_cad_origin"].redraw_cad_view()
        self.project_data['CADMode']="Select"

    def select_by_dia(self):
        ### Program Menu / Select soldering pad by diameter
        tools=self.project_data['NCTool']
        if len(tools) < 1:
            return
        self.ignore_first=not tools[0]['is_selected']

        content = ListPopup()
        args_converter = lambda row_index, rec: {'text': rec['text'], 'is_selected': rec['is_selected'], 'size_hint_y': None, 'height': 40}
        list_adapter = ListAdapter(data=tools, args_converter=args_converter, propagate_selection_to_data=True, cls=ListItemButton, selection_mode='single', allow_empty_selection=False)
        list_view = ListView(adapter=list_adapter)

        content.ids.profile_list.add_widget(list_view)
        list_view.adapter.bind(on_selection_change=self.selected_tools)

        self._popup = Popup(title="Select Soldering Pad by Tools", content=content, size_hint=(0.5, 0.7))
        self._popup.open()
        self.project_data['CADMode']="None"

    def selected_tools(self, adapter):
        ### select tool on tools' list
        if self.ignore_first:
            self.ignore_first=False
            return
        soldertoolpath=self.project_data['SolderToolpath']
        num=int(adapter.selection[0].text.split(":")[0])
        excellon.select_by_tool(soldertoolpath, num, self.project_data['SelectedSolderingProfile'])
        # redraw
        self.dismiss_popup()
        self.ids["img_cad_origin"].redraw_cad_view()

    def select_by_view(self):
        ### Program menu / Select Soldering pads by View
        self.project_data['CADMode']="Select"

    def deselect_by_view(self):
        ### Program Menu / Deselect by view
        self.project_data['CADMode']="Deselect"

    def set_reference1(self):
        ### Program Menu / Set Reference point 1
        self.project_data['CADMode']="Ref1"

    def set_reference2(self):
        ### Program Menu /  Set Reference Point 2
        self.project_data['CADMode']="Ref2"

    def optmize_nc(self):
        ### Program Menu / Optimize NC drills
        soldertoolpath=self.project_data['SolderToolpath']
        excellon.optimize_soldertoolpath(soldertoolpath)

    ##### panel menu
    def set_num_panel(self):
        num=excellon.get_num_panel(self.project_data['Panel'])
        content = EditPopup(save=self.save_panel_num, cancel=self.dismiss_popup)
        content.ids["btn_connect"].text = "Save"
        content.ids["text_port"].text = str(num)
        self._popup = Popup(title="Select panel num", content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data['CADMode']="None"

    def save_panel_num(self, txt_port):
        # set num of panels
        num  = int(txt_port)
        num = max(1, num)
        num = min(self.project_data['Setup']['MaxPanel'], num)
        excellon.set_num_panel(self.project_data['Panel'], num)
        self.dismiss_popup()

    def set_reference_panel(self):
        #  show dialpad
        print("ref")
        self.ids["tab_panel"].switch_to(self.ids["tab_panel"].tab_list[0])
        self.content = ControlPopup(controlXYZ=self.control_XYZ, set_panel_ref1=self.set_panel_ref1, set_panel_ref2=self.set_panel_ref2, get_panel_ref1=self.get_panel_ref1, get_panel_ref2=self.get_panel_ref2, cancel=self.dismiss_popup)
        self.content.ids["cur_X"].text = format(self.project_data['Setup']['TravelX'],".2f")
        self.content.ids["cur_Y"].text = format(self.project_data['Setup']['TravelY'],".2f")
        self.content.ids["cur_Z"].text = format(self.project_data['Setup']['TravelZ'],".2f")
        self.content.ids["cur_panel"].text = "1"
        self._popup = Popup(title="Set reference point", content=self.content,
                            size_hint=(0.4, 0.4), background_color=[0, 0, 0, 0.0])
        self._popup.pos_hint={"center_x": .8, "center_y": .8}
        self._popup.open()
        self.project_data['CADMode']="None"

        # home printer
        gcode=robotcontrol.go_home(self.project_data)
        self.queue_printer_command(gcode)

    def control_XYZ(self, axis, value):
        ### click any button on dialpad, calculate new position
        index=int(self.content.ids["cur_panel"].text)
        x=float(self.content.ids["cur_X"].text)
        y=float(self.content.ids["cur_Y"].text)
        z=float(self.content.ids["cur_Z"].text)

        if axis == "X":
            x += float(value)
        elif axis == "Y":
            y += float(value)
        elif axis == "Z":
            z += float(value)
        elif axis == "HomeXY":
            x=self.project_data['Setup']['TravelX']
            y=self.project_data['Setup']['TravelY']
        elif axis == "HomeZ":
            z=self.project_data['Setup']['TravelZ']

        index = max(1, index)
        index = min(self.project_data['Setup']['MaxPanel'], index)
        x=max(self.project_data['Setup']['MinX'],x)
        x=min(self.project_data['Setup']['MaxX'],x)
        y=max(self.project_data['Setup']['MinY'],y)
        y=min(self.project_data['Setup']['MaxY'],y)
        z=max(self.project_data['Setup']['MinZ'],z)
        z=min(self.project_data['Setup']['MaxZ'],z)

        self.content.ids["cur_panel"].text = str(index)
        self.content.ids["cur_X"].text = format(x,".2f")
        self.content.ids["cur_Y"].text = format(y,".2f")
        self.content.ids["cur_Z"].text = format(z,".2f")

        # go xyz printer
        gcode=robotcontrol.go_xyz(self.project_data,x,y,z)
        self.queue_printer_command(gcode)

    def set_panel_ref1(self):
        ### click set1 button on dialpad
        index=int(self.content.ids["cur_panel"].text)
        index=min(index,excellon.get_num_panel(self.project_data['Panel']))
        index=max(index,1)

        x=float(self.content.ids["cur_X"].text)
        y=float(self.content.ids["cur_Y"].text)
        z=float(self.content.ids["cur_Z"].text)
        excellon.set_panel_reference_1(self.project_data['Panel'], index-1, x, y, z)

    def set_panel_ref2(self):
        ### click set2 button on dialpad
        index=int(self.content.ids["cur_panel"].text)
        x=float(self.content.ids["cur_X"].text)
        y=float(self.content.ids["cur_Y"].text)
        z=float(self.content.ids["cur_Z"].text)
        excellon.set_panel_reference_2(self.project_data['Panel'], index-1, x, y, z)

    def get_panel_ref1(self):
        ### click on get1 button on dialpad
        index=int(self.content.ids["cur_panel"].text)
        x,y,z = excellon.get_panel_reference_1(self.project_data['Panel'], index-1)
        if x==-1 and y==-1 and z==-1:
            x=self.project_data['Setup']['TravelX']
            y=self.project_data['Setup']['TravelY']
            z=self.project_data['Setup']['TravelZ']
        self.content.ids["cur_X"].text = format(x,".2f")
        self.content.ids["cur_Y"].text = format(y,".2f")
        self.content.ids["cur_Z"].text = format(z,".2f")
        # go xyz printer
        gcode=robotcontrol.go_xyz(self.project_data,x,y,z)
        self.queue_printer_command(gcode)

    def get_panel_ref2(self):
        ### click on get2 button on dialpad
        index=int(self.content.ids["cur_panel"].text)
        x,y,z = excellon.get_panel_reference_2(self.project_data['Panel'], index-1)
        if x==-1 and y==-1 and z==-1:
            x=self.project_data['Setup']['TravelX']
            y=self.project_data['Setup']['TravelY']
            z=self.project_data['Setup']['TravelZ']
        self.content.ids["cur_X"].text = format(x,".2f")
        self.content.ids["cur_Y"].text = format(y,".2f")
        self.content.ids["cur_Z"].text = format(z,".2f")
        # go xyz printer
        gcode=robotcontrol.go_xyz(self.project_data,x,y,z)
        self.queue_printer_command(gcode)

    def select_pcb_in_panel(self):
        num=excellon.get_num_panel(self.project_data['Panel'])
        content = EditPopup(save=self.save_pcb_in_panel, cancel=self.dismiss_popup)
        content.ids["btn_connect"].text = "Save"
        content.ids["text_port"].text = ""
        self._popup = Popup(title="Select Panels to exclude from Soldering example \"1,2\"", content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data['CADMode']="None"

    def save_pcb_in_panel(self, txt_port):
        # set array of panels
        excludepanels=txt_port.split(",")
        panel=[]
        for p in range(excellon.get_num_panel(self.project_data['Panel'])):
            if str(p+1) in excludepanels:
                panel.append(p)
        self.paneldisselection=panel
        self.dismiss_popup()

    def start_soldering(self):
        ### toolbar start soldering button
        # prepare panel
        panel=[]
        for p in range(excellon.get_num_panel(self.project_data['Panel'])):
            if p not in self.paneldisselection:
                panel.append(p)
        # print
        print("panel", panel)
        gcode=robotcontrol.panel_soldering(self.project_data, panel, False)
        self.queue_printer_command(gcode)

    def test_soldering(self):
        ### toolbar test soldering button
        # prepare panel
        panel=[]
        for p in range(excellon.get_num_panel(self.project_data['Panel'])):
            if p not in self.paneldisselection:
                panel.append(p)
        # print
        gcode=robotcontrol.panel_soldering(self.project_data, panel, True)
        self.queue_printer_command(gcode)

    def pause_soldering(self):
        ### toolbar pause soldering button
        if self.print.printing:
            self.print.pause()

    def resume_soldering(self):
        ### toolbar resume soldering button
        if self.print.printing:
            self.print.resume()

    def stop_soldering(self):
        ### toolbar stop soldering button
        if self.print.printing:
            self.print.cancelprint()

    def queue_printer_command(self, gcode):
        garray=robotcontrol.make_array(gcode)
        #print("gcode raw", gcode, garray)

        gcoded = gcoder.LightGCode(garray)
        #print("gcoded", gcoded)
        if hasattr(self,'print') and self.print is not None:
            if not self.print.online or not self.print.printer:
                print("Problem with printer", self.print.online, self.print.printer)
            if self.print.printing:
                self.print.send(gcoded)
            else:
                self.print.startprint(gcoded)
        else:
            print("Problem with printer interface")
    #### Connect menu
    def set_printer(self):
        ### Connect Menu /  Connect Printer
        content = EditPopup(save=self.save_printer_port, cancel=self.dismiss_popup)
        content.ids["text_port"].text = self.project_data['Setup']['RobotPort']
        self._popup = Popup(title="Select Printer port", content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data['CADMode']="None"

    def save_printer_port(self, txt_port):
        self.project_data['Setup']['RobotPort'] = txt_port
        try:
            self.printer_disconnect()
            self.printer_connect()
        except  Exception as e:
            print(e,"exception save robot port")
            pass
        self.dismiss_popup()

    def set_camera(self):
        # set camera device
        content = EditPopup(save=self.save_camera_port, cancel=self.dismiss_popup)
        content.ids["text_port"].text = self.project_data['Setup']['CameraPort']
        self._popup = Popup(title="Select Camera port", content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data['CADMode']="None"

    def save_camera_port(self, txt_port):
        self.project_data['Setup']['CameraPort'] = txt_port
        try:
            self.camera_disconnect()
            self.camera_connect()
        except  Exception as e:
            print(e,"exception save cam port")
            pass
        self.dismiss_popup()

    def dismiss_popup(self):
        self._popup.dismiss()

    def camera_connect(self):
        ### connect camera
        self.capture = VideoCaptureAsync(self.project_data['Setup']['CameraPort'])
        self.capture.start()

    def camera_disconnect(self):
        if self.capture is not None:
            self.capture.stop()
            self.capture = None

    def printer_connect(self):
        if self.print is None:
            self.print = printcore(self.project_data['Setup']['RobotPort'], 115200)

    def printer_disconnect(self):
        if self.print is not None:
            self.print.disconnect()
            self.print = None

    def show_status(self, dt):
        self.ids["lbl_layer_status"].text="Layer: "+self.project_data['SolderSide']
        self.ids["lbl_cad_status"].text="CADMode: "+self.project_data['CADMode']

        profile=excellon.get_list_soldering_profile(self.project_data['SolderingProfile'])
        self.ids["lbl_profile_status"].text="Profile: "+profile[self.project_data['SelectedSolderingProfile']]
        if hasattr(self,'capture') and self.capture is not None:
            self.ids["lbl_cam_status"].text="Camera: Connected"
        else:
            self.ids["lbl_cam_status"].text="Camera: Not Found"
        #printer
        if hasattr(self,'print') and self.print is not None:
            if self.print.printer is None:
                self.ids["lbl_printer_status"].text="Robot: No 3d printer found"
            elif self.print.printing:
                if len(self.print.mainqueue)>0:
                    self.ids["lbl_printer_status"].text="Robot: Soldering "+ str(round(float(self.print.queueindex) / len(self.print.mainqueue)*100,2))+"%"
                else:
                    self.ids["lbl_printer_status"].text="Robot: Soldering"
            elif self.print.online:
                self.ids["lbl_printer_status"].text="Robot: Idle"
            else:
                self.ids["lbl_printer_status"].text="Robot: Connected"
        else:
            self.ids["lbl_printer_status"].text="Robot: Not Found"
Example #9
def test(n_frames=500, width=1280, height=720):

    cap1 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch1")
    cap2 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch2")
    cap3 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch3")
    cap4 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch4")
    caps = (cap1, cap2, cap3, cap4)
    for cap in caps:
        cap.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        cap.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    newcameramtx, roi = cv2.getOptimalNewCameraMatrix(camera_matrix,
                                                      dist_coefs, (720, 480),
                                                      1, (720, 480))
    x, y, w, h = roi
    #M = cv2.getRotationMatrix2D((720,480),5,2)

    for cap in caps:
        cap.start()
    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()
        frame11 = cv2.undistort(frame1, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame11 = frame11[130:390, 110:550]
        frame22 = cv2.undistort(frame2, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame22 = frame22[130:390, 110:550]
        frame33 = cv2.undistort(frame3, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame33 = frame33[130:390, 110:550]
        frame44 = cv2.undistort(frame4, camera_matrix, dist_coefs, None,
                                newcameramtx)
        #frame44 = frame44[130:390, 110:550]
        cv2.imshow('Frame1', frame1)
        cv2.imshow('Frame11', frame11)
        cv2.imshow('Frame2', frame2)
        cv2.imshow('Frame22', frame22)
        cv2.imshow('Frame3', frame3)
        cv2.imshow('Frame33', frame33)
        cv2.imshow('Frame4', frame4)
        cv2.imshow('Frame44', frame44)

        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            break

    for cap in caps:
        cap.stop()
    cv2.destroyAllWindows()
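
Note: cv2.undistort rebuilds its rectification maps on every call. Since the camera matrix is fixed here, the maps can be computed once with cv2.initUndistortRectifyMap and applied per frame with cv2.remap; a sketch using the same camera_matrix, dist_coefs and newcameramtx:

# compute once, before the capture loop
map1, map2 = cv2.initUndistortRectifyMap(
    camera_matrix, dist_coefs, None, newcameramtx, (720, 480), cv2.CV_16SC2)

# then, per frame, instead of cv2.undistort:
# frame11 = cv2.remap(frame1, map1, map2, cv2.INTER_LINEAR)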
Example #10
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename,batch_size=1)
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    writeVideo_flag = True
    asyncVideo_flag = False

    file_path = 'video.webm'
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(video_capture.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        else:
            w = int(video_capture.get(cv2.CAP_PROP_FRAME_WIDTH))
            h = int(video_capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        # run detection once and unpack, instead of detecting twice
        boxs, confidence = yolo.detect_image(image)[:2]

        features = encoder(frame,boxs)

        detections = [Detection(bbox, confidence, feature) for bbox, confidence, feature in zip(boxs, confidence, features)]
        
        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]
        
        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        
        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue 
            bbox = track.to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,255,255), 2)
            cv2.putText(frame, str(track.track_id),(int(bbox[0]), int(bbox[1])),0, 5e-3 * 200, (0,255,0),2)

        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2)
            cv2.rectangle(frame,(int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])),(255,0,0), 2)
            cv2.putText(frame, score + '%', (int(bbox[0]), int(bbox[3])), 0, 5e-3 * 130, (0,255,0),2)
            
        cv2.imshow('', frame)

        if writeVideo_flag: # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        fps = (fps + (1./(time.time()-t1))) / 2
        print("FPS = %f"%(fps))
        
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
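
Note: the running fps = (fps + 1/dt) / 2 update above is an exponential moving average with alpha = 0.5. A small helper that encapsulates the same recurrence:

import time

class FPSMeter:
    """Exponential moving average of instantaneous FPS (alpha = 0.5),
    matching the fps = (fps + 1/dt) / 2 recurrence used above."""

    def __init__(self):
        self.fps = 0.0
        self.last = time.time()

    def tick(self):
        now = time.time()
        self.fps = (self.fps + 1.0 / (now - self.last)) / 2.0
        self.last = now
        return self.fps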
Example #11
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    np.random.seed(42)
    COLORS = np.random.randint(0, 255, size=(200, 3),
                               dtype="uint8")

    writeVideo_flag = True
    asyncVideo_flag = False

    file_path = 'testvideo2.avi'

    # load the COCO class labels our YOLO model was trained on
    labelsPath = os.path.sep.join(["model_data", "coco_classes.txt"])
    LABELS = open(labelsPath).read().strip().split("\n")
    # print(str(len(LABELS))+"load successfully")
    # print(LABELS)
    class_nums = np.zeros(80)
    counter = np.zeros(80)
    track_id_max = -1
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('testvideo2_out.avi', fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    img_num = 0
    frame_cnt = 0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret or frame_cnt > 30:
            break
        frame_copy = frame.copy()

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb

        boxs, confidence, class_names = yolo.detect_image(image)

        features = encoder(frame, boxs)

        detections = [Detection(bbox, confidence, feature) for bbox, confidence, feature in
                      zip(boxs, confidence, features)]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        # print("print indices!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
        # print(indices)

        # for i in indices:
        #    print(str(i)+class_names[i][0])

        # print(indices.shape)

        detections = [detections[i] for i in indices]
        class_names = [class_names[i] for i in indices]
        print("class_name:" + str(class_names))

        class_IDs = []
        current_nums = np.zeros(80)
        # class_IDs=[]
        for class_name in class_names:
            for i, LABEL in enumerate(LABELS):
                if class_name[0] == LABEL:
                    current_nums[i] += 1
                    class_IDs.append(i)
        # print("person:"+str(current_nums[0]))

        cv2.putText(frame, 'Current', (20, 70), cv2.FONT_HERSHEY_DUPLEX, 0.75, (255, 255, 255), 2)
        cv2.putText(frame, 'Total', (180, 70), cv2.FONT_HERSHEY_DUPLEX, 0.75, (0, 255, 255), 2)

        x1 = 20
        y1 = 100
        for i, cl in enumerate(current_nums):
            if cl > 0:
                cv2.putText(frame, LABELS[i] + "=" + str(cl), (x1, y1), cv2.FONT_HERSHEY_DUPLEX, 0.6, (255, 255, 255),
                            1)  # per-class counts for the current frame
                y1 = y1 + 20

        for i, det in enumerate(detections):
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2)
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 1)
            cv2.putText(frame, score + '%', (int(bbox[0]), int(bbox[1]) + 10), cv2.FONT_HERSHEY_DUPLEX, 0.3,
                        (255, 255, 255), 1)
            # cv2.putText(frame, class_names[i],(int(bbox[0]), int(bbox[1])-5), 0, 5e-3 * 130, (0, 255, 0), 2)
            # cv2.putText(frame, class_names[i], (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 130,(0, 255, 255), 2)

        print("Total of detections:" + str(len(detections)))
        # Call the tracker
        tracker.predict()
        tracker.update(detections, class_IDs)

        # for i, cl in enumerate(class_nums):
        #     if cl > 0:
        #         print("add: " + LABELS[i] + str(cl - class_last_nums[i]))

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                print("not track.is_confirmed() or track.time_since_update > 1: " + str(track.track_id))
                continue
            bbox = track.to_tlbr()
            color = [int(c) for c in COLORS[track.class_id % len(COLORS)]]
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), color, 2)

            cv2.putText(frame, str(track.track_id) + ' ' + LABELS[track.class_id], (int(bbox[0]), int(bbox[1]) - 5),
                        cv2.FONT_HERSHEY_DUPLEX, 0.4, color, 1)

            if track.track_id > track_id_max:
                counter[track.class_id] = counter[track.class_id] + 1
                track_id_max = track.track_id
                dest = frame_copy[int(bbox[1]):int(bbox[3]), int(bbox[0]):int(bbox[2])]
                pdest = "./output_image/" + str(LABELS[track.class_id]) + str(int(counter[track.class_id])) + ".png"
                cv2.imwrite(pdest, dest)
                img_num += 1
            # class_nums[track.class_id].append(track.track_id)
            # print(str(LABELS[track.class_id])+":"+class_nums[track.class_id])
            # print("track.id: " + str(track.track_id))
            # print("track.class_name: " + str(LABELS[track.class_id]))

        print(str(counter))
        print("--------------------------该帧输出完毕!--------------------------------------")

        # cv2.putText(frame, 'Total', (200, 60), cv2.FONT_HERSHEY_DUPLEX, 0.75, (255, 0, 0), 2)
        # x2 = 200
        # y2 = 100
        # for i, cl in enumerate(class_nums):
        #     if cl > 0:
        #         cv2.putText(frame, LABELS[i] + "=" + str(cl), (x2, y2), cv2.FONT_HERSHEY_DUPLEX, 0.5, (255, 0, 0), 2)
        #         y2 = y2 + 20

        cv2.putText(frame, "FPS: %f" % (fps), (int(20), int(40)), 0, 5e-3 * 200, (0, 255, 0), 3)  # !!!!!!!!!输出FPS
        x2 = 180
        y2 = 100
        for i, cl in enumerate(counter):
            if cl > 0:
                cv2.putText(frame, LABELS[i] + "=" + str(cl), (x2, y2), cv2.FONT_HERSHEY_DUPLEX, 0.6, (0, 255, 255), 1)
                y2 = y2 + 20
        # cv2.imshow('', frame)

        if writeVideo_flag:  # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        fps = (fps + (1. / (time.time() - t1))) / 2

        # print("FPS = %f"%(fps))

        frame_cnt += 1
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
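
Note: the crop saved for each new track uses raw bounding-box coordinates; a box clipped by the frame edge can produce negative indices and an empty image. A sketch of a clamped crop helper (hypothetical, not part of the original):

def crop_bbox(image, bbox):
    """Crop a tlbr bbox from image, clamped to the frame bounds."""
    h, w = image.shape[:2]
    x1 = max(0, int(bbox[0]))
    y1 = max(0, int(bbox[1]))
    x2 = min(w, int(bbox[2]))
    y2 = min(h, int(bbox[3]))
    if x2 <= x1 or y2 <= y1:
        return None  # box lies entirely outside the frame
    return image[y1:y2, x1:x2]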
Example #12
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    tracking = True
    writeVideo_flag = False
    asyncVideo_flag = False
    webcamera_flag = True
    ipcamera_flag = False
    udp_flag = True

    file_path = '/workspace/data/C0133_v4.mp4'
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    elif ipcamera_flag:
        video_capture = cv2.VideoCapture('rtsp://*****:*****@192.168.2.201/ONVIF/MediaInput?profile=def_profile1')
    elif webcamera_flag:
        video_capture = cv2.VideoCapture(0)
    else:
        video_capture = cv2.VideoCapture(file_path)
        
    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    if udp_flag:
        HOST = ''
        PORT = 5000
        address = '192.168.2.255'
        sock = socket(AF_INET, SOCK_DGRAM)
        sock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)
        sock.bind((HOST, PORT))


    fps = 0.0
    fps_imutils = imutils.video.FPS().start()
    
    savetime = 0

    while True:
        nowtime = datetime.datetime.now().isoformat()
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break
        t1 = time.time()

        if time.time() - savetime >= 30: 
            print('save data') 
            cv2.imwrite("/workspace/images/image.png", frame)
            savetime = time.time()
        image = Image.fromarray(frame[...,::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        if tracking:
            features = encoder(frame, boxes)

            detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
                        zip(boxes, confidence, classes, features)]
        else:
            detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
                        zip(boxes, confidence, classes)]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)
                # socket
                message = ",".join([
                    nowtime, str(track.track_id),
                    str(int(bbox[0])), str(int(bbox[1])),
                    str(int(bbox[2])), str(int(bbox[3]))])
                if udp_flag:
                    sock.sendto(message.encode('utf-8'), (address, PORT))


        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2) + "%"
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            if len(classes) > 0:
                cls = det.cls
                cv2.putText(frame, str(cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        #cv2.imshow('', frame)

        if writeVideo_flag: # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1./(time.time()-t1))) / 2
            print("FPS = %f"%(fps))
        
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
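
Note: a matching receiver for the broadcast track messages, as a sketch (same port and comma-separated field order as encoded above):

from socket import socket, AF_INET, SOCK_DGRAM

PORT = 5000

sock = socket(AF_INET, SOCK_DGRAM)
sock.bind(('', PORT))
while True:
    data, addr = sock.recvfrom(1024)
    # fields: timestamp, track_id, x1, y1, x2, y2
    timestamp, track_id, x1, y1, x2, y2 = data.decode('utf-8').split(',')
    print(addr, track_id, (x1, y1, x2, y2))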
Example #13
class CameraSource(object):
  def __init__(self, cameraSource, height, output_file=None, startFrame=0,
               async_read=False, outputToServer=False, capture_size=None):
    if async_read:
      self.camera = VideoCaptureAsync(cameraSource)
    else:
      self.camera = cv2.VideoCapture(cameraSource)

    if capture_size is not None:
      print(capture_size)
      self.camera.set(cv2.CAP_PROP_FRAME_WIDTH, capture_size[0])
      self.camera.set(cv2.CAP_PROP_FRAME_HEIGHT, capture_size[1])

    if async_read:
      self.ORIGINAL_WIDTH = self.camera.width
      self.ORIGINAL_HEIGHT = self.camera.height
    else:
      self.ORIGINAL_WIDTH = int(self.camera.get(cv2.CAP_PROP_FRAME_WIDTH))
      self.ORIGINAL_HEIGHT = int(self.camera.get(cv2.CAP_PROP_FRAME_HEIGHT))

    print('CameraSource')
    print('Requested capture size', capture_size)
    print('Actual capture size', self.ORIGINAL_WIDTH, self.ORIGINAL_HEIGHT)

    self.HEIGHT = height
    self.WIDTH = self.ORIGINAL_WIDTH * self.HEIGHT // self.ORIGINAL_HEIGHT
    self.WIDTH = self.WIDTH + self.WIDTH % 2  # Make it even.

    self.startFrame = startFrame
    self.nFrames = 0
    self.writer = VideoWriter(output_file)

    if async_read:
      self.camera.start()

    self.outputToServer = outputToServer
    if outputToServer:
      # https://robotpy.readthedocs.io/en/stable/vision/code.html
      pass
      #self.outputStream = CameraServer.getInstance().putVideo(
      #    'ProcessedVisionFrame', self.WIDTH, self.HEIGHT)


  def GetFrame(self):  
    # Processing on first call. 
    if self.nFrames == 0:
      # Skip some frames if requested.
      if self.startFrame > 0:
        skippedFrames = 0
        while True:
          ret, frame = self.camera.read()
          if not ret or frame is None:
            print('No more frames')
            return None
          skippedFrames += 1
          if skippedFrames >= self.startFrame:
            break
      # Start timer for first frame.
      self.startTime = time.time()

    # Get frame.
    frame = None
    frameTime = time.time() 
    if self.nFrames > 0 and self.nFrames % 50 == 0:
      print('FPS: ', self.nFrames / (frameTime - self.startTime))
    self.nFrames += 1
    ret, frame = self.camera.read()
    if ret and frame is not None:
      if frame.shape[0] != self.HEIGHT:
        frame = cv2.resize(frame, (self.WIDTH, self.HEIGHT))
    return frame


  def ImageSize(self):
    return (self.WIDTH, self.HEIGHT)


  def OutputFrameAndTestContinue(self, message, frame, height=None):
    self.writer.OutputFrame(frame)
    if self.outputToServer:
      self.outputStream.putFrame(frame)
    return ShowFrameAndTestContinue(message, frame, height)


  def __del__(self):
    # The async wrapper is shut down with stop(); a plain cv2 capture with release().
    if isinstance(self.camera, VideoCaptureAsync):
      self.camera.stop()
    else:
      self.camera.release()
    cv2.destroyAllWindows()
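A minimal usage sketch for CameraSource (assumptions: VideoWriter and ShowFrameAndTestContinue come from the same module as the class, and the latter returns False when the user closes the window):

source = CameraSource(0, height=360, async_read=True, capture_size=(1280, 720))
while True:
    frame = source.GetFrame()
    if frame is None:
        break
    if not source.OutputFrameAndTestContinue('preview', frame):
        break
print('Processed size:', source.ImageSize())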
Ejemplo n.º 14
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    tracking = True
    writeVideo_flag = True
    asyncVideo_flag = False

    file_path = 'IMG_3326.MOV'
    dfObj = pd.DataFrame(
        columns=['frame_num', 'track', 'cx', 'cy', 'w', 'h', 'track_temp'])
    dfObjDTP = pd.DataFrame(columns=[
        'filename', 'frame_num', 'bb1', 'bb2', 'bb3', 'bb4', 'track',
        'track_temp', 'Height'
    ])

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        if tracking:
            features = encoder(frame, boxes)

            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]
        else:
            detections = [
                Detection_YOLO(bbox, confidence, cls)
                for bbox, confidence, cls in zip(boxes, confidence, classes)
            ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        if tracking:
            # Call the tracker

            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()

                # Crop a fixed 256x128 window from the top-left corner of the
                # track (numpy slicing clamps at the frame borders).
                #cropped_image = frame[int(bbox[1]):int(bbox[1])+(int(bbox[3])-int(bbox[1])),int(bbox[0]):int(bbox[0])+(int(bbox[2])-int(bbox[0]))]
                cropped_image = frame[int(bbox[1]):int(bbox[1]) + 256,
                                      int(bbox[0]):int(bbox[0]) + 128]

                # Disable (comment out) the drawing below so no white box
                # shows up in the cropped images.
                # cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                # cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), 0,
                #             1.5e-3 * frame.shape[0], (0, 255, 0), 1)

                # print(cropped_image)
                dirname = "output_crop/{}".format(track.track_id)
                if not os.path.exists(dirname):
                    os.makedirs(dirname)

                if cropped_image.size == 0:
                    continue
                writeStatus = cv2.imwrite(
                    "output_crop/{}/frame_{}.png".format(
                        track.track_id, frame_index), cropped_image)
                print("output_crop/{}/frame_{}.png".format(
                    track.track_id, frame_index))

                # Write CSV rows. DataFrame.append was removed in pandas 2.0,
                # so build one-row frames and concatenate instead.
                dfObj = pd.concat([
                    dfObj,
                    pd.DataFrame([[
                        frame_index, track.track_id,
                        int(bbox[0]),
                        int(bbox[1]),
                        int(bbox[2]) - int(bbox[0]),
                        int(bbox[3]) - int(bbox[1]),
                        track.time_since_update
                    ]], columns=dfObj.columns)
                ], ignore_index=True)

                dfObjDTP = pd.concat([
                    dfObjDTP,
                    pd.DataFrame([[
                        file_path, frame_index,
                        int(bbox[0]),
                        int(bbox[1]),
                        int(bbox[2]),
                        int(bbox[3]),
                        track.track_id, track.time_since_update,
                        int(bbox[3]) - int(bbox[1])
                    ]], columns=dfObjDTP.columns)
                ], ignore_index=True)

        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2) + "%"

            # Disable (comment out) the drawing below so no colored box
            # shows up in the cropped images.
            # cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            # if len(classes) > 0:
            #     cls = det.cls
            #     cv2.putText(frame, str(cls) + " " + score, (int(bbox[0]), int(bbox[3])), 0,
            #                 1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        cv2.imshow('', frame)

        if writeVideo_flag:  # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    dfObj = dfObj.sort_values(["track", "frame_num"], ascending=(True, True))
    dfObj.to_csv(r'result_temp.csv', index=False)
    dfObjDTP = dfObjDTP.sort_values(["track", "frame_num"],
                                    ascending=(True, True))
    dfObjDTP.to_csv(r'result_temp_dtp.csv', index=False)
    convert_to_final()
    cv2.destroyAllWindows()
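The two pd.concat blocks above repeat the same row-append pattern; a small helper (an assumption, not part of the original script) keeps them readable:

def append_row(df, values):
    # One-row append; pandas 2.0 removed DataFrame.append itself.
    return pd.concat([df, pd.DataFrame([values], columns=df.columns)],
                     ignore_index=True)

# e.g. dfObj = append_row(dfObj, [frame_index, track.track_id,
#                                 x, y, w, h, track.time_since_update])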
Ejemplo n.º 15
def test(width, height):

    cap1 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch1")
    cap2 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch2")
    cap3 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch3")
    cap4 = VideoCaptureAsync(
        "rtsp://*****:*****@192.168.15.200:554/moxa-cgi/udpstream_ch4")
    cap1.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap2.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap2.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap3.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap3.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
    cap4.set(cv2.CAP_PROP_FRAME_WIDTH, width)
    cap4.set(cv2.CAP_PROP_FRAME_HEIGHT, height)

    alpha = 90
    beta = 90
    gamma = 90
    focalLength = 500
    dist = 500
    move_upper = 0
    move_left = 0
    move_right = 0
    move_lower = 0
    move_together = 0
    cv2.namedWindow('tune_upper')
    cv2.namedWindow('tune_left')
    cv2.namedWindow('tune_right')
    cv2.namedWindow('tune_lower')
    cv2.namedWindow('move')

    cv2.createTrackbar('Alpha', 'tune_upper', 60, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_upper', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_upper', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_upper', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_upper', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_left', 90, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_left', 70, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_left', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_left', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_left', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_right', 90, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_right', 110, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_right', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_right', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_right', 500, 2000, nothing)

    cv2.createTrackbar('Alpha', 'tune_lower', 60, 180, nothing)
    cv2.createTrackbar('Beta', 'tune_lower', 90, 180, nothing)
    cv2.createTrackbar('Gamma', 'tune_lower', 90, 180, nothing)
    cv2.createTrackbar('f', 'tune_lower', 300, 2000, nothing)
    cv2.createTrackbar('Distance', 'tune_lower', 500, 2000, nothing)

    cv2.createTrackbar('Move upper', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move left', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move right', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move lower', 'move', 0, 400, nothing)
    cv2.createTrackbar('Move together', 'move', 200, 400, nothing)

    cap1.start()
    cap2.start()
    cap3.start()
    cap4.start()
    h = 480
    w = 720
    vis = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
    vis_center = np.zeros(((w + h * 2), h, 3), np.uint8)
    vis_left = np.zeros(((w + h * 2), h, 3), np.uint8)
    vis_right = np.zeros(((w + h * 2), h, 3), np.uint8)
    void = np.zeros((w, w, 3), np.uint8)
    void_side = np.zeros((h, h, 3), np.uint8)

    while True:
        _, frame1 = cap1.read()
        _, frame2 = cap2.read()
        _, frame3 = cap3.read()
        _, frame4 = cap4.read()

        frame11 = undistort(frame1)
        frame22 = undistort(frame2)
        frame33 = undistort(frame3)
        frame44 = undistort(frame4)

        alpha_upper = cv2.getTrackbarPos('Alpha', 'tune_upper')
        beta_upper = cv2.getTrackbarPos('Beta', 'tune_upper')
        gamma_upper = cv2.getTrackbarPos('Gamma', 'tune_upper')
        focalLength_upper = cv2.getTrackbarPos('f', 'tune_upper')
        dist_upper = cv2.getTrackbarPos('Distance', 'tune_upper')

        alpha_left = cv2.getTrackbarPos('Alpha', 'tune_left')
        beta_left = cv2.getTrackbarPos('Beta', 'tune_left')
        gamma_left = cv2.getTrackbarPos('Gamma', 'tune_left')
        focalLength_left = cv2.getTrackbarPos('f', 'tune_left')
        dist_left = cv2.getTrackbarPos('Distance', 'tune_left')

        alpha_right = cv2.getTrackbarPos('Alpha', 'tune_right')
        beta_right = cv2.getTrackbarPos('Beta', 'tune_right')
        gamma_right = cv2.getTrackbarPos('Gamma', 'tune_right')
        focalLength_right = cv2.getTrackbarPos('f', 'tune_right')
        dist_right = cv2.getTrackbarPos('Distance', 'tune_right')

        alpha_lower = cv2.getTrackbarPos('Alpha', 'tune_lower')
        beta_lower = cv2.getTrackbarPos('Beta', 'tune_lower')
        gamma_lower = cv2.getTrackbarPos('Gamma', 'tune_lower')
        focalLength_lower = cv2.getTrackbarPos('f', 'tune_lower')
        dist_lower = cv2.getTrackbarPos('Distance', 'tune_lower')

        move_upper = cv2.getTrackbarPos('Move upper', 'move')
        move_left = cv2.getTrackbarPos('Move left', 'move')
        move_right = cv2.getTrackbarPos('Move right', 'move')
        move_lower = cv2.getTrackbarPos('Move lower', 'move')
        move_together = cv2.getTrackbarPos('Move together', 'move')

        alpha_upper = (alpha_upper - 90) * math.pi / 180
        beta_upper = (beta_upper - 90) * math.pi / 180
        gamma_upper = (gamma_upper - 90) * math.pi / 180

        alpha_left = (alpha_left - 90) * math.pi / 180
        beta_left = (beta_left - 90) * math.pi / 180
        gamma_left = (gamma_left - 90) * math.pi / 180

        alpha_right = (alpha_right - 90) * math.pi / 180
        beta_right = (beta_right - 90) * math.pi / 180
        gamma_right = (gamma_right - 90) * math.pi / 180

        alpha_lower = (alpha_lower - 90) * math.pi / 180
        beta_lower = (beta_lower - 90) * math.pi / 180
        gamma_lower = (gamma_lower - 90) * math.pi / 180

        #rotation_mat = cv2.getRotationMatrix2D((360,240), 180, 1)
        #print("Transformation: ", Transformation.shape)
        #print("Rotation: ", rotation_mat.shape)

        transformation_upper = get_transformation(w, h, alpha_upper,
                                                  beta_upper, gamma_upper,
                                                  dist_upper,
                                                  focalLength_upper)
        transformation_left = get_transformation(h, w, alpha_left, beta_left,
                                                 gamma_left, dist_left,
                                                 focalLength_left)
        transformation_right = get_transformation(h, w, alpha_right,
                                                  beta_right, gamma_right,
                                                  dist_right,
                                                  focalLength_right)
        transformation_lower = get_transformation(w, h, alpha_lower,
                                                  beta_lower, gamma_lower,
                                                  dist_lower,
                                                  focalLength_lower)

        left = rotate_bound(frame33, 270)
        right = rotate_bound(frame22, 90)

        result_upper = cv2.warpPerspective(frame11, transformation_upper,
                                           (720, 480))
        result_left = cv2.warpPerspective(left, transformation_left,
                                          (480, 720))
        result_right = cv2.warpPerspective(right, transformation_right,
                                           (480, 720))
        result_lower = cv2.warpPerspective(frame44, transformation_lower,
                                           (720, 480))

        #cv2.imshow('Frame1', frame1)
        #cv2.imshow('Frame11', frame11)
        #cv2.imshow('Frame2', frame2)
        #cv2.imshow('Frame22', frame22)
        #cv2.imshow('Frame3', frame3)
        #cv2.imshow('Frame33', frame33)
        #cv2.imshow('Frame4', frame4)
        #cv2.imshow('Frame44', frame44)
        #left = rotate_bound(result_left, 270)
        #right = rotate_bound(result_right, 90)
        result_lower = cv2.flip(result_lower, 0)
        #cv2.imshow('left', left)
        #cv2.imshow('right',right)
        #vis_center = np.concatenate((result,void,frame44),axis=0)
        #vis_right = np.concatenate((void_side,right,void_side),axis=0)
        #vis_left = np.concatenate((void_side,left,void_side),axis=0)
        #vis = np.concatenate((vis_left,vis_center,vis_right),axis=1)
        vis[move_upper + move_together:result_upper.shape[0] + move_upper +
            move_together,
            h - (result_upper.shape[1] - 720) // 2:result_upper.shape[1] + h -
            (result_upper.shape[1] - 720) // 2, :] = result_upper
        vis[h:result_left.shape[0] + h,
            move_left + move_together:result_left.shape[1] + move_left +
            move_together, :] += result_left
        vis[h:result_right.shape[0] + h,
            h + w - move_right - move_together:result_right.shape[1] -
            move_right - move_together + w + h, :] += result_right
        vis[h + w - move_lower - move_together:result_lower.shape[0] -
            move_lower - move_together + w + h,
            h:result_lower.shape[1] + h, :] += result_lower
        height, width = vis.shape[:2]
        # Integer division: cv2.resize needs integer output dimensions.
        res = cv2.resize(vis, (width * 4 // 7, height * 4 // 7),
                         interpolation=cv2.INTER_CUBIC)
        #cv2.imshow('combined', vis_center)
        #cv2.imshow('combined_left', vis_left)
        #cv2.imshow('combined_right', vis_right)
        cv2.imshow('vis', res)
        vis = np.zeros(((w + h * 2), (w + h * 2), 3), np.uint8)
        if cv2.waitKey(1) == 27:
            cv2.destroyAllWindows()
            break

    cap1.stop()
    cap2.stop()
    cap3.stop()
    cap4.stop()
    cv2.destroyAllWindows()
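Every warpPerspective call above depends on a get_transformation helper that is not shown. A minimal sketch of how such a rotation homography is commonly built, following the usual project-rotate-translate-reproject recipe (an assumption about the helper, not its actual source):

import numpy as np

def get_transformation(w, h, alpha, beta, gamma, dist, focal):
    # 2D -> 3D: move the origin to the image center, lift to homogeneous 3D.
    A1 = np.array([[1, 0, -w / 2],
                   [0, 1, -h / 2],
                   [0, 0, 0],
                   [0, 0, 1]])
    # Rotations around the x, y and z axes.
    RX = np.array([[1, 0, 0, 0],
                   [0, np.cos(alpha), -np.sin(alpha), 0],
                   [0, np.sin(alpha), np.cos(alpha), 0],
                   [0, 0, 0, 1]])
    RY = np.array([[np.cos(beta), 0, -np.sin(beta), 0],
                   [0, 1, 0, 0],
                   [np.sin(beta), 0, np.cos(beta), 0],
                   [0, 0, 0, 1]])
    RZ = np.array([[np.cos(gamma), -np.sin(gamma), 0, 0],
                   [np.sin(gamma), np.cos(gamma), 0, 0],
                   [0, 0, 1, 0],
                   [0, 0, 0, 1]])
    # Translate along the optical axis, then 3D -> 2D with the intrinsics.
    T = np.array([[1, 0, 0, 0],
                  [0, 1, 0, 0],
                  [0, 0, 1, dist],
                  [0, 0, 0, 1]])
    K = np.array([[focal, 0, w / 2, 0],
                  [0, focal, h / 2, 0],
                  [0, 0, 1, 0]])
    return K @ (T @ (RX @ RY @ RZ @ A1))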
Ejemplo n.º 16
def main(yolo):
    # Definition of the parameters
    with open("cfg/detection_tracker_cfg.json") as detection_config:
        detect_config = json.load(detection_config)
    with open("cfg/doors_info.json") as doors_config:
        doors_config = json.load(doors_config)
    with open("cfg/around_doors_info.json") as around_doors_config:
        around_doors_config = json.load(around_doors_config)
    model_filename = detect_config["tracking_model"]
    input_folder, output_folder = detect_config["input_folder"], detect_config[
        "output_folder"]
    meta_folder = detect_config["meta_folder"]
    output_format = detect_config["output_format"]

    # Deep SORT
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    show_detections = True
    asyncVideo_flag = False

    check_gpu()

    # from here should start loop to process videos from folder
    # for video_name in os.listdir(input_folder):

    HOST = "localhost"
    PORT = 8075
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as sock:
        sock.bind((HOST, PORT))
        sock.listen()
        conn, addr = sock.accept()
        with conn:
            print('Connected by', addr)
            #  loop over all videos
            while True:
                data = conn.recv(1000)
                video_motion_list = data.decode("utf-8").split(';')
                videos_que = deque()
                for video_motion in video_motion_list:
                    videos_que.append(video_motion)
                video_name = videos_que.popleft()

                if not video_name.endswith(output_format):
                    continue

                print('elements in que', len(videos_que))
                print("opening video: {}".format(video_name))
                full_video_path = join(input_folder, video_name)
                # full_video_path = "rtsp://*****:*****@192.168.1.52:554/1/h264major"

                meta_name = meta_folder + video_name[:-4] + ".json"
                with open(meta_name) as meta_config_json:
                    meta_config = json.load(meta_config_json)
                camera_id = meta_config["camera_id"]
                if not os.path.exists(output_folder + str(camera_id)):
                    os.mkdir(output_folder + str(camera_id))

                output_name = output_folder + str(camera_id) + '/out_' + video_name
                counter = Counter(counter_in=0, counter_out=0, track_id=0)
                tracker = Tracker(metric)

                if asyncVideo_flag:
                    video_capture = VideoCaptureAsync(full_video_path)
                    video_capture.start()
                    w = int(video_capture.cap.get(3))
                    h = int(video_capture.cap.get(4))
                else:
                    video_capture = cv2.VideoCapture(full_video_path)
                    w = int(video_capture.get(3))
                    h = int(video_capture.get(4))

                fourcc = cv2.VideoWriter_fourcc(*'XVID')
                out = cv2.VideoWriter(output_name, fourcc, 25, (w, h))

                door_array = doors_config["{}".format(camera_id)]
                around_door_array = tuple(
                    around_doors_config["{}".format(camera_id)])
                rect_door = Rectangle(door_array[0], door_array[1],
                                      door_array[2], door_array[3])
                border_door = door_array[3]
                #  loop over video
                save_video_flag = False
                # Start the FPS meter once per video, not once per frame.
                fps_imutils = imutils.video.FPS().start()
                while True:
                    ret, frame = video_capture.read()
                    if not ret:
                        with open('videos_saved/log_results.txt', 'a') as log:
                            log.write(
                                'processed (ret). Time: {}, camera id: {}\n'.
                                format(video_name, camera_id))
                        break
                    t1 = time.time()
                    # lost_ids = counter.return_lost_ids()
                    image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
                    # image = image.crop(around_door_array)
                    boxes, confidence, classes = yolo.detect_image(image)

                    features = encoder(frame, boxes)
                    detections = [
                        Detection(bbox, confidence, cls, feature)
                        for bbox, confidence, cls, feature in zip(
                            boxes, confidence, classes, features)
                    ]

                    # Run non-maxima suppression.
                    boxes = np.array([d.tlwh for d in detections])
                    scores = np.array([d.confidence for d in detections])
                    classes = np.array([d.cls for d in detections])
                    indices = preprocessing.non_max_suppression(
                        boxes, nms_max_overlap, scores)
                    detections = [detections[i] for i in indices]

                    # Call the tracker
                    tracker.predict()
                    tracker.update(detections)

                    cv2.rectangle(frame,
                                  (int(door_array[0]), int(door_array[1])),
                                  (int(door_array[2]), int(door_array[3])),
                                  (23, 158, 21), 3)
                    if len(detections) != 0:
                        counter.someone_inframe()
                        for det in detections:
                            bbox = det.to_tlbr()
                            if show_detections and len(classes) > 0:
                                score = "%.2f" % (det.confidence * 100) + "%"
                                cv2.rectangle(frame,
                                              (int(bbox[0]), int(bbox[1])),
                                              (int(bbox[2]), int(bbox[3])),
                                              (255, 0, 0), 3)
                    else:
                        if counter.need_to_clear():
                            counter.clear_all()
                    # identities = [track.track_id for track in tracker.tracks]
                    # counter.update_identities(identities)

                    for track in tracker.tracks:
                        if not track.is_confirmed(
                        ) or track.time_since_update > 1:
                            continue
                        bbox = track.to_tlbr()

                        if track.track_id not in counter.people_init or counter.people_init[
                                track.track_id] == 0:
                            # counter.obj_initialized(track.track_id)
                            ratio_init = find_ratio_ofbboxes(
                                bbox=bbox, rect_compare=rect_door)

                            if ratio_init > 0:
                                if ratio_init >= 0.5:  # and bbox[3] < door_array[3]:
                                    counter.people_init[
                                        track.track_id] = 2  # man in the door
                                elif ratio_init < 0.5:  # and bbox[3] > door_array[3]:  # initialized in the outside
                                    counter.people_init[track.track_id] = 1
                            else:
                                counter.people_init[track.track_id] = 1
                            counter.people_bbox[track.track_id] = bbox
                        counter.cur_bbox[track.track_id] = bbox

                        adc = "%.2f" % (track.adc * 100
                                        ) + "%"  # Average detection confidence
                        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                      (int(bbox[2]), int(bbox[3])),
                                      (255, 255, 255), 2)
                        cv2.putText(frame, "ID: " + str(track.track_id),
                                    (int(bbox[0]), int(bbox[1]) + 50), 0,
                                    1e-3 * frame.shape[0], (0, 255, 0), 3)

                        if not show_detections:
                            track_cls = track.cls
                            cv2.putText(frame, str(track_cls),
                                        (int(bbox[0]), int(bbox[3])), 0,
                                        1e-3 * frame.shape[0], (0, 255, 0), 3)
                            cv2.putText(frame, 'ADC: ' + adc,
                                        (int(bbox[0]),
                                         int(bbox[3] + 2e-2 * frame.shape[1])),
                                        0, 1e-3 * frame.shape[0], (0, 255, 0),
                                        3)
                        # if track.time_since_update >= 15:
                        #     id_get_lost.append(track.track_id)
                    id_get_lost = [
                        track.track_id for track in tracker.tracks
                        if track.time_since_update >= 15
                    ]

                    for val in counter.people_init.keys():
                        ratio = 0
                        cur_c = find_centroid(counter.cur_bbox[val])
                        init_c = find_centroid(counter.people_bbox[val])
                        if val in id_get_lost and counter.people_init[
                                val] != -1:
                            ratio = find_ratio_ofbboxes(
                                bbox=counter.cur_bbox[val],
                                rect_compare=rect_door)
                            if counter.people_init[val] == 2 \
                                    and ratio < 0.6:  # and counter.people_bbox[val][3] > border_door \
                                counter.get_out()
                                save_video_flag = True
                                print(counter.people_init[val], ratio)
                            elif counter.people_init[val] == 1 \
                                    and ratio >= 0.6:
                                counter.get_in()
                                save_video_flag = True
                                print(counter.people_init[val], ratio)
                            counter.people_init[val] = -1

                    ins, outs = counter.return_counter()
                    cv2.rectangle(frame, (frame.shape[1] - 150, 0),
                                  (frame.shape[1], 50), (0, 0, 0), -1, 8)
                    cv2.putText(frame, "in: {}, out: {} ".format(ins, outs),
                                (frame.shape[1] - 140, 20), 0,
                                1e-3 * frame.shape[0], (255, 255, 255), 3)
                    out.write(frame)
                    fps_imutils.update()
                    if not asyncVideo_flag:
                        pass
                        # fps = (1. / (time.time() - t1))
                        # print("FPS = %f" % fps)

                        # if len(fpeses) < 15:
                        #     fpeses.append(round(fps, 2))
                        #
                        # elif len(fpeses) == 15:
                        #     # fps = round(np.median(np.array(fpeses)))
                        #     median_fps = float(np.median(np.array(fpeses)))
                        #     fps = round(median_fps, 1)
                        #     print('max fps: ', fps)
                        #     # fps = 20
                        #     counter.fps = fps
                        #     fpeses.append(fps)

                    if cv2.waitKey(1) & 0xFF == ord('q'):
                        break

                if asyncVideo_flag:
                    video_capture.stop()
                    del video_capture
                else:
                    video_capture.release()

                if save_video_flag:
                    with open('videos_saved/log_results.txt', 'a') as log:
                        log.write(
                            'detected!!! time: {}, camera id: {}, detected move in: {}, out: {}\n'
                            .format(video_name, camera_id, ins, outs))
                        log.write('video written {}\n\n'.format(output_name))
                    out.release()
                else:
                    if out.isOpened():
                        out.release()
                        if os.path.isfile(output_name):
                            os.remove(output_name)

                if os.path.isfile(full_video_path):
                    os.remove(full_video_path)
                if os.path.isfile(meta_name):
                    os.remove(meta_name)
                save_video_flag = False
                cv2.destroyAllWindows()
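The server above listens on localhost:8075 and expects one UTF-8 payload of ';'-separated video file names. A minimal producer sketch for the other end of that protocol (an assumption, not part of the original system; the file names are hypothetical):

import socket

def send_videos(video_names, host="localhost", port=8075):
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as client:
        client.connect((host, port))
        client.sendall(";".join(video_names).encode("utf-8"))

send_videos(["cam3_entrance_001.mp4", "cam3_entrance_002.mp4"])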
Ejemplo n.º 17
def main():

    PATH_TO_CKPT_PERSON = 'models/faster_rcnn_restnet50.pb'

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = 200
    nms_max_overlap = 1.0
    yolo = YOLO()
    reid = PERSON_REID()
    frozen_person = FROZEN_GRAPH_INFERENCE(PATH_TO_CKPT_PERSON)

    # # Deep SORT
    # model_filename = 'model_data/mars-small128.pb'
    # encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    # file_path = 0
    # im_width/im_height are needed by run_frozen_graph in every branch below
    # (assuming both async wrappers expose the raw capture as .cap, as
    # VideoCaptureAsync does elsewhere in these examples).
    if VIDEO_CAPTURE == 0 and asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
        im_width = int(video_capture.cap.get(3))
        im_height = int(video_capture.cap.get(4))
    elif VIDEO_CAPTURE == 1 and asyncVideo_flag:
        video_capture = myVideoCapture(file_path)
        im_width = int(video_capture.cap.get(3))
        im_height = int(video_capture.cap.get(4))
    else:
        video_capture = cv2.VideoCapture(file_path)
        im_width = int(video_capture.get(3))
        im_height = int(video_capture.get(4))

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_filename, fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()
    boxs = list()
    confidence = list()
    persons = list()
    frame_count = 0
    track_count = 0
    num_files = 0

    while True:
        t1 = time.time()
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break
        frame_org = frame.copy()
        frame_count += 1
        # print('Frame count: {}'.format(frame_count))

        # Person detection using Frozen Graph
        persons = frozen_person.run_frozen_graph(frame, im_width, im_height)
        boxs = [[
            person['left'], person['top'], person['width'], person['height']
        ] for person in persons]
        confidence = [person['confidence'] for person in persons]
        cropped_persons = list(person['cropped'] for person in persons)

        # # Person detection using YOLO - Keras-converted model
        # image = Image.fromarray(frame[...,::-1])  # bgr to rgb
        # boxs = yolo.detect_image(image)[0]
        # confidence = yolo.detect_image(image)[1]
        # cropped_persons = [np.array(frame[box[1]:box[1]+box[3], box[0]:box[0]+box[2]]) for box in boxs] #[x,y,w,h]

        # features = encoder(frame, boxs)
        if len(cropped_persons) > 0:
            features = reid.extract_feature_imgTensor(cropped_persons)
            # print(features.shape)
            detections = [
                Detection(bbox, confidence,
                          feature) for bbox, confidence, feature in zip(
                              boxs, confidence, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
                cv2.putText(frame, str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0, 5e-3 * 200,
                            (0, 255, 0), 2)

                directory = os.path.join('output', str(track.track_id))
                if not os.path.exists(directory):
                    os.makedirs(directory)
                # file_count = len([name for name in os.listdir(directory+'/') if os.path.isfile(name)])
                file_count = sum(
                    [len(files) for root, dirs, files in os.walk(directory)])
                # print(file_count)

                if file_count == 0:
                    cv2.imwrite(
                        directory + '/' + str(file_count + 1) + '.jpg',
                        frame_org[int(bbox[1]):int(bbox[3]),
                                  int(bbox[0]):int(bbox[2])])
                elif file_count > 0 and track_count % 10 == 0:
                    cv2.imwrite(
                        directory + '/' + str(file_count + 1) + '.jpg',
                        frame_org[int(bbox[1]):int(bbox[3]),
                                  int(bbox[0]):int(bbox[2])])

                track_count += 1

            for det in detections:
                bbox = det.to_tlbr()
                score = "%.2f" % round(det.confidence * 100, 2)
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
                cv2.putText(frame, score + '%', (int(bbox[0]), int(bbox[3])),
                            0, 5e-3 * 130, (0, 255, 0), 2)

        cv2.imshow('YOLO DeepSort', frame)

        if writeVideo_flag:  # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        fps = (fps + (1. / (time.time() - t1))) / 2
        # print("FPS = %f"%(fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    # print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
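run_frozen_graph is consumed through a list of per-person dicts; judging from the keys used above, each record looks roughly like this (an illustration of the assumed contract, not the detector's documented API):

person = {
    'left': 100, 'top': 50,             # top-left corner in pixels
    'width': 60, 'height': 160,         # box size in pixels
    'confidence': 0.97,                 # detection score
    'cropped': frame[50:210, 100:160],  # the person patch cut from the frame
}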
Ejemplo n.º 18
def main(yolo):

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    # Calculate cosine Distance Metric 
    metric = nn_matching.NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    tracker = Tracker(metric)

    # Flags for the process
    tracking = True  # Set False if you only want to do detection
    writeVideo_flag = True  # Set False if you don't want to write frames locally
    asyncVideo_flag = False  # Asynchronous capture for better FPS. Warning: can cause stuttering

    # Video File Path
    file_path = '/mydrive/test.mp4'
    # Check if the asyncVideo flag is set to True
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        output_file = os.path.basename(file_path)[:-4]
        out = cv2.VideoWriter('./Output/' + output_file + "-prueba-test.avi", fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    while True:
        ret, frame = video_capture.read() # Capture frames
        if not ret:
            break

        t1 = time.time()
        
        # bgr to rgb frame conversion
        image = Image.fromarray(frame[...,::-1])  
        # YOLOv4 Detection
        boxes, confidence, classes = yolo.detect_image(image) 

        if tracking:
            # Encodes the frame and boxes for DeepSORT
            features = encoder(frame, boxes)
            # DeepSORT Detection
            detections = [Detection(bbox, confidence, cls, feature) for bbox, confidence, cls, feature in
                          zip(boxes, confidence, classes, features)]
        else:
            # Only YOLOv4 Detection
            detections = [Detection_YOLO(bbox, confidence, cls) for bbox, confidence, cls in
                          zip(boxes, confidence, classes)]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap, scores)
        detections = [detections[i] for i in indices]

        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                # Draw white bbox for DeepSORT
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id), (int(bbox[0]), int(bbox[1])), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1.5,(0, 255, 0), 1)

        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2)
            # Check the class for colored bbox
            if len(classes) > 0:
                cls = det.cls
                center_bbox = ((int(bbox[0]) + int(bbox[2])) // 2,
                               (int(bbox[1]) + int(bbox[3])) // 2)
                if str(cls) == 'person':
                    # Draw a blue bbox for YOLOv4 person detections
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (219, 152, 52), 2)
                elif str(cls) in ('backpack', 'handbag', 'suitcase'):
                    # Draw an orange bbox for backpack, handbag and suitcase detections
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (65, 176, 245), 2)

        if not asyncVideo_flag:
            fps = (fps + (1./(time.time()-t1))) / 2
            print("FPS = %f"%(fps))
            cv2.putText(frame, "GPU: NVIDIA Tesla P100", (5, 70), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0), 1)
            cv2.putText(frame, "FPS: %.2f" % fps, (5, 50), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0), 1)

            # draw the timestamp on the frame
            timestamp = datetime.datetime.now()
            ts = timestamp.strftime("%d/%m/%Y, %H:%M:%S")
            cv2.putText(frame, ts, (5, 30), cv2.FONT_HERSHEY_COMPLEX_SMALL, 0.8, (0, 255, 0), 1)            

        #cv2.imshow('', frame)

        if writeVideo_flag: # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()
          
        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()
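The class-to-color branching inside the detection loop above grows with every new class; a dictionary lookup (a sketch reusing the same BGR values, meant to replace the if/elif chain in that loop) keeps it flat:

# BGR colors per class; anything not listed falls back to red.
CLASS_COLORS = {
    'person': (219, 152, 52),
    'backpack': (65, 176, 245),
    'handbag': (65, 176, 245),
    'suitcase': (65, 176, 245),
}

color = CLASS_COLORS.get(str(cls), (0, 0, 255))
cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
              (int(bbox[2]), int(bbox[3])), color, 2)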
Ejemplo n.º 19
# hyper-parameters for bounding boxes shape
# loading models
face_detection = cv2.CascadeClassifier(detection_model_path)
emotion_classifier = load_model(emotion_model_path, compile=False)
EMOTIONS = [
    "angry", "disgust", "scared", "happy", "sad", "surprised", "neutral"
]

#feelings_faces = []
#for index, emotion in enumerate(EMOTIONS):
# feelings_faces.append(cv2.imread('emojis/' + emotion + '.png', -1))

# starting video streaming
cv2.namedWindow('your_face')
camera = VideoCaptureAsync()
camera.start()
#camera = cv2.VideoCapture(0)
while True:
    # Read a few frames so the async capture hands back the freshest one.
    for _ in range(3):
        frame = camera.read()[1]
    # resize the frame
    #frame = imutils.resize(frame, width=300)
    frame = cv2.resize(frame, (300, 250))
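The snippet stops before the detection step. A minimal continuation sketch of the Haar-cascade plus Keras-classifier pipeline these variable names imply (assumptions: numpy is imported as np, and the classifier takes 64x64 grayscale input):

    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    faces = face_detection.detectMultiScale(gray, scaleFactor=1.1,
                                            minNeighbors=5, minSize=(30, 30))
    for (fx, fy, fw, fh) in faces:
        roi = gray[fy:fy + fh, fx:fx + fw]
        roi = cv2.resize(roi, (64, 64)).astype("float") / 255.0  # assumed input size
        roi = np.expand_dims(np.expand_dims(roi, axis=-1), axis=0)
        preds = emotion_classifier.predict(roi)[0]
        cv2.putText(frame, EMOTIONS[preds.argmax()], (fx, fy - 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.45, (0, 0, 255), 2)
    cv2.imshow('your_face', frame)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break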
Ejemplo n.º 20
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    tracking = True
    writeVideo_flag = True
    asyncVideo_flag = False

    file_path = 'video.webm'
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()
    model_par, valid_transform = model_init_par()
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        if tracking:
            features = encoder(frame, boxes)

            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]
        else:
            detections = [
                Detection_YOLO(bbox, confidence, cls)
                for bbox, confidence, cls in zip(boxes, confidence, classes)
            ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2) + "%"
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            if len(classes) > 0:
                cls = det.cls
                cv2.putText(frame,
                            str(cls) + " " + score,
                            (int(bbox[0]), int(bbox[3])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)

        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                #crop_img = frame[int(bbox[1]):int(bbox[3]),int(bbox[0]):int(bbox[2])]
                crop_img = image.crop(
                    [int(bbox[0]),
                     int(bbox[1]),
                     int(bbox[2]),
                     int(bbox[3])])
                #res_txt = demo_par(model_par, valid_transform, crop_img)

                #draw.rectangle(xy=person_bbox[:-1], outline='red', width=1)

                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)
                font = ImageFont.truetype(
                    '/home/sohaibrabbani/PycharmProjects/Strong_Baseline_of_Pedestrian_Attribute_Recognition/arial.ttf',
                    size=10)
                # positive_cnt = 1
                # for txt in res_txt:
                #     if 'personal' in txt:
                #         #draw.text((x1, y1 + 20 * positive_cnt), txt, (255, 0, 0), font=font)
                #         cv2.putText(frame, txt, (int(bbox[0]), int(bbox[1]) + 20 * positive_cnt), 0,
                #                     1e-3 * frame.shape[0], (0, 255, 0), 1)
                #         positive_cnt += 1

        cv2.imshow('', frame)

        if writeVideo_flag:  # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
Ejemplo n.º 21
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0
    output_format = 'mp4'

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    fpeses = []

    check_gpu()
    video_name = 'test1.mp4'

    print("opening video: {}".format(video_name))
    file_path = join('data_files/videos', video_name)
    output_name = 'save_data/out_' + video_name[0:-3] + output_format
    counter = Counter(counter_in=0, counter_out=0, track_id=0)

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()
        w = int(video_capture.cap.get(3))
        h = int(video_capture.cap.get(4))
    else:
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))

    if writeVideo_flag:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))

    left_array = [0, 0, w / 2, h]
    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    rect_left = Rectangle(left_array[0], left_array[1], left_array[2],
                          left_array[3])

    border_door = left_array[3]
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            with open('log_results.txt', 'a') as log:
                log.write('1')
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        features = encoder(frame, boxes)
        detections = [
            Detection(bbox, confidence, cls,
                      feature) for bbox, confidence, cls, feature in zip(
                          boxes, confidence, classes, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.cls for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        cv2.rectangle(frame, (int(left_array[0]), int(left_array[1])),
                      (int(left_array[2]), int(left_array[3])), (23, 158, 21),
                      3)
        if len(detections) != 0:
            counter.someone_inframe()
            for det in detections:
                bbox = det.to_tlbr()
                if show_detections and len(classes) > 0:
                    score = "%.2f" % (det.confidence * 100) + "%"
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)
        else:
            if counter.need_to_clear():
                counter.clear_all()

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()

            if track.track_id not in counter.people_init or counter.people_init[
                    track.track_id] == 0:
                counter.obj_initialized(track.track_id)
                ratio_init = find_ratio_ofbboxes(bbox=bbox,
                                                 rect_compare=rect_left)

                if ratio_init > 0:
                    if ratio_init >= 0.8 and bbox[3] < left_array[3]:
                        counter.people_init[
                            track.track_id] = 2  # man in left side
                    elif ratio_init < 0.8 and bbox[3] > left_array[
                            3]:  # initialized in the bus, mb going out
                        counter.people_init[track.track_id] = 1
                else:
                    counter.people_init[track.track_id] = 1
                counter.people_bbox[track.track_id] = bbox
            counter.cur_bbox[track.track_id] = bbox

            adc = "%.2f" % (track.adc *
                            100) + "%"  # Average detection confidence
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, "ID: " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1]) + 50), 0,
                        1e-3 * frame.shape[0], (0, 255, 0), 5)

            if not show_detections:
                track_cls = track.cls
                cv2.putText(frame, str(track_cls),
                            (int(bbox[0]), int(bbox[3])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)
                cv2.putText(
                    frame, 'ADC: ' + adc,
                    (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
                    1e-3 * frame.shape[0], (0, 255, 0), 1)

        id_get_lost = [
            track.track_id for track in tracker.tracks
            if track.time_since_update >= 5
        ]

        # TODO clear people_init and other dicts
        for val in counter.people_init.keys():
            ratio = 0
            cur_c = find_centroid(counter.cur_bbox[val])
            init_c = find_centroid(counter.people_bbox[val])
            vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

            if val in id_get_lost and counter.people_init[val] != -1:
                ratio = find_ratio_ofbboxes(bbox=counter.cur_bbox[val],
                                            rect_compare=rect_left)

                if vector_person[0] > 200 and counter.people_init[val] == 2 \
                        and ratio < 0.7:  # and counter.people_bbox[val][3] > border_door \
                    counter.get_out()
                    print(vector_person[0], counter.people_init[val], ratio)

                elif vector_person[0] < -100 and counter.people_init[val] == 1 \
                        and ratio >= 0.7:
                    counter.get_in()
                    print(vector_person[0], counter.people_init[val], ratio)

                counter.people_init[val] = -1

        ins, outs = counter.show_counter()
        cv2.rectangle(frame, (700, 0), (950, 50), (0, 0, 0), -1, 8)
        cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (710, 35), 0,
                    1e-3 * frame.shape[0], (255, 255, 255), 3)

        cv2.namedWindow('video', cv2.WINDOW_NORMAL)
        # cv2.resizeWindow('video', 1422, 800)
        cv2.imshow('video', frame)

        if writeVideo_flag:
            out.write(frame)

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            print("FPS = %f" % fps)

            if len(fpeses) < 15:
                fpeses.append(round(fps, 2))

            elif len(fpeses) == 15:
                median_fps = float(np.median(np.array(fpeses)))
                fps = round(median_fps, 1)
                print('median fps: ', fps)
                fps = 20  # override with a fixed rate for the counter
                counter.fps = fps
                fpeses.append(fps)  # grow the list past 15 so this branch runs once

        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()
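
The sync/async branching around start(), stop() and release() recurs in every example on this page. A minimal sketch of a context-manager wrapper that hides that branching (a hypothetical helper, not part of the original code; it assumes the VideoCaptureAsync interface shown in these examples):

from contextlib import contextmanager

import cv2

@contextmanager
def open_capture(source, use_async=False):
    # Yields a started capture and guarantees teardown on exit.
    if use_async:
        cap = VideoCaptureAsync(source)  # class as imported in the examples above
        cap.start()
        try:
            yield cap
        finally:
            cap.stop()
    else:
        cap = cv2.VideoCapture(source)
        try:
            yield cap
        finally:
            cap.release()

With this, the paired stop()/release() blocks at the end of each example collapse into a single "with open_capture(path) as video_capture:" loop.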
Ejemplo n.º 22
0
    def run(self):
        asyncVideo_flag = self.args.asyncVideo_flag
        writeVideo_flag = self.args.writeVideo_flag

        if asyncVideo_flag:
            video_capture = VideoCaptureAsync(self.video_path)
        else:
            video_capture = cv2.VideoCapture(self.video_path)

        if asyncVideo_flag:
            video_capture.start()

        if writeVideo_flag:
            if asyncVideo_flag:
                w = int(video_capture.cap.get(3))
                h = int(video_capture.cap.get(4))
            else:
                w = int(video_capture.get(3))
                h = int(video_capture.get(4))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter('output_yolov4.avi', fourcc, 30, (w, h))
            frame_index = -1

        fps = 0.0
        fps_imutils = imutils.video.FPS().start()

        count_frame = 0
        while True:
            count_frame += 1
            t1 = time.time()
            ret, frame = video_capture.read()
            if not ret:
                break

            _frame = frame  # note: an alias to the same array, not a copy
            # process the frame (self.process receives the same array twice)
            frame = self.process(frame, _frame, count_frame)

            # visualize
            if self.args.visualize:
                frame = imutils.resize(frame, width=1000)
                cv2.imshow("Final result", frame)

            if writeVideo_flag:  # and not asyncVideo_flag:
                # save a frame
                out.write(frame)
                frame_index = frame_index + 1

            fps_imutils.update()

            if not asyncVideo_flag:
                fps = (fps + (1. / (time.time() - t1))) / 2
                print("FPS = %f" % (fps))

            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        fps_imutils.stop()
        print('imutils FPS: {}'.format(fps_imutils.fps()))

        if asyncVideo_flag:
            video_capture.stop()
        else:
            video_capture.release()

        if writeVideo_flag:
            out.release()

        cv2.destroyAllWindows()
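
Both writer setups above read the frame size through the magic indices 3 and 4, reaching into video_capture.cap for the async case. OpenCV exposes named constants for those indices; a small hypothetical helper that does the same thing readably:

import cv2

def frame_size(capture, is_async=False):
    # cv2.CAP_PROP_FRAME_WIDTH == 3 and cv2.CAP_PROP_FRAME_HEIGHT == 4,
    # which is what the bare get(3)/get(4) calls above rely on.
    cap = capture.cap if is_async else capture
    w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    return w, h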
Ejemplo n.º 23
0
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0

    output_format = 'mp4'
    video_name = 'bus4_2in_4out.mp4'
    file_path = join('data_files/videos', video_name)
    output_name = 'save_data/out_' + video_name[0:-3] + output_format
    initialize_door_by_yourself = False
    door_array = None
    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    counter = Counter(counter_in=0, counter_out=0, track_id=0)

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))
        frame_index = -1

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()

    ret, first_frame = video_capture.read()

    if door_array is None:
        if initialize_door_by_yourself:
            door_array = select_object(first_frame)[0]
            print(door_array)
        else:
            all_doors = read_door_info('data_files/doors_info.csv')
            door_array = all_doors[video_name]

    border_door = door_array[3]
    error_values = []
    truth = get_truth(video_name)
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            total_count = counter.return_total_count()
            true_total = truth.inside + truth.outside
            err = abs(total_count - true_total) / true_total
            log_res = "in video: {}\n predicted / true\n counter in: {} / {}\n counter out: {} / {}\n" \
                      " total: {} / {}\n error: {}\n______________\n".format(video_name, counter.counter_in,
                                                                             truth.inside,
                                                                             counter.counter_out, truth.outside,
                                                                             total_count, true_total, err)
            with open('log_results.txt', 'a') as file:  # append keeps results from earlier runs
                file.write(log_res)
            print(log_res)
            error_values.append(err)
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        features = encoder(frame, boxes)
        detections = [
            Detection(bbox, confidence, cls,
                      feature) for bbox, confidence, cls, feature in zip(
                          boxes, confidence, classes, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.cls for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)

        cv2.rectangle(frame, (int(door_array[0]), int(door_array[1])),
                      (int(door_array[2]), int(door_array[3])), (23, 158, 21),
                      2)

        for det in detections:
            bbox = det.to_tlbr()
            if show_detections and len(classes) > 0:
                score = "%.2f" % (det.confidence * 100) + "%"
                rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                rect_door = Rectangle(int(door_array[0]), int(door_array[1]),
                                      int(door_array[2]), int(door_array[3]))
                intersection = rect_head & rect_door

                if intersection:
                    squares_coeff = rect_square(*intersection) / rect_square(
                        *rect_head)
                    cv2.putText(
                        frame,
                        score + " inter: " + str(round(squares_coeff, 3)),
                        (int(bbox[0]), int(bbox[3])), 0, 1e-3 * frame.shape[0],
                        (0, 100, 255), 5)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)

        for track in tracker.tracks:
            if not track.is_confirmed() or track.time_since_update > 1:
                continue
            bbox = track.to_tlbr()
            # first appearance of the object with id == track.track_id

            if track.track_id not in counter.people_init or counter.people_init[
                    track.track_id] == 0:
                counter.obj_initialized(track.track_id)
                rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                rect_door = Rectangle(door_array[0], door_array[1],
                                      door_array[2], door_array[3])
                res = rect_head & rect_door
                if res:

                    inter_square = rect_square(*res)
                    head_square = rect_square(*rect_head)
                    #     was initialized in door, probably going in
                    if (inter_square / head_square) >= 0.8:
                        counter.people_init[track.track_id] = 2
                        #     initialized inside the bus, maybe going out
                    elif (inter_square /
                          head_square) <= 0.4 or bbox[3] > border_door:
                        counter.people_init[track.track_id] = 1
                # res is None, meaning the object is not inside the door contour
                else:
                    counter.people_init[track.track_id] = 1

                counter.people_bbox[track.track_id] = bbox
            counter.cur_bbox[track.track_id] = bbox

            adc = "%.2f" % (track.adc *
                            100) + "%"  # Average detection confidence
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
            cv2.putText(frame, "ID: " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 1e-3 * frame.shape[0],
                        (0, 255, 0), 5)

            if not show_detections:
                track_cls = track.cls
                cv2.putText(frame, str(track_cls),
                            (int(bbox[0]), int(bbox[3])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)
                cv2.putText(
                    frame, 'ADC: ' + adc,
                    (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
                    1e-3 * frame.shape[0], (0, 255, 0), 1)

        id_get_lost = [
            track.track_id for track in tracker.tracks
            if track.time_since_update >= 25 and track.age >= 29
        ]
        id_inside_tracked = [
            track.track_id for track in tracker.tracks if track.age > 60
        ]
        for val in counter.people_init.keys():
            # check bbox also
            cur_c = find_centroid(counter.cur_bbox[val])
            init_c = find_centroid(counter.people_bbox[val])
            vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

            if val in id_get_lost and counter.people_init[val] != -1:
                # if vector_person[1] < 0, the current centroid is above the initial
                # one, i.e. the person is moving in the exit direction
                if vector_person[1] > 70 and counter.people_init[
                        val] == 2:  # and counter.people_bbox[val][3] > border_door \
                    counter.get_in()

                elif vector_person[1] < -70 and counter.people_init[val] == 1:
                    counter.get_out()

                counter.people_init[val] = -1  # mark this id as already counted
                print("person left frame")
                print(f"current centroid - init : {cur_c} - {init_c}\n")
                print(f"vector: {vector_person}\n")
            # elif val in id_inside_tracked and val not in id_get_lost and counter.people_init[val] == 1 \
            #         and bb_intersection_over_union(counter.cur_bbox[val], door_array) <= 0.3 \
            #         and vector_person[1] > 0:  # and \
            #     # counter.people_bbox[val][3] > border_door:
            #     counter.get_in()
            #
            #     counter.people_init[val] = -1
            #     print(f"person is tracked for a long time")
            #     print(f"current centroid - init : {cur_c} - {init_c}\n")
            #     print(f"vector: {vector_person}\n")
            #     imaggg = cv2.line(frame, find_centroid(counter.cur_bbox[val]),
            #                       find_centroid(counter.people_bbox[val]),
            #                       (0, 0, 255), 7)

            # cv2.imshow('frame', imaggg)
            # cv2.waitKey(0)

        ins, outs = counter.show_counter()
        cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (10, 30), 0,
                    1e-3 * frame.shape[0], (255, 0, 0), 5)

        cv2.namedWindow('image', cv2.WINDOW_NORMAL)
        cv2.resizeWindow('image', 1400, 800)
        cv2.imshow('image', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        fps_imutils.update()

        if not asyncVideo_flag:
            fps = (fps + (1. / (time.time() - t1))) / 2
            # print("FPS = %f" % (fps))

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

    fps_imutils.stop()
    print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()

    cv2.destroyAllWindows()

    mean_error = np.mean(error_values)
    print("mean error for {} video: {}".format(video_name, mean_error))
Ejemplo n.º 24
0
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.25
    max_cross_cosine_distance = 0.5
    nn_budget = None
    nms_max_overlap = 0.5
    frame_rate = 12

    file_path = 'reid-wide2'
    file_path2 = 'reid-long2'

    show_detections = False
    writeVideo_flag = False
    asyncVideo_flag = False
    beta_calculate_flag = False
    predict_ns_flag = False
    predict_time = 0.5
    multi_camera_flag = True

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric, frame_rate=frame_rate)

    if multi_camera_flag:
        metric2 = nn_matching.NearestNeighborDistanceMetric(
            "cosine", max_cosine_distance, nn_budget)
        tracker2 = Tracker(metric2, frame_rate=frame_rate)

    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path + ".mp4")
    else:
        video_capture = cv2.VideoCapture(file_path + ".mp4")

    if multi_camera_flag:
        video_capture2 = cv2.VideoCapture(file_path2 + ".mp4")

    if asyncVideo_flag:
        video_capture.start()

    if asyncVideo_flag:
        w = int(video_capture.cap.get(3))
        h = int(video_capture.cap.get(4))
    else:
        w = int(video_capture.get(3))
        h = int(video_capture.get(4))

    if writeVideo_flag:
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(file_path2 + "-output.avi", fourcc, frame_rate,
                              (w, h))
        frame_index = -1

        if multi_camera_flag:
            out2 = cv2.VideoWriter(file_path + "-output2.avi", fourcc,
                                   frame_rate, (w, h))

    fps = 0.0
    fps_imutils = imutils.video.FPS().start()
    alpha = np.arctan((326 + (369 - 326) * (250 - 110) /
                       (300 - 110) - w / 2) / w * 6.4 / 3.6) * 180 / np.pi
    beta = 0
    beta_last = 0
    x_temp = 0
    track_angle = np.array([])
    x_axis = np.array([])
    x_predict = np.array([])
    x_current = np.array([])
    x_kalman = np.array([])
    matches_id = []
    alert_mode_flag = False

    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes, confidence, classes = yolo.detect_image(image)

        features = encoder(frame, boxes)
        detections = [
            Detection(bbox, confidence, cls,
                      feature) for bbox, confidence, cls, feature in zip(
                          boxes, confidence, classes, features)
        ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        classes = np.array([d.cls for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        # Call the tracker
        tracker.predict()
        tracker.update(detections)
        if predict_ns_flag:
            tracker.predict_ns(predict_time)

        if alert_mode_flag:
            if len(tracker.tracks) > track1_idx:
                ret = tracker.tracks[track1_idx].is_confirmed()
                if not ret:
                    alert_mode_flag = False
            else:
                alert_mode_flag = False

        if not alert_mode_flag:
            for det in detections:
                bbox = det.to_tlbr()
                if show_detections and len(classes) > 0:
                    det_cls = det.cls
                    score = "%.2f" % (det.confidence * 100) + "%"
                    cv2.putText(frame,
                                str(det_cls) + " " + score,
                                (int(bbox[0]), int(bbox[3])), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 1)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 5:
                    continue
                bbox = track.to_tlbr()

                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)

                if predict_ns_flag:
                    if 0 < track.x_predict < w:
                        cv2.circle(frame, (int(track.x_predict), int(h / 2)),
                                   15, (0, 0, 255), -1)
                    x_temp += 1 / frame_rate
                    x_axis = np.append(x_axis, x_temp)
                    x_predict = np.append(x_predict, track.x_predict)
                    x_current = np.append(x_current, track.x_current)
                    x_kalman = np.append(x_kalman, track.mean[0])

                if beta_calculate_flag:
                    beta = np.arctan(
                        (track.mean[0] - w / 2) / w * 6.4 / 16) * 180 / np.pi
        else:
            bbox = tracker.tracks[track1_idx].to_tlbr()
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
            cv2.putText(frame, "ID: " + str(track.track_id),
                        (int(bbox[0]), int(bbox[1])), 0, 1e-3 * frame.shape[0],
                        (0, 255, 0), 1)

            # if not show_detections:
            #     track_cls = track.cls
            #     adc = "%.2f" % (track.adc * 100) + "%"  # Average detection confidence
            #     cv2.putText(frame, str(track_cls), (int(bbox[0]), int(bbox[3])), 0, 1e-3 * frame.shape[0], (0, 255, 0),
            #                 1)
            #     cv2.putText(frame, 'ADC: ' + adc, (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])), 0,
            #                 1e-3 * frame.shape[0], (0, 255, 0), 1)

        if beta_calculate_flag:
            if np.isclose(beta, beta_last) and abs(beta) > 7:
                beta = np.sign(beta) * np.arctan(3.2 / 16) * 180 / np.pi
            cv2.putText(frame, "Beta_angle: " + '{:4.2f}'.format(beta),
                        (20, 20), 0, 1e-3 * frame.shape[0], (0, 0, 255), 1)
            beta_last = beta
            x_temp += 1 / frame_rate
            x_axis = np.append(x_axis, x_temp)
            track_angle = np.append(track_angle, alpha + beta)

        cv2.imshow('camera1', frame)

        if writeVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        if multi_camera_flag:
            ret, frame = video_capture2.read()  # frame shape 640*480*3
            if not ret:
                break

            image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
            boxes, confidence, classes = yolo.detect_image(image)

            features = encoder(frame, boxes)
            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.cls for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker2.predict()
            tracker2.update(detections)
            matches, unmatched_tracks_1, unmatched_tracks_2 = cross_tracker_match(
                tracker, tracker2, max_cross_cosine_distance)
            matches_id.clear()
            for track_1_idx, track_2_idx in matches:
                matches_id.append((tracker.tracks[track_1_idx].track_id,
                                   tracker2.tracks[track_2_idx].track_id))
            print("Matches:", matches_id)

            if alert_mode_flag:
                if len(tracker2.tracks) > track2_idx:
                    ret = tracker2.tracks[track2_idx].is_confirmed()
                    if not ret:
                        alert_mode_flag = False
                else:
                    alert_mode_flag = False

            if not alert_mode_flag:
                for det in detections:
                    bbox = det.to_tlbr()
                    if show_detections and len(classes) > 0:
                        det_cls = det.cls
                        score = "%.2f" % (det.confidence * 100) + "%"
                        cv2.putText(frame,
                                    str(det_cls) + " " + score,
                                    (int(bbox[0]), int(bbox[3])), 0,
                                    1e-3 * frame.shape[0], (0, 255, 0), 1)
                        cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                      (int(bbox[2]), int(bbox[3])),
                                      (255, 0, 0), 2)

                for track in tracker2.tracks:
                    if not track.is_confirmed() or track.time_since_update > 5:
                        continue
                    bbox = track.to_tlbr()
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])),
                                  (255, 255, 255), 2)
                    cv2.putText(frame, "ID: " + str(track.track_id),
                                (int(bbox[0]), int(bbox[1])), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 1)

                if cv2.waitKey(1) & 0xFF == ord('t'):
                    roi = cv2.selectROI("Select Target", frame)
                    Roi = [Detection(roi, 0, None, 0)]
                    ret, track2_idx = ROI_target_match(tracker2, Roi)
                    cv2.destroyWindow("Select Target")

                    if ret:
                        t_list = [i for (i, j) in matches if j == track2_idx]
                        if t_list:
                            track1_idx = t_list[0]
                            alert_mode_flag = True
            else:
                bbox = tracker2.tracks[track2_idx].to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (0, 0, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 1)

            cv2.imshow('camera2', frame)

            if writeVideo_flag:
                # save a frame
                out2.write(frame)

        # Press Q to stop!
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break

        # if not asyncVideo_flag:
        #     fps = (fps + (1. / (time.time() - t1))) / 2
        #     print("FPS = %.2f" % (fps))
        fps_imutils.update()

    fps_imutils.stop()
    print('imutils FPS: {:.2f}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()
        if multi_camera_flag:
            out2.release()

    if predict_ns_flag:
        np.save("x_axis.npy", x_axis)
        np.save("x_predict.npy", x_predict)
        np.save("x_current.npy", x_current)
        np.save("x_kalman.npy", x_kalman)
        plt.plot(x_axis, x_current, label='X_Measured')
        plt.plot(x_axis, x_kalman, label='X_Filtered')
        plt.plot(x_axis, x_predict, label='X_Predicted')
        plt.legend()
        plt.xlabel('Time (s)')
        plt.ylabel('Pixel')
        plt.title('Motion Prediction')
        plt.show()

    if beta_calculate_flag:
        np.save("x_axis.npy", x_axis)
        np.save("track_angle.npy", track_angle)
        plt.plot(x_axis, track_angle)
        plt.xlabel('Time (s)')
        plt.ylabel('Angle (deg)')
        plt.title('Target Angle')
        plt.show()

    cv2.destroyAllWindows()
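
The detect -> encode -> non-max suppression -> track sequence above is duplicated verbatim for each camera. A sketch of the same steps factored into one function, reusing the example's own names (Image, Detection, preprocessing, np); extracting it would leave a single call per camera:

def track_frame(frame, yolo, encoder, tracker, nms_max_overlap):
    # One detection + Deep SORT update step, shared by both cameras.
    image = Image.fromarray(frame[..., ::-1])  # BGR -> RGB
    boxes, confidence, classes = yolo.detect_image(image)
    features = encoder(frame, boxes)
    detections = [
        Detection(bbox, conf, cls, feat)
        for bbox, conf, cls, feat in zip(boxes, confidence, classes, features)
    ]
    # Suppress overlapping boxes, keeping the highest-scoring ones.
    tlwh = np.array([d.tlwh for d in detections])
    scores = np.array([d.confidence for d in detections])
    indices = preprocessing.non_max_suppression(tlwh, nms_max_overlap, scores)
    detections = [detections[i] for i in indices]
    tracker.predict()
    tracker.update(detections)
    return detections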
Ejemplo n.º 25
0
                img = np.tile(img[..., None], [1, 1, 3])
            img = resize(img, (256, 256))[..., :3]
            avatars.append(img)

    log('load checkpoints..')

    generator, kp_detector = load_checkpoints(config_path=opt.config,
                                              checkpoint_path=opt.checkpoint,
                                              device=device)

    fa = face_alignment.FaceAlignment(face_alignment.LandmarksType._2D,
                                      flip_input=True,
                                      device=device)

    cap = VideoCaptureAsync(opt.cam)
    cap.start()

    if _streaming:
        ret, frame = cap.read()
        stream = pyfakewebcam.FakeWebcam(f'/dev/video{opt.virt_cam}',
                                         frame.shape[1], frame.shape[0])

    cur_ava = 0
    avatar = None
    change_avatar(fa, avatars[cur_ava])
    passthrough = False

    cv2.namedWindow('cam', cv2.WINDOW_GUI_NORMAL)
    cv2.namedWindow('avatarify', cv2.WINDOW_GUI_NORMAL)
    cv2.moveWindow('cam', 0, 0)
    cv2.moveWindow('avatarify', 600, 0)
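
The FakeWebcam object created above is fed one RGB frame at a time. A minimal standalone sketch of that streaming path (it assumes a v4l2loopback device at /dev/video1; FakeWebcam(device, width, height) and schedule_frame are pyfakewebcam's documented API):

import cv2
import pyfakewebcam

cap = cv2.VideoCapture(0)
ret, frame = cap.read()
if not ret:
    raise RuntimeError("no frames from the camera")
stream = pyfakewebcam.FakeWebcam('/dev/video1', frame.shape[1], frame.shape[0])
while ret:
    # FakeWebcam expects RGB while OpenCV delivers BGR.
    stream.schedule_frame(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB))
    ret, frame = cap.read()
cap.release()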
Ejemplo n.º 26
0
import pyautogui
import pyttsx3
from gui import Interface

if __name__ == '__main__':

    eyeTrack = EyeTrack()
    int2Text = ['zero', 'um', 'dois', 'tres', 'quatro', 'cinco']
    contadorOlhosFechados = 0

    gui = Interface()
    gui.speech_assistant(
        'Olá estou aqui para ajudar você a expressar algumas necessidades.  Por favor, olhe em direção ao cartão que expressa a sua necessidade'
    )
    video = VideoCaptureAsync(0)
    video.start()

    pyautogui.press('tab')

    while True:
        (grabbed, frame) = video.read()
        if not grabbed:
            break

        coords, frame = eyeTrack.getCoordenada(frame, debug=False)

        if coords:
            direcao = eyeTrack.getEyeDirection(coords)

            if direcao == Direction.DIREITA:
                gui.speech_assistant('direita')
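
pyttsx3 is imported above but used outside the excerpt. A minimal sketch of what a speech helper such as gui.speech_assistant could wrap (pyttsx3's documented offline text-to-speech calls; the wiring to the Interface class is an assumption):

import pyttsx3

def speak(text):
    # Offline text-to-speech; blocks until the utterance has been spoken.
    engine = pyttsx3.init()
    engine.say(text)
    engine.runAndWait()

speak('direita')  # e.g. announce the detected gaze direction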
Ejemplo n.º 27
0
class ListScreen(Screen):
    def __init__(self, **kwargs):
        super(ListScreen, self).__init__(**kwargs)
        # run clock
        Clock.schedule_interval(self.init_gui, 0.2)
        Clock.schedule_interval(self.show_status, 0.03)

    def init_gui(self, dt):
        self.new_file()
        Clock.unschedule(self.init_gui)
        Clock.schedule_interval(self.cam_update, 0.03)

    def cam_capture_video(
        self,
        frame,
        path,
        scalex,
        scaley,
        footprint,
        designator,
        angle,
        polarity,
        bodyShape,
        bodySize,
        maskShape,
        maskSize,
    ):
        frame2 = frame.copy()
        rotated = imutils.rotate(frame, int(angle))
        # create a mask image of the same shape as input image, filled with 0s (black color)
        mask = np.zeros_like(rotated)
        if maskShape == "Rectangular":
            # create a white filled rectangle
            center = (rotated.shape[1] * 0.5, rotated.shape[0] * 0.5)
            bottomleft = (
                int(center[0] - maskSize[0] * scalex * 0.5),
                int(center[1] - maskSize[1] * scaley * 0.5),
            )
            topright = (
                int(center[0] + maskSize[0] * scalex * 0.5),
                int(center[1] + maskSize[1] * scaley * 0.5),
            )
            maskMask = cv2.rectangle(mask, bottomleft, topright,
                                     (255, 255, 255), -1)
            cropped = rotated[bottomleft[1]:topright[1],
                              bottomleft[0]:topright[0]]
        elif maskShape == "Circular":
            # create a white filled ellipse
            center = (int(rotated.shape[1] * 0.5), int(rotated.shape[0] * 0.5))
            axis = (int(maskSize[0] * scalex * 0.5),
                    int(maskSize[1] * scaley * 0.5))
            maskMask = cv2.ellipse(mask, center, axis, 0, 0, 360,
                                   (255, 255, 255), -1)
            cropped = rotated[center[1] - axis[1]:center[1] + axis[1],
                              center[0] - axis[0]:center[0] + axis[0], ]
        # Bitwise AND operation to black out regions outside the mask
        resultMask = np.bitwise_and(rotated, maskMask)

        # create a mask image of the same shape as input image, filled with 0s (black color)
        mask = np.zeros_like(rotated)
        if bodyShape == "Rectangular":
            # create a white filled rectangle
            center = (rotated.shape[1] * 0.5, rotated.shape[0] * 0.5)
            bottomleft = (
                int(center[0] - bodySize[0] * scalex * 0.5),
                int(center[1] - bodySize[1] * scaley * 0.5),
            )
            topright = (
                int(center[0] + bodySize[0] * scalex * 0.5),
                int(center[1] + bodySize[1] * scaley * 0.5),
            )
            maskBody = cv2.rectangle(mask, bottomleft, topright,
                                     (255, 255, 255), -1)
        elif bodyShape == "Circular":
            # create a white filled ellipse
            center = (int(rotated.shape[1] * 0.5), int(rotated.shape[0] * 0.5))
            axis = (int(bodySize[0] * scalex * 0.5),
                    int(bodySize[1] * scaley * 0.5))
            maskBody = cv2.ellipse(mask, center, axis, 0, 0, 360,
                                   (255, 255, 255), -1)
        # Bitwise AND operation to black out regions outside the mask
        maskSolder = np.bitwise_and(maskMask, np.invert(maskBody))
        resultBody = np.bitwise_and(rotated, maskBody)
        resultSolder = np.bitwise_and(rotated, maskSolder)
        station = self.project_data["Setup"]["Station"]
        timestamp = time.time()
        filename = inspection.create_part_identifier(
            station,
            timestamp,
            self.capture_video_panel,
            designator,
            footprint,
            angle,
            polarity,
            bodyShape,
            bodySize,
            maskShape,
            maskSize,
        )
        joined = os.path.join(path, filename)
        print("Capturing", joined + "*.png")
        if self.capture_video_calibration:
            if len(self.project_data["Marker"]) == 0:
                print("No marker teached")
            else:
                template_rgb = imageprocessing.convert_image_from_base64(
                    self.project_data["Marker"])
                template_rgb, template_gray = brightness_correction.BrightnessAndContrastAuto(
                    template_rgb, 2.0)

                calibrate_rgb, calibrate_gray = brightness_correction.BrightnessAndContrastAuto(
                    rotated, 2.0)

                center = [rotated.shape[1] * 0.5, rotated.shape[0] * 0.5]
                pixscale = [
                    self.project_data["Setup"]["CalibrationScaleX"],
                    self.project_data["Setup"]["CalibrationScaleY"],
                ]
                # find the marker

                markers, templatelocm, keypointsm = findmarker.find_marker(
                    calibrate_gray,
                    pixscale,
                    bodyShape,
                    bodySize,
                    maskShape,
                    maskSize,
                    template_gray,
                    self.project_data["Setup"]["MarkerThreshold"],
                    center,
                    debug=False,
                )
                if len(markers) >= 1:
                    self.project_data["Setup"]["CalibrationScaleX"] = (
                        np.sqrt(
                            np.square(markers[0]["RefX"] - center[0]) +
                            np.square(markers[0]["RefY"] - center[1])) /
                        self.project_data["Setup"]["CalibrationOffset"])
                    self.project_data["Setup"]["CalibrationScaleY"] = (
                        np.sqrt(
                            np.square(markers[0]["RefX"] - center[0]) +
                            np.square(markers[0]["RefY"] - center[1])) /
                        self.project_data["Setup"]["CalibrationOffset"])
                else:
                    print("marker not detected")

        cv2.imwrite(joined + "RotatedMask.jpg", resultMask,
                    [cv2.IMWRITE_JPEG_QUALITY, 90])
        cv2.imwrite(joined + "RotatedBody.jpg", resultBody,
                    [cv2.IMWRITE_JPEG_QUALITY, 90])
        cv2.imwrite(joined + "RotatedSolder.jpg", resultSolder,
                    [cv2.IMWRITE_JPEG_QUALITY, 90])
        cv2.imwrite(joined + "MaskMask.png", maskMask,
                    [cv2.IMWRITE_PNG_COMPRESSION, 9])
        cv2.imwrite(joined + "MaskBody.png", maskBody,
                    [cv2.IMWRITE_PNG_COMPRESSION, 9])
        cv2.imwrite(joined + "MaskSolder.png", maskSolder,
                    [cv2.IMWRITE_PNG_COMPRESSION, 9])
        cv2.imwrite(joined + "Raw.jpg", frame2, [cv2.IMWRITE_JPEG_QUALITY, 90])
        if self.capture_video_marker:
            self.project_data[
                "Marker"] = imageprocessing.convert_image_to_base64(cropped)

    def cam_draw_crosshair(self, frame):
        center = (int(frame.shape[1] * 0.5), int(frame.shape[0] * 0.5))
        left = (center[0] - 100, center[1])
        right = (center[0] + 100, center[1])
        top = (center[0], center[1] - 100)
        bottom = (center[0], center[1] + 100)

        cv2.line(frame, left, right, (255, 0, 0), 2)
        cv2.line(frame, top, bottom, (255, 0, 0), 2)

        for i in range(-5, 6):
            # tick marks along the horizontal line
            start = (center[0] + i * 20, center[1] + 5)
            end = (center[0] + i * 20, center[1] - 5)
            cv2.line(frame, start, end, (255, 0, 0), 2)
            # tick marks along the vertical line
            start = (center[0] + 5, center[1] + i * 20)
            end = (center[0] - 5, center[1] + i * 20)
            cv2.line(frame, start, end, (255, 0, 0), 2)

    def cam_teachin_part(
        self,
        frame,
        scalex,
        scaley,
        angle,
        polarity,
        bodyShape,
        bodySize,
        maskShape,
        maskSize,
    ):
        # translated = imutils.translate(frame, x,y)
        # or use center in rotate function
        rotated = imutils.rotate(frame, int(angle))
        cv2.putText(
            rotated,
            self.overlay_teachin_designator + "  " +
            self.overlay_teachin_footprint,
            (10, 35),
            cv2.FONT_HERSHEY_SIMPLEX,
            1,
            (0, 0, 0),
            2,
        )
        if polarity:
            center = (5, 5)
            axis = (5, 5)
            cv2.ellipse(rotated, center, axis, 0, 0, 360, (0, 0, 0), 2)
        # draw body
        if bodyShape == "Rectangular":
            center = (rotated.shape[1] * 0.5, rotated.shape[0] * 0.5)
            bottomleft = (
                int(center[0] - bodySize[0] * scalex * 0.5),
                int(center[1] - bodySize[1] * scaley * 0.5),
            )
            topright = (
                int(center[0] + bodySize[0] * scalex * 0.5),
                int(center[1] + bodySize[1] * scaley * 0.5),
            )
            cv2.rectangle(rotated, bottomleft, topright, (0, 0, 0), 2)
        elif bodyShape == "Circular":
            center = (int(rotated.shape[1] * 0.5), int(rotated.shape[0] * 0.5))
            axis = (int(bodySize[0] * scalex * 0.5),
                    int(bodySize[1] * scaley * 0.5))
            cv2.ellipse(rotated, center, axis, 0, 0, 360, (0, 0, 0), 2)
        # draw mask
        if maskShape == "Rectangular":
            center = (rotated.shape[1] * 0.5, rotated.shape[0] * 0.5)
            bottomleft = (
                int(center[0] - maskSize[0] * scalex * 0.5),
                int(center[1] - maskSize[1] * scaley * 0.5),
            )
            topright = (
                int(center[0] + maskSize[0] * scalex * 0.5),
                int(center[1] + maskSize[1] * scaley * 0.5),
            )
            cv2.rectangle(rotated, bottomleft, topright, (0, 0, 0), 2)
        elif maskShape == "Circular":
            center = (int(rotated.shape[1] * 0.5), int(rotated.shape[0] * 0.5))
            axis = (int(maskSize[0] * scalex * 0.5),
                    int(maskSize[1] * scaley * 0.5))
            cv2.ellipse(rotated, center, axis, 0, 0, 360, (0, 0, 0), 2)
        return rotated

    def cam_update(self, dt):
        try:
            _, frame = self.capture.read()
            # capture
            if (self.capture_video or self.capture_video_marker
                    or self.capture_video_calibration):
                path = os.path.expanduser(
                    self.project_data["Setup"]["ReportingPath"])
                inspectpart = self.project_data["InspectionPath"][
                    self.capture_video_inspectionpart]
                panel = self.capture_video_panel
                partref = inspectpart["Partsdefinition"]
                if partref != -1:
                    part = inspection.get_part_definition(
                        self.project_data["PartsDefinition"]
                        ["PartsDefinition"], partref)
                    body_shape = part["BodyShape"]
                    body_size = part["BodySize"]
                    mask_shape = part["MaskShape"]
                    mask_size = part["MaskSize"]
                    rotation = inspectpart["Rotation"] - part["Rotation"]
                    polarity = part["Polarity"]
                    designator = inspectpart["Designator"]
                    footprint = inspectpart["Footprint"]
                    self.cam_capture_video(
                        frame,
                        path,
                        self.project_data["Setup"]["CalibrationScaleX"],
                        self.project_data["Setup"]["CalibrationScaleY"],
                        footprint,
                        designator,
                        rotation,
                        polarity,
                        body_shape,
                        body_size,
                        mask_shape,
                        mask_size,
                    )
                self.capture_video = 0
                self.capture_video_marker = 0
                self.capture_video_calibration = 0

            # overlay cross
            if self.overlay_crosshair:
                self.cam_draw_crosshair(frame)
            if self.overlay_teachin:
                # print("overlay", self.overlay_teachin_body_shape, self.overlay_teachin_body_size, self.overlay_teachin_mask_shape, self.overlay_teachin_mask_size, self.overlay_teachin_rotation)
                frame = self.cam_teachin_part(
                    frame,
                    self.project_data["Setup"]["CalibrationScaleX"],
                    self.project_data["Setup"]["CalibrationScaleY"],
                    self.overlay_teachin_rotation,
                    self.overlay_teachin_polarity,
                    self.overlay_teachin_body_shape,
                    self.overlay_teachin_body_size,
                    self.overlay_teachin_mask_shape,
                    self.overlay_teachin_mask_size,
                )
            buf1 = cv2.flip(frame, 0)
            buf = buf1.tobytes()
            texture1 = Texture.create(size=(frame.shape[1], frame.shape[0]),
                                      colorfmt="rgb")
            texture1.blit_buffer(buf, colorfmt="bgr", bufferfmt="ubyte")
            self.ids["img_cam"].texture = texture1

        except Exception as e:
            print("cam exception", e)

    #### File menu
    def exit_app(self):
        self.camera_disconnect()
        self.printer_disconnect()
        App.get_running_app().stop()

    def new_file(self):
        ## File menu / New Project
        self.init_project()

    def init_project(self):
        # init data
        self.project_file_path = ""
        self.project_data = data.init_project_data()
        self.project_data["CADMode"] = "None"
        self.ids["img_cad_origin"].set_cad_view(self.project_data,
                                                self.teachin)
        self.ids["img_cad_origin"].redraw_cad_view()
        self.capture = None
        self.print = None
        self.paneldisselection = []
        self.overlay_crosshair = 0
        self.overlay_teachin = 0
        self.capture_video_marker = 0
        self.capture_video = 0

        try:
            self.camera_disconnect()
            self.camera_connect()
            self.printer_disconnect()
            self.printer_connect()
        except Exception as e:
            print(e, "cam or printer start problem")

    def load_file(self):
        ### File Menu / Load project
        content = LoadDialog(load=self.load, cancel=self.dismiss_popup)
        self._popup = Popup(title="Load file",
                            content=content,
                            size_hint=(0.9, 0.9))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def load(self, path, filename):
        ### click load button of Loading Dialog
        ### load all info from pre saved project file
        try:
            ### if proper project file
            self.project_file_path = os.path.expanduser(filename[0])
            self.project_data = data.read_project_data(self.project_file_path)
            self.paneldisselection = []
            try:
                self.camera_disconnect()
                self.camera_connect()
                self.printer_disconnect()
                self.printer_connect()
            except Exception as e:
                print(e, "cam or printer start problem")

            self.ids["img_cad_origin"].set_cad_view(self.project_data,
                                                    self.teachin)
            self.ids["img_cad_origin"].redraw_cad_view()
        except Exception:
            ### if not proper project file
            print("Problem loading file")
            self.dismiss_popup()
            return
        self.dismiss_popup()

    def save_file(self):
        ### File Menu / Save Project
        if self.project_file_path == "":
            self.save_as_file()
        else:
            data.write_project_data(self.project_file_path, self.project_data)

    def save_as_file(self):
        ### File Menu / Save as Project
        content = SaveDialog(save=self.save, cancel=self.dismiss_popup)
        self._popup = Popup(title="Save file",
                            content=content,
                            size_hint=(0.9, 0.9))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def save(self, path, filename):
        ### after click Save button of Save Dialog
        self.project_file_path = os.path.expanduser(
            os.path.join(path, filename))
        print(path, filename, self.project_file_path)
        data.write_project_data(self.project_file_path, self.project_data)
        self.dismiss_popup()

    def export_partsdefinition(self):
        ### File Menu / Export Partsdefinition
        old = data.helper_read_json("partsdefinition")
        data.helper_write_json("temp/partsdefinition", old)
        new = self.project_data["PartsDefinition"].copy()
        inspection.merge_partsdefinition(new, old)
        data.helper_write_json("partsdefinition", new)

    #### program menu ####
    def import_file(self):
        ### Program menu / import pick and place
        content = ImportDialog(load=self.import_pp, cancel=self.dismiss_popup)
        self._popup = Popup(title="Import file",
                            content=content,
                            size_hint=(0.9, 0.9))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def import_pp(self, path, filename):

        ### after click load button of Loading button
        try:
            ### if proper project file
            pp_file_path = os.path.expanduser(filename[0])
            inspection.load_pick_place(self.project_data, pp_file_path)
            inspection.assign_partsdefinition(self.project_data)
            # print("data", self.project_data)
            # redraw
            self.ids["img_cad_origin"].redraw_cad_view()

        except Exception:
            ### if not proper project file
            self.dismiss_popup()
            return

        self.dismiss_popup()

    def teachin_select(self):
        ### Program Menu / Teachin select part
        self.project_data["CADMode"] = "Teachin"

    def select_side(self):
        ### Program menu / Select Soldering Side
        side = [
            {
                "text": "Top",
                "is_selected": self.project_data["InspectionSide"] == "Top",
            },
            {
                "text": "Bottom",
                "is_selected": self.project_data["InspectionSide"] == "Bottom",
            },
        ]
        self.ignore_first = not side[0]["is_selected"]

        content = ListPopup()
        args_converter = lambda row_index, rec: {
            "text": rec["text"],
            "is_selected": rec["is_selected"],
            "size_hint_y": None,
            "height": 50,
        }
        list_adapter = ListAdapter(
            data=side,
            args_converter=args_converter,
            propagate_selection_to_data=True,
            cls=ListItemButton,
            selection_mode="single",
            allow_empty_selection=False,
        )
        list_view = ListView(adapter=list_adapter)

        content.ids.profile_list.add_widget(list_view)

        list_view.adapter.bind(on_selection_change=self.selected_side)

        self._popup = Popup(title="Select Soldering Side",
                            content=content,
                            size_hint=(0.5, 0.6))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def selected_side(self, adapter):
        if self.ignore_first:
            self.ignore_first = False
            return
        self.project_data["InspectionSide"] = adapter.selection[0].text
        self.dismiss_popup()
        self.ids["img_cad_origin"].redraw_cad_view()

    def set_reference1(self):
        ### Program Menu / Set Reference point 1
        self.project_data["CADMode"] = "Ref1"

    def set_reference2(self):
        ### Program Menu /  Set Reference Point 2
        self.project_data["CADMode"] = "Ref2"

    def teachin(self, partindex):
        ### Program Menu /  Teachin
        if partindex == -1:
            partindex = inspection.find_first_unassigned_part(
                self.project_data["InspectionPath"])
        if partindex == -1:
            return
        self.set_part_overlay(partindex)

        self.ids["tab_panel"].switch_to(self.ids["tab_panel"].tab_list[0])
        self.content = TeachinPopup(
            controlTeachin=self.control_teachin,
            save=self.select_teachin_save,
            cancel=self.dismiss_popup,
        )
        # initialize control
        self.content.ids["body_input_x"].text = str(
            self.overlay_teachin_body_size[0])
        self.content.ids["body_input_y"].text = str(
            self.overlay_teachin_body_size[1])
        self.content.ids["mask_input_x"].text = str(
            self.overlay_teachin_mask_size[0])
        self.content.ids["mask_input_y"].text = str(
            self.overlay_teachin_mask_size[1])
        self.content.ids["rotation"].text = str(self.overlay_teachin_rotation)
        self.content.ids[
            "polarity_switch"].active = self.overlay_teachin_polarity
        if self.overlay_teachin_body_shape == "Rectangular":
            self.content.ids["body_switch"].active = True
        else:
            self.content.ids["body_switch"].active = False
        if self.overlay_teachin_mask_shape == "Rectangular":
            self.content.ids["mask_switch"].active = True
        else:
            self.content.ids["mask_switch"].active = False
        self._popup = Popup(
            title="Teachin Part",
            content=self.content,
            size_hint=(0.2, 0.5),
            background_color=[0, 0, 0, 0.0],
        )
        self._popup.pos_hint = {"center_x": 0.9, "center_y": 0.75}
        self._popup.open()

        self.overlay_crosshair = 1
        self.overlay_teachin = 1
        self.project_data["CADMode"] = "None"

    def control_teachin(self, field, value):
        if field == "TextRotation":
            val = min(360, max(-360, float(self.content.ids["rotation"].text)))
            self.content.ids["rotation"].text = str(val)
            self.overlay_teachin_rotation = val
        elif field == "SwitchBody":
            if value:
                self.overlay_teachin_body_shape = "Rectangular"
            else:
                self.overlay_teachin_body_shape = "Circular"
        elif field == "SwitchMask":
            if value:
                self.overlay_teachin_mask_shape = "Rectangular"
            else:
                self.overlay_teachin_mask_shape = "Circular"
        elif field == "SwitchPolarity":
            self.overlay_teachin_polarity = value
        elif field == "TextBodyX":
            val = min(100, max(0,
                               float(self.content.ids["body_input_x"].text)))
            self.overlay_teachin_body_size = [
                val, self.overlay_teachin_body_size[1]
            ]
            self.content.ids["body_input_x"].text = str(val)
        elif field == "TextBodyY":
            val = min(100, max(0,
                               float(self.content.ids["body_input_y"].text)))
            self.overlay_teachin_body_size = [
                self.overlay_teachin_body_size[0], val
            ]
            self.content.ids["body_input_y"].text = str(val)
        elif field == "TextMaskX":
            val = min(100, max(0,
                               float(self.content.ids["mask_input_x"].text)))
            self.overlay_teachin_mask_size = [
                val, self.overlay_teachin_mask_size[1]
            ]
            self.content.ids["mask_input_x"].text = str(val)
        elif field == "TextMaskY":
            val = min(100, max(0,
                               float(self.content.ids["mask_input_y"].text)))
            self.overlay_teachin_mask_size = [
                self.overlay_teachin_mask_size[0], val
            ]
            self.content.ids["mask_input_y"].text = str(val)

    def select_teachin_save(self):
        index = inspection.helper_get_index_by_designator(
            self.project_data["InspectionPath"],
            self.overlay_teachin_designator)
        if self.project_data["InspectionSide"] == "Top":
            rot = (self.project_data["InspectionPath"][index]["Rotation"] -
                   self.overlay_teachin_rotation)
        else:
            rot = (self.overlay_teachin_rotation -
                   self.project_data["InspectionPath"][index]["Rotation"])

        part = {
            "BodyShape": self.overlay_teachin_body_shape,
            "BodySize": self.overlay_teachin_body_size,
            "Id": self.overlay_teachin_footprint,
            "MaskShape": self.overlay_teachin_mask_shape,
            "MaskSize": self.overlay_teachin_mask_size,
            "Polarity": self.overlay_teachin_polarity,
            "Rotation": rot,
            "Type": "Part",
        }
        partsdefinition = self.project_data["PartsDefinition"][
            "PartsDefinition"]
        index = inspection.find_part_in_definition(partsdefinition, part["Id"])
        if index != -1:
            partsdefinition[index] = part
        else:
            partsdefinition.append(part)
        # try to assign all unassigned parts with that footprint
        inspection.assign_partsdefinition(self.project_data)
        self.overlay_crosshair = 0
        self.overlay_teachin = 0
        self.dismiss_popup()
        self.ids["img_cad_origin"].redraw_cad_view()

    ##### panel menu
    def set_num_panel(self):
        num = inspection.get_num_panel(self.project_data["Panel"])
        content = EditPopup(save=self.save_panel_num,
                            cancel=self.dismiss_popup)
        content.ids["btn_connect"].text = "Save"
        content.ids["text_port"].text = str(num)
        self._popup = Popup(title="Select panel num",
                            content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def save_panel_num(self, txt_port):
        # set num of panels
        num = int(txt_port)
        num = max(1, num)
        num = min(self.project_data["Setup"]["MaxPanel"], num)
        inspection.set_num_panel(self.project_data["Panel"], num)
        self.dismiss_popup()

    def set_part_overlay(self, partindex):
        partref = -1
        if partindex != -1:
            inspectpart = self.project_data["InspectionPath"][partindex]
            partref = inspectpart["Partsdefinition"]
            self.overlay_teachin_designator = inspectpart["Designator"]
            self.overlay_teachin_footprint = inspectpart["Footprint"]
        else:
            self.overlay_teachin_designator = ""
            self.overlay_teachin_footprint = ""

        if partref != -1:
            part = inspection.get_part_definition(
                self.project_data["PartsDefinition"]["PartsDefinition"],
                partref)
            self.overlay_teachin_body_shape = part["BodyShape"]
            self.overlay_teachin_body_size = part["BodySize"]
            self.overlay_teachin_mask_shape = part["MaskShape"]
            self.overlay_teachin_mask_size = part["MaskSize"]
            self.overlay_teachin_polarity = part["Polarity"]
            if self.project_data["InspectionSide"] == "Top":
                self.overlay_teachin_rotation = (inspectpart["Rotation"] -
                                                 part["Rotation"])
            else:
                self.overlay_teachin_rotation = (part["Rotation"] -
                                                 inspectpart["Rotation"])
        else:
            setup = self.project_data["Setup"]
            self.overlay_teachin_body_shape = setup["BodyShape"]
            self.overlay_teachin_body_size = setup["BodySize"]
            self.overlay_teachin_mask_shape = setup["MaskShape"]
            self.overlay_teachin_mask_size = setup["MaskSize"]
            self.overlay_teachin_polarity = setup["Polarity"]
            self.overlay_teachin_rotation = setup["Rotation"]
        # print("overlay", self.overlay_teachin_body_shape, self.overlay_teachin_body_size, self.overlay_teachin_mask_shape, self.overlay_teachin_mask_size, self.overlay_teachin_rotation)
        self.overlay_teachin = 1
        self.overlay_crosshair = 1

    def set_reference_panel(self):
        #  show dialpad
        # print("ref")
        self.ids["tab_panel"].switch_to(self.ids["tab_panel"].tab_list[0])
        self.content = ControlPopup(
            controlXYZ=self.control_XYZ,
            set_panel_ref1=self.set_panel_ref1,
            set_panel_ref2=self.set_panel_ref2,
            get_panel_ref1=self.get_panel_ref1,
            get_panel_ref2=self.get_panel_ref2,
            calibrate=self.calibrate,
            teachin_reference=self.teachin_reference,
            cancel=self.dismiss_popup,
        )
        self.content.ids["cur_X"].text = format(
            self.project_data["Setup"]["TravelX"], ".2f")
        self.content.ids["cur_Y"].text = format(
            self.project_data["Setup"]["TravelY"], ".2f")
        self.content.ids["cur_Z"].text = format(
            self.project_data["Setup"]["TravelZ"], ".2f")
        self.content.ids["cur_panel"].text = "1"
        self._popup = Popup(
            title="Set reference point",
            content=self.content,
            size_hint=(0.4, 0.4),
            background_color=[0, 0, 0, 0.0],
        )
        self._popup.pos_hint = {"center_x": 0.8, "center_y": 0.8}
        self._popup.open()
        self.project_data["CADMode"] = "None"

        # set body and mask
        ref = inspection.get_reference_1(self.project_data["InspectionPath"])
        self.capture_video_inspectionpart = ref
        self.set_part_overlay(ref)

        # home printer
        gcode = robotcontrol.go_home(self.project_data)
        self.queue_printer_command(gcode)

    def teachin_reference(self):
        # trigger capturing to data as base64 here
        self.capture_video_marker = 1
        return

    def control_XYZ(self, axis, value):
        ### click any button on dialpad, calculate new position
        index = int(self.content.ids["cur_panel"].text)
        x = float(self.content.ids["cur_X"].text)
        y = float(self.content.ids["cur_Y"].text)
        z = float(self.content.ids["cur_Z"].text)

        if axis == "X":
            x += float(value)
        elif axis == "Y":
            y += float(value)
        # TODO cleanup dialpad
        # elif axis == "Z":
        #    z += float(value)
        elif axis == "HomeXY":
            x = self.project_data["Setup"]["TravelX"]
            y = self.project_data["Setup"]["TravelY"]
        elif axis == "HomeZ":
            z = self.project_data["Setup"]["TravelZ"]

        index = max(1, index)
        index = min(self.project_data["Setup"]["MaxPanel"], index)
        x = max(self.project_data["Setup"]["MinX"], x)
        x = min(self.project_data["Setup"]["MaxX"], x)
        y = max(self.project_data["Setup"]["MinY"], y)
        y = min(self.project_data["Setup"]["MaxY"], y)
        z = max(self.project_data["Setup"]["MinZ"], z)
        z = min(self.project_data["Setup"]["MaxZ"], z)

        self.content.ids["cur_panel"].text = str(index)
        self.content.ids["cur_X"].text = format(x, ".2f")
        self.content.ids["cur_Y"].text = format(y, ".2f")
        self.content.ids["cur_Z"].text = format(z, ".2f")

        # go xyz printer
        gcode = robotcontrol.go_xyz(self.project_data, x, y, z)
        self.queue_printer_command(gcode)

    def set_panel_ref1(self):
        ### click set1 button on dialpad
        index = int(self.content.ids["cur_panel"].text)
        index = min(index,
                    inspection.get_num_panel(self.project_data["Panel"]))
        index = max(index, 1)

        x = float(self.content.ids["cur_X"].text)
        y = float(self.content.ids["cur_Y"].text)
        z = float(self.content.ids["cur_Z"].text)
        inspection.set_panel_reference_1(self.project_data["Panel"], index - 1,
                                         x, y)

    def set_panel_ref2(self):
        ### click set2 button on dialpad
        index = int(self.content.ids["cur_panel"].text)
        index = min(index,
                    inspection.get_num_panel(self.project_data["Panel"]))
        index = max(index, 1)
        x = float(self.content.ids["cur_X"].text)
        y = float(self.content.ids["cur_Y"].text)
        z = float(self.content.ids["cur_Z"].text)
        inspection.set_panel_reference_2(self.project_data["Panel"], index - 1,
                                         x, y)

    def get_panel_ref1(self):
        ### click on get1 button on dialpad
        index = int(self.content.ids["cur_panel"].text)
        x, y = inspection.get_panel_reference_1(self.project_data["Panel"],
                                                index - 1)
        if x == -1 and y == -1:
            x = self.project_data["Setup"]["TravelX"]
            y = self.project_data["Setup"]["TravelY"]
        z = self.project_data["Setup"]["TravelZ"]
        self.content.ids["cur_X"].text = format(x, ".2f")
        self.content.ids["cur_Y"].text = format(y, ".2f")
        self.content.ids["cur_Z"].text = format(z, ".2f")
        # set body and mask
        ref = inspection.get_reference_1(self.project_data["InspectionPath"])
        self.set_part_overlay(ref)
        self.capture_video_inspectionpart = ref

        # go xyz printer
        gcode = robotcontrol.go_xyz(self.project_data, x, y, z)
        self.queue_printer_command(gcode)

    def get_panel_ref2(self):
        ### click on get2 button on dialpad
        index = int(self.content.ids["cur_panel"].text)
        x, y = inspection.get_panel_reference_2(self.project_data["Panel"],
                                                index - 1)
        if x == -1 and y == -1:
            x = self.project_data["Setup"]["TravelX"]
            y = self.project_data["Setup"]["TravelY"]
        z = self.project_data["Setup"]["TravelZ"]
        self.content.ids["cur_X"].text = format(x, ".2f")
        self.content.ids["cur_Y"].text = format(y, ".2f")
        self.content.ids["cur_Z"].text = format(z, ".2f")
        # set body and mask
        ref = inspection.get_reference_2(self.project_data["InspectionPath"])
        self.set_part_overlay(ref)
        self.capture_video_inspectionpart = ref

        # go xyz printer
        gcode = robotcontrol.go_xyz(self.project_data, x, y, z)
        self.queue_printer_command(gcode)

    def calibrate(self, axis):
        ### click on calibrate X or Y button on dialpad
        index = int(self.content.ids["cur_panel"].text)
        x, y = inspection.get_panel_reference_1(self.project_data["Panel"],
                                                index - 1)
        if x == -1 and y == -1:
            return
        z = self.project_data["Setup"]["CameraParameters"]["FocusZ"]
        br = self.project_data["Setup"]["CameraParameters"]["IlluminationPWM"]
        # add offset for calibration
        centerx = x
        centery = y
        if axis == "X":
            x += self.project_data["Setup"]["CalibrationOffset"]
        if axis == "Y":
            y += self.project_data["Setup"]["CalibrationOffset"]

        self.content.ids["cur_X"].text = format(x, ".2f")
        self.content.ids["cur_Y"].text = format(y, ".2f")
        self.content.ids["cur_Z"].text = format(z, ".2f")
        ref = inspection.get_reference_1(self.project_data["InspectionPath"])
        self.set_part_overlay(ref)
        self.capture_video_inspectionpart = ref
        # inspect xyz printer
        gcode = robotcontrol.inspect_xyz(self.project_data, x, y, z, br,
                                         centerx, centery, index - 1, ref)
        self.queue_printer_command(gcode)

    def select_pcb_in_panel(self):
        num = inspection.get_num_panel(self.project_data["Panel"])
        content = EditPopup(save=self.save_pcb_in_panel,
                            cancel=self.dismiss_popup)
        content.ids["btn_connect"].text = "Save"
        content.ids["text_port"].text = ""
        self._popup = Popup(
            title='Select Panels to exclude from Inspection, example "1,2"',
            content=content,
            size_hint=(0.5, 0.4),
        )
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def save_pcb_in_panel(self, txt_port):
        # set array of panels
        excludepanels = [s.strip() for s in txt_port.split(",")]
        panel = []
        for p in range(inspection.get_num_panel(self.project_data["Panel"])):
            if str(p + 1) in excludepanels:
                panel.append(p)
        self.paneldisselection = panel
        self.dismiss_popup()

    def start_inspection(self):
        ### toolbar start inspection button
        # prepare panel
        panel = []
        for p in range(inspection.get_num_panel(self.project_data["Panel"])):
            if p not in self.paneldisselection:
                panel.append(p)
        # print
        # print("panel", panel)
        gcode = robotcontrol.panel_inspection(self.project_data, panel)
        self.queue_printer_command(gcode)

    def pause_inspection(self):
        ### toolbar pause inspection button
        if self.print.printing:
            self.print.pause()

    def resume_inspection(self):
        ### toolbar resume inspection button
        if self.print.printing:
            self.print.resume()

    def stop_inspection(self):
        ### toolbar stop inspection button
        if self.print.printing:
            self.print.cancelprint()

    def queue_printer_command(self, gcode):
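        # Wrap the g-code in a LightGCode object and hand it to printcore:
        # inject into a running job via send(), otherwise start a new print.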
        garray = robotcontrol.make_array(gcode)
        # print("gcode raw", gcode, garray)

        gcoded = gcoder.LightGCode(garray)
        # print("gcoded", gcoded)
        if hasattr(self, "print") and self.print is not None:
            if not self.print.online or not self.print.printer:
                print("Problem with printer", self.print.online,
                      self.print.printer)
            if self.print.printing:
                self.print.send(gcoded)
            else:
                self.print.startprint(gcoded)
        else:
            print("Problem with printer interface")

    #### Connect menu
    def set_printer(self):
        ### Connect Menu /  Connect Printer
        content = EditPopup(save=self.save_printer_port,
                            cancel=self.dismiss_popup)
        content.ids["text_port"].text = self.project_data["Setup"]["RobotPort"]
        self._popup = Popup(title="Select Printer port",
                            content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def save_printer_port(self, txt_port):
        self.project_data["Setup"]["RobotPort"] = txt_port
        try:
            self.printer_disconnect()
            self.printer_connect()
        except Exception as e:
            print(e, "exception save robot port")
        self.dismiss_popup()

    def set_camera(self):
        # set camera device
        content = EditPopup(save=self.save_camera_port,
                            cancel=self.dismiss_popup)
        content.ids["text_port"].text = self.project_data["Setup"][
            "CameraPort"]
        self._popup = Popup(title="Select Camera port",
                            content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def save_camera_port(self, txt_port):
        self.project_data["Setup"]["CameraPort"] = txt_port
        try:
            self.camera_disconnect()
            self.camera_connect()
        except Exception as e:
            print(e, "exception save cam port")
        self.dismiss_popup()

    def set_reporting(self):
        # set reporting path
        content = EditPopup(save=self.save_reporting_port,
                            cancel=self.dismiss_popup)
        content.ids["text_port"].text = self.project_data["Setup"][
            "ReportingPath"]
        self._popup = Popup(title="Select Reporting path",
                            content=content,
                            size_hint=(0.5, 0.4))
        self._popup.open()
        self.project_data["CADMode"] = "None"

    def save_reporting_port(self, txt_port):
        self.project_data["Setup"]["ReportingPath"] = txt_port
        self.dismiss_popup()

    def dismiss_popup(self):
        self._popup.dismiss()
        self.overlay_crosshair = 0
        self.overlay_teachin = 0

    def camera_connect(self):
        ### connect camera
        self.capture = VideoCaptureAsync(
            self.project_data["Setup"]["CameraPort"],
            self.project_data["Setup"]["CameraResX"],
            self.project_data["Setup"]["CameraResY"],
        )
        self.capture.start()

    def camera_disconnect(self):
        if self.capture is not None:
            self.capture.stop()
            self.capture = None

    def printer_connect(self):
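        # open the serial connection at 115200 baud and register this class
        # as the printrun event handler (on_send, on_recv, on_capture, ...)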
        if self.print is None:
            self.print = printcore(self.project_data["Setup"]["RobotPort"],
                                   115200)
            self.print.addEventHandler(self)

    def printer_disconnect(self):
        if self.print is not None:
            self.print.disconnect()
            self.print = None

    # printrun events:
    def on_init(self):
        return  # self.__write("on_init")

    def on_send(self, command, gline):
        return  # self.__write("on_send", command)

    def on_capture(self, command):
        # Trigger capture of image
        # String given in command must be translated to body mask....
        # See printerinspect.txt for details
        # CAPTURE %Panel %PartRef %CenterX %CenterY %PosX %PosY %PosZ %Brightness
        panel, partref, centerX, centerY, posX, posY, posZ, brightness = robotcontrol.parse_capture_command(
            command)

        self.capture_video_inspectionpart = partref
        self.capture_video_panel = panel
        self.capture_video = 1
        return  # self.__write("on_send", command)

    def on_recv(self, line):
        return  # self.__write("on_recv", line.strip())

    def on_connect(self):
        return  # self.__write("on_connect")

    def on_disconnect(self):
        return  # self.__write("on_disconnect")

    def on_error(self, error):
        return  # self.__write("on_error", error)

    def on_online(self):
        return  # self.__write("on_online")

    def on_temp(self, line):
        return  # self.__write("on_temp", line)

    def on_start(self, resume):
        return  # self.__write("on_start", "true" if resume else "false")

    def on_end(self):
        return  # self.__write("on_end")

    def on_layerchange(self, layer):
        return  # self.__write("on_layerchange", "%f" % (layer))

    def on_preprintsend(self, gline, index, mainqueue):
        return  # self.__write("on_preprintsend", gline)

    def on_printsend(self, gline):
        return  # self.__write("on_printsend", gline)

    def show_status(self, dt):
        self.ids["lbl_layer_status"].text = (
            "Layer: " + self.project_data["InspectionSide"])
        self.ids[
            "lbl_cad_status"].text = "CADMode: " + self.project_data["CADMode"]
        unassigned = inspection.get_list_unassigned_parts(self.project_data)
        self.ids["lbl_profile_status"].text = "# unassigned parts: " + str(
            len(unassigned))
        if hasattr(self, "capture") and self.capture is not None:
            self.ids["lbl_cam_status"].text = "Camera: Connected"
        else:
            self.ids["lbl_cam_status"].text = "Camera: Not Found"
        # printer
        if hasattr(self, "print") and self.print is not None:
            if self.print.printer is None:
                self.ids[
                    "lbl_printer_status"].text = "Robot: No 3d printer found"
            elif self.print.printing:
                if len(self.print.mainqueue) > 0:
                    self.ids["lbl_printer_status"].text = (
                        "Robot: Inspecting " + str(
                            round(
                                float(self.print.queueindex) /
                                len(self.print.mainqueue) * 100,
                                2,
                            )) + "%")
                else:
                    self.ids["lbl_printer_status"].text = "Robot: Inspecting"
            elif self.print.online:
                self.ids["lbl_printer_status"].text = "Robot: Idle"
            else:
                self.ids["lbl_printer_status"].text = "Robot: Connected"
        else:
            self.ids["lbl_printer_status"].text = "Robot: Not Found"
Example #28
0
def main(yolo):
    # Definition of the parameters
    max_cosine_distance = 0.2
    nn_budget = None
    nms_max_overlap = 1.0

    output_format = 'mp4'

    initialize_door_by_yourself = False
    door_array = None
    # Deep SORT
    model_filename = '../model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    show_detections = True
    writeVideo_flag = True
    asyncVideo_flag = False

    error_values = []
    check_gpu()
    files = sorted(os.listdir('data_files/videos'))

    for video_name in files:
        print("opening video: {}".format(video_name))
        file_path = join('data_files/videos', video_name)
        output_name = 'save_data/out_' + video_name[0:-3] + output_format
        counter = Counter(counter_in=0, counter_out=0, track_id=0)
        truth = get_truth(video_name)

        if asyncVideo_flag:
            video_capture = VideoCaptureAsync(file_path)
        else:
            video_capture = cv2.VideoCapture(file_path)

        if asyncVideo_flag:
            video_capture.start()

        if writeVideo_flag:
            if asyncVideo_flag:
                w = int(video_capture.cap.get(3))
                h = int(video_capture.cap.get(4))
            else:
                w = int(video_capture.get(3))
                h = int(video_capture.get(4))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            out = cv2.VideoWriter(output_name, fourcc, 15, (w, h))
            frame_index = -1

        fps = 0.0
        fps_imutils = imutils.video.FPS().start()

        all_doors = read_door_info('data_files/doors_info_links.json')
        door_array = all_doors[video_name]
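        # door rectangle given as (x1, y1, x2, y2); its bottom edge (y2) is
        # used as the border line for the in/out decision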
        rect_door = Rectangle(door_array[0], door_array[1], door_array[2],
                              door_array[3])

        border_door = door_array[3]
        while True:
            ret, frame = video_capture.read()  # frame shape 640*480*3
            if not ret:
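                # end of video: compare the predicted in/out counts with the ground truth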
                y1 = (counter.counter_in - truth.inside)**2
                y2 = (counter.counter_out - truth.outside)**2
                total_count = counter.return_total_count()
                true_total = truth.inside + truth.outside
                if true_total != 0:
                    err = abs(total_count - true_total) / true_total
                else:
                    err = abs(total_count - true_total)
                mse = (y1 + y2) / 2
                log_res = "in video: {}\n predicted / true\n counter in: {} / {}\n counter out: {} / {}\n" \
                          " total: {} / {}\n error: {}\n mse error: {}\n______________\n".format(video_name,
                                                                                                 counter.counter_in,
                                                                                                 truth.inside,
                                                                                                 counter.counter_out,
                                                                                                 truth.outside,
                                                                                                 total_count,
                                                                                                 true_total, err, mse)
                with open('../log_results.txt', 'a') as log:
                    log.write(log_res)
                print(log_res)
                error_values.append(err)
                break

            t1 = time.time()

            image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
            boxes, confidence, classes = yolo.detect_image(image)

            features = encoder(frame, boxes)
            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]

            # Run non-maxima suppression.
            boxes = np.array([d.tlwh for d in detections])
            scores = np.array([d.confidence for d in detections])
            classes = np.array([d.cls for d in detections])
            indices = preprocessing.non_max_suppression(
                boxes, nms_max_overlap, scores)
            detections = [detections[i] for i in indices]

            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            cv2.rectangle(frame, (int(door_array[0]), int(door_array[1])),
                          (int(door_array[2]), int(door_array[3])),
                          (23, 158, 21), 3)
            for det in detections:
                bbox = det.to_tlbr()
                if show_detections and len(classes) > 0:
                    score = "%.2f" % (det.confidence * 100) + "%"
                    # rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])
                    # rect_door = Rectangle( int(door_array[0]), int(door_array[1]), int(door_array[2]), int(door_array[3]) )
                    # intersection = rect_head & rect_door
                    #
                    # if intersection:
                    #     squares_coeff = rect_square(*intersection)/ rect_square(*rect_head)
                    #     cv2.putText(frame, score + " inter: " + str(round(squares_coeff, 3)), (int(bbox[0]), int(bbox[3])), 0,
                    #             1e-3 * frame.shape[0], (0, 100, 255), 5)
                    cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                                  (int(bbox[2]), int(bbox[3])), (255, 0, 0), 3)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                # first appearance of an object with id=track.id

                if track.track_id not in counter.people_init or counter.people_init[
                        track.track_id] == 0:
                    counter.obj_initialized(track.track_id)
                    rect_head = Rectangle(bbox[0], bbox[1], bbox[2], bbox[3])

                    intersection = rect_head & rect_door
                    if intersection:

                        intersection_square = rect_square(*intersection)
                        head_square = rect_square(*rect_head)
                        rat = intersection_square / head_square
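                        # fraction of the head box that lies inside the door area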

                        # initialized inside the door area: probably going in
                        if rat >= 0.7:
                            counter.people_init[track.track_id] = 2
                        # initialized in the bus, maybe going out
                        elif rat <= 0.4 or bbox[3] > border_door:
                            counter.people_init[track.track_id] = 1
                        # initialized between the exit and the bus: ambiguous state
                        elif 0.4 < rat < 0.7:
                            counter.people_init[track.track_id] = 3
                            counter.rat_init[track.track_id] = rat
                    # no intersection: the object was not initialized in the door contour
                    else:
                        counter.people_init[track.track_id] = 1
                    counter.people_bbox[track.track_id] = bbox
                counter.cur_bbox[track.track_id] = bbox

                adc = "%.2f" % (track.adc *
                                100) + "%"  # Average detection confidence
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1e-3 * frame.shape[0], (0, 255, 0), 5)

                if not show_detections:
                    track_cls = track.cls
                    cv2.putText(frame, str(track_cls),
                                (int(bbox[0]), int(bbox[3])), 0,
                                1e-3 * frame.shape[0], (0, 255, 0), 1)
                    cv2.putText(
                        frame, 'ADC: ' + adc,
                        (int(bbox[0]), int(bbox[3] + 2e-2 * frame.shape[1])),
                        0, 1e-3 * frame.shape[0], (0, 255, 0), 1)

            id_get_lost = [
                track.track_id for track in tracker.tracks
                if track.time_since_update >= 35
            ]
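            # a track is considered lost after 35 frames without an update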
            # and track.age >= 29]
            # id_inside_tracked = [track.track_id for track in tracker.tracks if track.age > 60]
            for val in counter.people_init.keys():
                # check bbox also
                inter_square = 0
                cur_square = 0
                ratio = 0
                cur_c = find_centroid(counter.cur_bbox[val])
                init_c = find_centroid(counter.people_bbox[val])
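                # displacement of the track centroid since initialization (dx, dy)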
                vector_person = (cur_c[0] - init_c[0], cur_c[1] - init_c[1])

                if val in id_get_lost and counter.people_init[val] != -1:
                    rect_cur = Rectangle(counter.cur_bbox[val][0],
                                         counter.cur_bbox[val][1],
                                         counter.cur_bbox[val][2],
                                         counter.cur_bbox[val][3])
                    inter = rect_cur & rect_door
                    if inter:

                        inter_square = rect_square(*inter)
                        cur_square = rect_square(*rect_cur)
                        try:
                            ratio = inter_square / cur_square
                        except ZeroDivisionError:
                            ratio = 0

                    # if vector_person[1] < 0 the current coordinate is smaller than the
                    # initial one, i.e. the person is moving in the exit direction
                    if vector_person[1] > 70 and counter.people_init[val] == 2 \
                            and ratio < 0.9:  # and counter.people_bbox[val][3] > border_door
                        counter.get_in()

                    elif vector_person[1] < -70 and counter.people_init[val] == 1 \
                            and ratio >= 0.6:
                        counter.get_out()

                    elif vector_person[1] < -70 and counter.people_init[val] == 3 \
                            and ratio > counter.rat_init[val] and ratio >= 0.6:
                        counter.get_out()
                    elif vector_person[1] > 70 and counter.people_init[val] == 3 \
                            and ratio < counter.rat_init[val] and ratio < 0.6:
                        counter.get_in()

                    counter.people_init[val] = -1

            ins, outs = counter.show_counter()
            cv2.rectangle(frame, (0, 0), (250, 50), (0, 0, 0), -1, 8)
            cv2.putText(frame, "in: {}, out: {} ".format(ins, outs), (10, 35),
                        0, 1e-3 * frame.shape[0], (255, 255, 255), 3)

            cv2.namedWindow('video', cv2.WINDOW_NORMAL)
            cv2.resizeWindow('video', 1422, 800)
            cv2.imshow('video', frame)

            if writeVideo_flag:
                # save a frame
                out.write(frame)
                frame_index = frame_index + 1

            fps_imutils.update()

            if not asyncVideo_flag:
                fps = (fps + (1. / (time.time() - t1))) / 2
                # print("FPS = %f" % fps)

            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        fps_imutils.stop()
        print('imutils FPS: {}'.format(fps_imutils.fps()))

        if asyncVideo_flag:
            video_capture.stop()
        else:
            video_capture.release()

        if writeVideo_flag:
            out.release()

        cv2.destroyAllWindows()
        del door_array

    mean_error = np.mean(error_values)
    print("mean error for {} videos: {}".format(len(files), mean_error))
Example #29
0
    def run_video(self):
        # init for classify module
        clf_model = None
        clf_labels = None

        encoder = gdet.create_box_encoder(self.cfg.DEEPSORT.MODEL,
                                          batch_size=4)
        metric = nn_matching.NearestNeighborDistanceMetric(
            "cosine", self.cfg.DEEPSORT.MAX_COSINE_DISTANCE,
            self.cfg.DEEPSORT.NN_BUDGET)
        tracker = Tracker(self.cfg, metric)
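        # the encoder, cosine metric and tracker above form the Deep SORT pipeline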

        tracking = True
        writeVideo_flag = self.args.out_video
        asyncVideo_flag = False

        list_classes = ['loai_1', 'loai_2', 'loai_3', 'loai_4']
        arr_cnt_class = np.zeros((len(list_classes), self.number_MOI),
                                 dtype=int)
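        # arr_cnt_class[i][j]: count of class-i vehicles per MOI (movement of
        # interest) j; 'loai' is Vietnamese for 'type'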

        fps = 0.0
        fps_imutils = imutils.video.FPS().start()
        counted_obj = []
        count_frame = 0
        objs_dict = {}

        if asyncVideo_flag:
            video_capture = VideoCaptureAsync(self.video_path)
        else:
            video_capture = cv2.VideoCapture(self.video_path)

        if asyncVideo_flag:
            video_capture.start()

        if writeVideo_flag:
            if asyncVideo_flag:
                w = int(video_capture.cap.get(3))
                h = int(video_capture.cap.get(4))
            else:
                w = int(video_capture.get(3))
                h = int(video_capture.get(4))
            fourcc = cv2.VideoWriter_fourcc(*'XVID')
            output_camname = 'output' + '_' + self.video_name + '.avi'
            out = cv2.VideoWriter(output_camname, fourcc, 10, (1280, 720))
            frame_index = -1

        while True:
            count_frame += 1
            print('frame processing .......: ', count_frame)
            ret, frame = video_capture.read()
            if not ret:
                break

            frame_info = self.video_name + "_" + str(count_frame - 1)

            t1 = time.time()
            # frame = cv2.flip(frame, -1)

            _frame = self.process(frame, count_frame, frame_info, encoder,
                                  tracking, tracker, objs_dict, counted_obj,
                                  arr_cnt_class, clf_model, clf_labels)

            if writeVideo_flag:  # and not asyncVideo_flag:
                # save a frame
                out.write(_frame)
                frame_index = frame_index + 1

            # visualize
            if self.args.visualize:
                _frame = imutils.resize(_frame, width=1000)
                cv2.imshow("Final result", _frame)

            fps_imutils.update()

            if not asyncVideo_flag:
                fps = (fps + (1. / (time.time() - t1))) / 2
                print("FPS = %f" % (fps))

            # Press Q to stop!
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        fps_imutils.stop()
        print('imutils FPS: {}'.format(fps_imutils.fps()))

        if asyncVideo_flag:
            video_capture.stop()
        else:
            video_capture.release()

        if writeVideo_flag:
            out.release()

        cv2.destroyAllWindows()
Example #30
0
def main():
    parser = argparse.ArgumentParser(description='Process some integers.')
    parser.add_argument('input', type=str, help='Input video path')
    parser.add_argument('bbox', type=str, help='Input bounding box path')
    parser.add_argument('output', type=str, help='Output video path')
    args = parser.parse_args()

    # Definition of the parameters
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0

    # Deep SORT
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)

    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)

    tracking = True
    writeVideo_flag = True
    asyncVideo_flag = False

    file_path = args.input
    if asyncVideo_flag:
        video_capture = VideoCaptureAsync(file_path)
    else:
        video_capture = cv2.VideoCapture(file_path)

    if asyncVideo_flag:
        video_capture.start()

    if writeVideo_flag:
        if asyncVideo_flag:
            w = int(video_capture.cap.get(3))
            h = int(video_capture.cap.get(4))
        else:
            w = int(video_capture.get(3))
            h = int(video_capture.get(4))
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter(args.output, fourcc, 30, (w, h))
        frame_index = -1

    fps = 0.0
    # fps_imutils = imutils.video.FPS().start()

    with open(args.bbox) as f:
        data = json.load(f)
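    # data[i]['annotations'] holds the pre-computed 'bbox', 'score' and
    # 'label' entries for frame i, so no detector needs to run here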
    frame_index = 0
    while True:
        ret, frame = video_capture.read()  # frame shape 640*480*3
        if not ret:
            break

        t1 = time.time()

        image = Image.fromarray(frame[..., ::-1])  # bgr to rgb
        boxes = np.asarray(
            [pred['bbox'] for pred in data[frame_index]['annotations']])
        confidence = np.asarray(
            [pred['score'] for pred in data[frame_index]['annotations']])
        classes = np.asarray(
            [pred['label'] for pred in data[frame_index]['annotations']])

        if tracking:
            features = encoder(frame, boxes)

            detections = [
                Detection(bbox, confidence, cls, feature)
                for bbox, confidence, cls, feature in zip(
                    boxes, confidence, classes, features)
            ]

        # Run non-maxima suppression.
        boxes = np.array([d.tlwh for d in detections])
        scores = np.array([d.confidence for d in detections])
        indices = preprocessing.non_max_suppression(boxes, nms_max_overlap,
                                                    scores)
        detections = [detections[i] for i in indices]

        if tracking:
            # Call the tracker
            tracker.predict()
            tracker.update(detections)

            for track in tracker.tracks:
                if not track.is_confirmed() or track.time_since_update > 1:
                    continue
                bbox = track.to_tlbr()
                cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                              (int(bbox[2]), int(bbox[3])), (255, 255, 255), 2)
                cv2.putText(frame, "ID: " + str(track.track_id),
                            (int(bbox[0]), int(bbox[1])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        for det in detections:
            bbox = det.to_tlbr()
            score = "%.2f" % round(det.confidence * 100, 2) + "%"
            cv2.rectangle(frame, (int(bbox[0]), int(bbox[1])),
                          (int(bbox[2]), int(bbox[3])), (255, 0, 0), 2)
            if len(classes) > 0:
                cls = det.cls
                cv2.putText(frame,
                            str(cls) + " " + score,
                            (int(bbox[0]), int(bbox[3])), 0,
                            1.5e-3 * frame.shape[0], (0, 255, 0), 1)

        # cv2.imshow('', frame)

        if writeVideo_flag:  # and not asyncVideo_flag:
            # save a frame
            out.write(frame)
            frame_index = frame_index + 1

        # fps_imutils.update()

        # if not asyncVideo_flag:
        #     fps = (fps + (1./(time.time()-t1))) / 2
        #     print("FPS = %f"%(fps))

        # Press Q to stop!
        # if cv2.waitKey(1) & 0xFF == ord('q'):
        #     break

    # fps_imutils.stop()
    # print('imutils FPS: {}'.format(fps_imutils.fps()))

    if asyncVideo_flag:
        video_capture.stop()
    else:
        video_capture.release()

    if writeVideo_flag:
        out.release()