def __init__(self, host='raspberrypi', port=1234, url=None):
    """Build the main tracking window: detectors, tracker, network client,
    camera capture, and all Qt signal/slot wiring.

    Args:
        host: hostname of the robot-side server the Client connects to.
        port: TCP port for that server.
        url: optional video stream URL; None falls back to the local camera
            (see setup_camera).
    """
    QtWidgets.QMainWindow.__init__(self)
    self.setupUi(self)
    # running: camera/preview loop flag; capture: cv2.VideoCapture set later
    # by setup_camera(); bboxes: last detection boxes shown on the widget.
    self.running = True
    self.capture = None
    self.selected_classes = None
    self.status = self.STOP
    self.yolo_detector = YoloObjectDetector()
    self.color_detector = ColorObjectDetector()
    self.bboxes = None
    # Frame hand-off between the grab thread and the GUI timer.
    self.queue = Queue()
    self.tracker = ObjectTracker()
    # YOLO is the default detector until the color radio button is picked.
    self.tracker.set_detector(self.yolo_detector)
    self.client = Client(host=host, port=port)
    self.timer = QtCore.QTimer(self)
    self.lowerBoundColor.clicked.connect(self.set_lower_color)
    self.upperBoundColor.clicked.connect(self.set_upper_color)
    # Color-bound pickers only make sense for the color detector; tracking
    # stays disabled until a class/color is selected.
    self.lowerBoundColor.setDisabled(True)
    self.upperBoundColor.setDisabled(True)
    self.trackButton.setDisabled(True)
    self.setup_camera(url)
    # Paint the two buttons with the detector's current HSV/RGB bounds.
    color = QColor(*self.color_detector.get_lower_color())
    self.lowerBoundColor.setStyleSheet("background-color: %s" % (color.name()))
    color = QColor(*self.color_detector.get_upper_color())
    self.upperBoundColor.setStyleSheet("background-color: %s" % (color.name()))
    self.trackButton.clicked.connect(self.start_tracking)
    self.forceStopButton.clicked.connect(self.stop_tracking)
    self.setButton.clicked.connect(self.robot_initializer)
    # Remember the placeholder widget's size, then replace it with the
    # custom ImageWidget that actually renders frames.
    self.window_width = self.videoWidget.frameSize().width()
    self.window_height = self.videoWidget.frameSize().height()
    self.videoWidget = ImageWidget(self.videoWidget, self)
    self.captureButton.clicked.connect(self.capture_image)
    self.yoloDetectorRB.clicked.connect(self.set_yolo_detector)
    self.colorDetectorRB.clicked.connect(self.set_color_detector)
    self.videoWidget.mousePressEvent = self.select_object
    self.set_image_on_button(self.captureButton, True)
    self.resume_camera()
    # 1 ms timer drives update_frame as fast as the event loop allows.
    self.timer.timeout.connect(self.update_frame)
    self.timer.start(1)
    self.statusBar.addWidget(QLabel("Status: "))
    self.statusLabel = QLabel("Initialization")
    self.statusBar.addWidget(self.statusLabel)
    self.actionOptions.triggered.connect(self.show_options)
    self.actionExit.triggered.connect(lambda: self.close())
    self.options_dialog = Pid_Dialog()
def __init__(self, host='raspberrypi', port=1234, url=None):
    """Wire the dialog UI buttons, connect to the robot server, and create
    the color-based object tracker.

    Args:
        host: hostname of the robot-side server the Client connects to.
        port: TCP port for that server.
        url: optional video stream URL handed to the tracker.
    """
    super().__init__()
    self.ui = Ui_Dialog()
    self.ui.setupUi(self)
    # Button wiring (names come from the generated Ui_Dialog form).
    self.ui.pushButton_3.clicked.connect(self.start_tracking)
    self.ui.pushButton_4.clicked.connect(self.stop_tracking)
    self.ui.pushButton_5.clicked.connect(self.robot_initializer)
    self.ui.pushButton_6.clicked.connect(self.show_tracker)
    self.ui.pushButton.clicked.connect(self.closeAll)
    self.show()
    self.client = Client(host=host, port=port)
    self.object_tracker = ColorBasedObjectTracker(video_url=url, buffer_size=64)
    # self.object_tracker = YoloObjectTracker(video_url=url, buffer_size=64)
    self.status = self.STOP
async def setUpClass(cls): telegram_client = TelegramClient() # A hack to ensure dispatcher exist Dispatcher._set_singleton( # pylint: disable=W0212 telegram_client.updater.dispatcher) # pylint: disable=W0212 database_handle = Database(testing=True) database_handle.drop_testing_database() cls.client = Client(database_handle, telegram_client) cls.client.start() cls.user = TelegramUser() await cls.user.start() await cls.user.setup_chat()
def main():
    """Entry point: assemble the database and Telegram clients, then run
    the bot until it is interrupted."""
    db = Database()
    user_client = TelegramUserClient(db)
    bot = Client(db, user_client)
    bot.start()
    bot.idle()
def main():
    """Evaluate the mock image classifier.

    Usage: ``python3 test_model.py folder_name``. Loads up to 1000 samples
    from the given folder, does a 70/30 stratified split, trains via
    ``add_images_mock`` and prints every misclassification plus the final
    accuracy rounded to two decimals.

    Exits with status 1 when the folder argument is missing.
    """
    if len(sys.argv) < 2:
        print("python3 test_model.py folder_name")
        # sys.exit instead of the site-provided exit(): works even when the
        # interpreter is started with -S or the script is frozen.
        sys.exit(1)
    client_main = Client()
    set_x, set_y = get_full_set(sys.argv[1], 1000)
    # Stratify so class proportions survive the split.
    train_x, test_x, train_y, test_y = train_test_split(set_x, set_y,
                                                        test_size=0.3,
                                                        stratify=set_y)
    model = client_main.add_images_mock(train_x, train_y)
    results = client_main.predict_image_mock(test_x, model)
    for result, y_val in zip(results, test_y):
        if result['label'] != y_val:
            print("predicted: {} {}, solution: {}".format(
                result['label'], result['dist'], y_val))
    # Accuracy = correct predictions / total, without materializing a 0/1 list.
    correct = sum(1 for a, b in zip(results, test_y) if a['label'] == b)
    print("Score {}".format(round(correct * 1.0 / len(test_y), 2)))
class Ui(QtWidgets.QMainWindow, FormClass):
    """Main tracking window.

    Streams frames from a cv2.VideoCapture on a background thread, runs the
    selected detector (YOLO or color-range) through ObjectTracker, renders
    results on an ImageWidget, and sends JSON command/telemetry messages to
    the robot through ``self.client``. WASD keys provide manual driving.
    """

    # UI status strings (also sent verbatim to the robot in "status" messages).
    RUN = 'Running'
    STOP = 'Stopped'
    MANUAL = 'Manual'
    NO_OBJECT = ObjectDetector.NO_OBJECT

    def __init__(self, host='raspberrypi', port=1234, url=None):
        """Build detectors, tracker, network client, camera and Qt wiring.

        Args:
            host: hostname of the robot-side server.
            port: TCP port for that server.
            url: optional video stream URL; None uses the local camera.
        """
        QtWidgets.QMainWindow.__init__(self)
        self.setupUi(self)
        # running: preview loop flag; capture is set by setup_camera().
        self.running = True
        self.capture = None
        self.selected_classes = None
        self.status = self.STOP
        self.yolo_detector = YoloObjectDetector()
        self.color_detector = ColorObjectDetector()
        self.bboxes = None
        # Frame hand-off between the grab thread and the GUI timer.
        self.queue = Queue()
        self.tracker = ObjectTracker()
        # YOLO is the default detector until the color radio button is picked.
        self.tracker.set_detector(self.yolo_detector)
        self.client = Client(host=host, port=port)
        self.timer = QtCore.QTimer(self)
        self.lowerBoundColor.clicked.connect(self.set_lower_color)
        self.upperBoundColor.clicked.connect(self.set_upper_color)
        # Color pickers only apply to the color detector; tracking stays
        # disabled until a class/color is chosen.
        self.lowerBoundColor.setDisabled(True)
        self.upperBoundColor.setDisabled(True)
        self.trackButton.setDisabled(True)
        self.setup_camera(url)
        # Paint the bound buttons with the detector's current colors.
        color = QColor(*self.color_detector.get_lower_color())
        self.lowerBoundColor.setStyleSheet("background-color: %s" % (color.name()))
        color = QColor(*self.color_detector.get_upper_color())
        self.upperBoundColor.setStyleSheet("background-color: %s" % (color.name()))
        self.trackButton.clicked.connect(self.start_tracking)
        self.forceStopButton.clicked.connect(self.stop_tracking)
        self.setButton.clicked.connect(self.robot_initializer)
        # Remember the placeholder's size, then replace it with the custom
        # ImageWidget that actually renders frames.
        self.window_width = self.videoWidget.frameSize().width()
        self.window_height = self.videoWidget.frameSize().height()
        self.videoWidget = ImageWidget(self.videoWidget, self)
        self.captureButton.clicked.connect(self.capture_image)
        self.yoloDetectorRB.clicked.connect(self.set_yolo_detector)
        self.colorDetectorRB.clicked.connect(self.set_color_detector)
        self.videoWidget.mousePressEvent = self.select_object
        self.set_image_on_button(self.captureButton, True)
        self.resume_camera()
        # 1 ms timer drives update_frame as fast as the event loop allows.
        self.timer.timeout.connect(self.update_frame)
        self.timer.start(1)
        self.statusBar.addWidget(QLabel("Status: "))
        self.statusLabel = QLabel("Initialization")
        self.statusBar.addWidget(self.statusLabel)
        self.actionOptions.triggered.connect(self.show_options)
        self.actionExit.triggered.connect(lambda: self.close())
        self.options_dialog = Pid_Dialog()

    def show_options(self):
        """Open the PID options dialog and, if accepted, send the six PID
        gains (forward and steering) to the robot as JSON."""
        result, kp_f, ki_f, kd_f, kp_s, ki_s, kd_s = self.options_dialog.get_values(
        )
        if not result:
            return
        verbose = {
            "kp_f": kp_f,
            "ki_f": ki_f,
            "kd_f": kd_f,
            "kp_s": kp_s,
            "ki_s": ki_s,
            "kd_s": kd_s,
        }
        json_data = json.dumps(verbose)
        print(json_data)
        self.client.send(json_data)

    # NOTE(review): this closeEvent is shadowed by the second definition at
    # the bottom of the class — later defs win in Python, so this body never
    # runs. Confirm which behavior was intended.
    def closeEvent(self, event):
        self.stop_tracking()

    def setup_camera(self, url=None, width=304, height=304, fps=30):
        """Open the video source (device 0 when url is None) and request the
        given frame size and FPS via cv2 capture properties."""
        if url is None:
            self.capture = cv2.VideoCapture(0)
        else:
            self.capture = cv2.VideoCapture(url)
        self.capture.set(cv2.CAP_PROP_FRAME_WIDTH, width)
        self.capture.set(cv2.CAP_PROP_FRAME_HEIGHT, height)
        self.capture.set(cv2.CAP_PROP_FPS, fps)

    def grab(self):
        """Background-thread loop: pull frames from the capture and queue
        them for update_frame while ``self.running`` is True."""
        while self.running:
            self.capture.grab()
            _, img = self.capture.retrieve(0)
            self.queue.put(img)

    def keyPressEvent(self, event1):
        """Manual drive: translate WASD presses into {"FB","LR"} commands
        (F/B = forward/back, L/R = left/right) and send them as JSON."""
        if event1.isAutoRepeat():
            return
        self.set_status(self.MANUAL)
        verbose = {"FB": "", "LR": ""}
        if event1.key() == QtCore.Qt.Key_W:
            # print "Up pressed"
            verbose["FB"] = "F"
        if event1.key() == QtCore.Qt.Key_S:
            # print "D pressed"
            verbose["FB"] = "B"
        if event1.key() == QtCore.Qt.Key_A:
            # print "L pressed"
            verbose["LR"] = "L"
        if event1.key() == QtCore.Qt.Key_D:
            # print "R pressed"
            verbose["LR"] = "R"
        json_data = json.dumps(verbose)
        # Only send when at least one axis actually changed.
        if verbose["LR"] != "" or verbose["FB"] != "":
            print(verbose)
            self.client.send(json_data)

    def keyReleaseEvent(self, event):
        """Manual drive: on WASD release send "S" (stop) for the matching
        axis as a JSON command."""
        if event.isAutoRepeat():
            return
        verbose = {"FB": "", "LR": ""}
        if event.key() == QtCore.Qt.Key_W:
            # print "Up rel"
            verbose["FB"] = "S"
        if event.key() == QtCore.Qt.Key_S:
            # print "D rel"
            verbose["FB"] = "S"
        if event.key() == QtCore.Qt.Key_A:
            # print "L pressed"
            verbose["LR"] = "S"
        if event.key() == QtCore.Qt.Key_D:
            # print "R pressed"
            verbose["LR"] = "S"
        json_data = json.dumps(verbose)
        if verbose["LR"] != "" or verbose["FB"] != "":
            print(verbose)
            self.client.send(json_data)
            # NOTE(review): this second send looks like an accidental
            # duplicate of the line above — confirm against the robot
            # protocol before removing.
            self.client.send(json_data)

    def set_lower_color(self):
        """Let the user pick the lower color bound (color detector only),
        update the button swatch and push the RGB value to the detector."""
        if self.tracker.detector is not self.color_detector:
            return
        dialog = QColorDialog()
        color = dialog.getColor(
            QColor(*self.tracker.detector.get_lower_color()))
        self.lowerBoundColor.setStyleSheet("background-color: %s" % (color.name()))
        self.tracker.detector.set_lower_color(color.getRgb())

    def set_upper_color(self):
        """Same as set_lower_color, but for the upper color bound."""
        if self.tracker.detector is not self.color_detector:
            return
        dialog = QColorDialog()
        color = dialog.getColor(
            QColor(*self.tracker.detector.get_upper_color()))
        self.upperBoundColor.setStyleSheet("background-color: %s" % (color.name()))
        self.tracker.detector.set_upper_color(color.getRgb())

    def start_tracking(self):
        """Enable tracking on the selected classes, notify the robot with a
        RUN status message, and launch the telemetry sender thread."""
        self.set_status(self.RUN)
        self.trackButton.setDisabled(True)
        # if self.tracker.is_tracking():
        self.tracker.set_tracking(True)
        self.tracker.detector.set_classes(self.selected_classes)
        print('start tracking')
        verbose = {"status": self.RUN}
        json_data = json.dumps(verbose)
        self.client.send(json_data)
        data_sender_thread = threading.Thread(target=self.data_sender)
        data_sender_thread.start()

    def stop_tracking(self):
        """Disable tracking, clear the detector's classes and notify the
        robot with a STOP status message."""
        self.trackButton.setDisabled(False)
        self.tracker.detector.set_classes(None)
        self.tracker.set_tracking(False)
        self.set_status(self.STOP)
        verbose = {"status": self.STOP}
        json_data = json.dumps(verbose)
        self.client.send(json_data)

    def robot_initializer(self):
        """Read the target window / area limits from the line edits (falling
        back to defaults on any parse failure) and send them to the robot."""
        # Bare excepts keep invalid/empty fields from crashing the UI;
        # any parse error silently falls back to the default value.
        try:
            x_min = round(float(self.minXEdit.text()), 2)
        except:
            x_min = 200
        try:
            x_max = round(float(self.maxXEdit.text()), 2)
        except:
            x_max = 300
        try:
            minArea = round(float(self.minAreaEdit.text()), 2)
        except:
            minArea = 20
        try:
            maxArea = round(float(self.maxAreaEdit.text()), 2)
        except:
            maxArea = 100
        is_keep_track = self.chk_keep_track.isChecked()
        verbose = {
            "x_min": x_min,
            "x_max": x_max,
            "maxArea": maxArea,
            "minArea": minArea,
            "keepTrack": is_keep_track,
        }
        json_data = json.dumps(verbose)
        print(json_data)
        self.client.send(json_data)

    def data_sender(self):
        """Telemetry loop (runs on its own thread): every 50 ms, send the
        tracked object's x/y/width/height to the robot when the position
        moved by more than 5 (per max_diff) and keep the status label in
        sync with object presence. Ends by setting STOP."""
        verbose = {}
        prev_position = [0, 0, 0, 0]
        while self.tracker.is_tracking() and self.status != self.STOP:
            if self.tracker.has_positions():
                currentPosition = self.tracker.positions[0]
                # Skip sends while NO_OBJECT or when the movement is below
                # the 5-unit threshold to avoid flooding the link.
                if currentPosition[
                        0] is not None and self.status != self.NO_OBJECT and max_diff(
                            currentPosition, prev_position) > 5:
                    print(currentPosition)
                    verbose["x"] = currentPosition[0]
                    verbose["y"] = currentPosition[1]
                    verbose["width"] = currentPosition[2]
                    verbose["height"] = currentPosition[3]
                    json_data = json.dumps(verbose)
                    self.client.send(json_data)
                    prev_position = currentPosition
                # Position slot 4 carries the detector's object-found flag.
                if currentPosition[4] == self.NO_OBJECT:
                    self.set_status(self.NO_OBJECT)
                elif self.status == self.NO_OBJECT:
                    self.set_status(self.RUN)
            time.sleep(0.05)
        self.set_status(self.STOP)

    def set_status(self, status):
        """Store the new status and mirror it on the status-bar label."""
        self.status = status
        self.statusLabel.setText(self.status)

    def set_yolo_detector(self):
        """Switch the tracker to YOLO: disable the color pickers, restore the
        last bounding boxes, and only allow tracking if classes are set."""
        self.lowerBoundColor.setDisabled(True)
        self.upperBoundColor.setDisabled(True)
        self.tracker.set_detector(self.yolo_detector)
        self.tracker.detector.set_classes(self.selected_classes)
        self.videoWidget.setBBoxes(self.bboxes)
        if self.tracker.detector.has_classes():
            self.trackButton.setDisabled(False)
        else:
            self.trackButton.setDisabled(True)

    def set_color_detector(self):
        """Switch the tracker to the color detector: enable the color
        pickers, clear boxes, and allow tracking immediately."""
        self.lowerBoundColor.setDisabled(False)
        self.upperBoundColor.setDisabled(False)
        self.tracker.set_detector(self.color_detector)
        self.videoWidget.setBBoxes(None)
        self.trackButton.setDisabled(False)

    def item_selected(self, item):
        """Record the chosen detection class and enable the track button."""
        self.selected_classes = [item]
        self.trackButton.setDisabled(False)

    def select_object(self, event):
        """Mouse handler on the video widget: when the preview is paused and
        YOLO is active, re-show the captured bounding boxes for selection."""
        if self.running or self.tracker.detector is not self.yolo_detector:
            return
        self.videoWidget.setBBoxes(self.bboxes)

    @staticmethod
    def set_image_on_button(button, stop: bool):
        """Put the record (stop=True) or stop icon on ``button`` at 50x50."""
        if stop:
            pixmap = QPixmap("icons/cam_record.png")
        else:
            pixmap = QPixmap("icons/cam_stop.png")
        buttonIcon = QIcon(pixmap)
        button.setIcon(buttonIcon)
        size = QSize(50, 50)
        button.setIconSize(size)

    def resume_camera(self):
        """Clear overlays and (re)start the frame-grabbing thread."""
        # 1920, 1080, 30
        self.videoWidget.setBBoxes(None)
        capture_thread = threading.Thread(target=self.grab)
        capture_thread.start()

    def capture_image(self):
        """Toggle the live preview: pause it (freezing the current frame for
        object selection) or resume it, flushing stale queued frames."""
        self.set_image_on_button(self.captureButton, not self.running)
        if self.running:
            self.running = False
        else:
            self.running = True
            self.queue.queue.clear()
            self.resume_camera()

    def set_image(self, image_path):
        """Load ``image_path`` into the image label, scaled to fit while
        keeping its aspect ratio."""
        pixmap = QtGui.QPixmap(image_path)
        scaled_pixmap = pixmap.scaled(self.imageLabel.size(), Qt.KeepAspectRatio)
        self.imageLabel.setPixmap(scaled_pixmap)

    def update_frame(self):
        """Timer slot: take one queued frame, run the tracker on it, convert
        BGR→RGB and display the result on the video widget."""
        if not self.queue.empty():
            img = self.queue.get()
            img = cv2.resize(img, (self.window_width, self.window_height),
                             interpolation=cv2.INTER_CUBIC)
            new_img, temp = self.tracker.track(img)
            # Bounding boxes are only meaningful for the YOLO detector.
            if self.tracker.detector is not self.yolo_detector:
                temp = None
            if self.running:
                self.bboxes = temp
            height, width, bpc = new_img.shape
            new_img = cv2.cvtColor(new_img, cv2.COLOR_BGR2RGB)
            bpl = bpc * width  # bytes per line for QImage
            image = QtGui.QImage(new_img.data, width, height, bpl,
                                 QtGui.QImage.Format_RGB888)
            self.videoWidget.setBBoxes(None)
            self.videoWidget.setImage(image, img)

    # NOTE(review): second closeEvent definition — this one overrides the
    # earlier def closeEvent(self, event): self.stop_tracking() above.
    def closeEvent(self, event):
        """Window close: stop the grab loop, tell the robot to STOP, and
        quit the Qt application."""
        self.running = False
        self.status = self.STOP
        verbose = {"status": self.STOP}
        json_data = json.dumps(verbose)
        self.client.send(json_data)
        QtCore.QCoreApplication.instance().quit()
class MainWindow(QtWidgets.QMainWindow):
    """Dialog-based control window for the color-tracking robot.

    Drives a ColorBasedObjectTracker on a background thread, sends JSON
    command/telemetry messages through ``self.client``, and supports manual
    WASD driving via key events.
    """

    # UI status strings (also sent verbatim to the robot in "status" messages).
    RUN = 'Running'
    STOP = 'Stopped'
    MANUAL = 'Manual'
    NO_OBJECT = ObjectTracker.NO_OBJECT

    def __init__(self, host='raspberrypi', port=1234, url=None):
        """Wire the dialog buttons, connect to the robot, create the tracker.

        Args:
            host: hostname of the robot-side server.
            port: TCP port for that server.
            url: optional video stream URL handed to the tracker.
        """
        super().__init__()
        self.ui = Ui_Dialog()
        self.ui.setupUi(self)
        # Button wiring (names come from the generated Ui_Dialog form).
        self.ui.pushButton_3.clicked.connect(self.start_tracking)
        self.ui.pushButton_4.clicked.connect(self.stop_tracking)
        self.ui.pushButton_5.clicked.connect(self.robot_initializer)
        self.ui.pushButton_6.clicked.connect(self.show_tracker)
        self.ui.pushButton.clicked.connect(self.closeAll)
        self.show()
        self.client = Client(host=host, port=port)
        self.object_tracker = ColorBasedObjectTracker(video_url=url, buffer_size=64)
        # self.object_tracker = YoloObjectTracker(video_url=url, buffer_size=64)
        self.status = self.STOP

    def keyPressEvent(self, event1):
        """Manual drive: translate WASD presses into {"FB","LR"} commands
        (F/B = forward/back, L/R = left/right) and send them as JSON."""
        self.status = self.MANUAL
        self.ui.label_2.setText(self.status)
        verbose = {"FB": "", "LR": ""}
        if event1.key() == QtCore.Qt.Key_W:
            # print "Up pressed"
            verbose["FB"] = "F"
        if event1.key() == QtCore.Qt.Key_S:
            # print "D pressed"
            verbose["FB"] = "B"
        if event1.key() == QtCore.Qt.Key_A:
            # print "L pressed"
            verbose["LR"] = "L"
        if event1.key() == QtCore.Qt.Key_D:
            # print "R pressed"
            verbose["LR"] = "R"
        json_data = json.dumps(verbose)
        # Only send when at least one axis actually changed.
        if verbose["LR"] != "" or verbose["FB"] != "":
            print(verbose)
            self.client.send(json_data)

    def keyReleaseEvent(self, event):
        """Manual drive: on WASD release send "S" (stop) for the matching
        axis as a JSON command."""
        verbose = {"FB": "", "LR": ""}
        if event.key() == QtCore.Qt.Key_W:
            # print "Up rel"
            verbose["FB"] = "S"
        if event.key() == QtCore.Qt.Key_S:
            # print "D rel"
            verbose["FB"] = "S"
        if event.key() == QtCore.Qt.Key_A:
            # print "L pressed"
            verbose["LR"] = "S"
        if event.key() == QtCore.Qt.Key_D:
            # print "R pressed"
            verbose["LR"] = "S"
        json_data = json.dumps(verbose)
        if verbose["LR"] != "" or verbose["FB"] != "":
            print(verbose)
            self.client.send(json_data)
            # NOTE(review): this second send looks like an accidental
            # duplicate of the line above — confirm against the robot
            # protocol before removing.
            self.client.send(json_data)

    def start_tracking(self):
        """If the tracker thread is working, notify the robot with a RUN
        status message and launch the telemetry sender thread."""
        self.status = self.RUN
        if self.object_tracker.is_working:
            print('start tracking')
            verbose = {"status": self.RUN}
            json_data = json.dumps(verbose)
            self.client.send(json_data)
            self.ui.label_2.setText(self.status)
            data_sender_thread = threading.Thread(target=self.data_sender)
            data_sender_thread.start()

    def stop_tracking(self):
        """Set STOP (which ends data_sender's loop) and notify the robot."""
        self.status = self.STOP
        verbose = {"status": self.STOP}
        json_data = json.dumps(verbose)
        self.client.send(json_data)
        self.ui.label_2.setText(self.status)

    def closeAll(self):
        """Tell the robot to STOP, then quit the Qt application."""
        self.status = self.STOP
        verbose = {"status": self.STOP}
        json_data = json.dumps(verbose)
        self.client.send(json_data)
        QtCore.QCoreApplication.instance().quit()

    def show_tracker(self):
        """Start the tracker's video loop on a thread if not already running."""
        if not self.object_tracker.is_working:
            tracking_thread = threading.Thread(
                target=self.object_tracker.track)
            tracking_thread.start()

    def robot_initializer(self):
        """Read the target window / area limits from the line edits (falling
        back to defaults on any parse failure) and send them together with
        fixed PID gains to the robot."""
        # Bare excepts keep invalid/empty fields from crashing the UI;
        # any parse error silently falls back to the default value.
        try:
            x_min = round(float(self.ui.lineEdit_5.text()), 2)
        except:
            x_min = 100
        try:
            x_max = round(float(self.ui.lineEdit_2.text()), 2)
        except:
            x_max = 500
        try:
            minArea = round(float(self.ui.lineEdit_3.text()), 2)
        except:
            minArea = 2500
        try:
            maxArea = round(float(self.ui.lineEdit_4.text()), 2)
        except:
            maxArea = 10000
        verbose = {
            "x_min": x_min,
            "x_max": x_max,
            "maxArea": maxArea,
            "minArea": minArea,
            "P": 1,
            "I": 0,
            "D": 0.5,
        }
        json_data = json.dumps(verbose)
        print(json_data)
        self.client.send(json_data)

    def data_sender(self):
        """Telemetry loop (runs on its own thread): every 200 ms, send the
        tracked object's x/y/width/height to the robot when the position
        moved by more than 10 (per max_diff) and keep the status label in
        sync with object presence. Ends by setting STOP."""
        verbose = {}
        prev_position = [0, 0, 0, 0]
        while self.object_tracker.is_working and self.status != self.STOP:
            if len(self.object_tracker.positions) > 0:
                currentPosition = self.object_tracker.positions[0]
                # Skip sends while NO_OBJECT or when the movement is below
                # the 10-unit threshold to avoid flooding the link.
                if currentPosition[
                        0] is not None and self.status != self.NO_OBJECT and max_diff(
                            currentPosition, prev_position) > 10:
                    print(currentPosition)
                    verbose["x"] = currentPosition[0]
                    verbose["y"] = currentPosition[1]
                    verbose["width"] = currentPosition[2]
                    verbose["height"] = currentPosition[3]
                    json_data = json.dumps(verbose)
                    self.client.send(json_data)
                    prev_position = currentPosition
                # Position slot 4 carries the tracker's object-found flag.
                if currentPosition[4] == self.NO_OBJECT:
                    self.set_status(self.NO_OBJECT)
                elif self.status == self.NO_OBJECT:
                    self.set_status(self.RUN)
            time.sleep(0.2)
        self.set_status(self.STOP)

    def set_status(self, status):
        """Store the new status and mirror it on the UI label."""
        self.status = status
        self.ui.label_2.setText(self.status)
def help(err):
    """Print usage information (in Portuguese) and exit with code ``err``.

    When ``err`` is negative, the error code and the arguments actually
    supplied are also printed for debugging.

    Note: the name shadows the ``help`` builtin; kept for compatibility
    with existing callers.
    """
    print(
        "Use os argumentos [client] ou [server] seguidos da porta usada para conexão [port]"
    )
    print(__file__ + " [client/server] [port]")
    if err < 0:
        print("Erro: " + str(err))
        print("\nArgumentos utilizados: \n")
        print(sys.argv)
    sys.exit(err)


if __name__ == '__main__':
    # Guard against missing arguments: the original indexed sys.argv[1]
    # unconditionally and crashed with IndexError when run without args.
    if len(sys.argv) < 2:
        help(-1)
    if sys.argv[1] == 'server':
        # Server mode additionally needs the port argument.
        if len(sys.argv) < 3:
            help(-1)
        server = Server(get_host(), sys.argv[2])
        c = server.inicia_escuta_e_transmissao()
        server.finalizar_conexao(c)
    elif sys.argv[1] == 'client':
        client = Client(get_host())
        dados = client.busca_dados()
        s = client.abre_conexao()
        client.envia_dados(dados, s)
        client.fechar_conexao(s)
    else:
        help(0)