class NaoController:
    def __init__(self, classifier_path, preprocessor_path, response_mapping):
        self.response_mapping = response_mapping
        self.robot = Nao()
        self.camera_controller = CameraController()
        # Load the pickled preprocessor and classifier, closing the files
        # afterwards instead of leaking the handles.
        with open(preprocessor_path, 'rb') as f:
            self.frame_preprocessor = pickle.load(f)
        with open(classifier_path, 'rb') as f:
            self.classifier = pickle.load(f)
        # Continuously discard stale frames so on_frame() always sees a
        # fresh one.
        self._flusher = Thread(target=self._flush, daemon=True)
        self._flusher.start()

    def _flush(self):
        while True:
            self.camera_controller.flush()

    def __del__(self):
        del self.camera_controller

    def step(self):
        return self.robot.step()

    def on_frame(self):
        frame = self.camera_controller.get_frame()
        if self.robot.is_moving():
            return
        processed = self.frame_preprocessor.preprocess(frame)
        if processed is None:
            # Preprocessing produced nothing usable; skip this frame rather
            # than looking up a None label in the response mapping.
            return
        label = self.classifier.predict(np.array([processed]))[0]
        self.robot.play_motion(self.response_mapping[label])
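# A minimal usage sketch for NaoController. The file paths and the
# label-to-motion mapping below are assumptions for illustration; the loop
# condition assumes Webots-style semantics where step() returns -1 when the
# simulation ends.
if __name__ == '__main__':
    controller = NaoController(
        classifier_path='models/classifier.pkl',      # assumed path
        preprocessor_path='models/preprocessor.pkl',  # assumed path
        response_mapping={0: 'wave', 1: 'bow'})       # assumed label -> motion map
    while controller.step() != -1:
        controller.on_frame()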
def __init__(self, droneList):
    ShowBase.__init__(self)

    # set resolution
    wp = WindowProperties()
    wp.setSize(2000, 1500)
    # wp.setSize(1200, 900)
    # wp.setSize(800, 600)
    self.win.requestProperties(wp)

    self.setFrameRateMeter(True)
    self.render.setAntialias(AntialiasAttrib.MAuto)
    CameraController(self)

    # setup model directory
    # Get the location of the 'py' file I'm running:
    self.modelDir = os.path.abspath(sys.path[0])
    # Convert that to panda's unix-style notation.
    self.modelDir = Filename.from_os_specific(self.modelDir).getFullpath() + "/models"

    self.initScene()
    self.initBullet()
    self.droneManager = DroneManager(self, droneList)
    DroneRecorder(self.droneManager)

    self.stopwatchOn = False
    self.now = 0
    self.accept('t', self.toggleStopwatch)
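# A minimal launch sketch, assuming the __init__ above belongs to a ShowBase
# subclass (hypothetically named DroneSimulator here) and that drones are
# identified by simple name strings:
if __name__ == '__main__':
    app = DroneSimulator(droneList=['drone1', 'drone2'])  # assumed class name and arguments
    app.run()  # enter Panda3D's main loop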
def process_frame(self, frame_info, params):
    camera_ctrl = CameraController()
    camera_ctrl.get_presets()

    video_w = frame_info['video_w']
    video_h = frame_info['video_h']
    start_h = frame_info['start_h']
    end_h = frame_info['end_h']
    start_w = frame_info['start_w']
    end_w = frame_info['end_w']
    fps = frame_info['fps']
    f_resize_ratio = frame_info['resize_ratio']
    f_send_server = params['f_send_server']
    f_show = params['f_show']
    f_save = params['f_save']
    resize_w = int(f_resize_ratio * video_w)
    resize_h = int(f_resize_ratio * video_h)
    # out = cv2.VideoWriter('output.avi', cv2.VideoWriter_fourcc(*'MPEG'), fps, (video_w, video_h))
    frame = None
    self.last_time = time.time()

    while True:
        if self.quit_thread:
            break
        try:
            current_preset, moving = camera_ctrl.get_current_preset()
            print("current preset {} => ({}, {}, {}) {}".format(
                current_preset.Name,
                current_preset['PTZPosition'].PanTilt.x,
                current_preset['PTZPosition'].PanTilt.y,
                current_preset['PTZPosition'].Zoom.x, moving))
        except Exception:
            time.sleep(0.5)
            continue
        if moving == "MOVING":
            time.sleep(0.5)
            continue

        self._frame_lock = True
        if self.current_frame is not None:
            frame = self.current_frame.copy()
        if frame is None:
            time.sleep(0.01)
            self._frame_lock = False
            continue

        if f_resize_ratio <= 1:
            # Detect on a single resized frame.
            dim = (resize_w, resize_h)
            resize_frame = cv2.resize(frame, dim, interpolation=cv2.INTER_AREA)
            img_draw, valid_rects = self.process_image(resize_frame,
                                                       DETECT_THRESHOLD)
            if f_send_server:
                temp_name = str(time.time()) + '.jpg'
                # cv2.imwrite(temp_name, img_draw)
                json_req = make_request_json(ip_addr=CAMERA_IP,
                                             img_file=temp_name,
                                             count=len(valid_rects),
                                             cam_name=current_preset.Name)
                response_code = send_request(server=SERVER_URL,
                                             cam_name=current_preset.Name,
                                             req_json=json_req)
                if response_code == 200:
                    self.last_time = time.time()
        else:
            # Split the frame into four overlapping quadrants and detect on
            # each crop separately.
            frame1 = frame[:end_h, :end_w].copy()
            frame2 = frame[:end_h, start_w:].copy()
            frame3 = frame[start_h:, :end_w].copy()
            frame4 = frame[start_h:, start_w:].copy()
            _, valid_rects1 = self.process_image(frame1, DETECT_THRESHOLD)
            _, valid_rects2 = self.process_image(frame2, DETECT_THRESHOLD)
            _, valid_rects3 = self.process_image(frame3, DETECT_THRESHOLD)
            _, valid_rects4 = self.process_image(frame4, DETECT_THRESHOLD)

            # ---- combine the detection results ----
            # Keep only boxes that do not touch a crop seam, shift boxes from
            # the offset crops back into full-frame coordinates, and drop
            # boxes already contained in an accepted one.
            valid_rects = []
            for rect in valid_rects1:
                if rect[2] < end_w - 10 and rect[3] < end_h - 10:
                    valid_rects.append(rect)
            new_rect_list = []
            for rect in valid_rects2:
                if rect[0] > 10 and rect[3] < end_h - 10:
                    new_rect = [rect[0] + start_w, rect[1],
                                rect[2] + start_w, rect[3]]
                    if not func.check_contain(valid_rects, new_rect):
                        new_rect_list.append(new_rect)
            valid_rects += new_rect_list
            new_rect_list = []
            for rect in valid_rects3:
                if rect[2] < end_w - 10 and rect[1] > 10:
                    new_rect = [rect[0], rect[1] + start_h,
                                rect[2], rect[3] + start_h]
                    if not func.check_contain(valid_rects, new_rect):
                        new_rect_list.append(new_rect)
            valid_rects += new_rect_list
            new_rect_list = []
            for rect in valid_rects4:
                if rect[0] > 10 and rect[1] > 10:
                    new_rect = [rect[0] + start_w, rect[1] + start_h,
                                rect[2] + start_w, rect[3] + start_h]
                    if not func.check_contain(valid_rects, new_rect):
                        new_rect_list.append(new_rect)
            valid_rects += new_rect_list

            # ---- send the result to the server ----
            if f_send_server:
                temp_name = str(time.time()) + '.jpg'
                # cv2.imwrite(temp_name, frame)
                json_req = make_request_json(ip_addr=CAMERA_IP,
                                             img_file=temp_name,
                                             count=len(valid_rects),
                                             cam_name=current_preset.Name)
                send_request(server=SERVER_URL,
                             cam_name=current_preset.Name,
                             req_json=json_req)
                # func.rm_file(temp_name)

        # ---- report the result ----
        print('There are {} people.'.format(len(valid_rects)))
        self._frame_lock = False
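# The quadrant split above only makes sense when the crop boundaries overlap
# (start_w < end_w and start_h < end_h), so a person straddling a seam is
# fully contained in at least one crop. A sketch of how frame_info could be
# built with crops centred on the frame and a 50-pixel overlap; the field
# names match the dict reads in process_frame, but the centred layout and
# overlap value are assumptions:
def make_frame_info(video_w, video_h, fps, resize_ratio, overlap=50):
    return {
        'video_w': video_w, 'video_h': video_h, 'fps': fps,
        'resize_ratio': resize_ratio,
        # Each crop extends `overlap` pixels past the frame centre.
        'start_w': video_w // 2 - overlap, 'end_w': video_w // 2 + overlap,
        'start_h': video_h // 2 - overlap, 'end_h': video_h // 2 + overlap,
    }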
class MainWindow(QtGui.QMainWindow):
    """
    Summary => maintain the GUI used to control the robotic artist.

    Description => This class generates and controls the GUI used for the
    system. It lets the user control the camera and see the feed coming
    from it.

    args => None

    return => None
    """

    def __init__(self):
        """
        Summary => initialize the main window view.

        args => None

        return => None
        """
        super(MainWindow, self).__init__()
        self.ui = Ui_mainWindow()
        self.ui.setupUi(self)
        self.move(100, 100)  # move gui to 100,100 on the screen
        self.setWindowIcon(QtGui.QIcon('Images/Icon.png'))  # sets icon for gui
        self.camera = CameraController(self.ui)
        # --------------------------------------------------------
        # set menuBar actions
        self.ui.actionExit_Application.setShortcut('Ctrl+Q')
        self.ui.actionExit_Application.setStatusTip('Leaving Application')
        self.ui.actionExit_Application.triggered.connect(
            self.close_application)
        restart_application = QtGui.QAction("&Restart Application", self)
        restart_application.setShortcut("R")
        restart_application.setStatusTip('Restart The App')
        restart_application.triggered.connect(self.restart_application)
        self.ui.menuFile.addAction(restart_application)
        # connect non-working menuBar items to an error message
        self.ui.actionLogin.triggered.connect(self.trigger_error)
        self.ui.actionToggle_Admin_View.triggered.connect(self.trigger_error)
        self.ui.actionTutorial.triggered.connect(self.trigger_error)
        # --------------------------------------------------------
        # set the stackedWidget view to the first page
        self.display_stack(0)
        # --------------------------------------------------------
        # set up buttons
        self.ui.capture_picture.clicked.connect(self.capture_image)
        btn = self.ui.yes_no_button.button(self.ui.yes_no_button.Yes)
        btn.clicked.connect(self.accept_picture)
        btn = self.ui.yes_no_button.button(self.ui.yes_no_button.No)
        btn.clicked.connect(self.reject_picture)
        btn = self.ui.yes_no_button2.button(self.ui.yes_no_button.Yes)
        btn.clicked.connect(self.start_plot)
        self.ui.btn_continue1.clicked.connect(self.process_image)
        btn = self.ui.yes_no_button2.button(self.ui.yes_no_button.No)
        btn.clicked.connect(self.reject_style)
        # style check boxes
        self.style_check_boxes = [
            self.ui.check_style1, self.ui.check_style2, self.ui.check_style3,
            self.ui.check_style4, self.ui.check_style5
        ]
        # set style labels
        self.style_check_boxes[0].setText("Dithering")
        self.style_check_boxes[1].setText("Edges")
        self.style_check_boxes[2].setText("No Style")
        self.style_check_boxes[3].setText("No Style")
        self.style_check_boxes[4].setText("No Style")
        # set style images
        pixmap = QtGui.QPixmap('Images/dithering_example.jpg')
        self.ui.image_style1.setPixmap(pixmap)
        pixmap = QtGui.QPixmap('Images/edges_style_example.jpg')
        pixmap = pixmap.scaled(100, 100, QtCore.Qt.KeepAspectRatio)
        self.ui.image_style2.setPixmap(pixmap)
        pixmap = QtGui.QPixmap("Images/blank_image.jpg")
        self.ui.image_style3.setPixmap(pixmap)
        pixmap = QtGui.QPixmap("Images/blank_image.jpg")
        self.ui.image_style4.setPixmap(pixmap)
        pixmap = QtGui.QPixmap("Images/blank_image.jpg")
        self.ui.image_style5.setPixmap(pixmap)
        # import text for the description
        # technique from:
        # https://stackoverflow.com/questions/8369219/
        # how-do-i-read-a-text-file-into-a-string-variable-in-python
        text = ""
        with open('description.txt', 'r') as myfile:
            text = myfile.read()
        # set description text
        self.ui.description_text.setText(text)

    # ------------------------------------------------------------
    # methods
    # ------------------------------------------------------------
    def display_stack(self, i):
        """
        Summary => changes the widget stack page.

        Description => changes the stacked widget view to the int given
        through i. This code was found on:
        https://www.tutorialspoint.com/pyqt/pyqt_qstackedwidget.htm

        If the specified page has any objects that need to be initialized,
        that is done here.

        args => i => the int for the page wanted. Ranging from 0-3

        return => None
        """
        self.ui.page_layer.setCurrentIndex(i)
        if i == 0:
            print("starting VideoCapture")
            # set checked_boxes to false
            self.ui.check_process.setChecked(False)
            # create a thread to update images when complete
            update = UpdateImages(self.ui)
            # connect signal
            self.connect(update,
                         QtCore.SIGNAL("update_images(PyQt_PyObject)"),
                         self.update_images)
            # initialize the video_capture object
            self.video_capture = VideoCapture(self.ui, update)
            # connect video_capture to main gui thread
            self.connect(self.video_capture,
                         QtCore.SIGNAL("update_capture(QImage)"),
                         self.update_capture)
            self.camera.start_video_capture(self.video_capture)
        elif i == 2:
            # set check boxes to false
            for check in self.style_check_boxes:
                check.setChecked(False)

    def update_images(self, images):
        """
        Summary => update all images in the ui.

        Description => called when any of the non-static images in the ui
        has been updated. When a signal is received from another thread,
        the given images, sent through a list, are updated.

        args => images - list - a list of strings for the images

        return => None
        """
        pixmap = QtGui.QPixmap(images[0])
        # update images throughout the gui that are not static
        pixmap = pixmap.scaled(498, 300, QtCore.Qt.KeepAspectRatio)
        self.ui.captured_image.setPixmap(pixmap)
        self.ui.captured_image.setAlignment(QtCore.Qt.AlignCenter)
        pixmap = pixmap.scaled(100, 100, QtCore.Qt.KeepAspectRatio)
        self.ui.image_your_picture.setPixmap(pixmap)
        pixmap = QtGui.QPixmap(images[1])
        pixmap = pixmap.scaled(498, 300, QtCore.Qt.KeepAspectRatio)
        self.ui.styled_image.setPixmap(pixmap)
        self.ui.styled_image.setAlignment(QtCore.Qt.AlignCenter)

    def update_capture(self, img):
        """
        Summary => update the capture image.

        Description => called while the video_capture is running. Updates
        the video_capture img/QLabel every time a new image comes through.

        args => None

        return => None
        """
        pixmap = QtGui.QPixmap(img)
        pixmap = pixmap.scaled(498, 300, QtCore.Qt.KeepAspectRatio)
        self.ui.video_capture.setPixmap(pixmap)
        self.ui.video_capture.setAlignment(QtCore.Qt.AlignCenter)

    # ------------------------------------------------------------
    # Actions
    # ------------------------------------------------------------
    def process_image(self):
        """
        Summary => starts the image-processing stage.

        Description => starts processing the image that was captured
        earlier on in the gui. This method is the action for the specified
        button.

        args => None

        return => None
        """
        print("Image processing")
        self.processors = [Dithering(), EdgesStyle()]
        no_check = True
        checked = 0
        # count how many style check_boxes have been ticked
        for i, processor in enumerate(self.processors):
            if self.style_check_boxes[i].isChecked():
                no_check = False
                checked = checked + 1
        if no_check:
            # no check_boxes have been ticked
            error = QMessageBox()
            error.setIcon(QMessageBox.Information)
            error.setText("No style selected \n Please select the style wanted")
            error.setWindowTitle("Error: No style")
            error.setStandardButtons(QMessageBox.Ok)
            error.exec_()
        else:
            if checked == 1:
                for i, processor in enumerate(self.processors):
                    # run the processor whose check_box has been ticked
                    if self.style_check_boxes[i].isChecked():
                        processor.run()
                # update images in gui
                update = UpdateImages(self.ui)
                # connect signal to update thread
                self.connect(update,
                             QtCore.SIGNAL("update_images(PyQt_PyObject)"),
                             self.update_images)
                # run the update thread
                update.start()
                # display the next page
                self.display_stack(3)
            else:
                error = QMessageBox()
                error.setIcon(QMessageBox.Information)
                error.setWindowTitle("Error: Too many styles")
                error.setText("Too many styles selected \n Please pick one")
                error.setStandardButtons(QMessageBox.Ok)
                error.exec_()

    def capture_image(self):
        """
        Summary => starts the process to capture the picture.

        Description => captures the picture from the video feed. The image
        is saved to 'Images/takenPicture'.

        args => None

        return => None
        """
        print("picture taken")
        if self.ui.check_process.isChecked():
            # sleep to let the program update before changing screen
            time.sleep(0.1)
            self.camera.stop_video_capture()
            # change to the acceptance page after the picture has been taken
            self.display_stack(1)
        else:
            error = QMessageBox()
            error.setIcon(QMessageBox.Information)
            error.setText("Please confirm that we can process your picture")
            error.setWindowTitle("Confirmation")
            error.setStandardButtons(QMessageBox.Ok)
            error.exec_()

    def accept_picture(self):
        """
        Summary => action to take the user to the next page.

        Description => takes the user to the style-picking page.

        args => None

        return => None
        """
        self.display_stack(2)

    def start_plot(self):
        """
        Summary => initializes the plotting process.

        Description => initializes the plotting process from the
        coordinates produced when the image was changed in the styling
        stage.

        args => None

        return => None
        """
        print("plotting")
        # create the plotter controllers (coordinates, scale)
        self.plotters = [
            DitheringPlotter(self.processors[0].get_coordinates(), 1),
            EdgeStylePlotter(self.processors[1].get_coordinates(), 1)
        ]
        # call run when ready to go through the process
        for i, plotter in enumerate(self.plotters):
            # check if the specified style is ticked
            if self.style_check_boxes[i].isChecked():
                # check if the thread is already running, so pressing the
                # button multiple times doesn't start more than one thread
                if not plotter.is_alive():
                    plotter.start()

    def reject_style(self):
        """
        Summary => rejects the current style after asking if they are sure.

        Description => rejects the current style before returning to the
        previous stage. The program asks if the user is sure by using a
        message box.

        args => None

        return => None
        """
        # display a message box to make sure they don't want the image
        choice = QMessageBox.question(
            self, 'Are you sure',
            "Are you sure that you do not want this image? If so, you will"
            " be returned to the styling page.",
            QMessageBox.Yes | QMessageBox.No)
        if choice == QMessageBox.Yes:
            self.display_stack(2)

    def reject_picture(self):
        """
        Summary => rejects the current picture after asking if they are sure.

        Description => rejects the current picture before returning to the
        previous stage. The program asks if the user is sure by using a
        message box.

        args => None

        return => None
        """
        # display a message box to make sure they don't want the image
        choice = QMessageBox.question(
            self, 'Are you sure',
            "Are you sure that you do not want this image? If so, you will"
            " be returned to the home page.",
            QMessageBox.Yes | QMessageBox.No)
        if choice == QMessageBox.Yes:
            self.display_stack(0)

    def close_application(self):
        """
        Summary => closes the Gui.

        Description => creates a message box asking whether to leave the
        Application. If yes, the Application is closed; if no, control
        returns to the Application.

        args => None

        return => None
        """
        # creating exit pop-up message
        choice = QMessageBox.question(self, 'Leaving Application',
                                      "Leave Application?",
                                      QMessageBox.Yes | QMessageBox.No)
        # act depending on the response
        if choice == QMessageBox.Yes:
            # close the camera feed, if it is still running
            self.camera.stop_video_capture()
            # reset each of the images to blank when closed
            cv2.imwrite("Images/takenPicture.jpg",
                        cv2.imread("Images/blank.jpg"))
            cv2.imwrite("Images/processedImage.jpg",
                        cv2.imread("Images/blank.jpg"))
            print("Leaving Application")
            sys.exit()

    def restart_application(self):
        """
        Summary => restarts the Gui.

        Description => creates a message box asking whether to restart the
        Application. If yes, the Application is set back to the first page;
        if no, control returns to the Application.

        args => None

        return => None
        """
        if self.ui.page_layer.currentIndex() == 0:
            time.sleep(1)
            error = QMessageBox()
            error.setIcon(QMessageBox.Information)
            error.setText("Cannot restart this page")
            error.setWindowTitle("Error: cannot restart")
            error.setStandardButtons(QMessageBox.Ok)
            error.exec_()
        else:
            # creating restart pop-up message
            choice = QMessageBox.question(self, 'Restarting Application',
                                          "Restart Application?",
                                          QMessageBox.Yes | QMessageBox.No)
            # act depending on the response
            if choice == QMessageBox.Yes:
                # close the camera feed, if it is still running
                self.camera.stop_video_capture()
                # reset each of the images to blank when closed
                cv2.imwrite("Images/takenPicture.jpg",
                            cv2.imread("Images/blank.jpg"))
                cv2.imwrite("Images/processedImage.jpg",
                            cv2.imread("Images/blank.jpg"))
                print("Restarting Application")
                # change back to the front page
                self.display_stack(0)

    def trigger_error(self):
        """
        Summary => sends an out-of-order error.

        Description => sends an out-of-order error message, saying that
        this part of the program is either broken or not complete.

        args => None

        return => None
        """
        error = QMessageBox()
        error.setIcon(QMessageBox.Information)
        error.setText("Error: this function is currently out of use")
        error.setWindowTitle("Error: Out of order")
        error.setStandardButtons(QMessageBox.Ok)
        error.exec_()
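# A minimal entry-point sketch for launching the window above (standard
# PyQt4 boilerplate; the surrounding module layout is an assumption):
if __name__ == '__main__':
    app = QtGui.QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())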
class BaxterEnvironment:
    """Class representing the learning environment."""

    def __init__(self, img_size=(256, 160), img_observation=False):
        """Initialise environment.

        Args:
            img_size (int tuple): (width, height) of image to return.
            img_observation (bool): If true, returns observation as a
                dictionary containing img and state (euler coordinates).
                Otherwise returns observation as an array.
        """
        # Enable baxter and initialise environment
        baxter_interface.RobotEnable().enable()
        self._left_cam = CameraController("/cameras/left_hand_camera/image",
                                          img_size)
        self._left_arm = baxter_interface.Limb('left')
        self._left_arm.set_joint_position_speed(1.0)
        self._left_gripper = baxter_interface.Gripper('left')
        ns = "ExternalTools/left/PositionKinematicsNode/IKService"
        self._iksvc = rospy.ServiceProxy(ns, SolvePositionIK)
        self._move_to_start_position()
        self._load_gazebo_models()
        rospy.wait_for_service(ns, 5.0)

        # Observation information
        self.img_observation = img_observation
        self.image = None
        self.img_size = img_size
        self.x_pos = None
        self.y_pos = None
        self.z_pos = None
        self.horiz_dist = None
        self.vert_dist = None
        self.depth_dist = None

        # Accuracy tolerance
        self._horiz_threshold = 0.05
        self._vert_threshold = 0.05
        self._min_depth_threshold = 1
        self._max_depth_threshold = 2

        # Object position
        self._obj_x_pos = None
        self._obj_y_pos = None
        self._obj_z_pos = None

        # Limit on endpoint movements
        self._x_move_lim = 0.28
        self._y_move_lim = 0.40

        # Limits on endpoint position relative to object
        self._x_dist_lim = 0.28
        self._y_dist_lim = 0.40

        # Limit on endpoint position relative to body
        self._x_pos_min = 0.36

    def reset(self):
        """Reset arm position and move object to random location.

        Returns: observation and low state descriptor
        """
        self._move_to_start_position()
        # Loop until the gripper is not obscuring the object
        visible = False
        while not visible:
            self._move_block()
            self._update_state()
            visible = self.horiz_dist is not None
        return self._get_state()

    def step(self, action):
        """Attempt to execute the action.

        Returns: state, reward, done
        """
        # Make move if possible and update observation/state
        joint_angles = self._get_joint_angles(action)
        if joint_angles:
            self._move_arm(joint_angles)
        self._update_state()
        # Calculate reward and whether a new episode should be started
        reward = 0.
        done = False
        if self.horiz_dist is None:
            # Lost sight of object; end episode and return larger penalty
            reward = -3
            done = True
        else:
            # Return negative reward based on distances
            reward -= abs(self.horiz_dist)
            reward -= abs(self.vert_dist)
        return self._get_state(), reward, done

    def close(self):
        """Delete loaded models. To be called when node is shutting down."""
        self._delete_gazebo_models()

    def _get_state(self):
        if self.img_observation:
            return {'img': self.image, 'state': [self.x_pos, self.y_pos]}
        return [self.horiz_dist, self.vert_dist, self.x_pos, self.y_pos]

    def _update_state(self):
        # Update image
        self.image = self._left_cam.get_image()
        # Update cartesian coordinates
        pose = self._left_arm.endpoint_pose()
        self.z_pos = pose['position'].z
        self.x_pos = pose['position'].x
        self.y_pos = pose['position'].y
        # Calculate distance to object and distance from centre of camera
        self.horiz_dist, self.vert_dist = self._calc_dist()

    def _calc_dist(self):
        # Define lower and upper colour bounds
        BLUE_MIN = np.array([100, 150, 0], np.uint8)
        BLUE_MAX = np.array([140, 255, 255], np.uint8)
        # Threshold the image in the HSV colour space
        hsv_image = cv2.cvtColor(self.image, cv2.COLOR_RGB2HSV)
        mask = cv2.inRange(hsv_image, BLUE_MIN, BLUE_MAX)
        # If no object in image return None
        moments = cv2.moments(mask)
        if moments["m00"] == 0:
            return None, None
        # Calculate normalised horizontal and vertical distances
        obj_x = int(moments["m10"] / moments["m00"])
        obj_y = int(moments["m01"] / moments["m00"])
        x_dist = obj_x - self.img_size[0] / 2
        y_dist = obj_y - self.img_size[1] / 2
        x_dist = float(x_dist) / (self.img_size[0] / 2)
        y_dist = float(y_dist) / (self.img_size[1] / 2)
        return x_dist, y_dist

    def _get_joint_angles(self, action):
        def clamp(n, minim, maxim):
            return max(minim, min(n, maxim))

        # Calculate desired endpoint position of arm
        pose = self._left_arm.endpoint_pose()
        x = pose['position'].x + action[0] * self._x_move_lim
        y = pose['position'].y + action[1] * self._y_move_lim
        # Clamp values to prevent losing sight of object
        x = clamp(x, self._obj_x_pos - self._x_dist_lim,
                  self._obj_x_pos + self._x_dist_lim)
        y = clamp(y, self._obj_y_pos - self._y_dist_lim,
                  self._obj_y_pos + self._y_dist_lim)
        # Clamp to prevent gripper being too close to body
        x = max(x, self._x_pos_min)
        # Create pose of desired position
        pose = Pose()
        pose.position.x = x
        pose.position.y = y
        pose.position.z = 0.3
        pose.orientation.x = 0.0
        pose.orientation.y = 1.0
        pose.orientation.z = 0.0
        pose.orientation.w = 0.0
        # Return result from inverse kinematics, or None if move invalid
        i = 0
        joint_angles = None
        while joint_angles is None and i < 5:
            joint_angles = self._ik_request(pose)
            i = i + 1
        return joint_angles

    def _move_to_start_position(self):
        # Move arm to the starting position
        starting_joint_angles = {
            'left_w0': 0.003,
            'left_w1': 1.379,
            'left_w2': -0.121,
            'left_e0': 0.020,
            'left_e1': 1.809,
            'left_s0': -0.935,
            'left_s1': -1.618
        }
        self._move_arm(starting_joint_angles)
        self._left_gripper.open()

    def _ik_request(self, pose):
        # Send request
        hdr = Header(stamp=rospy.Time.now(), frame_id='base')
        ikreq = SolvePositionIKRequest()
        ikreq.pose_stamp.append(PoseStamped(header=hdr, pose=pose))
        try:
            resp = self._iksvc(ikreq)
        except (rospy.ServiceException, rospy.ROSException) as e:
            rospy.logerr("Service call failed: %s" % (e,))
            return None
        # If successful return joint angles, otherwise return None
        resp_seeds = struct.unpack('<%dB' % len(resp.result_type),
                                   resp.result_type)
        limb_joints = {}
        if resp_seeds[0] != resp.RESULT_INVALID:
            limb_joints = dict(zip(resp.joints[0].name,
                                   resp.joints[0].position))
        else:
            return None
        return limb_joints
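# A sketch of how the environment's reset/step/close API could drive one
# episode. The node name is an assumption, and the uniform-random policy
# stands in for a real agent; actions are (dx, dy) in [-1, 1], as implied
# by the scaling in _get_joint_angles.
if __name__ == '__main__':
    rospy.init_node('baxter_env_demo')  # assumed node name
    env = BaxterEnvironment()
    rospy.on_shutdown(env.close)
    state = env.reset()
    done = False
    while not done and not rospy.is_shutdown():
        action = np.random.uniform(-1., 1., 2)  # placeholder random policy
        state, reward, done = env.step(action)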
def start(self):
    server_stream_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    server_stream_socket.bind(("127.0.0.1", 12345))
    server_stream_socket.listen()
    camera = CameraController()
    state = 'end'

    def send_session_result(connection):
        # Wait for the camera thread to finish, then serialise the session
        # result (overall result plus the per-period breakdown) and send it.
        while True:
            if camera.finished is True:
                camera.finished = False
                break
        periods = []
        for i in range(0, 8):
            sp = []
            for period in camera.session_info.periods[i]:
                sp.append(period.__dict__)
            periods.append(sp)
        result = Result(camera.result.__dict__, periods)
        json_string = json.dumps(result.__dict__)
        connection.sendall(f"SessionResult:{json_string}".encode('UTF-8'))
        logging.warning("[Main]: msg sent")

    while True:
        logging.warning("[Main]: Waiting for connection ...")
        (connection, address) = server_stream_socket.accept()
        logging.warning("[Main]: Connected.")
        stop = False
        while True:
            try:
                logging.warning("[Main]: Waiting for data ...")
                msg = connection.recv(1024)
                msg = msg.decode('UTF-8')
                if msg != '':
                    logging.warning("[Main]: Data received...")
                    logging.warning("[Main]: msg: {}".format(msg))
                    if 'start' in msg:
                        if state == 'end':
                            # Message format: "start <path>"
                            path = msg[6:]
                            Path(path).mkdir(parents=True, exist_ok=True)
                            state = 'start'
                            camera.set_video_path(path)
                            camera.start_camera()
                            connection.sendall(b"StreamPort:9090")
                    elif 'end' in msg:
                        if state == 'start':
                            state = 'end'
                            camera.stop_camera()
                            send_session_result(connection)
                    elif 'exit' in msg:
                        logging.warning("[Main]: state='{}'".format(state))
                        if state == 'start':
                            state = 'end'
                            camera.stop_camera()
                            send_session_result(connection)
                        else:
                            connection.sendall("EXITING".encode('UTF-8'))
                            logging.warning("[Main]: exiting in progress")
                        connection.close()
                        stop = True
                        break
                else:
                    logging.warning("[Main]: Client disconnected")
                    connection.close()
                    break
            except Exception:
                logging.warning("[Main]: Socket disconnected")
                connection.close()
                break
        if stop:
            break
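# A client-side sketch of the text protocol above, inferred from the server
# code: "start <path>" begins a recording session (server replies with the
# stream port), "end" stops it and returns the session result, and "exit"
# closes the connection. The recording path is an assumption.
def run_client():
    with socket.create_connection(("127.0.0.1", 12345)) as sock:
        sock.sendall(b"start /tmp/session01")   # server replies "StreamPort:9090"
        print(sock.recv(1024).decode('UTF-8'))
        sock.sendall(b"end")                    # server replies "SessionResult:<json>"
        print(sock.recv(4096).decode('UTF-8'))
        sock.sendall(b"exit")                   # server closes after this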
    # (tail of the image callback)
    if camera_target is not None:
        ps = PoseStamped()
        ps.header.stamp = image.header.stamp
        ps.header.frame_id = 'base_link'
        ps.pose = ray_to_pose(p, camera_target)
        pose2_pub.publish(ps)
        cam.look_at(np.array(p) + camera_target, source_frame='base_link')


rospy.init_node('person_tracker')
bridge = CvBridge()
tr = TreeReader()
cam = CameraController()
track = None
camera_target = None
image_to_show = None
detect_objects = rospy.ServiceProxy('detect_objects', DetectObjects)
image_sub = rospy.Subscriber('image', Image, callback)
pose_pub = rospy.Publisher('detected_{}'.format(CLASS), PoseStamped,
                           queue_size=1)
pose2_pub = rospy.Publisher('camera_target', PoseStamped, queue_size=1)
rospy.spin()
            v = detection[0] - mean[:2]
            v = np.outer(v, v)
            track.update(t, detection[0], detection[1] + v * 10)

        # Keep only tracks whose positional uncertainty is still acceptable.
        tracks = []
        for track in self._tracks:
            _, cov = track.get_prediction(t)
            if np.trace(cov[:2, :2]) / 2. < 6.**2:
                tracks.append(track)
        self._tracks = tracks


if __name__ == '__main__':
    rospy.init_node('openpose_tracker')
    cam = CameraController()
    tracker = Tracker()
    camera_target = None
    move_camera = False
    people_msg = None
    pose_pub = rospy.Publisher('person', PoseWithCovarianceStamped,
                               queue_size=1)
    pose2_pub = rospy.Publisher('camera_target', PoseStamped, queue_size=1)

    def _cb(msg):
        global people_msg
        people_msg = msg

    rospy.Subscriber('people_tx2',
def import_sceneArcPkg(self, sceneArcPkg):
    # Load Actor data from the archive
    actorArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "actor":
            actorArcPkg = arcPkg
    actorMgr = ActorManager()
    self.__actorMgr = actorMgr
    #actorArcPkg.print_metaData()
    for actorItem in actorArcPkg.get_itemsData():
        actor = actorMgr.load_res(_resId=actorItem[0],
                                  resPath=actorItem[1],
                                  extraResPath=actorItem[2])
        #print "the sceneMgr get_res : ", self.get_res(actorItem[0])
        actor.setPos(actorItem[3])
        actor.setHpr(actorItem[4])
        actor.setScale(actorItem[5])
        parentNode = self.get_res(actorItem[6])
        actor.reparentTo(parentNode)
    eventActionRecord = actorArcPkg.get_metaData("eventActionRecord")
    eventEffertRecord = actorArcPkg.get_metaData("eventEffertRecord")
    #print "in import : eventActionRecord ", len(eventActionRecord)
    for actorId, record in eventActionRecord.iteritems():
        for toggleEvent, actionName in record.iteritems():
            actorMgr.add_toggle_to_actor(toggleEvent, actorId, actionName)
    for actorId, record in eventEffertRecord.iteritems():
        for toggleEvent, effertList in record.iteritems():
            for effert in effertList:
                print "import eventEffertRecord : ", toggleEvent, ", ", actorId, ", ", effert
                actorMgr.add_effert_to_actor(toggleEvent, actorId, effert[0])
    #actorMgr.set_clock(globalClock)
    actorMgr.print_eventActionRecord()
    actorMgr.print_eventEffertRecord()

    # Load Model data from the archive
    modelArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "model":
            modelArcPkg = arcPkg
    modelMgr = ModelManager()
    self.__modelMgr = modelMgr
    for modelItem in modelArcPkg.get_itemsData():
        model = modelMgr.load_res(_resId=modelItem[0], resPath=modelItem[1])
        model.setPos(modelItem[2])
        model.setHpr(modelItem[3])
        model.setScale(modelItem[4])
        parentNode = self.get_res(resId=modelItem[5])
        model.reparentTo(parentNode)

    # Load Terrain data from the archive
    terraArcPkg = None
    terraMgr = TerrainManager()
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "terrain":
            terraArcPkg = arcPkg
    for terraItem in terraArcPkg.get_itemsData():
        terrain = terraMgr.load_res(_resId=terraItem[0],
                                    resPath=terraItem[1],
                                    extraResPath=terraItem[2])
        terrain.getRoot().setPos(terraItem[3])
        terrain.getRoot().setHpr(terraItem[4])
        terrain.getRoot().setScale(terraItem[5])
        parentNode = self.get_res(resId=terraItem[6])
        terrain.getRoot().reparentTo(parentNode)
    terraMgr.set_currTerrain(terraArcPkg.get_metaData("currTerraId"))
    self.__terraMgr = terraMgr

    # Load Camera data from the archive
    cameraArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "camera":
            cameraArcPkg = arcPkg
    camCtrlr = CameraController()
    self.__camCtrlr = camCtrlr
    camCtrlr.bind_camera(self.__showbase.cam)
    camCtrlr.bind_ToggleHost(self.__showbase)
    cameraArcPkg = cameraArcPkg.get_itemsData()[0]
    #print "in import : ", cameraArcPkg[0]
    camCtrlr.get_camToCtrl().setPos(cameraArcPkg[0])
    camCtrlr.get_camToCtrl().setHpr(cameraArcPkg[1])
    camCtrlr.set_moveSpeed(cameraArcPkg[2])
    camCtrlr.set_rotateSpeed(cameraArcPkg[3])
    objToFocus = self.get_res(cameraArcPkg[4])
    camCtrlr.focus_on(objToFocus, cameraArcPkg[5])
    camCtrlr.set_optsSwitch(cameraArcPkg[6])
    #camCtrlr.set_toggleEventToOpts(cameraArcPkg[7])
    self.__actorMgr.bind_CameraController(camCtrlr)
    for toggleEvent, opt in cameraArcPkg[7].iteritems():
        camCtrlr.add_toggle_to_opt(toggleEvent, opt)
    # print "Camera Pos : ", camCtrlr.get_camToCtrl().getPos()
    # print "Camera Hpr : ", camCtrlr.get_camToCtrl().getHpr()

    # Load Light data from the archive
    lightArcPkg = None
    for arcPkg in sceneArcPkg:
        if arcPkg.get_ArchivePackageName() == "light":
            lightArcPkg = arcPkg
    lightCtrlr = LightController()
    self.__lightCtrlr = lightCtrlr
    lightCtrlr.bind_SceneManager(self)
    for lightItem in lightArcPkg.get_itemsData():
        light = lightCtrlr.create_light(
            _lightId=lightItem[0],
            lightType=SeriousTools.extract_name_from_Id(lightItem[0]),
            lightColor=lightItem[1],
            lightPos=lightItem[2],
            lightHpr=lightItem[3],
            targetId=lightItem[4],
            parentId=lightItem[6])
        #print "in import : ", light
        for setorId in lightItem[5]:
            lightCtrlr.set_light_to(lightItem[0], setorId)