import threading

import rospy

# NOTE: ListenerCamera, ListenerLaser, ListenerPose3d and PublisherMotors are
# helper classes provided by the exercise; their import lines are omitted here.


class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)
        self.pose3d = ListenerPose3d("/F1ROS/odom")
        self.laser = ListenerLaser("/F1ROS/laser/scan")
        self.camera_lock = threading.Lock()

    # Explicit initialization function.
    # Class method, so the user can call it without an instance.
    @classmethod
    def initRobot(cls):
        pass

    # Get Image from ROS Driver Camera
    def getImage(self):
        with self.camera_lock:
            self.image = self.camera.getImage().data
        return self.image
import rospy

# NOTE: SharedImage, SharedValue, ListenerCamera, PublisherMotors and ThreadHAL
# are helper classes provided by the exercise; their import lines are omitted here.


class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        # Shared memory variables
        self.shared_image = SharedImage("halimage")
        self.shared_v = SharedValue("velocity")
        self.shared_w = SharedValue("angular")

        # ROS Topics
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)

        # Update thread
        self.thread = ThreadHAL(self.update_hal)

    # Function to start the update thread
    def start_thread(self):
        self.thread.start()

    # Get Image from ROS Driver Camera and publish it to shared memory
    def getImage(self):
        image = self.camera.getImage().data
        self.shared_image.add(image)

    # Send the linear velocity stored in shared memory to the motors
    def setV(self):
        velocity = self.shared_v.get()
        self.motors.sendV(velocity)

    # Get the linear velocity
    def getV(self):
        velocity = self.shared_v.get()
        return velocity

    # Get the angular velocity
    def getW(self):
        angular = self.shared_w.get()
        return angular

    # Send the angular velocity stored in shared memory to the motors
    def setW(self):
        angular = self.shared_w.get()
        self.motors.sendW(angular)

    def update_hal(self):
        self.getImage()
        self.setV()
        self.setW()

    # Destructor function to close all fds
    def __del__(self):
        self.shared_image.close()
        self.shared_v.close()
        self.shared_w.close()
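# The ThreadHAL class used above is not part of this snippet. A minimal sketch
# of what such an update-thread wrapper could look like is given below; the
# constructor signature beyond the update function, the 80 ms default period
# and the daemon-thread shutdown strategy are assumptions for illustration,
# not the exercise's actual implementation.
import threading
import time


class ThreadHAL(threading.Thread):
    def __init__(self, update_function, period=0.08):
        super().__init__(daemon=True)
        self.update_function = update_function  # e.g. HAL.update_hal
        self.period = period                    # seconds between iterations

    def run(self):
        # Call the update function at a roughly fixed rate, compensating for
        # the time the update itself takes.
        while True:
            start = time.time()
            self.update_function()
            elapsed = time.time() - start
            if elapsed < self.period:
                time.sleep(self.period - elapsed)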
import rospy

# NOTE: ListenerCamera and PublisherMotors are helper classes provided by the
# exercise; their import lines are omitted here.


class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)

    # Explicit initialization function.
    # Class method, so the user can call it without an instance.
    @classmethod
    def initRobot(cls):
        new_instance = cls()
        return new_instance

    # Get Image from ROS Driver Camera
    def getImage(self):
        image = self.camera.getImage().data
        return image
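# Hypothetical user-side sketch of how this HAL might be driven; in the
# exercise the HAL object is normally created and exposed to the student's
# code, so this is for illustration only. The velocity values are arbitrary;
# the 4 and 0.3 passed to PublisherMotors above appear to act as maxV/maxW
# limits (compare PublisherMotors(topicM, maxV, maxW) in the
# configuration-driven HAL below).
if __name__ == "__main__":
    hal = HAL.initRobot()       # classmethod, returns a new HAL instance
    frame = hal.getImage()      # latest frame from /F1ROS/cameraL/image_raw
    hal.motors.sendV(2.0)       # linear velocity command
    hal.motors.sendW(0.1)       # angular velocity command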
import os

import cv2
import rospy
import yaml

# NOTE: ListenerCamera, ListenerPose3d and PublisherMotors are helper classes
# provided by the exercise; their import lines are omitted here.


class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        # Shared memory variables
        self.image = None
        self.v = None
        self.w = None
        self.p3d = None

        if os.getcwd() == "/":
            conf_path = "/RoboticsAcademy/exercises/car_junction/web-template/stop_conf.yml"
        else:
            conf_path = "stop_conf.yml"
        with open(conf_path, "r") as f:
            cfg = yaml.safe_load(f)
        ymlNode = cfg['Stop']
        # node = rospy.init_node(ymlNode["NodeName"], anonymous=True)

        # ------------ M O T O R S ----------------------------------
        print("Publishing Stop.Motors with ROS messages")
        topicM = cfg['Stop']["Motors"]["Topic"]
        maxW = cfg['Stop']["Motors"]["maxW"]
        if not maxW:
            maxW = 0.5
            print("Stop.Motors.maxW not provided, the default value is used: " + repr(maxW))

        maxV = cfg['Stop']["Motors"]["maxV"]
        if not maxV:
            maxV = 5
            print("Stop.Motors.maxV not provided, the default value is used: " + repr(maxV))

        self.motors = PublisherMotors(topicM, maxV, maxW)

        # ----------------- P O S E 3 D -------------------------------------
        print("Receiving Stop.Pose3D from ROS messages")
        topicP = cfg['Stop']["Pose3D"]["Topic"]
        self.pose3d = ListenerPose3d(topicP)

        # -------- C A M E R A  C E N T R A L --------------------------------
        print("Receiving Stop.CameraC CameraData from ROS messages")
        topicCameraC = cfg['Stop']["CameraC"]["Topic"]
        self.cameraC = ListenerCamera(topicCameraC)

        # -------- C A M E R A  L E F T ---------------------------------------
        print("Receiving Stop.CameraL CameraData from ROS messages")
        topicCameraL = cfg['Stop']["CameraL"]["Topic"]
        self.cameraL = ListenerCamera(topicCameraL)

        # -------- C A M E R A  R I G H T -------------------------------------
        print("Receiving Stop.CameraR CameraData from ROS messages")
        topicCameraR = cfg['Stop']["CameraR"]["Topic"]
        self.cameraR = ListenerCamera(topicCameraR)

        self.template = cv2.imread('assets/img/template.png', 0)

    # Get Image from ROS Driver Camera
    def getImage(self, lr):
        if lr == 'left':
            image = self.cameraL.getImage().data
        elif lr == 'right':
            image = self.cameraR.getImage().data
        elif lr == 'center':
            image = self.cameraC.getImage().data
        else:
            print("Invalid camera")
        return image

    # Set the velocity
    def setV(self, velocity):
        self.v = velocity
        self.motors.sendV(velocity)

    # Get the velocity
    def getV(self):
        velocity = self.v
        return velocity

    # Get the angular velocity
    def getW(self):
        angular = self.w
        return angular

    # Set the angular velocity
    def setW(self, angular):
        self.w = angular
        self.motors.sendW(angular)

    def getPose3D(self):
        return self.pose3d

    def setPose3D(self, pose3d):
        self.pose3d = pose3d

    def getTemplate(self):
        return self.template

    def getYaw(self):
        return self.pose3d.data.yaw
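# For reference, a stop_conf.yml with roughly the following shape would satisfy
# the lookups performed above. The topic names and values are placeholders, not
# the exercise's real configuration.
import yaml

EXAMPLE_STOP_CONF = """
Stop:
  NodeName: Stop
  Motors:
    Topic: /car/cmd_vel
    maxV: 5
    maxW: 0.5
  Pose3D:
    Topic: /car/odom
  CameraC:
    Topic: /car/cameraC/image_raw
  CameraL:
    Topic: /car/cameraL/image_raw
  CameraR:
    Topic: /car/cameraR/image_raw
"""

cfg = yaml.safe_load(EXAMPLE_STOP_CONF)
assert cfg['Stop']['Motors']['Topic'] == '/car/cmd_vel'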
import numpy as np
import rospy

# NOTE: ListenerCamera and ListenerParameters are helper classes provided by
# the exercise; their import lines are omitted here.


class HAL:
    IMG_WIDTH = 640
    IMG_HEIGHT = 480

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.cameraL = ListenerCamera("/TurtlebotROS/cameraL/image_raw")
        self.cameraR = ListenerCamera("/TurtlebotROS/cameraR/image_raw")

        self.camLeftP = ListenerParameters("3d_reconstruction_conf.yml", "CamACalibration")
        self.camRightP = ListenerParameters("3d_reconstruction_conf.yml", "CamBCalibration")

    # Get Image from ROS Driver Camera
    def getImage(self, lr):
        if lr == 'left':
            image = self.cameraL.getImage().data
        elif lr == 'right':
            image = self.cameraR.getImage().data
        else:
            print("Invalid camera")
        return image

    # Transform the Coordinate System to the Camera System
    def graficToOptical(self, lr, point2d):
        if lr == 'left':
            pointOpt = self.camLeftP.graficToOptical(point2d)
        elif lr == 'right':
            pointOpt = self.camRightP.graficToOptical(point2d)
        else:
            print("Invalid camera")
        return pointOpt

    # Backproject the 2D point into 3D space
    def backproject(self, lr, point2d):
        if lr == 'left':
            point3D = self.camLeftP.backproject(point2d)
        elif lr == 'right':
            point3D = self.camRightP.backproject(point2d)
        else:
            print("Invalid camera")
        return point3D

    # Get Camera Position from ROS Driver Camera
    def getCameraPosition(self, lr):
        if lr == 'left':
            position_cam = self.camLeftP.getCameraPosition()
        elif lr == 'right':
            position_cam = self.camRightP.getCameraPosition()
        else:
            print("Invalid camera")
        return position_cam

    # Project a 3D point onto the image
    def project(self, lr, point3d):
        if lr == 'left':
            projected = self.camLeftP.project(point3d)
        elif lr == 'right':
            projected = self.camRightP.project(point3d)
        else:
            print("Invalid camera")
        return projected

    # Get Image Coordinates
    def opticalToGrafic(self, lr, point2d):
        if lr == 'left':
            point = self.camLeftP.opticalToGrafic(point2d)
        elif lr == 'right':
            point = self.camRightP.opticalToGrafic(point2d)
        else:
            print("Invalid camera")
        return point

    def project3DScene(self, point3d):
        px = point3d[0] / 100.0
        py = point3d[1] / 100.0 + 12
        pz = point3d[2] / 100.0
        outPoint = np.array([px, py, pz])
        return outPoint
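# Hypothetical sketch of how these helpers chain together when triangulating a
# point in the 3D-reconstruction exercise: image pixel -> optical coordinates ->
# back-projected 3D point, then re-projection into the other camera to search
# along the epipolar line. The pixel value and the homogeneous (u, v, 1)
# convention are assumptions for illustration.
import numpy as np

hal = HAL()

pixel_left = np.array([160, 120, 1])                    # a pixel in the left image
optical_left = hal.graficToOptical('left', pixel_left)  # image -> camera (optical) coordinates
point3d = hal.backproject('left', optical_left)         # 3D point on the back-projected ray
cam_left = hal.getCameraPosition('left')                # ray origin: left camera position

projected_right = hal.project('right', point3d)         # project the 3D point into the right camera
pixel_right = hal.opticalToGrafic('right', projected_right)  # optical -> right-image coordinates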
import math

import numpy as np
import rospy

# NOTE: ListenerCamera and ListenerParameters are helper classes provided by
# the exercise; their import lines are omitted here.


class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.cameraL = ListenerCamera("/TurtlebotROS/cameraL/image_raw")
        self.cameraR = ListenerCamera("/TurtlebotROS/cameraR/image_raw")

        self.camLeftP = ListenerParameters("3d_reconstruction_conf.yml", "CamACalibration")
        self.camRightP = ListenerParameters("3d_reconstruction_conf.yml", "CamBCalibration")

    # Explicit initialization function.
    # Class method, so the user can call it without an instance.
    @classmethod
    def initRobot(cls):
        new_instance = cls()
        return new_instance

    # Get Image from ROS Driver Camera
    def getImage(self, lr):
        if lr == 'left':
            image = self.cameraL.getImage().data
        elif lr == 'right':
            image = self.cameraR.getImage().data
        else:
            print("Invalid camera")
        return image

    # Transform the Coordinate System to the Camera System
    def graficToOptical(self, lr, point2d):
        if lr == 'left':
            pointOpt = self.camLeftP.graficToOptical(point2d)
        elif lr == 'right':
            pointOpt = self.camRightP.graficToOptical(point2d)
        else:
            print("Invalid camera")
        return pointOpt

    # Backproject the 2D point into 3D space
    def backproject(self, lr, point2d):
        if lr == 'left':
            point3D = self.camLeftP.backproject(point2d)
        elif lr == 'right':
            point3D = self.camRightP.backproject(point2d)
        else:
            print("Invalid camera")
        return point3D

    # Get Camera Position from ROS Driver Camera
    def getCameraPosition(self, lr):
        if lr == 'left':
            position_cam = self.camLeftP.getCameraPosition()
        elif lr == 'right':
            position_cam = self.camRightP.getCameraPosition()
        else:
            print("Invalid camera")
        return position_cam

    # Project a 3D point onto the image
    def project(self, lr, point3d):
        if lr == 'left':
            projected = self.camLeftP.project(point3d)
        elif lr == 'right':
            projected = self.camRightP.project(point3d)
        else:
            print("Invalid camera")
        return projected

    # Get Image Coordinates
    def opticalToGrafic(self, lr, point2d):
        if lr == 'left':
            point = self.camLeftP.opticalToGrafic(point2d)
        elif lr == 'right':
            point = self.camRightP.opticalToGrafic(point2d)
        else:
            print("Invalid camera")
        return point

    # Rotate the point with fixed Euler angles, scale it by 1/100 and shift it
    # 12 units along y to place it in the scene's coordinate system.
    def project3DScene(self, point3d):
        phi = 90
        theta = 0
        alpha = -90

        cos_phi = math.cos(math.radians(phi))
        sin_phi = math.sin(math.radians(phi))
        cos_theta = math.cos(math.radians(theta))
        sin_theta = math.sin(math.radians(theta))
        cos_alpha = math.cos(math.radians(alpha))
        sin_alpha = math.sin(math.radians(alpha))

        px = ((cos_phi * cos_theta) * point3d[0]
              + (sin_phi * sin_alpha - cos_phi * sin_theta * cos_alpha) * point3d[1]
              + (cos_phi * sin_theta * sin_alpha + sin_phi * cos_alpha) * point3d[2]) / 100.0
        py = (sin_theta * point3d[0]
              + (cos_theta * cos_alpha) * point3d[1]
              + (-cos_theta * sin_alpha) * point3d[2]) / 100.0 + 12
        pz = ((-sin_phi * cos_theta) * point3d[0]
              + (sin_phi * sin_theta * cos_alpha + cos_phi * sin_alpha) * point3d[1]
              + (cos_phi * cos_alpha - sin_phi * sin_theta * sin_alpha) * point3d[2]) / 100.0

        outPoint = np.array([px, py, pz])
        return outPoint
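# The expanded px/py/pz expressions in project3DScene above are an Euler-angle
# rotation (phi = 90, theta = 0, alpha = -90 degrees) followed by a 1/100 scale
# and a +12 offset on the y axis. The helper below, whose name is ours for
# illustration, restates the same arithmetic with an explicit rotation matrix
# as a cross-check of the formulas.
import numpy as np


def project3DScene_matrix(point3d):
    phi, theta, alpha = np.radians([90.0, 0.0, -90.0])
    c_phi, s_phi = np.cos(phi), np.sin(phi)
    c_the, s_the = np.cos(theta), np.sin(theta)
    c_alp, s_alp = np.cos(alpha), np.sin(alpha)

    # Same rotation matrix as the expanded expressions in project3DScene.
    R = np.array([
        [c_phi * c_the, s_phi * s_alp - c_phi * s_the * c_alp, c_phi * s_the * s_alp + s_phi * c_alp],
        [s_the, c_the * c_alp, -c_the * s_alp],
        [-s_phi * c_the, s_phi * s_the * c_alp + c_phi * s_alp, c_phi * c_alp - s_phi * s_the * s_alp],
    ])

    # For these fixed angles R reduces (up to rounding) to
    # [[0, -1, 0], [0, 0, 1], [-1, 0, 0]], i.e. (x, y, z) -> (-y, z, -x).
    return R @ np.asarray(point3d, dtype=float) / 100.0 + np.array([0.0, 12.0, 0.0])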