Example #1
    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)
        self.camera_lock = threading.Lock()
Example #2
    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)
        self.pose3d = ListenerPose3d("/F1ROS/odom")
        self.laser = ListenerLaser("/F1ROS/laser/scan")
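The ListenerCamera, PublisherMotors, ListenerPose3d and ListenerLaser classes used throughout these examples come from the interfaces package and are not shown here. Purely as orientation, a minimal stand-in for the listener pattern they follow might be a plain rospy subscriber that keeps the latest message under a lock; note that the real ListenerCamera returns an object whose .data attribute holds the image, as the camera.getImage().data calls in later examples suggest, while this sketch returns the array directly.

import threading

import rospy
from cv_bridge import CvBridge
from sensor_msgs.msg import Image as ImageMsg

class MinimalListenerCamera:
    def __init__(self, topic):
        self._bridge = CvBridge()
        self._lock = threading.Lock()
        self._frame = None
        # Keep only the most recent frame from the topic
        self._sub = rospy.Subscriber(topic, ImageMsg, self._callback, queue_size=1)

    def _callback(self, msg):
        frame = self._bridge.imgmsg_to_cv2(msg, desired_encoding="bgr8")
        with self._lock:
            self._frame = frame

    def getImage(self):
        with self._lock:
            return self._frame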
Example #3
    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.cameraL = ListenerCamera("/TurtlebotROS/cameraL/image_raw")
        self.cameraR = ListenerCamera("/TurtlebotROS/cameraR/image_raw")

        self.camLeftP = ListenerParameters("3d_reconstruction_conf.yml",
                                           "CamACalibration")
        self.camRightP = ListenerParameters("3d_reconstruction_conf.yml",
                                            "CamBCalibration")
Example #4
    def __init__(self):
        rospy.init_node("HAL")

        # Shared memory variables
        self.shared_image = SharedImage("halimage")
        self.shared_v = SharedValue("velocity")
        self.shared_w = SharedValue("angular")

        # ROS Topics
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)

        # Update thread
        self.thread = ThreadHAL(self.update_hal)
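ThreadHAL is referenced here but not defined in any of these snippets. Assuming it does nothing more than run the supplied update callback at a fixed period, a minimal stand-in could look like this (the 60 ms period is an arbitrary choice for illustration):

import threading
import time

class MinimalThreadHAL(threading.Thread):
    def __init__(self, update_function, period=0.06):
        super(MinimalThreadHAL, self).__init__()
        self.daemon = True
        self._update = update_function   # e.g. HAL.update_hal
        self._period = period            # seconds between calls (assumed)
        self._running = threading.Event()
        self._running.set()

    def run(self):
        while self._running.is_set():
            self._update()
            time.sleep(self._period)

    def stop(self):
        self._running.clear()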
Example #5
class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)
        self.pose3d = ListenerPose3d("/F1ROS/odom")
        self.laser = ListenerLaser("/F1ROS/laser/scan")
        self.camera_lock = threading.Lock()

    # Explicit initialization functions
    # Class method, so user can call it without instantiation
    @classmethod
    def initRobot(cls):
        pass

    # Get Image from ROS Driver Camera
    def getImage(self):
        self.camera_lock.acquire()
        self.image = self.camera.getImage().data
        self.camera_lock.release()

        return self.image
Example #6
class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        # Shared memory variables
        self.shared_image = SharedImage("halimage")
        self.shared_v = SharedValue("velocity")
        self.shared_w = SharedValue("angular")

        # ROS Topics
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)

        # Update thread
        self.thread = ThreadHAL(self.update_hal)

    # Function to start the update thread
    def start_thread(self):
        self.thread.start()

    # Get Image from ROS Driver Camera
    def getImage(self):
        image = self.camera.getImage().data
        self.shared_image.add(image)

    # Set the velocity
    def setV(self):
        velocity = self.shared_v.get()
        self.motors.sendV(velocity)

    # Get the velocity
    def getV(self):
        velocity = self.shared_v.get()
        return velocity

    # Get the angular velocity
    def getW(self):
        angular = self.shared_w.get()
        return angular

    # Set the angular velocity
    def setW(self):
        angular = self.shared_w.get()
        self.motors.sendW(angular)

    def update_hal(self):
        self.getImage()
        self.setV()
        self.setW()

    # Destructor function to close all fds
    def __del__(self):
        self.shared_image.close()
        self.shared_v.close()
        self.shared_w.close()
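SharedImage and SharedValue are likewise not shown; the names passed to them ("halimage", "velocity", "angular") suggest named shared-memory segments exchanged with the user's process. Purely to illustrate the pattern, and not the real implementation, a single-process stand-in for SharedValue could be:

from multiprocessing import Value

class MinimalSharedValue:
    def __init__(self, name):
        self.name = name              # the real class presumably resolves this to named shared memory
        self._val = Value('d', 0.0)   # a double guarded by an internal lock

    def add(self, value):             # setter name mirrors SharedImage.add() used above
        with self._val.get_lock():
            self._val.value = value

    def get(self):
        with self._val.get_lock():
            return self._val.value

    def close(self):                  # matches the close() calls in __del__ above
        pass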
Example #7
    def __init__(self):
        rospy.init_node("HAL")

        # Shared memory variables
        self.image = None
        self.v = None
        self.w = None
        self.p3d = None

        if os.getcwd() == "/":
            f = open("/RoboticsAcademy/exercises/car_junction/web-template/stop_conf.yml", "r")
        else:
            f = open("stop_conf.yml", "r")

        cfg = yaml.safe_load(f)

        ymlNode = cfg['Stop']
        #node = rospy.init_node(ymlNode["NodeName"], anonymous=True)

        # ------------ M O T O R S ----------------------------------
        print("Publishing "+  "Stop.Motors" + " with ROS messages")
        topicM = cfg['Stop']["Motors"]["Topic"]
        maxW = cfg['Stop']["Motors"]["maxW"]
        if not maxW:
            maxW = 0.5
            print ("Stop.Motors"+".maxW not provided, the default value is used: "+ repr(maxW))

        maxV = cfg['Stop']["Motors"]["maxV"]
        if not maxV:
            maxV = 5
            print ("Stop.Motors"+".maxV not provided, the default value is used: "+ repr(maxV))
    
        self.motors = PublisherMotors(topicM, maxV, maxW)

        # ----------------- P O S E     3 D -------------------------------------
        print("Receiving " + "Stop.Pose3D" + " from ROS messages")
        topicP = cfg['Stop']["Pose3D"]["Topic"]
        self.pose3d = ListenerPose3d(topicP)

        # -------- C A M E R A C E N T R A L --------------------------------------
        print("Receiving " + "Stop.CameraC" + "  CameraData from ROS messages")
        topicCameraC  = cfg['Stop']["CameraC"]["Topic"]
        self.cameraC = ListenerCamera(topicCameraC)

        # -------- C A M E R A L E F T --------------------------------------------
        print("Receiving " + "Stop.CameraL" + "  CameraData from ROS messages")
        topicCameraL  = cfg['Stop']["CameraL"]["Topic"]
        self.cameraL = ListenerCamera(topicCameraL)

        # -------- C A M E R A R I G H T ------------------------------------------
        print("Receiving " + "Stop.CameraR" + "  CameraData from ROS messages")
        topicCameraR  = cfg['Stop']["CameraR"]["Topic"]
        self.cameraR = ListenerCamera(topicCameraR)

        self.template = cv2.imread('assets/img/template.png',0)
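The dictionary lookups above imply a stop_conf.yml shaped roughly as follows. The structure mirrors the cfg['Stop'][...] accesses in this example; the topic values are only illustrative and are borrowed from the commented-out defaults visible in Example #10, and the NodeName value is a guess.

Stop:
  NodeName: Stop
  Motors:
    Topic: /opel/cmd_vel
    maxV: 5
    maxW: 0.5
  Pose3D:
    Topic: /opel/odom
  CameraC:
    Topic: /opel/cameraC/image_raw
  CameraL:
    Topic: /opel/cameraL/image_raw
  CameraR:
    Topic: /opel/cameraR/image_raw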
Example #8
class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.camera = ListenerCamera("/F1ROS/cameraL/image_raw")
        self.motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)

    # Explicit initialization functions
    # Class method, so user can call it without instantiation
    @classmethod
    def initRobot(cls):
        new_instance = cls()
        return new_instance

    # Get Image from ROS Driver Camera
    def getImage(self):
        image = self.camera.getImage().data
        return image
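A short usage sketch for this variant, assuming a running ROS master and the F1 simulation; initRobot() is just an alternative constructor here:

hal = HAL.initRobot()     # equivalent to HAL() in this example
frame = hal.getImage()    # latest frame from /F1ROS/cameraL/image_raw
hal.motors.sendV(2)       # sendV/sendW as used by setV/setW in other examples
hal.motors.sendW(0.2)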
Example #9
#       Aitor Martinez Fernandez <*****@*****.**>
#       Francisco Miguel Rivas Montero <*****@*****.**>
#

import sys
from gui.GUI import MainWindow
from gui.threadGUI import ThreadGUI
from MyAlgorithm import MyAlgorithm
from PyQt5.QtWidgets import QApplication
from interfaces.camera import ListenerCamera
from interfaces.pose3d import ListenerPose3d
from interfaces.motors import PublisherMotors

if __name__ == "__main__":

    camera = ListenerCamera("/F1ROS/cameraL/image_raw")
    motors = PublisherMotors("/F1ROS/cmd_vel", 4, 0.3)
    pose3d = ListenerPose3d("/F1ROS/odom")
    pose3dphantom = ListenerPose3d("/F1ROS_phantom/odom")
    algorithm = MyAlgorithm(camera, motors, pose3d)

    app = QApplication(sys.argv)
    myGUI = MainWindow()
    myGUI.setCamera(camera)
    myGUI.setMotors(motors)
    myGUI.setPose3D(pose3d, pose3dphantom)
    myGUI.setAlgorithm(algorithm)
    myGUI.show()

    t2 = ThreadGUI(myGUI)
    t2.daemon = True
Example #10
File: stop.py  Project: igarag/Academy
        print("Stop.Motors" +
              ".maxV not provided, the default value is used: " + repr(maxV))

    # motors = PublisherMotors("/opel/cmd_vel", maxV, maxW)
    motors = PublisherMotors(topicM, maxV, maxW)

    # ----------------- P O S E     3 D -------------------------------------
    print("Receiving " + "Stop.Pose3D" + " from ROS messages")
    topicP = cfg.getProperty("Stop.Pose3D" + ".Topic")
    pose3d = ListenerPose3d(topicP)
    # pose3d = ListenerPose3d("/opel/odom")

    # -------- C A M E R A C E N T R A L --------------------------------------
    print("Receiving " + "Stop.CameraC" + "  CameraData from ROS messages")
    topicCameraC = cfg.getProperty("Stop.CameraC" + ".Topic")
    cameraC = ListenerCamera(topicCameraC)
    # cameraC = ListenerCamera("/opel/cameraC/image_raw")

    # -------- C A M E R A L E F T --------------------------------------------
    print("Receiving " + "Stop.CameraL" + "  CameraData from ROS messages")
    topicCameraL = cfg.getProperty("Stop.CameraL" + ".Topic")
    cameraL = ListenerCamera(topicCameraL)
    # cameraL = ListenerCamera("/opel/cameraL/image_raw")

    # -------- C A M E R A R I G H T ------------------------------------------
    print("Receiving " + "Stop.CameraR" + "  CameraData from ROS messages")
    topicCameraR = cfg.getProperty("Stop.CameraR" + ".Topic")
    cameraR = ListenerCamera(topicCameraR)
    # cameraR = ListenerCamera("/opel/cameraR/image_raw")

    print("Starting movement of dummy cars")
Example #11
class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        # Shared memory variables
        self.image = None
        self.v = None
        self.w = None
        self.p3d = None

        if os.getcwd() == "/":
            f = open("/RoboticsAcademy/exercises/car_junction/web-template/stop_conf.yml", "r")
        else:
            f = open("stop_conf.yml", "r")

        cfg = yaml.safe_load(f)

        ymlNode = cfg['Stop']
        #node = rospy.init_node(ymlNode["NodeName"], anonymous=True)

        # ------------ M O T O R S ----------------------------------
        print("Publishing "+  "Stop.Motors" + " with ROS messages")
        topicM = cfg['Stop']["Motors"]["Topic"]
        maxW = cfg['Stop']["Motors"]["maxW"]
        if not maxW:
            maxW = 0.5
            print ("Stop.Motors"+".maxW not provided, the default value is used: "+ repr(maxW))

        maxV = cfg['Stop']["Motors"]["maxV"]
        if not maxV:
            maxV = 5
            print ("Stop.Motors"+".maxV not provided, the default value is used: "+ repr(maxV))
    
        self.motors = PublisherMotors(topicM, maxV, maxW)

        # ----------------- P O S E     3 D -------------------------------------
        print("Receiving " + "Stop.Pose3D" + " from ROS messages")
        topicP = cfg['Stop']["Pose3D"]["Topic"]
        self.pose3d = ListenerPose3d(topicP)

        # -------- C A M E R A C E N T R A L --------------------------------------
        print("Receiving " + "Stop.CameraC" + "  CameraData from ROS messages")
        topicCameraC  = cfg['Stop']["CameraC"]["Topic"]
        self.cameraC = ListenerCamera(topicCameraC)

        # -------- C A M E R A L E F T --------------------------------------------
        print("Receiving " + "Stop.CameraL" + "  CameraData from ROS messages")
        topicCameraL  = cfg['Stop']["CameraL"]["Topic"]
        self.cameraL = ListenerCamera(topicCameraL)

        # -------- C A M E R A R I G H T ------------------------------------------
        print("Receiving " + "Stop.CameraR" + "  CameraData from ROS messages")
        topicCameraR  = cfg['Stop']["CameraR"]["Topic"]
        self.cameraR = ListenerCamera(topicCameraR)

        self.template = cv2.imread('assets/img/template.png',0)

    # Get Image from ROS Driver Camera
    def getImage(self, lr):
        if (lr == 'left'):
            image = self.cameraL.getImage().data
        elif (lr == 'right'):
            image = self.cameraR.getImage().data
        elif (lr == 'center'):
            image = self.cameraC.getImage().data
        else:
            print("Invalid camera")
            image = None

        return image

    # Set the velocity
    def setV(self, velocity):
        self.v = velocity
        self.motors.sendV(velocity)

    # Get the velocity
    def getV(self):
        velocity = self.v
        return velocity

    # Get the angular velocity
    def getW(self):
        angular = self.w
        return angular

    # Set the angular velocity
    def setW(self, angular):
        self.w = angular
        self.motors.sendW(angular)

    def getPose3D(self):
        return self.pose3d

    def setPose3D(self, pose3d):
        self.pose3d = pose3d

    def getTemplate(self):
        return self.template

    def getYaw(self):
        return self.pose3d.data.yaw
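A hedged usage sketch for this car_junction HAL, using only the methods defined above; the numeric values are arbitrary:

hal = HAL()
left = hal.getImage('left')      # frame from the left camera listener
center = hal.getImage('center')  # frame from the central camera listener
template = hal.getTemplate()     # grayscale stop-sign template loaded in __init__
yaw = hal.getYaw()               # heading from the Pose3D listener
hal.setV(3)                      # forward speed, passed on to PublisherMotors
hal.setW(0.1)                    # turn rate, passed on to PublisherMotors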
Example #12
    #jdrc= comm.init(cfg, 'FollowLineTurtlebot')

    robot = cfg.getProperty("FollowLineTurtlebot.Robot")

    maxv = cfg.getPropertyWithDefault("FollowLineTurtlebot.MaxV", 2)
    maxw = cfg.getPropertyWithDefault("FollowLineTurtlebot.MaxW", 0.3)

    if (robot == "simkobuki"):
        cam_path = cfg.getProperty("FollowLineTurtlebot.SimCameraPath")
        mot_path = cfg.getProperty("FollowLineTurtlebot.SimMotorsPath")

    else: # realkobuki
        cam_path = cfg.getProperty("FollowLineTurtlebot.RealCameraPath")
        mot_path = cfg.getProperty("FollowLineTurtlebot.RealMotorsPath")

    camera = ListenerCamera(cam_path)
    motors = PublisherMotors(mot_path, maxv, maxw)

    algorithm=MyAlgorithm(camera, motors)

    app = QApplication(sys.argv)
    myGUI = MainWindow()
    myGUI.setCamera(camera)
    myGUI.setMotors(motors)
    myGUI.setAlgorithm(algorithm)
    myGUI.show()

    t2 = ThreadGUI(myGUI)
    t2.daemon=True
    t2.start()
Example #13
# General imports
import sys

# Practice imports
from gui.GUI import MainWindow
from gui.threadGUI import ThreadGUI
from MyAlgorithm import MyAlgorithm
from PyQt5.QtWidgets import QApplication
from interfaces.camera import ListenerCamera
from interfaces.motors import PublisherMotors
from interfaces.point import ListenerPoint, PublisherPoint

if __name__ == "__main__":

    cameraL = ListenerCamera("/TurtlebotROS/cameraL/image_raw")
    cameraR = ListenerCamera("/TurtlebotROS/cameraR/image_raw")
    #motors = PublisherMotors("/TurtlebotROS/cmd_vel", 4, 0.3)

    pointTopic = "/TurtlebotROS/point"
    publishPoint = PublisherPoint(pointTopic)
    listenPoint = ListenerPoint(pointTopic)

    algorithm = MyAlgorithm(cameraL, cameraR, publishPoint)

    app = QApplication(sys.argv)
    myGUI = MainWindow()
    myGUI.setCamera(cameraL, 'left')
    myGUI.setCamera(cameraR, 'right')
    myGUI.setPlot(listenPoint)
    myGUI.setAlgorithm(algorithm)
Example #14
class HAL:
    IMG_WIDTH = 640
    IMG_HEIGHT = 480

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.cameraL = ListenerCamera("/TurtlebotROS/cameraL/image_raw")
        self.cameraR = ListenerCamera("/TurtlebotROS/cameraR/image_raw")

        self.camLeftP = ListenerParameters("3d_reconstruction_conf.yml",
                                           "CamACalibration")
        self.camRightP = ListenerParameters("3d_reconstruction_conf.yml",
                                            "CamBCalibration")

    # Get Image from ROS Driver Camera
    def getImage(self, lr):
        if (lr == 'left'):
            image = self.cameraL.getImage().data
        elif (lr == 'right'):
            image = self.cameraR.getImage().data
        else:
            print("Invalid camera")
            image = None

        return image

    # Transform the Coordinate System to the Camera System
    def graficToOptical(self, lr, point2d):
        if (lr == 'left'):
            pointOpt = self.camLeftP.graficToOptical(point2d)
        elif (lr == 'right'):
            pointOpt = self.camRightP.graficToOptical(point2d)
        else:
            print("Invalid camera")
            pointOpt = None
        return pointOpt

    # Backprojects the 2D Point into 3D Space
    def backproject(self, lr, point2d):
        if (lr == 'left'):
            point3D = self.camLeftP.backproject(point2d)
        elif (lr == 'right'):
            point3D = self.camRightP.backproject(point2d)
        else:
            print("Invalid camera")
            point3D = None
        return point3D

    # Get Camera Position from ROS Driver Camera
    def getCameraPosition(self, lr):
        if (lr == 'left'):
            position_cam = self.camLeftP.getCameraPosition()
        elif (lr == 'right'):
            position_cam = self.camRightP.getCameraPosition()
        else:
            print("Invalid camera")
            position_cam = None

        return position_cam

    # Projects a 3D Point onto the Image
    def project(self, lr, point3d):
        if (lr == 'left'):
            projected = self.camLeftP.project(point3d)
        elif (lr == 'right'):
            projected = self.camRightP.project(point3d)
        else:
            print("Invalid camera")
            projected = None

        return projected

    # Get Image Coordinates
    def opticalToGrafic(self, lr, point2d):
        if (lr == 'left'):
            point = self.camLeftP.opticalToGrafic(point2d)
        elif (lr == 'right'):
            point = self.camRightP.opticalToGrafic(point2d)
        else:
            print("Invalid camera")
            point = None

        return point

    def project3DScene(self, point3d):
        px = point3d[0] / 100.0
        py = point3d[1] / 100.0 + 12
        pz = point3d[2] / 100.0
        outPoint = np.array([px, py, pz])
        return outPoint
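A hedged sketch of the round trip these helpers support, chaining the methods above for the left camera; the [x, y, 1] homogeneous point format is an assumption, not something shown in these snippets:

hal = HAL()
pixel = [160, 120, 1]                            # assumed homogeneous image point
optical = hal.graficToOptical('left', pixel)     # image axes -> optical axes
ray_point = hal.backproject('left', optical)     # a 3D point along the viewing ray
cam_pos = hal.getCameraPosition('left')          # optical centre of the left camera
reprojected = hal.project('left', ray_point)     # 3D point -> optical coordinates
back_on_image = hal.opticalToGrafic('left', reprojected)
scene_point = hal.project3DScene(ray_point)      # scaled/offset point for the 3D viewer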
Example #15
class HAL:
    IMG_WIDTH = 320
    IMG_HEIGHT = 240

    def __init__(self):
        rospy.init_node("HAL")

        self.image = None
        self.cameraL = ListenerCamera("/TurtlebotROS/cameraL/image_raw")
        self.cameraR = ListenerCamera("/TurtlebotROS/cameraR/image_raw")

        self.camLeftP = ListenerParameters("3d_reconstruction_conf.yml",
                                           "CamACalibration")
        self.camRightP = ListenerParameters("3d_reconstruction_conf.yml",
                                            "CamBCalibration")

    # Explicit initialization functions
    # Class method, so user can call it without instantiation
    @classmethod
    def initRobot(cls):
        new_instance = cls()
        return new_instance

    # Get Image from ROS Driver Camera
    def getImage(self, lr):
        if (lr == 'left'):
            image = self.cameraL.getImage().data
        elif (lr == 'right'):
            image = self.cameraR.getImage().data
        else:
            print("Invalid camera")
            image = None

        return image

    # Transform the Coordinate System to the Camera System
    def graficToOptical(self, lr, point2d):
        if (lr == 'left'):
            pointOpt = self.camLeftP.graficToOptical(point2d)
        elif (lr == 'right'):
            pointOpt = self.camRightP.graficToOptical(point2d)
        else:
            print("Invalid camera")
            pointOpt = None
        return pointOpt

    # Backprojects the 2D Point into 3D Space
    def backproject(self, lr, point2d):
        if (lr == 'left'):
            point3D = self.camLeftP.backproject(point2d)
        elif (lr == 'right'):
            point3D = self.camRightP.backproject(point2d)
        else:
            print("Invalid camera")
            point3D = None
        return point3D

    # Get Camera Position from ROS Driver Camera
    def getCameraPosition(self, lr):
        if (lr == 'left'):
            position_cam = self.camLeftP.getCameraPosition()
        elif (lr == 'right'):
            position_cam = self.camRightP.getCameraPosition()
        else:
            print("Invalid camera")
            position_cam = None

        return position_cam

    # Projects a 3D Point onto the Image
    def project(self, lr, point3d):
        if (lr == 'left'):
            projected = self.camLeftP.project(point3d)
        elif (lr == 'right'):
            projected = self.camRightP.project(point3d)
        else:
            print("Invalid camera")
            projected = None

        return projected

    # Get Image Coordinates
    def opticalToGrafic(self, lr, point2d):
        if (lr == 'left'):
            point = self.camLeftP.opticalToGrafic(point2d)
        elif (lr == 'right'):
            point = self.camRightP.opticalToGrafic(point2d)
        else:
            print("Invalid camera")
            point = None

        return point

    def project3DScene(self, point3d):
        phi = 90
        tetha = 0
        alpha = -90
        cos_phi = math.cos(math.radians(phi))
        sin_phi = math.sin(math.radians(phi))
        cos_tetha = math.cos(math.radians(tetha))
        sin_tetha = math.sin(math.radians(tetha))
        cos_alpha = math.cos(math.radians(alpha))
        sin_alpha = math.sin(math.radians(alpha))
        px = ((cos_phi * cos_tetha) * point3d[0] +
              (sin_phi * sin_alpha - cos_phi * sin_tetha * cos_alpha) *
              point3d[1] +
              (cos_phi * sin_tetha * sin_alpha + sin_phi * cos_alpha) *
              point3d[2]) / 100.0
        py = (sin_tetha * point3d[0] + (cos_tetha * cos_alpha) * point3d[1] +
              (-cos_tetha * sin_alpha) * point3d[2]) / 100.0 + 12
        pz = ((-sin_phi * cos_tetha) * point3d[0] +
              (sin_phi * sin_tetha * cos_alpha + cos_phi * sin_alpha) *
              point3d[1] +
              (cos_phi * cos_alpha - sin_phi * sin_tetha * sin_alpha) *
              point3d[2]) / 100.0

        outPoint = np.array([px, py, pz])
        return outPoint
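With the fixed angles used here (phi = 90, tetha = 0, alpha = -90) the trigonometric factors collapse to 0, 1 and -1, so the whole rotation reduces to an axis permutation: px = -y/100, py = z/100 + 12, pz = -x/100. A compact equivalent, derived purely from the code above:

import numpy as np

def project3DScene_simplified(point3d):
    # Same result as project3DScene above for phi=90, tetha=0, alpha=-90
    return np.array([-point3d[1] / 100.0,
                      point3d[2] / 100.0 + 12,
                     -point3d[0] / 100.0])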