Example #1
    def __init__(self, clientID, suffix=""):
        # vrep.simxFinish(-1) # just in case, close all opened connections
        # self._clientID = vrep.simxStart('127.0.0.1',19997, True, True, 5000, 5) # Connect to V-REP
        self._clientID = clientID
        self.suffix = suffix
        _, self._left_joint = vrep.simxGetObjectHandle(self._clientID, 'ePuck_leftJoint' + suffix, vrep.simx_opmode_oneshot_wait)
        _, self._right_joint = vrep.simxGetObjectHandle(self._clientID, 'ePuck_rightJoint' + suffix, vrep.simx_opmode_oneshot_wait)
        _, self._light_sensor = vrep.simxGetObjectHandle(self._clientID, 'ePuck_lightSensor' + suffix, vrep.simx_opmode_oneshot_wait)
        _, self._camera = vrep.simxGetObjectHandle(self._clientID, 'ePuck_camera' + suffix, vrep.simx_opmode_oneshot_wait)
        
        self._prox_handles = []
        for i in range(1,9):
            _, p = vrep.simxGetObjectHandle(self._clientID, 'ePuck_proxSensor' + str(i) + suffix, vrep.simx_opmode_oneshot_wait)
            self._prox_handles.append(p)
        
        # First calls with simx_opmode_streaming
        for i in range(8):
            vrep.simxReadProximitySensor(self._clientID, self._prox_handles[i], vrep.simx_opmode_streaming)
        _, self.camera_resolution, _ = vrep.simxGetVisionSensorImage(self._clientID, self._camera, options=0, operationMode=vrep.simx_opmode_streaming)
        _, self.light_sensor_resolution, _ = vrep.simxGetVisionSensorImage(self._clientID, self._light_sensor, options=0, operationMode=vrep.simx_opmode_streaming)
        
        _, self._body = vrep.simxGetObjectHandle(self._clientID, "ePuck_bodyElements" + suffix, vrep.simx_opmode_oneshot_wait)
        self.wheel_diameter = 4.25 * 10 ** -2
        self.base_lenght = 7 * 10 ** -2
        
        self._prox_aliases = {"all" : range(8),
                              "all-but-rear" : range(6),
                              "front" : [2, 3],
                              "rear" : [6, 7],
                              "front-left" : [0, 1, 2],
                              "front-right": [3, 4, 5]}
        
        self.no_detection_value = 2000.

        self._fwd_spd, self._rot_spd = 0., 0.
        self._left_spd, self._right_spd =  0., 0.

        self.fwd_spd, self.rot_spd = 0., 0.

        vrep.simxGetFloatSignal(self._clientID, "CurrentTime", vrep.simx_opmode_streaming)

        self.freq = 100
        self._behaviors = {}
        self._runnings = {}
        self._to_detach = {}

        self._sensations = {}
        self._conditions = {}

        self._registered_objects = {}

        _, _, _ , _, _ = vrep.simxGetObjectGroupData(self._clientID, vrep.sim_object_shape_type, 0, vrep.simx_opmode_streaming)

        sleep(0.5)
        self.register_all_scene_objects()
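The constructor above only primes the streams; later frames are fetched with simx_opmode_buffer. A minimal follow-up sketch along those lines (the standalone helper read_camera_frame is not part of the original code):

import numpy as np
import vrep  # legacy V-REP remote API bindings

def read_camera_frame(client_id, camera_handle):
    # Fetch the most recent frame from a stream previously opened with
    # simx_opmode_streaming (as done in __init__ above).
    code, resolution, image = vrep.simxGetVisionSensorImage(
        client_id, camera_handle, 0, vrep.simx_opmode_buffer)
    if code != vrep.simx_return_ok:
        return None  # nothing buffered yet
    frame = np.array(image, dtype=np.uint8)
    frame.resize(resolution[0], resolution[1], 3)  # same reshape convention as the examples below
    return frame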
Example #2
 def get_image(self, cam_handle):
     (code, res, im) = vrep.simxGetVisionSensorImage(self.ID, cam_handle, 0, const_v.simx_opmode_buffer)
     # print(code)
     img = np.array(im, dtype=np.uint8)
     img.resize([res[0], res[1], 3])
     img = cv2.flip(img, 0)
     return img
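get_image flips the frame vertically because the remote API delivers pixel rows bottom-up, and the data arrives in RGB order, so a conversion is usually needed before handing the frame to OpenCV routines that expect BGR. A usage sketch, assuming an env object exposing get_image and a cam_handle attribute as in Example #7:

import cv2

frame = env.get_image(env.cam_handle)           # upright RGB frame
bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)    # OpenCV I/O expects BGR
cv2.imwrite('frame.png', bgr)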
Example #3
    def get_image_vrep_buffering(self):
        ret, res, image = vrep.simxGetVisionSensorImage(
            self.vrep_connection.client_id, self.vision_sensor_handle, 0,
            vrep.simx_opmode_buffer)

        if ret == vrep.simx_return_ok:
            return image, res
        else:
            raise Exception(
                "Failed to get vision sensor image. Vision sensor name: " +
                self.vision_sensor_name)
Example #4
    def raw_image(self, is_grey_scale=False):
        """
        Retrieves the image of a vision sensor.
        @return the image data
        """
        num_of_clr = 3
        if is_grey_scale:
            num_of_clr = 1

        code, resolution, image = v.simxGetVisionSensorImage(
            self._id, self._handle, int(is_grey_scale), self._def_op_mode)
        return resolution, image
Example #5
 def activate_vrep_image_streaming(self):
     ret, res, image = vrep.simxGetVisionSensorImage(
         self.vrep_connection.client_id, self.vision_sensor_handle, 0,
         vrep.simx_opmode_streaming)
     if ret == vrep.simx_return_ok:
         print('Started streaming image')
         return image, res
     elif ret == vrep.simx_return_remote_error_flag or ret == vrep.simx_return_local_error_flag:
         raise Exception(
             "Failed to stream vision sensor image. Vision sensor name: " +
             self.vision_sensor_name)
     else:
         print('Possible started streaming image')
         return [], [0, 0]
Example #6
 def get_vision_sensor_image(self, handle, first_call=False):
     opmode = vrep.simx_opmode_streaming if first_call else vrep.simx_opmode_buffer
     status, resolution, image = vrep.simxGetVisionSensorImage(
         self.id, handle, 0, opmode)
     self.check_return_code(status, tolerance=vrep.simx_return_novalue_flag)
     return resolution, image
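A typical call pattern for this wrapper is one priming call with first_call=True, a short pause so the server can deliver the first frame, then buffered reads inside the control loop. A sketch under those assumptions (robot, cam_handle, and process are placeholders, not names from the source):

import time

robot.get_vision_sensor_image(cam_handle, first_call=True)  # opens the stream
time.sleep(0.1)                                             # let the first frame arrive
while True:                                                 # control loop
    resolution, image = robot.get_vision_sensor_image(cam_handle)
    process(resolution, image)                              # hypothetical downstream handler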
Example #7
    def __init__(self):
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        self.out = cv2.VideoWriter('output.mp4', fourcc, 15.0, (1024, 1024))

        self.DoF = 6
        # self.action_bound = [[-15,15],[-10,110],[-30,30],[-120,120],[-180,180],[-180,180]]
        self.action_bound = [[-180, 180], [-180, 180], [-180, 180],
                             [-180, 180], [-180, 180], [-180, 180]]
        self.action_range = [-5, 5]
        self.action_dim = self.DoF

        self.action_space = gym.spaces.Box(shape=(self.DoF, ), low=-5, high=5)
        self.observation_space = gym.spaces.Box(shape=(3 + 6 + self.DoF * 2, ),
                                                low=-180,
                                                high=180)
        self.action_dim = self.action_space.shape[0]
        self.state_dim = self.observation_space.shape[0]

        p = subprocess.Popen(['ps', '-A'], stdout=subprocess.PIPE)
        out, err = p.communicate()
        for line in out.splitlines():
            if b'vrep' in line:
                pid = int(line.split(None, -1)[0])
                os.kill(pid, signal.SIGKILL)

        self.vrep_root = "/home/ali/Downloads/VREP"
        self.scene_file = "/home/ali/RL_code/env/rozum_model.ttt"
        #
        os.chdir(self.vrep_root)
        os.system("./vrep.sh -s " + self.scene_file + " &")

        vrep.simxFinish(-1)
        time.sleep(1)

        # get the ID of the running simulation
        self.ID = vrep.simxStart('127.0.0.1', 19999, True, False, 5000, 5)
        # check the connection
        if self.ID != -1:
            print("Connected")
        else:
            sys.exit("Error")
        # get handles
        # for camera
        self.cam_handle = self.get_handle('Vision_sensor')
        (code, res,
         im) = vrep.simxGetVisionSensorImage(self.ID, self.cam_handle, 0,
                                             const_v.simx_opmode_streaming)

        self.render_handle = self.get_handle('render')
        (code, res,
         im) = vrep.simxGetVisionSensorImage(self.ID, self.render_handle, 0,
                                             const_v.simx_opmode_streaming)

        # joints
        self.joint_handles = []
        for i in range(self.DoF):
            tmp = self.get_handle("joint%d" % (i))
            self.joint_handles.append(tmp)
            code, angle = vrep.simxGetJointPosition(
                self.ID, tmp, const_v.simx_opmode_streaming)

        # gripper tip
        self.tip_handle = self.get_handle("Tip")
        (code,
         pose) = vrep.simxGetObjectPosition(self.ID, self.tip_handle, -1,
                                            const_v.simx_opmode_streaming)
        self.or_handle = self.get_handle("RG2_baseVisible")
        (code,
         pose) = vrep.simxGetObjectOrientation(self.ID, self.or_handle, -1,
                                               const_v.simx_opmode_streaming)
        # cube
        self.cube_handle = self.get_handle("Cube")
        (code,
         pose) = vrep.simxGetObjectPosition(self.ID, self.cube_handle, -1,
                                            const_v.simx_opmode_streaming)
        (code,
         pose) = vrep.simxGetObjectOrientation(self.ID, self.cube_handle, -1,
                                               const_v.simx_opmode_streaming)
        # get the goal handle
        self.goal_handle = self.get_handle("Goal")
        (code,
         pose) = vrep.simxGetObjectPosition(self.ID, self.goal_handle, -1,
                                            const_v.simx_opmode_streaming)

        # angles' array
        self.angles = self.get_angles()

        # gripper handles (used in closing and opening gripper)
        self.gripper_motor = self.get_handle('RG2_openCloseJoint')
        # task part
        self.task_part = 0

        self.init_angles = self.get_angles()
        self.init_orientation = self.get_orientation(self.or_handle)
        self.init_pose_cube = self.get_position(self.cube_handle)
        # print(self.init_pose_cube)
        self.init_goal_pose = self.get_position(self.goal_handle)
        # print(self.init_goal_pose)
        self.open_gripper()
        self.reset()
        self.tip_position = self.get_position(self.tip_handle)

        self.goal_l = (80, 0, 0)
        self.goal_u = (120, 255, 255)
        self.cube_l = (55, 50, 50)
        self.cube_u = (80, 255, 255)
        self.er_kernel = np.ones((2, 2), np.uint8)
        self.di_kernel = np.ones((2, 22), np.uint8)
        self.task_part = 0
        self.part_1_center = np.array([300.0, 335.0])
        self.part_2_center = np.array([320.0, 290.0])
        self.part_1_area = 0.25
        self.part_2_area = 0.75
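The constructor above opens streams for both vision sensors and creates a cv2.VideoWriter, but the recording step itself is not shown. A hypothetical sketch of such a step, reusing the buffer-mode read and conversion from Example #2 (the method name record_frame is not from the source, and the 'render' sensor is assumed to produce 1024x1024 frames to match the writer):

    def record_frame(self):
        # Read the latest frame from the 'render' vision sensor and append it
        # to the mp4 writer created in __init__.
        code, res, im = vrep.simxGetVisionSensorImage(
            self.ID, self.render_handle, 0, const_v.simx_opmode_buffer)
        if code != vrep.simx_return_ok:
            return  # no frame buffered yet
        img = np.array(im, dtype=np.uint8)
        img.resize([res[0], res[1], 3])
        img = cv2.flip(img, 0)                                 # remote API frames are bottom-up
        self.out.write(cv2.cvtColor(img, cv2.COLOR_RGB2BGR))   # VideoWriter expects BGR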
Example #8
    def step(self, action):
        # Activate the motors
        vrep.simxSetJointTargetVelocity(self.client_id, self.left_motor_handle,
                                        action[0], vrep.simx_opmode_blocking)
        vrep.simxSetJointTargetVelocity(self.client_id,
                                        self.right_motor_handle, action[1],
                                        vrep.simx_opmode_blocking)

        # Get observations
        observations = {}
        observations['proxy_sensor'] = []
        observations['light_sensor'] = []

        # Fetch the vals of proxy sensors
        for sensor in self.proxy_sensors:
            _, _, detectedPoint, _, _ = vrep.simxReadProximitySensor(
                self.client_id, sensor, vrep.simx_opmode_blocking)
            # Append to list of values
            observations['proxy_sensor'].append(np.linalg.norm(detectedPoint))

        # Fetch the vals of light sensors
        for sensor in self.light_sensors:
            # Fetch the initial value in the suggested mode
            _, _, image = vrep.simxGetVisionSensorImage(
                self.client_id, sensor, 1, vrep.simx_opmode_blocking)
            # extract image from list
            image = image[0] if len(image) else -1
            # Append to the list of values
            observations['light_sensor'].append(image)

        # vrep gives a positive value for the black strip and negative for the
        # floor so convert it into 0 and 1

        observations['light_sensor'] = np.asarray(observations['light_sensor'])
        observations['light_sensor'] = np.sign(observations['light_sensor'])

        # Assign reward
        reward = {}

        # For light sensors
        # If any of the center 2 sensors is 1 give high reward
        if (observations['light_sensor'][[3, 4]] > 0).any():
            reward['light_sensor'] = 5
        # If any of second, third, sixth or seventh is 1
        elif (observations['light_sensor'][[1, 2, 5, 6]] > 0).any():
            reward['light_sensor'] = 2
        # If first or last are high
        elif (observations['light_sensor'][[0, 7]] > 0).any():
            reward['light_sensor'] = 0
        # Bot is completly out of line
        else:
            reward['light_sensor'] = -5

        # For proximity sensors
        reward['proxy_sensor'] = 0

        # Should be rewarded for movement
        r = np.sum(np.sign(action)) * 2

        reward['light_sensor'] += r
        reward['proxy_sensor'] += r
        # reward['combined'] += r

        return observations, reward
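step() drives both wheels in blocking mode and returns per-sensor observations and rewards, so a minimal control loop looks like the sketch below (the environment class name LineFollowerEnv and the random policy are placeholders):

import numpy as np

env = LineFollowerEnv()                              # hypothetical env exposing step()
for _ in range(1000):
    action = np.random.uniform(-2.0, 2.0, size=2)    # left/right wheel target velocities
    observations, reward = env.step(action)
    total_reward = reward['light_sensor'] + reward['proxy_sensor']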
Example #9
 def floor_sensor(self):
     thresh = 0.
     _, _, image = vrep.simxGetVisionSensorImage(self._clientID, self._light_sensor, options=0, operationMode=vrep.simx_opmode_buffer)
     # Compare three sampled values of the ground-facing sensor image against the threshold.
     return image[0] > thresh, image[21] > thresh, image[93] > thresh
Example #10
 def light_sensor_image(self):
     _, resolution, image = vrep.simxGetVisionSensorImage(self._clientID, self._light_sensor, options=0, operationMode=vrep.simx_opmode_buffer)
     image = array(image)
     image.resize(resolution[0], resolution[1], 3)
     return image   
Example #11
returnCode, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = vrep.simxReadProximitySensor(
    clientID, sensor1, vrep.simx_opmode_buffer)
# returnCode, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector = vrep.simxReadProximitySensor(clientID, sensor1, vrep.simx_opmode_streaming)
print(returnCode, detectionState, detectedPoint, detectedObjectHandle, detectedSurfaceNormalVector)
#
vrep.simxSetJointTargetVelocity(clientID, left_Motor, 0,
                                vrep.simx_opmode_streaming)
##time.sleep(5)
vrep.simxSetJointTargetVelocity(clientID, right_Motor, 0,
                                vrep.simx_opmode_streaming)
##vrep.simxGetCollectionHandle() (clientID,'Pioneer_p3dx',vrep.simxServiceCall())

a = vrep.simxGetObjects(clientID, vrep.sim_handle_all,
                        vrep.simx_opmode_oneshot_wait)
#print a
#vrep.simxGetVisionSensorImage(clientID,cam1,,simx_opmode_streaming)
returnCode, resolution, image = vrep.simxGetVisionSensorImage(
    clientID, cam1, 0, vrep.simx_opmode_streaming)
time.sleep(1)
returnCode, resolution, image = vrep.simxGetVisionSensorImage(
    clientID, cam1, 0, vrep.simx_opmode_buffer)

import numpy as np
from matplotlib import pyplot as plt

im = np.array(image, dtype=np.uint8)

im.resize([resolution[0], resolution[1], 3])
print(im.shape)
plt.imshow(im, origin='lower')
#for i in range(0,len(a)):
#    returnCode, childObjectHandle=vrep.simxGetObjectChild(clientID,carro,i,vrep.simx_opmode_blocking)
#    print childObjectHandle
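The one-second sleep between the streaming call and the buffer call gives the server time to deliver the first frame; an alternative is to poll the return code until data is available. A sketch assuming the same clientID and cam1 as above:

import time

vrep.simxGetVisionSensorImage(clientID, cam1, 0, vrep.simx_opmode_streaming)
while True:
    returnCode, resolution, image = vrep.simxGetVisionSensorImage(
        clientID, cam1, 0, vrep.simx_opmode_buffer)
    if returnCode == vrep.simx_return_ok:
        break                 # first frame has arrived
    time.sleep(0.05)          # back off briefly before polling again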
Example #12
    def __init__(self):
        self.DoF = 6
        self.action_bound = [-5, 5]
        self.action_dim = self.DoF

        # os.chdir("/Vrep_server")
        # os.system("git pull")
        # os.chdir("/")
        #
        # self.vrep_root = "/V-REP_PRO_EDU_V3_5_0_Linux/"
        # self.scene_file = "/Vrep_server/env/rozum_model.ttt"
        #
        # os.chdir(self.vrep_root)
        # os.system("xvfb-run --auto-servernum --server-num=1 -s \"-screen 0 640x480x24\" ./vrep.sh -h -s " + self.scene_file + " &")

        vrep.simxFinish(-1)
        time.sleep(1)

        # get the ID of the running simulation
        self.ID = vrep.simxStart('127.0.0.1', 19999, True, False, 5000, 5)
        # check the connection
        if self.ID != -1:
            print("Connected")
        else:
            sys.exit("Error")
        # get handles
        # for camera
        self.cam_handle = self.get_handle('Vision_sensor')
        (code, res,
         im) = vrep.simxGetVisionSensorImage(self.ID, self.cam_handle, 0,
                                             const_v.simx_opmode_streaming)

        # joints
        self.joint_handles = []
        for i in range(self.DoF):
            tmp = self.get_handle("joint%d" % (i))
            self.joint_handles.append(tmp)
            code, angle = vrep.simxGetJointPosition(
                self.ID, tmp, const_v.simx_opmode_streaming)

        # gripper tip
        self.tip_handle = self.get_handle("Tip")
        (code,
         pose) = vrep.simxGetObjectPosition(self.ID, self.tip_handle, -1,
                                            const_v.simx_opmode_streaming)
        # cube
        self.cube_handle = self.get_handle("Cube")
        (code,
         pose) = vrep.simxGetObjectPosition(self.ID, self.cube_handle, -1,
                                            const_v.simx_opmode_streaming)
        # get the goal handle
        self.goal_handle = self.get_handle("Goal")
        (code,
         pose) = vrep.simxGetObjectPosition(self.ID, self.goal_handle, -1,
                                            const_v.simx_opmode_streaming)

        # angles' array
        self.angles = self.get_angles()

        # gripper handles (used in closing and opening gripper)
        self.gripper_motor = self.get_handle('RG2_openCloseJoint')
        # task part
        self.task_part = 0

        self.init_angles = self.get_angles()

        self.init_pose_cube = self.get_position(self.cube_handle)
        # print(self.init_pose_cube)
        self.init_goal_pose = self.get_position(self.goal_handle)
        # print(self.init_goal_pose)
        self.open_gripper()
        self.reset()
        self.tip_position = self.get_position(self.tip_handle)

        self.goal_l = (80, 0, 0)
        self.goal_u = (120, 255, 255)
        self.cube_l = (55, 50, 50)
        self.cube_u = (80, 255, 255)
        self.er_kernel = np.ones((2, 2), np.uint8)
        self.di_kernel = np.ones((2, 22), np.uint8)
        self.task_part = 0
        self.part_1_center = np.array([300.0, 335.0])
        self.part_2_center = np.array([320.0, 290.0])
        self.part_1_area = 0.25
        self.part_2_area = 0.75
Example #13
 def camera_image(self):
     _, resolution, image = vrep.simxGetVisionSensorImage(self._clientID, self._camera, options=0, operationMode=vrep.simx_opmode_buffer)
     image = array(image)  # convert the flat pixel list before reshaping (as in Example #10)
     image.resize(resolution[0], resolution[1], 3)
     return image