Example #1
    def control_propeller_thrust(self, num_p) -> None:
        """ set thrust for the particular propeller, num_p is the number of the propeller(1 to 4) """

        particleVelocity = self.velocities[num_p - 1]
        s = sim.simGetObjectSizeFactor(
            self.force_sensors[num_p - 1])  # current size factor
        if s != self.pre_v:  # when the propeller size changes
            self.particleSize = self.particleSize * 0.005 * s
            self.pre_v = s

        self.ts = sim.simGetSimulationTimeStep()

        m = sim.simGetObjectMatrix(self.force_sensors[num_p - 1], -1)

        requiredParticleCnt = self.particleCountPerSecond * self.ts + self.notFullParticles
        self.notFullParticles = requiredParticleCnt % 1
        requiredParticleCnt = np.floor(requiredParticleCnt)

        totalExertedForce = requiredParticleCnt * self.particleDensity * particleVelocity * np.pi * self.particleSize * self.particleSize * self.particleSize / (
            6 * self.ts)
        force = [0, 0, totalExertedForce]
        m[3] = 0
        m[7] = 0
        m[11] = 0
        force = sim.simMultiplyVector(m, force)

        torque = [0, 0, pow(-1, num_p) * 0.002 * particleVelocity]
        torque = sim.simMultiplyVector(m, torque)

        sim.simAddForceAndTorque(self.respondables[num_p - 1], force, torque)
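The total force in Example #1 is the momentum of all particles emitted during one simulation step, divided by the step length (force = N * rho * v * pi * d^3 / (6 * dt)). A minimal, simulator-free sketch of that calculation with assumed (hypothetical) emitter parameters:

import numpy as np

# All values below are assumptions for illustration; the real ones come from the drone model.
particle_count_per_second = 430   # emitter rate [particles/s]
particle_density = 8500           # particle density [kg/m^3]
particle_size = 0.005             # particle diameter [m]
particle_velocity = 5.0           # commanded particle speed [m/s]
ts = 0.05                         # simulation time step [s], cf. simGetSimulationTimeStep()

particles_per_step = particle_count_per_second * ts
# force = particles per step * mass of one spherical particle * velocity / time step
thrust = (particles_per_step * particle_density * particle_velocity
          * np.pi * particle_size ** 3 / (6 * ts))
print(f"thrust along the propeller axis: {thrust:.4f} N")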
Example #2
    def _get_rml_handle(self) -> int:
        dt = sim.simGetSimulationTimeStep()
        limits = np.array(self._arm.get_joint_upper_velocity_limits())
        vel_correction = 1.0
        max_vel = self._arm.max_velocity
        max_accel = self._arm.max_acceleration
        max_jerk = self._arm.max_jerk
        lengths = self._get_path_point_lengths()
        target_pos_vel = [lengths[-1], 0]
        previous_q = self._path_points[0:len(self._arm.joints)]

        while True:
            pos_vel_accel = [0, 0, 0]
            rMax = 0
            rml_handle = sim.simRMLPos(
                1, 0.0001, -1, pos_vel_accel,
                [max_vel * vel_correction, max_accel, max_jerk], [1],
                target_pos_vel)
            state = 0
            while state == 0:
                state, pos_vel_accel = sim.simRMLStep(rml_handle, dt, 1)
                if state >= 0:
                    pos = pos_vel_accel[0]
                    for i in range(len(lengths) - 1):
                        if lengths[i] <= pos <= lengths[i + 1]:
                            t = (pos - lengths[i]) / (lengths[i + 1] -
                                                      lengths[i])
                            # For each joint
                            offset = len(self._arm.joints) * i
                            p1 = self._path_points[offset:offset +
                                                   self._num_joints]
                            offset = len(self._arm.joints) * (i + 1)
                            p2 = self._path_points[offset:offset +
                                                   self._num_joints]
                            dx = p2 - p1
                            qs = p1 + dx * t
                            dq = qs - previous_q
                            previous_q = qs
                            r = np.abs(dq / dt) / limits
                            m = np.max(r)
                            if m > rMax:
                                rMax = m
                            break
            sim.simRMLRemove(rml_handle)
            if rMax > 1.001:
                vel_correction = vel_correction / rMax
            else:
                break
        pos_vel_accel = [0, 0, 0]
        rml_handle = sim.simRMLPos(
            1, 0.0001, -1, pos_vel_accel,
            [max_vel * vel_correction, max_accel, max_jerk], [1],
            target_pos_vel)
        return rml_handle
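Example #2 repeatedly dry-runs the Reflexxes (RML) profile at the simulation timestep, converts each sampled path position into a joint configuration, and shrinks the commanded velocity limit by the worst ratio of per-step joint speed to joint velocity limit until that ratio is at most about 1. A simplified, simulator-free sketch of the same check on made-up trajectory data:

import numpy as np

# Hypothetical data: 6 joint positions sampled once per simulation step.
dt = 0.05                                    # simulation time step [s]
joint_vel_limits = np.full(6, 0.5)           # per-joint velocity limits [rad/s]
q_samples = np.cumsum(np.random.uniform(-0.02, 0.02, size=(200, 6)), axis=0)

# Worst-case ratio of actual joint speed to its velocity limit.
dq = np.abs(np.diff(q_samples, axis=0))
r_max = np.max((dq / dt) / joint_vel_limits)

# Same correction rule as in the loop above: scale the velocity limit down by r_max.
vel_correction = 1.0 if r_max <= 1.001 else 1.0 / r_max
print("velocity correction factor:", vel_correction)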
Example #3
    def start_video_recording(
        self,
        filename="{sensor_name}_video_{timestamp}.avi",
        fourcc="DIVX",
        timestamp_format="%Y-%m-%d_%H%M%S%f",
    ):
        """
        Starts video recording that records each simulation step.

        :param filename: filename with optional 'sensor_name' and 'timestamp'
                         placeholders.
        :type filename: str
        :param fourcc: The fourcc of the video codec.
        :type fourcc: str
        :param timestamp_format: formatstring for the timestamp
        :type timestamp_format: str
        """

        if self._video_callback_ids:
            logger.warning("Cannot start multiple video recordings.")
        else:
            # calculate fps from timestep
            fps = round(1.0 / sim.simGetSimulationTimeStep())
            # codec
            fourcc = cv2.VideoWriter_fourcc(*fourcc)
            # setup video writer for each sensor
            for device in self._devices:
                writer = cv2.VideoWriter(
                    filename.format(
                        sensor_name=device.sensor_name,
                        timestamp=datetime.now().strftime(timestamp_format),
                    ),
                    fourcc,
                    fps,
                    device.resolution,
                )
                # add callback
                callback = self.create_video_callback(writer)
                callback_id = device.add_callback(callback)
                self._video_callback_ids.append(callback_id)
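Example #3 derives the video frame rate from the simulation step (fps = round(1 / dt)) so that one frame is written per step and the recording plays back in simulated real time. A standalone OpenCV sketch with an assumed 50 ms timestep and random frames in place of sensor images:

import cv2
import numpy as np

dt = 0.05                                 # assumed simulation time step [s]
fps = round(1.0 / dt)                     # one video frame per simulation step
fourcc = cv2.VideoWriter_fourcc(*"DIVX")
writer = cv2.VideoWriter("test_video.avi", fourcc, fps, (128, 128))
for _ in range(2 * fps):                  # two simulated seconds of frames
    frame = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
    writer.write(frame)
writer.release()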
Example #4
    def _step_motion(self) -> int:
        dt = sim.simGetSimulationTimeStep()
        lengths = self._get_path_point_lengths()
        state, posVelAccel = sim.simRMLStep(self._rml_handle, dt, 1)
        if state >= 0:
            pos = posVelAccel[0]
            for i in range(len(lengths) - 1):
                if lengths[i] <= pos <= lengths[i + 1]:
                    t = (pos - lengths[i]) / (lengths[i + 1] - lengths[i])
                    # For each joint
                    offset = len(self._arm.joints) * i
                    p1 = self._path_points[
                        offset:offset + len(self._arm.joints)]
                    offset = self._arm._num_joints * (i + 1)
                    p2 = self._path_points[
                        offset:offset + len(self._arm.joints)]
                    dx = p2 - p1
                    qs = p1 + dx * t
                    self._arm.set_joint_target_positions(qs)
                    break
        if state == 1:
            sim.simRMLRemove(self._rml_handle)
        return state
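Example #4 (like the inner loop of Example #2) maps the scalar path position returned by simRMLStep back to a joint configuration by linear interpolation between the two neighbouring path points. A small, simulator-free sketch of that mapping on hypothetical data:

import numpy as np

def interpolate_joints(pos, lengths, path_points, num_joints):
    """Linearly interpolate the joint configuration at path position `pos`."""
    for i in range(len(lengths) - 1):
        if lengths[i] <= pos <= lengths[i + 1]:
            t = (pos - lengths[i]) / (lengths[i + 1] - lengths[i])
            p1 = path_points[i * num_joints:(i + 1) * num_joints]
            p2 = path_points[(i + 1) * num_joints:(i + 2) * num_joints]
            return p1 + (p2 - p1) * t
    return path_points[-num_joints:]      # past the last waypoint: final configuration

lengths = np.array([0.0, 0.3, 0.7, 1.0])          # cumulative path lengths
path_points = np.random.uniform(-1, 1, 4 * 6)     # 4 waypoints x 6 joints, flattened
print(interpolate_joints(0.5, lengths, path_points, num_joints=6))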
Example #5
File: pyrep.py  Project: wkoa/PyRep
    def get_simulation_timestep(self) -> float:
        """Gets the simulation time step.

        :return: The time step value in seconds.
        """
        return sim.simGetSimulationTimeStep()
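A minimal usage sketch for the wrapper in Example #5, assuming a working CoppeliaSim/PyRep installation and a local scene file named scene.ttt (the scene name is an assumption):

from pyrep import PyRep

pr = PyRep()
pr.launch("scene.ttt", headless=True)   # hypothetical scene file
pr.start()
print("simulation time step:", pr.get_simulation_timestep(), "s")
pr.stop()
pr.shutdown()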
Example #6
    def __init__(self,
                 random,
                 headless=True,
                 use_vision_sensor=False,
                 seed=42,
                 state="Normal",
                 SCENE_FILE=None,
                 reward_function_name='Normal',
                 buffer_size=None,
                 neta=0.9,
                 restart=False):

        if reward_function_name not in list(rew_functions2.import_dict.keys()):
            print(
                "Wrong parameter passed for 'reward_function_name'. Must be one of these: ",
                list(rew_functions2.import_dict.keys()))

        if SCENE_FILE is None:
            if use_vision_sensor:
                SCENE_FILE = dirname(abspath(
                    __file__)) + '/../../scenes/ardrone_modeled_headless_vision_sensor.ttt'  ## FIX
            else:
                SCENE_FILE = dirname(abspath(
                    __file__)) + '/../../scenes/ardrone_modeled_headless.ttt'  ## FIX
        else:
            SCENE_FILE = dirname(abspath(__file__)) + SCENE_FILE

        assert state in ['Normal', 'New_Double', 'New_action']
        assert random in [False, 'Gaussian', 'Uniform', 'Discretized_Uniform'], \
            "random should be one of these values: [False, 'Gaussian', 'Uniform', 'Discretized_Uniform']"
        # assert random in [False, 'Gaussian', 'Uniform','Weighted','Discretized_Uniform'], \
        # "random should be one of these values [False, 'Gaussian', 'Uniform', 'Weighted','Discretized_Uniform]"

        ## Setting Pyrep
        self.pr = PyRep()
        self.pr.launch(SCENE_FILE, headless=headless)
        self.pr.start()
        self.agent = Drone(count=0,
                           name="Quadricopter",
                           num_joints=4,
                           use_vision_sensor=use_vision_sensor)
        self.agent.set_control_loop_enabled(
            False)  ## Disables the built-in PID control on V-REP motors
        #self.agent.set_motor_locked_at_zero_velocity(True)  ## When the force is set to zero, it locks the motor to prevent drifting
        self.agent.set_motor_locked_at_zero_velocity(
            False
        )  ## Do not lock the motors in place when the target velocity is zero
        self.target = Shape('Quadricopter_target')

        ##Attributes

        self.action_space = self.agent.action_space

        if state == 'New_action':
            self.observation_space = 22
        elif state == 'New_Double':
            self.observation_space = 36
        else:
            self.observation_space = self.agent.observation_space
        self.restart = restart
        self.random = random
        self.dt = np.round(sim.simGetSimulationTimeStep(), 4)  ## timestep

        ## Integrative buffer
        self.buffer_size = buffer_size
        # if self.buffer_size:
        #     assert (isinstance(buffer_size,int)) and (buffer_size < 100)
        #     self.integrative_buffer_size = self.buffer_size
        #     self.integrative_buffer = np.empty((self.buffer_size, 3)) # 3 because its [x,y,z] or [roll,pitch,yaw]
        #     self.neta = neta

        self.state = state
        ## initial observation
        self._initial_state()
        self.first_obs = True  ## hack so it is not computed on the first observation
        self._make_observation()
        self.last_state = self.observation[:18]

        self.weighted = False

        # Creating lists
        if self.random == 'Discretized_Uniform':
            self._create_discretized_uniform_list()
        self.ptr = 0
        # self._creating_initialization_list()

        ## Setting seed
        self.seed = seed
        self._set_seed(self.seed, self.seed)

        ## Reward Functions
        self.reward_function = partial(
            rew_functions2.reward,
            rew_functions2.import_dict[reward_function_name]['weight_list'])
        keys = ["r_alive", "radius", "pitch", "yaw", "roll", "pitch_vel",
                "yaw_vel", "roll_vel", "lin_x_vel", "lin_y_vel", "lin_z_vel",
                "norm_a", "std_a", "death", "integrative_error_x",
                "integrative_error_y", "integrative_error_z"]
        self.weight_dict = dict(
            zip(keys,
                rew_functions2.import_dict[reward_function_name]['weight_list']))