Example #1
import cv2
import numpy as np
from Env import Robot, CubesManager


def collect_data():
    # Collect (cube pose, RGB image, depth map) training samples.
    # MAX_PICTURE_NUM is assumed to be defined elsewhere in the original module.
    robot = Robot()
    cubm = CubesManager()
    for i in range(MAX_PICTURE_NUM):
        cubm.reset_cube(rand=True)                        # respawn the cube at a random pose
        Box_position = cubm.read_cube_pose("demo_cube")   # ground-truth cube pose, used as the label
        joint, view = robot.get_state()
        rgb, dep = robot.get_rgb_dep()                    # camera RGB and depth frames
        rgb = cv2.resize(rgb, (224, 224))
        dep = cv2.resize(dep, (224, 224))
        # The cube pose is encoded in the file name.
        cv2.imwrite(
            "/home/ljt/Desktop/images/rgb/" + str(Box_position) + ".png", rgb)
        np.save("/home/ljt/Desktop/images/dep/" + str(Box_position), dep)
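

# Minimal entry-point sketch (not part of the original Example #1): run the data
# collection once, following the `if __name__ == '__main__':` pattern used in the
# other examples.
if __name__ == '__main__':
    collect_data()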
Example #2
import math
from camera import RGBD
from MofanDDPG import DDPG
from Env import Robot, CubesManager
import copy
import numpy as np

MAX_EPISODES = 900
MAX_EP_STEPS = 5
ON_TRAIN = True

if __name__ == '__main__':
    # set env
    robot = Robot()
    cubm = CubesManager()
    observation_dim = 3
    action_dim = 3
    action_bound = (-1, 1)

    # set RL method (continuous)
    rl = DDPG(action_dim, observation_dim, action_bound)
    number = 0
    steps = []
    # start training
    for i in range(MAX_EPISODES):

        cubm.reset_cube(rand=True)
        Box_position = cubm.read_cube_pose("demo_cube")
        print "cube position:", Box_position
        robot.Box_position = copy.deepcopy(Box_position)
        now_position = robot.gripper.get_current_pose(
            "gripper_link").pose.position  # current gripper position
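        # Hypothetical continuation (the original example is cut off here): build
        # a 3-D observation from the gripper-to-cube offset and run one episode.
        # The choose_action/step/store_transition/learn calls and the structure
        # of Box_position are assumptions, not this repository's actual API.
        s = np.array([Box_position[0] - now_position.x,
                      Box_position[1] - now_position.y,
                      Box_position[2] - now_position.z])
        for j in range(MAX_EP_STEPS):
            a = rl.choose_action(s)           # assumed: action from the DDPG actor
            s_, r, done = robot.step(a)       # assumed: apply the action in simulation
            rl.store_transition(s, a, r, s_)  # assumed: replay-buffer insert
            if ON_TRAIN:
                rl.learn()
            s = s_
            if done:
                break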
Example #3
import math
from camera import RGBD
from DQN import DQN
from Env import Robot, CubesManager
import copy
import rospy


# Simple smoke test: spawn the cube at its default pose, run the robot's
# test1() routine, then reset the robot.
robot = Robot()
cubm = CubesManager()
cubm.reset_cube(rand=False)
robot.test1()
robot.reset()
Example #4
import torch
from camera import RGBD
from DDPG import DDPG
from DQN import DQN
from Env import Robot, CubesManager
import copy
import rospy
MAX_EPISODES = 5000
MAX_EP_STEPS = 100
MEMORY_CAPACITY = 1000
if __name__ == "__main__":
    robot = Robot()
    s_dim = robot.state_dim
    a_dim = robot.action_dim
    a_bound = robot.action_bound
    cubm = CubesManager()
    rl = DQN()

    rl.eval_net = torch.load('eval_dqn.pkl')
    rl.target_net = torch.load('target_dqn.pkl')

    robot.reset()
    start_position = robot.gripper.get_current_pose(
        "gripper_link").pose.position  # initial gripper position
    st = 0
    rw = 0
    for i in range(1, MAX_EPISODES):
        cubm.reset_cube(rand=True)
        Box_position = cubm.read_cube_pose("cube1")  # get the cube position
        robot.Box_position = copy.deepcopy(Box_position)
Example #5
        """
        goal = control_msgs.msg.GripperCommandGoal()
        goal.command.position = OPENED_POS
        self._client.send_goal_and_wait(goal, rospy.Duration(10))

    def close(self, width=0.0, max_effort=MAX_EFFORT):
        """Closes the gripper.

        Args:
            width: The target gripper width, in meters. (Might need to tune to
                make sure the gripper won't damage itself or whatever it's
                gripping.)
            max_effort: The maximum effort, in Newtons, to use. Note that this
                should not be less than 35N, or else the gripper may not close.
        """
        assert CLOSED_POS <= width <= OPENED_POS
        goal = control_msgs.msg.GripperCommandGoal()
        goal.command.position = width
        goal.command.max_effort = max_effort
        self._client.send_goal_and_wait(goal, rospy.Duration(10))
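
# Hypothetical usage of the gripper methods above (the enclosing class name is
# not shown in this fragment, so `Gripper` below is an assumption):
#
#     gripper = Gripper()
#     gripper.open()
#     gripper.close(width=0.05, max_effort=60)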


if __name__ == "__main__":
    test = CubesManager()
    for i in range(1, 10):
        print(i)
        test.reset_cube(rand=True)  # respawn the cube at a random pose
        rospy.sleep(1)