Example #1
def basic_test():
    env = FetchDiscreteEnv(dis_tolerance=0.001, step_ds=0.005)
    obs = env.reset()
    done = False

    ori_pos = (obs['eeinfo'][0]).copy()
    print('---ori_pos = ', obs['eeinfo'][0], '----')
    step = 0
    robot_step = 0
    # env.render()
    s_time = time.time()

    for _ in range(2000):
        env.render()
        a = [1, 0, 0, 0, 0]
        s, r, d, info = env.step(a)
        print('now pos = ', env.pos)
        # env.render()
        # a = [0, 0, 1, 0, 0]
        # s,r, d, info =  env.step(a)

    time.sleep(1)
    # env.gripper_close()
    a = [0, 0, 0, 0, 1]
    s, r, d, info = env.step(a)

    env.render()
    time.sleep(1)

    print('---final_pos = ', obs['eeinfo'][0], '----')
    pos_diff = obs['eeinfo'][0] - ori_pos
    formattedList = ["%.2f" % member for member in pos_diff]
    print('---pos_diff = ', formattedList, '----')

    print('use time = {:.2f}'.format(time.time() - s_time))
Example #2
    def __init__(self,
                 dis_tolerance=0.001,
                 step_ds=0.005,
                 gray_img=True,
                 hsv_color=False,
                 is_render=False):
        self.env = FetchDiscreteEnv(dis_tolerance=0.001,
                                    step_ds=0.005,
                                    is_render=is_render)
        self.gray_img = gray_img

        self.target_pic = None
        self.hsv_color = hsv_color
        self.is_render = is_render
Example #3
    def __init__(self,
                 dis_tolerance=0.001,
                 step_ds=0.005,
                 gray_img=True,
                 use_tray=True,
                 is_render=False,
                 only_show_obj0=False):
        self.env = FetchDiscreteEnv(dis_tolerance=0.001,
                                    step_ds=0.005,
                                    use_tray=use_tray,
                                    is_render=is_render)
        self.gray_img = gray_img
        self.is_render = is_render
        self.only_show_obj0 = only_show_obj0
Example #4
def basic_info():
    env = FetchDiscreteEnv(dis_tolerance=0.001, step_ds=0.005)
    obs = env.reset()

    print("Start position ->", env.pos)
    print("Start (Close) gripper state ->", env.gripper_state)

    env.gripper_close(False)
    print("Open gripper state ->", env.gripper_state)
    # Open gripper state -> [0.0500507 0.0500507]

    env.gripper_close(True)
    print("Close gripper state ->", env.gripper_state)
    # Close gripper state -> [0.00184229 0.00184229]

    env.measure_obj_reward()
Example #5
def go_obj():
    # dis_tolerance = 0.001   # 1 mm
    step_ds = 0.005
    env = FetchDiscreteEnv(dis_tolerance=0.001, step_ds=0.005)
    obs = env.reset()

    done = False

    ori_pos = (obs['eeinfo'][0]).copy()
    print('---ori_pos = ', obs['eeinfo'][0], '----')
    step = 0
    robot_step = 0
    # env.render()
    s_time = time.time()
    sum_r = 0

    while True:
        env.render()
        diff_x = env.obj_pos[0] - env.pos[0]
        diff_y = env.obj_pos[1] - env.pos[1]
        if diff_x > step_ds:
            a = [1, 0, 0, 0, 0]
        elif diff_x < 0 and abs(diff_x) > step_ds:
            a = [0, 0, 1, 0, 0]
        elif diff_y > step_ds:
            a = [0, 1, 0, 0, 0]
        elif diff_y < 0 and abs(diff_y) > step_ds:
            a = [0, 0, 0, 1, 0]
        else:
            break

        s, r, d, info = env.step(a)
        sum_r += r

    a = [0, 0, 0, 0, 1]
    s, r, d, info = env.step(a)
    sum_r += r
    env.render()
    print('episode sum_r = ', sum_r)

    print('---final_pos = ', obs['eeinfo'][0], '----')
    pos_diff = obs['eeinfo'][0] - ori_pos
    formattedList = ["%.2f" % member for member in pos_diff]
    print('---pos_diff = ', formattedList, '----')

    print('use time = {:.2f}'.format(time.time() - s_time))
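
The greedy per-axis stepping above is repeated almost verbatim in test_limit and go_obj_savepic below. A minimal sketch of how it could be factored out (greedy_step_action and go_to are assumed helper names, not part of the original code; the [+x, +y, -x, -y, grip] one-hot layout is inferred from these examples):

def greedy_step_action(cur_pos, target_pos, step_ds=0.005):
    """Return a one-hot action that moves one step toward target_pos, or None when close enough."""
    diff_x = target_pos[0] - cur_pos[0]
    diff_y = target_pos[1] - cur_pos[1]
    if diff_x > step_ds:
        return [1, 0, 0, 0, 0]   # step in +x
    if diff_x < -step_ds:
        return [0, 0, 1, 0, 0]   # step in -x
    if diff_y > step_ds:
        return [0, 1, 0, 0, 0]   # step in +y
    if diff_y < -step_ds:
        return [0, 0, 0, 1, 0]   # step in -y
    return None                  # within one step on both axes


def go_to(env, target_pos, render=False):
    """Drive the gripper toward target_pos and return the accumulated reward."""
    sum_r = 0
    while True:
        if render:
            env.render()
        a = greedy_step_action(env.pos, target_pos)
        if a is None:
            break
        _, r, _, _ = env.step(a)
        sum_r += r
    return sum_r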
Example #6
def test_limit(go_pos):
    step_ds = 0.005
    env = FetchDiscreteEnv(dis_tolerance=0.001, step_ds=0.005)
    obs = env.reset()
    done = False

    sum_r = 0

    while True:
        env.render()
        diff_x = go_pos[0] - env.pos[0]
        diff_y = go_pos[1] - env.pos[1]
        if diff_x > step_ds:
            a = [1, 0, 0, 0, 0]
        elif diff_x < 0 and abs(diff_x) > step_ds:
            a = [0, 0, 1, 0, 0]
        elif diff_y > step_ds:
            a = [0, 1, 0, 0, 0]
        elif diff_y < 0 and abs(diff_y) > step_ds:
            a = [0, 0, 0, 1, 0]
        else:
            break

        s, r, d, info = env.step(a)
        sum_r += r

        print('now pos = ', env.pos)

    print('Sum reward = ', sum_r)
    print('Final pos = ', env.pos)
Example #7
def test_one_axis():
    env = FetchDiscreteEnv(dis_tolerance=0.001, step_ds=0.005)
    obs = env.reset()

    ori_pos = (obs['eeinfo'][0]).copy()
    print('---ori_pos = ', obs['eeinfo'][0], '----')

    for _ in range(2000):
        env.render()
        a = [0, 0, 0, 1, 0]
        s, r, d, info = env.step(a)
        print('now pos = ', env.pos)
Example #8
class FetchDiscreteCamSiamenseEnv:
    def __init__(self,
                 dis_tolerance=0.001,
                 step_ds=0.005,
                 gray_img=True,
                 hsv_color=False,
                 is_render=False):
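        # Note: the dis_tolerance and step_ds constructor arguments are not
        # forwarded below; the wrapped FetchDiscreteEnv is always created with
        # the hard-coded 0.001 / 0.005.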
        self.env = FetchDiscreteEnv(dis_tolerance=0.001,
                                    step_ds=0.005,
                                    is_render=is_render)
        self.gray_img = gray_img

        self.target_pic = None
        self.hsv_color = hsv_color
        self.is_render = is_render

    def img_preprocess(self, img):
        # gray images are not supported here

        if self.hsv_color:
            resize_img = cv2.resize(img, (IMG_W_H, IMG_W_H),
                                    interpolation=cv2.INTER_AREA)
            resize_img = resize_img / 255.0
            # print('resize_img = ', resize_img)
            hsv_img = rgb_to_hsv(resize_img)
            return hsv_img
        else:
            resize_img = cv2.resize(img, (IMG_W_H, IMG_W_H),
                                    interpolation=cv2.INTER_AREA)
            return resize_img

    def step(self, action):
        # print('i action = ', action)
        a_one_hot = np.zeros(5)
        a_one_hot[action] = 1
        s_old, r, d, _ = self.env.step(a_one_hot)

        # rgb_external is not used, but this render call must be kept;
        # otherwise the gripper image comes back corrupted
        rgb_external = self.env.sim.render(width=256,
                                           height=256,
                                           camera_name="external_camera_0",
                                           depth=False,
                                           mode='offscreen',
                                           device_id=-1)
        rgb_gripper = self.env.sim.render(width=256,
                                          height=256,
                                          camera_name="gripper_camera_rgb",
                                          depth=False,
                                          mode='offscreen',
                                          device_id=-1)

        if self.is_render:
            self.render_gripper_img(rgb_gripper)
            # rgb_img = cv2.cvtColor(rgb_gripper, cv2.COLOR_BGR2RGB)
            # cv2.imshow('Gripper Image',rgb_img)
            # cv2.waitKey(50)

        s = [self.img_preprocess(rgb_gripper), self.target_pic]
        return s, r, d, None

    @property
    def pos(self):
        return self.env.pos

    @property
    def obj_pos(self):
        return self.env.obj_pos

    def get_obj_pos(self, obj_id):
        return self.env.get_obj_pos(obj_id)

    @property
    def gripper_state(self):
        return self.env.gripper_state

    def take_only_obj0_pic(self):

        try:
            self.env.rand_obj0_hide_obj1_obj2()
            self.env.render()
            # time.sleep(2)
            rgb_external = self.env.sim.render(width=256,
                                               height=256,
                                               camera_name="external_camera_0",
                                               depth=False,
                                               mode='offscreen',
                                               device_id=-1)
            rgb_gripper = self.env.sim.render(width=256,
                                              height=256,
                                              camera_name="gripper_camera_rgb",
                                              depth=False,
                                              mode='offscreen',
                                              device_id=-1)

            if self.is_render:
                self.render_target_img(rgb_gripper)
                self.render_gripper_img(rgb_gripper)

                time.sleep(2)

            # time.sleep(2)
            # resize_img = cv2.resize(rgb_gripper, (IMG_W_H, IMG_W_H), interpolation=cv2.INTER_AREA)
            # self.target_pic = resize_img.copy()
            self.target_pic = self.img_preprocess(rgb_gripper)

            self.env.recover_obj0_obj1_obj2_pos()
            self.env.render()
            # time.sleep(2)
        except Exception as e:
            print(' Exception e -> ', e)

    def reset(self):
        if self.hsv_color:
            self.env.rand_objs_hsv()
        else:
            self.env.rand_objs_color()

        self.env.reset()
        self.take_only_obj0_pic()

        self.env.render()
        rgb_external = self.env.sim.render(width=256,
                                           height=256,
                                           camera_name="external_camera_0",
                                           depth=False,
                                           mode='offscreen',
                                           device_id=-1)
        rgb_gripper = self.env.sim.render(width=256,
                                          height=256,
                                          camera_name="gripper_camera_rgb",
                                          depth=False,
                                          mode='offscreen',
                                          device_id=-1)

        if self.is_render:
            self.render_gripper_img(rgb_gripper)
            time.sleep(1)

        s = [self.img_preprocess(rgb_gripper), self.target_pic]
        return s

    def render(self):
        self.env.render()

    def render_target_img(self, img):
        # if self.is_render:
        rgb_img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        cv2.imshow('Target Image', rgb_img)
        cv2.waitKey(50)

    def render_gripper_img(self, gripper_img):
        # if self.is_render:
        rgb_img = cv2.cvtColor(gripper_img, cv2.COLOR_BGR2RGB)
        cv2.imshow('Gripper Image', rgb_img)
        cv2.waitKey(50)
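
A minimal usage sketch for the wrapper above, assuming its dependencies (FetchDiscreteEnv, cv2, IMG_W_H, rgb_to_hsv) come from the same module and that integer actions 0-4 index the 5-way one-hot built in step():

import random

siamese_env = FetchDiscreteCamSiamenseEnv(gray_img=False, hsv_color=False, is_render=False)
s = siamese_env.reset()              # s = [current gripper view, target picture]
current_img, target_img = s

for _ in range(10):
    a = random.randint(0, 4)         # index into the 5-element one-hot action
    s, r, d, _ = siamese_env.step(a)
    if d:
        break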
Example #9
def go_obj_savepic(is_render=True):
    save_dir = 'z_fetch_run_pic'
    create_dir(save_dir)
    # dis_tolerance = 0.001   # 1 mm
    step_ds = 0.005
    env = FetchDiscreteEnv(dis_tolerance=0.001, step_ds=0.005)
    # obs = env.reset()
    # done = False

    # ori_pos = (obs['eeinfo'][0]).copy()
    print('---ori_pos = ', env.pos, '----')
    # step  = 0
    # robot_step = 0
    # # env.render()
    s_time = time.time()
    # env.render()
    # step_count = 0

    for i in range(5):
        obs = env.reset()
        env.gripper_close(False)
        env.render()
        save_dir = 'z_fetch_run_pic_%02d' % i
        create_dir(save_dir)
        step_count = 0
        print('------start ep %03d--------' % i)
        sum_r = 0
        while True:
            if is_render:
                env.render()
            diff_x = env.obj_pos[0] - env.pos[0]
            diff_y = env.obj_pos[1] - env.pos[1]
            if diff_x > step_ds:
                a = [1, 0, 0, 0, 0]
            elif diff_x < 0 and abs(diff_x) > step_ds:
                a = [0, 0, 1, 0, 0]
            elif diff_y > step_ds:
                a = [0, 1, 0, 0, 0]
            elif diff_y < 0 and abs(diff_y) > step_ds:
                a = [0, 0, 0, 1, 0]
            else:
                break
            step_count += 1
            s, r, d, info = env.step(a)
            sum_r += r
            # rgb_external = env.sim.render(width=256, height=256, camera_name="external_camera_0", depth=False,
            #         mode='offscreen', device_id=-1)
            # rgb_gripper = env.sim.render(width=256, height=256, camera_name="gripper_camera_rgb", depth=False,
            #     mode='offscreen', device_id=-1)
            rgb_external = env.sim.render(width=256,
                                          height=256,
                                          camera_name="external_camera_0",
                                          depth=False,
                                          mode='offscreen',
                                          device_id=-1)
            rgb_gripper = env.sim.render(width=256,
                                         height=256,
                                         camera_name="gripper_camera_rgb",
                                         depth=False,
                                         mode='offscreen',
                                         device_id=-1)

            # print('type(rgb_gripper) = ', type(rgb_gripper),', shape=', np.shape(rgb_gripper))
            img = Image.fromarray(rgb_gripper, 'RGB')
            # img.save(save_dir + '/%03d.jpg' % step_count)
            img.save(save_dir + '/%03d_r%3.2f.jpg' % (step_count, r))

        a = [0, 0, 0, 0, 1]
        s, r, d, info = env.step(a)
        sum_r += r
        print('sum_r = ', sum_r)
        print("use step = ", step_count)

        env.render()

    # print('---final_pos = ' , obs['eeinfo'][0],'----')
    # pos_diff = obs['eeinfo'][0] - ori_pos
    # formattedList = ["%.2f" % member for member in pos_diff]
    # print('---pos_diff = ' ,formattedList ,'----')

    print('use time = {:.2f}'.format(time.time() - s_time))
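
The render-and-save pattern inside the loop above could be wrapped in a small helper; this is only a sketch (save_gripper_frame is a hypothetical name, not part of the original code):

import os
from PIL import Image

def save_gripper_frame(env, save_dir, step_count, reward):
    """Render the gripper camera offscreen and save it as a JPEG tagged with the reward."""
    # The external-camera render is kept because, as noted elsewhere in these
    # examples, dropping it corrupts the gripper image.
    _ = env.sim.render(width=256, height=256, camera_name="external_camera_0",
                       depth=False, mode='offscreen', device_id=-1)
    rgb_gripper = env.sim.render(width=256, height=256, camera_name="gripper_camera_rgb",
                                 depth=False, mode='offscreen', device_id=-1)
    img = Image.fromarray(rgb_gripper, 'RGB')
    img.save(os.path.join(save_dir, '%03d_r%3.2f.jpg' % (step_count, reward)))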
Example #10
class FetchDiscreteCamEnv:
    def __init__(self,
                 dis_tolerance=0.001,
                 step_ds=0.005,
                 gray_img=True,
                 use_tray=True,
                 is_render=False,
                 only_show_obj0=False):
        self.env = FetchDiscreteEnv(dis_tolerance=0.001,
                                    step_ds=0.005,
                                    use_tray=use_tray,
                                    is_render=is_render)
        self.gray_img = gray_img
        self.is_render = is_render
        self.only_show_obj0 = only_show_obj0

    def state_preprocess(self, img):
        resize_img = cv2.resize(img, (IMG_W_H, IMG_W_H),
                                interpolation=cv2.INTER_AREA)
        gray_img = cv2.cvtColor(resize_img, cv2.COLOR_RGB2GRAY)
        return np.reshape(gray_img, (IMG_W_H, IMG_W_H, 1))

    def step(self, action):
        # print('i action = ', action)
        a_one_hot = np.zeros(6)
        a_one_hot[action] = 1
        s, r, d, _ = self.env.step(a_one_hot)

        # rgb_external is not used, but this render call must be kept;
        # otherwise the gripper image comes back corrupted
        rgb_external = self.env.sim.render(width=256,
                                           height=256,
                                           camera_name="external_camera_0",
                                           depth=False,
                                           mode='offscreen',
                                           device_id=-1)
        rgb_gripper = self.env.sim.render(width=256,
                                          height=256,
                                          camera_name="gripper_camera_rgb",
                                          depth=False,
                                          mode='offscreen',
                                          device_id=-1)

        rgb_gripper = cv2.cvtColor(rgb_gripper, cv2.COLOR_BGR2RGB)
        if self.is_render:
            if self.gray_img:
                # resize_img = cv2.resize(rgb_gripper, (256, 256), interpolation=cv2.INTER_AREA)
                gray_img = cv2.cvtColor(rgb_gripper, cv2.COLOR_RGB2GRAY)
                cv2.imshow('Gripper Image', gray_img)
                cv2.waitKey(50)
            else:
                self.render_gripper_img(rgb_gripper)

        # s = self.state_preprocess(rgb_gripper)
        if self.gray_img:
            s = self.state_preprocess(rgb_gripper)
            return s, r, d, None
        else:
            resize_img = cv2.resize(rgb_gripper, (IMG_W_H, IMG_W_H),
                                    interpolation=cv2.INTER_AREA)
            return resize_img, r, d, None

        # return s, r, d, None

    @property
    def pos(self):
        return self.env.pos

    @property
    def obj_pos(self):
        return self.env.obj_pos

    @property
    def red_tray_pos(self):
        return self.env.red_tray_pos

    @property
    def gripper_state(self):
        return self.env.gripper_state

    @property
    def is_gripper_close(self):
        return self.env.is_gripper_close

    def reset(self):
        # self.env.rand_objs_color(exclude_obj0 = True)
        # self.env.rand_red_or_not(obj_name='object0', use_red_color=True)
        self.env.rand_red_or_not(obj_name='object1', use_red_color=False)
        self.env.rand_red_or_not(obj_name='object2', use_red_color=False)
        self.env.reset()
        if self.only_show_obj0:
            self.env.hide_obj1_obj2()
        self.env.render()
        rgb_external = self.env.sim.render(width=256,
                                           height=256,
                                           camera_name="external_camera_0",
                                           depth=False,
                                           mode='offscreen',
                                           device_id=-1)
        rgb_gripper = self.env.sim.render(width=256,
                                          height=256,
                                          camera_name="gripper_camera_rgb",
                                          depth=False,
                                          mode='offscreen',
                                          device_id=-1)

        if self.gray_img:
            s = self.state_preprocess(rgb_gripper)
            return s
        else:
            resize_img = cv2.resize(rgb_gripper, (IMG_W_H, IMG_W_H),
                                    interpolation=cv2.INTER_AREA)
            return resize_img

    def render(self):
        self.env.render()

    def render_gripper_img(self, gripper_img):
        # if self.is_render:
        # rgb_img = cv2.cvtColor(gripper_img, cv2.COLOR_BGR2RGB)
        cv2.imshow('Gripper Image', gripper_img)
        cv2.waitKey(50)
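
A minimal usage sketch for FetchDiscreteCamEnv, assuming integer actions 0-5 index the 6-element one-hot built in step() and that IMG_W_H is defined in the same module:

import random

cam_env = FetchDiscreteCamEnv(gray_img=True, use_tray=True, is_render=False)
s = cam_env.reset()                  # grayscale observation, shape (IMG_W_H, IMG_W_H, 1)

for _ in range(10):
    a = random.randint(0, 5)
    s, r, d, _ = cam_env.step(a)
    if d:
        break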