    # normalize depth to [0, 1]; keep offset/scale to restore metric depth later
    offset, scale = np.min(depth), np.max(depth) - np.min(depth)
    depth = (depth - offset) / scale

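    # clear the segmentation mask along the padded image border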
    seg[:, :top_pad], seg[:, -top_pad:] = False, False
    seg[:left_pad, :], seg[-left_pad:, :] = False, False

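    # plan a grasp with Dex-Net on the normalized depth image and object mask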
    dexnet = DexNet()
    dexnet.prepare_dexnet()
    print("dexnet prepared")
    state, rgbd_im = dexnet.get_state(depth, seg)
    action = dexnet.get_action(state)
    '''
    Get the depth of the planned grasp and its (x, y) pixel location, then
    apply the inverse transform from camera coordinates to world coordinates
    (a deprojection sketch follows at the end of this example).
    '''
    dexnet.visualization(action, rgbd_im, offset, scale)

    action.grasp.depth = action.grasp.depth * scale + offset
    rigid_transform = action.grasp.pose()
    print('center: {}, {}'.format(action.grasp.center.x,
                                  action.grasp.center.y))
    print('depth: {}'.format(action.grasp.depth))
    print('rot: {}'.format(rigid_transform.rotation))
    print('tra: {}'.format(rigid_transform.translation))
    print('camera intr: {}'.format(dir(action.grasp.camera_intr)))
    print('proj matrix: {}'.format(action.grasp.camera_intr.proj_matrix))
    print('other attr: {}'.format(dir(action.grasp)))
    # gripper_pos = (rigid_transform.rotation.T @ rigid_transform.translation).flatten()
    # gripper_pos[2] = (1-gripper_pos[2]) * scale + offset
    # gripper_pos[0] += 0.6
    # gripper_pos[1] += -0.15
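    # --- Minimal sketch (assumption, not part of the original example): recover
    # the grasp center as a 3D point in the camera frame via the pinhole model,
    # assuming `proj_matrix` is the 3x3 intrinsic matrix K, then move it into
    # the world frame. `T_camera_world` is a hypothetical 4x4 camera-to-world
    # transform that would come from the robot/simulator calibration.
    K = np.asarray(action.grasp.camera_intr.proj_matrix).reshape(3, 3)
    u, v = action.grasp.center.x, action.grasp.center.y
    z = action.grasp.depth  # already rescaled to metric depth above
    point_camera = np.array([(u - K[0, 2]) * z / K[0, 0],
                             (v - K[1, 2]) * z / K[1, 1],
                             z,
                             1.0])
    T_camera_world = np.eye(4)  # placeholder extrinsics; replace with the real ones
    point_world = (T_camera_world @ point_camera)[:3]
    print('grasp point in world frame: {}'.format(point_world))
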
Example #2
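    # render the scene and read back the RGB image and depth buffer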
    viewer.render(width, height)
    image = np.asarray(viewer.read_pixels(width, height, depth=False),
                       dtype=np.uint8)
    depth = np.asarray(viewer.read_pixels(width, height, depth=True)[1])

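    # treat the farthest of the center and two near-corner samples as the
    # background depth; clip everything beyond it and segment what lies closer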
    cdepth = max(depth[height // 2, width // 2], depth[left_pad, top_pad],
                 depth[-left_pad, -top_pad])
    print(cdepth)
    depth[depth > cdepth] = cdepth
    seg = depth != cdepth
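    # fill the padded border with the background depth and normalize to [0, 1]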
    depth[:, :top_pad], depth[:, -top_pad:] = cdepth, cdepth
    depth[:left_pad, :], depth[-left_pad:, :] = cdepth, cdepth
    depth = (depth - np.min(depth)) / (np.max(depth) - np.min(depth))

    seg[:, :top_pad], seg[:, -top_pad:] = False, False
    seg[:left_pad, :], seg[-left_pad:, :] = False, False

    dexnet = DexNet()
    dexnet.prepare_dexnet()
    print(dexnet)
    state, rgbd_im = dexnet.get_state(depth, seg)
    action = dexnet.get_action(state)
    dexnet.visualization(action, rgbd_im)

    # save the results for inspection
    cv2.imwrite('test_dataset/seg.png', (seg * 255).astype(np.uint8))
    # depth here is the normalized [0, 1] array
    np.save('test_dataset/depth_0.npy', depth)
    # scale to [0, 255] and cast so OpenCV writes a valid 8-bit PNG
    cv2.imwrite('test_dataset/depth.png', (depth * 255).astype(np.uint8))
    cv2.imwrite('test_dataset/visual.png', image)