Example #1
def make_depth_images(obj_name,
                      pos,
                      rot,
                      obj_dir,
                      image_dir,
                      bottom=0.2,
                      imsize=(80, 80),
                      camera_offset=.45,
                      near_clip=.25,
                      far_clip=.8,
                      support=False):
    """
    Saves depth images from perspective of gripper as image files. Default
    camera parameters make an exaggerated representation of region in front of hand.

    :param obj_name: Name corresponding to .obj file (without path or extension)
    :param pos: Positions of perspectives from which to make depth images
    :param rot: Rotation matrices of perspectives
    :param obj_dir: Directory where .obj files can be found
    :param image_dir: Directory in which to store images
    """
    obj_filename = obj_dir + obj_name + '.obj'

    if support:
        verts, faces = loadOBJ('../data/support-box.obj')
    else:
        verts, faces = loadOBJ(obj_filename)

    verts = np.array(verts)
    # minz = np.min(verts, axis=0)[2]
    # verts[:,2] = verts[:,2] + bottom - minz
    min_bounding_box = np.min(verts, axis=0)
    max_bounding_box = np.max(verts, axis=0)

    # set bounding box horizontal centre to 0,0
    verts[:, 0] = verts[:, 0] - (min_bounding_box[0] + max_bounding_box[0]) / 2.
    verts[:, 1] = verts[:, 1] - (min_bounding_box[1] + max_bounding_box[1]) / 2.
    # set bottom of bounding box to "bottom"
    verts[:, 2] = verts[:, 2] + bottom - min_bounding_box[2]

    d = Display(imsize=imsize)
    d.set_perspective(fov=45, near_clip=near_clip, far_clip=far_clip)

    for i in range(len(pos)):
        d.set_camera_position(pos[i], rot[i], camera_offset)
        d.set_mesh(verts, faces)  #this must go after set_camera_position
        depth = d.read_depth()
        distance = get_distance(depth, near_clip, far_clip)
        rescaled_distance = np.maximum(0, (distance - camera_offset) /
                                       (far_clip - camera_offset))
        imfile = image_dir + obj_name + '-' + str(i) + '.png'
        Image.fromarray(
            (255.0 * rescaled_distance).astype('uint8')).save(imfile)
        # scipy.misc.toimage(depth, cmin=0.0, cmax=1.0).save(imfile)

    d.close()
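
A minimal usage sketch, assuming the functions in these examples share one module: the camera poses come from the get_random_points, get_random_angles, and get_rotation_matrix helpers that appear in the later examples, and the image directory is an illustrative assumption rather than a path taken from the original code.

n = 10
points = get_random_points(n, .25)  # 3 x n candidate camera positions
angles = get_random_angles(n)       # 3 x n candidate camera orientations
pos = [points[:, i] for i in range(n)]
rot = [get_rotation_matrix(points[:, i], angles[:, i]) for i in range(n)]
make_depth_images('24_bowl-02-Mar-2016-07-03-29', pos, rot,
                  '../data/obj_files/', '../data/depth_images/')  # image_dir is assumed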
Example #2
def make_random_depths(obj_filename, param_filename, n, im_size=(40,40)):
    """
    Creates a dataset of depth maps and corresponding success probabilities
    at random interpolated gripper configurations.
    """
    verts, faces = loadOBJ(obj_filename)
    verts = np.array(verts)
    minz = np.min(verts, axis=0)[2]
    verts[:,2] = verts[:,2] + 0.2 - minz

    points, labels = get_points(param_filename)

    d = Display(imsize=im_size)
    probs = []
    depths = []
    for i in range(n):
        point = get_interpolated_point(points)

        estimate, confidence = get_prob_label(points, labels, point, sigma_p=2*.001, sigma_a=2*(4*np.pi/180))
        probs.append(estimate)

        gripper_pos = point[:3]
        gripper_orient = point[3:]
        d.set_camera_position(gripper_pos, gripper_orient, .3)
        d.set_mesh(verts, faces) #this must go after set_camera_position
        depth = d.read_depth()
        depths.append(depth)

    d.close()

    return np.array(depths), np.array(probs)
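
A hedged usage sketch for make_random_depths: the shape name and the obj/params directory layout match the conventions used in the other examples here, while the sample count and the .npy output paths are assumptions added for illustration.

shape = '24_bowl-02-Mar-2016-07-03-29'
depths, probs = make_random_depths('../data/obj_files/' + shape + '.obj',
                                   '../data/params/' + shape + '.csv',
                                   500)
print(depths.shape, probs.shape)  # (500, 40, 40) and (500,) with the default im_size
np.save('../data/depths-' + shape + '.npy', depths)  # assumed output path
np.save('../data/probs-' + shape + '.npy', probs)    # assumed output path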
Example #3
def plot_box_corners():
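    """Scatter-plots the vertices of the support-box mesh."""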
    verts, faces = loadOBJ('../data/support-box.obj')
    verts = np.array(verts)
    print(verts.shape)

    import matplotlib.pyplot as plt
    from mpl_toolkits.mplot3d import axes3d, Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(1, 2, 1, projection='3d')
    ax.scatter(verts[:,0], verts[:,1], verts[:,2])
    plt.show()
Example #4
def plot_success_prob():
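    """Plots high-probability gripper configurations around an example object, coloured by estimated grasp success probability."""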
    shape = '24_bowl-02-Mar-2016-07-03-29'
    param_filename = '../data/params/' + shape + '.csv'
    points, labels = get_points(param_filename)

    # z = np.linspace(-4 * np.pi, 4 * np.pi, 300)
    # x = np.cos(z)
    # y = np.sin(z)
    # z = [1, 2]
    # x = [1, 2]
    # y = [1, 2]
    #
    # fig = plt.figure()
    # ax = fig.gca(projection='3d')
    # ax.plot(x, y, z,
    #         color = 'Blue',      # colour of the curve
    #         linewidth = 3,            # thickness of the line
    #         )
    fig = plt.figure()
    ax = Axes3D(fig)
    red = np.array([1.0,0.0,0.0])
    green = np.array([0.0,1.0,0.0])

    obj_filename = '../data/obj_files/' + shape + '.obj'
    verts, faces = loadOBJ(obj_filename)
    verts = np.array(verts)
    minz = np.min(verts, axis=0)[2]
    verts[:,2] = verts[:,2] + 0.2 - minz

    show_flags = np.random.rand(verts.shape[0]) < .25
    ax.scatter(verts[show_flags,0], verts[show_flags,1], verts[show_flags,2], c='b')

    for i in range(1000):
        point = points[i]
        prob, confidence = get_prob_label(points, labels, point, sigma_p=1.5*.001, sigma_a=1.5*(4*np.pi/180))
        goodness = prob * np.minimum(1, .5 + .15*confidence)

        rm = rot_matrix(point[3], point[4], point[5])
        pos = point[:3]
        front = pos + np.dot(rm,[0,0,.15])
        left = pos - np.dot(rm,[0.0,0.005,0])
        right = pos + np.dot(rm,[0.0,0.01,0])

        # if goodness > .85:
        if prob > .9:
            ax.plot([pos[0],front[0]], [pos[1],front[1]], [pos[2],front[2]],
                    color=(prob*green + (1-prob)*red),
                    linewidth=confidence)
            ax.plot([left[0],right[0]], [left[1],right[1]], [left[2],right[2]],
                    color=(prob*green + (1-prob)*red),
                    linewidth=confidence)
    plt.show()
Example #5
def get_perspectives(obj_filename,
                     points,
                     angles,
                     im_width=80,
                     near_clip=.25,
                     far_clip=0.8,
                     fov=45,
                     camera_offset=.45,
                     target_point=None):
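    """
    Renders distance images of the given mesh from a set of camera perspectives.

    :param obj_filename: Path of the .obj file to render
    :param points: 3 x n array of camera positions
    :param angles: 3 x n array of camera orientation angles
    :param target_point: Optional point (in the centred mesh frame) that is moved to the origin before rendering
    :return: n x im_width x im_width float32 array of distances
    """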
    from depthmap import loadOBJ, Display, get_distance
    verts, faces = loadOBJ(obj_filename)

    # convert to array and compute the bounding box
    verts = np.array(verts)
    min_bounding_box = np.min(verts, axis=0)
    max_bounding_box = np.max(verts, axis=0)

    # set bounding box centre to 0,0,0
    verts[:, 0] = verts[:, 0] - (min_bounding_box[0] + max_bounding_box[0]) / 2.
    verts[:, 1] = verts[:, 1] - (min_bounding_box[1] + max_bounding_box[1]) / 2.
    verts[:, 2] = verts[:, 2] - (min_bounding_box[2] + max_bounding_box[2]) / 2.

    if target_point is not None:
        verts[:, 0] = verts[:, 0] - target_point[0]
        verts[:, 1] = verts[:, 1] - target_point[1]
        verts[:, 2] = verts[:, 2] - target_point[2]

    d = Display(imsize=(im_width, im_width))
    d.set_perspective(fov=fov, near_clip=near_clip, far_clip=far_clip)
    perspectives = np.zeros((points.shape[1], im_width, im_width),
                            dtype='float32')
    for i in range(points.shape[1]):
        point = points[:, i]
        angle = angles[:, i]
        rot = get_rotation_matrix(point, angle)
        d.set_camera_position(point, rot, camera_offset)
        d.set_mesh(verts, faces)
        depth = d.read_depth()
        distance = get_distance(depth, near_clip, far_clip)
        perspectives[i, :, :] = distance
    d.close()
    return perspectives
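
A minimal usage sketch, assuming get_random_points and get_random_angles from the next example are available in the same module; the .obj path matches the one used elsewhere in these examples.

n = 20
points = get_random_points(n, .25)
angles = get_random_angles(n)
perspectives = get_perspectives('../data/obj_files/24_bowl-02-Mar-2016-07-03-29.obj',
                                points, angles)
print(perspectives.shape)  # (20, 80, 80) with the default im_width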
Example #6
def check_depth_from_random_perspective():
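    """Renders a depth map of an example object from one random perspective and plots it as a wireframe."""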
    from depthmap import loadOBJ, Display
    filename = '../data/obj_files/24_bowl-02-Mar-2016-07-03-29.obj'
    verts, faces = loadOBJ(filename)

    # put vertical centre at zero
    verts = np.array(verts)
    minz = np.min(verts, axis=0)[2]
    maxz = np.max(verts, axis=0)[2]
    verts[:, 2] = verts[:, 2] - (minz + maxz) / 2

    n = 6
    points = get_random_points(n, .25)
    angles = get_random_angles(n)
    point = points[:, 0]
    angle = angles[:, 0]

    rot = get_rotation_matrix(point, angle)

    im_width = 80
    d = Display(imsize=(im_width, im_width))
    d.set_camera_position(point, rot, .5)
    d.set_mesh(verts, faces)
    depth = d.read_depth()
    d.close()

    X = np.arange(0, im_width)
    Y = np.arange(0, im_width)
    X, Y = np.meshgrid(X, Y)

    from mpl_toolkits.mplot3d import axes3d, Axes3D
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    ax.plot_wireframe(X, Y, depth)
    ax.set_xlabel('x')
    plt.show()
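
The wireframe above plots the raw values returned by read_depth; the other examples convert these to metric distances with get_distance before using them. A sketch of the same conversion, which could replace the plot_wireframe call above (the near and far clip values are assumptions matching the defaults used elsewhere in this file, since this example never calls set_perspective):

from depthmap import get_distance
distance = get_distance(depth, .25, .8)  # assumed near_clip and far_clip
ax.plot_wireframe(X, Y, distance)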