def render_from_file(meta_data, max_indx=5):
    clear_env()
    car_shape, car_rot, car_trans, cam_location, cam_rotation, img_fov, img_size, kpt_dict = load_meta_data(
        meta_data)
    request('vset /camera/1/location {:.6f} {:.6f} {:.6f}'.format(
        cam_location[0], cam_location[1], cam_location[2]))
    request('vset /camera/1/rotation {:.6f} {:.6f} {:.6f}'.format(
        cam_rotation[0], cam_rotation[1], cam_rotation[2]))
    request('vset /camera/1/fov {:.6f}'.format(img_fov))

    car_group = Car_Manager()

    if max_indx <= 0:
        max_indx = max_indx + len(car_shape)

    car_color = [[i, 0, 0] for i in range(len(car_trans))]

    print(len(car_shape))
    for i, shape in enumerate(car_shape):
        if i >= max_indx:
            break
        car_group.add_car(shape)
        car_group.trans_car(car_trans[i], "car{}".format(i))
        #car_group.trans_car([0,0,0], "car{}".format(i))
        car_group.rot_car(car_rot[i], "car{}".format(i))
        car_group.annotate_car(car_color[i], "car{}".format(i))
    car_group.flush()

    cwd_root, _ = os.path.split(meta_data)
    base_dir = join(cwd_root, "render_res")
    if DEBUG: print("Finished write object pose data ")
    time.sleep(0.1)
    if DEBUG: print("Acquiring image ...")
    img = read_png(request('vget /camera/1/lit png'))
    cv2.imwrite(join(base_dir, "car_arrangment.png"), img[:, :, 2::-1])

    mask = read_png(request('vget /camera/1/object_mask png'))
    cv2.imwrite(join(base_dir, "car_mask.png"), mask[:, :, 2::-1])

    png = read_png(request('vget /camera/0/lit png'))
    cv2.imwrite(join(base_dir, "overview.png"), png[:, :, 2::-1])

    for i in range(len(car_color)):
        obj_mask = udb.get_mask(mask, car_color[i])
        [ys, xs] = np.where(obj_mask)
        if len(xs) == 0:
            # No pixels carry this annotation color (car was skipped above)
            continue
        bbox = [min(xs), max(xs), min(ys), max(ys)]
        print(bbox)
        obj_img = udb.mask_img(img, obj_mask)
        bbox_img = img[min(ys):max(ys), min(xs):max(xs), :]
        cv2.imwrite(join(base_dir, "car%d_seg.png" % i), obj_img[:, :, 2::-1])
        cv2.imwrite(join(base_dir, "car%d_bbox.png" % i), bbox_img[:, :,
                                                                   2::-1])

    if DEBUG: print("Finished write image to files")

    return True
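The udb.get_mask and udb.mask_img helpers used above are not shown in this snippet. A minimal sketch of what they likely do, assuming the object mask stores each car's annotation color as exact RGB values:

import numpy as np

def get_mask(mask_img, color):
    # Boolean mask: True where the RGB mask image matches the annotation color
    return np.all(mask_img[:, :, :3] == np.asarray(color), axis=-1)

def mask_img(img, obj_mask):
    # Keep only the pixels covered by obj_mask; zero out the rest
    out = np.zeros_like(img)
    out[obj_mask] = img[obj_mask]
    return out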
Example #2
def get_image(obj, dist, az, el):
    client.request(
        'vset /animal/{obj}/camera {dist} {az} {el}'.format(**locals()))
    lit = client.request('vget /animal/{obj}/image'.format(**locals()))
    seg = client.request('vget /animal/{obj}/seg'.format(**locals()))
    depth = client.request('vget /animal/{obj}/depth'.format(**locals()))
    lit = read_png(lit)
    seg = read_png(seg)
    depth = read_npy(depth)

    return lit, seg, depth
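A hedged usage sketch for the function above; the object id and the spherical camera coordinates (distance, azimuth, elevation) are placeholders, not values confirmed by the snippet:

# Hypothetical object id and camera pose
lit, seg, depth = get_image('cat1', dist=300, az=45, el=15)
print(lit.shape, seg.shape, depth.shape)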
Example #3
    def onpress(event):
        rot_offset = 10 # Rotate 10 degrees for each key press
        loc_offset = 10 # Move 10.0 when a key is pressed

        # Up and Down in cam-plane
        if event.key == 'w': loc[0] += loc_offset
        if event.key == 's': loc[0] -= loc_offset
        # Left and Right movement in cam-plane
        if event.key == 'a': loc[1] -= loc_offset
        if event.key == 'd': loc[1] += loc_offset
        # In and Out movement into cam-plane
        if event.key == 'q': loc[2] += loc_offset
        if event.key == 'e': loc[2] -= loc_offset

        # cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
        # client.request(cmd)
        cmd = 'vset /camera/0/moveto %s' % ' '.join([str(v) for v in loc])
        client.request(cmd)

        print(client.request('vget /camera/0/location'))
        print(client.request('vget /camera/0/rotation'))
        res = client.request('vget /camera/2/lit png')
        img = read_png(res)

        ax.imshow(img)
        fig.canvas.draw()
    def onpress(event):
        rot_offset = 10 # Rotate 10 degrees for each key press
        loc_offset = 10 # Move 10.0 when a key is pressed

        if event.key == 'a': rot[1] -= rot_offset
        if event.key == 'd': rot[1] += rot_offset
        if event.key == 'q': loc[2] += loc_offset # Move up
        if event.key == 'e': loc[2] -= loc_offset # Move down

        if event.key == 'w': loc[1] -= loc_offset
        if event.key == 's': loc[1] += loc_offset
        if event.key == 'up': loc[1] -= loc_offset
        if event.key == 'down': loc[1] += loc_offset
        if event.key == 'left': loc[0] -= loc_offset
        if event.key == 'right': loc[0] += loc_offset

        cmd = 'vset /camera/0/rotation %s' % ' '.join([str(v) for v in rot])
        client.request(cmd)
        cmd = 'vset /camera/0/location %s' % ' '.join([str(v) for v in loc])
        client.request(cmd)

        res = client.request('vget /camera/0/lit png')
        img = read_png(res)

        # print(event.key)
        # print('Requested image %s' % str(img.shape))

        ax.imshow(img)
        fig.canvas.draw()
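These handlers assume a matplotlib figure plus shared loc/rot state that the snippets do not show. A minimal sketch of the surrounding setup, with the starting pose values as assumptions:

import matplotlib.pyplot as plt
from unrealcv import client
from unrealcv.util import read_png

client.connect()
# Starting pose; a real script would query it with
# 'vget /camera/0/location' and 'vget /camera/0/rotation'
loc = [0.0, 0.0, 100.0]
rot = [0.0, 0.0, 0.0]

fig, ax = plt.subplots()
ax.imshow(read_png(client.request('vget /camera/0/lit png')))
fig.canvas.mpl_connect('key_press_event', onpress)
plt.show()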
Example #6
def rotate_around_object(obj_loc: tuple, d: tuple, angle: tuple, y_max: int, view_modes: list) -> dict:
    """
    Rotates the camera around obj_loc and returns images from a full 360-degree sweep for each view mode.
    The client has to be connected.
    :param obj_loc: 3-tuple, coordinates of the object location (x, y, z)
    :param d: 2-tuple, distance of the camera from obj_loc (distance on the x axis, distance on the z axis)
    :param angle: 2-tuple, angle step by which the camera is rotated around the z and y axes (z step, y step)
    :param y_max: top y-angle value by which the camera can be rotated above the object
    :param view_modes: list of strings naming the requested view modes. Allowed: lit, normal, depth, object_mask.
    :return: dictionary with the given view modes as keys and lists of images as values.
    """
    output = {}
    for view_mode in view_modes:
        output[view_mode] = []
    t = get_translation_matrix([-obj_loc[0], -obj_loc[1], -obj_loc[2]])
    t_neg = get_translation_matrix([obj_loc[0], obj_loc[1], obj_loc[2]])
    cam_start_loc = [obj_loc[0] - d[0], obj_loc[1], obj_loc[2] + d[1]]
    loc_orig = np.matmul(t, np.array([[cam_start_loc[0]], [cam_start_loc[1]], [cam_start_loc[2]], [1.0]]))
    h_rot = list(
        zip([get_rotation_z_axis(x) for x in range(0, 360, angle[0])], [x for x in range(0, 360, angle[0])]))
    y_angles = [y for y in range(0, -y_max, -angle[1])]
    v_rot = list(zip([get_rotation_y_axis(y) for y in range(0, len(y_angles) * angle[1], angle[1])], y_angles))
    for v_params in v_rot:
        for h_params in h_rot:
            new_loc = np.round(np.matmul(t_neg, np.matmul(h_params[0], np.matmul(v_params[0], loc_orig))))
            assert client.request('vset /camera/0/rotation {} {} {}'.format(v_params[1], h_params[1] % 360, 0)) == 'ok', \
                'Did not get \'ok\' response from unrealcv server for setting camera rotation'
            assert client.request('vset /camera/0/location {} {} {}'.format(new_loc[0][0],
                                                                            new_loc[1][0],
                                                                            new_loc[2][0])) == 'ok', \
                'Did not get \'ok\' response from unrealcv server for setting camera location'
            for view_mode in view_modes:
                output[view_mode].append(read_png(client.request('vget /camera/0/' + view_mode + ' png')))
    return output
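get_translation_matrix, get_rotation_z_axis, and get_rotation_y_axis are assumed helpers that this function multiplies together; a sketch consistent with its use of 4x4 homogeneous coordinates, with angles taken in degrees:

import numpy as np

def get_translation_matrix(t):
    # 4x4 homogeneous translation by vector t
    m = np.eye(4)
    m[:3, 3] = t
    return m

def get_rotation_z_axis(deg):
    # 4x4 homogeneous rotation about the z axis
    a = np.radians(deg)
    m = np.eye(4)
    m[0, 0], m[0, 1] = np.cos(a), -np.sin(a)
    m[1, 0], m[1, 1] = np.sin(a), np.cos(a)
    return m

def get_rotation_y_axis(deg):
    # 4x4 homogeneous rotation about the y axis
    a = np.radians(deg)
    m = np.eye(4)
    m[0, 0], m[0, 2] = np.cos(a), np.sin(a)
    m[2, 0], m[2, 2] = -np.sin(a), np.cos(a)
    return m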
def run_commands_demo(logger):
    time.sleep(10)

    logger.request('vget /unrealcv/status')
    logger.request('vget /unrealcv/version')
    logger.request('vget /scene/name')

    res = logger.request('vget /camera/0/location')
    res = logger.request('vget /camera/0/rotation')

    # res = logger.request('vset /camera/0/location -162 -126 90')
    # res = logger.request('vset /camera/0/rotation 348 60 0')

    res = logger.request('vget /camera/0/lit png', log=False)
    img = read_png(res)
    res = logger.request('vget /camera/0/depth npy', log=False)
    depth = read_npy(res)
    clip_far = np.median(depth) * 5  # use median instead of mean
    logger.writelines('Before clip, max=%f, mean=%f' %
                      (depth.max(), depth.mean()))
    depth[depth > clip_far] = clip_far
    logger.writelines('After clip, max=%f, mean=%f' %
                      (depth.max(), depth.mean()))
    # Truncate the depth of the windows and sky to make the image easier to see

    res = logger.request('vget /camera/0/normal npy', log=False)
    normal = read_npy(res)

    res = logger.request('vget /camera/0/object_mask png', log=False)
    object_mask = read_png(res)

    logger.save_image("img.png", img)
    logger.save_image("depth.png", normalize(depth))
    logger.save_image("surface_normal.png", normalize(normal))
    logger.save_image("object_mask.png", object_mask)

    res = logger.request('vget /camera/0/lit lit.png')
    res = logger.request('vget /camera/0/depth depth.png')
    res = logger.request('vget /camera/0/object_mask object_mask.png')

    res = logger.request('vget /objects', log=False)
    object_names = res.split(' ')
    logger.writelines(object_names[:5])
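normalize is not defined in this snippet; a minimal sketch of one plausible implementation that rescales an array into a 0-255 image before saving:

import numpy as np

def normalize(arr):
    # Linearly rescale to [0, 255] and convert to uint8 for saving
    arr = arr.astype(np.float32)
    arr -= arr.min()
    arr /= max(arr.max(), 1e-8)
    return (arr * 255).astype(np.uint8)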
Example #9
    def get_rgb_and_depth(self, loc, rot, verbose=False):
        # set camera to loc and rot
        self.vset(loc, rot, verbose=verbose)

        res = client.request('vget /camera/0/lit png')
        img = read_png(res)
        if verbose:
            print(res)
        res = client.request('vget /camera/0/depth npy')
        depth = read_npy(res)
        if verbose:
            print(res)

        return img, depth
Example #10
 def sample_anim_frames(self, obj_id, mesh_paths, anim_paths, ratios):
     # Render images using cartesian product of parameters
     self.make_obj('CvCharacter', obj_id, [0, 0, 138])
     self.cam_track_obj(obj_id, 0, 30, 200)
     ims = []
     params = list(itertools.product(mesh_paths, anim_paths, ratios))
     for param in tqdm(params):
         mesh_path, anim_path, ratio = param
         self.request(
             'vset /human/{obj_id}/mesh {mesh_path}'.format(**locals()))
         self.request(
             'vset /human/{obj_id}/animation/ratio {anim_path} {ratio}'.
             format(**locals()))
         res = self.request('vget /camera/0/lit png')
         im = read_png(res)
         ims.append(im)
     return ims
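A hedged call sketch for the method above; the instance name and the asset paths are hypothetical placeholders, not values confirmed by the snippet:

# 'renderer' stands for whatever object exposes sample_anim_frames;
# the mesh/animation asset paths below are hypothetical.
ims = renderer.sample_anim_frames(
    obj_id='human0',
    mesh_paths=['/Game/Meshes/SK_Man.SK_Man'],
    anim_paths=['/Game/Anims/Walk.Walk'],
    ratios=[0.0, 0.5, 1.0])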
Example #11
    rgba = tuple([int(color[2:]) for color in tmp_color])
    actor_color_dict[actor_name] = rgba
    for target_name in target_name_list:
        if actor_name.startswith(target_name):
            if target_name not in target_actor_dict:
                target_actor_dict[target_name] = [actor_name]
            else:
                target_actor_dict[target_name].append(actor_name)
            break

# save to png file
render_file_path = output_folder + 'render' + img_file_id + '.png'
mask_file_path = output_folder + 'mask' + img_file_id + '.png'
client.request('vget /camera/' + camera_id + '/lit ' + render_file_path)
mask_res = client.request('vget /camera/' + camera_id + '/object_mask png')
mask_array = read_png(mask_res)
cv2.imwrite(mask_file_path, mask_array)

for target in target_actor_dict:
    target_rect_dict[target] = []
    actor_list = target_actor_dict[target]
    for actor_name in actor_list:
        current_target = np.all((mask_array == actor_color_dict[actor_name]),
                                axis=-1).astype(np.uint8)
        # cv2.findContours returns 3 values in OpenCV 3.x but only 2
        # (contours, hierarchy) in OpenCV 4.x
        _, contours, _ = cv2.findContours(current_target, cv2.RETR_EXTERNAL,
                                          cv2.CHAIN_APPROX_SIMPLE)
        # top_left x, y, width, height
        rect_info = cv2.boundingRect(contours[0])
        target_rect_dict[target].append(rect_info)

full_info_dict = {}
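The snippet above begins mid-script; tmp_color is presumably the split pieces of a per-object color response such as '(R=127,G=0,B=0,A=255)'. A hedged sketch of that query and parsing, with the exact command path treated as an assumption:

# Assumes the per-actor annotation color is queried like this:
res = client.request('vget /object/' + actor_name + '/color')
# res looks like '(R=127,G=0,B=0,A=255)'
tmp_color = res.strip('()').split(',')
rgba = tuple(int(part.split('=')[1]) for part in tmp_color)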
Example #12
 def capture_img(self):
     res = self.client.request('vget /camera/{id}/lit png'.format(id = self.id))
     img = read_png(res)
     return img
def get_mask(cam_ind=0):
  ## TODO: 052319, Currently not working.
  res = client.request('vget /camera/{cam_ind}/object_mask png'.format(**locals()))
  object_mask = read_png(res)
  return object_mask
def get_rgb(cam_ind=0):
  res = client.request('vget /camera/{:d}/lit png'.format(cam_ind))
  img = read_png(res)
  return img
Example #15
from unrealcv import client
from unrealcv.util import read_png, read_npy
import argparse
import imageio
import numpy as np
import os

parser = argparse.ArgumentParser()
parser.add_argument('-n', type=int, default=1, help='Number of iterations')
parser.add_argument('--cam_id', type=int, help='Camera id for data capture')
args = parser.parse_args()

client.connect()

if args.cam_id is not None:  # cam_id can legitimately be 0
    cam_ids = [args.cam_id]
else:
    cams = client.request('vget /cameras').strip().split(' ')
    print(cams)
    cam_ids = range(len(cams))

os.makedirs('tmp', exist_ok=True)  # imageio.imsave fails if tmp/ is missing
for i in range(args.n):
    for cam_id in cam_ids:
        res = client.request('vget /camera/{cam_id}/lit png'.format(**locals()))
        imageio.imsave('tmp/lit_{cam_id}.png'.format(**locals()), read_png(res))
        # client.request('vget /camera/{cam_id}/depth npy'.format(**locals()))
        # np.save('tmp/depth_{cam_id}.npy'.format(**locals()), read_npy(res))
        res = client.request('vget /camera/{cam_id}/object_mask png'.format(**locals()))
        imageio.imsave('tmp/seg_{cam_id}.png'.format(**locals()), read_png(res))
Example #16
import imageio
import os
import argparse
from unrealcv import client
from unrealcv.util import read_png

parser = argparse.ArgumentParser()
parser.add_argument('img_filename')
args = parser.parse_args()
fname = args.img_filename

client.connect()
client.request('vset /arm/owi535/random_pose')
# Set the arm to a random pose
# Or set the arm to a specific pose with
# client.request('vset /arm/owi535/pose 20 20 20 20 20')

client.request('vset /camera/0/location 320 0 300')
client.request('vset /camera/0/rotation -30 180 0')
res = client.request('vget /camera/0/lit png')
im = read_png(res)

texture_filename = os.path.abspath('./000000000139.jpg')
client.request('vset /env/sky/texture %s' % texture_filename)
client.request('vset /env/floor/texture %s' % texture_filename)
client.request('vset /env/random_lighting')

imageio.imwrite(fname, im)  # Note: im was captured before the texture/lighting changes above

keypoints = client.request('vget /arm/owi535/keypoints')
print(keypoints)
# Dense 3D keypoints in the world space.
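The format of the keypoints response is not shown here; a heavily hedged parsing sketch, assuming one 'name x y z' entry per line (verify against your build's actual output):

# Assumption: each line is '<name> <x> <y> <z>'; adjust if your build differs
kpt = {}
for line in keypoints.strip().split('\n'):
    parts = line.split()
    if len(parts) == 4:
        kpt[parts[0]] = [float(v) for v in parts[1:]]
print(len(kpt), 'keypoints parsed')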
Example #17
def get_image(dist, az, el):
    client.request('vset /car/0/camera {dist} {az} {el}'.format(**locals()))
    lit = client.request('vget /car/0/image')
    lit = read_png(lit)
    return lit
 def get_rgb(self):
     id = self.id
     res = self.request('vget /camera/{id}/lit png'.format(**locals()))
     return read_png(res)
Example #19
 def capture_frame(func=None):
     res = client.request('vget /camera/0/lit png')
     im = read_png(res)
     return im
Example #20
def get_img_from_preds(preds):
    set_camrea(preds)
    i = 1
    data = client.request(f'vget /camera/{i}/lit png')
    im = read_png(data)
    return im
Example #21
from unrealcv import client
from unrealcv.util import read_png
import imageio

client.connect()

client.request('vset /car/0/mesh/id suv')
# The id can be selected from
# [suv, sedan2door, sedan4door, hybrid, hatchback]
client.request('vset /car/0/door/angles 30 30 30 30 30 30')
# The six values are associated to fl (front left), fr, bl, br, hood, trunk
img = read_png(client.request('vget /car/0/image'))
imageio.imwrite('car.png', img)
Example #22
def get_img_from_preds(preds):
    set_camrea(preds)
    i = 1
    data = client.request('vget /camera/{i}/lit png'.format(**locals()))
    im = read_png(data)
    return im
Example #23
 def request_image(self):
     image_data = ue4.request(f"vget /camera/0/lit png")
     return read_png(image_data)
 def get_img(self):
     obj_id = self.id
     img = self.request('vget /car/{obj_id}/image'.format(**locals()))
     img = read_png(img)
     return img
Example #25
def get_seg():
    seg = client.request('vget /car/0/seg')
    seg = read_png(seg)
    return seg
 def get_seg(self):
     obj_id = self.id
     seg = self.request('vget /car/{obj_id}/seg'.format(**locals()))
     seg = read_png(seg)
     return seg
# The snippet begins mid-loop; the loop header and the location request are
# reconstructed here to mirror the rotation request below.
for i in range(len(camera_traj)):
    client.request('vset /camera/0/location {x} {y} {z}'.format(
        x=camera_traj[i]['location'][0],
        y=camera_traj[i]['location'][1],
        z=camera_traj[i]['location'][2]))
    client.request('vset /camera/0/rotation {x} {y} {z}'.format(
        x=camera_traj[i]['rotation'][0],
        y=camera_traj[i]['rotation'][1],
        z=camera_traj[i]['rotation'][2]))
    res_depth = client.request('vget /camera/0/depth npy')
    res_lit = client.request('vget /camera/0/lit png')
    res_object = client.request('vget /camera/0/object_mask png')
    res_normal = client.request('vget /camera/0/normal png')

    im_depth = read_npy(res_depth)
    # Depth values can exceed 5000, which shows up as white in the image,
    # so clamp them to 0, which renders as black instead
    im_depth[im_depth > 5000] = 0
    im_lit = read_png(res_lit)
    im_object = read_png(res_object)
    im_normal = read_png(res_normal)

    pil_img_depth = Image.fromarray(im_depth)
    pil_img_lit = Image.fromarray(im_lit)
    pil_img_object = Image.fromarray(im_object)
    pil_img_normal = Image.fromarray(im_normal)

    pil_img_depth = pil_img_depth.convert('RGB')
    #     pil_img_depth.save('./data_arch/depth_image/depth_image_{i}.png'.format(i=i))
    pil_img_depth.save(depth_image_path + '/depth_image_{i}.png'.format(i=i))
    #     np.save('./data_arch/depth/depth_{i}'.format(i=i),im_depth)
    np.save(depth_path + '/depth_{i}'.format(i=i), im_depth)
    #     pil_img_lit.save('./data_arch/lit/lit_{i}.png'.format(i=i))
    pil_img_lit.save(lit_path + '/lit_{i}.png'.format(i=i))
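camera_traj is not defined in this snippet; based on how it is indexed, each entry holds a location and a rotation. A hypothetical example of the assumed structure:

# Hypothetical trajectory; each entry matches the fields read above
camera_traj = [
    {'location': [0.0, 0.0, 100.0], 'rotation': [0.0, 0.0, 0.0]},
    {'location': [50.0, 0.0, 100.0], 'rotation': [0.0, 90.0, 0.0]},
]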
 def get_mask(self):
     id = self.id
     res = self.request(
         'vget /camera/{id}/object_mask png'.format(**locals()))
     return read_png(res)
def render_from_file(meta_data, max_indx=5):
    clear_env()
    car_shape, car_rot, car_trans, cam_location, cam_rotation, img_fov, img_size, kpt_dict = load_meta_data(
        meta_data)
    request('vset /camera/1/location {:.6f} {:.6f} {:.6f}'.format(
        cam_location[0], cam_location[1], cam_location[2]))
    request('vset /camera/1/rotation {:.6f} {:.6f} {:.6f}'.format(
        cam_rotation[0], cam_rotation[1], cam_rotation[2]))
    request('vset /camera/1/fov {:.6f}'.format(img_fov))

    car_group = Car_Manager()

    # if max_indx <= 0:
    #     max_indx = max_indx + len(car_shape)

    #num_obj = min([len(car_trans), max_indx, 2])
    num_obj = 2
    car_color = [[i, 0, 0] for i in range(num_obj)]

    shape = car_shape[0]
    # for i in range(num_obj):
    #     car_group.add_car(shape, shape_lib="ShapenetKeypoint")
    #     #car_group.trans_car(car_trans[i], "car{}".format(i))
    #     car_group.trans_car([0, 100*i,150*i+100], "car{}".format(i))
    #     car_group.rot_car([0,0,0], "car{}".format(i))
    #     car_group.annotate_car(car_color[i], "car{}".format(i))

    with open("example/car_activity/render_shape.json", "r") as f:
        data_render = json.load(f)

    trans_model = data_render[shape]["trans"]

    i = 0
    car_group.add_car(shape, scale=(-1, 1, 1))
    print(trans_model)
    car_group.trans_car([
        trans_model[0] * 500, trans_model[2] * 500, 200 + trans_model[1] * 500
    ], "car{}".format(i))
    car_group.rot_car([0, 90, 0], "car{}".format(i))
    car_group.annotate_car(car_color[i], "car{}".format(i))
    i = 1
    car_group.add_car(shape, shape_lib="ShapenetKeypoint")
    car_group.trans_car([0, 0, 200], "car{}".format(i))
    car_group.rot_car([0, 0, 0], "car{}".format(i))
    car_group.annotate_car(car_color[i], "car{}".format(i))

    car_group.flush()

    cwd_root, _ = os.path.split(meta_data)
    base_dir = join(cwd_root, "render_res")
    if DEBUG: print("Finished write object pose data ")
    time.sleep(0.1)
    if DEBUG: print("Acquiring image ...")
    img = read_png(request('vget /camera/1/lit png'))
    cv2.imwrite(join(base_dir, "car_arrangment.png"), img[:, :, 2::-1])

    mask = read_png(request('vget /camera/1/object_mask png'))
    cv2.imwrite(join(base_dir, "car_mask.png"), mask[:, :, 2::-1])

    png = read_png(request('vget /camera/0/lit png'))
    cv2.imwrite(join(base_dir, "overview.png"), png[:, :, 2::-1])

    for i in range(len(car_color)):
        obj_mask = udb.get_mask(mask, car_color[i])
        [ys, xs] = np.where(obj_mask)
        if len(xs) == 0:
            # Skip colors with no visible pixels (e.g. a fully occluded car)
            continue
        bbox = [min(xs), max(xs), min(ys), max(ys)]
        print(bbox)
        obj_img = udb.mask_img(img, obj_mask)
        bbox_img = img[min(ys):max(ys), min(xs):max(xs), :]
        cv2.imwrite(join(base_dir, "car%d_seg.png" % i), obj_img[:, :, 2::-1])
        cv2.imwrite(join(base_dir, "car%d_bbox.png" % i), bbox_img[:, :,
                                                                   2::-1])

    if DEBUG: print("Finished write image to files")

    return True
Example #31
 def get_im(self):
     res = self.request('vget /camera/0/lit png')
     return read_png(res)
Example #32
from unrealcv import client
from unrealcv.util import read_png
import imageio

client.connect()

im = read_png(client.request('vget /camera/0/lit png'))
imageio.imwrite('hello_world.png', im)
Example #33
 def get_seg(self):
     res = self.request('vget /camera/0/object_mask png')
     return read_png(res)