def run_commands_demo(logger):
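    # Give the game and the UnrealCV server time to finish loading before issuing commands.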
    time.sleep(10)

    logger.request('vget /unrealcv/status')
    logger.request('vget /unrealcv/version')
    logger.request('vget /scene/name')

    res = logger.request('vget /camera/0/location')
    res = logger.request('vget /camera/0/rotation')

    # res = logger.request('vset /camera/0/location -162 -126 90')
    # res = logger.request('vset /camera/0/rotation 348 60 0')

    res = logger.request('vget /camera/0/lit png', log=False)
    img = read_png(res)
    res = logger.request('vget /camera/0/depth npy', log=False)
    depth = read_npy(res)
    clip_far = np.median(depth) * 5  # use median instead of mean
    logger.writelines('Before clip, max=%f, mean=%f' %
                      (depth.max(), depth.mean()))
    # Truncate the depth of the window and sky to make them easier to see
    depth[depth > clip_far] = clip_far
    logger.writelines('After clip, max=%f, mean=%f' %
                      (depth.max(), depth.mean()))

    res = logger.request('vget /camera/0/normal npy', log=False)
    normal = read_npy(res)

    res = logger.request('vget /camera/0/object_mask png', log=False)
    object_mask = read_png(res)

    logger.save_image("img.png", img)
    logger.save_image("depth.png", normalize(depth))
    logger.save_image("surface_normal.png", normalize(normal))
    logger.save_image("object_mask.png", object_mask)

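    # Requesting with a filename instead of a format saves the image to disk
    # and returns the path of the saved file.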
    res = logger.request('vget /camera/0/lit lit.png')
    res = logger.request('vget /camera/0/depth depth.png')
    res = logger.request('vget /camera/0/object_mask object_mask.png')

    res = logger.request('vget /objects', log=False)
    object_names = res.split(' ')
    logger.writelines(object_names[:5])
Example #2
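# Place the camera at the given distance, azimuth and elevation around the
# animal, then fetch its lit image, segmentation mask and depth map.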
def get_image(obj, dist, az, el):
    client.request(
        'vset /animal/{obj}/camera {dist} {az} {el}'.format(**locals()))
    lit = client.request('vget /animal/{obj}/image'.format(**locals()))
    seg = client.request('vget /animal/{obj}/seg'.format(**locals()))
    depth = client.request('vget /animal/{obj}/depth'.format(**locals()))
    lit = read_png(lit)
    seg = read_png(seg)
    depth = read_npy(depth)

    return lit, seg, depth
Example #4
    def get_rgb_and_depth(self, loc, rot, verbose=False):
        # set camera to loc and rot
        self.vset(loc, rot, verbose=verbose)

        res = client.request('vget /camera/0/lit png')
        img = read_png(res)
        if verbose:
            print(res)
        res = client.request('vget /camera/0/depth npy')
        depth = read_npy(res)
        if verbose:
            print(res)

        return img, depth
Example #5
    def imageCapture(self):
        self.stop = False

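        # Request every view of camera component 0 in the selected view mode
        # as raw npy data; an error comes back as a plain string.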
        cmd = 'vget /cameraComponent/0/all/' + self.viewMode.get().lower() + ' npy'
        res = client.request(cmd)
        if not isinstance(res, str):
            images = read_npy(res)
            self.log.insert(END, "LOG: Camera component captured\n")
        else:
            images = []
            self.log.insert(END, "LOG ERROR: Can't parse npy -> " + str(res) + "\n")

        self.showImageCaptured(images)

        self.predict(images)

    def get_depth(self):
        id = self.id
        res = self.request('vget /camera/{id}/depth npy'.format(**locals()))
        return read_npy(res)

    def get_depth(self):
        obj_id = self.id
        depth = self.request('vget /animal/{obj_id}/depth'.format(**locals()))
        depth = read_npy(depth)
        return depth

    def get_depth(self):
        res = self.request('vget /camera/0/depth npy')
        depth = read_npy(res)
        # cut-off for better visualization
        depth[depth > 5000] = 0
        return depth

import time

import matplotlib.pyplot as plt

from unrealcv import client
from unrealcv.util import read_npy

# Connect to the UnrealCV server before issuing any command.
client.connect()
if not client.isconnected():
    print('UnrealCV server is not running')
else:
    print("Start: ")
    # Test connection
    res = client.request('vget /unrealcv/status')
    print("Status: " + str(res))

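    # Time a single batched request that returns all views of camera
    # component 0 at once.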
    start_time = time.time()
    res = client.request('vget /cameraComponent/0/all/lit npy')
    print("--- %2.3f seconds ---" % (time.time() - start_time))

    img_1 = read_npy(res)

    # my_img = img_1[...,:3]

    print(img_1.shape)

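    # Time the same capture as four separate per-view requests for comparison.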
    start_time = time.time()
    res = client.request('vget /cameraComponent/0/0/lit npy')
    res = client.request('vget /cameraComponent/0/1/lit npy')
    res = client.request('vget /cameraComponent/0/2/lit npy')
    res = client.request('vget /cameraComponent/0/3/lit npy')
    print("--- %2.3f seconds ---" % (time.time() - start_time))

    
    f, axarr = plt.subplots(2, 2)
    # axarr.imshow(img_1)

    camera_str = '{},{},{},{},{},{}'.format(camera_traj[i]['location'][0],
                                            camera_traj[i]['location'][1],
                                            camera_traj[i]['location'][2],
                                            camera_traj[i]['rotation'][0],
                                            camera_traj[i]['rotation'][1],
                                            camera_traj[i]['rotation'][2])
    print(camera_str)  # 472.783,-204.083,95.24,357.459,40.746,0.0
    client.request('vset /camera/0/location {x} {y} {z}'.format(
        x=camera_traj[i]['location'][0],
        y=camera_traj[i]['location'][1],
        z=camera_traj[i]['location'][2]))
    client.request('vset /camera/0/rotation {x} {y} {z}'.format(
        x=camera_traj[i]['rotation'][0],
        y=camera_traj[i]['rotation'][1],
        z=camera_traj[i]['rotation'][2]))
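    # Capture depth, lit, object mask and surface normal at this camera pose.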
    res_depth = client.request('vget /camera/0/depth npy')
    res_lit = client.request('vget /camera/0/lit png')
    res_object = client.request('vget /camera/0/object_mask png')
    res_normal = client.request('vget /camera/0/normal png')

    im_depth = read_npy(res_depth)
    # Depth values above 5000 show up as white areas in the image, so set
    # them to 0 so that they render as black.
    im_depth[im_depth > 5000] = 0
    im_lit = read_png(res_lit)
    im_object = read_png(res_object)
    im_normal = read_png(res_normal)

    pil_img_depth = Image.fromarray(im_depth)
    pil_img_lit = Image.fromarray(im_lit)
    pil_img_object = Image.fromarray(im_object)
    pil_img_normal = Image.fromarray(im_normal)

    pil_img_depth = pil_img_depth.convert('RGB')
    #     pil_img_depth.save('./data_arch/depth_image/depth_image_{i}.png'.format(i=i))
    pil_img_depth.save(depth_image_path + '/depth_image_{i}.png'.format(i=i))
    #     np.save('./data_arch/depth/depth_{i}'.format(i=i),im_depth)
Example #11
    def capture_depth(self):
        res = self.client.request('vget /camera/{id}/depth npy'.format(id=self.id))
        depth = read_npy(res)
        return depth
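
For context, a minimal usage sketch of the snippet above; the CameraWrapper class, its cam_id parameter and the __main__ guard are assumptions for illustration only, wrapping the same request pattern shown in Example #11.

from unrealcv import client
from unrealcv.util import read_npy


class CameraWrapper:
    # Hypothetical wrapper, used only to illustrate the snippet above.
    def __init__(self, cv_client, cam_id=0):
        self.client = cv_client
        self.id = cam_id

    def capture_depth(self):
        res = self.client.request('vget /camera/{id}/depth npy'.format(id=self.id))
        depth = read_npy(res)
        return depth


if __name__ == '__main__':
    client.connect()
    if not client.isconnected():
        print('UnrealCV server is not running')
    else:
        depth = CameraWrapper(client).capture_depth()
        print(depth.shape, depth.dtype)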