Example #1
from __future__ import print_function
import numpy as np
import cv2
from pybh.contrib import transformations
from pybh import math_utils
from pybh import log_utils

logger = log_utils.get_logger("RLrecon/engines/engine")


class StereoWrapper(object):
    def __init__(self,
                 base_engine,
                 stereo_method,
                 stereo_baseline,
                 width,
                 height,
                 min_depth,
                 num_disparities=64,
                 block_size=None):
        self._base_engine = base_engine
        self._stereo_method = stereo_method
        self._stereo_baseline = stereo_baseline
        self._width = width
        self._height = height
        self._min_depth = min_depth
        self._num_disparities = num_disparities
        self._focal_length = base_engine.get_focal_length()
        self._left_matcher = self._create_left_matcher(stereo_method,
                                                       num_disparities,
                                                       block_size)
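
The example is cut off before _create_left_matcher is defined. A minimal sketch of what such a factory could look like, assuming OpenCV's stock block matchers; the "bm"/"sgbm" method names and the default block sizes are illustrative assumptions, not taken from the source:

import cv2

def create_left_matcher(stereo_method, num_disparities, block_size=None):
    # Hypothetical dispatch on the method name to one of OpenCV's
    # stock matchers; the defaults below are assumptions.
    if block_size is None:
        block_size = 15 if stereo_method == "bm" else 5
    if stereo_method == "bm":
        return cv2.StereoBM_create(numDisparities=num_disparities,
                                   blockSize=block_size)
    elif stereo_method == "sgbm":
        return cv2.StereoSGBM_create(minDisparity=0,
                                     numDisparities=num_disparities,
                                     blockSize=block_size)
    raise ValueError("Unknown stereo method: {}".format(stereo_method))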
Example #2
from __future__ import absolute_import

# Workaround for a segmentation fault that occurs with some library versions when scipy.ndimage is imported after tensorflow.
import scipy.ndimage as nd

import argparse
import numpy as np
from pybh import tensorpack_utils
import data_record
from pybh import serialization
from pybh import msgpack_utils
from pybh import lmdb_utils
from pybh.utils import argparse_bool, logged_time_measurement
from pybh import log_utils

logger = log_utils.get_logger("reward_learning/split_data_lmdb")


def dict_from_dataflow_generator(df):
    for sample in df.get_data():
        yield sample[0]


def split_lmdb_dataset(lmdb_input_path,
                       lmdb_output_path1,
                       lmdb_output_path2,
                       split_ratio1,
                       batch_size,
                       shuffle,
                       serialization_name,
                       compression,
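
The signature is truncated above. A sketch of the core splitting step, assuming samples are stored as opaque key/value records; it uses the plain lmdb package in place of pybh.lmdb_utils, and omits the shuffling and batching the real function supports:

import lmdb

def split_lmdb(input_path, output_path1, output_path2, split_ratio1):
    # Copy the first split_ratio1 fraction of records into the first
    # output database and the remainder into the second.
    env_in = lmdb.open(input_path, readonly=True, lock=False)
    env1 = lmdb.open(output_path1, map_size=env_in.info()["map_size"])
    env2 = lmdb.open(output_path2, map_size=env_in.info()["map_size"])
    num_records = env_in.stat()["entries"]
    num_records1 = int(round(split_ratio1 * num_records))
    with env_in.begin() as txn_in, \
            env1.begin(write=True) as txn1, \
            env2.begin(write=True) as txn2:
        for i, (key, value) in enumerate(txn_in.cursor()):
            target_txn = txn1 if i < num_records1 else txn2
            target_txn.put(key, value)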
Example #3
import numpy as np
import pvm
from pybh import serialization
from pybh import log_utils
from pybh import camera_utils
from pybh import math_utils
from pybh import utils

logger = log_utils.get_logger("RLrecon/render_server_zmq")


class RendererZMQService(object):
    def __init__(self,
                 framebuffer,
                 drawer,
                 fov,
                 initial_distance,
                 trackball=None,
                 world_to_opengl_mat=None):
        self._serializer = serialization.MsgPackSerializer()
        self._framebuffer = framebuffer
        self._drawer = drawer
        self._trackball = trackball
        if world_to_opengl_mat is None:
            world_to_opengl_mat = np.eye(4, dtype=np.float32)
        self._world_to_opengl_mat = world_to_opengl_mat
        self._opengl_to_world_mat = np.linalg.inv(self._world_to_opengl_mat)
        self._projection_transform = pvm.PerspectiveTransform(
            framebuffer.width, framebuffer.height, fov=fov)
        self._view_transform = pvm.ViewTransform()
        self._view_transform.z = -initial_distance
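
The default world_to_opengl_mat is the identity. A minimal sketch of a non-trivial value, assuming a z-up world frame that has to be rotated into OpenGL's y-up convention; the exact convention is an assumption, since the class only requires an invertible 4x4 float32 matrix:

import numpy as np

# Map world (x, y, z) with z up to OpenGL (x, z, -y) with y up.
world_to_opengl_mat = np.array([[1, 0, 0, 0],
                                [0, 0, 1, 0],
                                [0, -1, 0, 0],
                                [0, 0, 0, 1]], dtype=np.float32)
opengl_to_world_mat = np.linalg.inv(world_to_opengl_mat)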
Example #4
import numpy as np
from pybh import pybh_yaml as yaml
from pybh import hdf5_utils, tensorpack_utils, serialization
import data_record
import tensorflow as tf
import tensorpack.dataflow
import tensorpack.utils.serialize
from pybh import lmdb_utils
import input_pipeline
import configuration
import traceback
from pybh.utils import argparse_bool
from pybh.attribute_dict import AttributeDict
from pybh import log_utils

logger = log_utils.get_logger("reward_learning/write_data_to_lmdb")


def write_hdf5_files_to_lmdb(input_and_target_retriever,
                             tf_cfg,
                             hdf5_path,
                             lmdb_path,
                             max_num_files,
                             batch_size,
                             serialization_name,
                             compression,
                             compression_arg,
                             append=False,
                             max_num_samples=None,
                             verbose=False):
    logger.info("Data path: {}".format(hdf5_path))
Example #5
from pybh import log_utils
from pybh import zmq_utils
from pybh import utils
from pybh import camera_utils
from pybh import math_utils
from pybh.utils import argparse_bool
from framebuffer import Framebuffer
from trackball import Trackball
from mesh_drawer import MeshDrawer
from mesh import SimpleMesh, CubeMesh
from application import Application
from framebuffer_drawer import FramebufferDrawer
from window import Window
import renderer_zmq_service

logger = log_utils.get_logger("RLrecon/mesh_renderer")

# import pydevd
# pydevd.settrace('localhost', port=1234, stdoutToServer=True, stderrToServer=True)


def run(args):
    # Command line arguments
    address = args.address
    show_window = args.show_window
    poll_timeout = args.poll_timeout
    width = args.width
    height = args.height
    mesh_filename = args.mesh_filename
    use_msgpack_for_mesh = args.use_msgpack_for_mesh
    window_width = args.window_width
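
run() is truncated after the argument unpacking. A minimal sketch of the ZMQ serve loop such a render server needs, using plain pyzmq; the bind address and reply payload are assumptions, and the real request handling lives in renderer_zmq_service, which is not shown:

import zmq

def serve(address="tcp://*:22222", poll_timeout=1000):
    context = zmq.Context()
    socket = context.socket(zmq.REP)
    socket.bind(address)
    poller = zmq.Poller()
    poller.register(socket, zmq.POLLIN)
    while True:
        # poll_timeout is in milliseconds; an empty dict means the poll
        # timed out, which gives the loop a chance to process events.
        events = dict(poller.poll(poll_timeout))
        if socket in events:
            request = socket.recv()
            # ... decode the request and render here ...
            socket.send(b"ok")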
Example #6
from pybh import pybh_yaml as yaml
import tensorflow as tf
import tensorflow.contrib.memory_stats as tf_memory_stats
from tensorflow.python.framework.errors_impl import InvalidArgumentError as TFInvalidArgumentError
from pybh import tf_utils, log_utils
import data_provider
import input_pipeline
import configuration
import traceback
from pybh.utils import argparse_bool
from pybh.attribute_dict import AttributeDict
from tensorflow.python.client import timeline
from pybh.utils import Timer, logged_time_measurement
from pybh import progressbar

logger = log_utils.get_logger("reward_learning/train")


def run(args):
    # Read config file
    topics = ["data"]
    cfg = configuration.get_config_from_cmdline(args, topics)
    if args.config is not None:
        with open(args.config, "r") as config_file:
            tmp_cfg = yaml.load(config_file)
            configuration.update_config_from_other(cfg, tmp_cfg)

    cfg = AttributeDict.convert_deep(cfg)

    logger.info("Creating train dataflow")
    print(cfg.data.max_num_samples)
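
configuration.update_config_from_other is not shown. A sketch of the recursive merge it presumably performs, written against plain dicts, so that values from the YAML file override same-named command-line defaults:

def update_config_from_other(cfg, other_cfg):
    # Recursively copy entries from other_cfg into cfg; nested dicts
    # are merged rather than replaced wholesale.
    for key, value in other_cfg.items():
        if isinstance(value, dict) and isinstance(cfg.get(key), dict):
            update_config_from_other(cfg[key], value)
        else:
            cfg[key] = value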
Example #7
#!/usr/bin/env python
from __future__ import print_function

import os
import argparse
from builtins import range
import numpy as np
import yaml
import env_factory
from pybh.utils import Timer, TimeMeter, DummyTimeMeter, argparse_bool
from pybh import hdf5_utils
from pybh import file_utils
from pybh import log_utils

logger = log_utils.get_logger("evaluate_reward_trajectory")


def run_episode(environment,
                pose_list,
                downsample_to_grid=True,
                measure_timing=False):
    timer = Timer()
    if measure_timing:
        time_meter = TimeMeter()
    else:
        time_meter = DummyTimeMeter()

    intrinsics = environment.base.get_engine().get_intrinsics()

    logger.info("Starting episode")
    environment.reset()
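
TimeMeter and DummyTimeMeter come from pybh.utils and are not shown. A minimal sketch of the pattern: a meter that accumulates wall-clock time per named step behind a context manager, plus a no-op stand-in so timing can be disabled without touching call sites. The measure_time method name is an assumption:

import contextlib
import time
from collections import defaultdict

class TimeMeter(object):
    def __init__(self):
        self._times = defaultdict(float)

    @contextlib.contextmanager
    def measure_time(self, name):
        start = time.time()
        yield
        self._times[name] += time.time() - start

class DummyTimeMeter(TimeMeter):
    @contextlib.contextmanager
    def measure_time(self, name):
        # No-op variant: same interface, no measuring overhead.
        yield

A call site then reads, e.g., "with time_meter.measure_time(\"reset\"): environment.reset()" regardless of which meter was constructed.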
Example #8
import time
import numpy as np
import cv2
from pybh import math_utils
from pybh import zmq_utils
from pybh import serialization
from pybh import log_utils
from pybh.contrib import transformations
from .engine import BaseEngine


logger = log_utils.get_logger("RLrecon/mesh_renderer_zmq_client")


class MeshRendererZMQClient(BaseEngine):

    INTER_NEAREST = cv2.INTER_NEAREST
    INTER_CUBIC = cv2.INTER_CUBIC

    class Exception(RuntimeError):
        pass

    def __init__(self,
                 address="tcp://localhost:22222",
                 image_scale_factor=1.0,
                 max_depth_distance=np.finfo(np.float64).max,
                 max_depth_viewing_angle=math_utils.degrees_to_radians(90.),
                 max_request_trials=3,
                 request_timeout=0.5,
                 location_tolerance=1e-3,
                 orientation_tolerance=1e-3,
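
The constructor's remaining arguments are cut off, but max_request_trials and request_timeout point at a retrying request loop. A sketch of that pattern with pyzmq (the classic "lazy pirate" client; all names here are assumptions):

import zmq

def request_with_retries(context, address, request_bytes,
                         max_request_trials=3, request_timeout=0.5):
    for _ in range(max_request_trials):
        # REQ sockets lock up after an unanswered send, so each trial
        # uses a fresh socket.
        socket = context.socket(zmq.REQ)
        socket.connect(address)
        socket.send(request_bytes)
        # Wait up to request_timeout seconds for the reply.
        if socket.poll(int(request_timeout * 1000)) & zmq.POLLIN:
            reply = socket.recv()
            socket.close()
            return reply
        socket.close(linger=0)
    raise RuntimeError("no reply after {} trials".format(max_request_trials))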
Example #9
from octomap_server_ext.srv import InsertPointCloud, InsertPointCloudRequest
from octomap_server_ext.srv import InsertDepthMap, InsertDepthMapRequest
from octomap_server_ext.msg import Ray, Voxel
from octomap_server_ext.srv import Info, InfoRequest
from octomap_server_ext.srv import QueryVoxels, QueryVoxelsRequest
from octomap_server_ext.srv import QuerySubvolume, QuerySubvolumeRequest
from octomap_server_ext.srv import QueryBBox, QueryBBoxRequest
from octomap_server_ext.srv import Raycast, RaycastRequest
from octomap_server_ext.srv import RaycastCamera, RaycastCameraRequest
from octomap_server_ext.srv import ClearBoundingBox, ClearBoundingBoxRequest
from octomap_server_ext.srv import OverrideBoundingBox, OverrideBoundingBoxRequest
from octomap_server_ext.srv import Storage, StorageRequest
from octomap_server_ext.srv import Reset, ResetRequest
from pybh import math_utils, ros_utils, log_utils

logger = log_utils.get_logger("RLrecon/octomap_ext_mapper")


class OctomapExtMapper(object):
    class Ray(object):
        def __init__(self, origin, direction):
            self._origin = origin
            self._direction = direction

        def origin(self):
            return self._origin

        def direction(self):
            return self._direction

    class RaycastResultPoint(object):
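
A minimal sketch of how one of the imported services might be invoked with rospy; the service name "octomap_server_ext/info" is an assumption:

import rospy
from octomap_server_ext.srv import Info, InfoRequest

def query_octomap_info(service_name="octomap_server_ext/info"):
    # Block until the service is advertised, then call it once.
    rospy.wait_for_service(service_name)
    info_service = rospy.ServiceProxy(service_name, Info)
    return info_service(InfoRequest())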
Example #10
#!/usr/bin/env python

from __future__ import print_function

import data_record
import multiprocessing
import numpy as np
import tensorflow as tf
import tensorpack.dataflow
import data_provider
from pybh import hdf5_utils
from pybh import tensorpack_utils
from pybh import log_utils


logger = log_utils.get_logger("reward_learning/input_pipeline")


def compute_data_statistics_from_hdf5_files(data_filenames):
    assert len(data_filenames) > 0
    logger.info("Computing data statistics")
    field_names = ["in_grid_3ds", "out_grid_3ds", "rewards", "scores", "rgb_images", "depth_images", "normal_images"]
    statistics_dict, z_score_statistics_dict = data_record.compute_dataset_stats_from_hdf5_files_v4(
        data_filenames, field_names, compute_z_scores=True)
    logger.info("Data statistics:")
    logger.info("  Mean of in_grid_3d:", np.mean(statistics_dict["in_grid_3ds"]["mean"].flatten()))
    logger.info("  Stddev of in_grid_3d:", np.mean(statistics_dict["in_grid_3ds"]["stddev"].flatten()))
    logger.info("  Mean of z_score:", np.mean(z_score_statistics_dict["in_grid_3ds"]["mean"].flatten()))
    logger.info("  Stddev of z_score:", np.mean(z_score_statistics_dict["in_grid_3ds"]["stddev"].flatten()))
    logger.info("  Size of dataset:", statistics_dict["in_grid_3ds"]["num_samples"])
    mean_z_score_tolerance = 1e-3
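
The mean_z_score_tolerance check suggests the computed z-scores are verified to be properly standardized. A minimal numpy sketch of that normalization and check, assuming mean and stddev were computed over the same data being normalized:

import numpy as np

def z_score_normalize(values, mean, stddev, tolerance=1e-3):
    # Standardize; afterwards the mean should be ~0 and the stddev ~1
    # (up to numerical error), which the assertions verify.
    z_scores = (values - mean) / (stddev + 1e-8)
    assert np.abs(np.mean(z_scores)) < tolerance
    assert np.abs(np.std(z_scores) - 1) < tolerance
    return z_scores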