Example #1
    def __init__(self, grid_processor: GridProcessor):
        Renderer.__init__(self)
        self.grid_processor: GridProcessor = grid_processor

        shader_settings: List[ShaderSetting] = []
        shader_settings.extend([ShaderSetting("grid_point", ["grid/grid.vert", "basic/discard_screen_color.frag"],
                                              ["screen_width", "screen_height"]),
                                ShaderSetting("grid_cube", ["grid/grid_impostor.vert", "basic/screen_color.frag",
                                                            "grid/point_to_cube_impostor.geom"],
                                              ["screen_width", "screen_height"])
                                ])
        self.set_shader(shader_settings)

        self.data_handler: OverflowingVertexDataHandler = OverflowingVertexDataHandler(
            [], [(self.grid_processor.grid_position_buffer, 0), (self.grid_processor.grid_density_buffer, 1)])

        def generate_element_count_func(gp: GridProcessor) -> Callable:
            buffered_gp: GridProcessor = gp

            def element_count_func(buffer: int):
                return buffered_gp.grid_density_buffer.get_objects() - buffered_gp.grid_slice_size

            return element_count_func

        self.render_funcs["grid_point"] = generate_render_function(OGLRenderFunction.ARRAYS, GL_POINTS, 10.0,
                                                                   depth_test=True)
        self.render_funcs["grid_cube"] = generate_render_function(OGLRenderFunction.ARRAYS, GL_POINTS,
                                                                  depth_test=True)
        self.element_count_funcs["grid_point"] = generate_element_count_func(grid_processor)
        self.element_count_funcs["grid_cube"] = generate_element_count_func(grid_processor)

        self.create_sets(self.data_handler)
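The `generate_element_count_func` factory above binds the processor in a closure, so the element count is read live at render time rather than frozen at setup. A minimal self-contained sketch of the same pattern; the `Counter` class and its names are illustrative stand-ins, not part of the renderer API:

from typing import Callable, List

class Counter:
    # Stand-in for a processor that owns a growing buffer.
    def __init__(self) -> None:
        self.items: List[int] = []

    def get_objects(self) -> int:
        return len(self.items)

def generate_element_count_func(counter: Counter) -> Callable[[], int]:
    # The closure keeps a reference to the processor, so each call sees
    # the buffer's current size rather than its size at creation time.
    def element_count_func() -> int:
        return counter.get_objects()

    return element_count_func

counter = Counter()
count_func = generate_element_count_func(counter)
counter.items.append(1)
assert count_func() == 1  # reflects mutations made after binding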
Example #2
    def __init__(self):
        self._SAVE_FILE_PATH = 'save.plsf'
        self._done = False
        self._event_dispatcher = UserEventDispatcher(self)
        self._renderer = Renderer(GameSettings.Screen.WIDTH,
                                  GameSettings.Screen.HEIGHT)
        self._timer = GameTimer()
        self._city = self.load_game()
        self._ui = UI(self)
        self._state = NormalState()
Example #3
def draw_detections_3D(image, detections, cam, model_map):
    """Draws 6D detections onto resized image

        Parameters
        ----------
        image: Numpy array, normalized to [0-1]
        detections: A list of detections for this image, coming from SSD.detect() in the form
            [l, t, r, b, name, confidence, 6D_pose0, ..., 6D_poseN]
        cam: Intrinsics for rendering
        model_map: Mapping of model name to Model3D instance {'obj': model3D}

    """
    if not detections:
        return np.copy(image)

    ren = Renderer((image.shape[1], image.shape[0]), cam)
    ren.clear()
    out = np.copy(image)
    for det in detections:
        model = model_map[det[4]]
        for pose in det[6:]:
            ren.draw_model(model, pose)
            ren.draw_boundingbox(model, pose)
    col, dep = ren.finish()

    # Copy the rendering over into the scene
    mask = np.dstack((dep, dep, dep)) > 0
    out[mask] = col[mask]
    return out
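A hedged call-site sketch for `draw_detections_3D`, following the detection layout in the docstring above; the camera matrix, box, label, and pose values are placeholders, and `model_map` is assumed to already map the label to a loaded `Model3D`:

import numpy as np

image = np.zeros((480, 640, 3), dtype=np.float32)  # normalized [0-1] scene
cam = np.array([[572.4, 0.0, 320.0],
                [0.0, 573.6, 240.0],
                [0.0, 0.0, 1.0]])
pose = np.eye(4)
pose[:3, 3] = [0.0, 0.0, 0.5]  # candidate pose half a metre from the camera
detections = [[10, 20, 110, 140, 'duck', 0.91, pose]]
overlay = draw_detections_3D(image, detections, cam, model_map)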
Example #4
def mainRender(args):
    #Import this module only in render mode, as it needs pygame and PyOpenGL
    from rendering.renderer import Renderer

    lastUpdate = getCurrentTimeMillis()
    running = True
    loops = 0

    ########
    queue = Manager().Queue()
    killSim = Value('i', 0)
    simProcess = Process(target=simLoop, args=(queue, killSim,))
    simProcess.start()

    random.seed(args.seedValue)
    #Initialize the renderer
    initialData = queue.get(block=True)
    numPersons = initialData[0]
    gridSize = initialData[1]
    gridData = initialData[2]
    theRenderer = Renderer(numPersons)
    theRenderer.initPlaceBuffer(gridSize, gridData)
    
    #Get the first simulation datum
    simData = queue.get(block=True)
    simPersons = simData[1]
    simNow = simData[0]
    nowOb = time.Timestamp(simNow)
    ########

    while running:
        now = getCurrentTimeMillis()
        deltaTime = now - lastUpdate

        running = theRenderer.fetchEvents(deltaTime)
        if not queue.empty():
            simData = queue.get(block=False)
            simPersons = simData[1]
            simNow = simData[0]
        theRenderer.render(simPersons, deltaTime, simNow)
        nowOb = time.Timestamp(simNow)
        
        loops += 1
        if loops % 100 == 0:
            print("Simulation time: {0}:{1}:{2}".format(nowOb.day(), nowOb.hourOfDay(), nowOb.minuteOfHour()))

        if args.numDays > 0 and nowOb.day() >= args.numDays:
            running = False

    theRenderer.quit()
    killSim.value = 1
    while not queue.empty():
        queue.get(block=False)
    simProcess.join()
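The queue-plus-kill-flag handshake above is a standard multiprocessing shutdown pattern. A minimal self-contained sketch, assuming the real `simLoop` behaves like this stand-in producer:

from multiprocessing import Manager, Process, Value
import time

def sim_loop(queue, kill_sim):
    tick = 0
    while not kill_sim.value:
        queue.put(tick)  # producer: push simulation state
        tick += 1
        time.sleep(0.01)

if __name__ == '__main__':
    queue = Manager().Queue()
    kill_sim = Value('i', 0)
    sim_process = Process(target=sim_loop, args=(queue, kill_sim))
    sim_process.start()
    print(queue.get(block=True))  # consumer: first simulation datum
    kill_sim.value = 1            # signal the producer to stop
    while not queue.empty():      # drain leftovers, mirroring the shutdown above
        queue.get(block=False)
    sim_process.join()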
Example #5
def precompute_projections(dataset, views, cam, model_map, models):
    """Precomputes the projection information needed for 6D pose construction
    """
    save_path = f"output/{dataset}"  # save the rendered images
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    w, h = 640, 480

    ren = Renderer((w, h), cam)
    for model_name in models:
        count = 0  # image counter
        scene_camera = {}
        scene_gt = {}
        model_path = os.path.join(save_path, model_name[-6:])
        rgb_dir = os.path.join(model_path, 'rgb')
        depth_dir = os.path.join(model_path, 'mask')
        ensure_dir(rgb_dir)
        ensure_dir(depth_dir)

        for i in tqdm(range(len(views))):
            pose = create_pose(views[i], angle_deg=0)
            pose[:3, 3] = [0, 0, 0.5]  # zr = 0.5
            model = model_map[model_name]  # select the model for this object
            ren.clear()
            ren.draw_model(model, pose)
            # ren.draw_boundingbox(model, pose)
            col, dep = ren.finish()  # dep scale factor is 0.001
            col *= 255
            dep[dep > 0] = 255
            # cam2d = project(model.fps, pose, cam)
            # ys, xs = np.nonzero(dep > 0)
            # xs_min = xs_original.min()
            # ys_min = ys_original.min()
            # xs_max = xs_original.max()
            # ys_max = ys_original.max()
            scene_camera[str(i)] = {
                'cam_K': cam.flatten().tolist(),
                'depth_scale': 1,
                'view_level': 4,
            }

            scene_gt[str(i)] = [{
                'cam_R_m2c': pose[:3, :3].flatten().tolist(),
                'cam_t_m2c': pose[:3, 3].flatten().tolist(),
                'obj_id': int(model_name[-2:])
            }]
            # draw fps
            # for i_p, point in enumerate(cam2d):
            #     if i_p == 0:
            #         col = cv2.circle(col, (point[0], point[1]), 3, (0, 255, 0), thickness=-1)
            #     else:
            #         col = cv2.circle(col, (point[0], point[1]), 3, (0, 0, 255), thickness=-1)
            cv2.imwrite(os.path.join(rgb_dir, str(count).zfill(6) + '.png'), col)
            cv2.imwrite(os.path.join(depth_dir, str(count).zfill(6) + '_000000' + '.png'), dep)
            count += 1
        save_json(os.path.join(model_path, 'scene_camera.json'), scene_camera)
        save_json(os.path.join(model_path, 'scene_gt.json'), scene_gt)
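The snippet relies on two small helpers, `ensure_dir` and `save_json`; these sketches are consistent with how they are called above, but they are assumptions rather than the original utilities:

import json
import os

def ensure_dir(path):
    # Create the directory (and any missing parents) if it does not exist yet.
    os.makedirs(path, exist_ok=True)

def save_json(path, obj):
    # Pretty-print so the BOP-style scene_camera/scene_gt files stay readable.
    with open(path, 'w') as f:
        json.dump(obj, f, indent=2)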
Example #6
    def __init__(self, node_processor: NodeProcessor, grid: Grid):
        Renderer.__init__(self)
        self.node_processor = node_processor
        self.grid = grid

        shader_settings: List[ShaderSetting] = []
        shader_settings.extend([ShaderSetting("node_point", ["node/sample.vert", "basic/discard_screen_color.frag"],
                                              ["screen_width", "screen_height"]),
                                ShaderSetting("node_sphere",
                                              ["node/sample_impostor.vert", "node/point_to_sphere_impostor_phong.frag",
                                               "node/point_to_sphere_impostor.geom"],
                                              ["node_object_radius", "node_importance_threshold"]),
                                ShaderSetting("node_transparent_sphere", ["node/sample_impostor.vert",
                                                                          "node/point_to_sphere_impostor_transparent.frag",
                                                                          "node/point_to_sphere_impostor.geom"],
                                              ["node_object_radius", "node_base_opacity", "node_importance_opacity",
                                               "node_depth_opacity",
                                               "node_opacity_exponent", "node_importance_threshold"])
                                ])
        self.set_shader(shader_settings)

        self.data_handler: VertexDataHandler = VertexDataHandler([(self.node_processor.node_buffer, 0)])

        def generate_element_count_func(nproc: NodeProcessor) -> Callable:
            buffered_np: NodeProcessor = nproc

            def element_count_func():
                return buffered_np.get_buffer_points()

            return element_count_func

        self.render_funcs["node_point"] = generate_render_function(OGLRenderFunction.ARRAYS, GL_POINTS, 10.0,
                                                                   depth_test=True)
        self.render_funcs["node_sphere"] = generate_render_function(OGLRenderFunction.ARRAYS, GL_POINTS,
                                                                    depth_test=True)
        self.render_funcs["node_transparent_sphere"] = generate_render_function(OGLRenderFunction.ARRAYS, GL_POINTS,
                                                                                add_blending=True)
        self.element_count_funcs["node_point"] = generate_element_count_func(node_processor)
        self.element_count_funcs["node_sphere"] = generate_element_count_func(node_processor)
        self.element_count_funcs["node_transparent_sphere"] = generate_element_count_func(node_processor)

        self.create_sets(self.data_handler)
Example #7
def verify_6D_poses(detections, model_map, cam, image):
    """For one image, select for each detection the best pose from the 6D pool

    # Arguments
        detections: List of predictions for one image. Each prediction is:
                [xmin, ymin, xmax, ymax, label, confidence,
                (pose00), ..., (poseNM)] where poseXX is a 4x4 matrix
        model_map: Mapping of model name to Model3D instance {'obj': model3D}
        cam: Intrinsics to use for backprojection
        image: The scene color image

    # Returns
        filtered: List of predictions for one image.
                Each prediction has the form:
                [xmin, ymin, xmax, ymax, label, confidence, pose] where pose is a 4x4 matrix

    """
    def compute_grads_and_mags(color):
        gray = cv2.cvtColor(color, cv2.COLOR_BGR2GRAY)
        grads = np.dstack((cv2.Sobel(gray, cv2.CV_32F, 1, 0, ksize=5),
                           cv2.Sobel(gray, cv2.CV_32F, 0, 1, ksize=5)))
        mags = np.sqrt(np.sum(grads**2, axis=2)) + 0.001  # To avoid div/0
        grads /= np.dstack((mags, mags))
        mask = mags < 5
        mags[mask] = 0
        grads[np.dstack((mask, mask))] = 0
        return grads, mags

    scene_grads, scene_mags = compute_grads_and_mags(image)
    scene_grads = np.reshape(scene_grads, (-1, 2))
    #cv2.imshow('mags', scene_mags)

    ren = Renderer((image.shape[1], image.shape[0]), cam)
    filtered = []
    for det in detections:
        model = model_map[det[4]]
        scores = []
        for pose in det[6:]:
            ren.clear()
            ren.draw_model(model, pose)
            ren_grads, ren_mags = compute_grads_and_mags(ren.finish()[0])
            ren_grads = np.reshape(ren_grads, (-1, 2))
            dot = np.sum(np.abs(ren_grads[:, 0] * scene_grads[:, 0] +
                                ren_grads[:, 1] * scene_grads[:, 1]))
            denom = np.sum(ren_mags > 0)  # number of rendered gradient pixels
            scores.append(dot / denom)
        new_det = det[:6]
        new_det.append(det[6 + np.argmax(np.asarray(scores))])  # keep only the best pose
        filtered.append(new_det)

    return filtered
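Together with `draw_detections_3D` from Example #3, this yields a simple verify-then-draw pipeline. A hedged sketch of the chaining, assuming `detections`, `model_map`, `cam`, and `image` are set up as described in the docstrings:

best = verify_6D_poses(detections, model_map, cam, image)  # best pose per box
overlay = draw_detections_3D(image, best, cam, model_map)  # render the winners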
Example #8
def precompute_projections(views, inplanes, cam, model3D):
    """Precomputes the projection information needed for 6D pose construction

    # Arguments
        views: List of 3D viewpoint positions
        inplanes: List of inplane angles in degrees
        cam: Intrinsics to use for translation estimation
        model3D: Model3D instance

    # Returns
        data: a 3D list with precomputed entities with shape
            (views, inplanes, (4x4 pose matrix, 3) )
    """
    w, h = 640, 480
    ren = Renderer((w, h), cam)
    data = []
    if model3D.vertices is None:
        return data

    for v in tqdm(range(len(views))):
        data.append([])
        for i in inplanes:
            pose = create_pose(views[v], angle_deg=i)
            pose[:3, 3] = [0, 0, 0.5]  # zr = 0.5

            # Render object and extract tight 2D bbox and projected 2D centroid
            ren.clear()
            ren.draw_model(model3D, pose)
            box = np.argwhere(ren.finish()[1])  # Deduce bbox from depth rendering
            box = [box.min(0)[1], box.min(0)[0], box.max(0)[1] + 1, box.max(0)[0] + 1]
            centroid = np.matmul(pose[:3, :3], model3D.centroid) + pose[:3, 3]
            centroid_x = cam[0, 2] + centroid[0] * cam[0, 0] / centroid[2]
            centroid_y = cam[1, 2] + centroid[1] * cam[1, 1] / centroid[2]

            # Compute 2D centroid position in normalized, box-local reference frame
            box_w, box_h = (box[2] - box[0]), (box[3] - box[1])
            norm_centroid_x = (centroid_x - box[0]) / box_w
            norm_centroid_y = (centroid_y - box[1]) / box_h

            # Compute normalized diagonal box length
            lr = np.sqrt((box_w / w)**2 + (box_h / h)**2)
            data[-1].append((pose, [norm_centroid_x, norm_centroid_y, lr]))
    return data
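A note on consuming the result: `data[v][i]` pairs the 4x4 pose with its normalized projection triple, indexed in the order of the input `views` and `inplanes` lists. An illustrative lookup, where `view_idx` and `inplane_idx` are placeholders:

pose, proj = data[view_idx][inplane_idx]
norm_cx, norm_cy, diag = proj  # box-local 2D centroid and normalized box diagonal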
Example #9
    def __init__(self, cfg):
        self.cfg = cfg
        self.rgb_topic = cfg['rgb_topic']
        self.depth_topic = cfg['depth_topic']
        self.camK = np.array(cfg['cam_K']).reshape(3, 3)
        self.im_width = int(cfg['im_width'])
        self.im_height = int(cfg['im_height'])
        self.inlier_th = float(cfg['inlier_th'])
        self.ransac_th = float(cfg['ransac_th'])

        self.model_params = inout.load_json(cfg['norm_factor_fn'])
        self.detection_labels = cfg['obj_labels']  # labels of the corresponding detections
        if icp:
            self.ren = Renderer((self.im_width, self.im_height), self.camK)

        n_objs = int(cfg['n_objs'])
        self.target_objs = cfg['target_obj_name']
        self.colors = np.random.randint(0, 255, (n_objs, 3))
        if detect_type == "rcnn":
            # Load Mask R-CNN with the standard estimation parameters
            # (identical for all datasets).
            self.config = BopInferenceConfig(dataset="ros",
                                             num_classes=n_objs + 1,
                                             im_width=self.im_width,
                                             im_height=self.im_height)
            self.config.DETECTION_MIN_CONFIDENCE = 0.3
            self.config.DETECTION_MAX_INSTANCES = 30
            self.config.DETECTION_NMS_THRESHOLD = 0.5

            self.detection_model = modellib.MaskRCNN(mode="inference",
                                                     config=self.config,
                                                     model_dir="/")
            self.detection_model.load_weights(cfg['path_to_detection_weights'],
                                              by_name=True)

        self.obj_models = []
        self.obj_bboxes = []

        self.obj_pix2pose = []
        pix2pose_dir = cfg['path_to_pix2pose_weights']
        th_outlier = cfg['outlier_th']
        self.model_scale = cfg['model_scale']
        for t_id, target_obj in enumerate(self.target_objs):
            weight_fn = os.path.join(
                pix2pose_dir, "{:02d}/inference.hdf5".format(target_obj))
            print("Load pix2pose weights from ", weight_fn)
            model_param = self.model_params['{}'.format(target_obj)]
            obj_param = bop_io.get_model_params(model_param)
            recog_temp = recog.pix2pose(weight_fn,
                                        camK=self.camK,
                                        res_x=self.im_width,
                                        res_y=self.im_height,
                                        obj_param=obj_param,
                                        th_ransac=self.ransac_th,
                                        th_outlier=th_outlier,
                                        th_inlier=self.inlier_th)
            self.obj_pix2pose.append(recog_temp)
            ply_fn = os.path.join(self.cfg['model_dir'],
                                  self.cfg['ply_files'][t_id])
            if icp:
                obj_model = Model3D()
                obj_model.load(ply_fn, scale=cfg['model_scale'])
                self.obj_models.append(obj_model)
            else:
                obj_model = inout.load_ply(ply_fn)
                self.obj_bboxes.append(self.get_3d_box_points(obj_model['pts']))

        rospy.init_node('pix2pose', anonymous=True)
        self.detect_pub = rospy.Publisher("/pix2pose/detected_object",
                                          ros_image)

        #self.pose_pub = rospy.Publisher("/pix2pose/object_pose", Pose)
        self.pose_pub = rospy.Publisher("/pix2pose/object_pose", ros_image)
        self.sub = rospy.Subscriber(self.rgb_topic, ros_image, self.callback)
        self.graph = tf.get_default_graph()
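The constructor reads all of its settings from `cfg`. A hedged sketch of a matching config dict; only the key names come from the code above, and every value is a placeholder:

cfg = {
    'rgb_topic': '/camera/rgb/image_raw',
    'depth_topic': '/camera/depth/image_raw',
    'cam_K': [572.4, 0.0, 325.3, 0.0, 573.6, 242.0, 0.0, 0.0, 1.0],
    'im_width': 640, 'im_height': 480,
    'inlier_th': 0.1, 'ransac_th': 3.0, 'outlier_th': 0.2,
    'norm_factor_fn': 'norm_factor.json',
    'obj_labels': [1], 'n_objs': 1, 'target_obj_name': [1],
    'path_to_pix2pose_weights': 'weights/pix2pose',
    'path_to_detection_weights': 'weights/mask_rcnn.h5',
    'model_dir': 'models', 'ply_files': ['obj_01.ply'],
    'model_scale': 0.001,
}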
Example #10
ref_gt = inout.load_scene_gt(
    os.path.join("/home/kiru/media/hdd/bop/tless/train_render_reconst/000001",
                 "scene_gt.json"))
ref_camera = inout.load_scene_camera(
    os.path.join("/home/kiru/media/hdd/bop/tless/train_render_reconst/000001",
                 "scene_camera.json"))
#bop_dir,source_dir,model_plys,model_info,model_ids,rgb_files,depth_files,mask_files,gts,cam_param_global = bop_io.get_dataset('hb',train=True)
#bop_dir,source_dir,model_plys,model_info,model_ids,rgb_files,depth_files,mask_files,gts,cam_param_global = bop_io.get_dataset('itodd',train=True)
bop_dir, source_dir, model_plys, model_info, model_ids, rgb_files, depth_files, mask_files, gts, cam_param_global = bop_io.get_dataset(
    'ycbv', train=True)

im_width, im_height = cam_param_global['im_size']
camK = cam_param_global['K']
cam_K_list = np.array(camK).reshape(-1)

ren = Renderer((im_width, im_height), camK)
source_dir = bop_dir + "/train"
os.makedirs(source_dir, exist_ok=True)

for i in range(len(model_plys)):
    target_dir = source_dir + "/{:06d}".format(model_ids[i])
    os.makedirs(target_dir + "/rgb", exist_ok=True)
    os.makedirs(target_dir + "/depth", exist_ok=True)
    os.makedirs(target_dir + "/mask", exist_ok=True)
    new_gt = copy.deepcopy(ref_gt)
    new_camera = copy.deepcopy(ref_camera)
    scene_gt = os.path.join(target_dir, "scene_gt.json")
Example #11
        args.mesh_path = 'mesh_templates/uvsphere_16rings.obj'
    else:
        raise
    print('Using autodetected mesh', args.mesh_path)

mesh_template = MeshTemplate(args.mesh_path, is_symmetric=args.symmetric)

if args.generate_pseudogt:
    # Ideally, the renderer should run at a higher resolution than the input image,
    # or a sufficiently high resolution at the very least.
    renderer_res = max(1024, 2*args.pseudogt_resolution)
else:
    # Match neural network input resolution
    renderer_res = args.image_resolution
    
renderer = Renderer(renderer_res, renderer_res)

class ImageDataset(torch.utils.data.Dataset):
    def __init__(self, cmr_dataset, img_size):
        self.cmr_dataset = cmr_dataset
        self.paths = cmr_dataset.get_paths()
        
        self.extra_img_keys = []
        if isinstance(img_size, list):
            for res in img_size[1:]:
                self.extra_img_keys.append(f'img_{res}')

    def __len__(self):
        return len(self.cmr_dataset)

    def __getitem__(self, idx):
Example #12
    def __init__(self, mesh, res_h, res_w):
        super().__init__()

        self.res = (res_h, res_w)
        self.inverse_renderer = Renderer(res_h, res_w)
        self.mesh = mesh
Example #13
                                          pin_memory=True, shuffle=False)


if not args.texture_only:
    # Load libraries needed for differentiable rendering and FID evaluation
    from rendering.renderer import Renderer
    import scipy

    mesh_template = MeshTemplate(args.mesh_path, is_symmetric=args.symmetric_g)

    # For real-time FID evaluation
    if not args.save_results:
        evaluation_res = 299 # Same as Inception input resolution
    else:
        evaluation_res = 512 # For exporting images: higher resolution
    renderer = Renderer(evaluation_res, evaluation_res)
    renderer = nn.DataParallel(renderer, gpu_ids)

    if not args.save_results:
        inception_model = nn.DataParallel(init_inception(), gpu_ids).cuda().eval()

        # Statistics for real images are computed only once and cached
        m_real_train, s_real_train = None, None
        m_real_val, s_real_val = None, None

        # Load precomputed statistics to speed up FID computation
        stats = np.load(os.path.join(cache_dir, f'precomputed_fid_{evaluation_res}x{evaluation_res}_train.npz'), allow_pickle=True)
        m_real_train = stats['stats_m']
        s_real_train = stats['stats_s'] + np.triu(stats['stats_s'].T, 1)
        assert stats['num_images'] == len(train_ds), 'Number of images does not match'
        assert stats['resolution'] == evaluation_res, 'Resolution does not match'
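The loading code above rebuilds a symmetric covariance from a triangular store: `np.triu(stats_s.T, 1)` restores the strict upper triangle, so only the lower triangle (with diagonal) needs saving. A hedged sketch of producing such a stats file; the feature matrix is a stand-in and the 2048-dimensional Inception features are an assumption:

import numpy as np

features = np.random.randn(1000, 2048).astype(np.float32)  # stand-in activations
m = features.mean(axis=0)
s = np.cov(features, rowvar=False)
np.savez('precomputed_fid_299x299_train.npz',
         stats_m=m,
         stats_s=np.tril(s),  # lower triangle only; loading restores the rest
         num_images=len(features),
         resolution=299)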
Example #14
max_trans_pert = float(args["--max_trans_pert"])
iterations = int(args["--iterations"])

bench = load_sixd(sixd_base, nr_frames=1, seq=obj)
croppings = yaml.safe_load(open('config/croppings.yaml', 'r'))

if 'linemod' in network:
    dataset_name = 'linemod'
elif 'tejani' in network:
    dataset_name = 'tejani'
else:
    raise Exception('Could not determine dataset')

with tf.Session() as session:
    architecture = Architecture(network_file=network, sess=session)
    ren = Renderer((640, 480), bench.cam)
    refiner = Refiner(architecture=architecture, ren=ren, session=session)

    for frame in bench.frames:
        col = frame.color.copy()
        _, gt_pose, _ = frame.gt[0]

        perturbed_pose = perturb_pose(gt_pose, max_rot_pert, max_trans_pert)

        refinable = Refinable(model=bench.models[str(int(obj))], label=0, hypo_pose=perturbed_pose,
                              metric_crop_shape=croppings[dataset_name]['obj_{:02d}'.format(int(obj))], input_col=col)

        for i in range(iterations):
            refinable.input_col = col.copy()

            start = timer()
Example #15
class Game(object):
    def __init__(self):
        self._SAVE_FILE_PATH = 'save.plsf'
        self._done = False
        self._event_dispatcher = UserEventDispatcher(self)
        self._renderer = Renderer(GameSettings.Screen.WIDTH,
                                  GameSettings.Screen.HEIGHT)
        self._timer = GameTimer()
        self._city = self.load_game()
        self._ui = UI(self)
        self._state = NormalState()

    def start(self) -> None:
        self._timer.reset()
        while not self._done:
            for user_event in pygame.event.get():
                self._event_dispatcher.dispatch(user_event)
            self._city.simulate()
            self._renderer.render(self._city, self._ui)

    def get_city(self) -> City:
        return self._city

    def get_state(self) -> GameState:
        return self._state

    def set_state(self, state: GameState) -> None:
        self._state = state

    def toggle_grid(self) -> None:
        self._ui.toggle_grid()

    def toggle_buildings_window(self) -> None:
        self._ui.toggle_buildings_window()

    def handle_mouse_down(self, position: Tuple) -> None:
        if type(self._state) is NormalState:
            self._ui.handle_mouse_down(position)

    def handle_mouse_up(self, position: Tuple) -> None:
        if type(self._state) is NormalState:
            self._ui.handle_mouse_up(position)
        elif type(self._state) is PlacingNewBuildingState:
            building = self._state.building
            building.set_position((position[0] / GameSettings.Game.FIELD_SIZE,
                                   position[1] / GameSettings.Game.FIELD_SIZE))
            if not self._city.is_rectangle_ocuppied(building.position):
                self._city.add_city_object(building)
                self._state = NormalState()

    def save_game(self) -> None:
        json_data = self._city.jsonify()
        with open(self._SAVE_FILE_PATH, 'w') as save_file:
            save_file.write(json.dumps(json_data, indent=4))

    def load_game(self) -> City:
        city = City()
        if os.path.isfile(self._SAVE_FILE_PATH):
            with open(self._SAVE_FILE_PATH) as save_file:
                data = json.load(save_file)
            try:
                city.load(data)
            except Exception:
                traceback.print_exc()
                city = City()

        return city

    def end_game(self) -> None:
        self.save_game()
        self._done = True
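`Game` drives a small state pattern, dispatching on the concrete type of `self._state`. A minimal sketch of the state classes it expects, with shapes inferred from the calls above; these are assumptions, not the project's actual definitions:

class GameState:
    # Marker base class; Game dispatches on the concrete state type.
    pass

class NormalState(GameState):
    pass

class PlacingNewBuildingState(GameState):
    def __init__(self, building):
        # The building being placed; read by Game.handle_mouse_up().
        self.building = building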
Example #16
    def __init__(self, edge_processor: EdgeProcessor, grid: Grid):
        Renderer.__init__(self)
        self.edge_processor = edge_processor
        self.grid = grid

        shader_settings: List[ShaderSetting] = []
        shader_settings.extend(
            [ShaderSetting("sample_point", ["sample/sample.vert", "basic/discard_screen_color.frag"],
                           ["edge_importance_threshold", "screen_width", "screen_height"]),
             ShaderSetting("sample_line",
                           ["sample/sample_impostor.vert", "basic/color.frag",
                            "sample/points_to_line.geom"],
                           ["edge_importance_threshold"]),
             ShaderSetting("sample_sphere", ["sample/sample_impostor.vert",
                                             "sample/point_to_sphere_impostor_phong.frag",
                                             "sample/point_to_sphere_impostor.geom"],
                           ["edge_object_radius", "edge_importance_threshold"]),
             ShaderSetting("sample_transparent_sphere", ["sample/sample_impostor.vert",
                                                         "sample/point_to_sphere_impostor_transparent.frag",
                                                         "sample/point_to_sphere_impostor.geom"],
                           ["edge_object_radius", "edge_base_opacity", "edge_importance_opacity", "edge_depth_opacity",
                            "edge_opacity_exponent", "edge_importance_threshold"]),
             ShaderSetting("sample_ellipsoid_transparent", ["sample/sample_impostor.vert",
                                                            "sample/points_to_ellipsoid_impostor_transparent.frag",
                                                            "sample/points_to_ellipsoid_impostor.geom"],
                           ["edge_object_radius", "edge_base_opacity", "edge_importance_opacity", "edge_depth_opacity",
                            "edge_opacity_exponent", "edge_importance_threshold"])
             ])
        self.set_shader(shader_settings)

        self.data_handler: LayeredVertexDataHandler = LayeredVertexDataHandler([[VertexDataHandler(
            [(self.edge_processor.sample_buffer[i][j], 0), (self.edge_processor.edge_buffer[i][j], 2)], []) for j in
            range(len(self.edge_processor.sample_buffer[i]))] for i in range(len(self.edge_processor.sample_buffer))])

        def generate_element_count_func(ep: EdgeProcessor) -> Callable:
            buffered_ep: EdgeProcessor = ep

            def element_count_func(layer: int, buffer: int):
                return buffered_ep.get_buffer_points(layer, buffer)

            return element_count_func

        self.render_funcs["sample_point"] = generate_render_function(OGLRenderFunction.ARRAYS_INSTANCED, GL_POINTS,
                                                                     10.0, depth_test=True)
        self.render_funcs["sample_line"] = generate_render_function(OGLRenderFunction.ARRAYS_INSTANCED, GL_POINTS,
                                                                    line_width=2.0, depth_test=True)
        self.render_funcs["sample_sphere"] = generate_render_function(OGLRenderFunction.ARRAYS_INSTANCED, GL_POINTS,
                                                                      depth_test=True)
        self.render_funcs["sample_transparent_sphere"] = generate_render_function(OGLRenderFunction.ARRAYS_INSTANCED,
                                                                                  GL_POINTS, add_blending=True)
        self.render_funcs["sample_ellipsoid_transparent"] = generate_render_function(OGLRenderFunction.ARRAYS_INSTANCED,
                                                                                     GL_POINTS, add_blending=True)
        self.element_count_funcs["sample_point"] = generate_element_count_func(edge_processor)
        self.element_count_funcs["sample_line"] = generate_element_count_func(edge_processor)
        self.element_count_funcs["sample_sphere"] = generate_element_count_func(edge_processor)
        self.element_count_funcs["sample_transparent_sphere"] = generate_element_count_func(edge_processor)
        self.element_count_funcs["sample_ellipsoid_transparent"] = generate_element_count_func(edge_processor)

        self.create_sets(self.data_handler)

        self.importance_threshold: float = 0.0