Exemplo n.º 1
0
    def load_observations(self, filenames, house_dir, load_frames=False):
        """Load relative-position observations (and optionally semantic
        frames) from per-house CSV files in ``self._priors_dir``, then group
        all observations by their observation key.

        Args:
            filenames: iterable of strings of the form ``<obs_id>_<house_id>``
                (split on the first underscore only is NOT done — ids are
                assumed to contain no underscores themselves).
            house_dir: directory containing ``<obs_id>.json`` house files.
            load_frames: if True, also parse ``<obs_id>_<house_id>.semframes.csv``
                into ``self._semantic_frames`` keyed by house_id.

        Side effects:
            Populates ``self._observations`` (keyed by obs_id),
            optionally ``self._semantic_frames`` (keyed by house_id),
            and ``self._grouped_observations``.
        """
        counts_house_ids = [f.split('_') for f in filenames]
        num_observations = len(counts_house_ids)
        for i, (obs_id, house_id) in enumerate(counts_house_ids):
            print(
                f'Loading observation obs_id={obs_id} house id={house_id}, {i}/{num_observations}'
            )
            # NOTE(review): the original fetched with house_id here but stored
            # with obs_id below; use obs_id consistently, since obs_id is also
            # the key read back in the grouping loop.
            house_observations = self._observations.get(obs_id, {})
            num_rows = 0
            relpos_path = os.path.join(
                self._priors_dir, obs_id + '_' + house_id + '.relpos.csv')
            # Context manager closes the handle promptly (the original leaked
            # the file object passed straight to DictReader).
            with open(relpos_path) as relpos_file:
                for row in csv.DictReader(relpos_file):
                    if len(row) == 0:
                        continue
                    obs = RelativeObservation.fromstring(row)
                    house_observations[(obs.obj_id, obs.ref_id)] = obs
                    num_rows += 1
            if num_rows > 0:
                self._observations[obs_id] = house_observations

            if load_frames:
                house_frames = self._semantic_frames.get(house_id, {})
                semframes_path = os.path.join(
                    self._priors_dir,
                    obs_id + '_' + house_id + '.semframes.csv')
                with open(semframes_path) as semframes_file:
                    for row in csv.DictReader(semframes_file):
                        # local2world is serialized as a flat 16-element
                        # array; reshape to a 4x4 transform matrix.
                        local2world = np.matrix(
                            str2nparr(row['local2world'])).reshape(4, 4)
                        frame = {
                            'obj_id': row['obj_id'],
                            'obb': OBB.from_local2world_transform(local2world),
                            'aabb': {
                                'min': row['world_aabb_min'],
                                'max': row['world_aabb_max']
                            }
                        }
                        house_frames[frame['obj_id']] = frame
                self._semantic_frames[house_id] = house_frames

        print('Grouping observations by categories...')
        groups = {}
        num_obs = len(self._observations)
        for i, (obs_id, house_id) in enumerate(counts_house_ids):
            print(f'Observation in house id={house_id} {i}/{num_obs}')
            house = House(file_dir=os.path.join(house_dir, obs_id + '.json'),
                          include_support_information=False)
            self._objects.init_from_house(house, update_sim=False)
            # .get with a default avoids a KeyError for obs files that had
            # zero rows (those were never stored above).
            for observation in self._observations.get(obs_id, {}).values():
                key = self._objects.get_observation_key(observation)
                key_bin = groups.get(key, [])
                key_bin.append(observation)
                groups[key] = key_bin
        print('Done grouping')

        self._grouped_observations = groups
Exemplo n.º 2
0
    def get_render_obb(self):
        """Rasterize this node's oriented bounding box to a top-down
        height map sized to the containing room.

        Returns:
            torch.Tensor produced from the full-size OBB render.
        """
        source_obj = Obj(self.modelId)

        # Scale/translate a canonical unit box to the model's local AABB...
        extents = source_obj.bbox_max - source_obj.bbox_min
        local_matrix = Transform(
            scale=extents[:3],
            translation=source_obj.bbox_min[:3]).as_mat4()
        # ...then compose with the node's (transposed) world transformation.
        world_matrix = np.matmul(np.transpose(self.get_transformation()),
                                 local_matrix)

        box = OBB.from_local2world_transform(world_matrix)
        triangles = np.asarray(box.get_triangles(), dtype=np.float32)
        height_map = TopDownView.render_object_full_size_helper(
            triangles, self.room.size)
        return torch.from_numpy(height_map)
Exemplo n.º 3
0
 def update(self, o, xform=None, update_sim=False):
     """Register or refresh node ``o`` in the object index, computing its
     category, transform, OBB and semantic frame.

     Args:
         o: node-like object; mutated in place (``category``, ``model2world``,
            ``xform``, ``transform``, ``obb``, ``frame`` are assigned).
         xform: optional explicit Transform; when None it is derived from
            the node itself.
         update_sim: if True, push the resulting pose to the simulator.
     """
     if not hasattr(o, 'category'):
         o.category = self.category(o.modelId, scheme=self._categorization_type)
     model_id = o.modelId if hasattr(o, 'modelId') else None
     o.model2world = self.semantic_frame_matrix(model_id)
     # Compare against None explicitly: the original truthiness test would
     # silently discard a caller-supplied transform that happened to be
     # falsy (e.g. an identity/zero transform with a custom __bool__).
     o.xform = xform if xform is not None else Transform.from_node(o)
     if hasattr(o, 'transform'):
         o.transform = o.xform.as_mat4_flat_row_major()
     o.obb = OBB.from_local2world_transform(np.matmul(o.xform.as_mat4(), o.model2world))
     o.frame = self.node_to_semantic_frame(o, o.obb)
     self._objects[o.id] = o
     # room geometries pre-transformed, so after obb computation above is done, set back to identity transform
     if hasattr(o, 'type') and o.type == 'Room':
         o.xform = Transform()
         o.transform = o.xform.as_mat4_flat_row_major()
     if update_sim:
         self._sim.set_state(obj_id=o.id, position=o.xform.translation, rotation_q=o.xform.rotation)
Exemplo n.º 4
0
    def render(self, room):
        """Render a top-down view of ``room``.

        Returns:
            (visualization, (floor, wall, nodes)) where ``visualization`` is a
            ``size x size`` numpy composite of all rendered objects plus floor
            and walls, ``floor``/``wall`` are torch float tensors, and
            ``nodes`` is a list of per-node description dicts (height maps,
            bboxes, hierarchy info).
        """
        projection = self.pgen.get_projection(room)

        visualization = np.zeros((self.size, self.size))
        nodes = []

        for node in room.nodes:
            modelId = node.modelId  #Camelcase due to original json

            # Node's world transform, stored flat row-major in the json.
            t = np.asarray(node.transform).reshape(4, 4)

            o = Obj(modelId)
            # Project the node transform into 2d image space and bake it
            # into the object's geometry.
            t = projection.to_2d(t)
            o.transform(t)

            # Keep the projected node transform for the OBB render further down.
            save_t = t
            # Plain projection (no node transform) for the world-space bbox.
            t = projection.to_2d()
            bbox_min = np.dot(np.asarray([node.xmin, node.zmin, node.ymin, 1]),
                              t)
            bbox_max = np.dot(np.asarray([node.xmax, node.zmax, node.ymax, 1]),
                              t)
            # Integer crop window (inclusive) in image pixels.
            xmin = math.floor(bbox_min[0])
            ymin = math.floor(bbox_min[2])
            xsize = math.ceil(bbox_max[0]) - xmin + 1
            ysize = math.ceil(bbox_max[2]) - ymin + 1

            # Per-node metadata returned to the caller.
            description = {}
            description["modelId"] = modelId
            description["transform"] = node.transform
            description["bbox_min"] = bbox_min
            description["bbox_max"] = bbox_max
            description["id"] = node.id
            description["child"] = [c.id for c in node.child
                                    ] if node.child else None
            description["parent"] = node.parent.id if isinstance(
                node.parent, Node) else node.parent
            #if description["parent"] is None or description["parent"] == "Ceiling":
            #    print(description["modelId"])
            #    print(description["parent"])
            #    print(node.zmin - room.zmin)
            #print("FATAL ERROR")

            #Since it is possible that the bounding box information of a room
            #Was calculated without some doors/windows
            #We need to handle these cases
            if ymin < 0:
                ymin = 0
            if xmin < 0:
                xmin = 0

            #xmin = 0
            #ymin = 0
            #xsize = 256
            #ysize = 256

            #print(list(bbox_min), list(bbox_max))
            #print(xmin, ymin, xsize, ysize)
            # Rasterize the node's geometry into its crop window.
            rendered = self.render_object(o, xmin, ymin, xsize, ysize,
                                          self.size)
            description["height_map"] = torch.from_numpy(rendered).float()

            #tmp = np.zeros((self.size, self.size))
            #tmp[xmin:xmin+rendered.shape[0],ymin:ymin+rendered.shape[1]] = rendered
            #visualization += tmp

            # Compute the pixel-space dimensions of the object before it has been
            #    transformed (i.e. in object space)
            objspace_bbox_min = np.dot(o.bbox_min, t)
            objspace_bbox_max = np.dot(o.bbox_max, t)
            description['objspace_dims'] = np.array([
                objspace_bbox_max[0] - objspace_bbox_min[0],
                objspace_bbox_max[2] - objspace_bbox_min[2]
            ])

            # Render an OBB height map as well
            # Build the local->world matrix for the model AABB, compose with
            # the projected node transform saved earlier (save_t), and
            # rasterize the resulting OBB triangles.
            bbox_dims = o.bbox_max - o.bbox_min
            model_matrix = Transform(scale=bbox_dims[:3],
                                     translation=o.bbox_min[:3]).as_mat4()
            full_matrix = np.matmul(np.transpose(save_t), model_matrix)
            obb = OBB.from_local2world_transform(full_matrix)
            obb_tris = np.asarray(obb.get_triangles(), dtype=np.float32)
            bbox_min = np.min(np.min(obb_tris, 0), 0)
            bbox_max = np.max(np.max(obb_tris, 0), 0)
            # xmin/ymin/xsize/ysize are re-derived here from the OBB extents,
            # shadowing the node-bbox values used above.
            xmin, ymin = math.floor(bbox_min[0]), math.floor(bbox_min[2])
            xsize, ysize = math.ceil(bbox_max[0]) - xmin + 1, math.ceil(
                bbox_max[2]) - ymin + 1
            rendered_obb = self.render_object_helper(obb_tris, xmin, ymin,
                                                     xsize, ysize, self.size)
            description["height_map_obb"] = torch.from_numpy(
                rendered_obb).float()
            description['bbox_min_obb'] = bbox_min
            description['bbox_max_obb'] = bbox_max

            # NOTE(review): this placement uses the OBB-derived xmin/ymin but
            # `rendered` was cropped with the earlier node-bbox origin —
            # confirm the two origins are meant to coincide.
            tmp = np.zeros((self.size, self.size))
            tmp[xmin:xmin + rendered.shape[0],
                ymin:ymin + rendered.shape[1]] = rendered
            visualization += tmp

            nodes.append(description)

        # Room-level projection (with the room's own transform when present).
        if hasattr(room, "transform"):
            t = projection.to_2d(np.asarray(room.transform).reshape(4, 4))
        else:
            t = projection.to_2d()
        #Render the floor
        o = Obj(room.modelId + "f", room.house_id, is_room=True)
        o.transform(t)
        floor = self.render_object(o, 0, 0, self.size, self.size, self.size)
        visualization += floor
        floor = torch.from_numpy(floor).float()

        #Render the walls
        o = Obj(room.modelId + "w", room.house_id, is_room=True)
        o.transform(t)
        wall = self.render_object(o, 0, 0, self.size, self.size, self.size)
        visualization += wall
        wall = torch.from_numpy(wall).float()
        return (visualization, (floor, wall, nodes))