Example 1
 def __init__(self,
              details=False,
              model_details=False,
              save_freq=True,
              save_dest=""):
     """
     Parameters
     ----------
     details (bool, optional): if True, print category frequency information to the screen
     model_details (bool, optional): since there are many model ids, this additional flag
         controls whether per-model counts are also printed
     save_freq (bool, optional): if True, save the category frequency information to disk
     save_dest (string, optional): directory (relative to the data root) to which frequency information is saved
     """
     self.room_count = 0
     self.object_count = 0
     self.room_types_count = {}
     self.fine_categories_count = {}
     self.coarse_categories_count = {}
     self.final_categories_count = {}
     self.models_count = {}
     self.object_category = ObjectCategories()
     self.floor_node_only = False
     self.details = details
     self.model_details = model_details
     self.save_freq = save_freq
     data_dir = utils.get_data_root_dir()
     self.save_dest = f"{data_dir}/{save_dest}"
Example 2
 def __init__(self, categorization_type='final', sim_mode='direct'):
     self._object_data = ObjectData()
     self._object_categories = ObjectCategories()
     self._objects = {}
     self._room = None
     self._categorization_type = categorization_type
     self._sim = Simulator(mode=sim_mode)
Example 3
    def learn(self, data_folder="bedroom_final", data_root_dir=None):
        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [f for f in files if ".pkl" in f and "domain" not in f and "_" not in f]

        with open(f"{data_dir}/final_categories_frequency", "r") as f:
            lines = f.readlines()
            cats = [line.split()[0] for line in lines]
            self.category_count = [int(line.split()[1]) for line in lines
                                   if line.split()[0] not in ["window", "door"]]

        self.categories = [cat for cat in cats if cat not in set(['window', 'door'])]
        self.cat_to_index = {self.categories[i]:i for i in range(len(self.categories))}
        self.num_categories = len(self.categories)
        self.categories.append("floor")
        N = self.num_categories
        
        self.support_count = [[0 for i in range(N+1)] for j in range(N)]

        for index in range(len(files)):
            print(index)
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)
            
            object_nodes = []
            id_to_cat = {}
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if not category in ["door", "window"]:
                    object_nodes.append(node)
                    id_to_cat[node["id"]] = self.cat_to_index[category]
                    node["category"] = self.cat_to_index[category]
            
            for node in object_nodes:
                parent = node["parent"]
                category = node["category"]
                if parent == "Floor" or parent is None:
                    self.support_count[category][-1] += 1
                else:
                    self.support_count[category][id_to_cat[parent]] += 1

        self.possible_supports={}
        for i in range(self.num_categories):
            print(f"Support for {self.categories[i]}:")
            supports = [(c, self.support_count[i][c] / self.category_count[i]) for c in range(N + 1)]
            supports = sorted(supports, key=lambda x: -x[1])
            supports = [s for s in supports if s[1] > 0.01]
            for s in supports:
                print(f"    {self.categories[s[0]]}:{s[1]:4f}")
            self.possible_supports[i] = [s[0] for s in supports]
        
        print(self.possible_supports)       
        self.N = N
Example 4
    def __init__(self,
                 scene_indices=(0, 4000),
                 data_folder="bedroom_fin_256",
                 data_root_dir=None,
                 seed=None,
                 do_rotation_augmentation=False,
                 cat_only=False,
                 use_same_category_batches=False,
                 importance_order=False):
        super(LatentDataset, self).__init__()
        self.category_map = ObjectCategories()
        self.seed = seed
        self.data_folder = data_folder
        self.data_root_dir = data_root_dir
        self.scene_indices = scene_indices
        self.do_rotation_augmentation = do_rotation_augmentation
        self.cat_only = cat_only

        self.cat_name2index = None

        self.cat_index2scenes = None

        if self.data_root_dir is None:
            self.data_root_dir = utils.get_data_root_dir()
        with open(
                f"{self.data_root_dir}/{self.data_folder}/final_categories_frequency",
                "r") as f:
            lines = f.readlines()
            names = [line.split()[0] for line in lines]
            names = [
                name for name in names
                if ((name != 'door') and (name != 'window'))
            ]
            self.catnames = names
            self.cat_name2index = {names[i]: i for i in range(0, len(names))}
            self.n_categories = len(names)
            self.cat2freq = {}
            for line in lines:
                cat, freq = line.split(' ')
                self.cat2freq[cat] = int(freq)
            maxfreq = max(self.cat2freq.values())
            self.cat2freq_normalized = {
                cat: freq / maxfreq
                for cat, freq in self.cat2freq.items()
            }

        self.build_cat2scene()
        self.build_cats_in_scene_indices()
        self.compute_cat_sizes()

        # See 'prepare_same_category_batches' below for info
        self.use_same_category_batches = use_same_category_batches
        if use_same_category_batches:
            self.same_category_batch_indices = []
        else:
            self.same_category_batch_indices = None

        self.importance_order = importance_order
Example 5
 def __init__(self,
              arrangement_priors,
              num_angle_divisions=8,
              num_pairwise_priors=-1,
              sim_mode='direct'):
     self._objects = ObjectCollection(sim_mode=sim_mode)
     self.room_id = None
     self._priors = arrangement_priors
     self._num_angle_divisions = num_angle_divisions
     self._num_pairwise_priors = num_pairwise_priors
     self.category_map = ObjectCategories()
Example 6
    def __init__(self,
                 scene_indices=(0, 4000),
                 data_folder="bedroom_fin_256",
                 data_root_dir=None,
                 seed=None,
                 do_rotation_augmentation=False,
                 cat_only=False,
                 use_same_category_batches=False,
                 importance_order=False,
                 epoch_size=None):
        super(LatentDataset, self).__init__()
        self.category_map = ObjectCategories()
        self.seed = seed
        self.data_folder = data_folder
        self.data_root_dir = data_root_dir
        self.scene_indices = scene_indices
        self.do_rotation_augmentation = do_rotation_augmentation
        self.cat_only = cat_only

        self.cat_name2index = None

        self.cat_index2scenes = None

        if self.data_root_dir is None:
            self.data_root_dir = utils.get_data_root_dir()

        self.catnames = self.category_map.all_non_arch_categories(
            self.data_root_dir, data_folder)
        self.cat_name2index = {
            self.catnames[i]: i
            for i in range(0, len(self.catnames))
        }
        self.n_categories = len(self.catnames)

        self.build_cat2scene()
        self.build_cats_in_scene_indices()

        self.cat_importances = self.category_map.all_non_arch_category_importances(
            self.data_root_dir, data_folder)

        # See 'prepare_same_category_batches' below for info
        self.use_same_category_batches = use_same_category_batches
        if use_same_category_batches:
            self.same_category_batch_indices = []
            assert (epoch_size is not None)
            self.epoch_size = epoch_size
        else:
            self.same_category_batch_indices = None

        self.importance_order = importance_order
Example 7
 def __init__(self,
              data_dir,
              data_root_dir,
              scene_indices=(0, 4000),
              p_auxiliary=0.7,
              seed=None,
              ablation=None):
     """
     Parameters
     ----------
     data_root_dir (String): root dir where all data lives
     data_dir (String): directory where this dataset lives (relative to data_root_dir)
     scene_indices (tuple[int, int]): range of indices of scenes (in data_dir) that are part of this set
     p_auxiliary (float, optional): probability that an auxiliary category is chosen.
         Note that since (existing centroid) is sparse, it is actually treated as non-auxiliary when sampling
     seed (int or None, optional): random seed, set for reproducibility
     ablation (string or None, optional): see data.RenderedComposite.get_composite and the paper
     """
     self.category_map = ObjectCategories()
     self.seed = seed
     self.data_dir = data_dir
     self.data_root_dir = data_root_dir
     self.scene_indices = scene_indices
     self.p_auxiliary = p_auxiliary
     self.ablation = ablation
Example 8
    def __init__(self,
                 scene_indices=(0, 4000),
                 data_folder="bedroom",
                 data_root_dir=None,
                 seed=None):
        super(LocDataset, self).__init__()
        self.category_map = ObjectCategories()
        self.seed = seed
        self.data_folder = data_folder
        self.data_root_dir = data_root_dir
        self.scene_indices = scene_indices

        if self.data_root_dir is None:
            self.data_root_dir = utils.get_data_root_dir()
        with open(f"{self.data_root_dir}/{data_folder}/final_categories_frequency",
                  "r") as f:
            lines = f.readlines()
        self.n_categories = len(lines) - 2  # -2 for 'window' and 'door'
Example 9
    def render_graph(self, room, root, targets):
        target_identifiers = list(map(lambda x: x[0], targets))

        projection = self.pgen.get_projection(room)
        
        visualization = np.zeros((self.size,self.size))
        nodes = []

        for i, node in enumerate(room.nodes):
            modelId = node.modelId  # camelCase, per the original JSON

            t = np.asarray(node.transform).reshape(4,4)

            o = Obj(modelId)
            
            # Object-space extents, measured before projecting to 2D
            objspace_width = np.linalg.norm(o.front_left - o.front_right)
            objspace_depth = np.linalg.norm(o.front_left - o.back_left)

            t = projection.to_2d(t)
            o.transform(t)

            # World-space extents, measured after projecting to 2D
            worldspace_width = np.linalg.norm(o.front_left - o.front_right)
            worldspace_depth = np.linalg.norm(o.front_left - o.back_left)

            t = projection.to_2d()
            bbox_min = np.dot(np.asarray([node.xmin, node.zmin, node.ymin, 1]), t)
            bbox_max = np.dot(np.asarray([node.xmax, node.zmax, node.ymax, 1]), t)
            xmin = math.floor(bbox_min[0])
            ymin = math.floor(bbox_min[2])
            xsize = math.ceil(bbox_max[0]) - xmin + 1
            ysize = math.ceil(bbox_max[2]) - ymin + 1

            description = {}
            description["modelId"] = modelId
            description["transform"] = node.transform
            description["bbox_min"] = bbox_min
            description["bbox_max"] = bbox_max
            
            # Since it is possible that the bounding box information of a room
            # was calculated without some doors/windows,
            # we need to handle these cases
            ymin = max(ymin, 0)
            xmin = max(xmin, 0)

            # render object
            rendered = self.render_object(o, xmin, ymin, xsize, ysize, self.size)
            description["height_map"] = torch.from_numpy(rendered).float()

            tmp = np.zeros((self.size, self.size))
            tmp[xmin:xmin+rendered.shape[0],ymin:ymin+rendered.shape[1]] = rendered

            # render bbox

            for idx, line in enumerate(o.bbox_lines()):
                direction = line[1] - line[0]
                distance = np.linalg.norm(direction)
                norm_direction = direction / distance
                for step in range(math.floor(distance)):
                    point = line[0] + step * norm_direction
                    x = min(math.floor(point[0]), self.size - 1)
                    y = min(math.floor(point[2]), self.size - 1)
                    tmp[x][y] = 1

            # temporarily darken image to see more easily
            category = ObjectCategories().get_coarse_category(modelId)
            identifier = f"{category}_{i}"
            if identifier in target_identifiers:
                tmp *= 0.8
            elif identifier != root:
                tmp *= 0.1
            else:
                for ray in o.front_rays():
                    for t in range(self.size):
                        point = ray.origin + t * ray.direction
                        if point[0] < 0 or point[0] >= self.size or point[2] < 0 or point[2] >= self.size:
                            break
                        color = 1 if int(t * objspace_depth / worldspace_depth) % 2 == 0 else -100
                        tmp[math.floor(point[0])][math.floor(point[2])] = color
                for ray in o.back_rays():
                    for t in range(self.size):
                        point = ray.origin + t * ray.direction
                        if point[0] < 0 or point[0] >= self.size or point[2] < 0 or point[2] >= self.size:
                            break
                        color = 1 if int(t * objspace_depth / worldspace_depth) % 2 else -100
                        tmp[math.floor(point[0])][math.floor(point[2])] = color
                for ray in o.left_rays():
                    for t in range(self.size):
                        point = ray.origin + t * ray.direction
                        if point[0] < 0 or point[0] >= self.size or point[2] < 0 or point[2] >= self.size:
                            break
                        color = 1 if int(t * objspace_width / worldspace_width) % 2 else -100
                        tmp[math.floor(point[0])][math.floor(point[2])] = color
                for ray in o.right_rays():
                    for t in range(self.size):
                        point = ray.origin + t * ray.direction
                        if point[0] < 0 or point[0] >= self.size or point[2] < 0 or point[2] >= self.size:
                            break
                        color = 1 if int(t * objspace_width / worldspace_width) % 2 else -100
                        tmp[math.floor(point[0])][math.floor(point[2])] = color

            visualization += tmp
            
            nodes.append(description)
        
        #Render the floor
        o = Obj(room.modelId+"f", room.house_id, is_room=True)
        t = projection.to_2d()
        o.transform(t)
        floor = self.render_object(o, 0, 0, self.size, self.size, self.size)
        visualization += floor
        floor = torch.from_numpy(floor).float()
    
        #Render the walls
        o = Obj(room.modelId+"w", room.house_id, is_room=True)
        t = projection.to_2d()
        o.transform(t)
        wall = self.render_object(o, 0, 0, self.size, self.size, self.size)
        visualization += wall
        wall = torch.from_numpy(wall).float()
        
        return (visualization, (floor, wall, nodes))
Example 10
class ModelPrior():
    def __init__(self):
        pass

    def learn(self, data_folder, data_root_dir=None):
        if data_root_dir is None:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [f for f in files if ".pkl" in f and not "domain" in f]

        with open(f"{data_dir}/final_categories_frequency", "r") as f:
            lines = f.readlines()
            cats = [line.split()[0] for line in lines]

        self.categories = [
            cat for cat in cats if cat not in set(['window', 'door'])
        ]
        self.cat_to_index = {
            self.categories[i]: i
            for i in range(len(self.categories))
        }

        with open(f"{data_dir}/model_frequency", "r") as f:
            lines = f.readlines()
            models = [line.split()[0] for line in lines]
            self.model_freq = [int(line.split()[1]) for line in lines]

        self.models = [
            model for model in models if self.category_map.get_final_category(
                model) not in set(['window', 'door'])
        ]
        self.model_to_index = {
            self.models[i]: i
            for i in range(len(self.models))
        }

        N = len(self.models)
        self.num_categories = len(self.categories)

        self.model_index_to_cat = [
            self.cat_to_index[self.category_map.get_final_category(
                self.models[i])] for i in range(N)
        ]

        self.count = [[0 for i in range(N)] for j in range(N)]

        for index in range(len(files)):
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)

            object_nodes = []
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if category not in ["door", "window"]:
                    object_nodes.append(node)

            for i in range(len(object_nodes)):
                for j in range(i + 1, len(object_nodes)):
                    a = self.model_to_index[object_nodes[i]["modelId"]]
                    b = self.model_to_index[object_nodes[j]["modelId"]]
                    self.count[a][b] += 1
                    self.count[b][a] += 1
            print(index, end="\r")

        self.N = N

    def save(self, dest=None):
        if dest is None:
            dest = f"{self.data_dir}/model_prior.pkl"
        with open(dest, "wb") as f:
            pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)

    def load(self, data_dir):
        source = f"{data_dir}/model_prior.pkl"
        with open(source, "rb") as f:
            self.__dict__ = pickle.load(f)

    def sample(self, category, models):
        N = self.N
        indices = [
            i for i in range(N) if self.model_index_to_cat[i] == category
        ]
        p = np.asarray([self.model_freq[i] for i in indices])
        for model in models:
            i = self.model_to_index[model]
            p1 = np.asarray([self.count[j][i] for j in indices])
            p1 = p1 / p1.sum()
            p = p * p1

        p = p / sum(p)
        return self.models[indices[np.random.choice(len(indices), p=p)]]

    def get_models(self, category, important, others):
        N = self.N
        indices = [
            i for i in range(N) if self.model_index_to_cat[i] == category
        ]
        to_remove = []

        freq = [self.model_freq[i] for i in indices]
        total_freq = sum(freq)
        for j in range(len(indices)):
            if freq[j] / total_freq < 0.01:
                if indices[j] not in to_remove:
                    to_remove.append(indices[j])

        for model in important:
            i = self.model_to_index[model]
            freq = [self.count[j][i] for j in indices]
            total_freq = sum(freq)
            if total_freq > 0:
                for j in range(len(indices)):
                    if freq[j] / total_freq < 0.1:
                        if indices[j] not in to_remove:
                            to_remove.append(indices[j])

        for model in others:
            i = self.model_to_index[model]
            freq = [self.count[j][i] for j in indices]
            total_freq = sum(freq)
            if total_freq > 0:
                for j in range(len(indices)):
                    if freq[j] / total_freq < 0.05:
                        if indices[j] not in to_remove:
                            to_remove.append(indices[j])

        for item in to_remove:
            if len(indices) > 1:
                indices.remove(item)

        return [self.models[index] for index in indices]
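A minimal usage sketch for the class above (the folder name, category index, and empty model lists are placeholders, not values from the source):

prior = ModelPrior()
prior.learn("bedroom_final")  # tallies pairwise model co-occurrences over all scenes
prior.save()                  # writes {data_dir}/model_prior.pkl by default

loaded = ModelPrior()
loaded.load(prior.data_dir)
# Any model ids passed in must exist in the learned model_to_index map
model_id = loaded.sample(3, models=[])
candidates = loaded.get_models(3, important=[], others=[])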
Example 11
class LatentDataset(data.Dataset):
    def __init__(self,
                 scene_indices=(0, 4000),
                 data_folder="bedroom_fin_256",
                 data_root_dir=None,
                 seed=None,
                 do_rotation_augmentation=False,
                 cat_only=False,
                 use_same_category_batches=False,
                 importance_order=False,
                 epoch_size=None):
        super(LatentDataset, self).__init__()
        self.category_map = ObjectCategories()
        self.seed = seed
        self.data_folder = data_folder
        self.data_root_dir = data_root_dir
        self.scene_indices = scene_indices
        self.do_rotation_augmentation = do_rotation_augmentation
        self.cat_only = cat_only

        self.cat_name2index = None

        self.cat_index2scenes = None

        if self.data_root_dir is None:
            self.data_root_dir = utils.get_data_root_dir()

        self.catnames = self.category_map.all_non_arch_categories(
            self.data_root_dir, data_folder)
        self.cat_name2index = {
            self.catnames[i]: i
            for i in range(0, len(self.catnames))
        }
        self.n_categories = len(self.catnames)

        self.build_cat2scene()
        self.build_cats_in_scene_indices()

        self.cat_importances = self.category_map.all_non_arch_category_importances(
            self.data_root_dir, data_folder)

        # See 'prepare_same_category_batches' below for info
        self.use_same_category_batches = use_same_category_batches
        if use_same_category_batches:
            self.same_category_batch_indices = []
            assert (epoch_size is not None)
            self.epoch_size = epoch_size
        else:
            self.same_category_batch_indices = None

        self.importance_order = importance_order

    # Build a map from category index to the scene indices that contain an instance of that category
    # This ignores scene_indices and does it for the whole data folder
    def build_cat2scene(self):
        self.cat_index2scenes = defaultdict(list)
        data_root_dir = self.data_root_dir or utils.get_data_root_dir()
        data_dir = f'{data_root_dir}/{self.data_folder}'
        filename = f'{data_dir}/cat_index2scenes'
        # Create new cached map file
        if not os.path.exists(filename):
            print(
                'Building map of category to scenes containing an instance...')
            pkls = [
                path for path in os.listdir(data_dir) if path.endswith('.pkl')
            ]
            pklnames = [os.path.splitext(path)[0] for path in pkls]
            # Only get the .pkl files which are numbered scenes
            indices = [
                int(pklname) for pklname in pklnames if pklname.isdigit()
            ]
            for i, idx in enumerate(indices, start=1):
                sys.stdout.write(f'   {i}/{len(indices)}\r')
                sys.stdout.flush()
                scene = RenderedScene(idx, self.data_folder,
                                      self.data_root_dir)
                object_nodes = scene.object_nodes
                for node in object_nodes:
                    self.cat_index2scenes[node['category']].append(idx)
            with open(filename, 'wb') as f:
                pickle.dump(self.cat_index2scenes, f)
            print('')
        # Load an existing cached map file from disk
        else:
            with open(filename, 'rb') as f:
                self.cat_index2scenes = pickle.load(f)

    def __len__(self):
        if self.use_same_category_batches:
            return self.epoch_size
        else:
            return self.scene_indices[1] - self.scene_indices[0]

    # First, find the set of categories that occur within the scene indices
    # We do this because some categories might occur in the dataset,
    #    but only in the test set...
    def build_cats_in_scene_indices(self):
        cats_seen = {}
        for cat, scene_indices in self.cat_index2scenes.items():
            scenes = [idx for idx in scene_indices if \
                (idx >= self.scene_indices[0] and idx < self.scene_indices[1])]
            if len(scenes) > 0:
                cats_seen[cat] = True
        cats_seen = list(cats_seen.keys())
        self.cats_seen = cats_seen

    # Use at the beginning of each epoch to support loading batches of all the same category
    # NOTE: The data loader must have shuffle set to False for this to work
    def prepare_same_category_batches(self, batch_size):
        # Build a random list of category indices (grouped by batch_size)
        # This requires that the length of the dataset be a multiple of batch_size
        assert (len(self) % batch_size == 0)
        num_batches = len(self) // batch_size
        self.same_category_batch_indices = []
        for i in range(num_batches):
            cat_index = random.choice(self.cats_seen)
            for j in range(batch_size):
                self.same_category_batch_indices.append(cat_index)

    # 'importance' = a function of both size and observation frequency
    def sort_object_nodes_by_importance(self,
                                        object_nodes,
                                        noise=None,
                                        swap_prob=None):
        # Build list of pairs of (index, importance)
        index_imp_pairs = []
        for i in range(0, len(object_nodes)):
            node = object_nodes[i]
            cat = node["category"]
            imp = self.cat_importances[cat]
            index_imp_pairs.append((i, imp))

        # Optionally, add noise to these importance scores
        # Noise is expressed as a multiple of the standard deviation of the importance scores
        # A typical value might be really small, e.g. 0.05(?)
        if noise is not None:
            imps = [pair[1] for pair in index_imp_pairs]
            istd = np.array(imps).std()
            index_imp_pairs = [(index,
                                imp + noise * random.normalvariate(0, istd))
                               for index, imp in index_imp_pairs]

        # Sort based on importance
        index_imp_pairs.sort(key=lambda tup: tup[1], reverse=True)

        sorted_nodes = [object_nodes[tup[0]] for tup in index_imp_pairs]

        # Optionally, swap nodes with some probability
        if swap_prob is not None:
            for _ in range(len(sorted_nodes)):
                if random.random() < swap_prob:
                    idx1, idx2 = random.sample(range(len(sorted_nodes)), 2)
                    sorted_nodes[idx1], sorted_nodes[idx2] = \
                        sorted_nodes[idx2], sorted_nodes[idx1]

        return sorted_nodes

    def order_object_nodes(self, object_nodes):
        if self.importance_order:
            object_nodes = self.sort_object_nodes_by_importance(object_nodes)
        else:
            object_nodes = object_nodes[:]
            random.shuffle(object_nodes)

        # The following extra sorting passes only apply to datasets that have second-tier objects
        # We can check for this by looking for the presence of certain object properties e.g. 'parent'
        if object_nodes and 'parent' in object_nodes[0]:
            # Make sure that all second-tier objects come *after* first tier ones
            def is_second_tier(node):
                return (node['parent'] != 'Wall') and \
                       (node['parent'] != 'Floor')

            object_nodes.sort(key=lambda node: int(is_second_tier(node)))

            # Make sure that all children come after their parents
            def cmp_parent_child(node1, node2):
                # Less than (negative): node1 is the parent of node2
                if node2['parent'] == node1['id']:
                    return -1
                # Greater than (positive): node2 is the parent of node1
                elif node1['parent'] == node2['id']:
                    return 1
                # Equal (zero): all other cases
                else:
                    return 0

            object_nodes.sort(key=cmp_to_key(cmp_parent_child))

        return object_nodes

    def get_scene(self, index, stop_prob=None):
        i = index + self.scene_indices[0]
        scene = RenderedScene(i, self.data_folder, self.data_root_dir)
        object_nodes = self.order_object_nodes(scene.object_nodes)

        # With some probability, sample a 'stop' (i.e. the end of the scene build sequence)
        if stop_prob is not None and random.random() < stop_prob:
            output_node = None
            input_nodes = object_nodes
        else:
            # Pick a random index at which to split into (a) existing objects and
            #    (b) objects yet-to-be-added.
            split_idx = random.randint(0, len(object_nodes) - 1)
            # This object is the output node
            output_node = object_nodes[split_idx]
            # All objects before this index are input nodes
            input_nodes = object_nodes[0:split_idx]

        return scene, input_nodes, output_node

    def get_scene_specific_category(self, cat_index_or_name, empty_room=False):
        if isinstance(cat_index_or_name, list):
            cat_index_or_name = random.choice(cat_index_or_name)
        if isinstance(cat_index_or_name, int):
            cat_index = cat_index_or_name
        else:
            cat_name = cat_index_or_name
            cat_index = self.cat_name2index[cat_name]

        # Pull out a scene (within scene_indices) that has an instance of this category
        scenes_for_cat = [idx for idx in self.cat_index2scenes[cat_index] if \
            (idx >= self.scene_indices[0] and idx < self.scene_indices[1])]
        scene_index = random.choice(scenes_for_cat)
        scene = RenderedScene(scene_index, self.data_folder,
                              self.data_root_dir)
        object_nodes = self.order_object_nodes(scene.object_nodes)

        # Pick a random instance of the category
        cat_indices = [
            i for i in range(0, len(object_nodes))
            if object_nodes[i]['category'] == cat_index
        ]
        split_idx = random.choice(cat_indices)
        # This object is the output node
        output_node = object_nodes[split_idx]
        if empty_room:
            input_nodes = []  # No other objects in the scene
        else:
            # All objects before this index are input nodes
            input_nodes = object_nodes[0:split_idx]

        return scene, input_nodes, output_node

    def get_scene_same_category_batch(self, index):
        cat_index = self.same_category_batch_indices[index]
        return self.get_scene_specific_category(cat_index)

    # Balance training data so that we train equally often on all target categories
    def get_scene_uniform_category(self, stop_prob=None):
        if stop_prob is not None and random.random() < stop_prob:
            scene_index = random.randint(self.scene_indices[0],
                                         self.scene_indices[1] - 1)
            scene = RenderedScene(scene_index, self.data_folder,
                                  self.data_root_dir)
            output_node = None
            input_nodes = self.order_object_nodes(scene.object_nodes)
            return scene, input_nodes, output_node
        else:
            cat_index = random.choice(self.cats_seen)
            return self.get_scene_specific_category(cat_index)

    def __getitem__(self, index):
        if self.seed is not None:
            random.seed(self.seed)

        if self.use_same_category_batches:
            scene, input_nodes, output_node = self.get_scene_same_category_batch(
                index)
        elif self.cat_only:
            scene, input_nodes, output_node = self.get_scene(index,
                                                             stop_prob=0.1)
        else:
            scene, input_nodes, output_node = self.get_scene(index)

        # Get the composite images
        if not self.do_rotation_augmentation:
            input_img = create_transformed_composite(scene, input_nodes, 0)
            if not self.cat_only:
                output_img = create_transformed_composite(
                    scene, [output_node], 0)
        else:
            # Data augmentation: Get the composite images under a random cardinal rotation
            rot = random.choice([0, 90, 180, 270])
            input_img = create_transformed_composite(scene, input_nodes, rot)
            if not self.cat_only:
                output_img = create_transformed_composite(
                    scene, [output_node], rot)

        # Get the category of the object
        # This is an integer index
        if output_node is None:
            cat = torch.LongTensor([self.n_categories])
        else:
            cat = torch.LongTensor([output_node["category"]])

        # Also get the count of all categories currently in the scene
        catcount = torch.zeros(self.n_categories)
        for node in input_nodes:
            catidx = node['category']
            catcount[catidx] = catcount[catidx] + 1

        # If the dataset is configured to only care about predicting the category, then we can go ahead
        #    and return now
        if self.cat_only:
            return input_img, cat, catcount

        # Select just the object mask channel from the output image
        output_img = output_img[2]
        # Put a singleton dimension back in for the channel dimension
        output_img = torch.unsqueeze(output_img, 0)
        # Make sure that it has value 1 everywhere (hack: multiply by huge number and clamp)
        output_img *= 1000
        torch.clamp(output_img, 0, 1, out=output_img)  # Clamp in place

        # Get the location of the object
        # Normalize the coordinates to [-1, 1], with (0,0) being the image center
        loc = output_node['location']
        x = loc[0]
        y = loc[1]
        w = output_img.size()[2]
        x_ = ((x / w) - 0.5) * 2
        y_ = ((y / w) - 0.5) * 2
        loc = torch.Tensor([x_, y_])

        # Get the orientation of the object
        # Here, we assume that there is no scale, and that the only rotation is about the up vector
        #  (so we can just read the cos, sin values directly out of the transformation matrix)
        xform = output_node["transform"]
        cos = xform[0]
        sin = xform[8]
        orient = torch.Tensor([cos, sin])

        # Get the object-space dimensions of the output object (in pixel space)
        # (Normalize to [0, 1])
        xsize, ysize = output_node['objspace_dims']
        xsize = xsize / w
        ysize = ysize / w
        # dims = torch.Tensor([xsize, ysize])
        dims = torch.Tensor([ysize, xsize
                             ])  # Not sure why this flip is necessary atm...

        return input_img, output_img, cat, loc, orient, dims, catcount
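A sketch of how the same-category batching above might be driven during training (batch and epoch sizes are assumptions). Note that shuffle must be False, since prepare_same_category_batches assigns one category to each contiguous run of batch_size indices:

from torch.utils.data import DataLoader

batch_size = 64
dataset = LatentDataset(scene_indices=(0, 4000),
                        data_folder="bedroom_fin_256",
                        cat_only=True,
                        use_same_category_batches=True,
                        epoch_size=batch_size * 100)
loader = DataLoader(dataset, batch_size=batch_size, shuffle=False)
for epoch in range(10):
    dataset.prepare_same_category_batches(batch_size)  # re-randomize each epoch
    for input_img, cat, catcount in loader:
        pass  # training step would go here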
Example 12
class GlobalCategoryFilter():

    category_map = ObjectCategories()

    @staticmethod
    def get_filter():

        door_window = GlobalCategoryFilter.category_map.all_arch_categories()

        second_tier = [
            "table_lamp", "chandelier", "guitar", "amplifier", "keyboard",
            "drumset", "microphone", "accordion", "toy", "xbox", "playstation",
            "fishbowl", "chessboard", "iron", "helmet", "telephone",
            "stationary_container", "ceiling_fan", "bottle", "fruit_bowl",
            "glass", "knife_rack", "plates", "books", "book", "television",
            "wood_board", "switch", "pillow", "laptop", "clock", "trinket",
            "range_hood", "candle", "soap_dish"
        ]

        wall_objects = ["wall_lamp", "mirror", "curtain", "blind"]

        unimportant = [
            "toy", "fish_tank", "tricycle", "vacuum_cleaner", "weight_scale",
            "heater", "picture_frame", "beer", "shoes", "decoration",
            "ladder", "tripod", "air_conditioner", "cart",
            "fireplace_tools", "vase"
        ]

        inhabitants = ["person", "cat", "bird", "dog", "pet"]

        special_filter = [
            "rug",
        ]

        filtered = second_tier + unimportant + inhabitants + special_filter + wall_objects

        unwanted_complex_structure = ["partition", "column", "arch", "stairs"]
        set_items = [
            "chair_set", "stereo_set", "table_and_chair",
            "slot_machine_and_chair", "kitchen_set", "double_desk",
            "double_desk_with_chairs", "dressing_table_with_stool",
            "kitchen_island_with_range_hood_and_table"
        ]

        outdoor = [
            "lawn_mower", "car", "motorcycle", "bicycle", "garage_door",
            "outdoor_seating", "fence"
        ]

        rejected = unwanted_complex_structure + set_items + outdoor

        return filtered, rejected, door_window

    @staticmethod
    def get_filter_latent():

        door_window = GlobalCategoryFilter.category_map.all_arch_categories()

        second_tier_include = [
            "table_lamp",
            "television",
            "picture_frame",
            "books",
            "book",
            "laptop",
            "floor_lamp",
            "vase",
            "plant",
            "console",
            "stereo_set",
            "toy",
            "fish_tank",
            "cup",
            "glass",
            "fruit_bowl",
            "bottle",
            "fishbowl",
            "pillow",
        ]

        second_tier = [
            "chandelier", "guitar", "amplifier", "keyboard", "drumset",
            "microphone", "accordion", "chessboard", "iron", "helmet",
            "stationary_container", "ceiling_fan", "knife_rack", "plates",
            "wood_board", "switch", "clock", "trinket", "range_hood",
            "candle", "soap_dish"
        ]

        wall_objects = [
            "wall_lamp", "mirror", "curtain", "wall_shelf", "blinds", "blind"
        ]

        unimportant = [
            "tricycle",
            "fish_tank",
            "vacuum_cleaner",
            "weight_scale",
            "heater",
            "picture_frame",
            "beer",
            "shoes",
            "decoration",
            "ladder",
            "tripod",
            "air_conditioner",
            "cart",
            "fireplace_tools",
            "ironing_board",
        ]

        inhabitants = ["person", "cat", "bird", "dog", "pet"]

        special_filter = [
            "rug",
        ]

        filtered = second_tier + unimportant + inhabitants + special_filter + wall_objects

        unwanted_complex_structure = ["partition", "column", "arch", "stairs"]
        set_items = [
            "chair_set", "stereo_set", "table_and_chair",
            "slot_machine_and_chair", "kitchen_set", "double_desk",
            "double_desk_with_chairs", "desk_with_shelves",
            "dressing_table_with_stool", "armchair_with_ottoman",
            "kitchen_island_with_range_hood_and_table"
        ]

        outdoor = [
            "lawn_mower", "car", "motorcycle", "bicycle", "garage_door",
            "outdoor_seating", "fence"
        ]

        rejected = unwanted_complex_structure + set_items + outdoor

        return filtered, rejected, door_window, second_tier_include
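A sketch of how the filter lists might be applied (the input category list is illustrative, and assumes none of these names is an architectural category):

categories = ["bed", "rug", "toy", "wardrobe", "car"]
filtered, rejected, door_window = GlobalCategoryFilter.get_filter()
unwanted = set(filtered + rejected + door_window)
kept = [c for c in categories if c not in unwanted]
# kept == ["bed", "wardrobe"]: "rug" and "toy" are filtered, "car" is rejected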
Example 13
class DatasetStats(DatasetAction):
    """
    Gather stats of the houses
    Useful to get an idea of what's in the dataset
    And must be called to generate the model frequency information that's used
    in the NN modules
    """
    def __init__(self,
                 details=False,
                 model_details=False,
                 save_freq=True,
                 save_dest=""):
        """
        Parameters
        ----------
        details (bool, optional): if True, print category frequency information to the screen
        model_details (bool, optional): since there are many model ids, this additional flag
            controls whether per-model counts are also printed
        save_freq (bool, optional): if True, save the category frequency information to disk
        save_dest (string, optional): directory (relative to the data root) to which frequency information is saved
        """
        self.room_count = 0
        self.object_count = 0
        self.room_types_count = {}
        self.fine_categories_count = {}
        self.coarse_categories_count = {}
        self.final_categories_count = {}
        self.models_count = {}
        self.object_category = ObjectCategories()
        self.floor_node_only = False
        self.details = details
        self.model_details = model_details
        self.save_freq = save_freq
        data_dir = utils.get_data_root_dir()
        self.save_dest = f"{data_dir}/{save_dest}"

    def step(self, houses, num_threads=1):
        for house in houses:
            self.room_count += len(house.rooms)
            for room in house.rooms:
                room_types = room.roomTypes
                for room_type in room_types:
                    self.room_types_count[room_type] = \
                        self.room_types_count.get(room_type, 0) + 1
            filters = [floor_node_filter] if self.floor_node_only else []
            nodes = list(set([node for nodes in [room.get_nodes(filters) \
                              for room in house.rooms] for node in nodes \
                              if node.type == "Object"]))
            for node in nodes:
                self.object_count += 1
                fine_category = self.object_category.get_fine_category(
                    node.modelId)
                coarse_category = self.object_category.get_coarse_category(
                    node.modelId)
                final_category = self.object_category.get_final_category(
                    node.modelId)

                self.fine_categories_count[fine_category] = \
                    self.fine_categories_count.get(fine_category, 0) + 1
                self.coarse_categories_count[coarse_category] = \
                    self.coarse_categories_count.get(coarse_category, 0) + 1
                self.final_categories_count[final_category] = \
                    self.final_categories_count.get(final_category, 0) + 1
                self.models_count[node.modelId] = \
                    self.models_count.get(node.modelId, 0) + 1
            yield house

    def final(self):
        print(f"\nPrinting Results...")
        print(
            f"\nThere are {self.room_count} non-empty rooms in the selection.")
        print(f"There are {self.object_count} objects in the rooms.")
        print(
            f"On average, there are {self.object_count/self.room_count:.3f} objects per room\n"
        )

        print(
            f"There are {len(self.fine_categories_count)} fine categories among these objects."
        )

        if self.details:
            print(f"\n{'Model Category':40s}{'Occurence'}")
            for category in sorted(list((self.fine_categories_count.items())),
                                   key=lambda x: -x[1]):
                print(f"{category[0]:40s}{category[1]}")

        print(
            f"\nThere are {len(self.coarse_categories_count)} coarse categories among these objects."
        )
        if self.details:
            print(f"\n{'Coarse Category':40s}{'Occurence'}")
            for category in sorted(list(
                (self.coarse_categories_count.items())),
                                   key=lambda x: -x[1]):
                print(f"{category[0]:40s}{category[1]}")

        print(
            f"\nThere are {len(self.final_categories_count)} final categories among these objects."
        )
        if self.details:
            print(f"\n{'Final Category':40s}{'Occurence'}")
            for category in sorted(list((self.final_categories_count.items())),
                                   key=lambda x: -x[1]):
                print(f"{category[0]:40s}{category[1]}")

        print(
            f"\nThere are {len(self.models_count)} unique models among these objects."
        )
        if self.details and self.model_details:
            print(f"\n{'Model':40s}{'Occurence'}")
            for category in sorted(list((self.models_count.items())),
                                   key=lambda x: -x[1]):
                print(f"{category[0]:40s}{category[1]}")

        if self.save_freq:
            with open(f"{self.save_dest}/fine_categories_frequency", "w") as f:
                for cat in sorted(list((self.fine_categories_count.items())),
                                  key=lambda x: -x[1]):
                    f.write(f"{cat[0]} {cat[1]}\n")
            with open(f"{self.save_dest}/coarse_categories_frequency",
                      "w") as f:
                for cat in sorted(list((self.coarse_categories_count.items())),
                                  key=lambda x: -x[1]):
                    f.write(f"{cat[0]} {cat[1]}\n")
            with open(f"{self.save_dest}/final_categories_frequency",
                      "w") as f:
                for cat in sorted(list((self.final_categories_count.items())),
                                  key=lambda x: -x[1]):
                    f.write(f"{cat[0]} {cat[1]}\n")
            with open(f"{self.save_dest}/model_frequency", "w") as f:
                for cat in sorted(list((self.models_count.items())),
                                  key=lambda x: -x[1]):
                    f.write(f"{cat[0]} {cat[1]}\n")
Example 14
    use_jitter = False
    jitter_stdev = 0.01

    which_to_load = 500

    parser = argparse.ArgumentParser(description='orient')
    parser.add_argument('--save-dir', type=str, required=True)
    parser.add_argument('--data-folder', type=str, default="")
    args = parser.parse_args()
    outdir = f'./output/{args.save_dir}'
    utils.ensuredir(outdir)

    data_folder = args.data_folder

    data_root_dir = utils.get_data_root_dir()
    categories = ObjectCategories().all_non_arch_categories(
        data_root_dir, data_folder)
    num_categories = len(categories)
    num_input_channels = num_categories + 8

    nc = num_categories

    # Dataset size is based on the number of available scenes - the valid set size
    dataset_size = len([f for f in os.listdir(f'{data_root_dir}/{data_folder}') \
        if f.endswith('.jpg')]) - valid_set_size
    dataset_size = int(dataset_size / batch_size) * batch_size

    logfile = open(f"{outdir}/log.txt", 'w')

    def LOG(msg):
        print(msg)
        logfile.write(msg + '\n')
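The fragment above also references batch_size and valid_set_size, which are defined earlier in the full script; for it to stand alone, hypothetical values would be needed (these numbers are assumptions, not from the source):

batch_size = 32       # assumed training batch size, defined elsewhere in the script
valid_set_size = 160  # assumed number of scenes held out for validation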
Example 15
class RenderedScene():
    """
    Loading a rendered room
    Attributes
    ----------
    category_map (ObjectCategories): object category mapping
        that should be the same across all instances of the class
    categories (list[string]): all categories present in this room type.
        Loaded once when the first room is loaded to reduce disk access.
    cat_to_index (dict[string, int]): maps a category to corresponding index
    current_data_dir (string): keep track of the current data directory, if
        it changes, then categories and cat_to_index should be recomputed
    """
    category_map = ObjectCategories()
    categories = None
    cat_to_index = None
    current_data_dir = None

    def __init__(self, index, data_dir, data_root_dir=None, \
                 shuffle=True, load_objects=True, seed=None, rotation=0):
        """
        Load a rendered scene from file
        Parameters
        ----------
        index (int): room number
        data_dir (string): location of the pre-rendered rooms
        data_root_dir (string or None, optional): if specified,
            use this as the root directory
        shuffle (bool, optional): If true, randomly order the objects
            in the room. Otherwise use the default order as written
            in the original dataset
        load_objects (bool, optional): If false, only load the doors
            and windows. Otherwise load all objects in the room
        seed (int or None, optional): if set, use a fixed random seed
            so we can replicate a particular experiment
        rotation (int, optional): if nonzero, load the pre-rendered scene
            for this rotation (stored as "{index}_{rotation}.pkl")
        """
        if seed is not None:
            random.seed(seed)

        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()

        if RenderedScene.categories is None or RenderedScene.current_data_dir != data_dir:
            with open(f"{data_root_dir}/{data_dir}/final_categories_frequency",
                      "r") as f:
                lines = f.readlines()
                cats = [line.split()[0] for line in lines]

            RenderedScene.categories = [
                cat for cat in cats if cat not in set(['window', 'door'])
            ]
            RenderedScene.cat_to_index = {
                RenderedScene.categories[i]: i
                for i in range(len(RenderedScene.categories))
            }
            RenderedScene.current_data_dir = data_dir

        if rotation != 0:
            fname = f"{index}_{rotation}"
        else:
            fname = index

        with open(f"{data_root_dir}/{data_dir}/{fname}.pkl", "rb") as f:
            (self.floor, self.wall, nodes), self.room = pickle.load(f)

        self.index = index
        self.rotation = rotation

        self.object_nodes = []
        self.door_window_nodes = []
        for node in nodes:
            category = RenderedScene.category_map.get_final_category(
                node["modelId"])
            if category in ["door", "window"]:
                node["category"] = category
                self.door_window_nodes.append(node)
            elif load_objects:
                node["category"] = RenderedScene.cat_to_index[category]
                self.object_nodes.append(node)

        if shuffle:
            random.shuffle(self.object_nodes)

        self.size = self.floor.shape[0]

    def create_composite(self):
        """
        Create an initial composite that only contains the floor,
        wall, doors and windows. See RenderedComposite for how
        to add more objects
        """
        r = RenderedComposite(RenderedScene.categories, self.floor, self.wall,
                              self.door_window_nodes)
        return r
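A sketch of loading one pre-rendered room and building its composite input (add_node and get_composite are assumed methods of data.RenderedComposite; Example 7's docstring also refers to get_composite):

scene = RenderedScene(index=0, data_dir="bedroom_final")
composite = scene.create_composite()  # floor, walls, doors and windows only
for node in scene.object_nodes:
    composite.add_node(node)          # assumption: RenderedComposite.add_node
img = composite.get_composite()       # see data.RenderedComposite.get_composite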
Example 16
class SupportPrior():
    def __init__(self):
        pass

    def learn(self, data_folder="bedroom_final", data_root_dir=None):
        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [
            f for f in files
            if ".pkl" in f and "domain" not in f and "_" not in f
        ]

        self.categories = self.category_map.all_non_arch_categories(
            data_root_dir, data_folder)
        self.category_count = self.category_map.all_non_arch_category_counts(
            data_root_dir, data_folder)
        self.cat_to_index = {
            self.categories[i]: i
            for i in range(len(self.categories))
        }
        self.num_categories = len(self.categories)
        self.categories.append("floor")
        N = self.num_categories

        self.support_count = [[0 for i in range(N + 1)] for j in range(N)]

        for index in range(len(files)):
            print(index)
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)

            object_nodes = []
            id_to_cat = {}
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if not self.category_map.is_arch(category):
                    object_nodes.append(node)
                    id_to_cat[node["id"]] = self.cat_to_index[category]
                    node["category"] = self.cat_to_index[category]

            for node in object_nodes:
                parent = node["parent"]
                category = node["category"]
                if parent == "Floor" or parent is None:
                    self.support_count[category][-1] += 1
                else:
                    self.support_count[category][id_to_cat[parent]] += 1

        self.possible_supports = {}
        for i in range(self.num_categories):
            print(f"Support for {self.categories[i]}:")
            supports = [(c, self.support_count[i][c] / self.category_count[i])
                        for c in range(N + 1)]
            supports = sorted(supports, key=lambda x: -x[1])
            supports = [s for s in supports if s[1] > 0.01]
            for s in supports:
                print(f"    {self.categories[s[0]]}:{s[1]:4f}")
            self.possible_supports[i] = [s[0] for s in supports]

        print(self.possible_supports)
        self.N = N

    def save(self, dest=None):
        if dest is None:
            dest = f"{self.data_dir}/support_prior.pkl"
        with open(dest, "wb") as f:
            pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)

    def load(self, data_dir):
        source = f"{data_dir}/support_prior.pkl"
        with open(source, "rb") as f:
            self.__dict__ = pickle.load(f)
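A sketch of querying the learned support prior (the data folder and category index are placeholders):

prior = SupportPrior()
prior.load(f"{utils.get_data_root_dir()}/bedroom_final")
cat = 0  # index of some non-arch category
# possible_supports maps a category to the indices of its likely supporting
# categories; the last index (prior.N) stands for the floor
for s in prior.possible_supports[cat]:
    print(prior.categories[s])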
Example 17
    def learn(self, data_folder="bedroom_final", data_root_dir=None):
        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [
            f for f in files
            if ".pkl" in f and "domain" not in f and "_" not in f
        ]

        self.categories = self.category_map.all_non_arch_categories(
            data_root_dir, data_folder)
        self.cat_to_index = {
            self.categories[i]: i
            for i in range(len(self.categories))
        }

        with open(f"{data_dir}/model_frequency", "r") as f:
            lines = f.readlines()
            models = [line.split()[0] for line in lines]
            self.model_freq = [int(line.split()[1]) for line in lines]

        self.models = [
            model for model in models if not self.category_map.is_arch(
                self.category_map.get_final_category(model))
        ]
        self.model_to_index = {
            self.models[i]: i
            for i in range(len(self.models))
        }

        N = len(self.models)
        self.num_categories = len(self.categories)

        self.model_index_to_cat = [
            self.cat_to_index[self.category_map.get_final_category(
                self.models[i])] for i in range(N)
        ]

        self.count = [[0 for i in range(N)] for j in range(N)]

        for index in range(len(files)):
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)

            object_nodes = []
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if not self.category_map.is_arch(category):
                    object_nodes.append(node)

            for i in range(len(object_nodes)):
                for j in range(i + 1, len(object_nodes)):
                    a = self.model_to_index[object_nodes[i]["modelId"]]
                    b = self.model_to_index[object_nodes[j]["modelId"]]
                    self.count[a][b] += 1
                    self.count[b][a] += 1
            print(index)

        self.N = N
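
The learner above only accumulates raw pairwise counts. Below is a hedged sketch of one way a consumer might normalize them into a per-model co-occurrence distribution; the helper name and the normalization scheme are assumptions, not part of the source.

def conditional_cooccurrence(prior, model_id):
    """Distribution over other models co-occurring with model_id,
    obtained by row-normalizing the learned count matrix (a sketch)."""
    a = prior.model_to_index[model_id]
    row = prior.count[a]
    total = sum(row)
    if total == 0:
        return {}
    return {prior.models[b]: row[b] / total
            for b in range(prior.N) if b != a}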
Example No. 18
class ArrangementGreedySampler:
    """
    Iterative optimization of object arrangements using greedy sampling of ArrangementPriors
    """
    def __init__(self,
                 arrangement_priors,
                 num_angle_divisions=8,
                 num_pairwise_priors=-1,
                 sim_mode='direct'):
        self._objects = ObjectCollection(sim_mode=sim_mode)
        self.room_id = None
        self._priors = arrangement_priors
        self._num_angle_divisions = num_angle_divisions
        self._num_pairwise_priors = num_pairwise_priors
        self.category_map = ObjectCategories()

    @property
    def objects(self):
        return self._objects

    def init(self, house, only_architecture=True, room_id=None):
        if not room_id:
            room_id = house.rooms[0].id
        self._objects.init_from_room(house,
                                     room_id,
                                     only_architecture=only_architecture)
        self.room_id = room_id

    def log_prob(self, filter_ref_obj=None, ignore_categories=list()):
        observations = self._objects.get_relative_observations(
            self.room_id,
            filter_ref_obj=filter_ref_obj,
            ignore_categories=ignore_categories)
        observations_by_key = {}

        # top_k_prior_categories = None
        # if filter_ref_obj and num_pairwise_priors specified, filter observations to only those in top k priors
        # if filter_ref_obj and self._num_pairwise_priors > 0:
        #     category = self._objects.category(filter_ref_obj.modelId, scheme='final')
        #     priors = list(filter(lambda p: p.ref_obj_category == category, self._priors.pairwise_priors))
        #     k = min(self._num_pairwise_priors, len(priors))
        #     priors = list(sorted(priors, key=lambda p: self._priors.pairwise_occurrence_log_prob(p)))[-k:]
        #     top_k_prior_categories = set(map(lambda p: p.obj_category, priors))

        for o in observations.values():
            # only pairwise observations in which filter_ref_obj is the reference
            if filter_ref_obj and o.ref_id != filter_ref_obj.id:
                continue
            key = self._objects.get_observation_key(o)
            os_key = observations_by_key.get(key, [])
            os_key.append(o)
            observations_by_key[key] = os_key
        return self._priors.log_prob(observations_by_key)

    def get_candidate_transform(self, node, max_iterations=100):
        num_checks = 0
        zmin = self._objects.room.zmin
        while True:
            num_checks += 1
            p = self._objects.room.obb.sample()
            ray_from = [p[0], zmin - .5, p[2]]
            ray_to = [p[0], zmin + .5, p[2]]
            intersection = self._objects.simulator.ray_test(ray_from, ray_to)
            if intersection.id == self.room_id + 'f' or num_checks > max_iterations:
                break
        xform = Transform()
        xform.set_translation([p[0], zmin + .1, p[2]])
        angle = random() * 2 * math.pi
        angular_resolution = 2 * math.pi / self._num_angle_divisions
        angle = round(angle / angular_resolution) * angular_resolution
        xform.set_rotation(radians=angle)
        return xform

    def sample_placement(self,
                         node,
                         n_samples,
                         houses_log=None,
                         max_attempts_per_sample=10,
                         ignore_categories=list(),
                         collision_threshold=0):
        """
        Sample placement for given node
        """
        self._objects.add_object(node)
        max_lp = -np.inf
        max_xform = None
        max_house = None
        num_noncolliding_samples = 0
        for i in range(max_attempts_per_sample * n_samples):
            xform = self.get_candidate_transform(node)
            self._objects.update(node, xform=xform, update_sim=True)
            collisions = self._objects.get_collisions(obj_id_a=node.id)
            if collision_threshold > 0:
                deepest = min(collisions.values(), key=lambda c: c.distance)
                if deepest.distance < -collision_threshold:
                    continue
                    continue
            elif len(collisions) > 0:
                continue
            lp = self.log_prob(filter_ref_obj=node,
                               ignore_categories=ignore_categories)
            print(f'lp={lp}')
            if lp > max_lp:
                max_xform = xform
                max_lp = lp
                if houses_log is not None:
                    max_house = self._objects.as_house()
            num_noncolliding_samples += 1
            if num_noncolliding_samples == n_samples:
                break
        if houses_log is not None:
            houses_log.append(max_house)
        self._objects.update(node, xform=max_xform, update_sim=True)

    def placeable_objects_sorted_by_size(self, house):
        objects = []
        fixed_objects = []
        for n in house.levels[0].nodes:
            if n.type != 'Object':
                continue
            category = self._objects.category(n.modelId, scheme='final')
            if self.category_map.is_arch(category):
                fixed_objects.append(n)
                continue
            objects.append(n)

        for o in objects:
            dims = self._objects._object_data.get_aligned_dims(o.modelId)
            o.volume = dims[0] * dims[1] * dims[2]
        objects = list(sorted(objects, key=lambda x: x.volume, reverse=True))
        return objects, fixed_objects
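
A hedged end-to-end sketch of how this sampler appears intended to be driven. The ArrangementPriors constructor and the House loader signature are assumptions based only on the names used above, not APIs shown in this document.

priors = ArrangementPriors()              # assumed constructor
ags = ArrangementGreedySampler(priors)

house = House(file_dir="house.json")      # assumed loader signature
ags.init(house, only_architecture=True)

# Place objects largest-first, greedily keeping the highest log-prob sample
placeables, _fixed = ags.placeable_objects_sorted_by_size(house)
for node in placeables:
    ags.sample_placement(node, n_samples=10)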
Example No. 19
class ObjectCollection:
    """Provides observation information for a collection of objects"""
    def __init__(self, categorization_type='final', sim_mode='direct'):
        self._object_data = ObjectData()
        self._object_categories = ObjectCategories()
        self._objects = {}
        self._room = None
        self._categorization_type = categorization_type
        self._sim = Simulator(mode=sim_mode)

    @property
    def simulator(self):
        return self._sim

    @property
    def room(self):
        return self._room

    @property
    def objects(self):
        return self._objects

    def add_object(self, o):
        if o.id in self._objects and self._objects[o.id] != o:
            print(f'Warning: ignoring node with duplicate node id={o.id}')
            return None
        if hasattr(o, 'type') and o.type == 'Room':  # room node
            self._room = o
            self._sim.add_room(o, wall=True, floor=True, ceiling=False)
        else:  # other nodes
            self._sim.add_object(o)
        self.update(o, update_sim=True)
        return o.id

    def _remove(self, obj_id):
        if obj_id not in self._objects:
            print(f'Warning: tried to remove object with id={obj_id} that is not present')
        else:
            del self._objects[obj_id]
            self._sim.remove(obj_id)

    def update(self, o, xform=None, update_sim=False):
        if not hasattr(o, 'category'):
            o.category = self.category(o.modelId, scheme=self._categorization_type)
        model_id = o.modelId if hasattr(o, 'modelId') else None
        o.model2world = self.semantic_frame_matrix(model_id)
        o.xform = xform if xform else Transform.from_node(o)
        if hasattr(o, 'transform'):
            o.transform = o.xform.as_mat4_flat_row_major()
        o.obb = OBB.from_local2world_transform(np.matmul(o.xform.as_mat4(), o.model2world))
        o.frame = self.node_to_semantic_frame(o, o.obb)
        self._objects[o.id] = o
        # room geometries pre-transformed, so after obb computation above is done, set back to identity transform
        if hasattr(o, 'type') and o.type == 'Room':
            o.xform = Transform()
            o.transform = o.xform.as_mat4_flat_row_major()
        if update_sim:
            self._sim.set_state(obj_id=o.id, position=o.xform.translation, rotation_q=o.xform.rotation)

    def randomize_object_transforms(self):
        for o in self._objects.values():
            if o is self._room:
                continue
            t = self._room.obb.sample()
            t[1] = o.xform.translation[1]
            o.xform.set_translation(t)
            r = random() * 2 * math.pi
            o.xform.set_rotation(radians=r)
            self.update(o, o.xform)

    def init_from_room(self, house, room_id, only_architecture=False, update_sim=True):
        self.reset()
        room = next(r for r in house.rooms if r.id == room_id)
        self.add_object(room)
        if not only_architecture:
            for o in room.nodes:
                self.add_object(o)
        # if update_sim:
        #     self._sim.add_house_room_only(house, room, only_architecture=only_architecture, no_ceil=True, no_floor=True)

    def init_from_house(self, house, update_sim=True):
        self.reset()
        for o in house.nodes:
            self.add_object(o)
        # if update_sim:
        #     self._sim.add_house(house, no_ceil=True, no_floor=True)

    def as_house(self):
        room_nodes = [dict(n.__dict__) for n in self._objects.values() if n.type != 'Room']
        for i, n in enumerate(room_nodes):
            n['originalId'] = n.get('originalId', n['id'])
            n['id'] = f'0_{i + 1}'  # overwrite id with linearized id
        room = {
            'id': '0_0',
            'originalId': self.room.originalId if hasattr(self.room, 'originalId') else self.room.id,
            'type': 'Room',
            'valid': 1,
            'modelId': self.room.modelId,
            'nodeIndices': list(range(1, len(room_nodes) + 1)),
            'roomTypes': self.room.roomTypes,
            'bbox': self.room.bbox,
        }
        house_dict = {
            'version': '[email protected]',
            'id': self.room.house_id,
            'up': [0, 1, 0],
            'front': [0, 0, 1],
            'scaleToMeters': 1,
            'levels': [{'id': '0', 'nodes': [room] + room_nodes}]
        }
        return House(house_json=house_dict)

    def reset(self):
        self._objects = {}
        self._sim.reset()

    def reinit_simulator(self, wall=True, floor=True, ceiling=False):
        self._sim.reset()
        for o in self._objects.values():
            if hasattr(o, 'type') and o.type == 'Room':  # room node:
                self._sim.add_room(self.room, wall=wall, floor=floor, ceiling=ceiling)
            else:  # other nodes
                self._sim.add_object(o)
            self._sim.set_state(obj_id=o.id, position=o.xform.translation, rotation_q=o.xform.rotation)

    def get_relative_observations(self, room_id, filter_ref_obj, ignore_categories):
        out = {}
        ref_objects = [filter_ref_obj] if filter_ref_obj else self._objects.values()
        for o_r in ref_objects:
            if o_r.category in ignore_categories:
                continue
            for o_i in self._objects.values():
                if o_i is o_r:
                    continue
                if o_i.category in ignore_categories:
                    continue
                out[(o_i.id, o_r.id)] = self.object_frames_to_relative_observation(o_i.frame, o_r.frame, room_id)
        return out

    def get_collisions(self, include_collision_with_static=True, obj_id_a=None):
        # update sim state to match state of this ObjectCollection
        for o_id, o in self._objects.items():
            self._sim.set_state(obj_id=o.id, position=o.xform.translation, rotation_q=o.xform.rotation)
        self._sim.step()  # sim step needed to create contacts
        return self._sim.get_contacts(obj_id_a=obj_id_a, include_collision_with_static=include_collision_with_static)

    def get_observation_key(self, observation):
        room_node = self._objects[observation.room_id]
        room_types = '-'.join(room_node.roomTypes) if hasattr(room_node, 'roomTypes') else ''
        obj_node = self._objects[observation.obj_id]
        obj_cat = self.category(obj_node.modelId, scheme=self._categorization_type)
        ref_node = self._objects[observation.ref_id]
        ref_cat = self.category(ref_node.modelId, scheme=self._categorization_type)
        key = ObservationCategory(room_types=room_types, obj_category=obj_cat, ref_obj_category=ref_cat)
        return key

    def category(self, model_id, scheme):
        if 'rm' in model_id:
            return 'room'
        if scheme == 'coarse':
            return self._object_categories.get_coarse_category(model_id)
        elif scheme == 'fine':
            return self._object_categories.get_fine_category(model_id)
        elif scheme == 'final':
            return self._object_categories.get_final_category(model_id)
        else:
            raise RuntimeError(f'Unknown categorization type: {scheme}')

    def semantic_frame_matrix(self, model_id):
        if model_id in self._object_data.model_to_data:
            return self._object_data.get_model_semantic_frame_matrix(model_id)
        else:  # not a model, so assume identity semantic frame
            return np.identity(4)

    def object_frames_to_relative_observation(self, frame, ref_frame, room_id):
        ref_dims = ref_frame['obb'].half_dimensions
        rel_centroid = ref_frame['obb'].transform_point(frame['obb'].centroid)
        rel_min = ref_frame['obb'].transform_point(frame['aabb']['min'])
        rel_max = ref_frame['obb'].transform_point(frame['aabb']['max'])
        rel_up = ref_frame['obb'].transform_direction(frame['obb'].rotation_matrix[:3, 1])
        rel_front = ref_frame['obb'].transform_direction(-frame['obb'].rotation_matrix[:3, 2])  # note: +z = back
        cp = self._sim.get_closest_point(obj_id_a=frame['obj_id'], obj_id_b=ref_frame['obj_id'])
        rel_cp = ref_frame['obb'].transform_point(cp.positionOnAInWS)
        # NOTE: below approximate closest point calls are for removing pybullet call and debugging memory leak
        # cp = frame['obb'].closest_point(ref_frame['obb'].centroid)
        # rel_cp = ref_frame['obb'].transform_point(cp)
        out = RelativeObservation(room_id=room_id, obj_id=frame['obj_id'], ref_id=ref_frame['obj_id'],
                                  ref_dims=ref_dims, centroid=rel_centroid, min=rel_min, max=rel_max, closest=rel_cp,
                                  front=rel_front, up=rel_up)
        return out

    @staticmethod
    def node_to_semantic_frame(node, obb):
        aabb_min, aabb_max = obb.to_aabb()
        out = {
            'obj_id': node.id,
            'obb': obb,
            'aabb': {'min': aabb_min, 'max': aabb_max}
        }
        return out
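
A small usage sketch for ObjectCollection. Loading the House instance is not shown in this document, so house below is assumed to be already constructed.

oc = ObjectCollection()
room_id = house.rooms[0].id               # house: assumed pre-loaded House
oc.init_from_room(house, room_id, only_architecture=False)

# Pairwise relative observations between all objects in the room:
obs = oc.get_relative_observations(room_id, filter_ref_obj=None,
                                   ignore_categories=['room'])
for (obj_id, ref_id), rel in obs.items():
    print(obj_id, ref_id, rel.centroid)

# Contact information from the embedded simulator:
collisions = oc.get_collisions()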
Example No. 20
    def learn(self, data_folder, data_root_dir=None):
        if data_root_dir is None:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [f for f in files if ".pkl" in f and "domain" not in f]

        with open(f"{data_dir}/final_categories_frequency", "r") as f:
            lines = f.readlines()
            cats = [line.split()[0] for line in lines]

        self.categories = [
            cat for cat in cats if cat not in set(['window', 'door'])
        ]
        self.cat_to_index = {
            self.categories[i]: i
            for i in range(len(self.categories))
        }

        with open(f"{data_dir}/model_frequency", "r") as f:
            lines = f.readlines()
            models = [line.split()[0] for line in lines]
            self.model_freq = [int(l[:-1].split()[1]) for l in lines]

        self.models = [
            model for model in models if self.category_map.get_final_category(
                model) not in set(['window', 'door'])
        ]
        self.model_to_index = {
            self.models[i]: i
            for i in range(len(self.models))
        }

        N = len(self.models)
        self.num_categories = len(self.categories)

        self.model_index_to_cat = [
            self.cat_to_index[self.category_map.get_final_category(
                self.models[i])] for i in range(N)
        ]

        self.count = [[0 for i in range(N)] for j in range(N)]

        for index in range(len(files)):
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)

            object_nodes = []
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if category not in ["door", "window"]:
                    object_nodes.append(node)

            for i in range(len(object_nodes)):
                for j in range(i + 1, len(object_nodes)):
                    a = self.model_to_index[object_nodes[i]["modelId"]]
                    b = self.model_to_index[object_nodes[j]["modelId"]]
                    self.count[a][b] += 1
                    self.count[b][a] += 1
            print(index, end="\r")

        self.N = N
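
As with Example No. 17, the learned matrix can be queried afterwards. Below is a hedged sketch of a top-k lookup; the helper is hypothetical and not part of the source.

def top_cooccurring_models(prior, model_id, k=5):
    """Return the k models that co-occur most often with model_id (a sketch)."""
    a = prior.model_to_index[model_id]
    ranked = sorted((b for b in range(prior.N) if b != a),
                    key=lambda b: -prior.count[a][b])
    return [(prior.models[b], prior.count[a][b]) for b in ranked[:k]]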
Example No. 21
class RenderedScene:
    """
    Loading a rendered room
    Attributes
    ----------
    category_map (ObjectCategories): object category mapping
        that should be the same across all instances of the class
    categories (list[string]): all categories present in this room type.
        Loaded once when the first room is loaded to reduce disk access.
    cat_to_index (dict[string, int]): maps a category to corresponding index
    current_data_dir (string): keep track of the current data directory, if
        it changes, then categories and cat_to_index should be recomputed
    """
    category_map = ObjectCategories()
    categories = None
    cat_to_index = None
    current_data_dir = None

    def __init__(self, index, data_dir, data_root_dir=None,
                 shuffle=True, load_objects=True, seed=None, rotation=0):
        """
        Load a rendered scene from file
        Parameters
        ----------
        index (int): room number
        data_dir (string): location of the pre-rendered rooms
        data_root_dir (string or None, optional): if specified,
            use this as the root directory
        shuffle (bool, optional): If true, randomly order the objects
            in the room. Otherwise use the default order as written
            in the original dataset
        load_objects (bool, optional): If false, only load the doors
            and windows. Otherwise load all objects in the room
        seed (int or None, optional): if set, use a fixed random seed
            so we can replicate a particular experiment
        """
        if seed is not None:
            random.seed(seed)

        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()

        if RenderedScene.categories is None or RenderedScene.current_data_dir != data_dir:
            RenderedScene.categories = RenderedScene.category_map.all_non_arch_categories(
                data_root_dir, data_dir)
            RenderedScene.cat_to_index = {
                RenderedScene.categories[i]: i
                for i in range(len(RenderedScene.categories))
            }
            RenderedScene.current_data_dir = data_dir
        if rotation != 0:
            fname = f"{index}_{rotation}"
        else:
            fname = index

        with open(f"{data_root_dir}/{data_dir}/{fname}.pkl", "rb") as f:
            (self.floor, wall, nodes), self.room = pickle.load(f)

        self.size = self.floor.shape[0]

        self.wall = wall["height_map"]
        self.wall_segments = wall["segments"]

        # Compute normals for wall segment objects
        mask = (self.floor + self.wall) != 0
        for wall_seg in self.wall_segments:
            seg = [np.array([x[0], x[2]])
                   for x in wall_seg["points"]]  # reduce to 2d
            v_diff = seg[1] - seg[0]
            v_norm = np.array([-v_diff[1], v_diff[0]])
            v_norm = v_norm / np.linalg.norm(v_norm)
            #### Decide whether to flip the normal vector
            midp = (seg[0] + seg[1]) / 2
            test_point = np.ceil(midp +
                                 5 * v_norm * np.array([-1, 1])).astype(int)
            # If (1) a point slightly along the normal falls outside the image,
            # or (2) the pixel in the room mask at this point is not filled,
            # then the normal must be pointing out of the room and should be flipped.
            out = (test_point >= self.size).any() or (test_point < 0).any()
            if out or not mask[int(test_point[0]), int(test_point[1])]:
                v_norm = -v_norm
            wall_seg["normal"] = v_norm
            wall_seg["points"] = seg

        self.index = index
        self.rotation = rotation

        self.object_nodes = []
        self.door_window_nodes = []
        for node in nodes:
            category = RenderedScene.category_map.get_final_category(
                node["modelId"])
            if RenderedScene.category_map.is_arch(category):
                node["category"] = category
                self.door_window_nodes.append(node)
            elif load_objects:
                node["category"] = RenderedScene.cat_to_index[category]
                self.object_nodes.append(node)

        if shuffle:
            random.shuffle(self.object_nodes)

    def create_composite(self):
        """
        Create a initial composite that only contains the floor,
        wall, doors and windows. See RenderedComposite for how
        to add more objects
        """
        r = RenderedComposite(RenderedScene.categories, self.floor, self.wall,
                              self.door_window_nodes)
        return r
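
A hedged usage sketch for RenderedScene. RenderedComposite.add_node is an assumption based on the docstring reference above, not an API shown in this document.

scene = RenderedScene(index=0, data_dir="bedroom_final")
composite = scene.create_composite()
for node in scene.object_nodes:
    composite.add_node(node)              # assumed method for adding objects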