Code example #1
File: model_prior.py  Project: zebrajack/planit
# Assumed imports: os, pickle, and numpy are standard/third-party; utils and
# ObjectCategories are project-local planit modules (exact import paths omitted).
import os
import pickle

import numpy as np


class ModelPrior():
    def __init__(self):
        pass

    def learn(self, data_folder="bedroom_final", data_root_dir=None):
        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [
            f for f in files
            if ".pkl" in f and not "domain" in f and not "_" in f
        ]

        self.categories = self.category_map.all_non_arch_categories(
            data_root_dir, data_folder)
        self.cat_to_index = {
            self.categories[i]: i
            for i in range(len(self.categories))
        }

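        # model_frequency is parsed as one "<modelId> <count>" pair per line.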
        with open(f"{data_dir}/model_frequency", "r") as f:
            lines = f.readlines()
            models = [line.split()[0] for line in lines]
            self.model_freq = [int(l[:-1].split()[1]) for l in lines]

        self.models = [
            model for model in models if not self.category_map.is_arch(
                self.category_map.get_final_category(model))
        ]
        self.model_to_index = {
            self.models[i]: i
            for i in range(len(self.models))
        }

        N = len(self.models)
        self.num_categories = len(self.categories)

        self.model_index_to_cat = [
            self.cat_to_index[self.category_map.get_final_category(
                self.models[i])] for i in range(N)
        ]

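        # count[a][b]: number of rooms in which models a and b co-occur.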
        self.count = [[0 for i in range(N)] for j in range(N)]

        for index in range(len(files)):
            #for index in range(100):
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)

            object_nodes = []
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if not self.category_map.is_arch(category):
                    object_nodes.append(node)

            for i in range(len(object_nodes)):
                for j in range(i + 1, len(object_nodes)):
                    a = self.model_to_index[object_nodes[i]["modelId"]]
                    b = self.model_to_index[object_nodes[j]["modelId"]]
                    self.count[a][b] += 1
                    self.count[b][a] += 1
            print(index)

        self.N = N

    def save(self, dest=None):
        if dest == None:
            dest = f"{self.data_dir}/model_prior.pkl"
        with open(dest, "wb") as f:
            pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)

    def load(self, data_dir):
        source = f"{data_dir}/model_prior.pkl"
        with open(source, "rb") as f:
            self.__dict__ = pickle.load(f)

    def sample(self, category, models):
        N = self.N
        indices = [
            i for i in range(N) if self.model_index_to_cat[i] == category
        ]
        p = [self.model_freq[indices[i]] for i in range(len(indices))]
        p = np.asarray(p)
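        # Reweight the within-category frequency prior by the co-occurrence
        # distribution of each already-placed model against these candidates.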
        for model in models:
            i = self.model_to_index[model]
            p1 = [self.count[indices[j]][i] for j in range(len(indices))]
            p1 = np.asarray(p1)
            p1 = p1 / p1.sum()
            p = p * p1

        p = p / sum(p)
        numbers = np.asarray([i for i in range(len(indices))])
        return self.models[indices[np.random.choice(numbers, p=p)]]

    def get_models(self, category, important, others):
        N = self.N
        indices = [
            i for i in range(N) if self.model_index_to_cat[i] == category
        ]
        to_remove = []

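        # Mark for removal any model that is rare within its category (<1%)
        # or that rarely co-occurs with the given important/other models.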
        freq = [self.model_freq[indices[i]] for i in range(len(indices))]
        total_freq = sum(freq)
        for j in range(len(indices)):
            if freq[j] / total_freq < 0.01:
                if not indices[j] in to_remove:
                    to_remove.append(indices[j])

        for model in important:
            i = self.model_to_index[model]
            freq = [self.count[indices[j]][i] for j in range(len(indices))]
            total_freq = sum(freq)
            if total_freq > 0:
                for j in range(len(indices)):
                    if freq[j] / total_freq < 0.1:
                        if not indices[j] in to_remove:
                            to_remove.append(indices[j])

        for model in others:
            i = self.model_to_index[model]
            freq = [self.count[indices[j]][i] for j in range(len(indices))]
            total_freq = sum(freq)
            if total_freq > 0:
                for j in range(len(indices)):
                    if freq[j] / total_freq < 0.05:
                        if not indices[j] in to_remove:
                            to_remove.append(indices[j])

        for item in to_remove:
            if len(indices) > 1:
                indices.remove(item)

        return [self.models[index] for index in indices]
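
A minimal usage sketch (hypothetical; it assumes the preprocessed "bedroom_final"
dataset is already present under the data root returned by utils.get_data_root_dir):

prior = ModelPrior()
prior.learn(data_folder="bedroom_final")
prior.save()

# Sample a model id for one category, conditioned on the models already placed
# in the room (none here, so only the per-model frequency prior is used).
category_index = prior.cat_to_index[prior.categories[0]]
model_id = prior.sample(category_index, models=[])
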
Code example #2
File: arrangement.py  Project: zebrajack/planit
# Assumed imports: math, random, and numpy are standard/third-party;
# ObjectCollection, ObjectCategories, and Transform are project-local
# planit classes (exact import paths omitted).
import math
from random import random

import numpy as np


class ArrangementGreedySampler:
    """
    Iterative optimization of object arrangements using greedy sampling of ArrangementPriors
    """
    def __init__(self,
                 arrangement_priors,
                 num_angle_divisions=8,
                 num_pairwise_priors=-1,
                 sim_mode='direct'):
        self._objects = ObjectCollection(sim_mode=sim_mode)
        self.room_id = None
        self._priors = arrangement_priors
        self._num_angle_divisions = num_angle_divisions
        self._num_pairwise_priors = num_pairwise_priors
        self.category_map = ObjectCategories()

    @property
    def objects(self):
        return self._objects

    def init(self, house, only_architecture=True, room_id=None):
        if not room_id:
            room_id = house.rooms[0].id
        self._objects.init_from_room(house,
                                     room_id,
                                     only_architecture=only_architecture)
        self.room_id = room_id

    def log_prob(self, filter_ref_obj=None, ignore_categories=list()):
        observations = self._objects.get_relative_observations(
            self.room_id,
            filter_ref_obj=filter_ref_obj,
            ignore_categories=ignore_categories)
        observations_by_key = {}

        # top_k_prior_categories = None
        # if filter_ref_obj and num_pairwise_priors specified, filter observations to only those in top k priors
        # if filter_ref_obj and self._num_pairwise_priors > 0:
        #     category = self._objects.category(filter_ref_obj.modelId, scheme='final')
        #     priors = list(filter(lambda p: p.ref_obj_category == category, self._priors.pairwise_priors))
        #     k = min(self._num_pairwise_priors, len(priors))
        #     priors = list(sorted(priors, key=lambda p: self._priors.pairwise_occurrence_log_prob(p)))[-k:]
        #     top_k_prior_categories = set(map(lambda p: p.obj_category, priors))

        for o in observations.values():
            # only pairwise observations in which filter_ref_obj is the reference
            if filter_ref_obj and o.ref_id != filter_ref_obj.id:
                continue
            key = self._objects.get_observation_key(o)
            os_key = observations_by_key.get(key, [])
            os_key.append(o)
            observations_by_key[key] = os_key
        return self._priors.log_prob(observations_by_key)

    def get_candidate_transform(self, node, max_iterations=100):
        num_checks = 0
        zmin = self._objects.room.zmin
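        # Rejection-sample a point inside the room's OBB until a short vertical
        # ray test around floor height hits this room's floor (id == room_id + 'f').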
        while True:
            num_checks += 1
            p = self._objects.room.obb.sample()
            ray_from = [p[0], zmin - .5, p[2]]
            ray_to = [p[0], zmin + .5, p[2]]
            intersection = self._objects.simulator.ray_test(ray_from, ray_to)
            if intersection.id == self.room_id + 'f' or num_checks > max_iterations:
                break
        xform = Transform()
        xform.set_translation([p[0], zmin + .1, p[2]])
        angle = random() * 2 * math.pi
        angular_resolution = 2 * math.pi / self._num_angle_divisions
        angle = round(angle / angular_resolution) * angular_resolution
        xform.set_rotation(radians=angle)
        return xform

    def sample_placement(self,
                         node,
                         n_samples,
                         houses_log=None,
                         max_attempts_per_sample=10,
                         ignore_categories=list(),
                         collision_threshold=0):
        """
        Sample placement for given node
        """
        self._objects.add_object(node)
        max_lp = -np.inf
        max_xform = None
        max_house = None
        num_noncolliding_samples = 0
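        # Draw candidate transforms, skip colliding placements, and keep the
        # highest log-probability transform among n_samples non-colliding ones.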
        for i in range(max_attempts_per_sample * n_samples):
            xform = self.get_candidate_transform(node)
            self._objects.update(node, xform=xform, update_sim=True)
            collisions = self._objects.get_collisions(obj_id_a=node.id)
            # print(f'i={i}, samples_so_far={num_noncolliding_samples}, n_samples={n_samples},'
            #       f'max_attempts_per_sample={max_attempts_per_sample}')
            if collision_threshold > 0:
                if min(collisions.values(), key=lambda c: c.distance
                       ).distance < -collision_threshold:
                    continue
            elif len(collisions) > 0:
                continue
            lp = self.log_prob(filter_ref_obj=node,
                               ignore_categories=ignore_categories)
            print(f'lp={lp}')
            if lp > max_lp:
                max_xform = xform
                max_lp = lp
                if houses_log is not None:
                    max_house = self._objects.as_house()
            num_noncolliding_samples += 1
            if num_noncolliding_samples == n_samples:
                break
        if houses_log is not None:
            houses_log.append(max_house)
        self._objects.update(node, xform=max_xform, update_sim=True)

    def placeable_objects_sorted_by_size(self, house):
        objects = []
        fixed_objects = []
        for n in house.levels[0].nodes:
            if n.type != 'Object':
                continue
            category = self._objects.category(n.modelId, scheme='final')
            if self.category_map.is_arch(category):
                fixed_objects.append(n)
                continue
            objects.append(n)

        for o in objects:
            # to_delete = []
            # for k in o.__dict__:
            #     if k not in ['id', 'modelId', 'transform', 'type', 'valid', 'bbox']:
            #         to_delete.append(k)
            # for k in to_delete:
            #     delattr(o, k)
            dims = self._objects._object_data.get_aligned_dims(o.modelId)
            o.volume = dims[0] * dims[1] * dims[2]
        objects = list(sorted(objects, key=lambda x: x.volume, reverse=True))
        return objects, fixed_objects
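
A minimal usage sketch (hypothetical; arrangement_priors is assumed to be an
ArrangementPriors instance and house a loaded house object, both produced
elsewhere in the planit pipeline):

ags = ArrangementGreedySampler(arrangement_priors)
ags.init(house, only_architecture=True)
objects, fixed_objects = ags.placeable_objects_sorted_by_size(house)
# Greedily place the largest objects first, drawing 10 candidate placements each.
for node in objects:
    ags.sample_placement(node, n_samples=10)
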
Code example #3
File: support_prior.py  Project: zebrajack/planit
# Assumed imports: os and pickle are standard library; utils and
# ObjectCategories are project-local planit modules (exact import paths omitted).
import os
import pickle


class SupportPrior():
    def __init__(self):
        pass

    def learn(self, data_folder="bedroom_final", data_root_dir=None):
        if not data_root_dir:
            data_root_dir = utils.get_data_root_dir()
        data_dir = f"{data_root_dir}/{data_folder}"
        self.data_dir = data_dir
        self.category_map = ObjectCategories()

        files = os.listdir(data_dir)
        files = [
            f for f in files
            if ".pkl" in f and not "domain" in f and not "_" in f
        ]

        self.categories = self.category_map.all_non_arch_categories(
            data_root_dir, data_folder)
        self.category_count = self.category_map.all_non_arch_category_counts(
            data_root_dir, data_folder)
        self.cat_to_index = {
            self.categories[i]: i
            for i in range(len(self.categories))
        }
        self.num_categories = len(self.categories)
        self.categories.append("floor")
        N = self.num_categories

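        # support_count[child][parent]: the extra last column (index N) counts
        # objects supported directly by the floor.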
        self.support_count = [[0 for i in range(N + 1)] for j in range(N)]

        for index in range(len(files)):
            print(index)
            with open(f"{data_dir}/{index}.pkl", "rb") as f:
                (_, _, nodes), _ = pickle.load(f)

            object_nodes = []
            id_to_cat = {}
            for node in nodes:
                modelId = node["modelId"]
                category = self.category_map.get_final_category(modelId)
                if not self.category_map.is_arch(category):
                    object_nodes.append(node)
                    id_to_cat[node["id"]] = self.cat_to_index[category]
                    node["category"] = self.cat_to_index[category]

            for node in object_nodes:
                parent = node["parent"]
                category = node["category"]
                if parent == "Floor" or parent is None:
                    self.support_count[category][-1] += 1
                else:
                    self.support_count[category][id_to_cat[parent]] += 1
            #quit()

        self.possible_supports = {}
        for i in range(self.num_categories):
            print(f"Support for {self.categories[i]}:")
            supports = [(c, self.support_count[i][c] / self.category_count[i])
                        for c in range(N + 1)]
            supports = sorted(supports, key=lambda x: -x[1])
            supports = [s for s in supports if s[1] > 0.01]
            for s in supports:
                print(f"    {self.categories[s[0]]}:{s[1]:4f}")
            self.possible_supports[i] = [s[0] for s in supports]

        print(self.possible_supports)
        self.N = N

    def save(self, dest=None):
        if dest == None:
            dest = f"{self.data_dir}/support_prior.pkl"
        with open(dest, "wb") as f:
            pickle.dump(self.__dict__, f, pickle.HIGHEST_PROTOCOL)

    def load(self, data_dir):
        source = f"{data_dir}/support_prior.pkl"
        with open(source, "rb") as f:
            self.__dict__ = pickle.load(f)
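
A minimal usage sketch (hypothetical; uses the same preprocessed dataset as above):

prior = SupportPrior()
prior.learn(data_folder="bedroom_final")
prior.save()

# possible_supports maps each category index to the indices of categories
# (index N meaning "floor") that support it in more than 1% of its occurrences.
print(prior.possible_supports[0])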