def load_observations(self, filenames, house_dir, load_frames=False):
    counts_house_ids = list(map(lambda f: f.split('_'), filenames))
    num_observations = len(counts_house_ids)
    for i, (obs_id, house_id) in enumerate(counts_house_ids):
        print(f'Loading observation obs_id={obs_id} house id={house_id}, '
              f'{i + 1}/{num_observations}')
        # key consistently by obs_id (the original fetched with house_id but
        # stored with obs_id, so the fetch always returned a fresh dict)
        house_observations = self._observations.get(obs_id, {})
        num_rows = 0
        relpos_file = os.path.join(self._priors_dir,
                                   obs_id + '_' + house_id + '.relpos.csv')
        with open(relpos_file) as csv_file:
            for row in csv.DictReader(csv_file):
                if len(row) == 0:
                    continue
                obs = RelativeObservation.fromstring(row)
                house_observations[(obs.obj_id, obs.ref_id)] = obs
                num_rows += 1
        if num_rows > 0:
            self._observations[obs_id] = house_observations
        if load_frames:
            house_frames = self._semantic_frames.get(house_id, {})
            semframes_file = os.path.join(
                self._priors_dir, obs_id + '_' + house_id + '.semframes.csv')
            with open(semframes_file) as csv_file:
                for row in csv.DictReader(csv_file):
                    local2world = np.matrix(
                        str2nparr(row['local2world'])).reshape(4, 4)
                    frame = {
                        'obj_id': row['obj_id'],
                        'obb': OBB.from_local2world_transform(local2world),
                        'aabb': {
                            'min': row['world_aabb_min'],
                            'max': row['world_aabb_max']
                        }
                    }
                    house_frames[frame['obj_id']] = frame
            self._semantic_frames[house_id] = house_frames

    print('Grouping observations by categories...')
    groups = {}
    num_obs = len(self._observations)
    for i, (obs_id, house_id) in enumerate(counts_house_ids):
        print(f'Observation in house id={house_id} {i + 1}/{num_obs}')
        house = House(file_dir=os.path.join(house_dir, obs_id + '.json'),
                      include_support_information=False)
        self._objects.init_from_house(house, update_sim=False)
        # houses whose csv had no rows were never stored, so default to {}
        for observation in self._observations.get(obs_id, {}).values():
            key = self._objects.get_observation_key(observation)
            key_bin = groups.get(key, [])
            key_bin.append(observation)
            groups[key] = key_bin
    print('Done grouping')
    self._grouped_observations = groups
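# Usage sketch (illustrative, not part of the original source): the stems
# passed to load_observations are the '<prefix>_<house_id>' basenames of the
# .relpos.csv files written by save_observations_by_house; directory names
# here are placeholder values.
def _demo_load_observations(priors_dir, house_dir):
    stems = [p.split('.')[0] for p in os.listdir(priors_dir)
             if p.endswith('.relpos.csv')]
    rod = RelativeObservationsDatabase(name='suncg_priors',
                                       priors_dir=priors_dir,
                                       verbose=True)
    rod.load_observations(stems, house_dir, load_frames=False)
    for key, obs_list in rod.grouped_observations.items():
        print(key, len(obs_list))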
def create_dataset(source="", dest="main", \ num_houses=-1, batch_size=1000): """ Create a pickled version of the dataset from the json files Parameters ---------- dest (string, optional): directory to save to num_houses (int, optional): If -1, then all the houses will be loaded, otherwise the first num_houses houses will be loaded batch_size (int, optional): number of houses in one .pkl file """ data_dir = utils.get_data_root_dir() data_dir = utils.get_data_root_dir() dest_dir = f"{data_dir}/{dest}" if not os.path.exists(dest_dir): os.makedirs(dest_dir) to_save = [] cur_index = 0 def pkl_name(index): return f"{dest_dir}/{cur_index}.pkl" if source == "": house_dir = f"{data_dir}/data/house" house_ids = dict(enumerate(os.listdir(house_dir))) print(f"There are {len(house_ids)} houses in the dataset.") num_houses = len(house_ids) if num_houses == -1 else num_houses start_house_i = 0 while os.path.exists(pkl_name(cur_index)): print(f'Batch file {pkl_name(cur_index)} exists, skipping batch') cur_index += 1 start_house_i = cur_index * batch_size for i in range(start_house_i, num_houses): print(f"Now loading house id={house_ids[i]} {i+1}/{num_houses}...", end="\r") house = House(i) if house.rooms: to_save.append(house) if len(to_save) == batch_size: with open(pkl_name(cur_index), "wb") as f: pickle.dump(to_save, f, pickle.HIGHEST_PROTOCOL) to_save = [] cur_index += 1 print() with open(pkl_name(cur_index), "wb") as f: pickle.dump(to_save, f, pickle.HIGHEST_PROTOCOL) else: print("Currently only supports loading nothing")
def as_house(self):
    room_nodes = [
        dict(n.__dict__) for n in self._objects.values() if n.type != 'Room'
    ]
    for i, n in enumerate(room_nodes):
        # n is a plain dict copy here, so look originalId up as a key (the
        # original used hasattr/attribute access, which always fails on a
        # dict and thus always fell back to n['id'])
        n['originalId'] = n.get('originalId', n['id'])
        n['id'] = f'0_{i + 1}'  # overwrite id with linearized id
    room = {
        'id': '0_0',
        'originalId': self.room.originalId
        if hasattr(self.room, 'originalId') else self.room.id,
        'type': 'Room',
        'valid': 1,
        'modelId': self.room.modelId,
        'nodeIndices': list(range(1, len(room_nodes) + 1)),
        'roomTypes': self.room.roomTypes,
        'bbox': self.room.bbox,
    }
    house_dict = {
        'version': 'suncg@1.0.0',
        'id': self.room.house_id,
        'up': [0, 1, 0],
        'front': [0, 0, 1],
        'scaleToMeters': 1,
        'levels': [{
            'id': '0',
            'nodes': [room] + room_nodes
        }]
    }
    return House(house_json=house_dict)
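# Consistency check sketch (hypothetical helper): the house_dict built by
# as_house puts the room at node 0 with id '0_0', and its nodeIndices
# 1..len(room_nodes) must line up with the relabeled object ids '0_1', ...
def _check_room_house_dict(house_dict):
    nodes = house_dict['levels'][0]['nodes']
    assert nodes[0]['id'] == '0_0' and nodes[0]['type'] == 'Room'
    for idx in nodes[0]['nodeIndices']:
        assert nodes[idx]['id'] == f'0_{idx}'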
args = parser.parse_args()

if args.task == 'arrange':
    # initialize arrangement priors and sampler
    filename = os.path.join(args.priors_dir, 'priors.pkl.gz')
    ap = ArrangementPriors(priors_file=filename,
                           w_dist=0.0,
                           w_clearance=1.0,
                           w_closest=1.0,
                           w_orientation=1.0,
                           w_same_category=1.0)
    ags = ArrangementGreedySampler(arrangement_priors=ap,
                                   sim_mode=args.sim_mode)

    # load house and set architecture
    original_house = House(file_dir=args.input,
                           include_support_information=False)
    ags.init(original_house, only_architecture=True)

    # split into placeable objects and fixed portals (doors, windows)
    placeables, fixed_objects = ags.placeable_objects_sorted_by_size(
        original_house)

    # add fixed objects (not to be arranged)
    for n in fixed_objects:
        ags.objects.add_object(n)

    # place objects one-by-one
    houses_states = []
    num_placeables = len(placeables)
    for (i, n) in enumerate(placeables):
        print(f'Placing {i+1}/{num_placeables}')
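# Invocation sketch (the script name is a placeholder; --task, --input and
# --priors_dir mirror the argparse options used above, and a --sim_mode flag
# is assumed from the args.sim_mode reference):
#
#   python arrange.py --task arrange \
#       --input path/to/house.json --priors_dir priors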
                             contactNormalOnBInWS=c[7],
                             distance=c[8],
                             normalForce=c[9])
            out.append(o)
        return out

    def ray_test(self, from_pos, to_pos):
        hit = p.rayTest(rayFromPosition=from_pos,
                        rayToPosition=to_pos,
                        physicsClientId=self._pid)
        # rayTest returns one hit tuple per ray; unpack the single result
        intersection = Intersection._make(*hit)
        del hit  # NOTE force garbage collection of pybullet objects
        if intersection.id >= 0:  # if intersection, replace bid with id
            intersection = intersection._replace(
                id=self._bid_to_body[intersection.id].id)
        return intersection


if __name__ == '__main__':
    from data import House
    sim = Simulator(mode='gui', verbose=True)
    h = House(id_='d119e6e0bd567d923aea774c2a984bf0',
              include_arch_information=False)
    # h = House(index=5)
    sim.add_house(h,
                  no_walls=False,
                  no_ceil=True,
                  use_separate_walls=False,
                  static=False)
    sim.run()
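# Usage sketch for ray_test (hypothetical helper; positions are arbitrary
# illustration values). pybullet reports a miss with object id -1, and
# ray_test only remaps the id on a hit, so callers can branch on it.
def _demo_ray_test(sim):
    hit = sim.ray_test(from_pos=[0, 5, 0], to_pos=[0, -1, 0])
    if hit.id != -1:
        print('ray hit object', hit.id)
    else:
        print('ray missed')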
parser.add_argument('--task',
                    type=str,
                    required=True,
                    help='<Required> task [collect|save_pkl]')
parser.add_argument('--input',
                    type=str,
                    help='house json or directory with house json files')
parser.add_argument('--priors_dir',
                    type=str,
                    required=True,
                    help='priors data directory')
parser.add_argument('--house_dir',
                    type=str,
                    help='house data directory')
args = parser.parse_args()

rod = RelativeObservationsDatabase(name='suncg_priors',
                                   priors_dir=args.priors_dir,
                                   verbose=True)

if os.path.isdir(args.input):
    house_files = sorted(
        glob.glob(os.path.join(args.input, '**/*.json'), recursive=True))
else:
    house_files = [args.input]

if args.task == 'collect':
    for f in house_files:
        house = House(file_dir=f, include_support_information=False)
        prefix = os.path.splitext(os.path.basename(f))[0]
        priors_file = os.path.join(args.priors_dir,
                                   prefix + '_' + house.id + '.relpos.csv')
        if os.path.exists(priors_file):
            print(f'Priors already exist at {priors_file}')
        else:
            rod.collect_observations([house])
            rod.save_observations_by_house(prefix)
            rod.clear()

if args.task == 'save_pkl':
    relpos_files = filter(lambda p: '.relpos.csv' in p,
                          os.listdir(args.input))
    relpos_files = list(
        map(lambda p: os.path.basename(p).split('.')[0], relpos_files))
    print(relpos_files)
    rod.load_observations(relpos_files, args.house_dir, load_frames=False)
    # reduced = {k: rod.grouped_observations[k] for k in [
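# Invocation sketch (script name is a placeholder; flags match the argparse
# definitions above). 'collect' walks house json files and writes per-house
# .relpos.csv priors; 'save_pkl' reloads those priors, so --input points at
# the priors directory itself:
#
#   python collect_priors.py --task collect \
#       --input data/house --priors_dir priors
#   python collect_priors.py --task save_pkl \
#       --input priors --priors_dir priors --house_dir data/house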