Example #1
def get_ckpt(config_path, seed, version=None):
    variant = osp.split(config_path)[1].split('.')[0]
    config = get_config(config_path)
    root = Path(f'runs/{variant}-{seed}/lightning_logs/')
    if version is not None:
        run_path = root.joinpath(f'version_{version}')
    else:
        run_path = sorted(root.iterdir(), key=osp.getmtime)[-1]
    # * This is the default (latest) checkpoint; to inspect a different one, load it here.
    model_ckpt = list(run_path.joinpath('checkpoints').glob("*"))[0]
    return model_ckpt, config, variant
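# Usage sketch (hypothetical path and seed, mirroring the configs used in Example #7):
#   model_ckpt, config, variant = get_ckpt('./config/e2a_alph2.yaml', seed=0)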
Example #2
def run_exp(
    config: str,
    seed: int,
    opts=None,
):
    # Grab config, set name, all that good stuff.
    if opts is None:
        opts = []
    variant_name = osp.split(config)[1].split('.')[0]
    config = get_config(config, opts)
    config.defrost()
    config.VARIANT = variant_name
    config.SEED = seed
    config.freeze()

    pl.utilities.seed.seed_everything(seed=seed)
    print(f"Starting {config.VARIANT} run with seed {config.SEED}")

    model = SeqSeqRNN(config)
    print(model)

    dataset = SequenceRecallDataset(config, split="train")
    length = len(dataset)
    train, val = random_split(dataset,
                              [int(length * 0.8), length - int(length * 0.8)],
                              generator=torch.Generator().manual_seed(42))
    print("Training on ", len(train), " examples")

    lr_logger = LearningRateMonitor(logging_interval='step')

    trainer = pl.Trainer(
        max_epochs=config.TRAIN.EPOCHS,
        gpus=1,
        val_check_interval=1.0,
        callbacks=[lr_logger],
        default_root_dir=f"./runs/{config.VARIANT}-{config.SEED}")

    trainer.fit(
        model,
        DataLoader(
            train, batch_size=config.TRAIN.BATCH_SIZE
        ),  # * Note, there's 2x the number of minibatches that I expect, not sure why.
        DataLoader(val, batch_size=config.TRAIN.BATCH_SIZE))

    test_dataset = SequenceRecallDataset(config, split="test")

    print()
    print("Train results")
    # Note: `dataset` here is the full train split (both the train and val portions).
    trainer.test(model, DataLoader(dataset, batch_size=64))

    print()
    print("Test results")
    trainer.test(model, DataLoader(test_dataset, batch_size=64))
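# Usage sketch (hypothetical values; config path as in Example #7, seed arbitrary):
#   run_exp('./config/e2a_alph2.yaml', seed=0)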
Example #3
def load_recurrent_model(config, seed, version=None):
    # index: belief_index
    variant = osp.split(config)[1].split('.')[0]
    config = get_config(config)
    root = Path(f'runs/{variant}-{seed}/lightning_logs/')
    if version is not None:
        run_path = root.joinpath(f'version_{version}')
    else:
        run_path = sorted(root.iterdir(), key=osp.getmtime)[-1]
    model_ckpt = list(run_path.joinpath('checkpoints').glob("*"))[0]

    weights = torch.load(model_ckpt, map_location='cpu')
    model = SeqSeqRNN(config)
    model.load_state_dict(weights['state_dict'])
    return model
Example #4
def run_fp_finder(
    config, seed,
    tag='',
    num_fp=2000,
    override=False,
    save_root=SAVE_ROOT,
    jitter=0.0,
    exclude_outliers=False,
):
    cache_fps = get_cache_path(config, seed, tag, save_root=save_root)

    if not override and osp.exists(cache_fps):
        print(f"{cache_fps} exists, quitting.")
        return
    model = load_recurrent_model(config, seed)
    config = get_config(config)
    dataset = SequenceRecallDataset(config, split='test')

    # TODO remove support for ids and choices in plotting code
    obs, ics, trajs, input_seqs, input_lengths = get_model_inputs(dataset, model, size=num_fp)
    ics = noise_ics(ics, seed=seed, jitter_scale=jitter)
    fps, deltas = find_fixed_points(ics, obs, model.rnn)

    if exclude_outliers:
        outlier_indices = find_outliers(ics, fps.cpu(), mode='mean', threshold=3.0)
        fps = fps[~outlier_indices]
        deltas = deltas[~outlier_indices]

    uniq_fps, deltas = get_unique(fps, deltas)
    print(f"{len(uniq_fps)} unique FPs in {len(fps)} total.")

    uniq_fps = uniq_fps.cpu()
    deltas = deltas.cpu()
    torch.save({
        'fps': uniq_fps,
        'deltas': deltas,
        'source': ics,
        'trajs': trajs,
        'input_seqs': input_seqs,
        'input_lengths': input_lengths
    }, cache_fps)
    print(f"Finished. Saved at {cache_fps}")
Example #5
colors = scipy.io.loadmat('tools/mit_semseg/data/color150.mat')['colors']
net_encoder = ModelBuilder.build_encoder(
    arch='hrnetv2',
    fc_dim=720,
    weights='tools/mit_semseg/ckpt/hrnetv2/encoder_epoch_30.pth')
net_decoder = ModelBuilder.build_decoder(
    arch='c1',
    fc_dim=720,
    num_class=150,
    weights='tools/mit_semseg/ckpt/hrnetv2/decoder_epoch_30.pth',
    use_softmax=True)

crit = torch.nn.NLLLoss(ignore_index=-1)
segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)
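# For context, single-image inference with this module follows the mit_semseg demo
# pattern; a sketch only, where `img_data` is an assumed normalized 1xCxHxW float tensor:
#   segmentation_module.eval()
#   segmentation_module.cuda()
#   with torch.no_grad():
#       scores = segmentation_module({'img_data': img_data.cuda()}, segSize=(height, width))
#   _, pred = torch.max(scores, dim=1)  # per-pixel indices into the 150 classes / `colors`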
objectName = "chair"
config = get_config("tools/semantic_anti_eval/config/se_check_" + objectName + "_anticipator_256.yaml")
random.seed(config.TASK_CONFIG.SEED)
np.random.seed(config.TASK_CONFIG.SEED)
torch.manual_seed(config.TASK_CONFIG.SEED)
trainer = SemAntExpTrainer(config)

ppo_cfg = trainer.config.RL.PPO
ans_cfg = trainer.config.RL.ANS
mapper_cfg = trainer.config.RL.ANS.MAPPER
occ_cfg = trainer.config.RL.ANS.SEMANTIC_ANTICIPATOR
trainer.device = (
    torch.device("cuda", 1)
    if torch.cuda.is_available()
    else torch.device("cpu")
)
sem_cfg = ans_cfg.SEMANTIC_ANTICIPATOR
Example #6
def main(args):
    
    config = get_config()

    mapper_config = config.RL.ANS.MAPPER
    mapper_config.defrost()
    mapper_config.map_size = 130
    mapper_config.map_scale = 0.02
    mapper_config.freeze()

    mapper = Mapper(mapper_config, None)

    M = args.global_map_size
    skip_scene = args.skip_scene
    config_path = args.config_path
    save_dir = args.save_dir
    safe_mkdir(save_dir)

    seen_map_save_root = os.path.join(save_dir, "seen_area_maps")
    wall_map_save_root = os.path.join(save_dir, "wall_maps")
    semantic_map_save_root = os.path.join(save_dir, "semantic_maps")
    json_save_path = os.path.join(save_dir, "all_maps_info.json")

    config = habitat_extensions.get_extended_config(config_path)

    # scenes_list = glob.glob(f"")  # unused; the glob pattern appears to have been elided
    dataset_path = config.DATASET.DATA_PATH.replace("{split}", config.DATASET.SPLIT)
    with gzip.open(dataset_path, "rt") as fp:
        dataset = json.load(fp)

    num_episodes = len(dataset["episodes"])

    print("===============> Loading data per scene")
    scene_to_data = {}
    if num_episodes == 0:
        content_path = os.path.join(
            dataset_path[: -len(f"{config.DATASET.SPLIT}.json.gz")], "content"
        )
        scene_paths = glob.glob(f"{content_path}/*")
        print(f"Number of scenes found: {len(scene_paths)}")
        for scene_data_path in scene_paths:
            with gzip.open(scene_data_path, "rt") as fp:
                scene_data = json.load(fp)
            num_episodes += len(scene_data["episodes"])
            scene_id = scene_data["episodes"][0]["scene_id"].split("/")[-1]
            scene_to_data[scene_id] = scene_data["episodes"]
    else:
        for ep in dataset["episodes"]:
            scene_id = ep["scene_id"].split("/")[-1]
            if scene_id not in scene_to_data:
                scene_to_data[scene_id] = []
            scene_to_data[scene_id].append(ep)

    print("===============> Computing heights for different floors in each scene")
    scenes_to_floor_heights = {}
    for scene_id, scene_data in scene_to_data.items():
        # Identify the number of unique floors in this scene
        floor_heights = []
        for ep in scene_data:
            height = ep["start_position"][1]
            if len(floor_heights) == 0:
                floor_heights.append(height)
            # Measure height difference from all existing floors
            d2floors = map(lambda x: abs(x - height), floor_heights)
            d2floors = np.array(list(d2floors))
            if not np.any(d2floors < 0.5):
                floor_heights.append(height)
        # Store this in the dict
        scenes_to_floor_heights[scene_id] = floor_heights

    env = DummyRLEnv(config=config)
    env.seed(1234)
    _ = env.reset()
    device = torch.device("cuda:0")

    safe_mkdir(seen_map_save_root)
    safe_mkdir(wall_map_save_root)
    safe_mkdir(semantic_map_save_root)

    # Data format for saving top-down maps per scene:
    # For each split, create a json file that contains the following dictionary:
    # key - scene_id
    # value - [{'floor_height': ...,
    #           'seen_map_path': ...,
    #           'wall_map_path': ...,
    #           'world_position': ...,
    #           'world_heading': ...},
    #          .,
    #          .,
    #          .,
    #         ]
    # The floor_height specifies a single height value on that floor.
    # All other heights within 0.5m of this height will correspond to this floor.
    # The *_map_path specifies the path to a .npy file that contains the
    # corresponding map. This map is in the world coordinate system, not episode
    # centric start-view coordinate system.
    # The world_position is the (X, Y, Z) position of the agent w.r.t. which this
    # map was computed. The world_heading is the clockwise rotation (-Z to X)
    # of the agent in the world coordinates.
    # The .npy files will be stored in seen_map_save_root and wall_map_save_root.
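    # For illustration only (hypothetical scene id and values), one entry could look like:
    # "scene1.glb": [
    #     {"floor_height": 0.07,
    #      "seen_map_path": "seen_area_maps/scene1.glb_0.npy",
    #      "wall_map_path": "wall_maps/scene1.glb_0.npy",
    #      "world_position": [1.2, 0.07, -3.4],
    #      "world_heading": 1.57},
    # ]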

    # Create top-down maps per scene, per floor
    per_scene_per_floor_maps = {}
    print("===============> generate meta information for gt map")
    for target_scene in tqdm.tqdm(scene_to_data.keys()):
        per_scene_per_floor_maps[target_scene] = {}
        for episode in scene_to_data[target_scene]:
            scene_id = target_scene
            start_position = episode['start_position']
            start_rotation = episode['start_rotation']
            start_height = start_position[1]
            floor_heights = scenes_to_floor_heights[scene_id]
            d2floors = map(lambda x: abs(x - start_height), floor_heights)
            d2floors = np.array(list(d2floors))
            floor_idx = np.where(d2floors < 0.5)[0][0].item()   
            if floor_idx in per_scene_per_floor_maps[scene_id]:
                continue
            start_heading = compute_heading_from_quaternion(quaternion_from_coeff(start_rotation))
            seen_map_save_path = f"{seen_map_save_root}/{scene_id}_{floor_idx}.npy"
            wall_map_save_path = f"{wall_map_save_root}/{scene_id}_{floor_idx}.npy"
            semantic_map_save_path = f"{semantic_map_save_root}/{scene_id}_{floor_idx}.npy"


            save_dict = {
                "seen_map_path": seen_map_save_path,
                "wall_map_path": wall_map_save_path,
                "semantic_map_path": semantic_map_save_path,
                "floor_height": start_height,
                "start_rotation": start_rotation,
                "world_position": start_position,
                "world_heading": start_heading,
                "scene_id": episode['scene_id'],
            }

            per_scene_per_floor_maps[scene_id][floor_idx] = save_dict
            if len(per_scene_per_floor_maps[scene_id]) == len(scenes_to_floor_heights[scene_id]):
                break
    print("===============> save meta information for gt map")        
    save_json = {}
    for scene in per_scene_per_floor_maps.keys():
        scene_save_data = []
        for floor_idx, floor_data in per_scene_per_floor_maps[scene].items():
            scene_save_data.append(floor_data)
        save_json[scene] = scene_save_data

    with open(json_save_path, "w") as fp:
        json.dump(save_json, fp)

    print("===============> start to draw semantic map")  
    scene_ids = sorted(list(per_scene_per_floor_maps.keys()))
    print(scene_ids)
    start_scene = scene_ids[skip_scene]
    print(f"===============> start with scene {start_scene}")

    for target_scene in tqdm.tqdm(scene_ids[skip_scene:], desc='scenes', position=0):
        for floor_idx in per_scene_per_floor_maps[target_scene]:
            scene_meta_info = per_scene_per_floor_maps[target_scene][floor_idx]
            # Don't regenerate maps that already exist
            if os.path.isfile(scene_meta_info['semantic_map_path']):
                continue
            print(scene_meta_info)

            env.habitat_env.current_episode.start_position = scene_meta_info['world_position']
            env.habitat_env.current_episode.start_rotation = scene_meta_info['start_rotation']
            env.habitat_env.current_episode.scene_id = scene_meta_info['scene_id']

            env.habitat_env.reconfigure(env.habitat_env._config)
            _ = env.habitat_env.task.reset(env.habitat_env.current_episode)

            scene_id = target_scene
            agent_state = env.habitat_env.sim.get_agent_state()
            start_position = np.array(agent_state.position)

            global_seen_map, global_wall_map = get_episode_map(
                env, mapper, M, config, device
            )

            # Generate semantic layers on top of the seen-area map
            global_semantic_map = generate_semantic_layers(env, mapper, M, config, global_seen_map)

            seen_map_save_path = f"{seen_map_save_root}/{scene_id}_{floor_idx}.npy"
            wall_map_save_path = f"{wall_map_save_root}/{scene_id}_{floor_idx}.npy"
            semantic_map_save_path = f"{semantic_map_save_root}/{scene_id}_{floor_idx}.npy"
            np.save(seen_map_save_path, (global_seen_map > 0))
            np.save(wall_map_save_path, (global_wall_map > 0))
            np.save(semantic_map_save_path, (global_semantic_map > 0))

            # Drop references to the large maps so they can be garbage-collected
            global_seen_map = None
            global_wall_map = None
            global_semantic_map = None
            gc.collect()
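# A plausible entry point for this script; a sketch only, with flag names inferred
# from the attributes `main` reads off `args` (the defaults here are assumptions):
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--global_map_size", type=int, default=961)
    parser.add_argument("--skip_scene", type=int, default=0)
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--save_dir", type=str, required=True)
    main(parser.parse_args())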
Example #7
from analyze_utils import (prep_plt, plot_to_axis, load_device, loc_part_ratio,
                           part_ratio, svd)

from fp_finder import (load_recurrent_model, get_model_inputs, noise_ics,
                       run_fp_finder, get_cache_path, get_jac_pt,
                       get_jac_pt_sequential, get_eigvals, time_const)

from fp_plotter import (get_pca_view, scatter_pca_points_with_labels,
                        add_pca_inset, plot_spectrum)

# Pick one variant config to analyze; the last assignment wins.
config = './config/e2a_alph2.yaml'
config = './config/e2a_alph8.yaml'
config = './config/e2a_alph32.yaml'
tag = ''
seed = 0
cn = get_config(config)
# Toggle to force regeneration of the cached fixed points; the last assignment wins.
override_cache = True
override_cache = False

model = load_recurrent_model(config, seed)
cn = get_config(config)
dataset = SequenceRecallDataset(cn, split='test')

obs, ics, *_ = get_model_inputs(dataset, model, 2000)  # get trajectories
mean_obs = obs

cache_fps = get_cache_path(config, seed, tag=tag)
if not osp.exists(cache_fps):
    run_fp_finder(config, seed, tag=tag, override=override_cache)
    cache_fps = get_cache_path(config, seed, tag=tag)
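# Load the cached results (keys follow the dict saved by run_fp_finder in Example #4):
fp_data = torch.load(cache_fps)
uniq_fps, deltas = fp_data['fps'], fp_data['deltas']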

# (Fragment from a training loop: reseed RNGs and reset the environments once an episode ends.)
                if episode_step_count[0].item() == self.config.T_EXP:
                    seed = int(time.time())
                    print('change random seed to {}'.format(seed))
                    random.seed(seed)
                    np.random.seed(seed)
                    torch.manual_seed(seed)

                    observations = self.envs.reset()
                    batch = self._prepare_batch(observations)
                    prev_batch = batch
                    # Reset episode step counter
                    episode_step_count.fill_(0)
                    # Reset states
                    for k in ground_truth_states.keys():
                        ground_truth_states[k].fill_(0)
                    for k in state_estimates.keys():
                        state_estimates[k].fill_(0)

            self.envs.close()

if __name__ == "__main__":
    from config.default import get_config
    import random
    config = get_config("./tools/semantic_anti_train/config/chair_train_256.yaml")
    seed = int(time.time())
    print('change random seed to {}'.format(seed))
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    trainer = SemAntExpTrainer(config)
    trainer.train()
def main(args):
    file_path = args.map_info
    with open(file_path) as json_file:
        all_maps_info = json.load(json_file)

    _, label_idx = get_items_list("data/mpcat40.tsv")

    config_path = "tools/generate_topdown_maps/config/mp3d_train.yaml"

    minDistance = 0
    maxDistance = 2.5

    config = get_config(config_path)
    # The extended config supersedes the base one loaded above.
    config = habitat_extensions.get_extended_config(config_path)

    try:
        env.close()
    except NameError:
        pass

    env = DummyRLEnv(config=config)
    env.seed(1234)
    device = torch.device("cuda:0")

    _ = env.reset()

    scene_objects = {}
    scene_ids = sorted(list(all_maps_info.keys()))

    for scene_id in scene_ids:
        scene_objects[scene_id] = {}

        for floor_id, _ in enumerate(all_maps_info[scene_id]):
            scene_objects[scene_id][floor_id] = {}

    for scene in tqdm.tqdm(scene_ids):
        switch_to_next_scene(env, all_maps_info[scene][0]["scene_id"])
        obj_pos = get_obj_per_scene(env, label_idx)
        for floor_id in scene_objects[scene]:

            floor_height = all_maps_info[scene][floor_id]["floor_height"]
            floor_objects = {}
            scene_objects[scene][floor_id] = floor_objects

            for target_obj in obj_pos.keys():

                object_class = target_obj
                floor_objects[object_class] = 0
                target_objects = obj_pos[object_class]

                for obj_id in target_objects:

                    objectHeight = target_objects[obj_id][0][1]
                    dObjectToFloor = objectHeight - floor_height

                    # Count the object only if it lies within 2.5 m above the floor height
                    if minDistance < dObjectToFloor < maxDistance:
                        floor_objects[object_class] += 1

    # Create a dataframe out of the scene_objects dictionary
    df_scene_objects = pd.concat(
        {
            k: pd.DataFrame.from_dict(v, 'index')
            for k, v in scene_objects.items()
        },
        axis=0)
    df_scene_objects.to_csv("data/scene_object_prevalence.tsv", sep="\t")
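# Again a sketch entry point (assumed flag name matching the `args.map_info` attribute):
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser()
    parser.add_argument("--map_info", type=str, required=True,
                        help="path to the all_maps_info.json produced by the map-generation script")
    main(parser.parse_args())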