Example 1
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-type",
        default="blind",
        choices=["blind", "rgb", "depth", "rgbd"],
    )
    parser.add_argument("--evaluation", type=str, required=True, choices=["local", "remote"])
    config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
    parser.add_argument("--model-path", default="", type=str)
    args = parser.parse_args()

    config = get_config(
        'configs/ddppo_pointnav.yaml', ['BASE_TASK_CONFIG_PATH', config_paths]
    ).clone()
    config.defrost()
    config.TORCH_GPU_ID = 0
    config.INPUT_TYPE = args.input_type
    config.MODEL_PATH = args.model_path

    config.RANDOM_SEED = 7
    config.freeze()

    agent = DDPPOAgent(config)
    if args.evaluation == "local":
        challenge = habitat.Challenge(eval_remote=False)
    else:
        challenge = habitat.Challenge(eval_remote=True)

    challenge.submit(agent)
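
The local/remote branch at the end of this example (and of several others below) only toggles the eval_remote flag, so it can be collapsed into a single constructor call. A minimal equivalent sketch:

    # Equivalent to the if/else above: eval_remote is True only for --evaluation remote.
    challenge = habitat.Challenge(eval_remote=(args.evaluation == "remote"))
    challenge.submit(agent)
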
Example 2
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--evaluation", type=str, required=True, choices=["local", "remote"])
    args = parser.parse_args()

    config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
    config = habitat.get_config(config_paths)
    agent = RandomAgent(task_config=config)

    if args.evaluation == "local":
        challenge = habitat.Challenge(eval_remote=False)
    else:
        challenge = habitat.Challenge(eval_remote=True)

    challenge.submit(agent)
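
RandomAgent itself is not shown in this snippet. A minimal sketch of what such an agent could look like, assuming the agent interface is habitat.Agent with reset()/act() and that the task config exposes TASK.POSSIBLE_ACTIONS (both assumptions, not confirmed by the snippet):

import random

import habitat


class RandomAgent(habitat.Agent):
    """Picks a uniformly random action every step; keeps no state."""

    def __init__(self, task_config):
        # Assumption: the task config lists the discrete action names here.
        self._possible_actions = list(task_config.TASK.POSSIBLE_ACTIONS)

    def reset(self):
        # Called once at the start of every episode.
        pass

    def act(self, observations):
        # The benchmark expects a dict with an "action" key.
        return {"action": random.choice(self._possible_actions)}
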
Example 3
def run_cfg(cfg, uuid):
    if cfg['eval_kwargs']['exp_path'] is not None:
        # Process exp path
        exp_paths = [cfg['eval_kwargs']['exp_path']]

        # Set up config with the first exp only
        metadata_dir = get_subdir(exp_paths[0], 'metadata')
        config_path = os.path.join(metadata_dir, 'config.json')

        # Load config
        with open(config_path) as config:
            config_data = json.load(config)

        # Update configs
        config_data['uuid'] += '_benchmark' + uuid
        config_data['cfg']['saving']['log_dir'] += '/benchmark'
        config_data['cfg']['saving']['visdom_log_file'] = os.path.join(config_data['cfg']['saving']['log_dir'], 'visdom_logs.json')
        config_data['cfg']['learner']['test'] = True

        if cfg['eval_kwargs']['overwrite_configs']:
            config_data['cfg'] = update_dict_deepcopy(config_data['cfg'], cfg)

        set_seed(config_data['cfg']['training']['seed'])

        # Get checkpoints
        ckpt_paths = []
        for exp_path in exp_paths:
            ckpts_dir = get_subdir(exp_path, 'checkpoints')
            ckpt_path = os.path.join(ckpts_dir, 'ckpt-latest.dat')
            ckpt_paths.append(ckpt_path)
    else:
        config_data = { 'cfg': cfg, 'uuid': uuid }
        ckpt_paths = [None]
        exp_paths = [LOG_DIR]

    if 'eval_kwargs' in cfg and 'debug' in cfg['eval_kwargs']:
        if cfg['eval_kwargs']['debug']:
            config_data['cfg']['saving']['logging_type'] = 'visdom'
            config_data['cfg']['saving']['save_eval_videos'] = True
        else:
            config_data['cfg']['saving']['save_eval_videos'] = False

    print(pprint.pformat(config_data))
    print('Loaded:', config_data['uuid'])
    agent = HabitatAgent(ckpt_path=ckpt_paths[0], config_data=config_data)

    if cfg['eval_kwargs']['challenge']:
        challenge = habitat.Challenge()
        challenge.submit(agent)
    else:
        benchmark = habitat.Benchmark(config_file=cfg['eval_kwargs']['benchmark_config'], config_dir='/')
        metrics = benchmark.evaluate(agent, cfg['eval_kwargs']['benchmark_episodes'])
        agent.finish_benchmark(metrics)
        benchmark._env.close()

        everything = update_dict_deepcopy(metrics, config_data)
        backout_cfg = config_data['cfg']['learner']['backout']
        patience, unstuck_dist = backout_cfg['patience'], backout_cfg['unstuck_dist']
        write_location = os.path.join(exp_paths[0], f'benchmark_data_p{patience}_d{unstuck_dist}.json')
        with open(write_location, 'w') as outfile:
            json.dump(everything, outfile)
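
update_dict_deepcopy and get_subdir are project helpers that are not shown here. Judging from how update_dict_deepcopy is called (merge the second dict into a deep copy of the first), a plausible sketch looks like the following; the real helper may resolve conflicts differently:

import copy


def update_dict_deepcopy(base, updates):
    """Return a deep copy of `base` with `updates` merged in recursively.

    Sketch only: the helper in the original project may behave differently.
    """
    merged = copy.deepcopy(base)
    for key, value in updates.items():
        if isinstance(value, dict) and isinstance(merged.get(key), dict):
            merged[key] = update_dict_deepcopy(merged[key], value)
        else:
            merged[key] = copy.deepcopy(value)
    return merged
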
Example 4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--agent-class", type=str, default="GoalFollower")
    args = parser.parse_args()

    agent = get_agent_cls(args.agent_class)(success_distance=0.2)
    challenge = habitat.Challenge()
    challenge.submit(agent)
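
get_agent_cls is not defined in this snippet; it presumably resolves the --agent-class string to one of the simple baseline agent classes. A minimal sketch of such a lookup, assuming the classes live in habitat_baselines.agents.simple_agents (that module path is an assumption here):

import habitat_baselines.agents.simple_agents as simple_agents


def get_agent_cls(agent_class_name):
    # Resolve the agent class by name; unknown names raise AttributeError.
    # Assumption: GoalFollower and similar classes are attributes of this module.
    return getattr(simple_agents, agent_class_name)
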
Example 5
def main():
    # Note: some additional config from the dev setup was not ported here, but those choices shouldn't matter.
    parser = argparse.ArgumentParser()
    parser.add_argument("--evaluation",
                        type=str,
                        required=True,
                        choices=["local", "remote"])
    config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
    parser.add_argument("--model-path", default="", type=str)
    parser.add_argument("--config-path",
                        type=str,
                        required=True,
                        default="configs/aux_objectnav.yaml")
    args = parser.parse_args()

    DEFAULT_CONFIG = "configs/il_objectnav.yaml"
    config = get_config([DEFAULT_CONFIG, args.config_path],
                        ['BASE_TASK_CONFIG_PATH', config_paths]).clone()
    config.defrost()
    config.TORCH_GPU_ID = 0
    config.MODEL_PATH = args.model_path

    seed = 7
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.random.manual_seed(seed)
    config.RANDOM_SEED = seed
    config.freeze()
    torch.backends.cudnn.benchmark = False

    agent = ILAgent(config)
    if args.evaluation == "local":
        challenge = habitat.Challenge(eval_remote=False)
    else:
        challenge = habitat.Challenge(eval_remote=True)

    challenge.submit(agent)
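
The seeding block above touches four separate RNGs plus cuDNN; wrapping it in one helper keeps evaluation runs reproducible without repeating the calls (the helper name is ours, not from the original code):

import random

import numpy as np
import torch


def seed_everything(seed: int) -> None:
    """Seed all RNGs used in the example above so evaluation runs are reproducible."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.random.manual_seed(seed)
    # Disabling cuDNN benchmarking keeps convolution algorithm choice deterministic.
    torch.backends.cudnn.benchmark = False
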
Example 7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input-type",
        default="blind",
        choices=["blind", "rgb", "depth", "rgbd"],
    )
    parser.add_argument("--model-path", default="", type=str)
    args = parser.parse_args()

    config = get_defaut_config()
    config.INPUT_TYPE = args.input_type
    config.MODEL_PATH = args.model_path

    agent = PPOAgent(config)
    challenge = habitat.Challenge()
    challenge.submit(agent)
Example 8
def get_action_space(self, request, context):
    challenge = habitat.Challenge()
    agent = unpack_for_grpc(request.SerializedEntity)
    challenge.submit(agent)
    return evaluation_pb2.Package(SerializedEntity=agent)
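
unpack_for_grpc is not shown in this snippet. In this kind of gRPC evaluation bridge the serialized entity is typically a pickled Python object, so a plausible pack/unpack pair is sketched below; the pickle encoding is an assumption, not something the snippet confirms:

import pickle


def pack_for_grpc(entity):
    # Assumption: entities cross the gRPC boundary as pickled bytes.
    return pickle.dumps(entity)


def unpack_for_grpc(serialized_entity):
    return pickle.loads(serialized_entity)
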
Example 9
def main():
    config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
    config = habitat.get_config(config_paths)
    agent = RandomAgent(task_config=config)
    challenge = habitat.Challenge()
    challenge.submit(agent)
Example 10
def main():
    params = parse_args(default_files=('./habitat_submission.conf', ))
    is_submission = (params.habitat_eval not in ['localtest', 'gendata'])

    if params.seed > 0:
        np.random.seed(params.seed)
        random.seed(params.seed)

    # Update for 2021 change in environment variable naming
    if os.environ.get("TRACK_CONFIG_FILE"):
        print("Overwriting CHALLENGE_CONFIG_FILE with %s" %
              os.environ["TRACK_CONFIG_FILE"])
        os.environ["CHALLENGE_CONFIG_FILE"] = os.environ["TRACK_CONFIG_FILE"]

    if not is_submission:
        if params.habitat_config_file != "":
            os.environ["CHALLENGE_CONFIG_FILE"] = params.habitat_config_file
        elif params.overwrite_agent_noise >= 0.:
            assert params.overwrite_agent_noise == 0.  # need a separate file otherwise
            print("Overwriting agent noise %f" % params.overwrite_agent_noise)
            os.environ[
                "CHALLENGE_CONFIG_FILE"] = './configs/challenge_pointnav_supervised_nonoise.yaml'
        else:
            os.environ[
                "CHALLENGE_CONFIG_FILE"] = './configs/challenge_pointnav_supervised.yaml'

    # parser = argparse.ArgumentParser()
    # parser.add_argument("--evaluation", type=str, required=True, choices=["local", "remote"])
    # parser.add_argument("--config", type=str, default='', required=False)
    # args = parser.parse_args()
    #
    # if args.config != '':
    #     os.environ["CHALLENGE_CONFIG_FILE"] = args.config

    config_paths = os.environ["CHALLENGE_CONFIG_FILE"]
    config = habitat.get_config(config_paths)

    if config.SIMULATOR.TYPE.startswith('Spot'):
        # This will register the custom sim class
        from spot import spotsim

        # Update compressed episode json
        jsonfile = config.DATASET.DATA_PATH
        assert jsonfile.endswith('.gz')
        jsonfile = jsonfile[:-3]
        import subprocess
        subprocess.call(['gzip -kf ' + jsonfile], shell=True)
        # import ipdb as pdb
        # pdb.set_trace()

        # from habitat.sims.pyrobot import pyrobot

    print("Using config file(s): %s" % (str(config_paths)))
    # logdir is only used for videos now; it should be passed in to the challenge eval.
    logdir = params.logpath if params.logpath != '' else './temp/evals/{}-{}'.format(
        time.strftime('%m-%d-%H-%M-%S', time.localtime()), params.name)
    os.makedirs(logdir, exist_ok=True)
    # agent = RandomAgent(task_config=config)

    if params.habitat_eval in ["localtest", "gendata"]:
        # # TODO Try to add additional noiseless sensors. Changes here have no effect!! Modify config file instead.
        # config.defrost()
        # # config.SIMULATOR.AGENT_0.SENSORS.append("NOISELESS_RGB_SENSOR")
        # # config.SIMULATOR.NOISELESS_RGB_SENSOR = habitat.Config(config.SIMULATOR.RGB_SENSOR)
        # # config.SIMULATOR.NOISELESS_RGB_SENSOR.NOISE_MODEL = ""
        # # config.SIMULATOR.NOISELESS_RGB_SENSOR.NOISE_MODEL_KWARGS = habitat.Config()
        # # config.SIMULATOR.AGENT_0.SENSORS.append("NOISELESS_DEPTH_SENSOR")
        # # config.SIMULATOR.NOISELESS_DEPTH_SENSOR = habitat.Config(config.SIMULATOR.DEPTH_SENSOR)
        # # config.SIMULATOR.NOISELESS_DEPTH_SENSOR.NOISE_MODEL = ""
        # # config.SIMULATOR.NOISELESS_DEPTH_SENSOR.NOISE_MODEL_KWARGS = habitat.Config()
        # config.SIMULATOR.AGENT_0.SENSORS.append("NOISELESS_RGB_SENSOR")
        # config.SIMULATOR.NOISELESS_RGB_SENSOR = habitat.Config(config.SIMULATOR.RGB_SENSOR)
        # config.SIMULATOR.NOISELESS_RGB_SENSOR.TYPE = "SecondHabitatSimRGBSensor"
        # config.SIMULATOR.NOISELESS_RGB_SENSOR.NOISE_MODEL = ""
        # config.SIMULATOR.NOISELESS_RGB_SENSOR.NOISE_MODEL_KWARGS = habitat.Config()
        # config.freeze()

        challenge = habitat.Challenge(eval_remote=False)
        env = challenge._env

        grid_cell_size = 0.05  # 5cm
        map_size = (maps.COORDINATE_MAX - maps.COORDINATE_MIN) / grid_cell_size
        assert config.TASK.TOP_DOWN_MAP.MAP_RESOLUTION == int(map_size)

        if params.seed > 0:
            env._sim.seed(params.seed)
            env.seed(params.seed)

        # agent = ShortestPathAgent(task_config=config, env=env)

        if params.habitat_eval == "gendata":
            assert len(params.data_map_sizes) > 0

            # Initialize writers
            data_filenames = [
                os.path.join(
                    logdir, "agentplandata.m%d.%d-%d.tfrecords" %
                    (map_size, params.skip_first_n, params.num_episodes))
                for map_size in params.data_map_sizes
            ]

            # with statement for variable number of items
            from contextlib import ExitStack
            with ExitStack() as stack:
                tfwriters = [
                    stack.enter_context(
                        tf.python_io.TFRecordWriter(data_filename))
                    for data_filename in data_filenames
                ]
                agent = DSLAMAgent(task_config=config,
                                   params=params,
                                   env=env,
                                   logdir=logdir,
                                   tfwriters=tfwriters)
                challenge.submit(agent,
                                 num_episodes=params.num_episodes,
                                 skip_first_n=params.skip_first_n)
        else:
            agent = DSLAMAgent(task_config=config,
                               params=params,
                               env=env,
                               logdir=logdir)
            challenge.submit(agent,
                             num_episodes=params.num_episodes,
                             skip_first_n=params.skip_first_n)

    elif params.habitat_eval == "local":
        challenge = habitat.Challenge(eval_remote=False)

        # if params.seed > 0:
        #     challenge._env._sim.seed(params.seed)
        #     challenge._env.seed(params.seed)

        agent = DSLAMAgent(task_config=config,
                           params=params,
                           env=None,
                           logdir=logdir)
        # agent = RandomAgent(task_config=config, params=params)
        challenge.submit(agent)  # , num_episodes=params.num_episodes)

    else:
        challenge = habitat.Challenge(eval_remote=True)
        agent = DSLAMAgent(task_config=config,
                           params=params,
                           env=None,
                           logdir=logdir)
        # agent = RandomAgent(task_config=config, params=params)
        challenge.submit(agent)
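
Example 10 shells out to gzip -kf to decompress the episode dataset. The same effect (keep the .gz archive, write the decompressed copy next to it) can be had with the standard library; a dependency-free sketch:

import gzip
import shutil


def gunzip_keep(path_gz: str) -> str:
    """Decompress `<file>.gz` next to itself, keeping the original archive."""
    assert path_gz.endswith('.gz')
    path_out = path_gz[:-3]
    with gzip.open(path_gz, 'rb') as src, open(path_out, 'wb') as dst:
        shutil.copyfileobj(src, dst)
    return path_out
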
Example 11
def main():
    agent = ForwardOnlyAgent()
    challenge = habitat.Challenge()
    challenge.submit(agent)
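
ForwardOnlyAgent is not defined in the snippet. A minimal sketch of such a baseline, assuming the habitat.Agent interface used in the Example 2 sketch and the standard PointNav action name "MOVE_FORWARD" (the real baseline may also issue STOP near the goal):

import habitat


class ForwardOnlyAgent(habitat.Agent):
    """Always walks forward; useful only as a trivial baseline."""

    def reset(self):
        pass

    def act(self, observations):
        # Assumption: "MOVE_FORWARD" is among the task's discrete actions.
        return {"action": "MOVE_FORWARD"}
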