Example 1
def run_exp(exp_config: str, run_type: str, opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)

    config.defrost()
    config.TASK_CONFIG.TASK.POINTGOAL_WITH_GPS_SENSOR = (
        config.TASK_CONFIG.TASK.POINTGOAL_SENSOR.clone()
    )
    config.TASK_CONFIG.TASK.POINTGOAL_WITH_GPS_SENSOR.TYPE = (
        "PointGoalWithGPSSensor"
    )
    config.freeze()

    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)
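    # run CPU ops single-threaded; avoids thread contention when CUDA does the heavy lifting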
    if config.FORCE_TORCH_SINGLE_THREADED and torch.cuda.is_available():
        torch.set_num_threads(1)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
Example 2
def run_exp(exp_config: str, run_type: str, opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)
    logger.info(f"config: {config}")
    logger.add_filehandler(config.LOG_FILE)

    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)
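    # cudnn.benchmark autotunes convolution algorithms: faster for fixed input
    # shapes, at some cost to run-to-run determinism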
    torch.backends.cudnn.benchmark = True

    if run_type == "eval" and config.EVAL.EVAL_NONLEARNING:
        evaluate_agent(config)
        return

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
Example 3
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--run-type",
        choices=["train", "eval"],
        required=True,
        help="run type of the experiment (train or eval)",
    )
    parser.add_argument(
        "--exp-config",
        type=str,
        required=True,
        help="path to config yaml containing info about experiment",
    )
    parser.add_argument(
        "opts",
        default=None,
        nargs=argparse.REMAINDER,
        help="Modify config options from command line",
    )
    args = parser.parse_args()
    config = get_config(args.exp_config, args.opts)
    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if args.run_type == "train":
        trainer.train()
    elif args.run_type == "eval":
        trainer.eval()
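For reference, a minimal sketch of driving this entry point programmatically; the script name and config path are hypothetical, and the trailing tokens are collected into args.opts by argparse.REMAINDER:

import sys

# Hypothetical invocation; "run.py" and the config path are placeholders.
sys.argv = ["run.py", "--run-type", "train",
            "--exp-config", "configs/pointnav.yaml",
            "TASK_CONFIG.SEED", "42"]  # leftover tokens land in args.opts
main()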
Example 4
def run_exp(exp_config: str, run_type: str, opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)

    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
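A direct call to this minimal variant might look as follows; the config path is a placeholder, and opts uses the alternating key/value override convention that get_config consumes:

# Hypothetical config path; overrides are alternating key/value strings.
run_exp("configs/pointnav/ppo_pointnav.yaml", "train",
        opts=["TASK_CONFIG.SEED", "42"])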
Example 5
def get_trainer(trainer_name: str, trainer_cfg: Config) -> BaseTrainer:
    r"""
    Create specific trainer instance according to name.
    Args:
        trainer_name: name of the registered trainer.
        trainer_cfg: config file for trainer.

    Returns:
        an instance of the specified trainer.
    """
    trainer = baseline_registry.get_trainer(trainer_name)
    assert trainer is not None, f"{trainer_name} is not supported"
    return trainer(trainer_cfg)
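Usage then reduces to a one-liner; "ppo" is only an assumed example of a registered trainer name, and the valid names depend on what the baseline registry contains:

config = get_config("configs/pointnav/ppo_pointnav.yaml")  # hypothetical path
trainer = get_trainer("ppo", config)  # "ppo" assumed to be registered
trainer.train()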
Example 6
def run_exp(exp_config: str,
            run_type: str,
            opts=None,
            *args,
            **kwargs) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)
    config.defrost()
    config.DIFFICULTY = arguments.diff
    if arguments.stop:
        config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS = [
            "STOP", "MOVE_FORWARD", "TURN_LEFT", "TURN_RIGHT"
        ]
    else:
        config.TASK_CONFIG.TASK.POSSIBLE_ACTIONS = [
            "MOVE_FORWARD", "TURN_LEFT", "TURN_RIGHT"
        ]
    if arguments.seed != 'none':
        config.TASK_CONFIG.SEED = int(arguments.seed)
    config.freeze()
    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
    elif run_type == 'benchmark':
        trainer.benchmark()
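This variant reads a module-level namespace called arguments that the excerpt does not define. A plausible reconstruction, inferred only from the attributes used above (diff, stop, seed), would be:

import argparse

# Hypothetical reconstruction of the module-level parser this example assumes.
parser = argparse.ArgumentParser()
parser.add_argument("--diff", type=str, default="easy")  # stored in config.DIFFICULTY
parser.add_argument("--stop", action="store_true")       # include the STOP action
parser.add_argument("--seed", type=str, default="none")  # 'none' keeps the config seed
arguments = parser.parse_args()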
Example 7
def execute_exp(config: Config, run_type: str) -> None:
    r"""This function runs the specified config with the specified runtype
    Args:
        config: Habitat config.
        run_type: "train" or "eval".
    """
    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)
    if config.FORCE_TORCH_SINGLE_THREADED and torch.cuda.is_available():
        torch.set_num_threads(1)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config, run_type)

    if run_type == "train":
        return trainer.train()
    elif run_type == "eval":
        return trainer.eval()
Example 8
def run_exp(exp_config: str,
            run_type: str,
            world_size,
            rank,
            opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)

    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)

    config.defrost()
    config.WORLD_SIZE = world_size
    config.RANK = rank
    config.DISTRIBUTED = True
    config.freeze()

    init_distributed(world_size, rank)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
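init_distributed is not shown in this example. Under standard torch.distributed conventions it would reduce to something like the sketch below; the backend choice and the environment-variable rendezvous are assumptions, not the example's actual helper:

import os

import torch
import torch.distributed as dist


def init_distributed(world_size: int, rank: int) -> None:
    # Hypothetical rendezvous via MASTER_ADDR/MASTER_PORT; real deployments
    # usually get these from the launcher (e.g. SLURM or torchrun).
    os.environ.setdefault("MASTER_ADDR", "127.0.0.1")
    os.environ.setdefault("MASTER_PORT", "29500")
    backend = "nccl" if torch.cuda.is_available() else "gloo"
    dist.init_process_group(backend=backend, world_size=world_size, rank=rank)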
Example 9
def run_exp(exp_config: str,
            run_type: str,
            ckpt_path="",
            run_id=None,
            run_suffix="",
            opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        ckpt_path: If training, ckpt to resume. If evaluating, ckpt to evaluate.
        run_id: If using slurm batch, run id to prefix.s
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)

    # Add tracking of the number of episodes
    config.defrost()

    config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE = habitat.Config()
    # The type field is used to look-up the measure in the registry.
    # By default, the things are registered with the class name
    config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE.TYPE = "EpisodeInfoExample"
    config.TASK_CONFIG.TASK.EPISODE_INFO_EXAMPLE.VALUE = 5
    # Add the measure to the list of measures in use
    config.TASK_CONFIG.TASK.MEASUREMENTS.append("EPISODE_INFO_EXAMPLE")
    config.freeze()

    variant_name = os.path.split(exp_config)[1].split('.')[0]
    config.defrost()
    if run_suffix != "" and run_suffix is not None:
        variant_name = f"{variant_name}-{run_suffix}"

    if not osp.exists(config.LOG_FILE):
        os.makedirs(config.LOG_FILE)

    config.TENSORBOARD_DIR = os.path.join(config.TENSORBOARD_DIR, variant_name)
    config.CHECKPOINT_FOLDER = os.path.join(config.CHECKPOINT_FOLDER,
                                            variant_name)
    config.VIDEO_DIR = os.path.join(config.VIDEO_DIR, variant_name)
    config.LOG_FILE = os.path.join(config.LOG_FILE,
                                   f"{variant_name}.log")  # actually a logdir
    if run_type == "eval":
        # config.TRAINER_NAME = "ppo"
        config.NUM_PROCESSES = 6  # nice
    else:
        # Add necessary supervisory signals
        train_sensors = config.RL.AUX_TASKS.required_sensors
        config.SENSORS.extend(
            train_sensors)  # the task cfg sensors are overwritten by this one

    if run_id is None:
        random.seed(config.TASK_CONFIG.SEED)
        np.random.seed(config.TASK_CONFIG.SEED)
        trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
        assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
        trainer = trainer_init(config)

        # If not doing multiple runs (with run_id), default behavior is to overwrite
        if run_type == "train":
            if ckpt_path is not None:
                ckpt_dir, ckpt_file = os.path.split(ckpt_path)
                ckpt_index = ckpt_file.split('.')[1]
                ckpt = int(ckpt_index)
                start_updates = ckpt * config.CHECKPOINT_INTERVAL + 1
                trainer.train(ckpt_path=ckpt_path,
                              ckpt=ckpt,
                              start_updates=start_updates)
            elif not DO_PRESERVE_RUNS:
                # if os.path.exists(config.TENSORBOARD_DIR):
                #     print("Removing tensorboard directory...")
                #     shutil.rmtree(config.TENSORBOARD_DIR, ignore_errors=True)
                # if os.path.exists(config.CHECKPOINT_FOLDER):
                #     print("Removing checkpoint folder...")
                #     shutil.rmtree(config.CHECKPOINT_FOLDER, ignore_errors=True)
                # if os.path.exists(config.LOG_FILE):
                #     print("Removing log file...")
                #     shutil.rmtree(config.LOG_FILE, ignore_errors=True)
                trainer.train()
            else:
                # if os.path.exists(config.TENSORBOARD_DIR) or os.path.exists(config.CHECKPOINT_FOLDER) \
                #     or os.path.exists(config.LOG_FILE):
                #     print(f"TB dir exists: {os.path.exists(config.TENSORBOARD_DIR)}")
                #     print(f"Ckpt dir exists: {os.path.exists(config.CHECKPOINT_FOLDER)}")
                #     print(f"Log file exists: {os.path.exists(config.LOG_FILE)}")
                #     print("Run artifact exists, please clear manually")
                #     exit(1)
                trainer.train()
        elif run_type == "eval":
            trainer.eval(ckpt_path)
        return

    run_prefix = f'run_{run_id}'
    seed = run_id
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(config.TASK_CONFIG.SEED)

    # Extends the path modifications above with a per-run prefix
    tb_dir = os.path.join(config.TENSORBOARD_DIR, run_prefix)
    ckpt_dir = os.path.join(config.CHECKPOINT_FOLDER, run_prefix)
    log_dir, log_file = os.path.split(config.LOG_FILE)
    log_file_extended = f"{run_prefix}--{log_file}"
    log_file_path = os.path.join(log_dir, log_file_extended)

    config.TASK_CONFIG.SEED = seed
    config.TENSORBOARD_DIR = tb_dir
    config.CHECKPOINT_FOLDER = ckpt_dir
    config.LOG_FILE = log_file_path

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)
    if run_type == "train":
        if ckpt_path is None:
            # if DO_PRESERVE_RUNS and (os.path.exists(tb_dir) or os.path.exists(ckpt_dir) or os.path.exists(log_file_extended)):
            #     print(f"TB dir exists: {os.path.exists(tb_dir)}")
            #     print(f"Ckpt dir exists: {os.path.exists(ckpt_dir)}")
            #     print(f"Log file exists: {os.path.exists(log_file_extended)}")
            #     print("Run artifact exists, please clear manually")
            #     exit(1)
            # else:
            #     shutil.rmtree(tb_dir, ignore_errors=True)
            #     shutil.rmtree(ckpt_dir, ignore_errors=True)
            #     if os.path.exists(log_file_extended):
            #         os.remove(log_file_extended)
            trainer.train()
        else:  # Resume training from checkpoint
            # Parse the checkpoint #, calculate num updates, update the config
            ckpt_dir, ckpt_file = os.path.split(ckpt_path)
            ckpt_index = ckpt_file.split('.')[1]
            true_path = os.path.join(ckpt_dir, run_prefix,
                                     f"{run_prefix}.{ckpt_index}.pth")
            ckpt = int(ckpt_index)
            start_updates = ckpt * config.CHECKPOINT_INTERVAL + 1
            trainer.train(ckpt_path=true_path,
                          ckpt=ckpt,
                          start_updates=start_updates)
    else:
        ckpt_dir, ckpt_file = os.path.split(ckpt_path)
        ckpt_index = ckpt_file.split('.')[1]
        true_path = os.path.join(ckpt_dir, run_prefix,
                                 f"{run_prefix}.{ckpt_index}.pth")
        trainer.eval(true_path)
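The resume logic above assumes checkpoint file names of the form <prefix>.<index>.pth. A worked example with hypothetical values:

import os

ckpt_path = "checkpoints/variant/ckpt.7.pth"  # hypothetical path
ckpt_dir, ckpt_file = os.path.split(ckpt_path)
ckpt = int(ckpt_file.split('.')[1])  # -> 7
start_updates = ckpt * 100 + 1       # with CHECKPOINT_INTERVAL = 100 -> 701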
Example 10
if __name__ == "__main__":
    seed = "42"  # @param {type:"string"}
    steps_in_thousands = "10"  # @param {type:"string"}

    config.defrost()
    config.TASK_CONFIG.SEED = int(seed)
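    # note: despite the variable name, the value is used as-is, not multiplied by 1000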
    config.TOTAL_NUM_STEPS = int(steps_in_thousands)
    config.LOG_INTERVAL = 1
    config.freeze()

    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)

# %%
if __name__ == "__main__":
    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    trainer = trainer_init(config)
    trainer.train()

# %%
# @markdown (double click to see the code)

# example tensorboard visualization
# for more details refer to [link](https://github.com/facebookresearch/habitat-lab/tree/master/habitat_baselines#additional-utilities).

try:
    from IPython import display

    with open("./res/img/tensorboard_video_demo.gif", "rb") as f:
        display.display(display.Image(data=f.read(), format="png"))
except ImportError:
    pass  # IPython is unavailable outside a notebook; skip the inline demo
Example 11
def run_exp(exp_config: str, run_type: str, opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    config = get_config(exp_config, opts)

    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        # following are modified based on ppo_trainer.py
        (actor_critic, batch, device, not_done_masks,
         test_recurrent_hidden_states) = trainer.eval_bruce()

    def transform_callback(data):
        nonlocal actor_critic
        nonlocal batch
        nonlocal not_done_masks
        nonlocal test_recurrent_hidden_states
        global flag
        global t_prev_update
        global observation

        if flag == 2:
            observation["depth"] = np.reshape(data.data[0:-2], (256, 256, 1))
            observation["pointgoal_with_gps_compass"] = data.data[-2:]
            flag = 1
            return

        pointgoal_received = data.data[-2:]
        translate_amount = 0.25  # meters
        rotate_amount = 0.174533  # radians

        isrotated = (rotate_amount * 0.95 <=
                     abs(pointgoal_received[1] -
                         observation["pointgoal_with_gps_compass"][1]) <=
                     rotate_amount * 1.05)
        istimeup = (time.time() - t_prev_update) >= 4

        # print('istranslated is '+ str(istranslated))
        # print('isrotated is '+ str(isrotated))
        # print('istimeup is '+ str(istimeup))

        if isrotated or istimeup:
            vel_msg = Twist()
            vel_msg.linear.x = 0
            vel_msg.linear.y = 0
            vel_msg.linear.z = 0
            vel_msg.angular.x = 0
            vel_msg.angular.y = 0
            vel_msg.angular.z = 0
            pub_vel.publish(vel_msg)
            time.sleep(0.2)
            print("entered update step")

            # cv2.imshow("Depth", observation['depth'])
            # cv2.waitKey(100)

            observation["depth"] = np.reshape(data.data[0:-2], (256, 256, 1))
            observation["pointgoal_with_gps_compass"] = data.data[-2:]

            batch = batch_obs([observation])
            for sensor in batch:
                batch[sensor] = batch[sensor].to(device)
            if flag == 1:
                not_done_masks = torch.tensor([0.0],
                                              dtype=torch.float,
                                              device=device)
                flag = 0
            else:
                not_done_masks = torch.tensor([1.0],
                                              dtype=torch.float,
                                              device=device)

            _, actions, _, test_recurrent_hidden_states = actor_critic.act(
                batch,
                test_recurrent_hidden_states,
                not_done_masks,
                deterministic=True)

            action_id = actions.item()
            print("observation received to produce action_id is " +
                  str(observation["pointgoal_with_gps_compass"]))
            print("action_id from net is " + str(actions.item()))

            t_prev_update = time.time()
            vel_msg = Twist()
            vel_msg.linear.x = 0
            vel_msg.linear.y = 0
            vel_msg.linear.z = 0
            vel_msg.angular.x = 0
            vel_msg.angular.y = 0
            vel_msg.angular.z = 0
            if action_id == 0:
                vel_msg.linear.x = 0.25 / 4
                pub_vel.publish(vel_msg)
            elif action_id == 1:
                vel_msg.angular.z = 10 / 180 * 3.1415926
                pub_vel.publish(vel_msg)
            elif action_id == 2:
                vel_msg.angular.z = -10 / 180 * 3.1415926
                pub_vel.publish(vel_msg)
            else:
                pub_vel.publish(vel_msg)
                sub.unregister()
                print("NN finished navigation task")

    sub = rospy.Subscriber("depth_and_pointgoal",
                           numpy_msg(Floats),
                           transform_callback,
                           queue_size=1)
    rospy.spin()
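The callback's action handling is effectively a mapping from discrete Habitat actions to ROS velocity commands. A hypothetical helper distilled from the branches above (ids follow the example's order: 0 = MOVE_FORWARD, 1 = TURN_LEFT, 2 = TURN_RIGHT, anything else = STOP):

from geometry_msgs.msg import Twist


def action_to_twist(action_id: int) -> Twist:
    msg = Twist()  # all fields default to 0.0, i.e. a stop command
    if action_id == 0:
        msg.linear.x = 0.25 / 4                # forward, m/s
    elif action_id == 1:
        msg.angular.z = 10 / 180 * 3.1415926   # turn left, rad/s
    elif action_id == 2:
        msg.angular.z = -10 / 180 * 3.1415926  # turn right, rad/s
    return msg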
Example 12
def run_exp(exp_config: str, run_type: str, ckpt_path="", run_id=None, run_suffix=None, opts=None) -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        ckpt_path: If evaluating, path to a checkpoint.
        run_id: If using slurm batch, run id to prefix.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    if run_type != "eval":
        print("Detailed runs only supported for evaluation")
        exit(1)

    config = get_config(exp_config, opts)
    variant_name = os.path.split(exp_config)[1].split('.')[0]
    config.defrost()
    if run_suffix != "" and run_suffix is not None:
        variant_name = f"{variant_name}-{run_suffix}"
    config.TENSORBOARD_DIR = os.path.join(config.TENSORBOARD_DIR, variant_name)
    config.CHECKPOINT_FOLDER = os.path.join(config.CHECKPOINT_FOLDER, variant_name)
    config.LOG_FILE = os.path.join(config.LOG_FILE, f"{variant_name}.log") # actually a logdir
    config.NUM_PROCESSES = 1

    run_prefix = 'run'
    if run_id is not None:
        config.TASK_CONFIG.SEED = run_id
        run_prefix = f'run_{run_id}'
    random.seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)

    # Sample input - /baseline/run_0.80.pth - need to add the extra folder
    ckpt_dir, ckpt_file = os.path.split(ckpt_path)
    variant = ckpt_dir.split('/')[-1]
    ckpt_index = ckpt_file.split('.')[1]
    ckpt_path = os.path.join(ckpt_dir, run_prefix, ckpt_file)

    # This config isn't used for the detailed statistics; the paths below just
    # keep outputs somewhere where they won't overwrite other runs

    # * Modify as desired
    detail_dir = os.path.join("/srv/share/jye72/r2_viz", "map_viz", f"{variant}_{run_prefix}_{ckpt_index}")
    config.VIDEO_DIR = os.path.join(detail_dir)

    # * Modify
    make_background = False
    map_name = "quantico"
    eval_stats_dir = os.path.join(f'/nethome/jye72/share/r3_detailed/') # /{map_name}')
    config.TASK_CONFIG.TASK_SENSORS = [
        'POINTGOAL_WITH_GPS_COMPASS_SENSOR',
        'GPS_SENSOR', 'HEADING_SENSOR'
    ]

    if make_background:
        config.TEST_EPISODE_COUNT = 1
        label = f"{variant}_{ckpt_index}_{run_prefix}-bg"
        log_diagnostics = [Diagnostics.basic, Diagnostics.top_down_map]
    else:
        config.VIDEO_OPTION = []
        label = f"{variant}_{ckpt_index}_{run_prefix}"
        # log_diagnostics = [Diagnostics.basic, Diagnostics.actions,
            # Diagnostics.weights, Diagnostics.gps, Diagnostics.heading]
        log_diagnostics = [Diagnostics.basic, Diagnostics.actions, Diagnostics.weights]

    # * Modify as desired
    use_own_dataset = False

    if use_own_dataset:
        config.TASK_CONFIG.DATASET.DATA_PATH = \
            '/nethome/jye72/projects/data/datasets/pointnav/gibson/scene_viz/{split}/{split}.json.gz'
        config.TASK_CONFIG.DATASET.SPLIT = 'all'
        config.TASK_CONFIG.DATASET.SCENES_DIR = "data/scene_datasets/gibson"
        config.TASK_CONFIG.EVAL.SPLIT = 'all' # quirk in the code necessitates this

    # Make a top-down-map clean for visualization
    map_cfg = config.TASK_CONFIG.TASK.TOP_DOWN_MAP
    map_cfg.MAP_RESOLUTION = 4000
    map_cfg.DRAW_SOURCE = False
    map_cfg.DRAW_GOAL_POSITIONS = False
    map_cfg.DRAW_VIEW_POINTS = False
    map_cfg.DRAW_SHORTEST_PATH = False

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)
    trainer.eval(ckpt_path, log_diagnostics=log_diagnostics,
        output_dir=eval_stats_dir, label=label)
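A worked example of the checkpoint remapping near the top of this function, using the sample path from the comment:

import os

run_prefix = "run_0"  # run_id = 0
ckpt_dir, ckpt_file = os.path.split("/baseline/run_0.80.pth")
ckpt_index = ckpt_file.split('.')[1]             # -> "80"
new_path = os.path.join(ckpt_dir, run_prefix, ckpt_file)
# -> "/baseline/run_0/run_0.80.pth"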
Example 13
def run_exp(exp_config: str, run_type: str, opts=None,
            experiment: str = None, gpu_id: int = 0, other_patterns=None) \
    -> None:
    r"""Runs experiment given mode and config

    Args:
        exp_config: path to config file.
        run_type: "train" or "eval.
        opts: list of strings of additional config options.

    Returns:
        None.
    """
    if run_type == "train":
        replace_config = dict()

        # Change config based on arguments
        commit_hash = get_git_revision_hash()
        replace_config[CFG_COMMIT_HASH] = commit_hash

        # New folder path based on timestamp & prefix
        fld_name = datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')
        if experiment is not None:
            fld_name += f"_{experiment}"

        replace_config[CFG_RESULTS_PREFIX] = fld_name
        replace_config[CFG_GPU] = gpu_id

        # Add other regex patterns to change in config
        if other_patterns is not None:
            assert len(other_patterns) % 2 == 0, "Must list pairs of arguments"
            for k, v in zip(other_patterns[::2], other_patterns[1::2]):
                replace_config[k] = v
        with open(exp_config, "r") as f:
            config_file_lines = f.readlines()

        config_file_lines = preprocess_config(config_file_lines,
                                              replace_config)

        path = None
        # Get out folder path
        for line in config_file_lines:
            if line.startswith(RESULTS_FOLDER_PATH):
                path = yaml.safe_load(line)[RESULTS_FOLDER_PATH]
                path = path[:path.find(fld_name) + len(fld_name)]
                break

        assert path is not None, "Results path not found"

        # Generate folder for results
        os.makedirs(path)
        cfg_name = os.path.basename(exp_config)
        new_cfg = os.path.join(path, cfg_name)
        with open(new_cfg, "w") as f:
            f.writelines(config_file_lines)

        # Read new generated config
        exp_config = new_cfg

    config = get_config(exp_config, opts)

    # Random seed
    random.seed(config.TASK_CONFIG.SEED)
    torch.manual_seed(config.TASK_CONFIG.SEED)
    np.random.seed(config.TASK_CONFIG.SEED)

    torch.backends.cudnn.deterministic = True  # Slower than normal, but reproducible
    torch.backends.cudnn.benchmark = False

    trainer_init = baseline_registry.get_trainer(config.TRAINER_NAME)
    assert trainer_init is not None, f"{config.TRAINER_NAME} is not supported"
    trainer = trainer_init(config)

    if run_type == "train":
        trainer.train()
    elif run_type == "eval":
        trainer.eval()
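preprocess_config is external to this example. A minimal sketch of what it might do, assuming it substitutes values on matching "KEY: value" lines (the real helper may use different regex patterns):

import re


def preprocess_config(lines, replace_config):
    # Hypothetical line-wise substitution: when a line starts with a key from
    # replace_config, rewrite everything after the first colon with the new value.
    out = []
    for line in lines:
        for key, value in replace_config.items():
            if re.match(rf"\s*{re.escape(key)}\s*:", line):
                line = re.sub(r":.*", f": {value}", line, count=1)
                break
        out.append(line)
    return out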