Example 1
def test_error(key):
    """Measure the error of a single model."""
    # Start from a clean config, then load the model zoo config and weights
    reset_cfg()
    cfg.merge_from_file(model_zoo.get_config_file(key))
    cfg.TEST.WEIGHTS = model_zoo.get_weights_file(key)
    # Log to a temporary output directory
    cfg.OUT_DIR, cfg.LOG_DEST = tempfile.mkdtemp(), "file"
    # Evaluate the model, possibly across multiple GPUs
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.test_model)
    # Read the final top-1 / top-5 errors back from the test log
    log_file = os.path.join(cfg.OUT_DIR, "stdout.log")
    data = logging.sort_log_data(logging.load_log_data(log_file))["test_epoch"]
    data = {"top1_err": data["top1_err"][-1], "top5_err": data["top5_err"][-1]}
    # Clean up the temporary output directory
    shutil.rmtree(cfg.OUT_DIR)
    return data
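A minimal usage sketch (not part of the source): drive test_error over a few model zoo keys and print a small error report. The keys below are placeholders, and the pycls modules referenced inside test_error (cfg, model_zoo, dist, trainer, logging) are assumed to be imported as in the repository this example comes from.

def report_errors(keys):
    """Collect the final top-1/top-5 errors for each model zoo key."""
    errors = {key: test_error(key) for key in keys}
    for key, err in errors.items():
        print(f"{key}: top1={err['top1_err']:.2f} top5={err['top5_err']:.2f}")
    return errors


# Placeholder keys; substitute entries that exist in the model zoo under test.
report_errors(["<model-zoo-key-1>", "<model-zoo-key-2>"])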
Example 2
def main():
    # Load config options
    config.load_cfg_fom_args("Train a classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()

    # Ensure that the output dir exists
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    # Save the config
    config.dump_cfg()

    # Perform training
    if cfg.NUM_GPUS > 1:
        dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=train_model)
    else:
        train_model()
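For orientation, a helper like dist.multi_proc_run typically spawns one worker per GPU, initializes torch.distributed in each worker, and then calls the given function. The sketch below is an illustrative analogue built on torch.multiprocessing.spawn, not pycls's actual implementation; the backend choice and the local port are assumptions.

import torch
import torch.distributed as torch_dist
import torch.multiprocessing as mp


def _worker(rank, world_size, fun):
    # Every spawned process joins the same process group before running fun()
    torch_dist.init_process_group(
        backend="nccl",
        init_method="tcp://127.0.0.1:29500",  # assumed free local port
        rank=rank,
        world_size=world_size,
    )
    torch.cuda.set_device(rank)
    fun()
    torch_dist.destroy_process_group()


def simple_multi_proc_run(num_proc, fun):
    # fun must be picklable (e.g. a module-level function) for spawn to work
    if num_proc > 1:
        mp.spawn(_worker, args=(num_proc, fun), nprocs=num_proc)
    else:
        fun()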
Example 3
def main():
    """Execute operation (train, test, time, etc.)."""
    args = parse_args()
    mode = args.mode
    config.load_cfg(args.cfg)
    cfg.merge_from_list(args.opts)
    config.assert_cfg()
    cfg.freeze()
    if mode == "info":
        print(builders.get_model()())
        print("complexity:", net.complexity(builders.get_model()))
    elif mode == "train":
        dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.train_model)
    elif mode == "test":
        dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.test_model)
    elif mode == "time":
        dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.time_model)
    elif mode == "scale":
        cfg.defrost()
        cx_orig = net.complexity(builders.get_model())
        scaler.scale_model()
        cx_scaled = net.complexity(builders.get_model())
        cfg_file = config.dump_cfg()
        print("Scaled config dumped to:", cfg_file)
        print("Original model complexity:", cx_orig)
        print("Scaled model complexity:", cx_scaled)
Example 4
def main():
    # Load config options and the corruptions/severity levels to evaluate
    corruptions, levels = config.load_cfg_fom_args(
        "Train a classification model.")
    config.assert_and_infer_cfg()

    # Run test-time evaluation across the corruptions and severity levels
    results = dist.multi_proc_run(
        num_proc=cfg.NUM_GPUS,
        fun=lambda: trainer.test_ftta_model(corruptions, levels))

    # Print one results table per severity level
    console = Console()
    for index, level in enumerate(levels):
        table = Table(show_header=True, header_style="cyan")
        table.add_column("level")
        for corruption in corruptions:
            table.add_column(corruptions_concise[corruption])
        res = [str(level)] + [str(x) for x in results[index]]
        table.add_row(*res)
        console.print(table)
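Example 4 assumes a corruptions_concise mapping from corruption names to short column labels. The standalone sketch below reproduces only the rich table rendering with made-up numbers, so the formatting can be checked without running the evaluation; every value here is invented for illustration.

from rich.console import Console
from rich.table import Table

# Made-up stand-ins for the values produced by the evaluation code above
corruptions = ["gaussian_noise", "motion_blur"]
corruptions_concise = {"gaussian_noise": "gauss", "motion_blur": "motion"}
levels = [1, 3, 5]
results = [[12.3, 18.7], [20.1, 25.4], [31.9, 38.2]]  # one row of errors per level

console = Console()
for index, level in enumerate(levels):
    table = Table(show_header=True, header_style="cyan")
    table.add_column("level")
    for corruption in corruptions:
        table.add_column(corruptions_concise[corruption])
    table.add_row(str(level), *[str(x) for x in results[index]])
    console.print(table)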
Example 5
def main():
    config.load_cfg_fom_args("Train a classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.train_model)
Example 6
def main():
    config.load_cfg_fom_args("Compute model and loader timings.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.time_model)
Example 7
def main():
    config.load_cfg_fom_args("Compute precise time for a model on 1 GPU.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.time_model)
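Examples 5-7 all take their options from the command line through load_cfg_fom_args. Assuming that helper forwards a --cfg path plus trailing KEY VALUE pairs to the yacs config (a common pattern, not confirmed here), single-GPU precise timing for Example 7 could be requested like this:

import sys

# Hypothetical invocation: the config path is a placeholder, and "NUM_GPUS 1"
# pins the run to a single process/GPU before the config is frozen.
sys.argv += ["--cfg", "<path/to/config.yaml>", "NUM_GPUS", "1"]
main()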