Example #1
def main():
    config.load_cfg_fom_args("Train a classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()

    # Perform training
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.train_model)
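These main() snippets omit their module-level imports. Under the stock pycls layout the usual preamble is the one sketched below; individual forks may rename or relocate these modules, so treat it as an assumption rather than part of the example.

# Import preamble typically assumed by these snippets (stock pycls layout;
# forks may differ).
import pycls.core.builders as builders
import pycls.core.config as config
import pycls.core.distributed as dist
import pycls.core.net as net
import pycls.core.trainer as trainer
from pycls.core.config import cfg


if __name__ == "__main__":
    main()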
Example #2
def main():
    config.load_cfg_fom_args("Scale a model.")
    config.assert_and_infer_cfg()
    # Measure complexity of the original model, scale it, then measure again
    cx_orig = net.complexity(builders.get_model())
    scaler.scale_model()
    cx_scaled = net.complexity(builders.get_model())
    # Dump the scaled config and report both complexities
    cfg_file = config.dump_cfg()
    print("Scaled config dumped to:", cfg_file)
    print("Original model complexity:", cx_orig)
    print("Scaled model complexity:", cx_scaled)
Example #3
def main():
    # Load config options
    config.load_cfg_fom_args("Test a trained classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()

    # Perform evaluation
    if cfg.NUM_GPUS > 1:
        dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=test_model)
    else:
        test_model()
Example #4
def main():
    config.load_cfg_fom_args("Train a classification model.")
    config.assert_and_infer_cfg()

    # Merge options from the global EasyDict-style config into the pycls cfg
    D2Utils.cfg_merge_from_easydict(cfg, global_cfg)

    cfg.freeze()

    # Resolve the trainer module dynamically (defaults to pycls.core.trainer)
    trainer_module = cfg.get('trainer_module', 'pycls.core.trainer')
    trainer_module = importlib.import_module(trainer_module)
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer_module.train_model)
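The cfg.get('trainer_module', ...) / importlib.import_module pair lets a project swap in its own trainer without touching this launcher; the selected module only needs to expose a train_model callable. A sketch of such a drop-in module (module name and function body are illustrative assumptions, not from the source):

# my_project/trainer.py -- hypothetical custom trainer module. Selecting it
# would only require setting trainer_module to "my_project.trainer" in the cfg.
def train_model():
    # Custom training loop; dist.multi_proc_run invokes this once per process.
    pass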
Example #5
def main():
    config.load_cfg_fom_args("Train a classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    print("building model {}".format(cfg.MODEL.TYPE))
    model = build_model()
    model.eval()
    # Run a forward pass on a dummy 1x3x224x224 input and report the output
    # shape and the model complexity
    x = torch.randn(1, 3, 224, 224)
    y = model(x)
    print(y.shape)
    model_complex = complexity(model)
    print(model_complex)
Example #6
def main():
    # Parse cmd line args
    args = parse_args()

    # Load config options
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_and_infer_cfg()
    cfg.freeze()

    # Perform evaluation
    if cfg.NUM_GPUS > 1:
        mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_test)
    else:
        single_proc_test()
Example #7
def main():
    # Load config options
    config.load_cfg_fom_args("Train a classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()

    # Ensure that the output dir exists
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    # Save the config
    config.dump_cfg()

    # Perform training
    if cfg.NUM_GPUS > 1:
        dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=train_model)
    else:
        train_model()
Example #8
def main():
    # Parse cmd line args
    args = parse_args()

    # Load config options
    cfg.merge_from_file(args.cfg_file)
    cfg.merge_from_list(args.opts)
    assert_and_infer_cfg()
    cfg.freeze()

    # Ensure that the output dir exists
    os.makedirs(cfg.OUT_DIR, exist_ok=True)
    # Save the config
    dump_cfg()

    # Perform training
    if cfg.NUM_GPUS > 1:
        mpu.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=single_proc_train)
    else:
        single_proc_train()
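Examples #6 and #8 call a parse_args() helper that is not shown. In the older pycls tool scripts this is a small argparse wrapper taking a --cfg file plus trailing KEY VALUE overrides; a sketch under that assumption:

# Hedged reconstruction of the parse_args helper used above (argument names
# follow the common pycls convention; verify against the actual tool script).
import argparse
import sys


def parse_args():
    parser = argparse.ArgumentParser(description="Config file and options")
    parser.add_argument("--cfg", dest="cfg_file", required=True,
                        help="Path to the config file")
    parser.add_argument("opts", nargs=argparse.REMAINDER, default=None,
                        help="Optional KEY VALUE config overrides")
    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)
    return parser.parse_args()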
Example #9
def main():

    corruptions, levels = config.load_cfg_fom_args(
        "Train a classification model.")
    config.assert_and_infer_cfg()

    # Perform training
    results = dist.multi_proc_run(
        num_proc=cfg.NUM_GPUS,
        fun=lambda: trainer.test_ftta_model(corruptions, levels))

    # Plot the results table
    for index, level in enumerate(levels):
        console = Console()
        table = Table(show_header=True, header_style="cyan")
        table.add_column('level')
        for corruption in corruptions:
            table.add_column(corruptions_concise[corruption])
        res = [str(level)] + [str(x) for x in results[index]]
        table.add_row(*res)
        console.print(table)
Example #10
def main():
    config.load_cfg_fom_args("Test a trained classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    test()
Example #11
def main():
    config.load_cfg_fom_args("Compute model and loader timings.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.time_model)
Example #12
def main():
    config.load_cfg_fom_args("Test a trained classification model.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.test_model)
Example #13
def main():
    config.load_cfg_fom_args("Compute precise time for a model on 1 GPU.")
    config.assert_and_infer_cfg()
    cfg.freeze()
    dist.multi_proc_run(num_proc=cfg.NUM_GPUS, fun=trainer.time_model)
Example #14
SPACE = 'darts'
DATASET = 'cifar10'
SOURCE_TASK = 'cls'
TARGET_TASK = 'psd50000'

file_list = np.load(
    f'configs/sample_based/{SPACE}/{DATASET}/selected_files.npy',
    allow_pickle=True)
input_dir = f'configs/sample_based/{SPACE}/{DATASET}/{SOURCE_TASK}/'
output_dir = f'configs/sample_based/{SPACE}/{DATASET}/{TARGET_TASK}/'

# Create the target directory and rewrite each selected source-task config
# for the pseudo-label (psd) target task
os.makedirs(output_dir, exist_ok=True)
for f in file_list:
    f_name = f.split('.')[0]
    source_config = f'{input_dir}/{f}'
    target_config = f'{output_dir}/{f}'
    config.load_cfg(input_dir, f)
    config.assert_and_infer_cfg()
    # Override training options for pseudo-label training, then dump the config
    config.cfg.TRAIN.PSD_LABEL_SPLIT = 50000
    config.cfg.TRAIN.PSD_UNLABEL_BATCH_SIZE = 50
    config.cfg.TRAIN.PSD_LABEL_BATCH_SIZE = 32
    config.cfg.TRAIN.PSD_THRESHOLD = 0.95
    config.cfg.LOG_PERIOD = 100
    config.cfg.TASK = 'psd'
    config.cfg.OPTIM.MAX_EPOCH = 50
    config.cfg.OPTIM.BASE_LR = 0.1
    config.cfg.OPTIM.MOMENTUM = 0.9
    config.dump_cfg_to_file(target_config)
    print(source_config, target_config)
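Because the rewritten configs are written out with config.dump_cfg_to_file, a quick sanity check is to load one dumped file back with yacs and confirm the overridden keys took effect. A minimal sketch, assuming the dumped files are plain yacs YAML:

# Round-trip check for the last dumped config (assumes standard yacs YAML).
from yacs.config import CfgNode

with open(target_config) as fh:
    loaded = CfgNode.load_cfg(fh)
assert loaded.TASK == 'psd'
assert loaded.OPTIM.MAX_EPOCH == 50
print("verified", target_config)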