Example 1
def test_compression_eval_trained(_params, tmp_path):
    p = _params
    args = p['args']
    tc = p['test_config']

    args['mode'] = 'test'
    args['log-dir'] = tmp_path
    args['workers'] = 4
    args['seed'] = 1
    checkpoint_path = os.path.join(args['checkpoint-save-dir'],
                                   tc['checkpoint_name'] + '_best.pth')
    args['resume'] = checkpoint_path
    if 'weights' in args:
        del args['weights']

    reset_context('orig')
    reset_context('quantized_graphs')
    runner = Command(
        create_command_line(get_cli_dict_args(args), tc['sample_type']))
    res = runner.run(timeout=tc['timeout'])
    assert res == 0

    acc1 = parse_best_acc1(tmp_path)
    assert torch.load(checkpoint_path)['best_acc1'] == approx(
        acc1, abs=tc['absolute_tolerance_eval'])
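
The snippets in this section assume import os, import re, import json, import torch, from pytest import approx, and the project-local test helpers (Command, create_command_line, get_cli_dict_args, parse_best_acc1, reset_context, ExecutionMode). The parse_best_acc1 helper itself is never shown; here is a minimal sketch, assuming it locates the sample's output file under the log directory and parses the top-1 accuracy from its last line, which is the same logic Example 4 below inlines:

import os
import re

def parse_best_acc1(log_dir):
    # Hypothetical reconstruction: keep the last 'output' file found under
    # log_dir and parse the first float on its last line as top-1 accuracy.
    output_path = None
    for root, _, names in os.walk(str(log_dir)):
        for name in names:
            if 'output' in name:
                output_path = os.path.join(root, name)
    assert output_path is not None, 'no output file found under {}'.format(log_dir)
    with open(output_path, encoding='utf-8') as stream:
        last_line = stream.readlines()[-1]
    return float(re.findall(r'\d+\.\d+', last_line)[0])
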
Example 2
def test_loaded_model_evals_according_to_saved_acc(_params, tmp_path):
    p = _params
    config_path = p['sample_config_path']
    checkpoint_path = p['checkpoint_path']

    tmp_path = str(tmp_path)
    args = {}
    args['data'] = os.path.join(tmp_path, p['dataset'])
    args['dataset'] = p['dataset']
    args['config'] = str(config_path)
    args['mode'] = 'test'
    args['log-dir'] = tmp_path
    args['workers'] = 4
    args['seed'] = 1
    args['resume'] = checkpoint_path

    if p['execution_mode'] == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:
        args['multiprocessing-distributed'] = ''
    else:
        pytest.skip("DataParallel eval takes too long for this test to be run during pre-commit")

    runner = Command(create_command_line(get_cli_dict_args(args), "classification"))
    res = runner.run()
    assert res == 0

    acc1 = parse_best_acc1(tmp_path)
    assert torch.load(checkpoint_path)['best_acc1'] == pytest.approx(acc1)
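
How the args dict becomes a command line is left implicit. A plausible sketch of the two helpers, assuming each key turns into a --key option, an empty-string value (as with multiprocessing-distributed above) marks a bare flag, and the sample entry point lives at examples/<sample_type>/main.py (that path is a guess, not a confirmed layout):

import sys

def get_cli_dict_args(args):
    # Prefix each key with '--'; an empty-string value becomes a bare flag.
    cli_args = {}
    for key, value in args.items():
        cli_args['--' + str(key)] = None if value == '' else str(value)
    return cli_args

def create_command_line(cli_args, sample_type):
    # 'examples/<sample_type>/main.py' is an assumed entry-point location.
    executable = 'examples/{}/main.py'.format(sample_type)
    options = ' '.join(key if value is None else '{} {}'.format(key, value)
                       for key, value in cli_args.items())
    return '{} {} {}'.format(sys.executable, executable, options)
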
Example 3
def test_compression_train(_params, tmp_path):
    p = _params
    args = p['args']
    tc = p['test_config']

    args['mode'] = 'train'
    args['log-dir'] = tmp_path
    args['workers'] = 4
    args['seed'] = 1

    reset_context('orig')
    reset_context('quantized_graphs')
    runner = Command(
        create_command_line(get_cli_dict_args(args), tc['sample_type']))
    res = runner.run(timeout=tc['timeout'])

    assert res == 0
    checkpoint_path = os.path.join(args['checkpoint-save-dir'],
                                   tc['checkpoint_name'] + '_best.pth')
    assert os.path.exists(checkpoint_path)
    actual_acc = torch.load(checkpoint_path)['best_acc1']
    ref_acc = tc['expected_accuracy']
    better_accuracy_tolerance = 3
    tolerance = tc['absolute_tolerance_train'] if actual_acc < ref_acc else better_accuracy_tolerance
    assert actual_acc == approx(ref_acc, abs=tolerance)
Example 4
def test_compression_eval_trained(_params, tmp_path):
    p = _params
    args = p['args']
    tc = p['test_config']

    args['mode'] = 'test'
    args['log-dir'] = tmp_path
    args['workers'] = 4
    args['seed'] = 1
    checkpoint_path = os.path.join(args['checkpoint-save-dir'],
                                   tc['checkpoint_name'] + '_best.pth')
    args['resume'] = checkpoint_path
    if 'weights' in args:
        del args['weights']

    reset_context('orig')
    reset_context('quantized_graphs')
    runner = Command(
        create_command_line(get_cli_dict_args(args), tc['sample_type']))
    res = runner.run(timeout=tc['timeout'])
    assert res == 0

    output_path = None
    for root, _, names in os.walk(str(tmp_path)):
        for name in names:
            if 'output' in name:
                output_path = os.path.join(root, name)

    assert output_path is not None  # os.path.exists(None) would raise TypeError
    with open(output_path, "r") as f:
        last_line = f.readlines()[-1]
        acc1 = float(re.findall(r"\d+\.\d+", last_line)[0])
        assert torch.load(checkpoint_path)['best_acc1'] == approx(
            acc1, abs=tc['absolute_tolerance_eval'])
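
Every example drives the sample through Command and only inspects the exit code. A minimal stand-in, assuming Command simply shells out and reports the child's return code:

import shlex
import subprocess

class Command:
    # Thin subprocess wrapper; run() blocks until the child exits (or the
    # timeout expires) and returns its exit code for the 'assert res == 0'.
    def __init__(self, cmd):
        self.cmd = cmd

    def run(self, timeout=3600):
        completed = subprocess.run(shlex.split(self.cmd), timeout=timeout, check=False)
        return completed.returncode
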
Example 5
def test_loaded_model_evals_according_to_saved_acc(_params, tmp_path, dataset_dir):
    p = _params
    config_path = p['sample_config_path']
    checkpoint_path = p['checkpoint_path']

    metrics_path = str(tmp_path.joinpath('metrics.json'))
    tmp_path = str(tmp_path)
    args = {}
    if not dataset_dir:
        dataset_dir = tmp_path
    args['data'] = dataset_dir
    args['dataset'] = p['dataset']
    args['config'] = str(config_path)
    args['mode'] = 'test'
    args['log-dir'] = tmp_path
    args['workers'] = 0  # Workaround for the PyTorch MultiprocessingDataLoader issue
    args['seed'] = 1
    args['resume'] = checkpoint_path
    args['metrics-dump'] = metrics_path

    if p['execution_mode'] == ExecutionMode.MULTIPROCESSING_DISTRIBUTED:
        args['multiprocessing-distributed'] = ''
    else:
        pytest.skip("DataParallel eval takes too long for this test to be run during pre-commit")

    runner = Command(create_command_line(get_cli_dict_args(args), "classification"))
    res = runner.run()
    assert res == 0

    with open(metrics_path) as metric_file:
        metrics = json.load(metric_file)
        assert torch.load(checkpoint_path)['best_acc1'] == pytest.approx(metrics['Accuracy'])
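
Example 5 swaps log scraping for a structured metrics-dump file. The assertion above implies the dump is a flat JSON object mapping metric names to values; a hypothetical producer-side sketch of that assumed format:

import json

def dump_metrics(metrics_path, metrics):
    # Assumed '--metrics-dump' format: e.g. {"Accuracy": 93.12}, matching
    # the metrics['Accuracy'] lookup in the test above.
    with open(metrics_path, 'w', encoding='utf-8') as stream:
        json.dump(metrics, stream)
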
Example 6
def test_compression_eval_trained(_params, tmp_path):
    p = _params
    args = p['args']
    tc = p['test_config']

    args['mode'] = 'test'
    args['log-dir'] = tmp_path
    args['workers'] = 0  # Workaround for PyTorch MultiprocessingDataLoader issues
    args['seed'] = 1
    checkpoint_path = os.path.join(args['checkpoint-save-dir'], tc['checkpoint_name'] + '_best.pth')
    args['resume'] = checkpoint_path
    if 'weights' in args:
        del args['weights']

    runner = Command(create_command_line(get_cli_dict_args(args), tc['sample_type']))
    res = runner.run(timeout=tc['timeout'])
    assert res == 0

    acc1 = parse_best_acc1(tmp_path)
    assert torch.load(checkpoint_path)['best_acc1'] == approx(acc1, abs=tc['absolute_tolerance_eval'])
Example 7
def test_compression_train(_params, tmp_path):
    p = _params
    args = p['args']
    tc = p['test_config']

    args['mode'] = 'train'
    args['log-dir'] = tmp_path
    args['workers'] = 0  # Workaround for PyTorch MultiprocessingDataLoader issues
    args['seed'] = 1

    runner = Command(create_command_line(get_cli_dict_args(args), tc['sample_type']))
    res = runner.run(timeout=tc['timeout'])
    assert res == 0

    checkpoint_path = os.path.join(args['checkpoint-save-dir'], tc['checkpoint_name'] + '_best.pth')
    assert os.path.exists(checkpoint_path)
    actual_acc = torch.load(checkpoint_path)['best_acc1']
    ref_acc = tc['expected_accuracy']
    better_accuracy_tolerance = 3
    tolerance = tc['absolute_tolerance_train'] if actual_acc < ref_acc else better_accuracy_tolerance
    assert actual_acc == approx(ref_acc, abs=tolerance)
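
All seven tests consume a _params fixture whose construction is outside this section. A hypothetical sketch of its shape, inferred only from the keys the tests read (every value below is a placeholder; the real fixture is parametrized over sample types, configs, datasets, and execution modes):

import pytest

@pytest.fixture
def _params(tmp_path_factory):
    # Examples 2 and 5 additionally read 'sample_config_path',
    # 'checkpoint_path', 'dataset', and 'execution_mode'.
    return {
        'args': {
            'data': '/path/to/dataset',                    # placeholder
            'config': 'examples/some_sample_config.json',  # placeholder
            'checkpoint-save-dir': str(tmp_path_factory.mktemp('checkpoints')),
        },
        'test_config': {
            'sample_type': 'classification',
            'checkpoint_name': 'model_int8',  # saved as '<name>_best.pth'
            'expected_accuracy': 93.0,        # placeholder number
            'absolute_tolerance_train': 1.0,
            'absolute_tolerance_eval': 0.2,
            'timeout': 3600,
        },
    }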