# Shared imports for the examples below. Framework-specific helpers such as
# synthetic_dataset_preparation, TaskFlow, the callbacks, converters and
# publishers used here are assumed to come from the project's own modules.
from collections import OrderedDict

import numpy as np
import pandas as pd
import torch
from torch import nn, optim
from torch.utils.data import DataLoader


def test_interpretation_synthetic():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
    flow = project.get_full_flow()

    loaders = OrderedDict({'infer': nested_loaders['valid']})

    model = runner.best()

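    # Bundle the run's log directory, the tensorboard converters and the datasets;
    # the interpretation callback below presumably uses them to log visualisations.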
    tensorboard_converters = TensorboardConverters(
        logdir=runner.project_dir / runner.default_logdir,
        tensorboard_loggers=TensorboardConverter(),
        datasets=datasets)

    infer_dict_callback = InferDictCallback()
    callbacks = OrderedDict([
        ("interpretation", InterpretationCallback(flow,
                                                  tensorboard_converters)),
        ("inference", infer_dict_callback),
        ("reducer", ReplaceGatherCallback(flow, infer_dict_callback))
    ])
    r = runner.infer(loaders=loaders, callbacks=callbacks)

    interpretations = callbacks["interpretation"].interpretations
    print(interpretations)


def test_load_tuned_pipeline_from_decoder():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
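    # Load decoder parameters tuned in an earlier run (saved as tuned_params.pkl
    # in the run's log directory) and plug them back into the flow's decoder.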
    tuned_params = torch.load(runner.project_dir / runner.default_logdir /
                              'tuned_params.pkl')
    flow = project.get_full_flow()
    flow.get_decoder().load_tuned(tuned_params)


def test_composite_decoding():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
    flow = project.get_full_flow()
    decoder = flow.get_decoder()
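    # The composite decoder turns the stored inference logits into activated
    # predictions, presumably one entry per task in the flow.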
    res = runner.load_inference_results()
    activated_predictions = decoder(res['logits']['test'])
    print(activated_predictions)


def test_composite_filtering():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
    flow = project.get_full_flow()
    filter_func = flow.get_filter()
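    # The flow's filter takes both predictions and targets, presumably dropping
    # samples whose ground truth is marked as unavailable.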
    res = runner.load_inference_results()
    filtered_results = filter_func(res['logits']['test'],
                                   res['targets']['test'])
    print(filtered_results)


def test_evaluation_is_shown():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
    evaluation = runner.evaluate()
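    # evaluate() appears to return a dataframe with one row per (task, metric)
    # pair; the thresholds below are specific to the synthetic dataset.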
    accuracy_df = evaluation[evaluation['metric_name'] == 'accuracy']
    assert np.all(accuracy_df['metric_res'] > 0.98)
    mae_df = evaluation[evaluation['metric_name'] == 'mean_absolute_error']
    assert np.all(mae_df['metric_res'] < 5e-2)
    pd.set_option('display.max_columns', None)
    print(evaluation)
# Example #6
def test_synthetic_dataset_missing_values():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    dataset = datasets['train']

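    # A negative target marks a missing label in the synthetic dataset; the
    # corresponding entry of the ground-truth availability mask must be False.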
    for i in range(32):
        X, y = dataset[i]
        assert 'gt' in X
        assert '_availability' in X['gt']

        if y['camera_blocked'].item() < 0:
            assert not X['gt']['_availability']['camera_blocked'].item()
def test_interpretation_default_runner():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model,
                            runner_name='default_experiment',
                            balance_dataparallel_memory=True)
    model = runner.best()
    project.converters.tensorboard_converters.type_mapping['img'] = [
        img_publisher
    ]
    project.converters.tensorboard_converters.type_mapping['text'] = [
        text_publisher
    ]
    r = runner.infer(model=model)
    print(r)
# Example #8
def test_synthetic_dataset_default_runner():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model,
                            runner_name='default_experiment',
                            balance_dataparallel_memory=True)
    flow: TaskFlow = project.get_full_flow()
    criterion = flow.get_loss()
    assert len(criterion.get_metrics()) < 100, 'Too many callbacks!'

    project.converters.tensorboard_converters.type_mapping['img'] = [img_publisher]
    project.converters.tensorboard_converters.type_mapping['text'] = [text_publisher]
    # runner.train(num_epochs=10, callbacks=runner.default_callbacks[:1])
    runner.train(num_epochs=10)

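    # The last default callback is taken to be the early-stopping one; a negative
    # best score would mean the loss itself went negative.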
    early_stop_callback = runner.default_callbacks[-1]
    assert early_stop_callback.best_score >= 0, 'Negative loss function!'
    print_any_prediction(criterion, model, nested_loaders, runner)
# Example #9
def treelib_explanation_on_first_batch():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
    model = runner.best()
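    # Batch size scales with the number of visible GPUs; this assumes at least
    # one CUDA device, otherwise the batch would be empty.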
    n = 4 * torch.cuda.device_count()
    flow: TaskFlow = project.get_full_flow()
    dataset = flow.get_dataset()
    loader = DataLoader(dataset, batch_size=n, shuffle=False)
    X, y = next(iter(loader))
    del X['gt']
    model = model.eval()
    res = model(X)
    treelib_explainer = flow.get_treelib_explainer()
    tree = treelib_explainer(res)
    return tree
# Example #10
def test_synthetic_dataset():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='security_logs')
    flow: TaskFlow = project.get_full_flow()
    criterion = flow.get_loss()

    args = TrainingArguments(
        num_epochs=2,
        callbacks=[],
        loaders=nested_loaders,
        optimizer=optim.Adam(model.parameters(), lr=1e-4),
    )

    runner.train(**args)

    print_any_prediction(criterion, model, nested_loaders, runner)
# Example #11
def test_rf_multi_input():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model,
                            runner_name='default_experiment',
                            balance_dataparallel_memory=False)
    loader = nested_loaders['valid']
    X, y = next(iter(loader))
    X = runner.batch_to_model_device(X)
    y = runner.batch_to_model_device(y)

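    # Wrap the trained model so that only the first four layers of its sequential
    # backbone run on the image input, exposing an intermediate feature map.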
    class FeaturesModel(nn.Module):
        def __init__(self, model):
            super().__init__()
            self.model = model

        def forward(self, x):
            features = self.model.seq[:4](x['syn_img'])
            return {'features': features}

    model = FeaturesModel(model).eval()
    with torch.no_grad():
        res = model(X)
        out_mask = {
            key: torch.zeros_like(value).bool()
            for key, value in res.items()
        }
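        # Mark a single spatial unit of the feature map; compute_receptive_field
        # should trace back the input pixels that can influence it.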
        out_mask['features'][:, :, 11, 11] = True

    rf_mask = compute_receptive_field(model, X, out_mask)

    nonzero_indices = torch.nonzero(rf_mask['syn_img'])
    mins = nonzero_indices.min(dim=0).values
    maxs = nonzero_indices.max(dim=0).values + 1
    lengths = maxs - mins

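    # The marked unit is expected to see a 9x9 patch of the input image across
    # all 3 channels and in every batch element.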
    assert lengths[0].item() == loader.batch_size
    assert lengths[1].item() == 3
    assert lengths[2].item() == 9
    assert lengths[3].item() == 9


def test_load_tuned_pipeline():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    runner = project.runner(model=model, runner_name='default_experiment')
    tuned_params = runner.load_tuned()
    print(tuned_params)
# Example #13
def test_repr_flow():
    model, nested_loaders, datasets, project = synthetic_dataset_preparation()
    flow = project.get_full_flow()
    print(flow)