def contrib_score_grad(trained_model):
    K.clear_session()
    fpath = trained_model / 'imp-score.grad.fixture.h5'
    bpnet_contrib(str(trained_model), str(fpath),
                  method='grad',
                  overwrite=True)
    return fpath
def test_contrib_regions(tmp_path, method, trained_model, regions):
    """Test subsetting the scored regions via an explicit `regions` file"""
    K.clear_session()
    bpnet_contrib(str(trained_model), str(tmp_path / 'imp-score.h5'),
                  method=method,
                  regions=str(regions))
def test_contrib_bias_model(tmp_path, method, trained_model_w_bias):
    """Test that we can compute the different contribution scores for a model trained with a bias track"""
    K.clear_session()
    fpath = tmp_path / 'imp-score.h5'
    bpnet_contrib(str(trained_model_w_bias), str(fpath),
                  method=method)
    cf = ContribFile(fpath)
    # contribution scores are computed w.r.t. the one-hot-encoded sequence (4 channels)
    assert cf.get_contrib()['Task1'].shape[-1] == 4
def contrib_score_grad_null(trained_model):
    K.clear_session()
    fpath = trained_model / 'imp-score.grad.null.fixture.h5'
    # shuffle_seq=True scores shuffled input sequences, producing a null (background)
    # set of contribution scores; only 16 regions are scored to keep the fixture small
    bpnet_contrib(str(trained_model), str(fpath),
                  method='grad',
                  shuffle_seq=True,
                  max_regions=16,
                  overwrite=True)
    return fpath
def test_contrib_dataspec_fasta_file(tmp_path, method, trained_model, dataspec_bias, fasta_file):
    """Passing both a dataspec and a fasta_file is not allowed and should raise a ValueError"""
    K.clear_session()
    with pytest.raises(ValueError):
        bpnet_contrib(str(trained_model), str(tmp_path / 'imp-score.h5'),
                      method=method,
                      fasta_file=str(fasta_file),
                      dataspec=str(dataspec_bias))
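# Hedged sketch, not part of the original module: a minimal consumer of the gradient
# contribution fixture defined above. It assumes that `contrib_score_grad` is registered
# as a pytest fixture, that the scored model exposes a task named 'Task1' (as in
# test_contrib_bias_model), and that the module-level imports (ContribFile) are in scope.
# The test name below is illustrative only.
def test_contrib_score_grad_readable(contrib_score_grad):
    """Sketch: the gradient contribution fixture can be opened and queried per task."""
    cf = ContribFile(contrib_score_grad)
    # contribution scores are per-base over the one-hot sequence axis (4 channels)
    assert cf.get_contrib()['Task1'].shape[-1] == 4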