def test_variance(distributions, reduction):
    # WARNING: Highly variable test. (Need more iteration for a better estimation.)
    np.random.seed(1337)

    # With no extra positional argument, the heuristic recovers the exact ordering.
    heuristic = Variance(reduction=reduction)
    ranks = heuristic(distributions)
    assert np.all(ranks == [1, 2, 0]), "Variance is not right {}".format(ranks)

    # Passing 0.99 as the first positional argument (presumably a shuffle
    # proportion — TODO confirm against the Variance signature) should perturb
    # the ordering so it no longer matches exactly.
    heuristic = Variance(0.99, reduction=reduction)
    ranks = heuristic(distributions)
    assert np.any(ranks != [1, 2, 0])
def test_heuristics_reorder_list():
    # we are just testing if given calculated uncertainty measures for chunks of data
    # the `reorder_indices` would make correct decision. Here index 0 has the
    # highest uncertainty chosen but both methods (uncertainties1 and uncertainties2)
    streaming_prediction = [
        np.array([0.98]),
        np.array([0.87, 0.68]),
        np.array([0.96, 0.54]),
    ]

    # Each heuristic paired with its expected ranking and failure message;
    # uncertainty-maximizing heuristics agree, margin-style ones are reversed.
    cases = [
        (BALD(), [0, 3, 1, 2, 4], "reorder list for BALD is not right {}"),
        (Variance(), [0, 3, 1, 2, 4], "reorder list for Variance is not right {}"),
        (Entropy(), [0, 3, 1, 2, 4], "reorder list for Entropy is not right {}"),
        (Margin(), [4, 2, 1, 3, 0], "reorder list for Margin is not right {}"),
        (Certainty(), [4, 2, 1, 3, 0], "reorder list for Certainty is not right {}"),
    ]
    for heuristic, expected, message in cases:
        ranks = heuristic.reorder_indices(streaming_prediction)
        assert np.all(ranks == expected), message.format(ranks)

    # Random ordering is not deterministic; only the pool size is checked.
    ranks = Random().reorder_indices(streaming_prediction)
    assert ranks.size == 5, "reorder list for Random is not right {}".format(ranks)
def test_combine_heuristics_reorder_list():
    # we are just testing if given calculated uncertainty measures for chunks of data
    # the `reorder_indices` would make correct decision. Here index 0 has the
    # highest uncertainty chosen but both methods (uncertainties1 and uncertainties2)
    # Each chunk carries one score array per wrapped heuristic: [BALD, Variance].
    chunked_scores = [
        [np.array([0.98]), np.array([0.76])],              # chunk 1 (one sample)
        [np.array([0.87, 0.68]), np.array([0.63, 0.48])],  # chunk 2 (two samples)
    ]
    combined = CombineHeuristics(
        [BALD(), Variance()], weights=[0.5, 0.5], reduction='mean'
    )
    ranks = combined.reorder_indices(chunked_scores)
    assert np.all(ranks == [0, 1, 2]), "Combine Heuristics is not right {}".format(ranks)
def test_combine_heuristics_uncertainty_generator():
    # Chunked (generator) input and the equivalent in-memory input must yield
    # the same combined uncertainties and the same final ranking.
    np.random.seed(1337)
    combined = CombineHeuristics(
        [BALD(), Variance()], weights=[0.5, 0.5], reduction='mean'
    )

    full_predictions = [distributions_3d, distributions_5d]
    chunked = [chunks(distributions_3d, 2), chunks(distributions_5d, 2)]
    assert np.allclose(
        combined.get_uncertainties(full_predictions),
        combined.get_uncertainties(chunked),
    )

    # Generators were consumed above; rebuild them before ranking.
    chunked = [chunks(distributions_3d, 2), chunks(distributions_5d, 2)]
    ranks = combined(chunked)
    assert np.all(ranks == [1, 2, 0]), "Combine Heuristics is not right {}".format(ranks)
def wrapped(_, logits): return logits probability_distribution = wrapped(None, logits) assert np.alltrue((probability_distribution >= 0) & (probability_distribution <= 1)).all() def test_that_precomputed_passes_back_predictions(): precomputed = Precomputed() ranks = np.arange(10) assert (precomputed(ranks) == ranks).all() @pytest.mark.parametrize('heuristic1, heuristic2, weights', [(BALD(), Variance(), [0.7, 0.3]), (BALD(), Entropy(reduction='mean'), [0.9, 0.8]), (Entropy(), Variance(), [4, 8]), (Certainty(), Variance(), [9, 2]), (Certainty(), Certainty(reduction='mean'), [1, 3])]) def test_combine_heuristics(heuristic1, heuristic2, weights): np.random.seed(1337) predictions = [distributions_3d, distributions_5d] if isinstance(heuristic1, Certainty) and not isinstance(heuristic2, Certainty): with pytest.raises(Exception) as e_info: heuristics = CombineHeuristics([heuristic1, heuristic2], weights=weights, reduction='mean') assert 'heuristics should have the same value for `revesed` parameter' in str(
def wrapped(_, logits): return logits probability_distribution = wrapped(None, logits) assert np.alltrue((probability_distribution >= 0) & (probability_distribution <= 1)).all() def test_that_precomputed_passes_back_predictions(): precomputed = Precomputed() ranks = np.arange(10) assert (precomputed(ranks) == ranks).all() @pytest.mark.parametrize( 'heuristic1, heuristic2, weights', [(BALD(), Variance(), [0.7, 0.3]), (BALD(), Entropy(reduction='mean'), [0.9, 0.8]), (Entropy(), Variance(), [4, 8]), (Certainty(), Variance(), [9, 2]), (Certainty(), Certainty(reduction='mean'), [1, 3])] ) def test_combine_heuristics(heuristic1, heuristic2, weights): np.random.seed(1337) predictions = [distributions_3d, distributions_5d] if isinstance(heuristic1, Certainty) and not isinstance(heuristic2, Certainty): with pytest.raises(Exception) as e_info: heuristics = CombineHeuristics([heuristic1, heuristic2], weights=weights, reduction='mean') assert 'heuristics should have the same value for `revesed` parameter' in str( e_info.value)
upsampled = F.interpolate(upsample(x), size=down[-(i + 2)][0]) x = module(torch.cat((upsampled, down[-(i + 2)][1]), 1)) return self.end_conv(x) from collections.abc import Sequence def _stack_preds(out): if isinstance(out[0], Sequence): out = [torch.stack(ts, dim=-1) for ts in zip(*out)] else: out = torch.stack(out, dim=-1) return out if __name__ == '__main__': unet = UNet() # print(unet) x = torch.rand(1, 3, 733, 427) with torch.no_grad(): preds = [unet(x) for _ in range(20)] preds_stack = _stack_preds(preds) # heuristic = BALD() heuristic = Variance() # heuristic = Entropy() metric = heuristic(preds_stack) print('Input:', x.shape, '-> UNet(input):', unet(x).shape)
model = nn.Sequential(nn.Flatten(), nn.Linear(9, 16), nn.Dropout(), nn.Linear(16, 8), nn.ReLU(), nn.Linear(8, 1)) model = patch_module(model) # Set dropout layers for MC-Dropout. model.apply(weight_init_normal) if use_cuda: model = model.cuda() wrapper = ModelWrapper(model=model, criterion=nn.L1Loss()) optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0.9, weight_decay=5e-4) # We will use Variance as our heuristic for regression problems. variance = Variance() # Setup our active learning loop for our experiments al_loop = ActiveLearningLoop( dataset=al_dataset, get_probabilities=wrapper.predict_on_dataset, heuristic=variance, query_size=250, # We will label 20 examples per step. # KWARGS for predict_on_dataset iterations=20, # 20 sampling for MC-Dropout batch_size=16, use_cuda=use_cuda, verbose=False, workers=0, )