Example #1
    def on_epoch_end(self, data: Data):
        for _ in range(self.trials):
            img_path = self.dataset.one_shot_trial(self.N)
            # Load both image sets as normalized float32 arrays with a trailing channel dimension
            input_img = (
                np.array([np.expand_dims(cv2.imread(i, cv2.IMREAD_GRAYSCALE), -1) / 255.
                          for i in img_path[0]], dtype=np.float32),
                np.array([np.expand_dims(cv2.imread(i, cv2.IMREAD_GRAYSCALE), -1) / 255.
                          for i in img_path[1]], dtype=np.float32))
            prediction_score = feed_forward(self.model, input_img, training=False).numpy()

            # A trial counts as correct when the true pairing (index 0) receives the highest score
            if np.argmax(prediction_score) == 0 and prediction_score.std() > 0.01:
                self.correct += 1

            self.total += 1

        data.write_with_log(self.outputs[0], self.correct / self.total)
Example #2
    def on_epoch_end(self, data: Data):
        device = next(self.model.parameters()).device
        for _ in range(self.trials):
            img_path = self.dataset.one_shot_trial(self.N)
            # Load both image sets as normalized float32 arrays in channel-first (1, 105, 105) layout
            input_img = (
                np.array([np.expand_dims(cv2.imread(i, cv2.IMREAD_GRAYSCALE), -1).reshape((1, 105, 105)) / 255.
                          for i in img_path[0]], dtype=np.float32),
                np.array([np.expand_dims(cv2.imread(i, cv2.IMREAD_GRAYSCALE), -1).reshape((1, 105, 105)) / 255.
                          for i in img_path[1]], dtype=np.float32))

            # Move the inputs to the model's device and unwrap DataParallel when multiple GPUs are in use
            input_img = (to_tensor(input_img[0], "torch").to(device),
                         to_tensor(input_img[1], "torch").to(device))
            model = self.model.module if torch.cuda.device_count() > 1 else self.model
            prediction_score = feed_forward(model, input_img, training=False).cpu().detach().numpy()

            # A trial counts as correct when the true pairing (index 0) receives the highest score
            if np.argmax(prediction_score) == 0 and prediction_score.std() > 0.01:
                self.correct += 1

            self.total += 1

        data.write_with_log(self.outputs[0], self.correct / self.total)
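Both on_epoch_end variants above rely on state that the surrounding class must provide: self.model, self.dataset, self.N, self.trials, and the running correct/total counters. For context, here is a minimal sketch of the kind of Trace subclass that could carry that state; the class name, constructor arguments, and defaults are assumptions for illustration, with only FastEstimator's Trace base class taken as given.

from fastestimator.trace import Trace
from fastestimator.util import Data  # in some versions: from fastestimator.util.data import Data


class OneShotAccuracy(Trace):  # hypothetical name
    def __init__(self, model, dataset, N=20, trials=400, output_name="one_shot_accuracy"):
        super().__init__(mode="eval", outputs=output_name)
        self.model = model
        self.dataset = dataset
        self.N = N              # images compared in each one-shot trial
        self.trials = trials    # trials run at the end of every epoch
        self.correct = 0
        self.total = 0

    def on_epoch_begin(self, data: Data):
        # Reset the counters so each epoch's accuracy is computed independently
        self.correct = 0
        self.total = 0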
Example #3
 def setUpClass(cls):
     x = np.array([[1, 2], [3, 4]])
     x_pred = np.array([[1, 5, 3], [2, 1, 0]])
     x_1d = np.array([2.5])
     x_pred_1d = np.array([1])
     cls.data = Data({'x': x, 'x_pred': x_pred})
     cls.data_1d = Data({'x': x_1d, 'x_pred': x_pred_1d})
     cls.mcc = MCC(true_key='x', pred_key='x_pred')
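Example #4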
 def setUpClass(cls):
     x = np.array([[1, 2], [3, 4]])
     x_pred = np.array([[1, 5, 3], [2, 1, 0]])
     x_binary = np.array([1])
     x_pred_binary = np.array([0.9])
     cls.data = Data({'x': x, 'x_pred': x_pred})
     cls.data_binary = Data({'x': x_binary, 'x_pred': x_pred_binary})
     cls.precision = Precision(true_key='x', pred_key='x_pred')
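Example #5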
 def setUpClass(cls):
     x = np.array([[1, 2], [3, 4]])
     x_pred = np.array([[1, 5, 3], [2, 1, 0]])
     x_binary = np.array([1])
     x_pred_binary = np.array([0.9])
     cls.data = Data({'x': x, 'x_pred': x_pred})
     cls.data_binary = Data({'x': x_binary, 'x_pred': x_pred_binary})
     cls.f1score = F1Score(true_key='x', pred_key='x_pred')
     cls.f1score_output = np.array([0., 0.67])
Example #6
    def on_batch_end(self, data: Data) -> None:
        y_true, y_pred = to_number(data[self.true_key]), to_number(data[self.pred_key])
        batch_size = y_true.shape[0]
        # Flatten everything except the batch dimension
        y_true, y_pred = y_true.reshape((batch_size, -1)), y_pred.reshape((batch_size, -1))

        # Binarize the predictions before measuring overlap with the ground truth
        prediction_label = (y_pred >= self.threshold).astype(np.int32)

        intersection = np.sum(y_true * prediction_label, axis=-1)
        area_sum = np.sum(y_true, axis=-1) + np.sum(prediction_label, axis=-1)
        dice = (2. * intersection + self.smooth) / (area_sum + self.smooth)
        data.write_per_instance_log(self.outputs[0], dice)
        self.dice.extend(list(dice))
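The per-instance value written above is the smoothed Dice coefficient, 2 * intersection / (area of truth + area of prediction). A standalone NumPy check on a toy batch; the threshold and smooth values here are illustrative assumptions rather than the trace's actual defaults:

import numpy as np

y_true = np.array([[1, 1, 0, 0]], dtype=np.int32)            # one instance, four pixels
y_pred = np.array([[0.9, 0.4, 0.6, 0.1]], dtype=np.float32)  # predicted probabilities
threshold, smooth = 0.5, 1e-6                                 # illustrative values

prediction_label = (y_pred >= threshold).astype(np.int32)                # [[1, 0, 1, 0]]
intersection = np.sum(y_true * prediction_label, axis=-1)                # [1]
area_sum = np.sum(y_true, axis=-1) + np.sum(prediction_label, axis=-1)   # [4]
dice = (2. * intersection + smooth) / (area_sum + smooth)
print(dice)  # ~[0.5]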
Example #7
    def setUpClass(cls):
        x = np.array([[1, 2], [3, 4]])
        x_pred = np.array([[1, 5, 3], [2, 1, 0]])
        x_1d = np.array([2.5])
        x_pred_1d = np.array([1])
        x_1d_logit = np.array([1])
        x_pred_1d_logit = np.array([2.5])

        cls.data = Data({'x': x, 'x_pred': x_pred})
        cls.data_1d = Data({'x': x_1d, 'x_pred': x_pred_1d})
        cls.data_1d_logit = Data({'x': x_1d_logit, 'x_pred': x_pred_1d_logit})
        cls.accuracy = Accuracy(true_key='x', pred_key='x_pred')
        cls.accuracy_logit = Accuracy(true_key='x',
                                      pred_key='x_pred',
                                      from_logits=True)
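Example #8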
 def _simulate_training(self, trace: LabelTracker, data: Data):
     # Drive the trace through the fake training batches, mirroring the hooks called during real training
     trace.on_begin(Data())
     system = trace.system
     global_step = 0
     for epoch_idx, epoch in enumerate(self.training_data, start=1):
         system.epoch_idx = epoch_idx
         trace.on_epoch_begin(Data())
         for batch_idx, batch in enumerate(epoch, start=1):
             system.batch_idx = batch_idx
             global_step += 1
             system.global_step = global_step
             trace.on_batch_begin(Data())
             trace.on_batch_end(batch)
         trace.on_epoch_end(Data())
     trace.on_end(data)
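Example #9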
 def test_on_batch_end(self):
     self.calibration_error.y_true = []
     self.calibration_error.y_pred = []
     batch1 = {
         'y': np.array([0, 0, 1, 1]),
         'y_pred': np.array([[1.0, 0.0], [1.0, 0.0], [0.0, 1.0], [0.0, 1.0]])
     }
     self.calibration_error.on_batch_end(data=Data(batch1))
     with self.subTest('Check true values'):
         self.assertTrue(is_equal(self.calibration_error.y_true, list(batch1['y'])))
     with self.subTest('Check pred values'):
         self.assertTrue(is_equal(self.calibration_error.y_pred, list(batch1['y_pred'])))
     batch2 = {
         'y': np.array([1, 1, 0, 0]),
         'y_pred': np.array([[0.0, 1.0], [0.0, 1.0], [1.0, 0.0], [1.0, 0.0]])
     }
     self.calibration_error.on_batch_end(data=Data(batch2))
     with self.subTest('Check true values (2 batches)'):
         self.assertTrue(is_equal(self.calibration_error.y_true, list(batch1['y']) + list(batch2['y'])))
     with self.subTest('Check pred values (2 batches)'):
         self.assertTrue(is_equal(self.calibration_error.y_pred, list(batch1['y_pred']) + list(batch2['y_pred'])))
Example #10
 def setUpClass(cls):
     x = np.array([1, 2, 3])
     x_pred = np.array([[1, 1, 3], [2, 3, 4], [1, 1, 0]])
     cls.data = Data({'x': x, 'x_pred': x_pred})
     cls.dice_output = [1.4999999987500001, 2.3999999972, 2.3999999972]
     cls.dice = Dice(true_key='x', pred_key='x_pred')
     cls.dice.system = sample_system_object()
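Example #11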
    def test_basic_happy_path(self):
        labeltracker = LabelTracker(label='y',
                                    metric='acc',
                                    bounds=None,
                                    outputs='out')
        system = sample_system_object()
        labeltracker.system = system
        response = Data()
        self._simulate_training(labeltracker, response)

        # Check responses
        with self.subTest('Check that a response was written'):
            self.assertIn('out', response)
        with self.subTest('Check that data was written to the system'):
            self.assertIn('out', system.custom_graphs)
        response = response['out']
        with self.subTest('Check consistency of outputs'):
            self.assertEqual(response, system.custom_graphs['out'])
        with self.subTest('Check that all 3 labels have summaries'):
            self.assertEqual(3, len(response))
        with self.subTest('Check correct mean values (epoch 1)'):
            self.assertDictEqual(
                self.epoch_1_means, {
                    elem.name: round(elem.history['train']['acc'][3], 6)
                    for elem in response
                })
        with self.subTest('Check correct mean values (epoch 2)'):
            self.assertDictEqual(
                self.epoch_2_means, {
                    elem.name: round(elem.history['train']['acc'][6], 6)
                    for elem in response
                })
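Example #12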
    def test_multiple_bounds(self):
        labeltracker = LabelTracker(label='y',
                                    metric='acc',
                                    bounds=['std', 'range'],
                                    outputs='out')
        system = sample_system_object()
        labeltracker.system = system
        response = Data()
        self._simulate_training(labeltracker, response)

        # Check responses
        with self.subTest('Check that a response was written'):
            self.assertIn('out', response)
        with self.subTest('Check that data was written to the system'):
            self.assertIn('out', system.custom_graphs)
        response = response['out']
        with self.subTest('Check consistency of outputs'):
            self.assertEqual(response, system.custom_graphs['out'])
        with self.subTest('Check that all 3 labels have summaries'):
            self.assertEqual(3, len(response))
        with self.subTest('Check that regular mean is not present'):
            for elem in response:
                self.assertNotIn('acc', elem.history['train'])
        with self.subTest('Check that stddev and range are both present'):
            for elem in response:
                self.assertIn('acc ($\\mu \\pm \\sigma$)',
                              elem.history['train'])
                self.assertIn('acc ($min, \\mu, max$)', elem.history['train'])
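Example #13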
    def test_basic_happy_path(self):
        instance_tracker = InstanceTracker(index='idx',
                                           metric='ce',
                                           outputs='out',
                                           mode='train')
        system = sample_system_object()
        instance_tracker.system = system
        response = Data()
        self._simulate_training(instance_tracker, response)

        # Check responses
        with self.subTest('Check that a response was written'):
            self.assertIn('out', response)
        with self.subTest('Check that data was written to the system'):
            self.assertIn('out', system.custom_graphs)
        response = response['out']
        with self.subTest('Check consistency of outputs'):
            self.assertEqual(response, system.custom_graphs['out'])
        with self.subTest('Check that 10 indices were tracked'):
            self.assertEqual(10, len(response))
        recorded_indices = {summary.name for summary in response}
        with self.subTest('Check that 5 min keys were tracked'):
            min_keys = [3, 4, 8, 11, 14]
            for key in min_keys:
                self.assertIn(key, recorded_indices)
        with self.subTest('Check that the max keys were tracked'):
            max_keys = [7, 8, 24, 25, 27, 28]
            for key in max_keys:
                self.assertIn(key, recorded_indices)
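Example #14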
    def test_specific_indices(self):
        instance_tracker = InstanceTracker(index='idx',
                                           metric='ce',
                                           n_max_to_keep=0,
                                           n_min_to_keep=0,
                                           list_to_keep=[1, 5, 17],
                                           outputs='out',
                                           mode='train')
        system = sample_system_object()
        instance_tracker.system = system
        response = Data()
        self._simulate_training(instance_tracker, response)

        # Check responses
        with self.subTest('Check that a response was written'):
            self.assertIn('out', response)
        with self.subTest('Check that data was written to the system'):
            self.assertIn('out', system.custom_graphs)
        response = response['out']
        with self.subTest('Check consistency of outputs'):
            self.assertEqual(response, system.custom_graphs['out'])
        with self.subTest('Check that 3 indices were tracked'):
            self.assertEqual(3, len(response))
        recorded_indices = {summary.name for summary in response}
        with self.subTest('Check that the correct keys were tracked'):
            target_keys = [1, 5, 17]
            for key in target_keys:
                self.assertIn(key, recorded_indices)
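Example #15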
 def test_perfect_calibration(self):
     self.calibration_error.y_true = [0] * 50 + [1] * 50
     self.calibration_error.y_pred = list(
         np.array([1.0, 0.0] * 25 + [0.5, 0.5] * 50 + [0.0, 1.0] * 25).reshape(100, 2))
     data = Data()
     self.calibration_error.on_epoch_end(data=data)
     self.assertEqual(0.0, data['calibration_error'])
Example #16
 def setUpClass(cls):
     x = np.random.rand(1, 5, 5)
     x_pred = np.random.rand(1, 5, 7)
     cls.data = Data({'x': x, 'x_pred': x_pred})
     cls.map = MeanAveragePrecision(true_key='x',
                                    pred_key='x_pred',
                                    num_classes=3)
     cls.iou_element_shape = (5, 5)
Example #17
 def setUpClass(cls):
     x = np.array([[1, 2], [3, 4]])
     x_pred = np.array([[1, 5, 3], [2, 1, 0]])
     cls.data = Data({'x': x, 'x_pred': x_pred})
     cls.matrix = np.array([[0, 0, 0], [1, 1, 0], [0, 0, 0]])
     cls.confusion_matrix = ConfusionMatrix(true_key='x',
                                            pred_key='x_pred',
                                            num_classes=3)
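Example #18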
 def test_on_epoch_end(self):
     self.calibration_error.y_true = [0] * 50 + [1] * 50
     self.calibration_error.y_pred = list(np.array([1.0, 0.0] * 50 + [0.0, 1.0] * 50).reshape(100, 2))
     data = Data()
     self.calibration_error.on_epoch_end(data=data)
     with self.subTest('Check if calibration error exists'):
         self.assertIn('calibration_error', data)
     with self.subTest('Check the value of calibration error'):
         self.assertEqual(0.0, data['calibration_error'])
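Example #19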
    def test_save_and_load_state(self):
        def instantiate_system():
            tracker = InstanceTracker(index='idx',
                                      metric='ce',
                                      n_max_to_keep=0,
                                      n_min_to_keep=0,
                                      list_to_keep=[1, 5, 17],
                                      outputs='out',
                                      mode='train')
            system = sample_system_object()
            system.traces.append(tracker)
            tracker.system = system
            return system, tracker

        system, tracker = instantiate_system()

        # Make some changes
        response = Data()
        self._simulate_training(tracker, response)

        # Save the state
        save_path = tempfile.mkdtemp()
        system.save_state(save_dir=save_path)

        # reinstantiate system and load the state
        system, tracker = instantiate_system()
        system.load_state(save_path)

        loaded_tracker = system.traces[-1]
        response = loaded_tracker.index_history
        with self.subTest(
                'Check that the restored container has the correct type'):
            self.assertTrue(isinstance(response, defaultdict))
        with self.subTest('Check that the mode was saved'):
            self.assertIn('train', response)
        response = response['train']
        with self.subTest('Check that all 3 labels have summaries'):
            self.assertEqual(3, len(response))
        with self.subTest('Check that the specified indices were restored'):
            target_keys = [1, 5, 17]
            for key in target_keys:
                self.assertIn(key, response)
        response = response[1]
        with self.subTest('Check that values were restored'):
            self.assertEqual(0.8, round(response[0][1], 5))
            self.assertEqual(0.3, round(response[1][1], 5))
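Example #20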
    def setUpClass(cls):
        # Epoch 1 Data
        batch1_metrics = tf.constant(
            [0.9, 0.8, 0.1, 0.4, 0.9, 0.6, 0.1, 0.6, 0.8, 0.3])
        batch1_labels = tf.constant([2, 1, 0, 1, 0, 1, 1, 1, 0, 0])
        batch2_metrics = tf.constant(
            [0.3, 0.1, 0.0, 0.8, 0.5, 0.5, 0.6, 0.2, 0.5, 1.0])
        batch2_labels = tf.constant([1, 1, 1, 1, 0, 1, 0, 0, 0, 0])
        batch3_metrics = tf.constant(
            [0.3, 0.4, 0.7, 0.9, 0.3, 0.0, 0.3, 0.7, 0.8, 0.6])
        batch3_labels = tf.constant([2, 2, 2, 2, 2, 0, 2, 2, 2, 2])
        # Epoch 2 Data
        batch4_metrics = tf.constant(
            [0.8, 0.9, 0.6, 0.1, 0.7, 0.3, 0.9, 0.9, 0.4, 0.6])
        batch4_labels = tf.constant([0, 1, 0, 1, 0, 1, 1, 1, 0, 0])
        batch5_metrics = tf.constant(
            [0.4, 0.4, 0.9, 0.1, 0.4, 0.9, 0.0, 0.8, 1.0, 0.1])
        batch5_labels = tf.constant([2, 2, 2, 2, 0, 1, 0, 0, 0, 0])
        batch6_metrics = tf.constant(
            [0.3, 0.9, 0.9, 0.5, 0.9, 0.8, 0.6, 0.1, 0.1, 0.2])
        batch6_labels = tf.constant([1, 1, 1, 1, 2, 2, 2, 2, 2, 2])

        cls.training_data = [
            [
                Data({'acc': batch1_metrics, 'y': batch1_labels}),
                Data({'acc': batch2_metrics, 'y': batch2_labels}),
                Data({'acc': batch3_metrics, 'y': batch3_labels})
            ],
            [
                Data({'acc': batch4_metrics, 'y': batch4_labels}),
                Data({'acc': batch5_metrics, 'y': batch5_labels}),
                Data({'acc': batch6_metrics, 'y': batch6_labels})
            ]
        ]

        cls.epoch_1_means = {0: 0.49, 1: 0.42, 2: 0.59}
        cls.epoch_2_means = {0: 0.54, 1: 0.66, 2: 0.45}
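As a sanity check on these fixtures, the stored per-label means can be reproduced directly from the batch data, which is consistent with LabelTracker recording the mean metric value per label for each epoch. A standalone NumPy check for label 0 in epoch 1 (not part of the test class):

import numpy as np

epoch1_metrics = np.array([0.9, 0.8, 0.1, 0.4, 0.9, 0.6, 0.1, 0.6, 0.8, 0.3,
                           0.3, 0.1, 0.0, 0.8, 0.5, 0.5, 0.6, 0.2, 0.5, 1.0,
                           0.3, 0.4, 0.7, 0.9, 0.3, 0.0, 0.3, 0.7, 0.8, 0.6])
epoch1_labels = np.array([2, 1, 0, 1, 0, 1, 1, 1, 0, 0,
                          1, 1, 1, 1, 0, 1, 0, 0, 0, 0,
                          2, 2, 2, 2, 2, 0, 2, 2, 2, 2])
print(round(float(np.mean(epoch1_metrics[epoch1_labels == 0])), 6))  # 0.49, matching epoch_1_means[0]

Example #21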
    def setUpClass(cls):
        # Epoch 1 Data
        batch1_metrics = tf.constant(
            [0.9, 0.8, 0.1, 0.4, 0.9, 0.6, 0.1, 0.6, 0.8, 0.3])
        batch1_idx = tf.constant([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
        batch2_metrics = tf.constant(
            [0.3, 0.1, 0.0, 0.8, 0.5, 0.5, 0.6, 0.2, 0.5, 1.0])
        batch2_idx = tf.constant([10, 11, 12, 13, 14, 15, 16, 17, 18, 19])
        batch3_metrics = tf.constant(
            [0.3, 0.4, 0.7, 0.9, 0.3, 0.0, 0.3, 0.7, 0.8, 0.6])
        batch3_idx = tf.constant([20, 21, 22, 23, 24, 25, 26, 27, 28, 29])
        # Epoch 2 Data
        batch4_metrics = tf.constant(
            [0.8, 0.9, 0.6, 0.1, 0.7, 0.3, 0.9, 0.9, 0.4, 0.6])
        batch4_idx = tf.constant([21, 2, 18, 3, 15, 22, 12, 27, 23, 9])
        batch5_metrics = tf.constant(
            [0.4, 0.4, 0.9, 0.1, 0.4, 0.9, 0.0, 0.8, 1.0, 0.1])
        batch5_idx = tf.constant([20, 5, 28, 8, 6, 25, 11, 13, 7, 16])
        batch6_metrics = tf.constant(
            [0.3, 0.9, 0.9, 0.5, 0.9, 0.8, 0.6, 0.1, 0.1, 0.2])
        batch6_idx = tf.constant([1, 19, 24, 10, 0, 29, 17, 14, 4, 26])

        cls.training_data = [
            [
                Data({'ce': batch1_metrics, 'idx': batch1_idx}),
                Data({'ce': batch2_metrics, 'idx': batch2_idx}),
                Data({'ce': batch3_metrics, 'idx': batch3_idx})
            ],
            [
                Data({'ce': batch4_metrics, 'idx': batch4_idx}),
                Data({'ce': batch5_metrics, 'idx': batch5_idx}),
                Data({'ce': batch6_metrics, 'idx': batch6_idx})
            ]
        ]
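Example #22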
    def test_save_and_load_state(self):
        def instantiate_system():
            tracker = LabelTracker(label='y',
                                   metric='acc',
                                   bounds=[None, 'range'],
                                   outputs='out')
            system = sample_system_object()
            system.traces.append(tracker)
            tracker.system = system
            return system, tracker

        system, tracker = instantiate_system()

        # Make some changes
        response = Data()
        self._simulate_training(tracker, response)

        # Save the state
        save_path = tempfile.mkdtemp()
        system.save_state(save_dir=save_path)

        # reinstantiate system and load the state
        system, tracker = instantiate_system()
        system.load_state(save_path)

        loaded_tracker = system.traces[-1]
        response = list(loaded_tracker.label_summaries.values())
        with self.subTest('Check that all 3 labels have summaries'):
            self.assertEqual(3, len(response))
        with self.subTest('Check correct mean values (epoch 1)'):
            self.assertDictEqual(
                self.epoch_1_means, {
                    elem.name: round(elem.history['train']['acc'][3], 6)
                    for elem in response
                })
        with self.subTest('Check correct mean values (epoch 2)'):
            self.assertDictEqual(
                self.epoch_2_means, {
                    elem.name: round(elem.history['train']['acc'][6], 6)
                    for elem in response
                })
Example #23
 def on_epoch_end(self, data: Data) -> None:
     # Report the mean Dice score accumulated over all instances seen during the epoch
     data.write_with_log(self.outputs[0], np.mean(self.dice))
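Example #24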
 def test_on_epoch_begin(self):
     self.calibration_error.on_epoch_begin(data=Data())
     with self.subTest('Check initial value of y_true'):
         self.assertEqual(self.calibration_error.y_true, [])
     with self.subTest('Check initial value of y_pred'):
         self.assertEqual(self.calibration_error.y_pred, [])
Example #25
 def on_batch_end(self, data: Data) -> None:
     # TensorFlow backend: print the largest absolute gradient value seen in this batch
     grads = tf.reduce_max(tf.abs(data.get('grads')))
     print(grads)
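Example #26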
 def on_batch_end(self, data: Data) -> None:
     # PyTorch backend: print the largest absolute gradient value seen in this batch
     grads = torch.max(torch.abs(data.get('grads')))
     print(grads)
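Tying the snippets together: everything above is a method of a FastEstimator Trace subclass (metric traces, trackers, or debugging helpers). Below is a minimal, self-contained sketch of how such a trace might be defined and attached to an Estimator; the class name, keys, and defaults are assumptions for illustration, and import paths can vary between FastEstimator versions.

import numpy as np

import fastestimator as fe
from fastestimator.backend import to_number
from fastestimator.trace import Trace
from fastestimator.util import Data  # in some versions: from fastestimator.util.data import Data


class MaxGradient(Trace):  # hypothetical trace name
    def __init__(self, grad_key="grads", output_name="max_grad"):
        super().__init__(inputs=grad_key, outputs=output_name, mode="train")
        self.grad_key = grad_key

    def on_batch_end(self, data: Data) -> None:
        # Convert the backend tensor to NumPy, then log the largest absolute gradient entry
        grads = np.max(np.abs(to_number(data[self.grad_key])))
        data.write_with_log(self.outputs[0], grads)


# Usage, assuming a pipeline and network have already been built:
# estimator = fe.Estimator(pipeline=pipeline, network=network, epochs=2, traces=[MaxGradient()])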