Example #1
 def test_l1_loss_value(self):
     with self.test_session():
         predicted = tf.constant([1, 1], dtype=tf.float32, name='predicted')
         labels = tf.constant([1, 0], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='L1Loss')
         computed_l1_loss = test_loss_func(predicted, labels)
         self.assertAlmostEqual(computed_l1_loss.eval(), 0.5)
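The asserted value is consistent with 'L1Loss' reducing to the mean absolute difference: (|1 - 1| + |1 - 0|) / 2 = 0.5. A minimal NumPy sketch of that assumed reduction (inferred from the assertion, not taken from the LossFunction source):

    import numpy as np

    predicted = np.array([1.0, 1.0])
    labels = np.array([1.0, 0.0])
    # Mean absolute difference -- assumed to be what L1Loss computes here.
    print(np.mean(np.abs(predicted - labels)))  # 0.5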
Example #2
 def test_MAE_loss_value_weight(self):
     with self.test_session():
         weights = tf.constant([1, 2], dtype=tf.float32, name='weights')
         predicted = tf.constant([1, 1], dtype=tf.float32, name='predicted')
         labels = tf.constant([1, 0], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='MAE')
         computed_MAE_loss = test_loss_func(predicted, labels, weights)
         self.assertAlmostEqual(computed_MAE_loss.eval(), 2.0 / 3.0)
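With a weight map, the asserted value matches a weighted mean of the absolute differences, normalised by the weight sum: (1*|1-1| + 2*|1-0|) / (1 + 2) = 2/3. A sketch under that assumption:

    import numpy as np

    weights = np.array([1.0, 2.0])
    predicted = np.array([1.0, 1.0])
    labels = np.array([1.0, 0.0])
    # Weighted mean absolute error, normalised by the sum of the weights.
    print(np.sum(weights * np.abs(predicted - labels)) / np.sum(weights))  # 0.666...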
Example #3
 def test_MAE_loss_value(self):
     with self.test_session():
         predicted = tf.constant([[1, 2]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1.2, 0]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='MAE')
         computed_MAE_loss = test_loss_func(predicted, labels)
         self.assertAlmostEqual(computed_MAE_loss.eval(), 1.1)
Example #4
 def test_rmse_loss_value(self):
     with self.test_session():
         predicted = tf.constant([[1.2, 1]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1, 0]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='RMSE')
         computed_rmse_loss = test_loss_func(predicted, labels)
         self.assertAlmostEqual(computed_rmse_loss.eval(), 0.7211, places=4)
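The expectation follows from the usual root-mean-square error: sqrt(((1.2 - 1)^2 + (1 - 0)^2) / 2) = sqrt(0.52) ~ 0.7211. Verified with NumPy:

    import numpy as np

    predicted = np.array([1.2, 1.0])
    labels = np.array([1.0, 0.0])
    print(np.sqrt(np.mean((predicted - labels) ** 2)))  # 0.72111...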
Example #5
 def test_rmse_loss_value_weight(self):
     with self.cached_session():
         weights = tf.constant([[1, 2.1]], dtype=tf.float32, name='weights')
         predicted = tf.constant([[1, 1]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1, 0]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='RMSE')
         computed_rmse_loss = test_loss_func(predicted, labels, weights)
         self.assertAlmostEqual(computed_rmse_loss.eval(), 0.8231, places=4)
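The weighted variant appears to normalise the weighted squared residuals by the weight sum before taking the square root: sqrt((1*0 + 2.1*1) / (1 + 2.1)) = sqrt(2.1 / 3.1) ~ 0.8231. A sketch under that assumption:

    import numpy as np

    weights = np.array([1.0, 2.1])
    predicted = np.array([1.0, 1.0])
    labels = np.array([1.0, 0.0])
    # Weighted mean of squared residuals, then the square root (assumed).
    print(np.sqrt(np.sum(weights * (predicted - labels) ** 2) / np.sum(weights)))  # ~0.8231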
Example #6
 def test_huber_loss_value(self):
     with self.test_session():
         predicted = tf.constant([[1, 2, 0.5]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1, 0, 1]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='Huber')
         computed_huber_loss = test_loss_func(predicted, labels)
         self.assertAlmostEqual(computed_huber_loss.eval(),
                                0.5417,
                                places=4)
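With the conventional Huber loss at delta = 1 (quadratic x^2/2 inside the threshold, linear |x| - 1/2 outside), the residuals 0, 2 and -0.5 contribute 0, 1.5 and 0.125, giving a mean of 1.625/3 ~ 0.5417. A NumPy sketch, assuming delta = 1:

    import numpy as np

    def huber(x, delta=1.0):
        # Quadratic below delta, linear above (delta = 1 is an assumption).
        return np.where(np.abs(x) <= delta,
                        0.5 * x ** 2,
                        delta * (np.abs(x) - 0.5 * delta))

    residuals = np.array([1.0, 2.0, 0.5]) - np.array([1.0, 0.0, 1.0])
    print(np.mean(huber(residuals)))  # 0.54166... ~ 0.5417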
Example #7
 def test_smooth_loss_value(self):
     with self.cached_session():
         predicted = tf.constant([[1, 2.375, 0.5]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1, 0, 1]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='SmoothL1')
         computed_smooth_loss = test_loss_func(predicted, labels)
         self.assertAlmostEqual(computed_smooth_loss.eval(),
                                2.125 / 3,
                                places=4)
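The asserted value is consistent with a smooth-L1 curve that is quadratic (x^2/2) for small residuals and linear with an offset for large ones: the residuals 0, 2.375 and -0.5 would contribute 0, 2.0 and 0.125, summing to 2.125 over 3 elements. The exact transition used by LossFunction is not shown by this test, so the piecewise form below is fitted to the asserted values rather than copied from the source:

    import numpy as np

    def smooth_l1(x):
        # Fitted form: quadratic for |x| <= 0.5, linear for larger residuals;
        # the 0.5 < |x| < 1 region is not exercised by this test.
        a = np.abs(x)
        return np.where(a <= 0.5, 0.5 * a ** 2, a - 0.375)

    residuals = np.array([1.0, 2.375, 0.5]) - np.array([1.0, 0.0, 1.0])
    print(np.mean(smooth_l1(residuals)))  # 2.125 / 3 ~ 0.7083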
Example #8
 def test_l2_loss_value_weight(self):
     with self.test_session():
         weights = tf.constant([[1, 2]], dtype=tf.float32, name='weights')
         predicted = tf.constant([[1, 2]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1, 0]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='L2Loss')
         computed_l2_loss = test_loss_func(predicted, labels, weights)
         self.assertAlmostEqual(computed_l2_loss.eval(),
                                8.0 / 9.0,
                                places=3)
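The 8/9 expectation is consistent with the residuals being scaled by weight / sum(weights) before a tf.nn.l2_loss-style reduction (sum(x^2) / 2): the scaled residual vector is [0, 2*2/3] = [0, 4/3], and (4/3)^2 / 2 = 8/9. A NumPy sketch of that assumed pipeline:

    import numpy as np

    weights = np.array([1.0, 2.0])
    predicted = np.array([1.0, 2.0])
    labels = np.array([1.0, 0.0])
    # Residuals scaled by weight / sum(weights), then sum(x^2) / 2,
    # mirroring tf.nn.l2_loss (inferred from the asserted value).
    scaled = (predicted - labels) * weights / np.sum(weights)
    print(np.sum(scaled ** 2) / 2.0)  # 8/9 ~ 0.888...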
Example #9
    def test_cosine_loss_value_equal2(self):
        with self.cached_session():
            predicted = tf.constant([[[1, 0], [0.5, 0.5]]],
                                    dtype=tf.float32,
                                    name='predicted')
            labels = tf.constant([[[1, 0], [0.5, 0.5]]],
                                 dtype=tf.float32,
                                 name='labels')
            test_loss_func = LossFunction(loss_type='Cosine')
            computed_cosine_loss = test_loss_func(predicted, labels)
            self.assertAlmostEqual(computed_cosine_loss.eval(), 0)
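A cosine loss of the form 1 - cos(prediction, target) per vector explains the zero: each row equals its target, so every cosine similarity is 1 and every term vanishes. A sketch under that assumption:

    import numpy as np

    def cosine_loss(p, g):
        # 1 - cosine similarity per row (assumed form of the 'Cosine' loss).
        sim = np.sum(p * g, axis=-1) / (
            np.linalg.norm(p, axis=-1) * np.linalg.norm(g, axis=-1))
        return 1.0 - sim

    p = np.array([[1.0, 0.0], [0.5, 0.5]])
    print(np.mean(cosine_loss(p, p)))  # 0.0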
Example #10
 def test_huber_loss_value_weight(self):
     with self.test_session():
         weights = tf.constant([1, 2, 1], dtype=tf.float32, name='weights')
         predicted = tf.constant([1, 2, 0.5],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([1, 0, 1], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='Huber')
         computed_huber_loss = test_loss_func(predicted,
                                              labels,
                                              weight_map=weights)
         self.assertAlmostEqual(computed_huber_loss.eval(), 3.125 / 4)
Example #11
 def test_cosine_loss_value_weight(self):
     with self.cached_session():
         weights = tf.constant([[[2], [1]]],
                               dtype=tf.float32,
                               name='weights')
         predicted = tf.constant([[[1, 0], [0.5, 0.5]]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[[0, 1], [0.5, 0.5]]],
                              dtype=tf.float32,
                              name='labels')
         test_loss_func = LossFunction(loss_type='Cosine')
         computed_cosine_loss = test_loss_func(predicted, labels, weights)
         self.assertAlmostEqual(computed_cosine_loss.eval(), 2.0 / 3.0)
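Here the first vector pair is orthogonal (loss 1, weight 2) and the second is identical (loss 0, weight 1), so a weighted mean normalised by the weight sum gives (2*1 + 1*0) / 3 = 2/3. A short check under the same assumed form of the loss:

    import numpy as np

    weights = np.array([2.0, 1.0])
    p = np.array([[1.0, 0.0], [0.5, 0.5]])
    g = np.array([[0.0, 1.0], [0.5, 0.5]])
    sim = np.sum(p * g, axis=-1) / (
        np.linalg.norm(p, axis=-1) * np.linalg.norm(g, axis=-1))
    # Weighted mean of (1 - similarity), normalised by the weight sum (assumed).
    print(np.sum(weights * (1.0 - sim)) / np.sum(weights))  # 2/3 ~ 0.6667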
Example #12
 def test_smooth_loss_value_weight(self):
     with self.cached_session():
         weights = tf.constant([[1, 2, 1]],
                               dtype=tf.float32,
                               name='weights')
         predicted = tf.constant([[1, 2.375, 0.5]],
                                 dtype=tf.float32,
                                 name='predicted')
         labels = tf.constant([[1, 0, 1]], dtype=tf.float32, name='labels')
         test_loss_func = LossFunction(loss_type='SmoothL1')
         computed_smooth_l1_loss = test_loss_func(predicted,
                                                  labels,
                                                  weight_map=weights)
         self.assertAlmostEqual(computed_smooth_l1_loss.eval(), 4.125 / 4)
Example #13
    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        def switch_sampler(for_training):
            with tf.name_scope('train' if for_training else 'validation'):
                sampler = self.get_sampler()[0][0 if for_training else -1]
                return sampler.pop_batch_op()

        if self.is_training:
            if self.action_param.validation_every_n > 0:
                data_dict = tf.cond(tf.logical_not(self.is_validation),
                                    lambda: switch_sampler(True),
                                    lambda: switch_sampler(False))
            else:
                data_dict = switch_sampler(for_training=True)

            image = tf.cast(data_dict['image'], tf.float32)
            net_args = {
                'is_training': self.is_training,
                'keep_prob': self.net_param.keep_prob
            }
            net_out = self.net(image, **net_args)
            with tf.name_scope('Optimiser'):
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.action_param.lr)
            loss_func = LossFunction(loss_type=self.action_param.loss_type)

            crop_layer = CropLayer(border=self.regression_param.loss_border,
                                   name='crop-88')
            prediction = crop_layer(net_out)
            ground_truth = crop_layer(data_dict.get('output', None))
            weight_map = None if data_dict.get('weight', None) is None \
                else crop_layer(data_dict.get('weight', None))
            data_loss = loss_func(prediction=prediction,
                                  ground_truth=ground_truth,
                                  weight_map=weight_map)

            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            else:
                loss = data_loss
            grads = self.optimiser.compute_gradients(loss)

            # Gradient clipping associated with VDSR3D:
            # gradients are clipped by value rather than by global norm.
            # The VDSR authors do not specify a clipping threshold.
            # grads2, vars2 = zip(*grads)
            # grads2, _ = tf.clip_by_global_norm(grads2, 5.0)
            # grads = zip(grads2, vars2)
            grads = [(tf.clip_by_value(grad, -0.00001 / self.action_param.lr,
                                       +0.00001 / self.action_param.lr), val)
                     for grad, val in grads if grad is not None]

            # collecting gradient variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(var=data_loss,
                                                name='Loss',
                                                average_over_devices=False,
                                                collection=CONSOLE)
            outputs_collector.add_to_collection(var=data_loss,
                                                name='Loss',
                                                average_over_devices=True,
                                                summary_type='scalar',
                                                collection=TF_SUMMARIES)
        elif self.is_inference:
            data_dict = switch_sampler(for_training=False)
            image = tf.cast(data_dict['image'], tf.float32)
            net_args = {
                'is_training': self.is_training,
                'keep_prob': self.net_param.keep_prob
            }
            net_out = self.net(image, **net_args)

            crop_layer = CropLayer(border=0, name='crop-88')
            post_process_layer = PostProcessingLayer('IDENTITY')
            net_out = post_process_layer(crop_layer(net_out))

            outputs_collector.add_to_collection(var=net_out,
                                                name='window',
                                                average_over_devices=False,
                                                collection=NETWORK_OUTPUT)
            outputs_collector.add_to_collection(
                var=data_dict['image_location'],
                name='location',
                average_over_devices=False,
                collection=NETWORK_OUTPUT)
            self.initialise_aggregator()
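A detail specific to this variant: the clip bounds scale with 1 / lr, so the applied update lr * grad is capped at +/- 1e-5 regardless of the configured learning rate. A standalone NumPy sketch of the same rule (the numbers are illustrative):

    import numpy as np

    lr = 0.01
    grads = np.array([0.5, -3.0, 1e-7])
    # Clip by value at +/- 1e-5 / lr, so the applied step lr * grad
    # is bounded by +/- 1e-5 whatever the learning rate is.
    clipped = np.clip(grads, -1e-5 / lr, 1e-5 / lr)
    print(lr * clipped)  # every entry lies within [-1e-5, 1e-5]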
Example #14
    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):

        def switch_sampler(for_training):
            with tf.name_scope('train' if for_training else 'validation'):
                sampler = self.get_sampler()[0][0 if for_training else -1]
                return sampler.pop_batch_op()

        if self.is_training:
            if self.action_param.validation_every_n > 0:
                data_dict = tf.cond(tf.logical_not(self.is_validation),
                                    lambda: switch_sampler(True),
                                    lambda: switch_sampler(False))
            else:
                data_dict = switch_sampler(for_training=True)

            image = tf.cast(data_dict['image'], tf.float32)
            net_out = self.net(image, is_training=self.is_training)
            with tf.name_scope('Optimiser'):
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.action_param.lr)
            loss_func = LossFunction(
                loss_type=self.action_param.loss_type)

            crop_layer = CropLayer(
                border=self.regression_param.loss_border, name='crop-88')
            prediction = crop_layer(net_out)
            ground_truth = crop_layer(data_dict.get('output', None))
            weight_map = None if data_dict.get('weight', None) is None \
                else crop_layer(data_dict.get('weight', None))
            data_loss = loss_func(prediction=prediction,
                                  ground_truth=ground_truth,
                                  weight_map=weight_map)

            reg_losses = tf.get_collection(
                tf.GraphKeys.REGULARIZATION_LOSSES)
            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            else:
                loss = data_loss
            grads = self.optimiser.compute_gradients(loss)
            # collecting gradient variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(
                var=data_loss, name='Loss',
                average_over_devices=False, collection=CONSOLE)
            outputs_collector.add_to_collection(
                var=data_loss, name='Loss',
                average_over_devices=True, summary_type='scalar',
                collection=TF_SUMMARIES)
        else:
            data_dict = switch_sampler(for_training=False)
            image = tf.cast(data_dict['image'], tf.float32)
            net_out = self.net(image, is_training=self.is_training)

            crop_layer = CropLayer(border=0, name='crop-88')
            post_process_layer = PostProcessingLayer('IDENTITY')
            net_out = post_process_layer(crop_layer(net_out))

            outputs_collector.add_to_collection(
                var=net_out, name='window',
                average_over_devices=False, collection=NETWORK_OUTPUT)
            outputs_collector.add_to_collection(
                var=data_dict['image_location'], name='location',
                average_over_devices=False, collection=NETWORK_OUTPUT)
            init_aggregator = \
                self.SUPPORTED_SAMPLING[self.net_param.window_sampling][2]
            init_aggregator()
Example #15
    def connect_data_and_network(self,
                                 outputs_collector=None,
                                 gradients_collector=None):
        def switch_sampler(for_training):
            with tf.name_scope('train' if for_training else 'validation'):
                sampler = self.get_sampler()[0][0 if for_training else -1]
                return sampler.pop_batch_op()

        if self.is_training:
            if self.action_param.validation_every_n > 0:
                data_dict = tf.cond(tf.logical_not(self.is_validation),
                                    lambda: switch_sampler(for_training=True),
                                    lambda: switch_sampler(for_training=False))
            else:
                data_dict = switch_sampler(for_training=True)

            image = tf.cast(data_dict['image'], tf.float32)
            net_args = {
                'is_training': self.is_training,
                'keep_prob': self.net_param.keep_prob
            }
            net_out = self.net(image, **net_args)

            with tf.name_scope('Optimiser'):
                optimiser_class = OptimiserFactory.create(
                    name=self.action_param.optimiser)
                self.optimiser = optimiser_class.get_instance(
                    learning_rate=self.action_param.lr)
            loss_func = LossFunction(loss_type=self.action_param.loss_type)

            crop_layer = CropLayer(border=self.regression_param.loss_border)
            weight_map = data_dict.get('weight', None)
            weight_map = None if weight_map is None else crop_layer(weight_map)
            data_loss = loss_func(prediction=crop_layer(net_out),
                                  ground_truth=crop_layer(data_dict['output']),
                                  weight_map=weight_map)
            reg_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
            if self.net_param.decay > 0.0 and reg_losses:
                reg_loss = tf.reduce_mean(
                    [tf.reduce_mean(reg_loss) for reg_loss in reg_losses])
                loss = data_loss + reg_loss
            else:
                loss = data_loss

            # Get all vars
            to_optimise = tf.trainable_variables()
            vars_to_freeze = \
                self.action_param.vars_to_freeze or \
                self.action_param.vars_to_restore
            if vars_to_freeze:
                import re
                var_regex = re.compile(vars_to_freeze)
                # Only optimise vars that are not frozen
                to_optimise = \
                    [v for v in to_optimise if not var_regex.search(v.name)]
                tf.logging.info(
                    "Optimizing %d out of %d trainable variables, "
                    "the other variables are fixed (--vars_to_freeze %s)",
                    len(to_optimise), len(tf.trainable_variables()),
                    vars_to_freeze)

            grads = self.optimiser.compute_gradients(
                loss, var_list=to_optimise, colocate_gradients_with_ops=True)
            # collecting gradient variables
            gradients_collector.add_to_collection([grads])
            # collecting output variables
            outputs_collector.add_to_collection(var=data_loss,
                                                name='loss',
                                                average_over_devices=False,
                                                collection=CONSOLE)
            outputs_collector.add_to_collection(var=data_loss,
                                                name='loss',
                                                average_over_devices=True,
                                                summary_type='scalar',
                                                collection=TF_SUMMARIES)
        elif self.is_inference:
            data_dict = switch_sampler(for_training=False)
            image = tf.cast(data_dict['image'], tf.float32)
            net_args = {
                'is_training': self.is_training,
                'keep_prob': self.net_param.keep_prob
            }
            net_out = self.net(image, **net_args)
            net_out = PostProcessingLayer('IDENTITY')(net_out)

            outputs_collector.add_to_collection(var=net_out,
                                                name='window',
                                                average_over_devices=False,
                                                collection=NETWORK_OUTPUT)
            outputs_collector.add_to_collection(
                var=data_dict['image_location'],
                name='location',
                average_over_devices=False,
                collection=NETWORK_OUTPUT)
            self.initialise_aggregator()
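What sets this variant apart is the regex-based freezing: any trainable variable whose name matches --vars_to_freeze (or, failing that, --vars_to_restore) is dropped from the var_list handed to compute_gradients, so its weights are never updated. A minimal sketch of the same filtering logic, with made-up variable names:

    import re

    trainable = ['vdsr/conv_1/w:0', 'vdsr/conv_1/b:0', 'vdsr/fc/w:0']
    vars_to_freeze = 'conv_1'  # hypothetical --vars_to_freeze value
    var_regex = re.compile(vars_to_freeze)
    # Keep only variables whose names do NOT match the freeze pattern.
    to_optimise = [v for v in trainable if not var_regex.search(v)]
    print(to_optimise)  # ['vdsr/fc/w:0']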