Code Example #1
    def accuracy_op(self):
        # Re-clear this cached op whenever the input layer's lazyprops clear.
        clear_lazyprop_on_lazyprop_cleared(self, 'accuracy_op',
                                           self.input_layer)
        # Fraction of examples whose top-1 prediction matches the target.
        return tf.reduce_mean(
            tf.cast(
                tf.nn.in_top_k(self._pre_softmax_activation_predict,
                               tf.argmax(self.target_placeholder, 1), 1),
                tf.float32))
Code Example #2
    def _pre_softmax_activation_train(self):
        # Rebuild when the input layer's training activation is cleared.
        clear_lazyprop_on_lazyprop_cleared(self,
                                           '_pre_softmax_activation_train',
                                           self.input_layer,
                                           'activation_train')
        with self.name_scope(is_train=True):
            input_activation = self._process_input_activation_train(
                self.input_layer.activation_train)
            return self._layer_activation(input_activation, True)
Code Example #3
    def _pre_softmax_activation_predict(self):
        # Rebuild when the input layer's prediction activation is cleared.
        clear_lazyprop_on_lazyprop_cleared(self,
                                           '_pre_softmax_activation_predict',
                                           self.input_layer,
                                           'activation_predict')
        with self.name_scope(is_predict=True):
            input_activation = self._process_input_activation_predict(
                self.input_layer.activation_predict)
            return self._layer_activation(input_activation, False)
Code Example #4
    def log_probability_of_targets_op(self):
        # Rebuild when any lazyprop on the input layer is cleared.
        clear_lazyprop_on_lazyprop_cleared(self,
                                           'log_probability_of_targets_op',
                                           self.input_layer)
        # Total log-likelihood of the (one-hot) targets under the softmax.
        return tf.reduce_sum(
            tf.log(
                tf.reduce_sum(
                    tf.nn.softmax(self.activation_predict) *
                    self.target_placeholder, 1)))
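
For one-hot targets, multiplying the softmax output by the target vector and summing over classes picks out the probability assigned to the correct class, so the op above is the total log-likelihood of the labels. A standalone numpy sanity check of that arithmetic (not part of the project):

import numpy as np

logits = np.array([[2.0, 0.5, 0.1],
                   [0.2, 1.5, 0.3]])
targets = np.array([[1.0, 0.0, 0.0],   # one-hot labels
                    [0.0, 1.0, 0.0]])

probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
# (probs * targets).sum(axis=1) is p(correct class) for each example.
log_likelihood = np.log((probs * targets).sum(axis=1)).sum()
print(log_likelihood)  # sum over examples of log p(target)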
Code Example #5
File: base_layer.py Project: afcarl/tensordynamic
    def bactivation_train(self):
        """The activation used for training this layer, this will often be the same as prediction except with dropout or
        random noise applied.

        Returns:
            tensorflow.Tensor
        """
        clear_lazyprop_on_lazyprop_cleared(self, 'bactivation_train', self, 'activation_train')
        return self._layer_bactivation(self.activation_train, True)
Code Example #6
File: base_layer.py Project: afcarl/tensordynamic
    def bactivation_predict(self):
        """The activation used for predictions from this layer, this will often be the same as training except without
        dropout or random noise applied.

        Returns:
            tensorflow.Tensor
        """
        clear_lazyprop_on_lazyprop_cleared(self, 'bactivation_predict', self, 'activation_predict')
        return self._layer_bactivation(self.activation_predict, False)
Code Example #7
File: base_layer.py Project: afcarl/tensordynamic
    def gradients_with_respect_to_error_op(self):
        clear_lazyprop_on_lazyprop_cleared(self, "gradients_with_respect_to_error_op",
                                           self.last_layer, "target_loss_op_predict")

        # One gradient op per resizable variable, taken against the
        # network's prediction-time loss.
        gradients_ops = []
        for variable in self.resizable_variables:
            gradients_ops.append(tf.gradients(self.last_layer.target_loss_op_predict, variable)[0])

        return gradients_ops
Code Example #8
File: base_layer.py Project: afcarl/tensordynamic
    def hessien_with_respect_to_error_op(self):
        clear_lazyprop_on_lazyprop_cleared(self, "hessien_with_respect_to_error_op",
                                           self, "gradients_with_respect_to_error_op")

        hessian_ops = []
        for variable, gradients in zip(self.resizable_variables, self.gradients_with_respect_to_error_op):
            # tf.gradients sums over the components of `gradients`, so this
            # yields row sums of the Hessian rather than its exact diagonal.
            hessian_ops.append(tf.gradients(gradients, variable)[0])

        # TODO: use tf.hessians in tensorflow 1, along with tf.diag_part
        return hessian_ops
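
The TODO above can be acted on once the project targets TensorFlow 1.x, where tf.hessians and tf.diag_part are available. A minimal sketch of that replacement, assuming the installed tf.hessians supports the shapes of the variables involved (early 1.x releases only accepted rank-1 tensors); this is not the project's code:

import tensorflow as tf

def hessian_diagonals(loss_op, variables):
    # tf.hessians returns, for each variable of shape S, a Hessian of
    # shape S + S; tf.diag_part reduces that back to shape S, giving the
    # exact diagonal rather than the row sums computed above.
    return [tf.diag_part(hessian)
            for hessian in tf.hessians(loss_op, variables)]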
Code Example #9
File: base_layer.py Project: afcarl/tensordynamic
    def activation_predict(self):
        """The activation used for predictions from this layer, this will often be the same as training except without
        dropout or random noise applied.

        Returns:
            tensorflow.Tensor
        """
        clear_lazyprop_on_lazyprop_cleared(self, 'activation_predict', self.input_layer)
        input_tensor = self.input_layer.activation_predict

        with self.name_scope(is_predict=True):
            input_tensor = self._process_input_activation_predict(input_tensor)
            return self._layer_activation(input_tensor, False)
Code Example #10
File: base_layer.py Project: afcarl/tensordynamic
    def activation_train(self):
        """The activation used for training this layer, this will often be the same as prediction except with dropout or
        random noise applied.

        Returns:
            tensorflow.Tensor
        """
        clear_lazyprop_on_lazyprop_cleared(self, 'activation_train', self.input_layer)
        input_tensor = self.input_layer.activation_train

        with self.name_scope(is_train=True):
            input_tensor = self._process_input_activation_train(input_tensor)

            return self._layer_activation(input_tensor, True)
Code Example #11
File: test_lazyprop.py Project: afcarl/tensordynamic
    def test_clear_lazyprop_on_lazyprop_cleared(self):
        prop_class_1 = _PropClass()
        prop_class_2 = _PropClass()

        # Link the two: clearing prop_class_1.lazyprop should also clear
        # prop_class_2.lazyprop.
        clear_lazyprop_on_lazyprop_cleared(prop_class_2, 'lazyprop',
                                           prop_class_1, 'lazyprop')

        prop_class_1.STATIC_VAL = 1
        prop_class_2.STATIC_VAL = 2

        # First access computes and caches the values.
        self.assertEqual(prop_class_1.lazyprop, 1)
        self.assertEqual(prop_class_2.lazyprop, 2)

        prop_class_1.STATIC_VAL = 3
        prop_class_2.STATIC_VAL = 4

        clear_lazyprop(prop_class_1, 'lazyprop')

        # The clear propagated through the registered link, so both
        # properties re-evaluate against the new STATIC_VALs.
        self.assertEqual(prop_class_1.lazyprop, 3)
        self.assertEqual(prop_class_2.lazyprop, 4)
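
_PropClass itself is not shown on this page. A minimal fixture consistent with the test might look like the following; the decorator and import path are assumptions based on the project's naming:

from tensor_dynamic.lazyprop import lazyprop  # import path is a guess

class _PropClass(object):
    STATIC_VAL = 0  # shadowed by the per-instance assignments in the test

    @lazyprop
    def lazyprop(self):
        # Evaluated on first access, then cached until explicitly cleared.
        return self.STATIC_VAL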
Code Example #12
    def target_loss_op_predict(self):
        # Rebuild when any lazyprop on the input layer is cleared.
        clear_lazyprop_on_lazyprop_cleared(self, 'target_loss_op_predict',
                                           self.input_layer)
        with self.name_scope(is_predict=True):
            # Loss is computed on the pre-softmax (logit) activations.
            return self._target_loss_op(self._pre_softmax_activation_predict)
Code Example #13
    def activation_train(self):
        # Rebuild when the input layer's training activation is cleared.
        clear_lazyprop_on_lazyprop_cleared(self, 'activation_train',
                                           self.input_layer,
                                           'activation_train')
        with self.name_scope(is_train=True):
            # Softmax over the cached pre-softmax training activation.
            return tf.nn.softmax(self._pre_softmax_activation_train)
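
Every example on this page relies on the same contract: lazyprop caches a property on first access, clear_lazyprop drops the cache, and clear_lazyprop_on_lazyprop_cleared registers a dependency so that clearing one property clears another (omitting the source property name subscribes to every lazyprop on the source, as the three-argument calls above do). The real implementation lives in the tensordynamic project; what follows is only a sketch of that contract, not the project's code:

_CACHE_PREFIX = '_lazy_'
_LISTENER_PREFIX = '_lazy_listeners_'


def lazyprop(fn):
    """Property computed once and cached until explicitly cleared."""
    attr_name = _CACHE_PREFIX + fn.__name__

    @property
    def _lazyprop(self):
        if not hasattr(self, attr_name):
            setattr(self, attr_name, fn(self))
        return getattr(self, attr_name)

    return _lazyprop


def clear_lazyprop(instance, name):
    """Drop the cached value and propagate to registered dependents."""
    attr_name = _CACHE_PREFIX + name
    if not hasattr(instance, attr_name):
        return  # already clear; also stops dependency cycles recursing
    delattr(instance, attr_name)
    # Notify listeners on this property and in the catch-all bucket.
    for bucket in (_LISTENER_PREFIX + name, _LISTENER_PREFIX):
        for dependent, dependent_name in getattr(instance, bucket, ()):
            clear_lazyprop(dependent, dependent_name)


def clear_lazyprop_on_lazyprop_cleared(dependent, dependent_name,
                                       source, source_name=None):
    """When source's lazyprop clears, clear dependent's lazyprop too.

    source_name=None subscribes to all of source's lazyprops, matching
    the three-argument calls in the examples above.
    """
    bucket = _LISTENER_PREFIX + (source_name or '')
    listeners = getattr(source, bucket, None)
    if listeners is None:
        listeners = []
        setattr(source, bucket, listeners)
    if (dependent, dependent_name) not in listeners:
        listeners.append((dependent, dependent_name))

Under this sketch the test in Code Example #11 passes: clearing prop_class_1's property deletes prop_class_2's cached value through the registered link, and both re-evaluate on next access.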