Example 1
    def set_learning_rate(self, lr, exp_decay=None):
        """

        :param lr: the learning rate (the initial rate when exp_decay is given).
        :param exp_decay: a dictionary like dict(decay_steps=None, decay_rate=None, staircase=False, name=None),
                        otherwise None.
        :return: None
        """
        if isinstance(exp_decay, dict):
            if 'decay_steps' not in exp_decay:
                raise KeyError(
                    'Set decay_steps in exp_decay=dict(decay_steps)')
            if 'decay_rate' not in exp_decay:
                raise KeyError('Set decay_rate in exp_decay=dict(decay_rate)')
            if 'staircase' not in exp_decay:
                exp_decay['staircase'] = False
            if 'name' not in exp_decay:
                exp_decay['name'] = None
            self._lr = tfnn.train.exponential_decay(
                lr,
                self.global_step,
                decay_steps=exp_decay['decay_steps'],
                decay_rate=exp_decay['decay_rate'],
                staircase=exp_decay['staircase'],
                name=exp_decay['name'])
        else:
            self._lr = tfnn.constant(lr)
        tfnn.scalar_summary('learning_rate', self._lr)
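
For reference, assuming tfnn.train.exponential_decay follows TensorFlow's standard schedule (lr * decay_rate ** (global_step / decay_steps)), here is a minimal pure-Python sketch of the rate it produces, for illustration only:

def exponential_decay(lr, global_step, decay_steps, decay_rate, staircase=False):
    # staircase=True decays in discrete jumps instead of continuously
    exponent = global_step // decay_steps if staircase else global_step / decay_steps
    return lr * decay_rate ** exponent

# e.g. a rate of 0.1 that halves every 1000 steps:
# exponential_decay(0.1, 2000, 1000, 0.5) -> 0.025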
Example 2
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            if self.method == 'softmax':
                self.predictions = tfnn.nn.softmax(self.layers_results['final'][-1], name='predictions')
            elif self.method == 'sigmoid':
                self.predictions = tfnn.nn.sigmoid(self.layers_results['final'][-1], name='predictions')
        with tfnn.name_scope('loss'):
            if self.method == 'softmax':
                self.cross_entropy = tfnn.nn.softmax_cross_entropy_with_logits(
                    self.layers_results['final'][-1],
                    self.target_placeholder,
                    name='xentropy')
            elif self.method == 'sigmoid':
                self.cross_entropy = tfnn.nn.sigmoid_cross_entropy_with_logits(
                    self.layers_results['final'][-1],
                    self.target_placeholder,
                    name='xentropy')
            else:
                raise ValueError("method should be one of ['sparse_softmax', 'softmax', 'sigmoid']")
            self.loss = tfnn.reduce_mean(self.cross_entropy, name='xentropy_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers

            tfnn.scalar_summary('loss', self.loss)
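
For the softmax branch, the per-example cross entropy followed by reduce_mean amounts to the following NumPy arithmetic (an illustrative sketch, independent of tfnn):

import numpy as np

def softmax_xentropy(logits, onehot_targets):
    z = logits - logits.max(axis=1, keepdims=True)              # stabilized log-softmax
    log_softmax = z - np.log(np.exp(z).sum(axis=1, keepdims=True))
    return -(onehot_targets * log_softmax).sum(axis=1)          # per-example 'xentropy'

logits = np.array([[2.0, 0.5, -1.0]])
targets = np.array([[1.0, 0.0, 0.0]])
loss = softmax_xentropy(logits, targets).mean()                 # the 'xentropy_mean' value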
Example 3
    def __init__(self, network):
        self.network = network
        if isinstance(self.network, tfnn.ClfNetwork):
            with tfnn.name_scope('accuracy'):
                with tfnn.name_scope('correct_prediction'):
                    correct_prediction = tfnn.equal(tfnn.argmax(network.predictions, 1),
                                                    tfnn.argmax(network.target_placeholder, 1),
                                                    name='correct_prediction')
                with tfnn.name_scope('accuracy'):
                    self.accuracy = tfnn.reduce_mean(tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')
                tfnn.scalar_summary('accuracy', self.accuracy)
        elif isinstance(self.network, tfnn.RegNetwork):
            self.first_time_lm = True
            self.first_time_soc = True
        with tfnn.name_scope('r2_score'):
            with tfnn.name_scope('ys_mean'):
                ys_mean = tfnn.reduce_mean(network.target_placeholder, reduction_indices=[0], name='ys_mean')
            with tfnn.name_scope('total_sum_squares'):
                ss_tot = tfnn.reduce_sum(tfnn.square(network.target_placeholder - ys_mean),
                                         reduction_indices=[0], name='total_sum_squares')
            # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
            with tfnn.name_scope('residual_sum_squares'):
                ss_res = tfnn.reduce_sum(tfnn.square(network.target_placeholder - network.predictions),
                                         reduction_indices=[0], name='residual_sum_squares')
            with tfnn.name_scope('coefficient_of_determination'):
                self.r2_score = tfnn.sub(tfnn.constant(1, dtype=tfnn.float32), (ss_res / ss_tot)[0],
                                         name='coefficient_of_determination')
            tfnn.scalar_summary('r2_score', self.r2_score)
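
The r2_score subgraph is the usual coefficient of determination, R^2 = 1 - SS_res / SS_tot. A NumPy sketch of the same arithmetic, assuming targets and predictions are [batch, n_outputs] arrays:

import numpy as np

def r2_score(targets, predictions):
    ys_mean = targets.mean(axis=0)
    ss_tot = np.square(targets - ys_mean).sum(axis=0)        # total sum of squares
    ss_res = np.square(targets - predictions).sum(axis=0)    # residual sum of squares
    return 1.0 - ss_res / ss_tot                             # one R^2 per output column

# the graph above then keeps only the first output column: (ss_res / ss_tot)[0]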
Example 4
    def set_learning_rate(self, lr, exp_decay=None):
        """

        :param lr: the learning rate (the initial rate when exp_decay is given).
        :param exp_decay: a dictionary like dict(decay_steps=None, decay_rate=None, staircase=False, name=None),
                        otherwise None.
        :return: None
        """
        if isinstance(exp_decay, dict):
            if 'decay_steps' not in exp_decay:
                raise KeyError('Set decay_steps in exp_decay=dict(decay_steps)')
            if 'decay_rate' not in exp_decay:
                raise KeyError('Set decay_rate in exp_decay=dict(decay_rate)')
            if 'staircase' not in exp_decay:
                exp_decay['staircase'] = False
            if 'name' not in exp_decay:
                exp_decay['name'] = None
            self._lr = tfnn.train.exponential_decay(lr, self.global_step,
                                                   decay_steps=exp_decay['decay_steps'],
                                                   decay_rate=exp_decay['decay_rate'],
                                                   staircase=exp_decay['staircase'],
                                                   name=exp_decay['name'])
        else:
            self._lr = tfnn.constant(lr)
        tfnn.scalar_summary('learning_rate', self._lr)
Example 5
    def _set_accuracy(self):
        if isinstance(self.network, tfnn.ClfNetwork):
            with tfnn.name_scope('accuracy'):
                correct_prediction = tfnn.equal(
                    tfnn.argmax(self.network.predictions, 1),
                    tfnn.argmax(self.network.target_placeholder, 1),
                    name='correct_prediction')
                self.accuracy = tfnn.reduce_mean(
                    tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')
                tfnn.scalar_summary('accuracy', self.accuracy)
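
On one-hot targets the accuracy op reduces to an argmax comparison; a quick NumPy illustration with made-up values:

import numpy as np

predictions = np.array([[0.1, 0.9], [0.8, 0.2], [0.3, 0.7]])
targets = np.array([[0, 1], [1, 0], [1, 0]])                    # one-hot labels
correct = np.argmax(predictions, 1) == np.argmax(targets, 1)    # [True, True, False]
accuracy = correct.astype(np.float32).mean()                    # 0.666...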
Example 6
    def __init__(self, input_size, output_size, do_dropout, do_l2, ntype):
        self.normalizer = Normalizer()
        self.input_size = input_size
        self.output_size = output_size
        self.global_step = tfnn.Variable(0, trainable=False)
        if do_dropout and do_l2:
            raise ValueError(
                'Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        if do_l2:
            self.reg = 'l2'
        if (do_dropout is False) & (do_l2 is False):
            self.reg = None

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(
                dtype=tfnn.float32,
                shape=[None, self.input_size],
                name='x_input')
            self.target_placeholder = tfnn.placeholder(
                dtype=tfnn.float32,
                shape=[None, self.output_size],
                name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(
                    dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability',
                                    self.keep_prob_placeholder)
                _reg_value = self.keep_prob_placeholder
            elif do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_value', self.l2_placeholder)
                _reg_value = self.l2_placeholder
            else:
                _reg_value = None

        self.layers_configs = {
            'type': ['input'],
            'name': ['input_layer'],
            'neural_structure': [{
                'input_size': self.input_size,
                'output_size': self.input_size
            }],
            'ntype': ntype,
        }
        self.layers_results = {
            'reg_value': _reg_value,
            'Layer': [None],
            'Wx_plus_b': [None],
            'activated': [None],
            'dropped': [None],
            'final': [self.data_placeholder]
        }
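
The layers_results lists grow in lockstep as layers are added, so ['final'][-1] is always the newest output tensor (this is what the loss builders in Examples 2 and 11 read). A toy sketch of that bookkeeping with hypothetical stand-in values, not tfnn code:

layers_results = {'Layer': [None], 'final': ['x_input']}        # toy stand-ins, not tensors

def append_layer(layer, final_output):
    # every append happens in lockstep, so index -1 stays consistent across lists
    layers_results['Layer'].append(layer)
    layers_results['final'].append(final_output)

append_layer('hidden_1', 'h1_output')
next_input = layers_results['final'][-1]                        # what the next layer consumes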
Example 7
    def _set_accuracy(self):
        if isinstance(self.network, tfnn.ClfNetwork):
            with tfnn.name_scope('accuracy'):
                correct_prediction = tfnn.equal(
                    tfnn.argmax(self.network.predictions, 1),
                    tfnn.argmax(self.network.target_placeholder, 1),
                    name='correct_prediction')
                self.accuracy = tfnn.reduce_mean(
                    tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')
                tfnn.scalar_summary('accuracy', self.accuracy)
Example 8
    def __init__(self, input_size, output_size, do_dropout, do_l2, ntype):
        self.normalizer = Normalizer()
        self.input_size = input_size
        self.output_size = output_size
        self.global_step = tfnn.Variable(0, trainable=False)
        if do_dropout and do_l2:
            raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        if do_l2:
            self.reg = 'l2'
        if (do_dropout is False) & (do_l2 is False):
            self.reg = None

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(dtype=tfnn.float32,
                                                     shape=[None, self.input_size],
                                                     name='x_input')
            self.target_placeholder = tfnn.placeholder(dtype=tfnn.float32,
                                                       shape=[None, self.output_size],
                                                       name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)
                _reg_value = self.keep_prob_placeholder
            elif do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_value', self.l2_placeholder)
                _reg_value = self.l2_placeholder
            else:
                _reg_value = None

        self.layers_configs = {
            'type': ['input'],
            'name': ['input_layer'],
            'neural_structure': [{'input_size': self.input_size, 'output_size': self.input_size}],
            'ntype': ntype,
        }
        self.layers_results = {
            'reg_value': _reg_value,
            'Layer': [None],
            'Wx_plus_b': [None],
            'activated': [None],
            'dropped': [None],
            'final': [self.data_placeholder]
        }
Example 9
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            # '+ 0' wraps the output in a new op, so the result is a fresh tensor under the 'predictions' scope
            self.predictions = self.layers_final_output.iloc[-1] + 0
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder - self.layers_final_output.iloc[-1], name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square, reduction_indices=[1], name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
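
The loss here is squared error summed over output dimensions, then averaged over the batch; in NumPy terms, with made-up values:

import numpy as np

targets = np.array([[1.0, 2.0], [3.0, 4.0]])
preds = np.array([[1.5, 2.0], [2.0, 4.0]])
loss_square = np.square(targets - preds)     # element-wise, as in 'loss_square'
loss_sum = loss_square.sum(axis=1)           # per-example sum over outputs ('loss_sum')
loss = loss_sum.mean()                       # batch mean ('loss_mean') -> 0.625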
Example 10
    def _set_r2(self):
        if isinstance(self.network, tfnn.RegNetwork):
            with tfnn.name_scope('r2_score'):
                self.ys_mean = ys_mean = tfnn.reduce_mean(self.network.target_placeholder,
                                                          reduction_indices=[0],
                                                          name='ys_mean')
                self.ss_tot = ss_tot = tfnn.reduce_sum(
                    tfnn.square(self.network.target_placeholder - ys_mean),
                    reduction_indices=[0], name='total_sum_squares')
                # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
                self.ss_res = ss_res = tfnn.reduce_sum(
                    tfnn.square(self.network.target_placeholder - self.network.predictions),
                    reduction_indices=[0], name='residual_sum_squares')
                self.aaa = ss_res / ss_tot
                self.r2 = tfnn.reduce_mean(
                    tfnn.sub(tfnn.ones_like(ss_res, dtype=tfnn.float32), (ss_res / ss_tot)),
                    name='coefficient_of_determination')
                tfnn.scalar_summary('r2_score', self.r2)
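
Unlike Example 3, which keeps only the first output column via (ss_res / ss_tot)[0], this version averages R^2 over all output columns. In terms of the NumPy r2_score sketch after Example 3:

r2 = r2_score(targets, predictions).mean()   # mean R^2 across output columns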
Example 11
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            self.predictions = self.layers_results['final'][-1]
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder - self.predictions,
                                      name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square, reduction_indices=[1], name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example 12
    def __init__(self, network):
        self.network = network
        if isinstance(self.network, tfnn.ClfNetwork):
            with tfnn.name_scope('accuracy'):
                with tfnn.name_scope('correct_prediction'):
                    correct_prediction = tfnn.equal(
                        tfnn.argmax(network.predictions, 1),
                        tfnn.argmax(network.target_placeholder, 1),
                        name='correct_prediction')
                with tfnn.name_scope('accuracy'):
                    self.accuracy = tfnn.reduce_mean(
                        tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')
                tfnn.scalar_summary('accuracy', self.accuracy)
        elif isinstance(self.network, tfnn.RegNetwork):
            self.first_time_lm = True
            self.first_time_soc = True
        with tfnn.name_scope('r2_score'):
            with tfnn.name_scope('ys_mean'):
                ys_mean = tfnn.reduce_mean(network.target_placeholder,
                                           reduction_indices=[0],
                                           name='ys_mean')
            with tfnn.name_scope('total_sum_squares'):
                ss_tot = tfnn.reduce_sum(
                    tfnn.square(network.target_placeholder - ys_mean),
                    reduction_indices=[0],
                    name='total_sum_squares')
            # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
            with tfnn.name_scope('residual_sum_squares'):
                ss_res = tfnn.reduce_sum(
                    tfnn.square(network.target_placeholder - network.predictions),
                    reduction_indices=[0],
                    name='residual_sum_squares')
            with tfnn.name_scope('coefficient_of_determination'):
                self.r2_score = tfnn.sub(tfnn.constant(1, dtype=tfnn.float32),
                                         (ss_res / ss_tot)[0],
                                         name='coefficient_of_determination')
            tfnn.scalar_summary('r2_score', self.r2_score)
Example 13
    def __init__(self, n_inputs, n_outputs, input_dtype, output_dtype, output_activator,
                 do_dropout, do_l2, seed=None):
        self.normalizer = Normalizer()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.input_dtype = input_dtype
        self.output_dtype = output_dtype
        self.output_activator = output_activator
        if do_dropout and do_l2:
            raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        if do_l2:
            self.reg = 'l2'
        if (do_dropout is False) & (do_l2 is False):
            self.reg = None
        self.seed = seed

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(dtype=input_dtype, shape=[None, n_inputs], name='x_input')
            self.target_placeholder = tfnn.placeholder(dtype=output_dtype, shape=[None, n_outputs], name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)
            if do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_lambda', self.l2_placeholder)
        self.layers_type = pd.Series([])
        self.layers_output = pd.Series([])
        self.layers_activated_output = pd.Series([])
        self.layers_dropped_output = pd.Series([])
        self.layers_final_output = pd.Series([])
        self.Ws = pd.Series([])
        self.bs = pd.Series([])
        self.record_activators = pd.Series([])
        self.record_neurons = []
        self.last_layer_neurons = n_inputs
        self.last_layer_outputs = self.data_placeholder
        self.layer_number = 1
        self.has_output_layer = False
        self._is_output_layer = False
Example 14
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            self.predictions = self.layers_results['final'][-1]
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder -
                                      self.predictions,
                                      name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square,
                                       reduction_indices=[1],
                                       name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example 15
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            # '+ 0' wraps the output in a new op, so the result is a fresh tensor under the 'predictions' scope
            self.predictions = self.layers_final_output.iloc[-1] + 0
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder -
                                      self.layers_final_output.iloc[-1],
                                      name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square,
                                       reduction_indices=[1],
                                       name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example 16
    def __init__(self, n_inputs, n_outputs, input_dtype, output_dtype, output_activator,
                 do_dropout, do_l2, seed=None):
        self.normalizer = Normalizer()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.input_dtype = input_dtype
        self.output_dtype = output_dtype
        self.output_activator = output_activator
        if do_dropout and do_l2:
            raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        if do_l2:
            self.reg = 'l2'
        if (do_dropout is False) & (do_l2 is False):
            self.reg = None
        self.seed = seed

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(dtype=input_dtype, shape=[None, n_inputs], name='x_input')
            self.target_placeholder = tfnn.placeholder(dtype=output_dtype, shape=[None, n_outputs], name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)
            if do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_lambda', self.l2_placeholder)
        self.layers_output = pd.Series([])
        self.layers_activated_output = pd.Series([])
        self.layers_dropped_output = pd.Series([])
        self.layers_final_output = pd.Series([])
        self.Ws = pd.Series([])
        self.bs = pd.Series([])
        self.record_activators = pd.Series([])
        self.record_neurons = []
        self.last_layer_neurons = n_inputs
        self.last_layer_outputs = self.data_placeholder
        self.hidden_layer_number = 1
        self.has_output_layer = False
        self._is_output_layer = False
Example 17
    def _set_r2(self):
        if isinstance(self.network, tfnn.RegNetwork):
            with tfnn.name_scope('r2_score'):
                self.ys_mean = ys_mean = tfnn.reduce_mean(
                    self.network.target_placeholder,
                    reduction_indices=[0],
                    name='ys_mean')
                self.ss_tot = ss_tot = tfnn.reduce_sum(
                    tfnn.square(self.network.target_placeholder - ys_mean),
                    reduction_indices=[0],
                    name='total_sum_squares')
                # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
                self.ss_res = ss_res = tfnn.reduce_sum(
                    tfnn.square(self.network.target_placeholder -
                                self.network.predictions),
                    reduction_indices=[0],
                    name='residual_sum_squares')
                self.aaa = ss_res / ss_tot
                self.r2 = tfnn.reduce_mean(
                    tfnn.sub(tfnn.ones_like(ss_res, dtype=tfnn.float32),
                             (ss_res / ss_tot)),
                    name='coefficient_of_determination')
                tfnn.scalar_summary('r2_score', self.r2)
Example 18
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            if self.method == 'softmax':
                self.predictions = tfnn.nn.softmax(
                    self.layers_final_output.iloc[-1], name='predictions')
            elif self.method == 'sigmoid':
                self.predictions = tfnn.nn.sigmoid(
                    self.layers_final_output.iloc[-1], name='predictions')
        with tfnn.name_scope('loss'):
            if self.method == 'softmax':
                self.cross_entropy = tfnn.nn.softmax_cross_entropy_with_logits(
                    self.layers_final_output.iloc[-1],
                    self.target_placeholder,
                    name='xentropy')
            elif self.method == 'sigmoid':
                self.cross_entropy = tfnn.nn.sigmoid_cross_entropy_with_logits(
                    self.layers_final_output.iloc[-1],
                    self.target_placeholder,
                    name='xentropy')
            else:
                raise ValueError(
                    "method should be one of ['softmax', 'sigmoid']")
            self.loss = tfnn.reduce_mean(self.cross_entropy,
                                         name='xentropy_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers

            tfnn.scalar_summary('loss', self.loss)
Example 19
    def _set_confusion_metrics(self):
        # from https://cloud.google.com/solutions/machine-learning-with-financial-time-series-data
        # for onehot data
        with tfnn.name_scope('f1_score'):
            predictions = tfnn.argmax(self.network.predictions, 1)
            actuals = tfnn.argmax(self.network.target_placeholder, 1)

            ones_like_actuals = tfnn.ones_like(actuals)
            zeros_like_actuals = tfnn.zeros_like(actuals)
            ones_like_predictions = tfnn.ones_like(predictions)
            zeros_like_predictions = tfnn.zeros_like(predictions)

            tp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)),
                    "float"))

            tn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)),
                    "float"))

            fp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)),
                    "float"))

            fn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)),
                    "float"))

            self.recall = tp / (tp + fn)
            self.precision = tp / (tp + fp)

            self.f1 = tfnn.div(2 * (self.precision * self.recall),
                               (self.precision + self.recall),
                               name='f1_score')
            tfnn.scalar_summary('f1_score', self.f1)
            tfnn.scalar_summary('precision', self.precision)
            tfnn.scalar_summary('recall', self.recall)
Example 20
    def _set_confusion_metrics(self):
        # from https://cloud.google.com/solutions/machine-learning-with-financial-time-series-data
        # for onehot data
        with tfnn.name_scope('f1_score'):
            predictions = tfnn.argmax(self.network.predictions, 1)
            actuals = tfnn.argmax(self.network.target_placeholder, 1)

            ones_like_actuals = tfnn.ones_like(actuals)
            zeros_like_actuals = tfnn.zeros_like(actuals)
            ones_like_predictions = tfnn.ones_like(predictions)
            zeros_like_predictions = tfnn.zeros_like(predictions)

            tp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)
                    ), "float"))

            tn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)
                    ), "float"))

            fp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)
                    ), "float"))

            fn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)
                    ), "float"))

            self.recall = tp / (tp + fn)
            self.precision = tp / (tp + fp)

            self.f1 = tfnn.div(2 * (self.precision * self.recall),
                               (self.precision + self.recall), name='f1_score')
            tfnn.scalar_summary('f1_score', self.f1)
            tfnn.scalar_summary('precision', self.precision)
            tfnn.scalar_summary('recall', self.recall)
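
The four counts above form the binary confusion matrix, with class index 1 treated as the positive class (hence the "for onehot data" caveat). A self-contained NumPy sketch of the same precision/recall/F1 arithmetic, with made-up label vectors:

import numpy as np

pred = np.array([1, 1, 0, 1, 0])        # argmax of predictions
act = np.array([1, 0, 0, 1, 1])         # argmax of one-hot targets

tp = np.sum((act == 1) & (pred == 1))   # true positives  -> 2
fp = np.sum((act == 0) & (pred == 1))   # false positives -> 1
fn = np.sum((act == 1) & (pred == 0))   # false negatives -> 1

recall = tp / (tp + fn)                              # 0.666...
precision = tp / (tp + fp)                           # 0.666...
f1 = 2 * precision * recall / (precision + recall)   # 0.666...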