Example #1
0
File: evaluator.py  Project: YiyingW/tfnn
 def __init__(self, network, ):
     """Build evaluation ops on *network*: accuracy (classifiers) and R^2 (always)."""
     self.network = network
     if isinstance(self.network, tfnn.ClfNetwork):
         # Classification: fraction of rows whose predicted argmax matches the target's.
         with tfnn.name_scope('accuracy'):
             with tfnn.name_scope('correct_prediction'):
                 is_correct = tfnn.equal(
                     tfnn.argmax(network.predictions, 1),
                     tfnn.argmax(network.target_placeholder, 1),
                     name='correct_prediction')
             with tfnn.name_scope('accuracy'):
                 self.accuracy = tfnn.reduce_mean(
                     tfnn.cast(is_correct, tfnn.float32), name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
     elif isinstance(self.network, tfnn.RegNetwork):
         # Flags presumably consumed by plotting helpers elsewhere — not visible here.
         self.first_time_lm = True
         self.first_time_soc = True
     # R^2 = 1 - SS_res / SS_tot; built for every network type, as in the original.
     with tfnn.name_scope('r2_score'):
         with tfnn.name_scope('ys_mean'):
             target_mean = tfnn.reduce_mean(network.target_placeholder,
                                            reduction_indices=[0], name='ys_mean')
         with tfnn.name_scope('total_sum_squares'):
             total_ss = tfnn.reduce_sum(
                 tfnn.square(network.target_placeholder - target_mean),
                 reduction_indices=[0], name='total_sum_squares')
         with tfnn.name_scope('residual_sum_squares'):
             residual_ss = tfnn.reduce_sum(
                 tfnn.square(network.target_placeholder - network.predictions),
                 reduction_indices=[0], name='residual_sum_squares')
         with tfnn.name_scope('coefficient_of_determination'):
             # Only the first target column's ratio is used, matching the original.
             self.r2_score = tfnn.sub(tfnn.constant(1, dtype=tfnn.float32),
                                      (residual_ss / total_ss)[0],
                                      name='coefficient_of_determination')
         tfnn.scalar_summary('r2_score', self.r2_score)
Example #2
0
    def _init_loss(self):
        """Create cross-entropy loss (plus optional L2 penalty) and its scalar summary."""
        logits = self.layers_results['final'][-1]
        with tfnn.name_scope('predictions'):
            if self.method == 'softmax':
                self.predictions = tfnn.nn.softmax(logits, name='predictions')
            elif self.method == 'sigmoid':
                self.predictions = tfnn.nn.sigmoid(logits, name='predictions')
        with tfnn.name_scope('loss'):
            if self.method == 'softmax':
                self.cross_entropy = tfnn.nn.softmax_cross_entropy_with_logits(
                    logits, self.target_placeholder, name='xentropy')
            elif self.method == 'sigmoid':
                self.cross_entropy = tfnn.nn.sigmoid_cross_entropy_with_logits(
                    logits, self.target_placeholder, name='xentropy')
            else:
                raise ValueError("method should be one of ['sparse_softmax', 'softmax', 'sigmoid']")
            self.loss = tfnn.reduce_mean(self.cross_entropy, name='xentropy_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    # Penalize the weights of every layer after the input layer,
                    # scaled by the l2 placeholder fed at run time.
                    penalty = sum(tfnn.nn.l2_loss(layer.W, name='l2_reg')
                                  for layer in self.layers_results['Layer'][1:])
                    penalty *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += penalty

            tfnn.scalar_summary('loss', self.loss)
Example #3
0
 def _set_r2(self):
     """Attach R^2 (coefficient of determination) ops and a summary; no-op for non-regression nets."""
     if not isinstance(self.network, tfnn.RegNetwork):
         return
     targets = self.network.target_placeholder
     with tfnn.name_scope('r2_score'):
         # Column-wise mean of the ground truth.
         self.ys_mean = ys_mean = tfnn.reduce_mean(targets,
                                                   reduction_indices=[0],
                                                   name='ys_mean')
         # Total sum of squares per target column.
         self.ss_tot = ss_tot = tfnn.reduce_sum(tfnn.square(targets - ys_mean),
                                                reduction_indices=[0],
                                                name='total_sum_squares')
         # Residual sum of squares per target column.
         self.ss_res = ss_res = tfnn.reduce_sum(
             tfnn.square(targets - self.network.predictions),
             reduction_indices=[0], name='residual_sum_squares')
         self.aaa = ss_res / ss_tot  # NOTE(review): looks like a debug leftover; kept for compatibility
         # R^2 averaged over target columns: mean(1 - SS_res/SS_tot).
         self.r2 = tfnn.reduce_mean(
             tfnn.sub(tfnn.ones_like(ss_res, dtype=tfnn.float32),
                      (ss_res / ss_tot)),
             name='coefficient_of_determination')
         tfnn.scalar_summary('r2_score', self.r2)
Example #4
0
 def _set_accuracy(self):
     """Attach an accuracy op and its summary; no-op unless the network is a classifier."""
     if not isinstance(self.network, tfnn.ClfNetwork):
         return
     with tfnn.name_scope('accuracy'):
         # Row-wise hit: argmax of prediction vs argmax of the target.
         hits = tfnn.equal(tfnn.argmax(self.network.predictions, 1),
                           tfnn.argmax(self.network.target_placeholder, 1),
                           name='correct_prediction')
         # Mean of the 0/1 hits over the batch.
         self.accuracy = tfnn.reduce_mean(tfnn.cast(hits, tfnn.float32),
                                          name='accuracy')
         tfnn.scalar_summary('accuracy', self.accuracy)
Example #5
0
 def __init__(
     self,
     network,
 ):
     """Build evaluation ops for *network*.

     For tfnn.ClfNetwork: an 'accuracy' op plus its scalar summary.
     For tfnn.RegNetwork: two first-time flags (purpose not visible here).
     In all cases an 'r2_score' op (1 - SS_res/SS_tot on the first target
     column) and its summary are created.
     """
     self.network = network
     if isinstance(self.network, tfnn.ClfNetwork):
         with tfnn.name_scope('accuracy'):
             with tfnn.name_scope('correct_prediction'):
                 # Row-wise hit: does the predicted argmax match the target's?
                 correct_prediction = tfnn.equal(
                     tfnn.argmax(network.predictions, 1),
                     tfnn.argmax(network.target_placeholder, 1),
                     name='correct_prediction')
             with tfnn.name_scope('accuracy'):
                 # Mean of the 0/1 hits over the batch.
                 self.accuracy = tfnn.reduce_mean(tfnn.cast(
                     correct_prediction, tfnn.float32),
                                                  name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
     elif isinstance(self.network, tfnn.RegNetwork):
         # presumably consumed by plotting/monitoring helpers elsewhere — TODO confirm
         self.first_time_lm = True
         self.first_time_soc = True
     # NOTE(review): the R^2 ops are built for classifier networks too — matches
     # the original, but worth confirming that is intended.
     with tfnn.name_scope('r2_score'):
         with tfnn.name_scope('ys_mean'):
             # Column-wise mean of the ground truth.
             ys_mean = tfnn.reduce_mean(network.target_placeholder,
                                        reduction_indices=[0],
                                        name='ys_mean')
         with tfnn.name_scope('total_sum_squares'):
             # SS_tot per target column.
             ss_tot = tfnn.reduce_sum(
                 tfnn.square(network.target_placeholder - ys_mean),
                 reduction_indices=[0],
                 name='total_sum_squares')
         # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
         with tfnn.name_scope('residual_sum_squares'):
             # SS_res per target column.
             ss_res = tfnn.reduce_sum(
                 tfnn.square(network.target_placeholder -
                             network.predictions),
                 reduction_indices=[0],
                 name='residual_sum_squares')
         with tfnn.name_scope('coefficient_of_determination'):
             # R^2 of the first target column only: 1 - SS_res[0]/SS_tot[0].
             self.r2_score = tfnn.sub(tfnn.constant(1, dtype=tfnn.float32),
                                      (ss_res / ss_tot)[0],
                                      name='coefficient_of_determination')
         tfnn.scalar_summary('r2_score', self.r2_score)
Example #6
0
 def _set_accuracy(self):
     """Attach an accuracy op and its summary when the wrapped network is a classifier."""
     if isinstance(self.network, tfnn.ClfNetwork):
         with tfnn.name_scope('accuracy'):
             # Row-wise hit: argmax of the prediction vs argmax of the target.
             correct_prediction = tfnn.equal(
                 tfnn.argmax(self.network.predictions, 1),
                 tfnn.argmax(self.network.target_placeholder, 1),
                 name='correct_prediction')
             # Mean of the 0/1 hits = batch accuracy.
             self.accuracy = tfnn.reduce_mean(tfnn.cast(
                 correct_prediction, tfnn.float32),
                                              name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
Example #7
0
 def _set_r2(self):
     """Attach R^2 (coefficient of determination) ops and a summary for regression networks."""
     if isinstance(self.network, tfnn.RegNetwork):
         with tfnn.name_scope('r2_score'):
             # Column-wise mean of the ground truth.
             self.ys_mean = ys_mean = tfnn.reduce_mean(
                 self.network.target_placeholder,
                 reduction_indices=[0],
                 name='ys_mean')
             # SS_tot: total sum of squares per target column.
             self.ss_tot = ss_tot = tfnn.reduce_sum(
                 tfnn.square(self.network.target_placeholder - ys_mean),
                 reduction_indices=[0],
                 name='total_sum_squares')
             # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
             # SS_res: squared prediction error summed per target column.
             self.ss_res = ss_res = tfnn.reduce_sum(
                 tfnn.square(self.network.target_placeholder -
                             self.network.predictions),
                 reduction_indices=[0],
                 name='residual_sum_squares')
             self.aaa = ss_res / ss_tot  # NOTE(review): looks like a debug leftover — confirm before removing
             # R^2 averaged over target columns: mean(1 - SS_res/SS_tot).
             self.r2 = tfnn.reduce_mean(tfnn.sub(
                 tfnn.ones_like(ss_res, dtype=tfnn.float32),
                 (ss_res / ss_tot)),
                                        name='coefficient_of_determination')
             tfnn.scalar_summary('r2_score', self.r2)
Example #8
0
    def _init_loss(self):
        """Create the mean summed-squared-error loss (plus optional L2) and its summary."""
        final_output = self.layers_final_output.iloc[-1]
        with tfnn.name_scope('predictions'):
            # '+ 0' kept from the original — presumably yields a distinct tensor
            # inside this scope; confirm before simplifying.
            self.predictions = final_output + 0
        with tfnn.name_scope('loss'):
            squared_error = tfnn.square(self.target_placeholder - final_output,
                                        name='loss_square')
            # Sum over output dimensions, then average over the batch.
            per_example = tfnn.reduce_sum(squared_error, reduction_indices=[1],
                                          name='loss_sum')
            self.loss = tfnn.reduce_mean(per_example, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    # L2 penalty over every weight matrix, scaled by the
                    # l2 placeholder fed at run time.
                    penalty = sum(tfnn.nn.l2_loss(W, name='l2_reg') for W in self.Ws)
                    penalty *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += penalty
            tfnn.scalar_summary('loss', self.loss)
Example #9
0
    def _init_loss(self):
        """Create the mean summed-squared-error loss (plus optional L2) and its summary."""
        with tfnn.name_scope('predictions'):
            self.predictions = self.layers_results['final'][-1]
        with tfnn.name_scope('loss'):
            squared_error = tfnn.square(self.target_placeholder - self.predictions,
                                        name='loss_square')
            # Sum over output dimensions, then average over the batch.
            per_example = tfnn.reduce_sum(squared_error,
                                          reduction_indices=[1],
                                          name='loss_sum')
            self.loss = tfnn.reduce_mean(per_example, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    # Penalize the weights of every layer after the input layer,
                    # scaled by the l2 placeholder fed at run time.
                    penalty = sum(tfnn.nn.l2_loss(layer.W, name='l2_reg')
                                  for layer in self.layers_results['Layer'][1:])
                    penalty *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += penalty
            tfnn.scalar_summary('loss', self.loss)
Example #10
0
    def _init_loss(self):
        """Create the mean summed-squared-error loss (plus optional L2) and its summary."""
        with tfnn.name_scope('predictions'):
            # '+ 0' presumably forces a distinct tensor inside this scope — TODO confirm
            self.predictions = self.layers_final_output.iloc[-1] + 0
        with tfnn.name_scope('loss'):
            # Squared error per element...
            loss_square = tfnn.square(self.target_placeholder -
                                      self.layers_final_output.iloc[-1],
                                      name='loss_square')
            # ...summed over output dimensions (axis 1)...
            loss_sum = tfnn.reduce_sum(loss_square,
                                       reduction_indices=[1],
                                       name='loss_sum')
            # ...then averaged over the batch.
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    # Accumulate L2 penalties over all weight matrices, scaled by
                    # the l2 placeholder fed at run time.
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example #11
0
    def _init_loss(self):
        """Create the mean summed-squared-error loss (plus optional L2) and its summary."""
        with tfnn.name_scope('predictions'):
            # The last entry of the 'final' results is the network output.
            self.predictions = self.layers_results['final'][-1]
        with tfnn.name_scope('loss'):
            # Squared error per element...
            loss_square = tfnn.square(self.target_placeholder -
                                      self.predictions,
                                      name='loss_square')
            # ...summed over output dimensions (axis 1)...
            loss_sum = tfnn.reduce_sum(loss_square,
                                       reduction_indices=[1],
                                       name='loss_sum')
            # ...then averaged over the batch.
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    # Penalize the weights of every layer after the input layer,
                    # scaled by the l2 placeholder fed at run time.
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example #12
0
    def _init_loss(self):
        """Create predictions and the cross-entropy loss (plus optional L2) with a summary."""
        with tfnn.name_scope('predictions'):
            if self.method == 'softmax':
                self.predictions = tfnn.nn.softmax(
                    self.layers_final_output.iloc[-1], name='predictions')
            elif self.method == 'sigmoid':
                self.predictions = tfnn.nn.sigmoid(
                    self.layers_final_output.iloc[-1], name='predictions')
            # NOTE(review): any other method leaves self.predictions unset; the
            # ValueError below only fires while building the loss.
        with tfnn.name_scope('loss'):
            if self.method == 'softmax':
                self.cross_entropy = tfnn.nn.softmax_cross_entropy_with_logits(
                    self.layers_final_output.iloc[-1],
                    self.target_placeholder,
                    name='xentropy')
            elif self.method == 'sigmoid':
                self.cross_entropy = tfnn.nn.sigmoid_cross_entropy_with_logits(
                    self.layers_final_output.iloc[-1],
                    self.target_placeholder,
                    name='xentropy')
            else:
                # NOTE(review): the message lists 'sparse_softmax' but no such
                # branch exists above — confirm against the rest of the project.
                raise ValueError(
                    "method should be one of ['sparse_softmax', 'softmax', 'sigmoid']"
                )
            self.loss = tfnn.reduce_mean(self.cross_entropy,
                                         name='xentropy_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    # Accumulate L2 penalties over all weight matrices, scaled by
                    # the l2 placeholder fed at run time.
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers

            tfnn.scalar_summary('loss', self.loss)