Example #1
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            if self.method == 'softmax':
                self.predictions = tfnn.nn.softmax(self.layers_results['final'][-1], name='predictions')
            elif self.method == 'sigmoid':
                self.predictions = tfnn.nn.sigmoid(self.layers_results['final'][-1], name='predictions')
        with tfnn.name_scope('loss'):
            if self.method == 'softmax':
                self.cross_entropy = tfnn.nn.softmax_cross_entropy_with_logits(
                    self.layers_results['final'][-1],
                    self.target_placeholder,
                    name='xentropy')
            elif self.method == 'sigmoid':
                self.cross_entropy = tfnn.nn.sigmoid_cross_entropy_with_logits(
                    self.layers_results['final'][-1],
                    self.target_placeholder,
                    name='xentropy')
            else:
                raise ValueError("method should be one of ['sparse_softmax', 'softmax', 'sigmoid']")
            self.loss = tfnn.reduce_mean(self.cross_entropy, name='xentropy_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers

            tfnn.scalar_summary('loss', self.loss)
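
For intuition, the same computation can be written in plain NumPy. This is a minimal sketch, not tfnn code: logits, targets, weights, and l2_lambda are hypothetical stand-ins for the graph tensors and placeholders above.

    import numpy as np

    def clf_loss(logits, targets, weights=(), l2_lambda=0.0):
        """Mean softmax cross-entropy with an optional L2 penalty (NumPy sketch)."""
        shifted = logits - logits.max(axis=1, keepdims=True)   # for numerical stability
        probs = np.exp(shifted) / np.exp(shifted).sum(axis=1, keepdims=True)
        xentropy = -(targets * np.log(probs + 1e-12)).sum(axis=1)
        loss = xentropy.mean()                                 # xentropy_mean
        # tfnn.nn.l2_loss(W) computes sum(W ** 2) / 2
        loss += l2_lambda * sum((W ** 2).sum() / 2 for W in weights)
        return loss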
Example #2
    def add_hidden_layer(self, n_neurons, activator=None, dropout_layer=False):
        """
        W shape: (n_last_layer_neurons, n_this_layer_neurons)
        b shape: (n_this_layer_neurons,)
        product = tfnn.matmul(x, W) + b
        :param n_neurons: Number of neurons in this layer
        :param activator: The activation function
        :param dropout_layer: Whether to apply dropout to this layer's output
        """
        if not self._is_output_layer:
            layer_name = 'hidden_layer%i' % self.layer_number
        else:
            layer_name = 'output_layer'
        with tfnn.name_scope(layer_name):
            with tfnn.name_scope('weights'):
                W = self._weight_variable([self.last_layer_neurons, n_neurons])
                tfnn.histogram_summary(layer_name+'/weights', W)
            with tfnn.name_scope('biases'):
                b = self._bias_variable([n_neurons, ])
                tfnn.histogram_summary(layer_name + '/biases', b)
            with tfnn.name_scope('Wx_plus_b'):
                product = tfnn.add(tfnn.matmul(self.last_layer_outputs, W, name='Wx'), b, name='Wx_plus_b')
            if activator is None:
                activated_product = product
            else:
                activated_product = activator(product)
            tfnn.histogram_summary(layer_name+'/activated_product', activated_product)
            if (self.reg == 'dropout') and dropout_layer:
                dropped_product = tfnn.nn.dropout(activated_product,
                                                  self.keep_prob_placeholder,
                                                  seed=self.seed, name='dropout')
                self.layers_dropped_output.set_value(label=len(self.layers_dropped_output),
                                                     value=dropped_product)
                final_product = dropped_product
            else:
                final_product = activated_product

        self.layers_type.set_value(len(self.layers_type), "func")
        self.layer_number += 1
        self.last_layer_outputs = final_product
        self.Ws.set_value(label=len(self.Ws), value=W)
        self.bs.set_value(label=len(self.bs), value=b)
        if activator is None:
            self.record_activators.set_value(label=len(self.record_activators), value=None)
        else:
            self.record_activators.set_value(label=len(self.record_activators), value=activator(0).name)
        self.record_neurons.append(n_neurons)

        self.layers_output.set_value(label=len(self.layers_output),
                                     value=product)
        self.layers_activated_output.set_value(label=len(self.layers_activated_output),
                                               value=activated_product)
        self.layers_final_output.set_value(label=len(self.layers_final_output),
                                           value=final_product)
        self.last_layer_neurons = n_neurons
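
The layer itself reduces to a matrix multiply, a bias add, an optional activation, and optional dropout. A framework-free NumPy sketch of the same forward pass; the rescaling by 1 / keep_prob mirrors what tfnn.nn.dropout does:

    import numpy as np

    def dense_forward(x, W, b, activator=None, keep_prob=1.0, rng=np.random.default_rng(0)):
        """activator(x @ W + b), optionally followed by inverted dropout."""
        product = x @ W + b                                  # Wx_plus_b
        activated = product if activator is None else activator(product)
        if keep_prob < 1.0:
            mask = rng.random(activated.shape) < keep_prob   # keep each unit with prob keep_prob
            activated = np.where(mask, activated / keep_prob, 0.0)
        return activated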
Example #3
    def add_hidden_layer(self, n_neurons, activator=None, dropout_layer=False):
        """
        W shape: (n_last_layer_neurons, n_this_layer_neurons)
        b shape: (n_this_layer_neurons,)
        product = tfnn.matmul(x, W) + b
        :param n_neurons: Number of neurons in this layer
        :param activator: The activation function
        :param dropout_layer: Whether to apply dropout to this layer's output
        """
        if not self._is_output_layer:
            layer_name = 'hidden_layer%i' % self.hidden_layer_number
        else:
            layer_name = 'output_layer'
        with tfnn.name_scope(layer_name):
            with tfnn.name_scope('weights'):
                W = self._weight_variable([self.last_layer_neurons, n_neurons])
                tfnn.histogram_summary(layer_name+'/weights', W)
            with tfnn.name_scope('biases'):
                b = self._bias_variable([n_neurons, ])
                tfnn.histogram_summary(layer_name + '/biases', b)
            with tfnn.name_scope('Wx_plus_b'):
                product = tfnn.add(tfnn.matmul(self.last_layer_outputs, W, name='Wx'), b, name='Wx_plus_b')
            if activator is None:
                activated_product = product
            else:
                activated_product = activator(product)
            tfnn.histogram_summary(layer_name+'/activated_product', activated_product)
            if (self.reg == 'dropout') and dropout_layer:
                dropped_product = tfnn.nn.dropout(activated_product,
                                                  self.keep_prob_placeholder,
                                                  seed=self.seed, name='dropout')
                self.layers_dropped_output.set_value(label=len(self.layers_dropped_output),
                                                     value=dropped_product)
                final_product = dropped_product
            else:
                final_product = activated_product

        self.hidden_layer_number += 1
        self.last_layer_outputs = final_product
        self.Ws.set_value(label=len(self.Ws), value=W)
        self.bs.set_value(label=len(self.bs), value=b)
        if activator is None:
            self.record_activators.set_value(label=len(self.record_activators), value=None)
        else:
            self.record_activators.set_value(label=len(self.record_activators), value=activator(0).name)
        self.record_neurons.append(n_neurons)

        self.layers_output.set_value(label=len(self.layers_output),
                                     value=product)
        self.layers_activated_output.set_value(label=len(self.layers_activated_output),
                                               value=activated_product)
        self.layers_final_output.set_value(label=len(self.layers_final_output),
                                           value=final_product)
        self.last_layer_neurons = n_neurons
Example #4
 def __init__(self, network):
     self.network = network
     if isinstance(self.network, tfnn.ClfNetwork):
         with tfnn.name_scope('accuracy'):
             with tfnn.name_scope('correct_prediction'):
                 correct_prediction = tfnn.equal(tfnn.argmax(network.predictions, 1),
                                               tfnn.argmax(network.target_placeholder, 1), name='correct_prediction')
             with tfnn.name_scope('accuracy'):
                 self.accuracy = tfnn.reduce_mean(tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
     elif isinstance(self.network, tfnn.RegNetwork):
         self.first_time_lm = True
         self.first_time_soc = True
     with tfnn.name_scope('r2_score'):
         with tfnn.name_scope('ys_mean'):
             ys_mean = tfnn.reduce_mean(network.target_placeholder, reduction_indices=[0], name='ys_mean')
         with tfnn.name_scope('total_sum_squares'):
             ss_tot = tfnn.reduce_sum(tfnn.square(network.target_placeholder - ys_mean),
                                      reduction_indices=[0], name='total_sum_squares')
         # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
         with tfnn.name_scope('residual_sum_squares'):
             ss_res = tfnn.reduce_sum(tfnn.square(network.target_placeholder - network.predictions),
                                      reduction_indices=[0], name='residual_sum_squares')
         with tfnn.name_scope('coefficient_of_determination'):
             self.r2_score = tfnn.sub(tfnn.constant(1, dtype=tfnn.float32), (ss_res / ss_tot)[0],
                                      name='coefficient_of_determination')
         tfnn.scalar_summary('r2_score', self.r2_score)
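
The r2_score branch is the usual coefficient of determination, R2 = 1 - ss_res / ss_tot, computed per output column; the graph above then keeps only the first column. A NumPy sketch of the same formula:

    import numpy as np

    def r2_score(targets, predictions):
        """R^2 per output column: 1 - ss_res / ss_tot (NumPy sketch)."""
        ys_mean = targets.mean(axis=0)
        ss_tot = ((targets - ys_mean) ** 2).sum(axis=0)      # total sum of squares
        ss_res = ((targets - predictions) ** 2).sum(axis=0)  # residual sum of squares
        return 1.0 - ss_res / ss_tot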
Example #5
    def _check_image_shape(self, layers_configs, layers_results):
        """
        Has an effect only on the first conv layer.
        """
        if self.image_shape is not None:
            if len(layers_configs['type']) == 1:
                if isinstance(self.image_shape, tuple):
                    self.image_shape = list(self.image_shape)
                elif not isinstance(self.image_shape, list):
                    raise ValueError('image_shape can only be a tuple or list')

                # image shape has to be (x, y, channel)
                layers_configs['neural_structure'][-1][
                    'output_size'] = self.image_shape
                _xs_placeholder = layers_results['final'][-1]
                replaced_image_shape = self.image_shape.copy()
                replaced_image_shape.insert(0, -1)
                with tfnn.name_scope('reshape_inputs'):
                    _image_placeholder = tfnn.reshape(_xs_placeholder,
                                                      replaced_image_shape)
                layers_results['activated'][-1] = _image_placeholder
                layers_results['final'][-1] = _image_placeholder
            else:
                raise IndexError(
                    'This is not the first conv layer, leave image_shape as default'
                )
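
The reshape prepends -1 as the batch dimension, so the batch size stays dynamic while the flat features are folded back into (x, y, channel). A quick NumPy illustration of the same reshape (the 28x28x3 shape is just an example):

    import numpy as np

    xs = np.zeros((5, 28 * 28 * 3), dtype=np.float32)  # a flat batch of 28x28 RGB images
    image_shape = [28, 28, 3]                          # (x, y, channel)
    images = xs.reshape([-1] + image_shape)            # -1 keeps the batch size dynamic
    print(images.shape)                                # (5, 28, 28, 3)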
Example #6
 def set_optimizer(self, optimizer=None, global_step=None):
     if optimizer is None:
         optimizer = tfnn.train.GradientDescentOptimizer(0.01)
     if not self.has_output_layer:
         raise NotImplementedError('Please add an output layer first.')
     with tfnn.name_scope('train'):
         self.train_op = optimizer.minimize(self.loss, global_step)
     self.sess = tfnn.Session()
Example #7
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            # + 0 creates a new tensor, so predictions is registered under this name scope
            self.predictions = self.layers_final_output.iloc[-1] + 0
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder - self.layers_final_output.iloc[-1], name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square, reduction_indices=[1], name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
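
The regression loss is the squared error summed over the output dimensions of each sample, then averaged over the batch. The same reduction in NumPy (a sketch, not tfnn code):

    import numpy as np

    def reg_loss(targets, predictions):
        """Sum of squared errors per sample, then the batch mean (NumPy sketch)."""
        loss_square = (targets - predictions) ** 2
        loss_sum = loss_square.sum(axis=1)   # reduction_indices=[1]
        return loss_sum.mean()               # loss_mean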
Example #9
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            self.predictions = self.layers_results['final'][-1]
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder - self.predictions,
                                      name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square, reduction_indices=[1], name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example #10
 def _set_accuracy(self):
     if isinstance(self.network, tfnn.ClfNetwork):
         with tfnn.name_scope('accuracy'):
             correct_prediction = tfnn.equal(
                 tfnn.argmax(self.network.predictions, 1),
                 tfnn.argmax(self.network.target_placeholder, 1),
                 name='correct_prediction')
             self.accuracy = tfnn.reduce_mean(
                 tfnn.cast(correct_prediction, tfnn.float32), name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
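
Accuracy here is the fraction of rows where the predicted argmax matches the one-hot target's argmax. An equivalent NumPy sketch:

    import numpy as np

    def accuracy(predictions, targets):
        """Fraction of samples whose argmax class matches the one-hot target."""
        correct = np.argmax(predictions, axis=1) == np.argmax(targets, axis=1)
        return correct.mean()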
Example #11
    def __init__(self, input_size, output_size, do_dropout, do_l2, ntype):
        self.normalizer = Normalizer()
        self.input_size = input_size
        self.output_size = output_size
        self.global_step = tfnn.Variable(0, trainable=False)
        if do_dropout and do_l2:
            raise ValueError(
                'Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        elif do_l2:
            self.reg = 'l2'
        else:
            self.reg = None

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(
                dtype=tfnn.float32,
                shape=[None, self.input_size],
                name='x_input')
            self.target_placeholder = tfnn.placeholder(
                dtype=tfnn.float32,
                shape=[None, self.output_size],
                name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(
                    dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability',
                                    self.keep_prob_placeholder)
                _reg_value = self.keep_prob_placeholder
            elif do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_value', self.l2_placeholder)
                _reg_value = self.l2_placeholder
            else:
                _reg_value = None

        self.layers_configs = {
            'type': ['input'],
            'name': ['input_layer'],
            'neural_structure': [{
                'input_size': self.input_size,
                'output_size': self.input_size
            }],
            'ntype':
            ntype,
        }
        self.layers_results = {
            'reg_value': _reg_value,
            'Layer': [None],
            'Wx_plus_b': [None],
            'activated': [None],
            'dropped': [None],
            'final': [self.data_placeholder]
        }
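
At training time these placeholders are filled through a feed dict; depending on reg, either the keep probability or the l2 coefficient must be supplied as well. A hypothetical helper, assuming tfnn sessions behave like TensorFlow 1.x sessions (make_feed itself is not part of tfnn):

    def make_feed(net, x_batch, y_batch, keep_prob=0.5, l2=1e-4):
        """Build the feed dict for one training step (hypothetical helper)."""
        feed = {net.data_placeholder: x_batch,
                net.target_placeholder: y_batch}
        if net.reg == 'dropout':
            feed[net.keep_prob_placeholder] = keep_prob
        elif net.reg == 'l2':
            feed[net.l2_placeholder] = l2
        return feed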
Example #12
 def _set_accuracy(self):
     if isinstance(self.network, tfnn.ClfNetwork):
         with tfnn.name_scope('accuracy'):
             correct_prediction = tfnn.equal(
                 tfnn.argmax(self.network.predictions, 1),
                 tfnn.argmax(self.network.target_placeholder, 1),
                 name='correct_prediction')
             self.accuracy = tfnn.reduce_mean(tfnn.cast(
                 correct_prediction, tfnn.float32),
                                              name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
Example #13
 def _check_init(self):
     if not hasattr(self, '_init'):
         if not hasattr(self, 'lr'):
             self.set_learning_rate(0.001)
         self.optimizer = self._optimizer(self._lr,  *self.optimizer_params[0], **self.optimizer_params[1])
         with tfnn.name_scope('train'):
             self._train_op = self.optimizer.minimize(self.loss, self.global_step, name='train_op')
         # initialize all variables
         self._init = tfnn.initialize_all_variables()
         self.sess = tfnn.Session()
         self.sess.run(self._init)
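
_check_init is a lazy-initialization guard: the optimizer, train op, and session are built once, on the first call, so later calls are cheap and idempotent. The pattern in isolation (a framework-free sketch; the names are illustrative):

    class LazyInit:
        def _check_init(self):
            """Run the one-time setup only on the first call."""
            if not hasattr(self, '_init'):
                self.lr = getattr(self, 'lr', 0.001)  # fall back to a default learning rate
                self._init = True                     # marker: setup has run

        def train_step(self):
            self._check_init()                        # safe to call on every step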
Example #14
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            self.predictions = self.layers_results['final'][-1]
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder -
                                      self.predictions,
                                      name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square,
                                       reduction_indices=[1],
                                       name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for layer in self.layers_results['Layer'][1:]:
                        regularizers += tfnn.nn.l2_loss(layer.W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example #15
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            # + 0 creates a new tensor, so predictions is registered under this name scope
            self.predictions = self.layers_final_output.iloc[-1] + 0
        with tfnn.name_scope('loss'):
            loss_square = tfnn.square(self.target_placeholder -
                                      self.layers_final_output.iloc[-1],
                                      name='loss_square')
            loss_sum = tfnn.reduce_sum(loss_square,
                                       reduction_indices=[1],
                                       name='loss_sum')
            self.loss = tfnn.reduce_mean(loss_sum, name='loss_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers
            tfnn.scalar_summary('loss', self.loss)
Example #16
    def _set_confusion_metrics(self):
        # from https://cloud.google.com/solutions/machine-learning-with-financial-time-series-data
        # for one-hot data
        with tfnn.name_scope('f1_score'):
            predictions = tfnn.argmax(self.network.predictions, 1)
            actuals = tfnn.argmax(self.network.target_placeholder, 1)

            ones_like_actuals = tfnn.ones_like(actuals)
            zeros_like_actuals = tfnn.zeros_like(actuals)
            ones_like_predictions = tfnn.ones_like(predictions)
            zeros_like_predictions = tfnn.zeros_like(predictions)

            tp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)),
                    "float"))

            tn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)),
                    "float"))

            fp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)),
                    "float"))

            fn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)),
                    "float"))

            self.recall = tp / (tp + fn)
            self.precision = tp / (tp + fp)

            self.f1 = tfnn.div(2 * (self.precision * self.recall),
                               (self.precision + self.recall),
                               name='f1_score')
            tfnn.scalar_summary('f1_score', self.f1)
            tfnn.scalar_summary('precision', self.precision)
            tfnn.scalar_summary('recall', self.recall)
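
With the argmaxed one-hot labels treated as binary (class 1 positive), the four reductions above are confusion-matrix counts, from which recall, precision, and F1 follow. The same arithmetic in NumPy (a sketch; assumes at least one positive prediction and one positive label so the denominators are non-zero):

    import numpy as np

    def f1_metrics(predictions, targets):
        """Precision, recall and F1 for binary one-hot data (NumPy sketch)."""
        pred = np.argmax(predictions, axis=1)
        actual = np.argmax(targets, axis=1)
        tp = np.sum((actual == 1) & (pred == 1))   # true positives
        fp = np.sum((actual == 0) & (pred == 1))   # false positives
        fn = np.sum((actual == 1) & (pred == 0))   # false negatives
        recall = tp / (tp + fn)
        precision = tp / (tp + fp)
        f1 = 2 * precision * recall / (precision + recall)
        return precision, recall, f1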
Example #17
 def _check_init(self):
     if not hasattr(self, '_init'):
         if not hasattr(self, 'lr'):
             self.set_learning_rate(0.001)
         self.optimizer = self._optimizer(self._lr,
                                          *self.optimizer_params[0],
                                          **self.optimizer_params[1])
         with tfnn.name_scope('train'):
             self._train_op = self.optimizer.minimize(self.loss,
                                                      self.global_step,
                                                      name='train_op')
         # initialize all variables
         self._init = tfnn.initialize_all_variables()
         self.sess = tfnn.Session()
         self.sess.run(self._init)
Example #18
    def _init_loss(self):
        with tfnn.name_scope('predictions'):
            if self.method == 'softmax':
                self.predictions = tfnn.nn.softmax(
                    self.layers_final_output.iloc[-1], name='predictions')
            elif self.method == 'sigmoid':
                self.predictions = tfnn.nn.sigmoid(
                    self.layers_final_output.iloc[-1], name='predictions')
        with tfnn.name_scope('loss'):
            if self.method == 'softmax':
                self.cross_entropy = tfnn.nn.softmax_cross_entropy_with_logits(
                    self.layers_final_output.iloc[-1],
                    self.target_placeholder,
                    name='xentropy')
            elif self.method == 'sigmoid':
                self.cross_entropy = tfnn.nn.sigmoid_cross_entropy_with_logits(
                    self.layers_final_output.iloc[-1],
                    self.target_placeholder,
                    name='xentropy')
            else:
                raise ValueError(
                    "method should be one of ['softmax', 'sigmoid']")
            self.loss = tfnn.reduce_mean(self.cross_entropy,
                                         name='xentropy_mean')

            if self.reg == 'l2':
                with tfnn.name_scope('l2_reg'):
                    regularizers = 0
                    for W in self.Ws:
                        regularizers += tfnn.nn.l2_loss(W, name='l2_reg')
                    regularizers *= self.l2_placeholder
                with tfnn.name_scope('l2_loss'):
                    self.loss += regularizers

            tfnn.scalar_summary('loss', self.loss)
Example #19
    def _set_confusion_metrics(self):
        # from https://cloud.google.com/solutions/machine-learning-with-financial-time-series-data
        # for one-hot data
        with tfnn.name_scope('f1_score'):
            predictions = tfnn.argmax(self.network.predictions, 1)
            actuals = tfnn.argmax(self.network.target_placeholder, 1)

            ones_like_actuals = tfnn.ones_like(actuals)
            zeros_like_actuals = tfnn.zeros_like(actuals)
            ones_like_predictions = tfnn.ones_like(predictions)
            zeros_like_predictions = tfnn.zeros_like(predictions)

            tp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)
                    ), "float"))

            tn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)
                    ), "float"))

            fp = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, zeros_like_actuals),
                        tfnn.equal(predictions, ones_like_predictions)
                    ), "float"))

            fn = tfnn.reduce_sum(
                tfnn.cast(
                    tfnn.logical_and(
                        tfnn.equal(actuals, ones_like_actuals),
                        tfnn.equal(predictions, zeros_like_predictions)
                    ), "float"))

            self.recall = tp / (tp + fn)
            self.precision = tp / (tp + fp)

            self.f1 = tfnn.div(2 * (self.precision * self.recall),
                               (self.precision + self.recall), name='f1_score')
            tfnn.scalar_summary('f1_score', self.f1)
            tfnn.scalar_summary('precision', self.precision)
            tfnn.scalar_summary('recall', self.recall)
Example #20
    def __init__(self, input_size, output_size, do_dropout, do_l2, ntype):
        self.normalizer = Normalizer()
        self.input_size = input_size
        self.output_size = output_size
        self.global_step = tfnn.Variable(0, trainable=False)
        if do_dropout and do_l2:
            raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        elif do_l2:
            self.reg = 'l2'
        else:
            self.reg = None

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(dtype=tfnn.float32,
                                                     shape=[None, self.input_size],
                                                     name='x_input')
            self.target_placeholder = tfnn.placeholder(dtype=tfnn.float32,
                                                       shape=[None, self.output_size],
                                                       name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)
                _reg_value = self.keep_prob_placeholder
            elif do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_value', self.l2_placeholder)
                _reg_value = self.l2_placeholder
            else:
                _reg_value = None

        self.layers_configs = {
            'type': ['input'],
            'name': ['input_layer'],
            'neural_structure': [{'input_size': self.input_size, 'output_size': self.input_size}],
            'ntype': ntype,
        }
        self.layers_results = {
            'reg_value': _reg_value,
            'Layer': [None],
            'Wx_plus_b': [None],
            'activated': [None],
            'dropped': [None],
            'final': [self.data_placeholder]
        }
Example #21
 def _set_r2(self):
     if isinstance(self.network, tfnn.RegNetwork):
         with tfnn.name_scope('r2_score'):
             self.ys_mean = ys_mean = tfnn.reduce_mean(self.network.target_placeholder,
                                                       reduction_indices=[0],
                                                       name='ys_mean')
             self.ss_tot = ss_tot = tfnn.reduce_sum(
                 tfnn.square(self.network.target_placeholder - ys_mean),
                 reduction_indices=[0], name='total_sum_squares')
             # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
             self.ss_res = ss_res = tfnn.reduce_sum(
                 tfnn.square(self.network.target_placeholder - self.network.predictions),
                 reduction_indices=[0], name='residual_sum_squares')
             self.aaa = ss_res / ss_tot
             self.r2 = tfnn.reduce_mean(
                 tfnn.sub(tfnn.ones_like(ss_res, dtype=tfnn.float32), (ss_res / ss_tot)),
                 name='coefficient_of_determination')
             tfnn.scalar_summary('r2_score', self.r2)
Example #22
 def __init__(self, network):
     self.network = network
     if isinstance(self.network, tfnn.ClfNetwork):
         with tfnn.name_scope('accuracy'):
             with tfnn.name_scope('correct_prediction'):
                 correct_prediction = tfnn.equal(
                     tfnn.argmax(network.predictions, 1),
                     tfnn.argmax(network.target_placeholder, 1),
                     name='correct_prediction')
             with tfnn.name_scope('accuracy'):
                 self.accuracy = tfnn.reduce_mean(tfnn.cast(
                     correct_prediction, tfnn.float32),
                                                  name='accuracy')
             tfnn.scalar_summary('accuracy', self.accuracy)
     elif isinstance(self.network, tfnn.RegNetwork):
         self.first_time_lm = True
         self.first_time_soc = True
     with tfnn.name_scope('r2_score'):
         with tfnn.name_scope('ys_mean'):
             ys_mean = tfnn.reduce_mean(network.target_placeholder,
                                        reduction_indices=[0],
                                        name='ys_mean')
         with tfnn.name_scope('total_sum_squares'):
             ss_tot = tfnn.reduce_sum(
                 tfnn.square(network.target_placeholder - ys_mean),
                 reduction_indices=[0],
                 name='total_sum_squares')
         # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
         with tfnn.name_scope('residual_sum_squares'):
             ss_res = tfnn.reduce_sum(
                 tfnn.square(network.target_placeholder -
                             network.predictions),
                 reduction_indices=[0],
                 name='residual_sum_squares')
         with tfnn.name_scope('coefficient_of_determination'):
             self.r2_score = tfnn.sub(tfnn.constant(1, dtype=tfnn.float32),
                                      (ss_res / ss_tot)[0],
                                      name='coefficient_of_determination')
         tfnn.scalar_summary('r2_score', self.r2_score)
Example #23
    def __init__(self, n_inputs, n_outputs, input_dtype, output_dtype, output_activator,
                 do_dropout, do_l2, seed=None):
        self.normalizer = Normalizer()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.input_dtype = input_dtype
        self.output_dtype = output_dtype
        self.output_activator = output_activator
        if do_dropout and do_l2:
            raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        elif do_l2:
            self.reg = 'l2'
        else:
            self.reg = None
        self.seed = seed

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(dtype=input_dtype, shape=[None, n_inputs], name='x_input')
            self.target_placeholder = tfnn.placeholder(dtype=output_dtype, shape=[None, n_outputs], name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)
            if do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_lambda', self.l2_placeholder)
        self.layers_type = pd.Series([])
        self.layers_output = pd.Series([])
        self.layers_activated_output = pd.Series([])
        self.layers_dropped_output = pd.Series([])
        self.layers_final_output = pd.Series([])
        self.Ws = pd.Series([])
        self.bs = pd.Series([])
        self.record_activators = pd.Series([])
        self.record_neurons = []
        self.last_layer_neurons = n_inputs
        self.last_layer_outputs = self.data_placeholder
        self.layer_number = 1
        self.has_output_layer = False
        self._is_output_layer = False
Example #24
    def __init__(self, n_inputs, n_outputs, input_dtype, output_dtype, output_activator,
                 do_dropout, do_l2, seed=None):
        self.normalizer = Normalizer()
        self.n_inputs = n_inputs
        self.n_outputs = n_outputs
        self.input_dtype = input_dtype
        self.output_dtype = output_dtype
        self.output_activator = output_activator
        if do_dropout and do_l2:
            raise ValueError('Cannot do dropout and l2 at once. Choose only one of them.')
        if do_dropout:
            self.reg = 'dropout'
        elif do_l2:
            self.reg = 'l2'
        else:
            self.reg = None
        self.seed = seed

        with tfnn.name_scope('inputs'):
            self.data_placeholder = tfnn.placeholder(dtype=input_dtype, shape=[None, n_inputs], name='x_input')
            self.target_placeholder = tfnn.placeholder(dtype=output_dtype, shape=[None, n_outputs], name='y_input')
            if do_dropout:
                self.keep_prob_placeholder = tfnn.placeholder(dtype=tfnn.float32)
                tfnn.scalar_summary('dropout_keep_probability', self.keep_prob_placeholder)
            if do_l2:
                self.l2_placeholder = tfnn.placeholder(tfnn.float32)
                tfnn.scalar_summary('l2_lambda', self.l2_placeholder)
        self.layers_output = pd.Series([])
        self.layers_activated_output = pd.Series([])
        self.layers_dropped_output = pd.Series([])
        self.layers_final_output = pd.Series([])
        self.Ws = pd.Series([])
        self.bs = pd.Series([])
        self.record_activators = pd.Series([])
        self.record_neurons = []
        self.last_layer_neurons = n_inputs
        self.last_layer_outputs = self.data_placeholder
        self.hidden_layer_number = 1
        self.has_output_layer = False
        self._is_output_layer = False
Example #25
    def _check_image_shape(self, layers_configs, layers_results):
        """
        Has an effect only on the first conv layer.
        """
        if self.image_shape is not None:
            if len(layers_configs['type']) == 1:
                if isinstance(self.image_shape, tuple):
                    self.image_shape = list(self.image_shape)
                elif not isinstance(self.image_shape, list):
                    raise ValueError('image_shape can only be a tuple or list')

                # image shape has to be (x, y, channel)
                layers_configs['neural_structure'][-1]['output_size'] = self.image_shape
                _xs_placeholder = layers_results['final'][-1]
                replaced_image_shape = self.image_shape.copy()
                replaced_image_shape.insert(0, -1)
                with tfnn.name_scope('reshape_inputs'):
                    _image_placeholder = tfnn.reshape(_xs_placeholder, replaced_image_shape)
                layers_results['activated'][-1] = _image_placeholder
                layers_results['final'][-1] = _image_placeholder
            else:
                raise IndexError('This is not the first conv layer, leave image_shape as default')
Example #26
 def _set_r2(self):
     if isinstance(self.network, tfnn.RegNetwork):
         with tfnn.name_scope('r2_score'):
             self.ys_mean = ys_mean = tfnn.reduce_mean(
                 self.network.target_placeholder,
                 reduction_indices=[0],
                 name='ys_mean')
             self.ss_tot = ss_tot = tfnn.reduce_sum(
                 tfnn.square(self.network.target_placeholder - ys_mean),
                 reduction_indices=[0],
                 name='total_sum_squares')
             # ss_reg = np.sum(np.square(predictions-ys_mean), axis=0)
             self.ss_res = ss_res = tfnn.reduce_sum(
                 tfnn.square(self.network.target_placeholder -
                             self.network.predictions),
                 reduction_indices=[0],
                 name='residual_sum_squares')
             self.aaa = ss_res / ss_tot
             self.r2 = tfnn.reduce_mean(tfnn.sub(
                 tfnn.ones_like(ss_res, dtype=tfnn.float32),
                 (ss_res / ss_tot)),
                                        name='coefficient_of_determination')
             tfnn.scalar_summary('r2_score', self.r2)
Example #27
    def construct(self, layers_configs, layers_results):
        self._check_image_shape(layers_configs, layers_results)
        self.name = self._check_name(layers_configs)
        # in conv, the _in_size should be the [length, width, channels]
        _in_size = layers_configs['neural_structure'][-1]['output_size']
        with tfnn.variable_scope(self.name):
            with tfnn.variable_scope('weights') as weights_scope:
                self.W = self._weight_variable(
                    [
                        self.patch_x,   # patch length
                        self.patch_y,   # patch width
                        _in_size[-1],   # input channels
                        self.n_filters  # number of filters
                    ],
                    self.w_initial)
                tfnn.histogram_summary(self.name + '/weights', self.W)

                # the image summary for visualizing filters
                weights_scope.reuse_variables()
                weights = tfnn.get_variable('weights', trainable=False)
                # scale weights to [0 255] and convert to uint8 (maybe change scaling?)
                x_min = tfnn.reduce_min(weights)
                x_max = tfnn.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tfnn.image.convert_image_dtype(
                    weights_0_to_1, dtype=tfnn.uint8)
                # to tf.image_summary format [batch_size, height, width, channels]
                W_transposed = tfnn.transpose(weights_0_to_255_uint8,
                                              [3, 0, 1, 2])
                # image Tensor must be 4-D with last dim 1, 3, or 4,
                # (n_filter, length, width, channel)
                channels_to_look = 3
                if W_transposed._shape[-1] > channels_to_look:
                    n_chunks = int(W_transposed._shape[-1] // channels_to_look)
                    W_transposed = tfnn.split(
                        3, n_chunks,
                        W_transposed[:, :, :, :n_chunks * channels_to_look])[0]
                # display up to 10 (max_images) of the n_filters as images
                tfnn.image_summary(self.name + '/filters',
                                   W_transposed,
                                   max_images=10)

            with tfnn.variable_scope('biases'):
                self.b = self._bias_variable([
                    self.n_filters,
                ])
                tfnn.histogram_summary(self.name + '/biases', self.b)

            with tfnn.name_scope('Wx_plus_b'):
                product = tfnn.nn.conv2d(
                    input=layers_results['final'][-1],
                    filter=self.W,
                    strides=[1, self.strides[0], self.strides[1], 1],
                    padding=self.padding) \
                    + self.b

            if self.activator is None:
                activated_product = product
            else:
                activated_product = self.activator(product)
            tfnn.histogram_summary(self.name + '/activated_product',
                                   activated_product)

        # pooling process
        with tfnn.name_scope('pooling'):
            pooled_product, _out_size = self.pooling_layer.pool(
                image=activated_product,
                layer_size=_in_size,
                n_filters=self.n_filters)
            tfnn.histogram_summary(self.name + '/pooled_product',
                                   pooled_product)

        _do_dropout = layers_configs['params'][0]['do_dropout']
        if _do_dropout and self.dropout_layer:
            _keep_prob = layers_results['reg_value']
            dropped_product = tfnn.nn.dropout(pooled_product,
                                              _keep_prob,
                                              name='dropout')
            final_product = dropped_product  # don't have to rescale it back, tf dropout has done this
        else:
            dropped_product = None
            final_product = pooled_product

        self.configs_dict = {
            'type': 'conv',
            'name': self.name,
            'neural_structure': {
                'input_size': _in_size,
                'output_size': _out_size
            },
            'params': self._params,
        }
        self.results_dict = {
            'Layer': self,
            'Wx_plus_b': product,
            'activated': activated_product,
            'dropped': dropped_product,
            'final': final_product
        }
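
The filter-visualization block rescales the weights to [0, 1] and moves the filter axis first, so each filter becomes one image in the summary batch. A NumPy sketch of that transform:

    import numpy as np

    def filters_to_images(W):
        """(patch_x, patch_y, in_ch, n_filters) -> (n_filters, patch_x, patch_y, in_ch),
        rescaled to [0, 1] for display (NumPy sketch)."""
        w01 = (W - W.min()) / (W.max() - W.min())  # scale weights to [0, 1]
        return np.transpose(w01, (3, 0, 1, 2))     # one image per filter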
Example #28
    def _construct(self, n_neurons, layers_configs, layers_results):
        self.name = self._check_name(layers_configs)
        _input_size = layers_configs['neural_structure'][-1][
            'output_size']  # this is from last layer
        with tfnn.variable_scope(self.name):

            with tfnn.variable_scope('weights') as weights_scope:
                self.W = self._weight_variable([_input_size, n_neurons],
                                               initialize=self.w_initial)
                tfnn.histogram_summary(self.name + '/weights', self.W)

                # the image summary for visualizing filters
                weights_scope.reuse_variables()
                # weights shape [n_inputs, n_hidden_units]
                weights = tfnn.get_variable('weights', trainable=False)
                # scale weights to [0 255] and convert to uint8 (maybe change scaling?)
                x_min = tfnn.reduce_min(weights)
                x_max = tfnn.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tfnn.image.convert_image_dtype(
                    weights_0_to_1, dtype=tfnn.uint8)
                # to tf.image_summary format [batch_size, height, width, channels]
                # (1, n_neurons, weights, 1)
                W_expanded = tfnn.expand_dims(
                    tfnn.expand_dims(weights_0_to_255_uint8, 0), 3)
                tfnn.image_summary(self.name + '/weights', W_expanded)

            with tfnn.variable_scope('biases'):
                self.b = self._bias_variable([
                    n_neurons,
                ])
                tfnn.histogram_summary(self.name + '/biases', self.b)

            with tfnn.name_scope('Wx_plus_b'):
                product = tfnn.add(tfnn.matmul(layers_results['final'][-1],
                                               self.W,
                                               name='Wx'),
                                   self.b,
                                   name='Wx_add_b')

            if self.activator is None:
                activated_product = product
            else:
                activated_product = self.activator(product)
            tfnn.histogram_summary(self.name + '/activated_product',
                                   activated_product)

            _do_dropout = layers_configs['params'][0]['do_dropout']
            if _do_dropout and self.dropout_layer:
                _keep_prob = layers_results['reg_value']
                dropped_product = tfnn.nn.dropout(activated_product,
                                                  _keep_prob,
                                                  name='dropout')
                final_product = dropped_product  # don't have to rescale it back, tf dropout has done this
            else:
                dropped_product = None
                final_product = activated_product

        self.configs_dict = {
            'type': self.layer_type,
            'name': self.name,
            'neural_structure': {
                'input_size': _input_size,
                'output_size': n_neurons
            },
            'params': self._params,
        }
        self.results_dict = {
            'Layer': self,
            'Wx_plus_b': product,
            'activated': activated_product,
            'dropped': dropped_product,
            'final': final_product
        }
Example #29
    def _construct(self, n_neurons, layers_configs, layers_results):
        self.name = self._check_name(layers_configs)
        _input_size = layers_configs['neural_structure'][-1]['output_size']  # this is from last layer
        with tfnn.variable_scope(self.name):

            with tfnn.variable_scope('weights') as weights_scope:
                self.W = self._weight_variable([_input_size, n_neurons], initialize=self.w_initial)
                tfnn.histogram_summary(self.name + '/weights', self.W)

                # the image summary for visualizing filters
                weights_scope.reuse_variables()
                # weights shape [n_inputs, n_hidden_units]
                weights = tfnn.get_variable('weights', trainable=False)
                # scale weights to [0 255] and convert to uint8 (maybe change scaling?)
                x_min = tfnn.reduce_min(weights)
                x_max = tfnn.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tfnn.image.convert_image_dtype(weights_0_to_1, dtype=tfnn.uint8)
                # to tf.image_summary format [batch_size, height, width, channels]
                # (1, n_neurons, weights, 1)
                W_expanded = tfnn.expand_dims(
                    tfnn.expand_dims(weights_0_to_255_uint8, 0), 3)
                tfnn.image_summary(self.name + '/weights', W_expanded)

            with tfnn.variable_scope('biases'):
                self.b = self._bias_variable([n_neurons, ])
                tfnn.histogram_summary(self.name + '/biases', self.b)

            with tfnn.name_scope('Wx_plus_b'):
                product = tfnn.add(tfnn.matmul(layers_results['final'][-1], self.W, name='Wx'),
                                   self.b, name='Wx_add_b')

            if self.activator is None:
                activated_product = product
            else:
                activated_product = self.activator(product)
            tfnn.histogram_summary(self.name + '/activated_product', activated_product)

            _do_dropout = layers_configs['params'][0]['do_dropout']
            if _do_dropout and self.dropout_layer:
                _keep_prob = layers_results['reg_value']
                dropped_product = tfnn.nn.dropout(activated_product,
                                                  _keep_prob,
                                                  name='dropout')
                final_product = dropped_product         # don't have to rescale it back, tf dropout has done this
            else:
                dropped_product = None
                final_product = activated_product

        self.configs_dict = {
            'type': self.layer_type,
            'name': self.name,
            'neural_structure': {'input_size': _input_size, 'output_size': n_neurons},
            'params': self._params,
        }
        self.results_dict = {
            'Layer': self,
            'Wx_plus_b': product,
            'activated': activated_product,
            'dropped': dropped_product,
            'final': final_product
        }
Example #30
    def construct(self, layers_configs, layers_results):
        self._check_image_shape(layers_configs, layers_results)
        self.name = self._check_name(layers_configs)
        # in conv, the _in_size should be the [length, width, channels]
        _in_size = layers_configs['neural_structure'][-1]['output_size']
        with tfnn.variable_scope(self.name):
            with tfnn.variable_scope('weights') as weights_scope:
                self.W = self._weight_variable([
                    self.patch_x,   # patch length
                    self.patch_y,   # patch width
                    _in_size[-1],   # input channels
                    self.n_filters  # number of filters
                ], self.w_initial)
                tfnn.histogram_summary(self.name + '/weights', self.W)

                # the image summary for visualizing filters
                weights_scope.reuse_variables()
                weights = tfnn.get_variable('weights', trainable=False)
                # scale weights to [0 255] and convert to uint8 (maybe change scaling?)
                x_min = tfnn.reduce_min(weights)
                x_max = tfnn.reduce_max(weights)
                weights_0_to_1 = (weights - x_min) / (x_max - x_min)
                weights_0_to_255_uint8 = tfnn.image.convert_image_dtype(weights_0_to_1, dtype=tfnn.uint8)
                # to tf.image_summary format [batch_size, height, width, channels]
                W_transposed = tfnn.transpose(weights_0_to_255_uint8, [3, 0, 1, 2])
                # image Tensor must be 4-D with last dim 1, 3, or 4,
                # (n_filter, length, width, channel)
                channels_to_look = 3
                if W_transposed._shape[-1] > channels_to_look:
                    n_chunks = int(W_transposed._shape[-1] // channels_to_look)
                    W_transposed = tfnn.split(3, n_chunks,
                                              W_transposed[:, :, :, :n_chunks * channels_to_look])[0]
                # display up to 10 (max_images) of the n_filters as images
                tfnn.image_summary(self.name + '/filters',
                                   W_transposed, max_images=10)

            with tfnn.variable_scope('biases'):
                self.b = self._bias_variable([self.n_filters, ])
                tfnn.histogram_summary(self.name + '/biases', self.b)

            with tfnn.name_scope('Wx_plus_b'):
                product = tfnn.nn.conv2d(
                    input=layers_results['final'][-1],
                    filter=self.W,
                    strides=[1, self.strides[0], self.strides[1], 1],
                    padding=self.padding) \
                    + self.b

            if self.activator is None:
                activated_product = product
            else:
                activated_product = self.activator(product)
            tfnn.histogram_summary(self.name + '/activated_product', activated_product)

        # pooling process
        with tfnn.name_scope('pooling'):
            pooled_product, _out_size = self.pooling_layer.pool(
                image=activated_product, layer_size=_in_size, n_filters=self.n_filters)
            tfnn.histogram_summary(self.name + '/pooled_product', pooled_product)

        _do_dropout = layers_configs['params'][0]['do_dropout']
        if _do_dropout and self.dropout_layer:
            _keep_prob = layers_results['reg_value']
            dropped_product = tfnn.nn.dropout(
                pooled_product,
                _keep_prob,
                name='dropout')
            final_product = dropped_product         # don't have to rescale it back, tf dropout has done this
        else:
            dropped_product = None
            final_product = pooled_product

        self.configs_dict = {
            'type': 'conv',
            'name': self.name,
            'neural_structure': {'input_size': _in_size, 'output_size': _out_size},
            'params': self._params,
        }
        self.results_dict = {
            'Layer': self,
            'Wx_plus_b': product,
            'activated': activated_product,
            'dropped': dropped_product,
            'final': final_product}