Example #1
    def predict_stochastic(self, X, batch_size=128, verbose=0):
        '''Generate output predictions for the input samples
        batch by batch, using stochastic forward passes. If
        dropout was used during training, network units are
        dropped at random during prediction as well. This
        procedure can be used for MC dropout (see
        [ModelTest callbacks](callbacks.md)).

        # Arguments
            X: the input data, as a numpy array.
            batch_size: integer.
            verbose: verbosity mode, 0 or 1.

        # Returns
            A numpy array of predictions.

        # References
            - [Dropout: A simple way to prevent neural networks from overfitting](http://jmlr.org/papers/v15/srivastava14a.html)
            - [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](http://arxiv.org/abs/1506.02142)
        '''
        X = models.standardize_X(X)
        if self._predict_stochastic is None:  # we only get self.model after init
            self._predict_stochastic = K.function([self.model.X_test],
                                                  [self.model.y_train])
        return self.model._predict_loop(self._predict_stochastic, X,
                                        batch_size, verbose)[0]
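For context, MC dropout aggregates many stochastic forward passes into a predictive mean and spread. A minimal usage sketch, assuming `model` is an instance exposing the predict_stochastic method above and `X_test` is a numpy array (both names are illustrative):

    import numpy as np

    T = 50  # number of stochastic forward passes (assumed hyperparameter)
    # Dropout stays active in each pass, so predictions differ across passes.
    samples = np.array([model.predict_stochastic(X_test, batch_size=128)
                        for _ in range(T)])

    mc_mean = samples.mean(axis=0)  # MC-dropout predictive mean
    mc_std = samples.std(axis=0)    # per-output spread, a simple uncertainty proxy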
Example #2
    def fit(self, X, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True, show_accuracy=False):

        X = standardize_X(X)

        val_f = None
        val_ins = None
        if validation_data or validation_split:
            if show_accuracy:
                val_f = self._test_with_acc
            else:
                val_f = self._test

        if show_accuracy:
            f = self._train_with_acc
            out_labels = ['loss', 'acc']
        else:
            f = self._train
            out_labels = ['loss']

        ins = X  # + [y, sample_weight]
        metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
                         verbose=verbose, callbacks=callbacks,
                         val_f=val_f, val_ins=val_ins,
                         shuffle=shuffle, metrics=metrics)
Example #3
    def fit(self, X, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True, show_accuracy=False):

        X = standardize_X(X)
#        y = standardize_y(y)

        val_f = None
        val_ins = None
        if validation_data or validation_split:
            if show_accuracy:
                val_f = self._test_with_acc
            else:
                val_f = self._test
#        if validation_data:
#            if len(validation_data) == 2:
#                X_val, y_val = validation_data
#                X_val = standardize_X(X_val)
#                y_val = standardize_y(y_val)
#                sample_weight_val = np.ones(y_val.shape[:-1] + (1,))
#            elif len(validation_data) == 3:
#                X_val, y_val, sample_weight_val = validation_data
#                X_val = standardize_X(X_val)
#                y_val = standardize_y(y_val)
#                sample_weight_val = standardize_weights(y_val, sample_weight=sample_weight_val)
#            else:
#                raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val) or (X_val, y_val, sample_weight). \
#                    X_val may be a numpy array or a list of numpy arrays depending on your model input.")
#            val_ins = X_val + [y_val, sample_weight_val]
#
#        elif 0 < validation_split < 1:
#            split_at = int(len(X[0]) * (1 - validation_split))
#            X, X_val = (slice_X(X, 0, split_at), slice_X(X, split_at))
#            y, y_val = (slice_X(y, 0, split_at), slice_X(y, split_at))
#            if sample_weight is not None:
#                sample_weight, sample_weight_val = (slice_X(sample_weight, 0, split_at), slice_X(sample_weight, split_at))
#                sample_weight_val = standardize_weights(y_val, sample_weight=sample_weight_val)
#            else:
#                sample_weight_val = np.ones(y_val.shape[:-1] + (1,))
#            val_ins = X_val + [y_val, sample_weight_val]

        if show_accuracy:
            f = self._train_with_acc
            out_labels = ['loss', 'acc']
        else:
            f = self._train
            out_labels = ['loss']

        ins = X  # + [y, sample_weight]
        metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
                         verbose=verbose, callbacks=callbacks,
                         val_f=val_f, val_ins=val_ins,
                         shuffle=shuffle, metrics=metrics)
Example #4
    def fit(self,
            X,
            batch_size=128,
            nb_epoch=100,
            verbose=1,
            callbacks=[],
            validation_split=0.,
            validation_data=None,
            shuffle=True,
            show_accuracy=False):

        X = standardize_X(X)

        val_f = None
        val_ins = None
        if validation_data or validation_split:
            if show_accuracy:
                val_f = self._test_with_acc
            else:
                val_f = self._test

        if show_accuracy:
            f = self._train_with_acc
            out_labels = ['loss', 'acc']
        else:
            f = self._train
            out_labels = ['loss']

        ins = X  # + [y, sample_weight]
        metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        return self._fit(f,
                         ins,
                         out_labels=out_labels,
                         batch_size=batch_size,
                         nb_epoch=nb_epoch,
                         verbose=verbose,
                         callbacks=callbacks,
                         val_f=val_f,
                         val_ins=val_ins,
                         shuffle=shuffle,
                         metrics=metrics)
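A minimal invocation sketch for this work-in-progress fit (targets and sample weights are still commented out, so only the inputs are passed); `model` and `X` are illustrative names:

    # Runs nb_epoch passes over X in batches and reports the loss;
    # validation is not exercised here since val_ins is never populated.
    history = model.fit(X, batch_size=64, nb_epoch=10, verbose=1)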
Example #5
    def predict_stochastic(self, X, batch_size=128, verbose=0):
        '''Generate output predictions for the input samples
        batch by batch, using stochastic forward passes. If
        dropout was used during training, network units are
        dropped at random during prediction as well. This
        procedure can be used for MC dropout (see
        [ModelTest callbacks](callbacks.md)).

        # Arguments
            X: the input data, as a numpy array.
            batch_size: integer.
            verbose: verbosity mode, 0 or 1.

        # Returns
            A numpy array of predictions.

        # References
            - [Dropout: A simple way to prevent neural networks from overfitting](http://jmlr.org/papers/v15/srivastava14a.html)
            - [Dropout as a Bayesian Approximation: Representing Model Uncertainty in Deep Learning](http://arxiv.org/abs/1506.02142)
        '''
        X = models.standardize_X(X)
        if self._predict_stochastic is None:  # we only get self.model after init
            self._predict_stochastic = K.function([self.model.X_test],
                                                  [self.model.y_train])
        return self.model._predict_loop(self._predict_stochastic, X,
                                        batch_size, verbose)[0]
Example #6
    def fit(self, X, batch_size=128, nb_epoch=100, verbose=1, callbacks=[],
            validation_split=0., validation_data=None, shuffle=True, show_accuracy=False):
        X = standardize_X(X)
#        y = standardize_y(y)
#        sample_weight = standardize_weights(y, class_weight=class_weight, sample_weight=sample_weight)

        val_f = None
        val_ins = None
        if validation_data or validation_split:
            if show_accuracy:
                val_f = self._test_with_acc
            else:
                val_f = self._test
#        if validation_data:
#            try:
#                X_val, y_val = validation_data
#            except:
#                raise Exception("Invalid format for validation data; provide a tuple (X_val, y_val). \
#                    X_val may be a numpy array or a list of numpy arrays depending on your model input.")
#            X_val = standardize_X(X_val)
#            y_val = standardize_y(y_val)
#            val_ins = X_val + [y_val, np.ones(y_val.shape[:-1] + (1,))]

        if show_accuracy:
            f = self._train_with_acc
            out_labels = ['loss', 'acc']
        else:
            f = self._train
            out_labels = ['loss']

        ins = X  # + [y, sample_weight]
        metrics = ['loss', 'acc', 'val_loss', 'val_acc']
        return self._fit(f, ins, out_labels=out_labels, batch_size=batch_size, nb_epoch=nb_epoch,
                         verbose=verbose, callbacks=callbacks,
                         validation_split=validation_split, val_f=val_f, val_ins=val_ins,
                         shuffle=shuffle, metrics=metrics)
Example #7
    def train_on_batch(self, X):
        X = standardize_X(X)

        ins = X
        return self._train(*ins)
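A sketch of a manual training loop driven by the train_on_batch method above; `model`, `X_data`, and the loop bounds are illustrative assumptions:

    batch_size = 128
    nb_epoch = 10
    for epoch in range(nb_epoch):
        # Walk the data in contiguous slices; train_on_batch standardizes
        # each slice and runs a single update step on it.
        for i in range(0, len(X_data), batch_size):
            loss = model.train_on_batch(X_data[i:i + batch_size])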
Example #8
    def _predict(self, X, batch_size=128, verbose=0):
        X = standardize_X(X)
        return self._predict_loop(self._loss, X, batch_size, verbose)[0]