def fit(self, x, y, steps=None, monitors=None, logdir=None):
  """Trains the model built from `model_fn` on the provided data.

  Note: the first call constructs the graph and initializes variables;
  subsequent calls keep training the same model, mirroring scikit-learn's
  partial_fit() contract. Create a new estimator to restart learning.

  Args:
    x: matrix or tensor of shape [n_samples, n_features...], or an iterator
      yielding feature arrays. The training input samples.
    y: vector or matrix [n_samples] or [n_samples, n_outputs], or an iterator
      yielding target arrays (class labels for classification, real numbers
      for regression).
    steps: int, number of steps to train. Falls back to `self.steps` when
      None or 0.
    monitors: list of `BaseMonitor` objects used to report training progress
      and trigger early stopping.
    logdir: directory for log files usable by visualization tools.

  Returns:
    Returns self.
  """
  if logdir is not None:
    self._model_dir = logdir
  feeder = setup_train_data_feeder(
      x, y, n_classes=self.n_classes, batch_size=self.batch_size)
  self._data_feeder = feeder
  self._train_model(
      input_fn=feeder.input_builder,
      feed_fn=feeder.get_feed_dict_fn(),
      steps=steps or self.steps,
      monitors=monitors)
  return self
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') return input_fn, feed_fn
def evaluate(self, x=None, y=None, input_fn=None, feed_fn=None,
             batch_size=None, steps=None, metrics=None, name=None):
  """Evaluates given model with provided evaluation data.

  See superclass Estimator for more details.

  Args:
    x: features.
    y: targets.
    input_fn: Input function.
    feed_fn: Function creating a feed dict every time it is called.
    batch_size: minibatch size to use on the input.
    steps: Number of steps for which to evaluate model.
    metrics: Dict of metric ops to run. If None, the default metrics are used.
    name: Name of the evaluation.

  Returns:
    Returns `dict` with evaluation results.
  """
  # Bug fix: the caller-supplied `feed_fn` used to be unconditionally
  # overwritten with None; it is now only replaced when `x` is given.
  if x is not None:
    eval_data_feeder = setup_train_data_feeder(
        x, y, n_classes=self.n_classes, batch_size=self.batch_size, epochs=1)
    input_fn, feed_fn = (eval_data_feeder.input_builder,
                         eval_data_feeder.get_feed_dict_fn())
  # NOTE(review): `metrics` is accepted and documented but never forwarded
  # to _evaluate_model here — confirm whether the underlying implementation
  # should receive it.
  return self._evaluate_model(
      input_fn=input_fn, feed_fn=feed_fn, steps=steps or self.steps,
      name=name)
def fit(self, x, y=None, monitors=None, logdir=None, steps=None):
  """Trains a GMM clustering on x.

  Note: See Estimator for logic for continuous training and graph
    construction across multiple calls to fit.

  Args:
    x: training input matrix of shape [n_samples, n_features].
    y: labels. Should be None (clustering is unsupervised).
    monitors: list of `Monitor` objects used to report training progress
      and trigger early stopping.
    logdir: directory for log files usable by visualization tools.
    steps: number of training steps; overrides the constructor value when
      not None.

  Returns:
    Returns self.
  """
  if logdir is not None:
    self._model_dir = logdir
  feeder = data_feeder.setup_train_data_feeder(
      x, None, self._num_clusters, self.batch_size)
  self._data_feeder = feeder
  self._train_model(
      input_fn=feeder.input_builder,
      feed_fn=feeder.get_feed_dict_fn(),
      steps=steps or self.steps,
      monitors=monitors,
      init_feed_fn=feeder.get_feed_dict_fn())
  return self
def fit(self, x, y=None, monitors=None, logdir=None, steps=None,
        batch_size=128):
  """Trains a k-means clustering on x.

  Note: See Estimator for logic for continuous training and graph
    construction across multiple calls to fit.

  Args:
    x: training input matrix of shape [n_samples, n_features].
    y: labels. Should be None (clustering is unsupervised).
    monitors: Monitor objects used to report training progress and trigger
      early stopping.
    logdir: directory for log files usable by visualization tools.
    steps: number of training steps; overrides the constructor value when
      not None.
    batch_size: mini-batch size used by the data feeder.

  Returns:
    Returns self.
  """
  assert y is None  # clustering takes no labels
  if logdir is not None:
    self._model_dir = logdir
  feeder = data_feeder.setup_train_data_feeder(
      x, None, self._num_clusters, batch_size)
  self._data_feeder = feeder
  self._train_model(
      input_fn=feeder.input_builder,
      feed_fn=feeder.get_feed_dict_fn(),
      steps=steps,
      monitors=monitors,
      init_feed_fn=feeder.get_feed_dict_fn())
  return self
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions.""" if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError( 'Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') return input_fn, feed_fn
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
            axis=1):
  """Predict class or regression for `x`."""
  if x is None:
    result = super(DeprecatedMixin, self)._infer_model(
        input_fn=input_fn, outputs=outputs)
  else:
    feeder = setup_train_data_feeder(
        x, None, n_classes=None,
        batch_size=batch_size or self.batch_size, shuffle=False, epochs=1)
    result = super(DeprecatedMixin, self)._infer_model(
        input_fn=feeder.input_builder,
        feed_fn=feeder.get_feed_dict_fn(),
        outputs=outputs)
  # Collapse per-class scores to class ids for multi-class models.
  if axis is not None and self.__deprecated_n_classes > 1:
    return np.argmax(result, axis)
  return result
def evaluate(self, x=None, y=None, input_fn=None, steps=None):
  """See base class."""
  feed_fn = None
  if x is not None:
    # Raw arrays given: build a data feeder and derive both functions.
    feeder = setup_train_data_feeder(
        x, y, n_classes=self.n_classes, batch_size=self.batch_size,
        epochs=1)
    input_fn = feeder.input_builder
    feed_fn = feeder.get_feed_dict_fn()
  return self._evaluate_model(
      input_fn=input_fn, feed_fn=feed_fn, steps=steps or self.steps)
def fit(self, x, y=None, monitors=None, logdir=None, steps=None,
        batch_size=128, relative_tolerance=None):
  """Trains a k-means clustering on x.

  Note: See Estimator for logic for continuous training and graph
    construction across multiple calls to fit.

  Args:
    x: training input matrix of shape [n_samples, n_features].
    y: labels. Should be None.
    monitors: Monitor object to print training progress and invoke early
      stopping
    logdir: the directory to save the log file that can be used for optional
      visualization.
    steps: number of training steps. If not None, overrides the value passed
      in constructor.
    batch_size: mini-batch size to use. Requires `use_mini_batch=True`.
    relative_tolerance: A relative tolerance of change in the loss between
      iterations. Stops learning if the loss changes less than this amount.
      Note that this may not work correctly if use_mini_batch=True.

  Returns:
    Returns self.
  """
  assert y is None
  if logdir is not None:
    self._model_dir = logdir
  self._data_feeder = data_feeder.setup_train_data_feeder(
      x, None, self._num_clusters,
      batch_size if self._use_mini_batch else None)
  if relative_tolerance is not None:
    # Bug fix: build a new list instead of `monitors += [...]`, which
    # extended the caller's monitor list in place.
    monitors = list(monitors or []) + [
        self._StopWhenConverged(relative_tolerance)]
  # Make sure that we will eventually terminate.
  assert ((monitors is not None and len(monitors)) or (steps is not None) or
          (self.steps is not None))
  self._train_model(input_fn=self._data_feeder.input_builder,
                    feed_fn=self._data_feeder.get_feed_dict_fn(),
                    steps=steps,
                    monitors=monitors,
                    init_feed_fn=self._data_feeder.get_feed_dict_fn())
  return self
def evaluate(self, x=None, y=None, input_fn=None, steps=None):
  """See base class."""
  if x is None:
    feed_fn = None
  else:
    eval_feeder = setup_train_data_feeder(
        x, y, n_classes=self.n_classes, batch_size=self.batch_size,
        epochs=1)
    input_fn = eval_feeder.input_builder
    feed_fn = eval_feeder.get_feed_dict_fn()
  eval_steps = steps or self.steps
  return self._evaluate_model(
      input_fn=input_fn, feed_fn=feed_fn, steps=eval_steps)
def predict(self, x=None, input_fn=None, batch_size=None, outputs=None,
            axis=1):
  """Predict class or regression for `x`."""
  infer = super(DeprecatedMixin, self)._infer_model
  if x is not None:
    feeder = setup_train_data_feeder(
        x, None, n_classes=None,
        batch_size=batch_size or self.batch_size, shuffle=False, epochs=1)
    result = infer(input_fn=feeder.input_builder,
                   feed_fn=feeder.get_feed_dict_fn(),
                   outputs=outputs)
  else:
    result = infer(input_fn=input_fn, outputs=outputs)
  if self.__deprecated_n_classes > 1 and axis is not None:
    # Multi-class model: reduce per-class scores to the winning class.
    return np.argmax(result, axis)
  return result
def _predict(self, x, axis=-1, batch_size=None):
  """Run inference over x and return predictions as a numpy array."""
  if self._graph is None:
    raise NotFittedError()
  # Use the batch size for fitting if the user did not specify one.
  if batch_size is None:
    batch_size = self.batch_size
  feeder = setup_train_data_feeder(
      x, None, n_classes=None, batch_size=batch_size, shuffle=False,
      epochs=1)
  pred_iter = self._infer_model(
      input_fn=feeder.input_builder,
      feed_fn=feeder.get_feed_dict_fn(),
      as_iterable=True)
  preds = np.array(list(pred_iter))
  if self.n_classes > 1 and axis != -1:
    preds = preds.argmax(axis=axis)
  return preds
def _get_input_fn(x, y, input_fn, feed_fn, batch_size, shuffle=False, epochs=1): """Make inputs into input and feed functions. Args: x: Numpy, Pandas or Dask matrix or iterable. y: Numpy, Pandas or Dask matrix or iterable. input_fn: Pre-defined input function for training data. feed_fn: Pre-defined data feeder function. batch_size: Size to split data into parts. Must be >= 1. shuffle: Whether to shuffle the inputs. epochs: Number of epochs to run. Returns: Data input and feeder function based on training data. Raises: ValueError: Only one of `(x & y)` or `input_fn` must be provided. """ if input_fn is None: if x is None: raise ValueError('Either x or input_fn must be provided.') if contrib_framework.is_tensor(x) or (y is not None and contrib_framework.is_tensor(y)): raise ValueError('Inputs cannot be tensors. Please provide input_fn.') if feed_fn is not None: raise ValueError('Can not provide both feed_fn and x or y.') df = data_feeder.setup_train_data_feeder(x, y, n_classes=None, batch_size=batch_size, shuffle=shuffle, epochs=epochs) return df.input_builder, df.get_feed_dict_fn() if (x is not None) or (y is not None): raise ValueError('Can not provide both input_fn and x or y.') if batch_size is not None: raise ValueError('Can not provide both input_fn and batch_size.') return input_fn, feed_fn
def _get_input_fn(x, y, batch_size=None):
  """Wrap (x, y) into an (input function, feed function) pair."""
  feeder = data_feeder.setup_train_data_feeder(
      x, y, n_classes=None, batch_size=batch_size)
  return feeder.input_builder, feeder.get_feed_dict_fn()
def _get_input_fn(x, y, batch_size=None):
  """Build input and feed functions from raw training data."""
  df = data_feeder.setup_train_data_feeder(
      x, y, n_classes=None, batch_size=batch_size)
  input_builder = df.input_builder
  feed_dict_fn = df.get_feed_dict_fn()
  return input_builder, feed_dict_fn
# Training hyperparameters.
learning_rate = 0.0001
training_epochs = 2000
batch_size = 32
display_step = 1  # presumably how often progress is reported — TODO confirm against the training loop

# Convert to float32 arrays (assumes X_train/Y_train expose `.values`,
# e.g. pandas DataFrames — TODO confirm).
X = X_train.values.astype(np.float32)
Y = Y_train.values.astype(np.float32)
# Re-split: first 600 rows for training, the rest for testing.
X_train = X[:600]
Y_train = Y[:600]
X_test = X[600:]
Y_test = Y[600:]

# Data feeder yields mini-batches of (features, targets) with 2 classes.
dataiter = feeder.setup_train_data_feeder(x=X_train, y=Y_train, n_classes=2,
                                          batch_size=batch_size)
x, y = dataiter.input_builder()
feed_dict_fun = dataiter.get_feed_dict_fn()

# Network Parameters
n_hidden_1 = 256  # 1st layer number of features
n_hidden_2 = 128  # 2nd layer number of features
n_hidden_3 = 64  # 2nd layer number of features
n_input = 9  # data input (feature shape: 9)
n_classes = 2  # one-hot encoded output


# NOTE(review): this definition appears truncated in the visible chunk —
# only the first hidden layer is shown; the remaining layers presumably
# follow outside this view.
def multilayer_perceptron(x, weights, biases):
  # Hidden layer with RELU activation
  layer_1 = tf.add(tf.matmul(x, weights['h1']), biases['b1'])