Example #1
def setUp(self):
    super(ObfuscateTest, self).setUp()
    self._graph = Graph()
    with self._graph.as_default():
        constant([1], dtype=int32, name='c1')
        constant([1], dtype=int32, name='c2')
        constant([1], dtype=int32, name='c3')
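
This setUp fragment was cut out of its test class; Example #2 below shows the full class. A hedged sketch of the imports the fragment assumes (obfuscate_graph_def is project-local, so no module path is given for it):

from unittest import TestCase                      # assumed base for ObfuscateTest
from tensorflow import constant, int32             # TF 1.x top-level names
from tensorflow.python.framework.ops import Graph  # the import used in Example #9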
Example #2
class ObfuscateTest(TestCase):
    def setUp(self):
        super(ObfuscateTest, self).setUp()
        self._graph = Graph()
        with self._graph.as_default():
            constant([1], dtype=int32, name='c1')
            constant([1], dtype=int32, name='c2')
            constant([1], dtype=int32, name='c3')

    def testObfuscate(self):
        ng, mp = obfuscate_graph_def(self._graph.as_graph_def())
        self.assertEqual(set(mp.values()), {'a', 'b', 'c'})

    def testKeep(self):
        ng, mp = obfuscate_graph_def(self._graph.as_graph_def(),
                                     ['c1', ('c2', 'd2')])
        self.assertEqual(set(mp.values()), {'a', 'c1', 'd2'})
Example #3
def load_tf_model(model_path, tags=(tag_constants.SERVING,), config=None):
  """Loads the model at the specified path.

  Args:
    model_path: the path to either session_bundle or SavedModel
    tags: the tags that determine which model to load.
    config: tf.ConfigProto containing session configuration options.

  Returns:
    A pair of (Session, map<string, SignatureDef>) objects.

  Raises:
    PredictionError: if the model could not be loaded.
  """
  if loader.maybe_saved_model_directory(model_path):
    try:
      logging.info("Importing tensorflow.contrib in load_tf_model")
      # pylint: disable=redefined-outer-name,unused-variable,g-import-not-at-top
      import tensorflow as tf
      from tensorflow.python.framework.ops import Graph
      # pylint: enable=redefined-outer-name,unused-variable,g-import-not-at-top
      if tf.__version__.startswith("1.0"):
        session = tf_session.Session(target="", graph=None, config=config)
      else:
        session = tf_session.Session(target="", graph=Graph(), config=config)
      meta_graph = loader.load(session, tags=list(tags), export_dir=model_path)
    except Exception as e:  # pylint: disable=broad-except
      raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                            "Failed to load the model due to bad model data."
                            " tags: %s\n%s" % (list(tags), str(e)))
  else:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "Cloud ML only supports TF 1.0 or above and models "
                          "saved in SavedModel format.")

  if session is None:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "Failed to create session when loading the model")

  if not meta_graph.signature_def:
    raise PredictionError(PredictionError.FAILED_TO_LOAD_MODEL,
                          "MetaGraph must have at least one signature_def.")

  # Remove invalid signatures from the signature map.
  invalid_signatures = []
  for signature_name in meta_graph.signature_def:
    try:
      signature = meta_graph.signature_def[signature_name]
      _update_dtypes(session.graph, signature.inputs)
      _update_dtypes(session.graph, signature.outputs)
    except ValueError as e:
      logging.warn("Error updating signature %s: %s", signature_name, str(e))
      invalid_signatures.append(signature_name)
  for signature_name in invalid_signatures:
    del meta_graph.signature_def[signature_name]

  return session, meta_graph.signature_def
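
A hedged usage sketch of the loader above (the SavedModel path is a placeholder; the returned map holds SignatureDef protos keyed by signature name):

session, signatures = load_tf_model("/tmp/exported_savedmodel")
for name, sig in signatures.items():
    # sig.inputs and sig.outputs map aliases to TensorInfo protos.
    print(name, sorted(sig.inputs), sorted(sig.outputs))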
Example #4
def setUp(self):
    super(QuantizeTest, self).setUp()
    with Session(graph=Graph()) as session:
        with session.graph.as_default():
            # initial [-1, 1) random matrices
            x = constant(2 * random((1, 4096)) - 1,
                         dtype=float32,
                         name='c1')
            y = constant(2 * random((4096, 1)) - 1,
                         dtype=float32,
                         name='c2')
            # matmul to scalar
            z = matmul(x, y, name='c3')
        self._desire_z = session.run(z)
        self._quantized_raw = quantize_graph_def(session.graph_def,
                                                 output_nodes=['c3'],
                                                 only='raw')
        self._quantized_simple = quantize_graph_def(session.graph_def,
                                                    only='simple')
        self._quantized_full = quantize_graph_def(session.graph_def)
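
A hedged sketch of how these quantized graphs could be checked against the float result, assuming the _exec helper from Example #10 accepts the objects quantize_graph_def returns (testQuantizedIsClose and the 10% tolerance are hypothetical):

def testQuantizedIsClose(self):
    for quantized in (self._quantized_raw,
                      self._quantized_simple,
                      self._quantized_full):
        z = _exec(quantized)  # runs node 'c3:0'; see Example #10
        # Quantization is lossy, so only a loose relative tolerance applies.
        rel_err = abs(float(z) - float(self._desire_z)) / abs(float(self._desire_z))
        self.assertLess(rel_err, 0.1)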
Example #5
    def partial_fit(self, X, y, monitor=None, sample_weight=None, **kwargs):
        """Fit the model on a batch of training data.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples, n_targets]
            Target values
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.
        sample_weight : numpy array of shape [n_samples,]
            Per-sample weights. Re-scale the loss per sample.
            Higher weights force the estimator to put more emphasis
            on these samples. Sample weights are normalized per-batch.

        Returns
        -------
        self : returns an instance of self.
        """

        X, y = self._check_inputs(X, y)
        assert self.batch_size > 0, "batch_size <= 0"

        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)

        # Initialize the model if it hasn't been already by a previous call.
        if self._is_fitted:
            y = self._transform_targets(y)
        else:
            self._random_state = check_random_state(self.random_state)
            self._fit_targets(y, **kwargs)
            y = self._transform_targets(y)

            self.is_sparse_ = sp.issparse(X)
            self.input_layer_sz_ = X.shape[1]

            # Set which layer the transform function points to.
            if self.transform_layer_index is None:
                self._transform_layer_index = len(self.hidden_units) - 1
            else:
                self._transform_layer_index = self.transform_layer_index

            if (self._transform_layer_index < -1 or
                    self._transform_layer_index >= len(self.hidden_units)):
                raise ValueError(
                    "`transform_layer_index` must be in the range "
                    "[-1, len(hidden_units)-1]!")

            # Instantiate the graph.  TensorFlow seems easier to use by just
            # adding to the default graph, and as_default lets you temporarily
            # set a graph to be treated as the default graph.
            self.graph_ = Graph()
            with self.graph_.as_default():
                tf_random_seed.set_random_seed(
                    self._random_state.randint(0, 10000000))

                tf.get_variable_scope().set_initializer(
                    tf.contrib.layers.xavier_initializer())

                self._build_tf_graph()

                # Train model parameters.
                self._session.run(tf.global_variables_initializer())

            # Set an attribute to mark this as at least partially fitted.
            self._is_fitted = True

        # Train the model with the given data.
        with self.graph_.as_default():
            n_examples = X.shape[0]
            indices = np.arange(n_examples)

            for epoch in range(self.n_epochs):
                self._random_state.shuffle(indices)
                for start_idx in range(0, n_examples, self.batch_size):
                    batch_ind = indices[start_idx:start_idx + self.batch_size]

                    if sample_weight is None:
                        batch_sample_weight = None
                    else:
                        batch_sample_weight = sample_weight[batch_ind]

                    feed_dict = self._make_feed_dict(
                        X[batch_ind],
                        y[batch_ind],
                        sample_weight=batch_sample_weight)
                    obj_val, _ = self._session.run(
                        [self._obj_func, self._train_step],
                        feed_dict=feed_dict)
                    _LOGGER.debug("objective: %.4f, epoch: %d, idx: %d",
                                  obj_val, epoch, start_idx)

                _LOGGER.info("objective: %.4f, epoch: %d, idx: %d",
                             obj_val, epoch, start_idx)

                if monitor:
                    stop_early = monitor(epoch, self, {'loss': obj_val})
                    if stop_early:
                        _LOGGER.info(
                            "stopping early due to monitor function.")
                        return self

        return self
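
The monitor hook described in the docstring is enough for simple early stopping; a hedged sketch (est stands for any estimator exposing this partial_fit, X and y are as in the docstring, and the three-epoch patience is arbitrary):

losses = []

def stop_when_flat(epoch, estimator, info):
    # info is the {'loss': obj_val} dict partial_fit passes in.
    losses.append(info['loss'])
    # Stop once three consecutive epochs fail to beat the earlier best loss.
    return len(losses) > 3 and min(losses[-3:]) >= min(losses[:-3])

est.partial_fit(X, y, monitor=stop_when_flat)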
Example #6
class MLPBaseEstimator(TFPicklingBase, BaseEstimator):
    """Base class for multilayer perceptron models

    Notes
    -----
    There is currently no dropout between the sparse input layer and first
    hidden layer. Dropout on the sparse input layer would undo the benefits of
    sparsity because the dropout layer is dense.
    """

    def _transform_targets(self, y):
        # This can be overridden to, e.g., map label names to indices when
        # fitting a classifier.
        return y

    def fit(self, X, y, monitor=None, sample_weight=None):
        """Fit the model.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples, n_targets]
            Target values
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.
        sample_weight : numpy array of shape [n_samples,]
            Per-sample weights. Re-scale the loss per sample.
            Higher weights force the estimator to put more emphasis
            on these samples. Sample weights are normalized per-batch.

        Returns
        -------
        self : returns an instance of self.
        """
        _LOGGER.info("Fitting %s", re.sub(r"\s+", r" ", repr(self)))

        # Mark the model as not fitted (i.e., not fully initialized based on
        # the data).
        self._is_fitted = False

        # Call partial fit, which will initialize and then train the model.
        return self.partial_fit(X, y,
                                monitor=monitor,
                                sample_weight=sample_weight)

    def _fit_targets(self, y):
        # This can be overridden to set instance variables that pertain to the
        # targets (e.g., an array of class labels).
        pass

    def partial_fit(self, X, y, monitor=None, sample_weight=None, **kwargs):
        """Fit the model on a batch of training data.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples, n_targets]
            Target values
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.
        sample_weight : numpy array of shape [n_samples,]
            Per-sample weights. Re-scale the loss per sample.
            Higher weights force the estimator to put more emphasis
            on these samples. Sample weights are normalized per-batch.

        Returns
        -------
        self : returns an instance of self.
        """

        X, y = self._check_inputs(X, y)
        assert self.batch_size > 0, "batch_size <= 0"

        if sample_weight is not None:
            sample_weight = check_array(sample_weight, ensure_2d=False)

        # Initialize the model if it hasn't been already by a previous call.
        if self._is_fitted:
            y = self._transform_targets(y)
        else:
            self._random_state = check_random_state(self.random_state)
            self._fit_targets(y, **kwargs)
            y = self._transform_targets(y)

            self.is_sparse_ = sp.issparse(X)
            self.input_layer_sz_ = X.shape[1]

            # Set which layer the transform function points to.
            if self.transform_layer_index is None:
                self._transform_layer_index = len(self.hidden_units) - 1
            else:
                self._transform_layer_index = self.transform_layer_index

            if (self._transform_layer_index < -1 or
                    self._transform_layer_index >= len(self.hidden_units)):
                raise ValueError(
                    "`transform_layer_index` must be in the range "
                    "[-1, len(hidden_units)-1]!")

            # Instantiate the graph.  TensorFlow seems easier to use by just
            # adding to the default graph, and as_default lets you temporarily
            # set a graph to be treated as the default graph.
            self.graph_ = Graph()
            with self.graph_.as_default():
                tf_random_seed.set_random_seed(
                    self._random_state.randint(0, 10000000))

                tf.get_variable_scope().set_initializer(
                    tf.contrib.layers.xavier_initializer())

                self._build_tf_graph()

                # Train model parameters.
                self._session.run(tf.global_variables_initializer())

            # Set an attribute to mark this as at least partially fitted.
            self._is_fitted = True

        # Train the model with the given data.
        with self.graph_.as_default():
            n_examples = X.shape[0]
            indices = np.arange(n_examples)

            for epoch in range(self.n_epochs):
                self._random_state.shuffle(indices)
                for start_idx in range(0, n_examples, self.batch_size):
                    batch_ind = indices[start_idx:start_idx + self.batch_size]

                    if sample_weight is None:
                        batch_sample_weight = None
                    else:
                        batch_sample_weight = sample_weight[batch_ind]

                    feed_dict = self._make_feed_dict(
                        X[batch_ind],
                        y[batch_ind],
                        sample_weight=batch_sample_weight)
                    obj_val, _ = self._session.run(
                        [self._obj_func, self._train_step],
                        feed_dict=feed_dict)
                    _LOGGER.debug("objective: %.4f, epoch: %d, idx: %d",
                                  obj_val, epoch, start_idx)

                _LOGGER.info("objective: %.4f, epoch: %d, idx: %d",
                             obj_val, epoch, start_idx)

                if monitor:
                    stop_early = monitor(epoch, self, {'loss': obj_val})
                    if stop_early:
                        _LOGGER.info(
                            "stopping early due to monitor function.")
                        return self

        return self

    def _check_inputs(self, X, y):
        # Check that the input X is an array or sparse matrix.
        # Convert to CSR if it's in another sparse format.
        X, y = check_X_y(X, y, accept_sparse='csr', multi_output=True)

        if y.ndim == 2 and y.shape[1] == 1:
            # Following
            # https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/ensemble/forest.py#L223,
            # issue a warning if an Nx1 array was provided.
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
            y = y[:, 0]
        return X, y

    def __getstate__(self):
        # Handles TF persistence
        state = super(MLPBaseEstimator, self).__getstate__()

        # Add attributes of this estimator
        state.update(dict(activation=self.activation,
                          batch_size=self.batch_size,
                          keep_prob=self.keep_prob,
                          hidden_units=self.hidden_units,
                          random_state=self.random_state,
                          n_epochs=self.n_epochs,
                          solver=self.solver,
                          solver_kwargs=self.solver_kwargs,
                          transform_layer_index=self.transform_layer_index
                          ))

        # Add fitted attributes if the model has been fitted.
        if self._is_fitted:
            state['input_layer_sz_'] = self.input_layer_sz_
            state['is_sparse_'] = self.is_sparse_
            state['_random_state'] = self._random_state
            state['_transform_layer_index'] = self._transform_layer_index

        return state

    @abstractmethod
    def _init_model_output(self, t):
        pass

    @abstractmethod
    def _init_model_objective_fn(self, t):
        pass

    def _set_up_graph(self):
        """Initialize TF objects (needed before fitting or restoring)."""

        # A placeholder to control dropout for training vs. prediction.
        self._keep_prob = \
            tf.placeholder(dtype=np.float32, shape=(), name="keep_prob")

        # Input layers.
        if self.is_sparse_:
            self._input_indices = \
                tf.placeholder(np.int64, [None, 2], "input_indices")
            self._input_values = \
                tf.placeholder(np.float32, [None], "input_values")
            self._input_shape = \
                tf.placeholder(np.int64, [2], "input_shape")
            # t will be the current layer as we build up the graph below.
            t = tf.SparseTensor(self._input_indices, self._input_values,
                                self._input_shape)
        else:
            self._input_values = \
                tf.placeholder(np.float32, [None, self.input_layer_sz_],
                               "input_values")
            t = self._input_values

        # Hidden layers.
        for i, layer_sz in enumerate(self.hidden_units):
            if self.is_sparse_ and i == 0:
                t = affine(t, layer_sz, input_size=self.input_layer_sz_,
                           scope='layer_%d' % i, sparse_input=True)
            else:
                if self.keep_prob != 1.0:
                    t = tf.nn.dropout(t, keep_prob=self._keep_prob)
                t = affine(t, layer_sz, scope='layer_%d' % i)

            t = t if self.activation is None else self.activation(t)

            # Set transformed layer to hidden layer
            if self._transform_layer_index == i:
                self._transform_layer = t

        # The output layer and objective function depend on the model
        # (e.g., classification vs regression).
        t = self._init_model_output(t)

        # Set the transform layer to the output logits if there are no
        # hidden layers.
        if self._transform_layer_index == -1:
            self._transform_layer = t

        self._sample_weight = \
            tf.placeholder(np.float32, [None], "sample_weight")

        self._init_model_objective_fn(t)

        self._train_step = self.solver(
            **self.solver_kwargs if self.solver_kwargs else {}).minimize(
            self._obj_func)

    def _make_feed_dict(self, X, y=None, sample_weight=None):
        # Make the dictionary mapping tensor placeholders to input data.

        if self.is_sparse_:
            indices, values = _sparse_matrix_data(X)

            feed_dict = {
                self._input_indices: indices,
                self._input_values: values,
                self._input_shape: X.shape
            }
        else:
            feed_dict = {
                self._input_values: X
            }

        if y is None:
            # If y is None, then we are doing prediction and should fix
            # dropout.
            feed_dict[self._keep_prob] = 1.0
        else:
            feed_dict[self.input_targets_] = y
            feed_dict[self._keep_prob] = self.keep_prob

        if sample_weight is None:
            feed_dict[self._sample_weight] = np.ones(X.shape[0])
        else:
            feed_dict[self._sample_weight] = sample_weight

        return feed_dict

    def _compute_output(self, X):
        """Get the outputs of the network, for use in prediction methods."""

        if not self._is_fitted:
            raise NotFittedError("Call fit before prediction")

        X = check_array(X, accept_sparse=['csr', 'dok', 'lil', 'csc', 'coo'])

        if self.is_sparse_:
            # For sparse input, make the input a CSR matrix since it can be
            # indexed by row.
            X = X.tocsr() if sp.issparse(X) else sp.csr_matrix(X)
        elif sp.issparse(X):
            # Convert sparse input to dense.
            X = X.todense().A

        # Make predictions in batches.
        pred_batches = []
        start_idx = 0
        n_examples = X.shape[0]
        with self.graph_.as_default():
            while start_idx < n_examples:
                X_batch = \
                    X[start_idx:min(start_idx + self.batch_size, n_examples)]
                feed_dict = self._make_feed_dict(X_batch)
                start_idx += self.batch_size
                pred_batches.append(
                    self._session.run(self.output_layer_, feed_dict=feed_dict))
        y_pred = np.concatenate(pred_batches)
        return y_pred

    @abstractmethod
    def predict(self, X):
        pass

    def transform(self, X, y=None):
        """Transforms input into hidden layer outputs of users choice.

        Parameters
        ----------
        X : {array-like, sparse matrix}, shape = (n_samples, n_features)
            Examples to make predictions about.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        if not self._is_fitted:
            raise NotFittedError("Call fit before transform")

        X = check_array(X, accept_sparse=['csr', 'dok', 'lil', 'csc', 'coo'])

        if self.is_sparse_:
            # For sparse input, make the input a CSR matrix since it can be
            # indexed by row.
            X = X.tocsr() if sp.issparse(X) else sp.csr_matrix(X)
        elif sp.issparse(X):
            # Convert sparse input to dense.
            X = X.todense().A

        # Make predictions in batches.
        embed_batches = []
        start_idx = 0
        n_examples = X.shape[0]
        with self.graph_.as_default():
            while start_idx < n_examples:
                X_batch = \
                    X[start_idx:min(start_idx + self.batch_size, n_examples)]
                feed_dict = self._make_feed_dict(X_batch)
                start_idx += self.batch_size
                embed_batches.append(self._session.run(
                    self._transform_layer, feed_dict=feed_dict))
        embedding = np.concatenate(embed_batches)
        if embedding.ndim == 1:
            embedding = embedding.reshape(-1, 1)
        return embedding

    def fit_transform(self, X, y=None, **fit_params):
        """Fit to data, then transform it.

        Fits transformer to X and y with optional parameters fit_params
        and returns a transformed version of X.

        Parameters
        ----------
        X : numpy array of shape [n_samples, n_features]
            Training set.
        y : numpy array of shape [n_samples]
            Target values.

        Returns
        -------
        X_new : numpy array of shape [n_samples, n_features_new]
            Transformed array.
        """
        # Non-optimized default implementation; override when a better
        # method is possible for a given estimator.
        if y is None:
            # fit method of arity 1 (unsupervised transformation)
            return self.fit(X, **fit_params).transform(X)
        else:
            # fit method of arity 2 (supervised transformation)
            return self.fit(X, y, **fit_params).transform(X)
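
The two abstract hooks leave the output layer and objective to subclasses; a minimal hedged sketch of a concrete regressor (SimpleRegressor is hypothetical, but input_targets_ and output_layer_ follow the attribute names _make_feed_dict and _compute_output expect):

class SimpleRegressor(MLPBaseEstimator):
    def _init_model_output(self, t):
        t = affine(t, 1, scope='output_layer')
        self.input_targets_ = tf.placeholder(np.float32, [None], "targets")
        self.output_layer_ = tf.squeeze(t, axis=1)
        return self.output_layer_

    def _init_model_objective_fn(self, t):
        # Sample-weighted mean squared error, using the sample_weight
        # placeholder created in _set_up_graph.
        squared_error = tf.square(self.input_targets_ - t)
        self._obj_func = tf.reduce_mean(self._sample_weight * squared_error)

    def predict(self, X):
        return self._compute_output(X)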
Example #7
    def partial_fit(self, X, y, monitor=None, **kwargs):
        """Fit the model on a batch of training data.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples, n_targets]
            Target values
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.

        Returns
        -------
        self : returns an instance of self.
        """

        X, y = self._check_inputs(X, y)
        assert self.batch_size > 0, "batch_size <= 0"

        # Initialize the model if it hasn't been already by a previous call.
        if self._is_fitted:
            y = self._transform_targets(y)
        else:
            self._random_state = check_random_state(self.random_state)
            self._fit_targets(y, **kwargs)
            y = self._transform_targets(y)

            self.is_sparse_ = sp.issparse(X)
            self.input_layer_sz_ = X.shape[1]

            # Instantiate the graph.  TensorFlow seems easier to use by just
            # adding to the default graph, and as_default lets you temporarily
            # set a graph to be treated as the default graph.
            self.graph_ = Graph()
            with self.graph_.as_default():
                tf_random_seed.set_random_seed(
                    self._random_state.randint(0, 10000000))

                tf.get_variable_scope().set_initializer(
                    tf.uniform_unit_scaling_initializer(self.init_scale))

                self._build_tf_graph()

                # Train model parameters.
                self._session.run(tf.global_variables_initializer())

            # Set an attribute to mark this as at least partially fitted.
            self._is_fitted = True

        # Train the model with the given data.
        with self.graph_.as_default():
            n_examples = X.shape[0]
            indices = np.arange(n_examples)

            for epoch in range(self.n_epochs):
                self._random_state.shuffle(indices)
                for start_idx in range(0, n_examples, self.batch_size):
                    batch_ind = indices[start_idx:start_idx + self.batch_size]
                    feed_dict = self._make_feed_dict(X[batch_ind],
                                                     y[batch_ind])
                    obj_val, _ = self._session.run(
                        [self._obj_func, self._train_step],
                        feed_dict=feed_dict)
                    _LOGGER.debug("objective: %.4f, epoch: %d, idx: %d",
                                  obj_val, epoch, start_idx)

                _LOGGER.info("objective: %.4f, epoch: %d, idx: %d",
                             obj_val, epoch, start_idx)

                if monitor:
                    stop_early = monitor(epoch, self, {'loss': obj_val})
                    if stop_early:
                        _LOGGER.info(
                            "stopping early due to monitor function.")
                        return self

        return self
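
Because the else branch above runs only on the first call, partial_fit supports incremental training over a stream of minibatches; a hedged usage sketch (SomeMLPEstimator and minibatches are hypothetical):

est = SomeMLPEstimator(batch_size=32, n_epochs=1)
for X_chunk, y_chunk in minibatches:   # any iterable of (X, y) array pairs
    est.partial_fit(X_chunk, y_chunk)  # initializes once, then keeps training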
Example #8
class MLPBaseEstimator(TFPicklingBase, BaseEstimator):
    """Base class for multilayer perceptron models

    Notes
    -----
    There is currently no dropout between the sparse input layer and first
    hidden layer. Dropout on the sparse input layer would undo the benefits of
    sparsity because the dropout layer is dense.
    """

    def _transform_targets(self, y):
        # This can be overridden to, e.g., map label names to indices when
        # fitting a classifier.
        return y

    def fit(self, X, y, monitor=None):
        """Fit the model.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples, n_targets]
            Target values
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.

        Returns
        -------
        self : returns an instance of self.
        """
        _LOGGER.info("Fitting %s", re.sub(r"\s+", r" ", repr(self)))

        # Mark the model as not fitted (i.e., not fully initialized based on
        # the data).
        self._is_fitted = False

        # Call partial fit, which will initialize and then train the model.
        return self.partial_fit(X, y, monitor=monitor)

    def _fit_targets(self, y):
        # This can be overridden to set instance variables that pertain to the
        # targets (e.g., an array of class labels).
        pass

    def partial_fit(self, X, y, monitor=None, **kwargs):
        """Fit the model on a batch of training data.

        Parameters
        ----------
        X : numpy array or sparse matrix of shape [n_samples, n_features]
            Training data
        y : numpy array of shape [n_samples, n_targets]
            Target values
        monitor : callable, optional
            The monitor is called after each iteration with the current
            iteration, a reference to the estimator, and a dictionary with
            {'loss': loss_value} representing the loss calculated by the
            objective function at this iteration.
            If the callable returns True the fitting procedure is stopped.
            The monitor can be used for various things such as computing
            held-out estimates, early stopping, model introspection,
            and snapshotting.

        Returns
        -------
        self : returns an instance of self.
        """

        X, y = self._check_inputs(X, y)
        assert self.batch_size > 0, "batch_size <= 0"

        # Initialize the model if it hasn't been already by a previous call.
        if self._is_fitted:
            y = self._transform_targets(y)
        else:
            self._random_state = check_random_state(self.random_state)
            self._fit_targets(y, **kwargs)
            y = self._transform_targets(y)

            self.is_sparse_ = sp.issparse(X)
            self.input_layer_sz_ = X.shape[1]

            # Instantiate the graph.  TensorFlow seems easier to use by just
            # adding to the default graph, and as_default lets you temporarily
            # set a graph to be treated as the default graph.
            self.graph_ = Graph()
            with self.graph_.as_default():
                tf_random_seed.set_random_seed(
                    self._random_state.randint(0, 10000000))

                tf.get_variable_scope().set_initializer(
                    tf.uniform_unit_scaling_initializer(self.init_scale))

                self._build_tf_graph()

                # Train model parameters.
                self._session.run(tf.global_variables_initializer())

            # Set an attribute to mark this as at least partially fitted.
            self._is_fitted = True

        # Train the model with the given data.
        with self.graph_.as_default():
            n_examples = X.shape[0]
            indices = np.arange(n_examples)

            for epoch in range(self.n_epochs):
                self._random_state.shuffle(indices)
                for start_idx in range(0, n_examples, self.batch_size):
                    batch_ind = indices[start_idx:start_idx + self.batch_size]
                    feed_dict = self._make_feed_dict(X[batch_ind],
                                                     y[batch_ind])
                    obj_val, _ = self._session.run(
                        [self._obj_func, self._train_step],
                        feed_dict=feed_dict)
                    _LOGGER.debug("objective: %.4f, epoch: %d, idx: %d",
                                  obj_val, epoch, start_idx)

                _LOGGER.info("objective: %.4f, epoch: %d, idx: %d",
                             obj_val, epoch, start_idx)

                if monitor:
                    stop_early = monitor(epoch, self, {'loss': obj_val})
                    if stop_early:
                        _LOGGER.info(
                            "stopping early due to monitor function.")
                        return self

        return self

    def _check_inputs(self, X, y):
        # Check that the input X is an array or sparse matrix.
        # Convert to CSR if it's in another sparse format.
        X, y = check_X_y(X, y, accept_sparse='csr', multi_output=True)

        if y.ndim == 2 and y.shape[1] == 1:
            # Following
            # https://github.com/scikit-learn/scikit-learn/blob/51a765a/sklearn/ensemble/forest.py#L223,
            # issue a warning if an Nx1 array was provided.
            warn("A column-vector y was passed when a 1d array was"
                 " expected. Please change the shape of y to "
                 "(n_samples,), for example using ravel().",
                 DataConversionWarning, stacklevel=2)
            y = y[:, 0]
        return X, y

    def __getstate__(self):
        # Handles TF persistence
        state = super(MLPBaseEstimator, self).__getstate__()

        # Add attributes of this estimator
        state.update(dict(activation=self.activation,
                          batch_size=self.batch_size,
                          keep_prob=self.keep_prob,
                          hidden_units=self.hidden_units,
                          init_scale=self.init_scale,
                          random_state=self.random_state,
                          n_epochs=self.n_epochs,
                          solver=self.solver,
                          solver_kwargs=self.solver_kwargs
                          ))

        # Add fitted attributes if the model has been fitted.
        if self._is_fitted:
            state['input_layer_sz_'] = self.input_layer_sz_
            state['is_sparse_'] = self.is_sparse_
            state['_random_state'] = self._random_state

        return state

    @abstractmethod
    def _init_model_output(self, t):
        pass

    @abstractmethod
    def _init_model_objective_fn(self, t):
        pass

    def _set_up_graph(self):
        """Initialize TF objects (needed before fitting or restoring)."""

        # A placeholder to control dropout for training vs. prediction.
        self._keep_prob = \
            tf.placeholder(dtype=np.float32, shape=(), name="keep_prob")

        # Input layers.
        if self.is_sparse_:
            self._input_indices = \
                tf.placeholder(np.int64, [None, 2], "input_indices")
            self._input_values = \
                tf.placeholder(np.float32, [None], "input_values")
            self._input_shape = \
                tf.placeholder(np.int64, [2], "input_shape")
            # t will be the current layer as we build up the graph below.
            t = tf.SparseTensor(self._input_indices, self._input_values,
                                self._input_shape)
        else:
            self._input_values = \
                tf.placeholder(np.float32, [None, self.input_layer_sz_],
                               "input_values")
            t = self._input_values

        # Hidden layers.
        for i, layer_sz in enumerate(self.hidden_units):
            if self.is_sparse_ and i == 0:
                t = affine(t, layer_sz, input_size=self.input_layer_sz_,
                           scope='layer_%d' % i, sparse_input=True)
            else:
                if self.keep_prob != 1.0:
                    t = tf.nn.dropout(t, keep_prob=self._keep_prob)
                t = affine(t, layer_sz, scope='layer_%d' % i)
            t = t if self.activation is None else self.activation(t)

        # The output layer and objective function depend on the model
        # (e.g., classification vs regression).
        t = self._init_model_output(t)
        self._init_model_objective_fn(t)

        self._train_step = self.solver(
            **self.solver_kwargs if self.solver_kwargs else {}).minimize(
            self._obj_func)

    def _make_feed_dict(self, X, y=None):
        # Make the dictionary mapping tensor placeholders to input data.

        if self.is_sparse_:
            indices, values = _sparse_matrix_data(X)

            feed_dict = {
                self._input_indices: indices,
                self._input_values: values,
                self._input_shape: X.shape
            }
        else:
            feed_dict = {
                self._input_values: X
            }

        if y is None:
            # If y is None, then we are doing prediction and should fix
            # dropout.
            feed_dict[self._keep_prob] = 1.0
        else:
            feed_dict[self.input_targets_] = y
            feed_dict[self._keep_prob] = self.keep_prob

        return feed_dict

    def _compute_output(self, X):
        """Get the outputs of the network, for use in prediction methods."""

        if not self._is_fitted:
            raise NotFittedError("Call fit before prediction")

        X = check_array(X, accept_sparse=['csr', 'dok', 'lil', 'csc', 'coo'])

        if self.is_sparse_:
            # For sparse input, make the input a CSR matrix since it can be
            # indexed by row.
            X = X.tocsr() if sp.issparse(X) else sp.csr_matrix(X)
        elif sp.issparse(X):
            # Convert sparse input to dense.
            X = X.todense().A

        # Make predictions in batches.
        pred_batches = []
        start_idx = 0
        n_examples = X.shape[0]
        with self.graph_.as_default():
            while start_idx < n_examples:
                X_batch = \
                    X[start_idx:min(start_idx + self.batch_size, n_examples)]
                feed_dict = self._make_feed_dict(X_batch)
                start_idx += self.batch_size
                pred_batches.append(
                    self._session.run(self.output_layer_, feed_dict=feed_dict))
        y_pred = np.concatenate(pred_batches)
        return y_pred

    @abstractmethod
    def predict(self, X):
        pass
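
_make_feed_dict relies on a _sparse_matrix_data helper to turn a sparse matrix into the indices/values pair that tf.SparseTensor expects; a hedged sketch of such a helper:

def _sparse_matrix_data(X):
    # Convert a scipy sparse matrix to COO form and emit the
    # (indices, values) layout tf.SparseTensor consumes.
    coo = X.tocoo()
    indices = np.column_stack((coo.row, coo.col)).astype(np.int64)
    return indices, coo.data.astype(np.float32)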
Example #9
import json
import os
import numpy as np
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from django.http import JsonResponse
from rest_framework.decorators import api_view
from rest_framework.response import Response
from tensorflow.python.client.session import Session
from tensorflow.python.framework.ops import Graph
from .serializers import FormSubmitSerializer, MedicineDetectSerializer

MODEL_PATH = 'model/model_inception.h5'

model_graph = Graph()
with model_graph.as_default():
    tf_session = Session()
    with tf_session.as_default():
        model = load_model(MODEL_PATH)
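
The Graph/Session pair is captured at import time so request handlers can later run predictions against the same graph; a hedged sketch of such a helper (predict_image, the 224x224 input size, and argmax decoding are assumptions about model_inception.h5):

def predict_image(img_path):
    img = image.load_img(img_path, target_size=(224, 224))
    x = preprocess_input(np.expand_dims(image.img_to_array(img), axis=0))
    # Re-enter the graph and session the model was loaded under.
    with model_graph.as_default():
        with tf_session.as_default():
            preds = model.predict(x)
    return int(np.argmax(preds, axis=1)[0])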


@api_view(['POST'])
def formsubmit(request):

    serializer = FormSubmitSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save()

    MedicineName = request.POST['MedicineName']
    Introduction = request.POST['Introduction']
Example #10
def _exec(g):
    with Session(graph=Graph()) as session:
        with session.graph.as_default():
            import_graph_def(g.graph, name='')
        feeds = feeds_of_graph(g)
        return session.run('c3:0', feed_dict=feeds)