Example #1
0
 def testFprint(self):
   """Checks _sdca_fprint against golden fingerprint values.

   Each input string is mapped to a pair of int64 values; the expected
   numbers below are golden outputs (presumably a 128-bit fingerprint split
   into two signed 64-bit halves -- verify against the op's kernel).
   """
   with self._single_threaded_test_session():
     in_data = constant_op.constant(['abc', 'very looooooong string', 'def'])
     out_data = gen_sdca_ops._sdca_fprint(in_data)
     # One [a, b] int64 pair per input string, in input order.
     self.assertAllEqual([[4143508125394299908, -6879828354153669051],
                          [5849691694103072671, -4874542629849009556],
                          [603227410218889250, 8762207001949257490]],
                         out_data.eval())
 def testFprint(self):
   """Verifies that _sdca_fprint reproduces the golden fingerprints."""
   # Golden [a, b] int64 pair for each input string, in input order.
   expected_fingerprints = [
       [4143508125394299908, -6879828354153669051],
       [5849691694103072671, -4874542629849009556],
       [603227410218889250, 8762207001949257490],
   ]
   with self._single_threaded_test_session():
     strings = constant_op.constant(['abc', 'very looooooong string', 'def'])
     fingerprints = gen_sdca_ops._sdca_fprint(strings)
     self.assertAllEqual(expected_fingerprints, fingerprints.eval())
Example #3
0
    def minimize(self, global_step=None, name=None):
        """Add operations to train a linear model by minimizing the loss function.

        Args:
          global_step: Optional `Variable` to increment by one after the
            variables have been updated.
          name: Optional name for the returned operation.

        Returns:
          An Operation that updates the variables passed in the constructor.
        """
        # Technically, the op depends on a lot more than the variables,
        # but we'll keep the list short.
        with name_scope(name, 'sdca/minimize'):
            # Collect the per-column sparse feature tensors into parallel lists
            # for the _sdca_optimizer op.
            sparse_example_indices = []
            sparse_feature_indices = []
            sparse_features_values = []
            for sf in self._examples['sparse_features']:
                sparse_example_indices.append(sf.example_indices)
                sparse_feature_indices.append(sf.feature_indices)
                # If feature values are missing, sdca assumes a value of 1.0f.
                if sf.feature_values is not None:
                    sparse_features_values.append(sf.feature_values)

            # Hash example ids so they can key the per-example dual-state table.
            # pylint: disable=protected-access
            example_ids_hashed = gen_sdca_ops._sdca_fprint(
                convert_to_tensor(self._examples['example_ids']))
            # pylint: enable=protected-access
            example_state_data = self._hashtable.lookup(example_ids_hashed)
            # Solver returns example_state_update, new delta sparse_feature_weights
            # and delta dense_feature_weights.

            weights_tensor = self._convert_n_to_tensor(
                self._slots['unshrinked_sparse_features_weights'])
            sparse_weights = []
            sparse_indices = []
            for w, i in zip(weights_tensor, sparse_feature_indices):
                # Find the feature ids to lookup in the variables.
                with ops.device(w.device):
                    # Deduplicate feature ids (via unique on an int32 view, then
                    # cast back to int64) and gather only the weights the op needs.
                    sparse_indices.append(
                        math_ops.cast(
                            array_ops.unique(math_ops.cast(i,
                                                           dtypes.int32))[0],
                            dtypes.int64))
                    sparse_weights.append(
                        array_ops.gather(w, sparse_indices[-1]))

            # Run one outer iteration of the distributed SDCA solver.
            # pylint: disable=protected-access
            esu, sfw, dfw = gen_sdca_ops._sdca_optimizer(
                sparse_example_indices,
                sparse_feature_indices,
                sparse_features_values,
                self._convert_n_to_tensor(self._examples['dense_features']),
                convert_to_tensor(self._examples['example_weights']),
                convert_to_tensor(self._examples['example_labels']),
                sparse_indices,
                sparse_weights,
                self._convert_n_to_tensor(
                    self._slots['unshrinked_dense_features_weights']),
                example_state_data,
                loss_type=self._options['loss_type'],
                l1=self._options['symmetric_l1_regularization'],
                l2=self._symmetric_l2_regularization(),
                num_loss_partitions=self._num_loss_partitions(),
                num_inner_iterations=1)
            # pylint: enable=protected-access

            # Persist the updated example state, then apply the weight deltas:
            # scatter_add for the gathered sparse slices, assign_add for dense.
            with ops.control_dependencies([esu]):
                update_ops = [self._hashtable.insert(example_ids_hashed, esu)]
                # Update the weights before the proximal step.
                for w, i, u in zip(
                        self._slots['unshrinked_sparse_features_weights'],
                        sparse_indices, sfw):
                    update_ops.append(state_ops.scatter_add(w, i, u))
                for w, u in zip(
                        self._slots['unshrinked_dense_features_weights'], dfw):
                    update_ops.append(w.assign_add(u))

            # Group the updates, optionally incrementing global_step afterwards.
            if not global_step:
                return control_flow_ops.group(*update_ops)
            with ops.control_dependencies(update_ops):
                return state_ops.assign_add(global_step, 1, name=name).op
Example #4
0
  def minimize(self, global_step=None, name=None):
    """Add operations to train a linear model by minimizing the loss function.

    Args:
      global_step: Optional `Variable` to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation.

    Returns:
      An Operation that updates the variables passed in the constructor.
    """
    # Technically, the op depends on a lot more than the variables,
    # but we'll keep the list short.
    with name_scope(name, 'sdca/minimize'):
      # Per-column tensors describing the sparse features, gathered into
      # parallel lists for the solver op.
      example_indices_per_column = []
      feature_indices_per_column = []
      feature_values_per_column = []
      for sparse_feature in self._examples['sparse_features']:
        example_indices_per_column.append(sparse_feature.example_indices)
        feature_indices_per_column.append(sparse_feature.feature_indices)
        # If feature values are missing, sdca assumes a value of 1.0f.
        if sparse_feature.feature_values is not None:
          feature_values_per_column.append(sparse_feature.feature_values)

      # Hashed example ids key the per-example state table.
      # pylint: disable=protected-access
      hashed_example_ids = gen_sdca_ops._sdca_fprint(
          internal_convert_to_tensor(self._examples['example_ids']))
      # pylint: enable=protected-access
      example_state_data = self._hashtable.lookup(hashed_example_ids)

      # For each sparse column: deduplicate the feature ids and gather just
      # the weight slices the solver needs, on the weight tensor's device.
      sparse_weight_tensors = self._convert_n_to_tensor(
          self._slots['unshrinked_sparse_features_weights'])
      gathered_weights = []
      unique_feature_ids = []
      for weight_tensor, feature_indices in zip(sparse_weight_tensors,
                                                feature_indices_per_column):
        with ops.device(weight_tensor.device):
          ids_as_int32 = math_ops.cast(feature_indices, dtypes.int32)
          deduped_ids = math_ops.cast(
              array_ops.unique(ids_as_int32)[0], dtypes.int64)
          unique_feature_ids.append(deduped_ids)
          gathered_weights.append(
              array_ops.gather(weight_tensor, deduped_ids))

      # One outer SDCA iteration. The solver returns the updated per-example
      # state plus delta weights for the sparse and dense features.
      # pylint: disable=protected-access
      state_update, sparse_deltas, dense_deltas = gen_sdca_ops._sdca_optimizer(
          example_indices_per_column,
          feature_indices_per_column,
          feature_values_per_column,
          self._convert_n_to_tensor(self._examples['dense_features']),
          internal_convert_to_tensor(self._examples['example_weights']),
          internal_convert_to_tensor(self._examples['example_labels']),
          unique_feature_ids,
          gathered_weights,
          self._convert_n_to_tensor(
              self._slots['unshrinked_dense_features_weights']),
          example_state_data,
          loss_type=self._options['loss_type'],
          l1=self._options['symmetric_l1_regularization'],
          l2=self._symmetric_l2_regularization(),
          num_loss_partitions=self._num_loss_partitions(),
          num_inner_iterations=1)
      # pylint: enable=protected-access

      # Persist the example state, then apply the deltas: scatter_add for the
      # gathered sparse slices, assign_add for the dense weights.
      with ops.control_dependencies([state_update]):
        update_ops = [
            self._hashtable.insert(hashed_example_ids, state_update)
        ]
        for weight_var, ids, delta in zip(
            self._slots['unshrinked_sparse_features_weights'],
            unique_feature_ids, sparse_deltas):
          update_ops.append(state_ops.scatter_add(weight_var, ids, delta))
        for weight_var, delta in zip(
            self._slots['unshrinked_dense_features_weights'], dense_deltas):
          update_ops.append(weight_var.assign_add(delta))

      if not global_step:
        return control_flow_ops.group(*update_ops)
      with ops.control_dependencies(update_ops):
        return state_ops.assign_add(global_step, 1, name=name).op