Example #1
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], labels of the output classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are assumed to have weight one.
    name: Operation name.

  Returns:
    Prediction and loss tensors.
  """
  with ops.name_scope(name, "softmax_classifier", [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.mul(logits, class_weight)
    return nn.softmax(logits), loss_ops.softmax_cross_entropy(logits, labels)
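
A minimal usage sketch of the computation above, rewritten against the public
tf.compat.v1 API so it runs standalone. The sizes, variable names, and random
data are illustrative assumptions, not part of the original snippet.

import numpy as np
import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

batch_size, feature_size, n_classes = 32, 10, 3  # illustrative sizes

x = tf.placeholder(tf.float32, [None, feature_size])
y = tf.placeholder(tf.float32, [None, n_classes])  # one-hot labels
weights = tf.Variable(tf.zeros([feature_size, n_classes]))
biases = tf.Variable(tf.zeros([n_classes]))

# The same computation as softmax_classifier above, via public ops.
logits = tf.nn.xw_plus_b(x, weights, biases)
probs = tf.nn.softmax(logits)
loss = tf.losses.softmax_cross_entropy(y, logits)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    p, l = sess.run([probs, loss], {
        x: np.random.rand(batch_size, feature_size),
        y: np.eye(n_classes)[np.random.randint(n_classes, size=batch_size)],
    })
    print(p.shape, l)  # (32, 3) and a scalar cross-entropy loss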
Example #2
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
    """Returns prediction and loss for softmax classifier.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], labels of the output classes.
    weights: Tensor, [batch_size, feature_size], linear transformation
      matrix.
    biases: Tensor, [batch_size], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    Prediction and loss tensors.
  """
    with ops.op_scope([tensor_in, labels], name, "softmax_classifier"):
        logits = nn.xw_plus_b(tensor_in, weights, biases)
        if class_weight is not None:
            logits = math_ops.mul(logits, class_weight)
        return nn.softmax(logits), loss_ops.softmax_cross_entropy(
            logits, labels)
Example #3
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
  """Returns prediction and loss for mean squared error regression."""
  with ops.op_scope([tensor_in, labels], name, "mean_squared_error_regressor"):
    predictions = nn.xw_plus_b(tensor_in, weights, biases)
    if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
      predictions = array_ops_.squeeze(predictions, squeeze_dims=[1])
    return predictions, loss_ops.sum_of_squares(predictions, labels)
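
The rank fix-up above guards against a broadcasting pitfall: with rank-1
labels and [batch_size, 1] predictions, the subtraction inside the loss
would broadcast to a [batch_size, batch_size] matrix. A small NumPy
illustration (the values are made up):

import numpy as np

labels = np.array([1.0, 2.0, 3.0])             # shape (3,)
predictions = np.array([[1.5], [2.5], [3.5]])  # shape (3, 1)

# Without the squeeze, (3, 1) - (3,) broadcasts to (3, 3), so the loss
# would average nine spurious pairwise errors instead of three.
print(((predictions - labels) ** 2).shape)             # (3, 3)

# After squeezing the trailing dimension, the shapes line up.
print(((predictions.squeeze(1) - labels) ** 2).shape)  # (3,)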
Example #4
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
  """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [feature_size, n_classes], linear transformation
      matrix.
    biases: Tensor, [n_classes], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are assumed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
  with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
    logits = nn.xw_plus_b(tensor_in, weights, biases)
    if class_weight is not None:
      logits = math_ops.multiply(logits, class_weight)
    return nn.softmax(logits), losses.softmax_cross_entropy(labels, logits)
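
Note the argument order: the tf.contrib.losses variants in Examples #1 and
#2 take (logits, labels), while tf.losses.softmax_cross_entropy here takes
(onehot_labels, logits). Porting between the two APIs means swapping the
arguments, not just renaming the module.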
Example #5
def softmax_classifier(tensor_in,
                       labels,
                       weights,
                       biases,
                       class_weight=None,
                       name=None):
    """Returns prediction and loss for softmax classifier.

  This function returns "probabilities" and a cross entropy loss. To obtain
  predictions, use `tf.argmax` on the returned probabilities.

  This function requires labels to be passed in one-hot encoding.

  Args:
    tensor_in: Input tensor, [batch_size, feature_size], features.
    labels: Tensor, [batch_size, n_classes], one-hot labels of the output
      classes.
    weights: Tensor, [batch_size, feature_size], linear transformation
      matrix.
    biases: Tensor, [batch_size], biases.
    class_weight: Tensor, optional, [n_classes], weight for each class.
      If not given, all classes are supposed to have weight one.
    name: Operation name.

  Returns:
    `tuple` of softmax predictions and loss `Tensor`s.
  """
    with ops.name_scope(name, 'softmax_classifier', [tensor_in, labels]):
        logits = nn.xw_plus_b(tensor_in, weights, biases)
        if class_weight is not None:
            logits = math_ops.multiply(logits, class_weight)
        return nn.softmax(logits), loss_ops.softmax_cross_entropy(
            logits, labels)
Example #6
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
    """Returns prediction and loss for mean squared error regression."""
    with ops.op_scope([tensor_in, labels], name, "mean_squared_error_regressor"):
        predictions = nn.xw_plus_b(tensor_in, weights, biases)
        if len(labels.get_shape()) == 1:
            labels = array_ops_.reshape(labels, [-1, 1])
        return predictions, loss_ops.squared(predictions, labels)
Example #7
  def testFullyConnectedWithBias(self):
    with self.session() as sess:
      with ops.device("/device:IPU:0"):
        x = array_ops.placeholder(np.float32, shape=[2, 2])
        weights = array_ops.placeholder(np.float32, shape=[2, 2])
        bias = array_ops.placeholder(np.float32, shape=[2])
        x_new = nn.xw_plus_b(x, weights, bias)

      report = tu.ReportJSON(self, sess)
      report.reset()

      out = sess.run(x_new, {
          x: np.full([2, 2], 3),
          weights: np.full([2, 2], 4),
          bias: np.ones([2]),
      })
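      # Each output element is 3*4 + 3*4 + 1 = 25 (2x2 matmul plus bias).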
      self.assertAllClose(np.full([2, 2], 25), out)

      report.parse_log(
          assert_len=4,
          assert_msg="Expected 1x compile, 1x load, 1x download, 1x execute")

      ok = [
          '__seed*', 'host-exchange-local-copy',
          'xw_plus_b/MatMul/dot.*/Conv_1', 'xw_plus_b/fusion/Op/Add'
      ]
      report.assert_all_compute_sets_and_list(ok)
Example #8
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
  """Returns prediction and loss for mean squared error regression."""
  with ops.name_scope(name, "mean_squared_error_regressor",
                      [tensor_in, labels]):
    predictions = nn.xw_plus_b(tensor_in, weights, biases)
    if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
      predictions = array_ops_.squeeze(predictions, squeeze_dims=[1])
    return predictions, loss_ops.mean_squared_error(predictions, labels)
Example #9
def model_fn(X, Y_one_hot):
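    # n_features and n_classes are assumed to be defined in the enclosing scope.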
    with tf.variable_scope("logistic_regression"):
        weights = tf.get_variable('weights', [n_features, n_classes])
        bias = tf.get_variable('bias', [n_classes])
        logits = nn.xw_plus_b(X, weights, bias)
        y_probs = nn.softmax(logits)
        loss = loss_ops.softmax(logits, Y_one_hot)
        return y_probs, loss
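
A hedged wiring sketch for this model_fn. It assumes TF 1.x graph mode, that
model_fn and its imports (nn, loss_ops) are in scope, and stand-in values for
the globals the function reads:

import tensorflow.compat.v1 as tf

tf.disable_eager_execution()

n_features, n_classes = 4, 3  # stand-ins for the globals model_fn reads

X = tf.placeholder(tf.float32, [None, n_features])
Y_one_hot = tf.placeholder(tf.float32, [None, n_classes])

y_probs, loss = model_fn(X, Y_one_hot)
train_op = tf.train.GradientDescentOptimizer(0.1).minimize(loss)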
Example #10
def mean_squared_error_regressor(tensor_in, labels, weights, biases, name=None):
  """Returns prediction and loss for mean squared error regression."""
  with ops.name_scope(name, 'mean_squared_error_regressor',
                      [tensor_in, labels]):
    predictions = nn.xw_plus_b(tensor_in, weights, biases)
    if len(labels.get_shape()) == 1 and len(predictions.get_shape()) == 2:
      predictions = array_ops_.squeeze(predictions, axis=[1])
    return predictions, losses.mean_squared_error(labels, predictions)
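
Examples #3, #8, and #10 show the same function across three API
generations: the loss moves from loss_ops.sum_of_squares(predictions,
labels) to loss_ops.mean_squared_error(predictions, labels) to
tf.losses.mean_squared_error(labels, predictions), with labels now first,
and the deprecated squeeze_dims keyword becomes axis.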
Example #11
    def testFullyConnectedWithBias(self):
        with ops.device("/device:IPU:0"):
            x = array_ops.placeholder(np.float32, shape=[2, 2])
            weights = array_ops.placeholder(np.float32, shape=[2, 2])
            bias = array_ops.placeholder(np.float32, shape=[2])
            x_new = nn.xw_plus_b(x, weights, bias)

            with ops.device('cpu'):
                report = gen_ipu_ops.ipu_event_trace()

        tu.configure_ipu_system(True, True, True)

        with tu.ipu_session() as sess:

            sess.run(report)

            out = sess.run(
                x_new, {
                    x: np.full([2, 2], 3),
                    weights: np.full([2, 2], 4),
                    bias: np.ones([2]),
                })
            self.assertAllClose(np.full([2, 2], 25), out)

            result = sess.run(report)
            self.assertEqual(len(result),
                             4)  # 1xcompile, 1xload, 1xdownload, 1xexecute

            s = tu.extract_all_strings_from_event_trace(result)
            cs_list = tu.get_compute_sets_from_report(s)

            ok = [
                '__seed*', 'host-exchange-local-copy',
                'xw_plus_b/MatMul/dot.*/Conv_1/Convolve',
                'xw_plus_b/fusion/addToChannel'
            ]
            self.assertTrue(
                tu.check_compute_sets_in_whitelist_entries(cs_list, ok))