Example #1
    def _simple(features):
        inputs = tf.feature_column.input_layer(features=features,
                                               feature_columns=feature_columns)
        with tf.variable_scope("simple"):
            with tf.variable_scope("logits"):
                w = tf.Variable(tf.random_normal([2, 2], seed=seed),
                                name="weight")
                b = tf.Variable(tf.random_normal([1], seed=seed), name="bias")
                predictions = tf.matmul(inputs, w) + b

            some_persisted_tensor_constant = tf.constant(
                seed, name="some_persisted_tensor_constant")
            persisted_tensors = {}
            if keep_persisted_tensors:
                persisted_tensors = {
                    "some_persisted_tensor_constant":
                    some_persisted_tensor_constant,
                }
            complexity = tf.constant(3, name="complexity")
            subnetwork = Subnetwork(last_layer=predictions,
                                    logits=predictions,
                                    complexity=complexity,
                                    persisted_tensors=persisted_tensors)
            return WeightedSubnetwork(name=tf.constant("simple", name="name"),
                                      logits=predictions,
                                      weight=w,
                                      subnetwork=subnetwork)
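These examples assume TensorFlow 1.x (`tf`) and that AdaNet's `Subnetwork`/`WeightedSubnetwork` namedtuples are already in scope. A minimal sketch of exercising `_simple` directly, assuming it and the names it closes over (`seed`, `keep_persisted_tensors`, `feature_columns`) live in the same scope as the hypothetical values below:

# Hypothetical values the enclosing test would normally provide.
seed = 42
keep_persisted_tensors = True
feature_columns = [tf.feature_column.numeric_column("x", shape=[2])]

features = {"x": tf.constant([[1., 2.]])}
weighted = _simple(features)  # builds the variables, returns a WeightedSubnetwork
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(weighted.logits))  # shape [1, 2]: [2, 2] weight, broadcast [1] bias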
Example #2
    def _dnn(features):
        inputs = _extract_feature(features)
        layer_size = 10
        with tf.variable_scope("dnn"):
            with tf.variable_scope("hidden_layer"):
                w = tf.Variable(tf.random_normal([2, layer_size], seed=seed),
                                name="weight")
                b = tf.Variable(tf.random_normal([layer_size], seed=seed),
                                name="bias")
                hidden_layer = tf.matmul(inputs, w) + b
            with tf.variable_scope("logits"):
                w = tf.Variable(tf.random_normal([layer_size, 1], seed=seed),
                                name="weight")
                b = tf.Variable(tf.random_normal([1], seed=seed), name="bias")
                predictions = tf.matmul(hidden_layer, w) + b

            some_persisted_tensor_constant = tf.constant(
                seed, name="some_persisted_tensor_constant")
            persisted_tensors = {}
            if keep_persisted_tensors:
                persisted_tensors = {
                    "some_persisted_tensor_constant":
                    some_persisted_tensor_constant,
                }
            complexity = tf.constant(6, name="complexity")
            subnetwork = Subnetwork(last_layer=hidden_layer,
                                    logits=predictions,
                                    complexity=complexity,
                                    persisted_tensors=persisted_tensors)
            return WeightedSubnetwork(name=tf.constant("dnn", name="name"),
                                      logits=predictions,
                                      weight=w,
                                      subnetwork=subnetwork)
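This example and the next both call an `_extract_feature` helper that is not shown. One plausible definition, assuming the features dictionary carries a single dense two-element column named "x" (a guess based on the shapes used above), is:

def _extract_feature(features):
    # Hypothetical helper: pull the "x" feature out as a dense float Tensor.
    return tf.to_float(features["x"])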
Example #3
  def _linear(features):
    inputs = _extract_feature(features)
    with tf.variable_scope("linear"):
      with tf.variable_scope("logits"):
        w = tf.Variable(tf.random_normal([2, 1], seed=seed), name="weight")
        b = tf.Variable(tf.random_normal([1], seed=seed), name="bias")
        predictions = tf.matmul(inputs, w) + b

      some_persisted_tensor_constant = tf.constant(
          seed, name="some_persisted_tensor_constant")
      nested_persisted_tensor_constant = tf.constant(
          seed, name="nested_persisted_tensor_constant")
      persisted_tensors = {}
      if keep_persisted_tensors:
        persisted_tensors = {
            "some_persisted_tensor_constant": some_persisted_tensor_constant,
            "nested": {
                "nested": {
                    "value": nested_persisted_tensor_constant,
                    "separated/by/slash": nested_persisted_tensor_constant,
                },
                "value": some_persisted_tensor_constant,
            }
        }
      complexity = tf.constant(3, name="complexity")
      subnetwork = Subnetwork(
          last_layer=inputs,
          logits=predictions,
          complexity=complexity,
          persisted_tensors=persisted_tensors)
      return WeightedSubnetwork(
          name=tf.constant("linear", name="name"),
          logits=predictions,
          weight=w,
          subnetwork=subnetwork)
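When `keep_persisted_tensors` is enabled in the enclosing scope, the nested dictionary above travels verbatim on the returned `Subnetwork`, so its entries can be read back with ordinary dict lookups. A small hypothetical check:

weighted = _linear({"x": tf.constant([[1., 2.]])})
persisted = weighted.subnetwork.persisted_tensors
value = persisted["nested"]["nested"]["value"]                 # scalar constant
slashed = persisted["nested"]["nested"]["separated/by/slash"]  # key containing "/"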
Example #4
def _make_metrics(sess, metric_fn):
    head = tf.contrib.estimator.binary_classification_head(
        loss_reduction=tf.losses.Reduction.SUM)
    builder = _EnsembleBuilder(head,
                               MixtureWeightType.SCALAR,
                               metric_fn=metric_fn)
    features = {"x": tf.constant([[1.], [2.]])}
    labels = tf.constant([0, 1])
    ensemble_spec = builder.build_ensemble_spec(
        "fake_ensemble", [
            WeightedSubnetwork(name=tf.constant("fake_weighted"),
                               logits=[[1.], [2.]],
                               weight=[1.],
                               subnetwork=Subnetwork(logits=[[1.], [2.]],
                                                     last_layer=[1.],
                                                     complexity=1.,
                                                     persisted_tensors={}))
        ],
        summary=_FakeSummary(),
        bias=0.,
        features=features,
        mode=tf.estimator.ModeKeys.EVAL,
        labels=labels,
        iteration_step=1.)
    sess.run(
        (tf.global_variables_initializer(), tf.local_variables_initializer()))
    metrics = sess.run(ensemble_spec.eval_metric_ops)
    return {k: metrics[k][1] for k in metrics}
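A sketch of a `metric_fn` that could be handed to `_make_metrics`. It assumes the ensemble builder forwards arguments such as `features` to the function by name and expects a standard `tf.metrics`-style dictionary of `(value, update_op)` pairs; both points are assumptions about the surrounding test harness rather than confirmed API behavior:

def _example_metric_fn(features):
    # Hypothetical metric: running mean of the "x" feature.
    return {"mean_x": tf.metrics.mean(features["x"])}

with tf.Session() as sess:
    metrics = _make_metrics(sess, _example_metric_fn)  # e.g. {"mean_x": 1.5, ...}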
Example #5
    def _reconstruct_weighted_subnetwork(self, index):
        """Reconstructs a `WeightedSubnetwork` from the graph's collections.

    Args:
      index: Integer index of the subnetwork in a list of subnetworks.

    Returns:
      A frozen `WeightedSubnetwork` instance or `None` if there is no
        `WeightedSubnetwork` frozen at index.
    """

        name = None
        weight = None
        logits = None
        for key in tf.get_default_graph().get_all_collection_keys():
            prefix = self._weighted_subnetwork_collection_key(index, "")
            if prefix not in key:
                continue

            # Verify that each frozen collection is of size one, as each collection
            # should have been cleared before adding a tensor to freeze.
            frozen_collection = tf.get_collection(key)
            assert len(frozen_collection) == 1
            frozen_tensor = frozen_collection[-1]

            field = self._weighted_subnetwork_collection_key_field(key, index)
            if field is None:
                continue
            if field == self.Keys.NAME:
                name = frozen_tensor
                continue
            if field == self.Keys.LOGITS:
                logits = frozen_tensor
                continue
            if field == self.Keys.WEIGHT:
                weight = frozen_tensor
                continue

        # No weighted subnetwork found at given index.
        if name is None and weight is None and logits is None:
            return None

        subnetwork = self._reconstruct_subnetwork(index)
        return WeightedSubnetwork(name=name,
                                  logits=logits,
                                  weight=weight,
                                  subnetwork=subnetwork)
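The size-one assertion above depends on the writing side clearing each collection before adding the single tensor it freezes. A minimal sketch of that invariant (hypothetical helper, not the library's actual freezer code):

def _freeze_tensor(collection_key, tensor):
    graph = tf.get_default_graph()
    graph.clear_collection(collection_key)           # drop any stale entries
    graph.add_to_collection(collection_key, tensor)  # exactly one tensor per key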
Example #6
def dummy_ensemble_spec(name,
                        random_seed=42,
                        num_subnetworks=1,
                        bias=0.,
                        loss=None,
                        adanet_loss=None,
                        eval_metrics=None,
                        dict_predictions=False,
                        export_output_key=None,
                        subnetwork_builders=None,
                        train_op=None):
  """Creates a dummy `_EnsembleSpec` instance.

  Args:
    name: _EnsembleSpec's name.
    random_seed: A scalar random seed.
    num_subnetworks: The number of fake subnetworks in this ensemble.
    bias: Bias value.
    loss: Float loss to return. When None, it's picked from a random
      distribution.
    adanet_loss: Float AdaNet loss to return. When None, it's picked from a
      random distribution.
    eval_metrics: Optional eval metrics tuple of (metric_fn, tensor args).
    dict_predictions: Boolean whether to return predictions as a dictionary of
      `Tensor` or just a single float `Tensor`.
    export_output_key: An `ExportOutputKeys` for faking export outputs.
    subnetwork_builders: List of `adanet.subnetwork.Builder` objects.
    train_op: A train op.

  Returns:
    A dummy `_EnsembleSpec` instance.
  """

  if loss is None:
    loss = dummy_tensor([], random_seed)

  if adanet_loss is None:
    adanet_loss = dummy_tensor([], random_seed * 2)
  else:
    adanet_loss = tf.convert_to_tensor(adanet_loss)

  logits = dummy_tensor([], random_seed * 3)
  if dict_predictions:
    predictions = {
        "logits": logits,
        "classes": tf.cast(tf.abs(logits), dtype=tf.int64)
    }
  else:
    predictions = logits
  weighted_subnetworks = [
      WeightedSubnetwork(
          name=name,
          iteration_number=1,
          logits=dummy_tensor([2, 1], random_seed * 4),
          weight=dummy_tensor([2, 1], random_seed * 4),
          subnetwork=Subnetwork(
              last_layer=dummy_tensor([1, 2], random_seed * 4),
              logits=dummy_tensor([2, 1], random_seed * 4),
              complexity=1.,
              persisted_tensors={}))
  ]

  export_outputs = _dummy_export_outputs(export_output_key, logits, predictions)
  bias = tf.constant(bias)
  return _EnsembleSpec(
      name=name,
      ensemble=ComplexityRegularized(
          weighted_subnetworks=weighted_subnetworks * num_subnetworks,
          bias=bias,
          logits=logits,
      ),
      architecture=_Architecture("dummy_ensemble_candidate"),
      subnetwork_builders=subnetwork_builders,
      predictions=predictions,
      loss=loss,
      adanet_loss=adanet_loss,
      train_op=train_op,
      eval_metrics=eval_metrics,
      export_outputs=export_outputs)
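`dummy_tensor(shape, seed)` comes from the surrounding test utilities and is not shown here. One plausible stand-in, consistent only with how it is called above (a shape plus a seed yielding a deterministic random `Tensor`), would be:

def dummy_tensor(shape, seed=42):
    # Hypothetical stand-in: a deterministic random Tensor of the given shape.
    return tf.random_normal(shape, seed=seed)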
Example #7
def dummy_ensemble_spec(name,
                        random_seed=42,
                        num_subnetworks=1,
                        bias=0.,
                        loss=None,
                        adanet_loss=None,
                        complexity_regularized_loss=None,
                        eval_metric_ops=None,
                        dict_predictions=False,
                        export_output_key=None,
                        train_op=None):
  """Creates a dummy `_EnsembleSpec` instance.

  Args:
    name: _EnsembleSpec's name.
    random_seed: A scalar random seed.
    num_subnetworks: The number of fake subnetworks in this ensemble.
    bias: Bias value.
    loss: Float loss to return. When None, it's picked from a random
      distribution.
    adanet_loss: Float AdaNet loss to return. When None, it's picked from a
      random distribution.
    complexity_regularized_loss: Float complexity regularized loss to return.
      When None, it's picked from a random distribution.
    eval_metric_ops: Optional dictionary of metric ops.
    dict_predictions: Boolean whether to return predictions as a dictionary of
      `Tensor` or just a single float `Tensor`.
    export_output_key: An `ExportOutputKeys` for faking export outputs.
    train_op: A train op.

  Returns:
    A dummy `_EnsembleSpec` instance.
  """

  if loss is None:
    loss = dummy_tensor([], random_seed)
  elif not isinstance(loss, tf.Tensor):
    loss = tf.constant(loss)

  if adanet_loss is None:
    adanet_loss = dummy_tensor([], random_seed * 2)
  else:
    adanet_loss = tf.convert_to_tensor(adanet_loss)

  if complexity_regularized_loss is None:
    complexity_regularized_loss = dummy_tensor([], random_seed * 2)
  elif not isinstance(complexity_regularized_loss, tf.Tensor):
    complexity_regularized_loss = tf.constant(complexity_regularized_loss)

  logits = dummy_tensor([], random_seed * 3)
  if dict_predictions:
    predictions = {
        "logits": logits,
        "classes": tf.cast(tf.abs(logits), dtype=tf.int64)
    }
  else:
    predictions = logits
  weighted_subnetworks = [
      WeightedSubnetwork(
          name=tf.constant(name),
          logits=dummy_tensor([2, 1], random_seed * 4),
          weight=dummy_tensor([2, 1], random_seed * 4),
          subnetwork=Subnetwork(
              last_layer=dummy_tensor([1, 2], random_seed * 4),
              logits=dummy_tensor([2, 1], random_seed * 4),
              complexity=1.,
              persisted_tensors={}))
  ]

  export_outputs = _dummy_export_outputs(export_output_key, logits, predictions)
  bias = tf.constant(bias)
  return _EnsembleSpec(
      name=name,
      ensemble=Ensemble(
          weighted_subnetworks=weighted_subnetworks * num_subnetworks,
          bias=bias,
          logits=logits,
      ),
      predictions=predictions,
      loss=loss,
      adanet_loss=adanet_loss,
      complexity_regularized_loss=complexity_regularized_loss,
      complexity_regularization=1,
      eval_metric_ops=eval_metric_ops,
      train_op=train_op,
      export_outputs=export_outputs)
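A hypothetical way a test might exercise this helper, assuming `_EnsembleSpec`, `Ensemble`, `dummy_tensor`, and `_dummy_export_outputs` are available from the surrounding test utilities:

spec = dummy_ensemble_spec("fake", dict_predictions=True)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    loss_value, adanet_loss_value = sess.run((spec.loss, spec.adanet_loss))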