Example 1
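These TF-internal imports are what the snippet below relies on (module paths as in the TF 2.0-era source tree; a best-effort reconstruction, since the listing omits them):

from tensorflow.python.eager import context
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import ops
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.ops import control_flow_util
from tensorflow.python.util import nest
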
  def __exit__(self, error_type, unused_value, unused_traceback):
    if error_type:
      # Allow errors that occurred inside this context manager to pass through
      # normally.
      return

    # Only run in V2 Function mode.
    if (context.executing_eagerly() or
        not ops.executing_eagerly_outside_functions()):
      return

    if (self._graph is not ops.get_default_graph() or
        self._graph.name != 'keras_graph'):
      # Only auto-track updates when the Keras Graph is the only one used.
      return

    new_operations = self._graph.get_operations()[self._num_operations:]
    new_stateful_ops = set()

    # pylint: disable=protected-access
    for op in new_operations:
      # While loops are not supported in general for automatic control
      # dependencies.
      if control_flow_util.IsInWhileLoop(op):
        continue

      # Track stateful ops via `add_update`.
      is_stateful_op = (
          op.type not in self._graph._registered_ops or
          auto_control_deps.op_is_stateful(
              self._graph._registered_ops[op.type]))

      # Ignore ReadVariableOps, as they do not need to be run separately.
      # This ensures existing Layers don't get extra updates.
      if is_stateful_op and op.type != 'ReadVariableOp':
        new_stateful_ops.add(op)

    explicit_updates = set(
        [u for u in self.layer._unfiltered_updates if not isinstance(u, tuple)])
    # pylint: enable=protected-access

    # Don't add updates that will already be run by virtue of being consumed by
    # other stateful ops or by the Layer's outputs. This ensures that existing
    # Layers like `BatchNormalization` continue to return the same values from
    # their `.updates` property.
    minimum_ops = set()
    targets = new_stateful_ops.union(
        set(nest.flatten(self.outputs)), explicit_updates)
    for op in new_stateful_ops:
      # Scrub any ops that are consumed by the outputs or other stateful ops.
      reachable = tf_utils.get_reachable_from_inputs(op)
      if not (targets - {op}).intersection(reachable):
        minimum_ops.add(op)
    new_stateful_ops = minimum_ops

    # Don't double-track updates added via explicitly calling `add_update`.
    # Also don't double-track updates already tracked in sublayers.
    new_stateful_ops = new_stateful_ops - explicit_updates

    # Decide whether to track as input-conditional or unconditional.
    input_reachable_ops = tf_utils.get_reachable_from_inputs(
        self.inputs, targets=new_stateful_ops)
    unconditional_updates = new_stateful_ops - input_reachable_ops
    conditional_updates = new_stateful_ops - unconditional_updates

    if unconditional_updates:
      self.layer.add_update(list(unconditional_updates))
    if conditional_updates:
      self.layer.add_update(list(conditional_updates), inputs=self.inputs)
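
For context: this `__exit__` is part of a context manager (in TF 2.0-era Keras internals, `base_layer_utils.AutoAddUpdates`) whose `__enter__` snapshots the graph's operation count so the exit hook can diff for newly added stateful ops. A minimal usage sketch, with `AutoAddUpdates` and `set_outputs` taken as assumptions about that internal API:

with base_layer_utils.AutoAddUpdates(layer, inputs) as auto_updater:
  outputs = layer.call(inputs)  # any new stateful ops land in the Keras graph
  auto_updater.set_outputs(outputs)  # supplies the `self.outputs` read above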
Example 2
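Example 2 relies on the same TF-internal imports as Example 1. It differs only in formatting (4-space indentation with PEP 8-style wrapping) and in fetching explicit updates via `_get_unfiltered_updates(check_trainable=False)` instead of the `_unfiltered_updates` property, apparently a later revision of the same source.
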
    def __exit__(self, error_type, unused_value, unused_traceback):
        if error_type:
            # Allow errors that occurred inside this context manager to pass through
            # normally.
            return

        # Only run in V2 Function mode.
        if (context.executing_eagerly()
                or not ops.executing_eagerly_outside_functions()):
            return

        if (self._graph is not ops.get_default_graph()
                or self._graph.name != 'keras_graph'):
            # Only auto-track updates when the Keras Graph is the only one used.
            return

        new_operations = self._graph.get_operations()[self._num_operations:]
        new_stateful_ops = set()

        # pylint: disable=protected-access
        for op in new_operations:
            # While loops are not supported in general for automatic control
            # dependencies.
            if control_flow_util.IsInWhileLoop(op):
                continue

            # Track stateful ops via `add_update`.
            is_stateful_op = (op.type not in self._graph._registered_ops
                              or auto_control_deps.op_is_stateful(
                                  self._graph._registered_ops[op.type]))

            # Ignore ReadVariableOps, as they do not need to be run separately.
            # This ensures existing Layers don't get extra updates.
            if is_stateful_op and op.type != 'ReadVariableOp':
                new_stateful_ops.add(op)

        explicit_updates = set([
            u
            for u in self.layer._get_unfiltered_updates(check_trainable=False)
            if not isinstance(u, tuple)
        ])
        # pylint: enable=protected-access

        # Don't add updates that will already be run by virtue of being consumed by
        # other stateful ops or by the Layer's outputs. This ensures that existing
        # Layers like `BatchNormalization` continue to return the same values from
        # their `.updates` property.
        minimum_ops = set()
        targets = new_stateful_ops.union(set(nest.flatten(self.outputs)),
                                         explicit_updates)
        for op in new_stateful_ops:
            # Scrub any ops that are consumed by the outputs or other stateful ops.
            reachable = tf_utils.get_reachable_from_inputs(op)
            if not (targets - {op}).intersection(reachable):
                minimum_ops.add(op)
        new_stateful_ops = minimum_ops

        # Don't double-track updates added via explicitly calling `add_update`.
        # Also don't double-track updates already tracked in sublayers.
        new_stateful_ops = new_stateful_ops - explicit_updates

        # Decide whether to track as input-conditional or unconditional.
        input_reachable_ops = tf_utils.get_reachable_from_inputs(
            self.inputs, targets=new_stateful_ops)
        unconditional_updates = new_stateful_ops - input_reachable_ops
        conditional_updates = new_stateful_ops - unconditional_updates

        if unconditional_updates:
            self.layer.add_update(list(unconditional_updates))
        if conditional_updates:
            self.layer.add_update(list(conditional_updates),
                                  inputs=self.inputs)
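
The pruning step is the subtle part of both examples, so here is a self-contained toy sketch of the same idea in plain Python, with a dictionary standing in for `tf_utils.get_reachable_from_inputs` (all names here are illustrative, not from the snippet):

def minimal_targets(stateful_ops, other_targets, reachable_from):
    """Keep only ops that no other target already consumes (transitively)."""
    targets = stateful_ops | other_targets
    minimum_ops = set()
    for op in stateful_ops:
        # `reachable_from(op)` is everything downstream of `op`. If some other
        # target is downstream, running that target already runs `op`, so `op`
        # does not need to be tracked separately.
        if not (targets - {op}) & reachable_from(op):
            minimum_ops.add(op)
    return minimum_ops

# Toy graph: a -> b -> out; c is standalone.
downstream = {'a': {'b', 'out'}, 'b': {'out'}, 'c': set(), 'out': set()}
print(minimal_targets({'a', 'b', 'c'}, {'out'}, downstream.__getitem__))
# -> {'c'}: 'a' and 'b' are dropped because 'out' (a target) consumes them.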