Example #1
 def __del__(self):
   if not self._in_graph_mode:
     # There is only one ResourceVariable object for each underlying resource
     # (cached in the Graph's VariableStore when created with get_variable), so
     # it is safe to delete the resource we have a handle to. Each Graph has a
     # unique container name in Eager, which prevents resource sharing.
     #
     # The Graph's VariableStore contains strong references to ResourceVariable
     # objects created with get_variable, so this destructor will only be
     # called once the Graph is garbage collected for those objects. However,
     # explicitly created ResourceVariables (e.g. through tfe.Variable) may be
     # collected earlier.
     try:
       # We have checked that this ResourceVariable was created in Eager
       # mode. However, this destructor may be running in graph mode
       # (especially during unit tests). To clean up successfully, we switch
       # back into Eager temporarily.
       with context.eager_mode():
         with ops.device(self._handle_device):
           gen_resource_variable_ops.destroy_resource_op(
               self._handle, ignore_lookup_error=True)
     except TypeError:
       # Suppress some exceptions, mainly for the case when we're running on
       # module deletion. Things that can go wrong include the context module
       # already being unloaded, self._handle._handle_data no longer being
       # valid, and so on. Printing warnings in these cases is silly
       # (exceptions raised from __del__ are printed as warnings to stderr).
       pass  # 'NoneType' object is not callable when the handle has been
             # partially unloaded.
     except AttributeError:
       pass  # 'NoneType' object has no attribute 'eager_mode' when context has
             # been unloaded.
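
The comments above explain why __del__ has to switch back into eager mode before calling destroy_resource_op, and why ignore_lookup_error=True makes a late or repeated destroy harmless. For reference, here is a minimal standalone sketch of that create/destroy lifecycle; it is my own illustration, not one of the collected examples, and it assumes TF 2.x eager execution and uses TensorFlow's internal gen_resource_variable_ops module directly (the shared_name, dtype, and shape are made up).

import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_resource_variable_ops

# Create a named resource handle eagerly and give it a value.
handle = gen_resource_variable_ops.var_handle_op(
    dtype=dtypes.float32, shape=[], shared_name="demo_resource")  # illustrative name
gen_resource_variable_ops.assign_variable_op(handle, tf.constant(1.0))

# Destroy the underlying resource. With ignore_lookup_error=True a second
# destroy (for example, from a late-running __del__) is a harmless no-op.
gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error=True)
gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error=True)
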
Example #2
 def _destroy_cache():
     with ops.device("GPU:0"):
         handle = gen_trt_ops.create_trt_engine_cache_handle(
             container=trt_convert._TRT_ENGINE_CACHE_CONTAINER_NAME,
             resource_name="TRTEngineOp_0")
         gen_resource_variable_ops.destroy_resource_op(
             handle, ignore_lookup_error=False)
Example #3
 def __del__(self):
   if not self._in_graph_mode:
     # There is only one ResourceVariable object for each underlying resource
     # (cached in the Graph's VariableStore when created with get_variable), so
     # it is safe to delete the resource we have a handle to. Each Graph has a
     # unique container name in Eager, which prevents resource sharing.
     #
     # The Graph's VariableStore contains strong references to ResourceVariable
     # objects created with get_variable, so this destructor will only be
     # called once the Graph is garbage collected for those objects. However,
     # explicitly created ResourceVariables (e.g. through tfe.Variable) may be
     # collected earlier.
     try:
       # We have checked that this ResourceVariable was created in Eager
       # mode. However, this destructor may be running in graph mode
       # (especially during unit tests). To clean up successfully, we switch
       # back into Eager temporarily.
       with context.eager_mode():
         with ops.device(self._handle_device):
           gen_resource_variable_ops.destroy_resource_op(
               self._handle, ignore_lookup_error=True)
     except TypeError:
       # Suppress some exceptions, mainly for the case when we're running on
       # module deletion. Things that can go wrong include the context module
       # already being unloaded, self._handle._handle_data no longer being
       # valid, and so on. Printing warnings in these cases is silly
       # (exceptions raised from __del__ are printed as warnings to stderr).
       pass  # 'NoneType' object is not callable when the handle has been
             # partially unloaded.
     except AttributeError:
       pass  # 'NoneType' object has no attribute 'eager_mode' when context has
             # been unloaded.
Example #4
 def __del__(self):
   # Resources follow object-identity when executing eagerly, so it is safe to
   # delete the resource we have a handle to.
   try:
     # This resource was created in eager mode. However, this destructor may be
     # running in graph mode (especially during unit tests). To clean up
     # successfully, we switch back into eager mode temporarily.
     with context.eager_mode():
       with ops.device(self._handle_device):
         gen_resource_variable_ops.destroy_resource_op(
             self._handle, ignore_lookup_error=True)
   except TypeError:
     # Suppress some exceptions, mainly for the case when we're running on
     # module deletion. Things that can go wrong include the context module
     # already being unloaded, self._handle._handle_data no longer being
     # valid, and so on. Printing warnings in these cases is silly
     # (exceptions raised from __del__ are printed as warnings to stderr).
     pass  # 'NoneType' object is not callable when the handle has been
           # partially unloaded.
   except AttributeError:
     pass  # 'NoneType' object has no attribute 'eager_mode' when context has
           # been unloaded.
Example #5
 def __del__(self):
   # Resources follow object-identity when executing eagerly, so it is safe to
   # delete the resource we have a handle to.
   try:
     # This resource was created in eager mode. However, this destructor may be
     # running in graph mode (especially during unit tests). To clean up
     # successfully, we switch back into eager mode temporarily.
     with context.eager_mode():
       with ops.device(self._handle_device):
         gen_resource_variable_ops.destroy_resource_op(
             self._handle, ignore_lookup_error=True)
   except TypeError:
     # Suppress some exceptions, mainly for the case when we're running on
     # module deletion. Things that can go wrong include the context module
     # already being unloaded, self._handle._handle_data no longer being
     # valid, and so on. Printing warnings in these cases is silly
     # (exceptions raised from __del__ are printed as warnings to stderr).
     pass  # 'NoneType' object is not callable when the handle has been
           # partially unloaded.
   except AttributeError:
     pass  # 'NoneType' object has no attribute 'eager_mode' when context has
           # been unloaded.
Example #6
 def destroy_resource(self):
   handle = _get_resource_handle(self._resource_name, self._device)
   with ops.device(self._device):
     gen_resource_variable_ops.destroy_resource_op(
         handle, ignore_lookup_error=True)
Example #7
 def _destroy_cache():
     with ops.device("GPU:0"):
         handle = gen_trt_ops.create_trt_resource_handle(
             resource_name="TRTEngineOp_0")
         gen_resource_variable_ops.destroy_resource_op(
             handle, ignore_lookup_error=False)
Example #8
 def _DestroyCache():
   with ops.device("GPU:0"):
     handle = gen_trt_ops.create_trt_resource_handle(
         resource_name=trt_engine_name)
     gen_resource_variable_ops.destroy_resource_op(
         handle, ignore_lookup_error=False)
Example #9
 def _destroy_resource(self):
   gen_resource_variable_ops.destroy_resource_op(
       self.resource_handle, ignore_lookup_error=True)
Example #10
    def create_op(self, *args, **kwargs):
        """Creates an `Operation`.

    For operations of the following form

      orig_value = op(*args, **kwargs)

    this function constructs the following subgraph:

      v = Variable()
      if v is not initialized:
        orig_value = op(*args, **kwargs)
        v.assign(orig_value) # Initializes v
        return orig_value
      else:
        return v

    The above transformation is not performed and the original op is returned
    as is if any of the following is true:
    * `_return_as_is` flag is set to true.
    * op_type is listed in _PASS_THROUGH_OPS
    * op has no outputs.
    * One of the op's return values has a ref type.

    Args:
      *args: Arguments for create_op()
      **kwargs: Keyword arguments for create_op(). Refer to
        tensorflow.python.framework.ops.Graph.create_op() for the mandatory
        and optional arguments.

    Returns:
      An Operation.

    Raises:
      UnimplementedError: if output type is a reference and the op's type
        is not one of the supported types in `_REF_OPS_WHITELIST`.
    """
        op_type = kwargs['op_type'] if 'op_type' in kwargs else args[0]
        output_dtypes = kwargs['dtypes'] if 'dtypes' in kwargs else args[2]
        output_dtypes = [dtypes.as_dtype(d) for d in output_dtypes]

        if self._return_as_is or op_type in _PASS_THROUGH_OPS:
            return self._wrap(
                super(ImperativeGraph, self).create_op(*args, **kwargs))

        if not output_dtypes:
            return self._wrap(
                super(ImperativeGraph, self).create_op(*args, **kwargs))

        output_has_ref = any([dtype._is_ref_dtype for dtype in output_dtypes])  # pylint: disable=protected-access

        if output_has_ref:
            if op_type not in _REF_OPS_WHITELIST:
                raise errors.UnimplementedError(
                    None, None, op_type + ' op not supported in '
                    'imperative graph')

            ret = super(ImperativeGraph, self).create_op(*args, **kwargs)

            if self._in_variable_creation:
                if op_type == 'Assign':
                    self.add_pending_init(ret)

            return self._wrap(ret)

        with self.return_as_is():
            # Declares the variables to hold the output values of this op.
            op_output_var = [
                state_ops.variable_op_v2(tensor_shape.TensorShape(None),
                                         dtype,
                                         container=self._name)
                for dtype in output_dtypes
            ]
            # Ops to free the resources used by the temporary cache variables.
            # The following two ops are created for each cache variable,
            # having no control dependencies on any other ops :
            # var_handle_op ----> destroy_resource_op
            for dtype, v in zip(output_dtypes, op_output_var):
                with ops.control_dependencies(None):
                    self._variable_cleanup_ops += [
                        gen_resource_variable_ops.destroy_resource_op(
                            gen_resource_variable_ops.var_handle_op(
                                dtype,
                                tensor_shape.TensorShape(None),
                                container=self._name,
                                shared_name=v.op.name),
                            ignore_lookup_error=True)
                    ]

            # Create the conditional to run the original op only when the variable
            # corresponding to the first output is not initialized.
            inited = state_ops.is_variable_initialized(op_output_var[0])
            v_f, v_t = control_flow_ops.ref_switch(op_output_var[0], inited)
            # pylint: disable=protected-access
            v_f_op = gen_array_ops._ref_identity(v_f)
            v_t_op = gen_array_ops._ref_identity(v_t)
            # pylint: enable=protected-access

            with ops.control_dependencies([v_f_op.op]):
                # Create the original op
                orig_op = self._wrap(
                    super(ImperativeGraph, self).create_op(*args, **kwargs))
            shapes = [val.get_shape() for val in orig_op.outputs]

            controls = []
            for var, val in zip(op_output_var, orig_op.outputs):
                if (not val.get_shape().is_fully_defined()
                        or val.get_shape().num_elements() > 0):
                    assign_op = state_ops.assign(var,
                                                 val,
                                                 validate_shape=False)
                    assign_op.set_shape(val.get_shape())
                    controls.append(assign_op)

            values = []
            if len(controls) > 1:
                if control_flow_ops.IsSwitch(orig_op):
                    # pylint: disable=protected-access
                    controls = gen_control_flow_ops._ref_merge(controls)
                    # pylint: enable=protected-access
                else:
                    controls = control_flow_ops.tuple(controls)

            for var, val in zip(op_output_var, orig_op.outputs):
                with ops.control_dependencies(controls):
                    with self.colocate_with(v_f_op):
                        real_val = array_ops.identity(val)
                with ops.control_dependencies([v_t_op.op]):
                    with self.colocate_with(v_t_op):
                        stored_val = array_ops.identity(var)
                    stored_val.set_shape(val.get_shape())
                    real_val, _ = control_flow_ops.merge(
                        [real_val, stored_val])
                real_val.op.node_def.attr['_gradient_op_type'].CopyFrom(
                    attr_value_pb2.AttrValue(
                        s=compat.as_bytes(self._merge_op_type)))
                values.append(real_val)

            for i, _ in enumerate(shapes):
                values[i].set_shape(shapes[i])
            self._outputs_map[orig_op.name] = values
            try:
                self._gradient_function_map[
                    orig_op.name] = ops.get_gradient_function(orig_op)
            except (KeyError, LookupError):
                pass
            else:
                orig_op.node_def.attr['_gradient_op_type'].CopyFrom(
                    attr_value_pb2.AttrValue(
                        s=compat.as_bytes(self._imperative_op_type)))

            return MultiOutputOperation(values)
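
The comment inside create_op describes the cleanup pattern for the temporary cache variables: for each one, a var_handle_op addressing the variable by container and shared_name feeds a destroy_resource_op, with no control dependencies on other ops. The fragment below isolates just that pattern; it is a sketch of mine with an assumed container name and variable name, not code taken from the example.

from tensorflow.python.framework import dtypes, ops, tensor_shape
from tensorflow.python.ops import gen_resource_variable_ops

graph = ops.Graph()
with graph.as_default():
    # A handle addressing the cached variable feeds a destroy op;
    # ignore_lookup_error=True keeps cleanup safe even if the variable
    # was never actually created.
    cleanup_op = gen_resource_variable_ops.destroy_resource_op(
        gen_resource_variable_ops.var_handle_op(
            dtype=dtypes.float32,
            shape=tensor_shape.TensorShape(None),
            container="imperative_cache",      # assumed container name
            shared_name="cached_output_0"),    # assumed variable name
        ignore_lookup_error=True)
    # In graph mode, cleanup_op would later be run with a Session.
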
Example #11
  def create_op(self, *args, **kwargs):
    """Creates an `Operation`.

    For operations of the following form

      orig_value = op(*args, **kwargs)

    this function constructs the following subgraph:

      v = Variable()
      if v is not initialized:
        orig_value = op(*args, **kwargs)
        v.assign(orig_value) # Initializes v
        return orig_value
      else:
        return v

    The above transformation is not performed and the original op is returned
    as is if any of the following is true:
    * `_return_as_is` flag is set to true.
    * op_type is listed in _PASS_THROUGH_OPS
    * op has no outputs.
    * One of the op's return values has a ref type.

    Args:
      *args: Arguments for create_op()
      **kwargs: Keyword arguments for create_op(). Refer to
        tensorflow.python.framework.ops.Graph.create_op() for the mandatory
        and optional arguments.

    Returns:
      An Operation.

    Raises:
      UnimplementedError: if output type is a reference and the op's type
        is not one of the supported types in `_REF_OPS_WHITELIST`.
    """
    op_type = kwargs['op_type'] if 'op_type' in kwargs else args[0]
    output_dtypes = kwargs['dtypes'] if 'dtypes' in kwargs else args[2]
    output_dtypes = [dtypes.as_dtype(d) for d in output_dtypes]

    if self._return_as_is or op_type in _PASS_THROUGH_OPS:
      return self._wrap(super(ImperativeGraph, self).create_op(*args, **kwargs))

    if not output_dtypes:
      return self._wrap(
          super(ImperativeGraph, self).create_op(*args, **kwargs))

    output_has_ref = any([dtype._is_ref_dtype for dtype in output_dtypes])  # pylint: disable=protected-access

    if output_has_ref:
      if op_type not in _REF_OPS_WHITELIST:
        raise errors.UnimplementedError(None, None,
                                        op_type + ' op not supported in '
                                        'imperative graph')

      ret = super(ImperativeGraph, self).create_op(*args, **kwargs)

      if self._in_variable_creation:
        if op_type == 'Assign':
          self.add_pending_init(ret)

      return self._wrap(ret)

    with self.return_as_is():
      # Declares the variables to hold the output values of this op.
      op_output_var = [state_ops.variable_op_v2(
          tensor_shape.TensorShape(None), dtype, container=self._name)
                       for dtype in output_dtypes]
      # Ops to free the resources used by the temporary cache variables.
      # The following two ops are created for each cache variable,
      # having no control dependencies on any other ops :
      # var_handle_op ----> destroy_resource_op
      for dtype, v in zip(output_dtypes, op_output_var):
        with ops.control_dependencies(None):
          self._variable_cleanup_ops += [
              gen_resource_variable_ops.destroy_resource_op(
                  gen_resource_variable_ops.var_handle_op(
                      dtype, tensor_shape.TensorShape(None),
                      container=self._name, shared_name=v.op.name),
                  ignore_lookup_error=True)]

      # Create the conditional to run the original op only when the variable
      # corresponding to the first output is not initialized.
      inited = state_ops.is_variable_initialized(op_output_var[0])
      v_f, v_t = control_flow_ops.ref_switch(op_output_var[0], inited)
      # pylint: disable=protected-access
      v_f_op = gen_array_ops._ref_identity(v_f)
      v_t_op = gen_array_ops._ref_identity(v_t)
      # pylint: enable=protected-access

      with ops.control_dependencies([v_f_op.op]):
        # Create the original op
        orig_op = self._wrap(
            super(ImperativeGraph, self).create_op(*args, **kwargs))
      shapes = [val.get_shape() for val in orig_op.outputs]

      controls = []
      for var, val in zip(op_output_var, orig_op.outputs):
        if (not val.get_shape().is_fully_defined() or
            val.get_shape().num_elements() > 0):
          assign_op = state_ops.assign(var, val, validate_shape=False)
          assign_op.set_shape(val.get_shape())
          controls.append(assign_op)

      values = []
      if len(controls) > 1:
        if control_flow_ops.IsSwitch(orig_op):
          # pylint: disable=protected-access
          controls = gen_control_flow_ops._ref_merge(controls)
          # pylint: enable=protected-access
        else:
          controls = control_flow_ops.tuple(controls)

      for var, val in zip(op_output_var, orig_op.outputs):
        with ops.control_dependencies(controls):
          with self.colocate_with(v_f_op):
            real_val = array_ops.identity(val)
        with ops.control_dependencies([v_t_op.op]):
          with self.colocate_with(v_t_op):
            stored_val = array_ops.identity(var)
          stored_val.set_shape(val.get_shape())
          real_val, _ = control_flow_ops.merge([real_val, stored_val])
        real_val.op.node_def.attr['_gradient_op_type'].CopyFrom(
            attr_value_pb2.AttrValue(s=compat.as_bytes(self._merge_op_type)))
        values.append(real_val)

      for i, _ in enumerate(shapes):
        values[i].set_shape(shapes[i])
      self._outputs_map[orig_op.name] = values
      try:
        self._gradient_function_map[orig_op.name] = ops.get_gradient_function(
            orig_op)
      except (KeyError, LookupError):
        pass
      else:
        orig_op.node_def.attr['_gradient_op_type'].CopyFrom(
            attr_value_pb2.AttrValue(
                s=compat.as_bytes(self._imperative_op_type)))

      return MultiOutputOperation(values, orig_op)
Example #12
 def __del__(self):
   if context.in_eager_mode():
     gen_resource_variable_ops.destroy_resource_op(self._handle,
                                                   ignore_lookup_error=False)
Example #13
 def __del__(self):
     if context.in_eager_mode():
         gen_resource_variable_ops.destroy_resource_op(
             self._handle, ignore_lookup_error=False)
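
Examples #12 and #13 pass ignore_lookup_error=False, so destroying a resource that no longer exists surfaces an error instead of silently succeeding. The sketch below contrasts the two calls; it is my own illustration (assuming TF 2.x eager execution), and the caught exception type is kept broad because the exact error class is not shown in the examples.

import tensorflow as tf
from tensorflow.python.framework import dtypes
from tensorflow.python.ops import gen_resource_variable_ops

handle = gen_resource_variable_ops.var_handle_op(
    dtype=dtypes.float32, shape=[], shared_name="strict_destroy_demo")  # illustrative name
gen_resource_variable_ops.assign_variable_op(handle, tf.constant(0.0))

# First destroy removes the resource.
gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error=False)
try:
    # A second destroy is expected to fail: the resource is gone and
    # lookup errors are not being ignored.
    gen_resource_variable_ops.destroy_resource_op(handle, ignore_lookup_error=False)
except tf.errors.OpError as e:
    print("second destroy failed as expected:", e)
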