Example #1
 def __enter__(self):
     if self._enabled:
         self.old_run = getattr(session.BaseSession, 'run', None)
         self.old_init = getattr(session.BaseSession, '__init__', None)
         if not self.old_run:
             raise errors.InternalError(None, None,
                                        'BaseSession misses run method.')
         elif not self.old_init:
             raise errors.InternalError(
                 None, None, 'BaseSession misses __init__ method.')
         elif getattr(session.BaseSession, '_profiler_run_internal', None):
             raise errors.InternalError(
                 None, None, 'Already in context or context not cleaned.')
         elif getattr(session.BaseSession, '_profiler_init_internal', None):
             raise errors.InternalError(
                 None, None, 'Already in context or context not cleaned.')
         else:
             setattr(session.BaseSession, 'run', _profiled_run)
             setattr(session.BaseSession, '__init__', _profiled_init)
             setattr(session.BaseSession, '_profiler_run_internal',
                     self.old_run)
             setattr(session.BaseSession, '_profiler_init_internal',
                     self.old_init)
             setattr(session.BaseSession, 'profile_context', self)
             return self
     else:
         return self
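
The patching above is only half of the context manager; a matching __exit__ has
to restore what __enter__ saved. The sketch below illustrates that restore step;
it is a sketch of the pattern, not the actual profiler implementation.

 def __exit__(self, exec_type, exec_value, exec_tb):
     if not self._enabled:
         return
     # Put the original methods back so later sessions behave normally.
     setattr(session.BaseSession, 'run', self.old_run)
     setattr(session.BaseSession, '__init__', self.old_init)
     # Drop the bookkeeping attributes that __enter__ checks for, so the
     # context can be entered again without hitting the InternalError above.
     for attr in ('_profiler_run_internal', '_profiler_init_internal',
                  'profile_context'):
         if hasattr(session.BaseSession, attr):
             delattr(session.BaseSession, attr)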
Example #2
 def ExitGradientColocation(self, op, gradient_uid):
   if op is not None:
     if not self._gradient_colocation_stack:
       raise errors.InternalError(
           op.node_def, op,
           "Badly nested gradient colocation: empty stack when popping Op " +
           op.name)
     last_op = self._gradient_colocation_stack.pop()
     if op is last_op:
       if op is self._in_gradient_colocation:
         self._in_gradient_colocation = None
         self._ExitOutsideCompilationScope()
     else:
       raise errors.InternalError(
           op.node_def, op, "Badly nested gradient colocation, expected " +
            last_op.name + ", got " + op.name)
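
This pop-and-check pairs with a push on entry. The sketch below shows a plausible
enter side purely to illustrate the LIFO discipline the InternalError guards; the
outside-compilation call and its arguments are assumptions, not the exact TPU
implementation.

 def EnterGradientColocation(self, op, gradient_uid):
   if op is not None:
     # Every op pushed here must later be popped by ExitGradientColocation in
     # strict LIFO order; a mismatch raises the InternalError shown above.
     self._gradient_colocation_stack.append(op)
     if self._in_gradient_colocation is None:
       # Remember the outermost colocation op and open the matching scope
       # (assumed counterpart of _ExitOutsideCompilationScope above).
       self._in_gradient_colocation = op
       self._EnterOutsideCompilationScope()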
Example #3
def _assert_same_non_optimizer_objects(model, model_graph, clone, clone_graph):
    """Assert model and clone contain the same checkpointable objects."""
    def get_non_optimizer_objects(m, g):
        """Gather set of model and optimizer checkpointable objects."""
        # Set default graph because optimizer.variables() returns optimizer
        # variables defined in the default graph.
        with g.as_default():
            all_objects = set(checkpointable_utils.list_objects(m))
            optimizer_and_variables = set()
            for obj in all_objects:
                if isinstance(obj, optimizers.TFOptimizer):
                    optimizer_and_variables.update(
                        checkpointable_utils.list_objects(obj))
                    optimizer_and_variables.update(
                        set(obj.optimizer.variables()))
            return all_objects - optimizer_and_variables

    model_objects = get_non_optimizer_objects(model, model_graph)
    clone_objects = get_non_optimizer_objects(clone, clone_graph)

    if len(model_objects) != len(clone_objects):
        raise errors.InternalError(
            None, None, 'Model and clone must use the same variables.'
            '\n\tModel variables: %s\n\tClone variables: %s' %
            (model_objects, clone_objects))
Example #4
def get_device_policy():
    """Gets the current device policy.

    The device policy controls how operations requiring inputs on a specific
    device (e.g., on GPU:0) handle inputs on a different device (e.g., GPU:1).

    This function only gets the device policy for the current thread. Any
    subsequently started thread will again use the default policy.

    Returns:
      Current thread device policy.
    """
    device_policy = context.context().device_policy
    if device_policy == context.DEVICE_PLACEMENT_SILENT:
        return 'silent'
    elif device_policy == context.DEVICE_PLACEMENT_SILENT_FOR_INT32:
        return 'silent_for_int32'
    elif device_policy == context.DEVICE_PLACEMENT_WARN:
        return 'warn'
    elif device_policy == context.DEVICE_PLACEMENT_EXPLICIT:
        return 'explicit'
    else:
        # pylint: disable-next=no-value-for-parameter
        raise errors.InternalError(
            f'Got an invalid device policy: {device_policy!r}.')
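
The same policy strings are exposed through the public TF 2.x API, so callers do
not need the context module directly. A short usage sketch, assuming an eager
TF 2.x runtime:

import tensorflow as tf

# Read the per-thread policy name: 'silent', 'silent_for_int32', 'warn' or
# 'explicit' (depending on the TF version, None may be returned when unset).
print(tf.config.experimental.get_device_policy())

# Make accidental cross-device copies fail loudly instead of copying silently.
tf.config.experimental.set_device_policy('explicit')
assert tf.config.experimental.get_device_policy() == 'explicit'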
Example #5
 def _prereadline_check(self):
     if not self._read_buf:
         if not self._read_check_passed:
             raise errors.PermissionDeniedError(
                 None, None, "File isn't open for reading")
         self._read_buf = pywrap_tensorflow.CreateBufferedInputStream(
             compat.as_bytes(self.__name), 1024 * 512)
         if not self._read_buf:
             raise errors.InternalError(
                 None, None, "Could not open file for streaming")
Example #6
 def _prewrite_check(self):
     if not self._writable_file:
         if not self._write_check_passed:
             raise errors.PermissionDeniedError(
                 None, None, "File isn't open for writing")
         self._writable_file = pywrap_tensorflow.CreateWritableFile(
             compat.as_bytes(self.__name))
         if not self._writable_file:
             raise errors.InternalError(None, None,
                                        "Could not open file for writing")
Example #7
 def testInterestingError(self):
     with self.assertRaises(errors.InternalError):
         self.catch_and_raise(errors.InternalError('message', None, None))