Example #1
    def create_variable(self, name, alloc, type_, max_stack_depth, batch_size):
        """Returns an intialized Variable.

    Args:
      name: Name for the variable.
      alloc: `VariableAllocation` for the variable.
      type_: `instructions.TensorType` describing the sub-batch shape and dtype
        of the variable being created.
      max_stack_depth: Python `int`, the maximum stack depth to enforce.
      batch_size: Python `int`, the number of parallel threads being executed.

    Returns:
      var: A new, initialized Variable object.
    """
        del name
        if alloc is instructions.VariableAllocation.NULL:
            return instructions.NullVariable()
        elif alloc is instructions.VariableAllocation.TEMPORARY:
            return instructions.TemporaryVariable.empty()
        else:
            dtype, event_shape = type_
            value = np.zeros([batch_size] + list(event_shape), dtype=dtype)
            if alloc is instructions.VariableAllocation.REGISTER:
                return RegisterNumpyVariable(value)
            else:
                return FullNumpyVariable(value,
                                         _create_stack(max_stack_depth, value))
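
A minimal call sketch for the NumPy variant, under stated assumptions: `backend` is an instance of the surrounding backend class (hypothetical name), the import path follows TFP's experimental `auto_batching` package, `VariableAllocation.FULL` is taken to be the enum member that reaches the final `else` branch, and `type_` is passed as the plain `(dtype, event_shape)` pair the method unpacks.

    import numpy as np
    from tensorflow_probability.python.experimental.auto_batching import instructions
    from tensorflow_probability.python.experimental.auto_batching import numpy_backend

    backend = numpy_backend.NumpyBackend()  # assumed no-arg constructor

    # FULL allocation: a zero-filled [batch_size] + event_shape value plus a stack.
    var = backend.create_variable(
        name='x',                  # ignored by the NumPy backend
        alloc=instructions.VariableAllocation.FULL,
        type_=(np.float32, (3,)),  # (dtype, event_shape) pair
        max_stack_depth=15,        # Python int
        batch_size=8)              # 8 parallel threads

With `REGISTER` allocation the returned variable holds just the current value; `NULL` and `TEMPORARY` allocations skip materializing a value entirely.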
Example #2
    def create_variable(self, name, alloc, type_, max_stack_depth, batch_size):
        """Returns an intialized Variable.

    Args:
      name: Name for the variable.
      alloc: `VariableAllocation` for the variable.
      type_: `instructions.TensorType` describing the sub-batch shape and dtype
        of the variable being created.
      max_stack_depth: Scalar `int` `Tensor`, the maximum stack depth allocated.
      batch_size: Scalar `int` `Tensor`, the number of parallel threads being
        executed.

    Returns:
      var: A new, initialized Variable object.
    """
        if alloc is instructions.VariableAllocation.NULL:
            return instructions.NullVariable()
        elif alloc is instructions.VariableAllocation.TEMPORARY:
            return instructions.TemporaryVariable.empty()
        else:
            name = 'Variable' if name is None else 'VM.var_{}'.format(name)
            dtype, event_shape = type_

            with tf.name_scope('{}.initialize'.format(name)):
                if (alloc is instructions.VariableAllocation.REGISTER
                        and tf.executing_eagerly()):
                    # Don't need to construct the empty value in Eager mode, because there
                    # is no tf.while_loop whose loop-carried state it would need to be.
                    # This is a substantial optimization for stackless mode, because that
                    # initializes variables on every function call, rather than just once.
                    value = (batch_size, dtype, event_shape)
                else:
                    value = self.fill(0, batch_size, dtype, event_shape)

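                # REGISTER variables hold just the current value; FULL ones
                # also get a stack of depth `max_stack_depth` for push/pop.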
                if alloc is instructions.VariableAllocation.REGISTER:
                    klass = RegisterTensorFlowVariable
                    extra = []
                else:
                    klass = FullTensorFlowVariable
                    extra = [
                        _create_stack(max_stack_depth, value,
                                      self._safety_checks)
                    ]

                class NamedVariable(klass):
                    """Captures `name` to yield improved downstream TF op names."""
                    def _name(self):
                        return name

                return NamedVariable(value, *extra)
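
A matching sketch for the TensorFlow variant, with the same caveats; `backend` again stands for an already-constructed backend object (hypothetical name). Per the docstring, the size arguments may be scalar `int` `Tensor`s.

    import tensorflow as tf
    from tensorflow_probability.python.experimental.auto_batching import instructions

    var = backend.create_variable(
        name='accum',              # ops land under the 'VM.var_accum.initialize' scope
        alloc=instructions.VariableAllocation.REGISTER,
        type_=(tf.float32, (2,)),  # (dtype, event_shape) pair
        max_stack_depth=tf.constant(15),  # scalar int Tensor
        batch_size=tf.constant(8))

Note the Eager shortcut in the source: under eager execution a `REGISTER` variable initially stores only `(batch_size, dtype, event_shape)` and defers building the zero-filled tensor, since there is no `tf.while_loop` that would need it as loop-carried state.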