Example 1
def _is_even_lowered_execute(inputs, backend):
  # Optimize variable allocation, then lower the function calls into explicit
  # stack operations before handing the program to the VM.
  prog = test_programs.is_even_function_calls()
  alloc = allocation_strategy.optimize(prog)
  lowered = lowering.lower_function_calls(alloc)
  # The recursion depth grows with the largest input, plus a little headroom.
  return list(vm.execute(
      lowered, [inputs],
      max_stack_depth=int(max(inputs)) + 3, backend=backend))
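The helper above assumes the auto-batching modules it uses are already in scope. Below is a minimal sketch of the likely imports plus a sample call with the NumPy backend; the module paths and the NumpyBackend constructor are assumptions modeled on TensorFlow Probability's experimental auto_batching package, not taken from this snippet:

# Sketch only: the import paths and NP_BACKEND below are assumed, not confirmed.
import numpy as np
from tensorflow_probability.python.experimental.auto_batching import allocation_strategy
from tensorflow_probability.python.experimental.auto_batching import lowering
from tensorflow_probability.python.experimental.auto_batching import numpy_backend
from tensorflow_probability.python.experimental.auto_batching import test_programs
from tensorflow_probability.python.experimental.auto_batching import virtual_machine as vm

NP_BACKEND = numpy_backend.NumpyBackend()

# Batched parity query; the stack depth is derived from the largest input.
results = _is_even_lowered_execute(np.array([2, 5, 8]), NP_BACKEND)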
Example 2
def _constant_execute(inputs, backend):
    # Stack depth limit is 4 to accommodate the initial value and
    # two pushes to "answer".
    with tf.compat.v2.name_scope('constant_program'):
        return vm.execute(test_programs.constant_program(), [inputs],
                          max_stack_depth=4,
                          backend=backend)
Example 3
def _fibonacci_lowered_execute(inputs, backend):
  prog = test_programs.fibonacci_function_calls()
  alloc = allocation_strategy.optimize(prog)
  lowered = lowering.lower_function_calls(alloc)
  return list(vm.execute(
      lowered, [inputs],
      max_stack_depth=15, backend=backend))
Example 4
 def batched(*args, **kwargs):
     """The batched function."""
     # Accepting kwargs here because Python 2.7 gets confused otherwise.
     # See, e.g.,
     # https://stackoverflow.com/questions/14003939/python-function-args-and-kwargs-with-other-specified-keyword-arguments
     # The whole strategy of adding arguments to the decorated function has
     # the unfortunate consequence that pylint uses the argument signature of
     # the decoratee to issue warnings, with the result that
     # disable=unexpected-keyword-arg needs to be added to use sites of
     # decorated functions, for example in `frontend_test.py`.
     # TODO(axch) Figure out how to avoid pylint warnings in callers.
     # Options:
     # - Add `backend` and `max_stack_depth` arguments to the `Context`
     #   constructor, and use those as defaults here.
     #   - Likely insufficient, because dynamic selection of the stack size
     #     seems important.
     # - Carry the same information in a context manager, used like
     #     with ctx.config(backend=TF_BACKEND, max_stack_depth=15):
     #       some_decorated_function(normal, arguments)
     # - Just invoke batched functions from the `Context` object:
     #     ctx.run(some_decorated_function, input_1, max_stack_depth=15)
     # - Maybe even make magic methods for the above?
     #     ctx.some_decorated_function(input_1, max_stack_depth=15)
     max_stack_depth = kwargs.pop('max_stack_depth', 15)
     backend = kwargs.pop('backend', TF_BACKEND)
     dry_run = kwargs.pop('dry_run', False)
     stackless = kwargs.pop('stackless', False)
     block_code_cache = kwargs.pop('block_code_cache', {})
     if self._in_dry_run:
         return _run_at_batch_size_one(backend, function, args)
     if dry_run:
         with self._dry_run():
             return _run_at_batch_size_one(backend, function, args)
     if kwargs:
         msg = 'Auto-batched function given unexpected keyword arguments {}.'
         raise TypeError(msg.format(kwargs.keys()))
     sig = ab_type_inference.signature(self.program(main=name), args,
                                       backend)
     if stackless:
         compiled = self.program_compiled(main=name,
                                          sig=sig,
                                          backend=backend)
         return st.execute(compiled, backend, block_code_cache, *args)
     else:
         lowered = self.program_lowered(main=name,
                                        sig=sig,
                                        backend=backend)
         return vm.execute(lowered,
                           args,
                           max_stack_depth,
                           backend,
                           block_code_cache=block_code_cache)
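As the TODO above explains, call sites of auto-batched functions pass max_stack_depth and backend even though the decoratee's signature does not declare them, which is what provokes the pylint warning. A hypothetical call site, with placeholder names borrowed from the comment rather than a confirmed API:

# Hypothetical call site; the function and argument names are illustrative only.
answer = some_decorated_function(  # pylint: disable=unexpected-keyword-arg
    input_1, max_stack_depth=15, backend=TF_BACKEND)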
Example 5
        def execute(batch_size, latent_size, data_size):
            data = np.random.normal(size=(data_size,
                                          latent_size)).astype(np.float32)

            def step_state(state):
                return state + np.sum(np.tensordot(data, state, ([1], [1])))

            state = np.random.normal(size=(batch_size,
                                           latent_size)).astype(np.float32)

            def choose_depth(count):
                del count
                return 3

            program = test_programs.pea_nuts_program((latent_size, ),
                                                     choose_depth, step_state)
            input_counts = np.array([3] * batch_size)
            return vm.execute(program, [input_counts, state],
                              10,
                              backend=NP_BACKEND)
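A sample invocation of the nested helper above, with small sizes chosen arbitrarily for illustration (it assumes the same NumPy-backend setup sketched under Example 1):

result = execute(batch_size=2, latent_size=3, data_size=4)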
Example 6
        def execute(_, backend):
            data = tf.random.normal(shape=(data_size, latent_size),
                                    dtype=np.float32)

            def step_state(state):
                return state + tf.reduce_sum(
                    input_tensor=tf.tensordot(data, state, ([1], [1])))

            state = tf.random.normal(shape=(batch_size, latent_size),
                                     dtype=np.float32)

            def choose_depth(count):
                del count
                return 2

            program = test_programs.pea_nuts_program((latent_size, ),
                                                     choose_depth, step_state)
            input_counts = np.array([3] * batch_size)
            return vm.execute(program, [input_counts, state],
                              10,
                              backend=backend)
Example 7
def _fibonacci_execute(inputs, backend):
    with tf.compat.v2.name_scope('fibonacci_program'):
        return vm.execute(test_programs.fibonacci_program(), [inputs],
                          max_stack_depth=15,
                          backend=backend)
Example 8
def _product_type_execute(inputs, backend):
    with tf.compat.v2.name_scope('product_types_program'):
        return vm.execute(test_programs.synthetic_pattern_variable_program(),
                          [inputs],
                          max_stack_depth=4,
                          backend=backend)
Example 9
def _single_if_execute(inputs, backend):
    with tf.compat.v2.name_scope('single_if_program'):
        return vm.execute(test_programs.single_if_program(), [inputs],
                          max_stack_depth=3,
                          backend=backend)
Example 10
def _execute(prog, inputs, stack_depth, backend):
  return vm.execute(
      prog, [inputs], max_stack_depth=stack_depth, backend=backend)
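Because _execute takes the program itself as a parameter, it can replay any of the earlier examples. For instance, reusing the assumed NP_BACKEND constant from the sketch under Example 1:

outputs = _execute(
    test_programs.fibonacci_program(), np.array([5, 6]), 15, NP_BACKEND)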