Code example #1
 def testProductTypeInferenceNumpy(self):
     inputs = np.array([4, 5], dtype=np.int64)
     outputs = np.array(([6, 7], [7, 8]), dtype=np.int64)
     prog = test_programs.synthetic_pattern_variable_program(
         include_types=False)
     typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)
     expected_prog = test_programs.synthetic_pattern_variable_program()
     self.assertSameTypes(expected_prog, typed)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     self.assertAllEqual(outputs, _execute(lowered, inputs, 15, NP_BACKEND))
Code example #2
 def testShapeSequenceInferenceNumpy(self, dtype):
     shape_seq = [(1, 1, 3, 1), (1, 1, 1, 2), (1, 1, 3, 2), (1, 5, 1, 1)]
     init_val = np.zeros([1], dtype=dtype)
     prog = shape_sequence_program(shape_seq)
     typed = type_inference.infer_types(prog, [init_val], NP_BACKEND)
     self.assertEqual({instructions.pc_var, 'ans'},
                      set(typed.var_defs.keys()))
     self.assertEqual(dtype, typed.var_defs['ans'].tensors.dtype)
     # Note: the shapes used by the primop include the batch dimension, but the
     # returned type does not.
     self.assertEqual((5, 3, 2), typed.var_defs['ans'].tensors.shape)
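The assertion above expects shape (5, 3, 2), i.e. without the size-1 batch dimension mentioned in the comment. Assuming (a guess at the semantics, not something the test states) that the inferred type is the broadcast of the per-step shapes with the leading batch dimension dropped, a plain-NumPy sketch of that arithmetic is:

  import numpy as np

  # Hypothetical check: broadcast the per-step shapes used in the test above.
  shape_seq = [(1, 1, 3, 1), (1, 1, 1, 2), (1, 1, 3, 2), (1, 5, 1, 1)]
  batched = np.broadcast_shapes(*shape_seq)  # (1, 5, 3, 2), batch dimension included
  unbatched = batched[1:]                    # drop the leading batch dimension
  assert unbatched == (5, 3, 2)              # matches the asserted inferred shape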
Code example #3
File: dsl_test.py Project: stjordanis/probability
 def testAutoBatchingEvenOddNumpy(self):
   for inputs, outputs in ([5], [False]), ([5, 6, 8, 9],
                                           [False, True, True, False]):
     inputs = np.array(inputs, dtype=np.int64)
     outputs = np.array(outputs, dtype=np.bool_)
     prog = even_odd_program()
     # print(prog)
     typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)
     # print(typed)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     # print(lowered)
     self.assertAllEqual(outputs, _execute(lowered, inputs, 15, NP_BACKEND))
Code example #4
File: dsl_test.py Project: stjordanis/probability
 def testAutoBatchingFibonacciTF(self):
   for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]):
     inputs = np.array(inputs, dtype=np.int32)
     outputs = np.array(outputs, dtype=np.int32)
     prog = fibonacci_program()
     # print(prog)
     inputs_t = tf.constant(inputs, dtype=np.int32)
     typed = type_inference.infer_types(prog, [inputs_t], TF_BACKEND)
     # print(typed)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     # print(lowered)
     self.assertAllEqual(
         outputs, self.evaluate(_execute(lowered, inputs_t, 15, TF_BACKEND)))
Code example #5
 def testFibonacciTypeInferenceTF(self, dtype):
   for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]):
     inputs = np.array(inputs, dtype=dtype)
     outputs = np.array(outputs, dtype=dtype)
     tf1.logging.debug('tf.fib {} {} {}'.format(
         dtype, inputs.shape, outputs.shape))
     inputs_t = tf.constant(inputs, dtype=dtype)
     prog = test_programs.fibonacci_function_calls(include_types=False)
     typed = type_inference.infer_types(prog, [inputs_t], TF_BACKEND)
     expected_prog = test_programs.fibonacci_function_calls(dtype=dtype)
     self.assertSameTypes(expected_prog, typed)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     self.assertAllEqual(
         outputs, self.evaluate(_execute(lowered, inputs_t, 15, TF_BACKEND)))
Code example #6
File: dsl_test.py Project: stjordanis/probability
 def testAutoBatchingMultivalueTF(self):
   input_ = np.array([1, 1, 1], dtype=np.int64)
   output = ((np.array([1, 1, 1], dtype=np.int64),
              np.array([3, 3, 3], dtype=np.int64)),
             np.array([4, 4, 4], dtype=np.int64),
             (np.array([5, 5, 5], dtype=np.int64),
              np.array([6, 6, 6], dtype=np.int64)))
   prog = synthetic_pattern_program()
   # print(prog)
   input_t = tf.constant(input_, dtype=np.int64)
   typed = type_inference.infer_types(prog, [input_t], TF_BACKEND)
   # print(typed)
   alloc = allocation_strategy.optimize(typed)
   lowered = lowering.lower_function_calls(alloc)
   # print(lowered)
   for expected, obtained in instructions.pattern_zip(
       output, self.evaluate(_execute(lowered, input_t, 15, TF_BACKEND))):
     self.assertAllEqual(expected, obtained)
Code example #7
 def testIsEvenTypeInferenceTF(self, dtype):
   for inputs, outputs in [([1], [False]),
                           ([5, 6, 0, 3], [False, True, True, False])]:
     inputs = np.array(inputs, dtype=dtype)
     outputs = np.array(outputs, dtype=np.bool_)
     tf1.logging.debug('tf.even {} {} {}'.format(
         dtype, inputs.shape, outputs.shape))
     inputs_t = tf.constant(inputs, dtype=dtype)
     prog = test_programs.is_even_function_calls(include_types=False)
     typed = type_inference.infer_types(prog, [inputs_t], TF_BACKEND)
     expected_prog = test_programs.is_even_function_calls(dtype=dtype)
     self.assertSameTypes(expected_prog, typed)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     self.assertAllEqual(
         outputs,
         self.evaluate(_execute(
             lowered, inputs_t, int(np.max(inputs)) + 3, TF_BACKEND)))
Code example #8
 def testFibonacciTypeInferenceNumpy(self, dtype):
   for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]):
     inputs = np.array(inputs, dtype=dtype)
     outputs = np.array(outputs, dtype=dtype)
     tf1.logging.debug('np.fib {} {} {}'.format(
         dtype, inputs.shape, outputs.shape))
     prog = test_programs.fibonacci_function_calls(include_types=False)
     typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)
     expected_prog = test_programs.fibonacci_function_calls(dtype=dtype)
     # We can only assert on the int64/float64 cases because numpy does
     # not match-cast types on arithmetic with constants.
     # i.e. (np.int32(0) - 1).dtype == np.int64
     self.assertSameTypes(
         expected_prog, typed, check_dtypes=dtype(0).nbytes == 8)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     self.assertAllEqual(
         outputs, _execute(lowered, inputs, 15, NP_BACKEND))
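The comment about restricting dtype assertions to int64/float64 refers to NumPy's legacy value-based promotion: when both operands are scalars (or 0-d arrays), a Python int is treated as the default integer type rather than adopting the other operand's dtype. A small stand-alone check, noting that NumPy 2.0 (NEP 50) changed this behavior:

  import numpy as np

  # Under pre-NEP 50 promotion (NumPy < 2.0), subtracting a Python int from a
  # NumPy scalar promotes to the default integer type (int64 on 64-bit Linux/macOS):
  print((np.int32(0) - 1).dtype)  # int64 on NumPy < 2.0; int32 on NumPy >= 2.0
  print((np.int64(0) - 1).dtype)  # int64 either way, so only that case is stable to assert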
Code example #9
 def testIsEvenTypeInferenceNumpy(self, dtype):
   for inputs, outputs in [([1], [False]),
                           ([5, 6, 0, 3], [False, True, True, False])]:
     inputs = np.array(inputs, dtype=dtype)
     outputs = np.array(outputs, dtype=np.bool_)
     tf1.logging.debug('np.even {} {} {}'.format(
         dtype, inputs.shape, outputs.shape))
     prog = test_programs.is_even_function_calls(include_types=False)
     typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)
     expected_prog = test_programs.is_even_function_calls(dtype=dtype)
     # We can only assert on the int64/float64 cases because numpy does
     # not match-cast types on arithmetic with constants.
     # i.e. (np.int32(0) - 1).dtype == np.int64
     self.assertSameTypes(
         expected_prog, typed, check_dtypes=dtype(0).nbytes == 8)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     self.assertAllEqual(
         outputs,
         _execute(lowered, inputs, int(np.max(inputs)) + 3, NP_BACKEND))
Code example #10
File: dsl_test.py Project: stjordanis/probability
 def testAutoBatchingFibonacciNumpy(self):
   for inputs, outputs in ([5], [8]), ([5, 6, 8, 9], [8, 13, 34, 55]):
     # This test doesn't pass with int32 input types, because (apparently)
     # numpy can't tell the difference between an ndarray of shape () and known
     # dtype, and a scalar (literal) whose dtype needs to be inferred.
     # To wit:
     #   (np.zeros((), dtype=np.int32) - 1).dtype == np.int64
     # because that's somehow the best numpy can do, even though
     #   (np.zeros([6], dtype=np.int32) - 1).dtype == np.int32
     # Needless to say, this messes up type inference for programs like
     # Fibonacci whose unbatched input shape is scalar.
     inputs = np.array(inputs, dtype=np.int64)
     outputs = np.array(outputs, dtype=np.int64)
     prog = fibonacci_program()
     # print(prog)
     typed = type_inference.infer_types(prog, [inputs], NP_BACKEND)
     # print(typed)
     alloc = allocation_strategy.optimize(typed)
     lowered = lowering.lower_function_calls(alloc)
     # print(lowered)
     self.assertAllEqual(outputs, _execute(lowered, inputs, 15, NP_BACKEND))
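The scalar-versus-array asymmetry described in the comment can be reproduced directly; as with the earlier sketch, this matches the pre-NumPy-2.0 promotion rules the test was written against:

  import numpy as np

  # A 0-d int32 array combined with a Python int counts as scalar arithmetic and is
  # promoted to the default integer type under legacy (pre-NEP 50) promotion:
  print((np.zeros((), dtype=np.int32) - 1).dtype)   # int64 on NumPy < 2.0 (64-bit Linux/macOS)
  # A 1-d int32 array keeps its dtype, because value-based casting applies to the
  # Python scalar when a non-scalar array operand is involved:
  print((np.zeros([6], dtype=np.int32) - 1).dtype)  # int32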