Example 1
    def test_encode_decode_type_spec(self):
        spec = tf.TensorSpec((1, 5), tf.float32)
        string = json_utils.Encoder().encode(spec)
        loaded = json_utils.decode(string)
        self.assertEqual(spec, loaded)

        invalid_type_spec = {
            'class_name': 'TypeSpec',
            'type_spec': 'Invalid Type',
            'serialized': None
        }
        string = json_utils.Encoder().encode(invalid_type_spec)
        with self.assertRaisesRegex(ValueError,
                                    'No TypeSpec has been registered'):
            loaded = json_utils.decode(string)
Example 2
    def testTypeSpecArg(self):
        # Create a Keras Input
        x = input_layer_lib.Input(type_spec=tf.TensorSpec((7, 32), tf.float32))
        self.assertAllEqual(x.shape.as_list(), [7, 32])

        # Verify you can construct and use a model w/ this input
        model = functional.Functional(x, x * 2.0)
        self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0)

        # Test serialization / deserialization
        model = functional.Functional.from_config(model.get_config())
        self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0)

        model = model_config.model_from_json(model.to_json())
        self.assertAllEqual(model(tf.ones(x.shape)), tf.ones(x.shape) * 2.0)
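The test above uses Keras-internal modules (input_layer_lib, functional, model_config). With the public API, the same type_spec pattern looks roughly like this (a minimal sketch, assuming TF 2.4+, where tf.keras.Input accepts a type_spec argument):

import tensorflow as tf

# Build a Keras Input directly from a TensorSpec; here the batch size is dynamic.
x = tf.keras.Input(type_spec=tf.TensorSpec((None, 32), tf.float32))
y = tf.keras.layers.Dense(8)(x)
model = tf.keras.Model(x, y)
model(tf.ones((7, 32)))  # -> shape (7, 8)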
Example 3
    def create_serving_signature(model):

      @tf.function
      def serve_fn(raw_features):
        raw_features = tf.expand_dims(raw_features, axis=0)
        transformed_features = model.feature_ps(raw_features)
        outputs = model(transformed_features)
        outputs = tf.squeeze(outputs, axis=0)
        outputs = tf.cast(tf.greater(outputs, 0.5), tf.int64)
        decoded_outputs = model.label_inverse_lookup_layer(outputs)
        return tf.squeeze(decoded_outputs, axis=0)

      # serving does NOT have batch dimension
      return serve_fn.get_concrete_function(
          tf.TensorSpec(shape=(3), dtype=tf.string, name="example"))
Example 4
from tensorflow.keras.layers import Embedding, LSTM  # layers used below

class LanguageModelEncoder(tf.train.Checkpoint):
    def __init__(self, vocab_size, emb_dim, state_size, n_layers):
        super(LanguageModelEncoder, self).__init__()
        self._state_size = state_size
        self.embedding_layer = Embedding(vocab_size, emb_dim)
        self._lstm_layers = [LSTM(self._state_size, return_sequences=True)
                             for _ in range(n_layers)]

    @tf.function(input_signature=[tf.TensorSpec([None, None], tf.dtypes.int64)])
    def __call__(self, sentence_lookup_ids):
        emb_output = self.embedding_layer(sentence_lookup_ids)
        lstm_output = emb_output  # initialize to the input of the LSTM stack
        for lstm_layer in self._lstm_layers:
            lstm_output = lstm_layer(lstm_output)
        return lstm_output
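A hedged usage sketch for the encoder above (assuming Embedding and LSTM come from tf.keras.layers, as the import indicates); the fixed input_signature on __call__ is what makes the traced function exportable:

encoder = LanguageModelEncoder(vocab_size=1000, emb_dim=64, state_size=128, n_layers=2)
out = encoder(tf.zeros([2, 7], tf.int64))  # (batch, time, state_size) -> (2, 7, 128)
tf.saved_model.save(encoder, "/tmp/lm_encoder")  # works because __call__ has a signature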
Example 5
class ControlFlowModule(tf.Module):

  def __init__(self):
    pass

  @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
  def collatz(self, a):
    i = 0.
    while a > 1.:
      i = i + 1.
      if (a % 2.) > 0.:
        a = 3. * a + 1.
      else:
        a = a / 2.
    return i
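A quick sanity check of the module above (a sketch; AutoGraph converts the Python while/if into tf.while_loop/tf.cond, so the scalar input_signature suffices):

m = ControlFlowModule()
print(m.collatz(tf.constant(6.0)))  # 6 -> 3 -> 10 -> 5 -> 16 -> 8 -> 4 -> 2 -> 1, returns 8.0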
Example 6
    def setUp(self):
        super().setUp()
        input_signature = [
            tf.TensorSpec(shape=[None], dtype=tf.float32, name='init')
        ]
        self.init = tf.ones([7])
        self.decorator = tf.function(jit_compile=True,
                                     input_signature=input_signature)
        self.arms = 2
        self.days = 3

        self.n_chains = 5
        self.trials = tfd.Poisson(100.).sample([self.arms, self.days])
        self.dist = get_joint_distribution(self.trials)
        self.true_values = self.dist.sample(seed=test_util.test_seed())
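The decorator built in setUp can be applied to any function matching the signature; standalone, the same pattern is (a minimal sketch):

decorator = tf.function(jit_compile=True,
                        input_signature=[tf.TensorSpec(shape=[None], dtype=tf.float32, name='init')])
double = decorator(lambda x: x * 2.0)
double(tf.ones([7]))  # XLA-compiled; traced once for any 1-D float32 input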
Example 7
class TestModule(tf.Module):

    # Check that we get shapes annotated on function arguments.
    #
    # Besides checking the shape on the function input argument, this test also
    # checks that the shape on the input argument is propagated to the return
    # value.
    # We eventually want to move the shape inference to a pass separate from
    # the initial import, in which case that aspect of this test doesn't make much
    # sense and will be superseded by MLIR->MLIR shape inference tests.
    #
    # CHECK:      func {{@[a-zA-Z_0-9]+}}(%arg0: tensor<f32> {{.*}}) -> (tensor<f32> {{.*}})
    # CHECK-NEXT: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def some_function(self, x):
        return x
Example 9
    def test_specify_input_signature(self):
        model = testing_utils.get_small_sequential_mlp(10, 3, None)
        inputs = tf.ones((8, 5))

        with self.assertRaisesRegex(ValueError,
                                    'input shapes have not been set'):
            saving_utils.trace_model_call(model)

        fn = saving_utils.trace_model_call(
            model, [tf.TensorSpec(shape=[None, 5], dtype=tf.float32)])
        signature_outputs = fn(inputs)
        if model.output_names:
            expected_outputs = {model.output_names[0]: model(inputs)}
        else:
            expected_outputs = {'output_1': model(inputs)}
        self._assert_all_close(expected_outputs, signature_outputs)
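saving_utils.trace_model_call is Keras-internal; with the public API, tracing a model call against an explicit spec looks roughly like this (a sketch):

model = tf.keras.Sequential([tf.keras.layers.Dense(3)])
fn = tf.function(model).get_concrete_function(
    tf.TensorSpec(shape=[None, 5], dtype=tf.float32))
fn(tf.ones((8, 5)))  # -> shape (8, 3)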
Example 10
class TextEmbeddingModel(tf.train.Checkpoint):
  """Text embedding model.

  A text embedding model that takes sentences as input and outputs their
  embeddings.
  """

  def __init__(self, vocabulary, emb_dim, oov_buckets):
    super(TextEmbeddingModel, self).__init__()
    self._oov_buckets = oov_buckets
    self._vocabulary_file = tracking.TrackableAsset(
        write_vocabulary_file(vocabulary))
    self._total_size = len(vocabulary) + oov_buckets
    self._table = lookup_ops.index_table_from_file(
        vocabulary_file=self._vocabulary_file,
        num_oov_buckets=self._oov_buckets,
        hasher_spec=lookup_ops.FastHashSpec)
    self.embeddings = tf.Variable(
        tf.random.uniform(shape=[self._total_size, emb_dim]))
    self.variables = [self.embeddings]
    self.trainable_variables = self.variables

  def _tokenize(self, sentences):
    # Perform a minimalistic text preprocessing by removing punctuation and
    # splitting on spaces.
    normalized_sentences = tf.strings.regex_replace(
        input=sentences, pattern=r"\pP", rewrite="")
    normalized_sentences = tf.reshape(normalized_sentences, [-1])
    sparse_tokens = tf.string_split(normalized_sentences, " ")

    # Deal with a corner case: there is one empty sentence.
    sparse_tokens, _ = tf.sparse.fill_empty_rows(sparse_tokens, tf.constant(""))
    # Deal with a corner case: all sentences are empty.
    sparse_tokens = tf.sparse.reset_shape(sparse_tokens)
    sparse_token_ids = self._table.lookup(sparse_tokens.values)

    return (sparse_tokens.indices, sparse_token_ids, sparse_tokens.dense_shape)

  @tf.function(input_signature=[tf.TensorSpec([None], tf.dtypes.string)])
  def __call__(self, sentences):
    token_ids, token_values, token_dense_shape = self._tokenize(sentences)

    return tf.nn.safe_embedding_lookup_sparse(
        embedding_weights=self.embeddings,
        sparse_ids=tf.SparseTensor(token_ids, token_values, token_dense_shape),
        sparse_weights=None,
        combiner="sqrtn")
Example 11
  def test_save_load_io_device(self, model_and_input, distribution):
    saved_dir = os.path.join(self.get_temp_dir(), 'io_device')
    with distribution.scope():
      model = model_and_input.get_model()
      x_train, y_train, _ = model_and_input.get_data()
      batch_size = model_and_input.get_batch_size()
      self._train_model(model, x_train, y_train, batch_size)
    call = model.__call__.get_concrete_function(tf.TensorSpec(None))
    save_options = tf.saved_model.SaveOptions(
        experimental_io_device='/job:localhost')
    tf.saved_model.save(model, saved_dir, signatures=call, options=save_options)
    load_options = tf.saved_model.LoadOptions(
        experimental_io_device='/job:localhost')
    # Check that the model can be loaded and training continued without error.
    with distribution.scope():
      loaded_model = tf.saved_model.load(saved_dir, options=load_options)
      self._train_model(loaded_model, x_train, y_train, batch_size)
Example 12
class TestModule(tf.Module):
    def __init__(self):
        super(TestModule, self).__init__()
        self.v42 = tf.Variable(42.0)
        self.c43 = tf.constant(43.0)

    # CHECK: "tf_saved_model.global_tensor"() {is_mutable, sym_name = "[[VAR:[a-zA-Z_0-9]+]]", tf_saved_model.exported_names = ["v42"], type = tensor<f32>, value = dense<4.200000e+01> : tensor<f32>} : () -> ()
    # CHECK: "tf_saved_model.global_tensor"() {sym_name = "[[CONST:[a-zA-Z_0-9]+]]", tf_saved_model.exported_names = [], type = tensor<f32>, value = dense<4.300000e+01> : tensor<f32>} : () -> ()
    # CHECK:      func {{@[a-zA-Z_0-9]+}}(
    # CHECK-SAME:   %arg0: tensor<f32> {tf._user_specified_name = "x", tf_saved_model.index_path = [0]},
    # CHECK-SAME:   %arg1: tensor<!tf_type.resource<tensor<f32>>> {tf_saved_model.bound_input = @[[VAR]]},
    # CHECK-SAME:   %arg2: tensor<!tf_type.resource<tensor<f32>>> {tf_saved_model.bound_input = @[[CONST]]}) -> (
    # CHECK-SAME:   tensor<f32> {tf_saved_model.index_path = []})
    # CHECK-SAME: attributes {{.*}} tf_saved_model.exported_names = ["some_function"]
    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def some_function(self, x):
        return x + self.v42 + self.c43
Example 13
  def testSaveSequentialModelWithoutInputShapes(self):
    model = sequential_model_without_input_shape(True)
    # A Sequential model that hasn't been built should raise an error.
    with self.assertRaisesRegex(
        ValueError, 'Weights for sequential model have not yet been created'):
      keras_saved_model.export_saved_model(model, '')

    # Even with an input_signature, the model's weights have not been created.
    with self.assertRaisesRegex(
        ValueError, 'Weights for sequential model have not yet been created'):
      saved_model_dir = self._save_model_dir()
      keras_saved_model.export_saved_model(
          model,
          saved_model_dir,
          input_signature=tf.TensorSpec(
              shape=(10, 11, 12, 13, 14), dtype=tf.float32,
              name='spec_input'))
Example 14
class TestGetTensorSpec(parameterized.TestCase):
    @parameterized.parameters([
        (lambda: tf.constant([[1, 2]]), [1, 2]),
        (tf.TensorSpec([8, 3], tf.int32), [8, 3]),
        (tf.TensorSpec([8], tf.int32), [8]),
        (tf.TensorSpec([], tf.int32), []),
        (tf.TensorSpec(None, tf.int32), None),
        (tf.RaggedTensorSpec([8, 3], tf.int32), [8, 3]),
        (tf.SparseTensorSpec([8, 3], tf.int32), [8, 3]),
    ])
    def test_without_dynamic_batch(self, t, expected_shape):
        if callable(t):
            t = t()
        result = tf_utils.get_tensor_spec(t)
        self.assertTrue(result.is_compatible_with(t))
        if expected_shape is None:
            self.assertIsNone(result.shape.rank)
        else:
            self.assertEqual(result.shape.as_list(), expected_shape)

    @parameterized.parameters([
        (lambda: tf.constant([[1, 2]]), [None, 2]),
        (tf.TensorSpec([8, 3], tf.int32), [None, 3]),
        (tf.TensorSpec([8], tf.int32), [None]),
        (tf.TensorSpec([], tf.int32), []),
        (tf.TensorSpec(None, tf.int32), None),
        (tf.RaggedTensorSpec([8, 3], tf.int32), [None, 3]),
        (tf.SparseTensorSpec([8, 3], tf.int32), [None, 3]),
    ])
    def test_with_dynamic_batch(self, t, expected_shape):
        if callable(t):
            t = t()
        result = tf_utils.get_tensor_spec(t, True)
        self.assertTrue(result.is_compatible_with(t))
        if expected_shape is None:
            self.assertIsNone(result.shape.rank)
        else:
            self.assertEqual(result.shape.as_list(), expected_shape)

    def test_with_keras_tensor_with_ragged_spec(self):
        t = keras.engine.keras_tensor.KerasTensor(
            tf.RaggedTensorSpec(shape=(None, None, 1)))
        self.assertIsInstance(tf_utils.get_tensor_spec(t), tf.RaggedTensorSpec)
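For plain tensors, the dynamic-batch behavior exercised by the second test amounts to relaxing the leading dimension. A hypothetical re-implementation of that piece (the name relax_batch_dim is mine, not the Keras util):

def relax_batch_dim(spec):
    # Replace the leading dimension with None when the rank is known and nonzero.
    if spec.shape.rank is None or spec.shape.rank == 0:
        return spec
    return tf.TensorSpec([None] + spec.shape.as_list()[1:], spec.dtype, spec.name)

relax_batch_dim(tf.TensorSpec([8, 3], tf.int32))  # TensorSpec(shape=(None, 3), dtype=tf.int32)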
Example 15
    def setUp(self):
        super().setUp()
        self._data_context = data_converter.DataContext(
            time_step_spec=ts.TimeStep(step_type=(),
                                       reward=tf.TensorSpec((), tf.float32),
                                       discount=tf.TensorSpec((), tf.float32),
                                       observation=()),
            action_spec={'action1': tf.TensorSpec((), tf.float32)},
            policy_state_spec=(),
            info_spec=())

        self._data_context_with_state = data_converter.DataContext(
            time_step_spec=ts.TimeStep(step_type=(),
                                       reward=tf.TensorSpec((), tf.float32),
                                       discount=tf.TensorSpec((), tf.float32),
                                       observation=tf.TensorSpec((2, ),
                                                                 tf.float32)),
            action_spec={'action1': tf.TensorSpec((), tf.float32)},
            policy_state_spec=[
                tf.TensorSpec((2, ), tf.float32),
                tf.TensorSpec((2, ), tf.float32)
            ],
            info_spec=())
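Nested spec structures like these are typically consumed with tf.nest; for example, building zero-valued tensors that match the action spec (a sketch):

action_spec = {'action1': tf.TensorSpec((), tf.float32)}
zeros = tf.nest.map_structure(lambda s: tf.zeros(s.shape, s.dtype), action_spec)
# {'action1': <tf.Tensor: shape=(), dtype=float32, numpy=0.0>}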
Example 16
class TensorListModule(tf.Module):
    def __init__(self):
        pass

    @tf.function(input_signature=[tf.TensorSpec([], tf.float32)])
    def identity_through_tensorlist(self, x):
        ta = tf.TensorArray(dtype=tf.float32, size=1, element_shape=[])
        ta = ta.write(0, x)
        return ta.read(0)

    @tf.function(input_signature=[
        tf.TensorSpec([], tf.float32),
        tf.TensorSpec([], tf.float32)
    ])
    def add_through_tensorlist(self, a, b):
        ta = tf.TensorArray(dtype=tf.float32, size=2, element_shape=[])
        ta = ta.write(0, a)
        ta = ta.write(1, b)
        return ta.read(0) + ta.read(1)

    @tf.function(input_signature=[tf.TensorSpec([STATIC_SIZE], tf.float32)])
    def slice_first_element_with_from_tensor(self, t):
        ta = tf.TensorArray(dtype=tf.float32,
                            size=STATIC_SIZE,
                            element_shape=[])
        ta = ta.unstack(t)
        return ta.read(0)

    @tf.function(input_signature=[
        tf.TensorSpec([STATIC_SIZE, STATIC_SIZE], tf.float32)
    ])
    def slice_first_element_with_from_tensor_high_rank(self, t):
        ta = tf.TensorArray(dtype=tf.float32,
                            size=STATIC_SIZE,
                            element_shape=[STATIC_SIZE])
        ta = ta.unstack(t)
        return ta.read(0)

    @tf.function(input_signature=[
        tf.TensorSpec([], tf.float32),
        tf.TensorSpec([], tf.float32)
    ])
    def concat_with_tensorlist_stack(self, a, b):
        ta = tf.TensorArray(dtype=tf.float32, size=2, element_shape=[])
        ta = ta.write(0, a)
        ta = ta.write(1, b)
        return ta.stack()
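STATIC_SIZE must already be bound when the class body above is evaluated, since the input_signature is built at decoration time. With a stand-in value the module can be exercised like this (a sketch):

STATIC_SIZE = 4  # stand-in; the original defines it at module level, before the class

m = TensorListModule()
m.add_through_tensorlist(tf.constant(1.0), tf.constant(2.0))           # -> 3.0
m.slice_first_element_with_from_tensor(tf.range(4, dtype=tf.float32))  # -> 0.0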
Example 17
    def create_serving_signature(self, model, feature_mapper,
                                 label_inverse_lookup_layer):
        """Create serving signature for the given model."""
        @tf.function
        def serve_fn(raw_features):
            raw_features = tf.compat.v1.expand_dims(raw_features, axis=0)
            transformed_features = model.feature_mapper(raw_features)
            outputs = model(transformed_features)
            outputs = tf.compat.v1.squeeze(outputs, axis=0)
            outputs = tf.cast(tf.greater(outputs, 0.5), tf.int64)
            decoded_outputs = model.label_inverse_lookup_layer(outputs)
            return tf.compat.v1.squeeze(decoded_outputs, axis=0)

        model.feature_mapper = feature_mapper
        model.label_inverse_lookup_layer = label_inverse_lookup_layer
        # serving does NOT have batch dimension
        return serve_fn.get_concrete_function(
            tf.TensorSpec(shape=(3), dtype=tf.string, name="example"))
Example 18
    def test_conv1d_recreate_conv_unknown_dims(self):
        with self.cached_session():
            layer = keras.layers.Conv1D(filters=1,
                                        kernel_size=3,
                                        strides=1,
                                        dilation_rate=2,
                                        padding='causal')

            inpt1 = np.random.normal(size=[1, 9, 1]).astype(np.float32)
            inpt2 = np.random.normal(size=[1, 2, 1]).astype(np.float32)
            outp1_shape = layer(inpt1).shape

            @tf.function(input_signature=[tf.TensorSpec([1, None, 1])])
            def fn(inpt):
                return layer(inpt)

            fn(inpt2)
            self.assertEqual(outp1_shape, layer(inpt1).shape)
Example 19
def create_reverb_server_for_replay_buffer_and_variable_container(
        collect_policy, train_step, replay_buffer_capacity, port):
    """Sets up one reverb server for replay buffer and variable container."""
    # Create the signature for the variable container holding the policy weights.
    variables = {
        reverb_variable_container.POLICY_KEY: collect_policy.variables(),
        reverb_variable_container.TRAIN_STEP_KEY: train_step
    }
    variable_container_signature = tf.nest.map_structure(
        lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype),
        variables)

    # Create the signature for the replay buffer holding observed experience.
    replay_buffer_signature = tensor_spec.from_spec(
        collect_policy.collect_data_spec)
    # TODO(b/188427258) Add time dimension when using Reverb.TrajectoryWriters.
    # replay_buffer_signature = tensor_spec.add_outer_dim(replay_buffer_signature)

    # Create and start the replay buffer and variable container server.
    server = reverb.Server(
        tables=[
            reverb.Table(  # Replay buffer storing experience.
                name=reverb_replay_buffer.DEFAULT_TABLE,
                sampler=reverb.selectors.Uniform(),
                remover=reverb.selectors.Fifo(),
                # TODO(b/159073060): Set rate limiter for SAC properly.
                rate_limiter=reverb.rate_limiters.MinSize(1),
                max_size=replay_buffer_capacity,
                max_times_sampled=0,
                signature=replay_buffer_signature,
            ),
            reverb.Table(  # Variable container storing policy parameters.
                name=reverb_variable_container.DEFAULT_TABLE,
                sampler=reverb.selectors.Uniform(),
                remover=reverb.selectors.Fifo(),
                rate_limiter=reverb.rate_limiters.MinSize(1),
                max_size=1,
                max_times_sampled=0,
                signature=variable_container_signature,
            ),
        ],
        port=port)
    return server
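The tf.nest.map_structure pattern used for the variable-container signature works independently of Reverb (a minimal sketch):

variables = {
    'policy': [tf.Variable(tf.zeros([3]))],
    'train_step': tf.Variable(0, dtype=tf.int64),
}
signature = tf.nest.map_structure(
    lambda variable: tf.TensorSpec(variable.shape, dtype=variable.dtype), variables)
# {'policy': [TensorSpec(shape=(3,), dtype=tf.float32)],
#  'train_step': TensorSpec(shape=(), dtype=tf.int64)}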
Example 20
  def __init__(self):
    super().__init__()
    self.m = initialize_model()

    input_shape = list([BATCH_SIZE] + self.m.inputs[0].shape[1:])

    # Some models accept dynamic image dimensions by default, so we use
    # IMAGE_DIM as a stand-in.
    for i, dim in enumerate(input_shape):
      if dim is None:
        input_shape[i] = IMAGE_DIM

    # Specify input shape with a static batch size.
    # TODO(b/142948097): Add support for dynamic shapes in SPIR-V lowering.
    self.call = tf_test_utils.tf_function_unit_test(
        input_signature=[tf.TensorSpec(input_shape)],
        name="call",
        rtol=1e-5,
        atol=1e-5)(lambda x: self.m(x, training=False))
Example 21
def get_model():

    relay_file = "relay.json"
    relay_params = "relay.params"
    from tensorflow.python.framework.convert_to_constants import convert_variables_to_constants_v2
    model = tf.keras.Sequential([
        hub.KerasLayer(tf_hub_links[model_name], output_shape=[1001])
    ])
    img_size = 299 if model_name == 'inceptionv3' else 224
    np_image = np.random.rand(1, img_size, img_size, 3).astype('float32')
    model._set_inputs(np_image)


    # Convert Keras model to ConcreteFunction
    full_model = tf.function(lambda x: model(x))
    full_model = full_model.get_concrete_function(
            tf.TensorSpec(model.inputs[0].shape, model.inputs[0].dtype, name="data"))

    frozen_func = convert_variables_to_constants_v2(full_model)
    frozen_func.graph.as_graph_def()

    tf.io.write_graph(graph_or_graph_def=frozen_func.graph,
                      logdir="./.tf_saved_model/" + model_name,
                      name="frozen_graph.pb",
                      as_text=False)

    parser = tvm.relay.frontend.TFParser("./.tf_saved_model/"
                                         + model_name +  "/frozen_graph.pb")
    graph_def = parser.parse()
    mod, params = relay.frontend.from_tensorflow(graph_def,
                                                 shape={"data": (1, img_size, img_size, 3)})

    # with open(relay_file, "w") as fo:
    #     fo.write(tvm.ir.save_json(mod))
    # with open(relay_params, "wb") as fo:
    #     fo.write(relay.save_param_dict(params))

    # with open(relay_file, "r") as fi:
    #     mod = tvm.ir.load_json(fi.read())
    # with open(relay_params, "rb") as fi:
    #     params = relay.load_param_dict(fi.read())
    return mod, params
Example 22
    def testKerasModel(self):
        """Test a basic Keras model with Variables."""
        input_data = {"x": tf.constant(1., shape=[1, 1])}

        # Create a simple Keras model.
        x = [-1, 0, 1, 2, 3, 4]
        y = [-3, -1, 1, 3, 5, 7]

        model = keras.models.Sequential(
            [keras.layers.Dense(units=1, input_shape=[1])])
        model.compile(optimizer="sgd", loss="mean_squared_error")
        model.fit(x, y, epochs=1)

        @tf.function(
            input_signature=[tf.TensorSpec(shape=[1, 1], dtype=tf.float32)])
        def to_save(x):
            return model(x)

        root, output_func = self._freezeModel(to_save)
        self._testConvertedFunction(root, root.f, output_func, input_data)
Example 23
    def testKerasLSTM(self):
        """Test a Keras LSTM containing dynamic_rnn ops."""
        input_data = {
            "x":
            tf.constant(
                np.array(np.random.random_sample((10, 10, 10)),
                         dtype=np.float32))
        }

        model = keras.models.Sequential(
            [keras.layers.LSTM(units=10, input_shape=(10, 10))])

        @tf.function(input_signature=[
            tf.TensorSpec(shape=[10, 10, 10], dtype=tf.float32)
        ])
        def to_save(x):
            return model(x)

        root, output_func = self._freezeModel(to_save)
        self._testConvertedFunction(root, root.f, output_func, input_data)
Example 24
def build_saved_model(model):
    """Returns a tf.Module for saving to SavedModel."""
    class SimCLRModel(tf.Module):
        """Saved model for exporting to hub."""
        def __init__(self, model):
            self.model = model
            # This can't be called `trainable_variables` because `tf.Module` has
            # a getter with the same name.
            self.trainable_variables_list = model.trainable_variables

        @tf.function
        def __call__(self, inputs, trainable):
            self.model(inputs, training=trainable)
            return get_salient_tensors_dict()

    module = SimCLRModel(model)
    input_spec = tf.TensorSpec(shape=[None, None, None, 3], dtype=tf.float32)
    module.__call__.get_concrete_function(input_spec, trainable=True)
    module.__call__.get_concrete_function(input_spec, trainable=False)
    return module
Example 25
def models():
    tf.keras.backend.set_learning_phase(False)

    # The Keras model takes an image-sized input; the batch size is left
    # unspecified and is therefore dynamic by default.
    if FLAGS.model in APP_MODELS:
        model = APP_MODELS[FLAGS.model](weights=None,
                                        include_top=False,
                                        input_shape=INPUT_SHAPE[1:])
    else:
        raise ValueError('unsupported model', FLAGS.model)

    module = tf.Module()
    module.m = model
    # Specify the input size with a static batch size.
    # TODO(b/142948097): once dynamic shapes are supported, replace INPUT_SHAPE
    # with model.input_shape so the batch size becomes dynamic (-1).
    module.predict = tf.function(input_signature=[tf.TensorSpec(INPUT_SHAPE)])(
        model.call)
    return module
Example 26
def __init__(self, unroll_length=1):
    self._env = MockEnv(state_space_size=4, unroll_length=unroll_length)
    self._agent = MockAgent(unroll_length=unroll_length)
    self._actor_output_spec = common.ActorOutput(
        initial_agent_state=tf.TensorSpec(shape=[5], dtype=tf.float32),
        env_output=self._env.env_spec,
        agent_output=self._agent.agent_spec,
        actor_action=common.ActorAction(
            chosen_action_idx=tf.TensorSpec(shape=[unroll_length + 1],
                                            dtype=tf.int32),
            oracle_next_action_idx=tf.TensorSpec(shape=[unroll_length + 1],
                                                 dtype=tf.int32),
            action_val=tf.TensorSpec(shape=[unroll_length + 1],
                                     dtype=tf.int32),
            log_prob=tf.TensorSpec(shape=[unroll_length + 1],
                                   dtype=tf.float32)),
        loss_type=tf.TensorSpec(shape=[], dtype=tf.int32),
        info=tf.TensorSpec(shape=[], dtype=tf.string),
    )
Example 27
    def test_run_eval_actor_once(self):
        hparams = {}
        hparams['max_iter'] = 1
        hparams['num_episodes_per_iter'] = 5
        hparams['logdir'] = os.path.join(FLAGS.test_tmpdir, 'model')

        mock_problem = testing_utils.MockProblem(
            unroll_length=FLAGS.unroll_length)
        agent = mock_problem.get_agent()
        ckpt_manager = _get_ckpt_manager(hparams['logdir'], agent=agent)
        checkpoint_path = ckpt_manager.save(checkpoint_number=0)

        # Create a no-op gRPC server that responds to Aggregator RPCs.
        server_address = 'unix:/tmp/eval_actor_test_grpc'
        server = grpc.Server([server_address])

        @tf.function(
            input_signature=[tf.TensorSpec(shape=(), dtype=tf.string)])
        def eval_enqueue(_):
            return []

        # Test 01. Eval with aggregator.
        server.bind(eval_enqueue)
        server.start()
        eval_actor.run_evaluation(mock_problem, server_address, hparams)

        # Test 02. Eval without aggregator.
        hparams['task_id'] = 0000
        hparams['max_iter'] = 1
        eval_actor.run_evaluation(mock_problem,
                                  None,
                                  hparams,
                                  checkpoint_path,
                                  FLAGS.test_tmpdir,
                                  file_prefix='mock_test',
                                  test_mode=True)
        with gfile.GFile(os.path.join(FLAGS.test_tmpdir, 'test_data_0.p'),
                         'rb') as fp:
            eval_result = pickle.load(fp)
        self.assertEqual(eval_result['mock_test_0_0_0000']['result'], 1000.0)
Example 28
class MNIST(tf.keras.models.Model):
    """Model representing a MNIST classifier."""
    def __init__(self, output_activation="softmax"):
        super(MNIST, self).__init__()
        self.layer_1 = tf.keras.layers.Dense(64)
        self.layer_2 = tf.keras.layers.Dense(10, activation=output_activation)

    @tf.function(input_signature=[
        tf.TensorSpec(shape=[None, 28, 28, 1], dtype=tf.uint8)
    ])
    def call(self, inputs):
        casted = tf.keras.layers.Lambda(lambda x: tf.cast(x, tf.float32))(
            inputs)
        flatten = tf.keras.layers.Flatten()(casted)

        def normalize_fn(x):
            return x / tf.reduce_max(tf.gather(x, 0))

        normalize = tf.keras.layers.Lambda(normalize_fn)(flatten)
        x = self.layer_1(normalize)
        output = self.layer_2(x)
        return output
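Because call carries an input_signature, it can serve directly as a SavedModel signature (a sketch):

model = MNIST()
logits = model.call(tf.ones([2, 28, 28, 1], dtype=tf.uint8))  # -> shape (2, 10)
tf.saved_model.save(model, "/tmp/mnist", signatures=model.call)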
Example 29
  def testReproduceBug159550941(self):
    # Reproduction for b/159550941.
    input_signature = [tf.TensorSpec([], tf.int32)]

    @tf.function(input_signature=input_signature)
    def sample(chains):
      initial_state = tf.zeros([chains, 1])
      def log_prob(x):
        return tf.reduce_sum(tfp.distributions.Normal(0, 1).log_prob(x), -1)
      kernel = tfp.mcmc.HamiltonianMonteCarlo(
          target_log_prob_fn=log_prob,
          num_leapfrog_steps=3,
          step_size=1e-3)
      return tfp.mcmc.sample_chain(
          num_results=5,
          num_burnin_steps=4,
          current_state=initial_state,
          kernel=kernel,
          trace_fn=None)

    # Checking that shape inference doesn't fail.
    sample(2)
Example 30
def models():
  tf.keras.backend.set_learning_phase(False)
  tf_test_utils.set_random_seed()

  input_shape = get_input_shape(FLAGS.data, FLAGS.model)
  # The Keras model takes an image-sized input; the batch size is left
  # unspecified and is therefore dynamic by default.
  if FLAGS.model in APP_MODELS:
    weights = 'imagenet' if FLAGS.data == 'imagenet' else None

    # If weights == 'imagenet', Keras downloads the weights from an external URL.
    model = APP_MODELS[FLAGS.model](
        weights=weights,
        include_top=FLAGS.include_top,
        input_shape=input_shape[1:])

    if FLAGS.data == 'cifar10' and FLAGS.url:
      file_name = 'cifar10' + FLAGS.model
      # Download the model weights from a publicly available folder, cache
      # them under ~/.keras, and return the local path.
      weights_path = tf.keras.utils.get_file(
          file_name,
          os.path.join(
              FLAGS.url,
              'cifar10_include_top_{}_{}'.format(FLAGS.include_top,
                                                 FLAGS.model + '.h5')))

      model.load_weights(weights_path)
  else:
    raise ValueError('Unsupported model', FLAGS.model)

  module = tf.Module()
  module.m = model
  # Specify the input size with a static batch size.
  # TODO(b/142948097): once dynamic shapes are supported, replace input_shape
  # with model.input_shape so the batch size becomes dynamic (-1).
  module.predict = tf.function(input_signature=[tf.TensorSpec(input_shape)])(
      model.call)
  return module