Example 1
 def test_tf_saved_model_save_multiple_signatures(self):
     base_path = os.path.join(self.get_temp_dir(), 'tf_saved_model_save')
     export_path = os.path.join(base_path, '00000123')
     root = tf.train.Checkpoint()
     root.f = tf.function(lambda x: {'y': 1.},
                          input_signature=[tf.TensorSpec(None, tf.float32)])
     root.g = tf.function(lambda x: {'y': 2.},
                          input_signature=[tf.TensorSpec(None, tf.float32)])
     tf.saved_model.experimental.save(
         root,
         export_path,
         signatures={
             signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: root.f,
             'custom_signature_key': root.g
         })
     _, model_server_address, _ = TensorflowModelServerTest.RunServer(
         'default', base_path)
     expected_version = self._GetModelVersion(base_path)
     self.VerifyPredictRequest(model_server_address,
                               expected_output=2.0,
                               expected_version=expected_version,
                               signature_name='custom_signature_key')
     self.VerifyPredictRequest(model_server_address,
                               expected_output=1.0,
                               expected_version=expected_version)
Example 2
def training_loop(train_dataset,
                  valid_dataset,
                  model,
                  hparams):
  """Trains a GNP for a fixed number of iterations."""
  optimizer_config = {'optimizer': hparams.optimizer(hparams.lr),
                      'max_grad_norm': hparams.max_grad_norm}
  num_context = hparams.num_context
  best_recon_loss = np.inf
  if hparams.is_nll:
    step = tf.function(utils.nll_gnp_step_bandits.python_function)
    valid_metric = utils.nll
  else:
    step = tf.function(utils.mse_gnp_step_bandits.python_function)
    valid_metric = utils.mse

  for it in range(hparams.num_iterations):
    batch_train_data = get_splits(
        train_dataset,
        num_context,
        hparams.batch_size,
        points_perm=True)
    recon_loss, local_z_kl, global_z_kl = step(
        model,
        batch_train_data,
        optimizer_config)

    if it % hparams.print_every == 0:
      batch_valid_data = get_splits(
          valid_dataset,
          num_context,
          hparams.batch_size,
          points_perm=False)
      (batch_context_x,
       batch_context_y,
       batch_target_x,
       batch_target_y,
       batch_unseen_target_y,
       batch_unseen_target_a) = batch_valid_data
      prediction = model(batch_context_x,
                         batch_context_y,
                         batch_target_x,
                         batch_target_y)
      batch_unseen_predictions = prediction[:, num_context:]
      valid_recon_loss = valid_metric(batch_unseen_target_y,
                                      batch_unseen_predictions,
                                      batch_unseen_target_a)

      print('it: {}, train recon loss: {}, local kl: {} global kl: {} '
            'valid reconstr loss: {}'.format(it, recon_loss, local_z_kl,
                                             global_z_kl, valid_recon_loss))
      if valid_recon_loss.numpy() < best_recon_loss:
        best_recon_loss = valid_recon_loss.numpy()
        print('Saving best model with reconstruction loss',
              best_recon_loss, flush=True)
        model.save_weights(hparams.save_path)
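A hypothetical hparams container for the loop above; the field names are exactly those training_loop reads, while every value is illustrative only.

    import types
    import tensorflow as tf

    hparams = types.SimpleNamespace(
        optimizer=tf.keras.optimizers.Adam,  # invoked as hparams.optimizer(hparams.lr)
        lr=1e-3,
        max_grad_norm=10.0,
        num_context=10,
        batch_size=32,
        is_nll=True,          # selects the NLL step and validation metric
        num_iterations=10000,
        print_every=100,
        save_path='/tmp/gnp_weights')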
Example 3
    def testNormalNormalSample(self):
        # Standard normal prior.
        # Samples are shape [2].
        normal_prior = tfd.Normal(self.dtype([0., 0.]), self.dtype(1.))

        def normal_sampler(seed):
            return normal_prior.sample(seed=seed)

        # A single data point at the mode.
        # The state is expected to be 2 dimensional, so
        # we reduce sum on the last axis.
        def normal_log_likelihood(state):
            return tf.reduce_sum(
                tfd.Normal(state, self.dtype(2.)).log_prob(self.dtype(0.)),
                axis=-1)

        kernel = elliptical.EllipticalSliceSampler(
            normal_sampler_fn=normal_sampler,
            log_likelihood_fn=normal_log_likelihood,
            seed=tfp_test_util.test_seed(),
        )

        samples = tf.function(lambda: tfp.mcmc.sample_chain(  # pylint: disable=g-long-lambda
            num_results=int(3e5),
            current_state=self.dtype(np.random.randn(2)),
            kernel=kernel,
            num_burnin_steps=int(1e4),
            parallel_iterations=1,
            trace_fn=None))()

        mean, variance = self.evaluate(tf.nn.moments(samples, axes=[0]))
        # Computed exactly from the formula in normal-normal posterior.
        self.assertAllClose([0., 0.], mean, rtol=5e-2, atol=6e-3)
        self.assertAllClose([4. / 5, 4. / 5], variance, rtol=5e-2)
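For reference, the asserted moments are the conjugate normal-normal update: with a N(0, 1) prior and a single observation of 0 under N(state, 2), the posterior variance is (1/1^2 + 1/2^2)^-1 = 4/5 and the posterior mean is 0.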
Example 4
 def graph_fn():
     boxes = tf.zeros([0, 4], dtype=tf.float32)
     blackout = tf.zeros([0], dtype=tf.bool)
     blackout_pixel_weights_by_box_regions = tf.function(
         ta_utils.blackout_pixel_weights_by_box_regions)
     output = blackout_pixel_weights_by_box_regions(
         10, 20, boxes, blackout)
     return output
Example 5
 def graph_fn():
   boxes = tf.constant(
       [[0.0, 0.0, 5, 5], [0.0, 0.0, 10.0, 20.0], [6.0, 12.0, 8.0, 18.0]],
       dtype=tf.float32)
   blackout = tf.constant([True, False, True], dtype=tf.bool)
   blackout_pixel_weights_by_box_regions = tf.function(
       ta_utils.blackout_pixel_weights_by_box_regions)
   output = blackout_pixel_weights_by_box_regions(10, 20, boxes, blackout)
   return output
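The graph_fn snippets in Examples 4 and 5 wrap the same ta_utils.blackout_pixel_weights_by_box_regions in tf.function so the op is traced and run as a graph: the first covers the empty-boxes edge case on a 10x20 grid, the second a mix of blacked-out and kept boxes.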
Example 6
 def test_linear(self, solver):
     jacobian_diag_part = np.float32([-0.5, -1.])
     initial_state = np.float32([1., 2.])
     fn = lambda: linear(solver, jacobian_diag_part, initial_state)
     fn = tf.function(fn, autograph=False, jit_compile=True)
     with tf.device(FLAGS.test_device):
         times, states = self.evaluate(fn())
     states_exact = np.exp(jacobian_diag_part[np.newaxis, :] *
                           times[:, np.newaxis]) * initial_state
     self.assertAllClose(states, states_exact, rtol=1e-4)
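Here states_exact is the closed-form solution x(t) = exp(j * t) * x0 of the linear ODE dx/dt = diag(j) * x that the solver integrates, evaluated at the returned times.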
Example 7
 def test_hub_model(self):
     image = tf.random.uniform((1, 320, 320, 3))
     keras_model = efficientdet_keras.EfficientDetNet('efficientdet-lite0')
     tmp_ckpt = os.path.join(tempfile.mkdtemp(), 'ckpt')
     keras_model.config.model_dir = tmp_ckpt
     base_model = train_lib.EfficientDetNetTrainHub(
         keras_model.config,
         "https://tfhub.dev/tensorflow/efficientdet/lite0/feature-vector/1")
     cls_outputs, box_outputs = tf.function(base_model)(image,
                                                        training=False)
     keras_model.build(image.shape)
     d1 = {var.name: var for var in base_model.variables}
     for var in keras_model.variables:
         var.assign(d1[var.name].numpy())
     cls_outputs2, box_outputs2 = tf.function(keras_model)(image, False)
     for c1, b1, c2, b2 in zip(cls_outputs, box_outputs, cls_outputs2,
                               box_outputs2):
         self.assertAllEqual(c1, c2)
         self.assertAllEqual(b1, b2)
Example 8
 def graph_fn():
     boxes = tf.constant([[0.0, 0.0, 2.0, 2.0], [0.0, 0.0, 4.0, 2.0],
                          [3.0, 0.0, 4.0, 4.0]],
                         dtype=tf.float32)
     blackout = tf.constant([False, False, True], dtype=tf.bool)
     weights = tf.constant([0.4, 0.3, 0.2], tf.float32)
     blackout_pixel_weights_by_box_regions = tf.function(
         ta_utils.blackout_pixel_weights_by_box_regions)
     output = blackout_pixel_weights_by_box_regions(
         4, 4, boxes, blackout, weights)
     return output
Example 9
    def __init__(self, name, hparams, optimizer='RMS'):
        self.name = name
        self.hparams = hparams
        self.verbose = getattr(hparams, 'verbose', True)

        self.update_freq_lr = hparams.training_freq
        self.update_freq_nn = hparams.training_freq_network

        self.t = 0
        self.num_epochs = hparams.training_epochs
        self.data_h = contextual_dataset.ContextualDataset(hparams.context_dim,
                                                           hparams.num_actions,
                                                           intercept=False)

        self.gradient_updates = tf.Variable(0, trainable=False)
        if self.hparams.activate_decay:
            self.lr = tf.train.inverse_time_decay(self.hparams.initial_lr,
                                                  self.gradient_updates, 1,
                                                  self.hparams.lr_decay_rate)
        else:
            self.lr = tf.Variable(self.hparams.initial_lr, trainable=False)
        optimizer = tf.train.RMSPropOptimizer(self.lr)
        self._optimizer_config = {
            'optimizer': optimizer,
            'max_grad_norm': hparams.max_grad_norm
        }

        if self.verbose:
            print('Initializing model {}.'.format(self.name))
        self.snp = regressor.Regressor(
            input_dim=hparams.context_dim + hparams.num_actions,
            output_dim=1,
            x_encoder_sizes=hparams.x_encoder_sizes,
            x_y_encoder_sizes=hparams.x_y_encoder_sizes,
            global_latent_net_sizes=hparams.global_latent_net_sizes,
            local_latent_net_sizes=hparams.local_latent_net_sizes,
            heteroskedastic_net_sizes=hparams.heteroskedastic_net_sizes,
            att_type=hparams.att_type,
            att_heads=hparams.att_heads,
            uncertainty_type=hparams.uncertainty_type,
            mean_att_type=hparams.mean_att_type,
            scale_att_type_1=hparams.scale_att_type_1,
            scale_att_type_2=hparams.scale_att_type_2,
            activation=hparams.activation,
            output_activation=hparams.output_activation,
            data_uncertainty=hparams.data_uncertainty,
            local_variational=hparams.local_variational,
            model_path=hparams.model_path)

        self._step = tf.function(utils.mse_step.python_function)  # pytype: disable=module-attr

        self._one_hot_vectors = tf.one_hot(
            indices=np.arange(hparams.num_actions),
            depth=hparams.num_actions)
Example 10
def training_loop(train_dataset,
                  valid_dataset,
                  model,
                  hparams):
  """Trains an SNP for a fixed number of iterations."""
  optimizer_config = {'optimizer': hparams.optimizer(hparams.lr),
                      'max_grad_norm': hparams.max_grad_norm}
  num_context = hparams.num_context
  best_mse = np.inf
  step = tf.function(utils.mse_step.python_function)  # pytype: disable=module-attr

  for it in range(hparams.num_iterations):
    batch_train_data = get_splits(
        train_dataset,
        num_context,
        hparams.batch_size,
        points_perm=True)
    nll, mse, local_z_kl, global_z_kl = step(
        model,
        batch_train_data,
        optimizer_config)

    if it % hparams.print_every == 0:
      (batch_context_x,
       batch_context_y,
       batch_target_x,
       batch_target_y,
       batch_unseen_targets) = get_splits(valid_dataset,
                                          num_context,
                                          hparams.batch_size,
                                          points_perm=False)
      prediction = model(batch_context_x,
                         batch_context_y,
                         batch_target_x,
                         batch_target_y)

      batch_unseen_predictions = prediction[:, num_context:]
      valid_nll = utils.nll(batch_unseen_targets, batch_unseen_predictions)
      valid_mse = utils.mse(batch_unseen_targets, batch_unseen_predictions)
      if model.local_variational:
        valid_local_kl = tf.reduce_mean(
            tf.reduce_sum(model.losses[-1][:, num_context:], axis=[1, 2]))
      else:
        valid_local_kl = 0.
      valid_global_kl = tf.reduce_mean(tf.reduce_sum(model.losses[-2], axis=-1))

      print('it: {}, train nll: {}, mse: {}, local kl: {} global kl: {} '
            'valid nll: {}, mse: {}, local kl: {} global kl: {}'
            .format(it, nll, mse, local_z_kl, global_z_kl,
                    valid_nll, valid_mse, valid_local_kl, valid_global_kl))
      if valid_mse.numpy() < best_mse:
        best_mse = valid_mse.numpy()
        print('Saving best model with MSE', best_mse)
        model.save_weights(hparams.save_path)
Example 11
    def testSampleChainSeedReproducible(self):
        normal_prior = tfd.Normal(5 * [[0., 0.]], 1.)

        def normal_sampler(seed):
            return normal_prior.sample(seed=seed)

        def normal_log_likelihood(state):
            return tf.reduce_sum(tfd.Normal(state, 2.).log_prob(0.), axis=-1)

        num_results = 10
        seed = tfp_test_util.test_seed()

        current_state = np.float32(np.random.rand(5, 2))
        samples0 = tf.function(lambda: tfp.mcmc.sample_chain(  # pylint: disable=g-long-lambda
            num_results=2 * num_results,
            num_steps_between_results=0,
            current_state=current_state,
            kernel=elliptical.EllipticalSliceSampler(
                normal_sampler_fn=normal_sampler,
                log_likelihood_fn=normal_log_likelihood,
                seed=seed),
            num_burnin_steps=150,
            trace_fn=None,
            parallel_iterations=1))()

        samples1 = tf.function(lambda: tfp.mcmc.sample_chain(  # pylint: disable=g-long-lambda
            num_results=num_results,
            num_steps_between_results=1,
            current_state=current_state,
            kernel=elliptical.EllipticalSliceSampler(
                normal_sampler_fn=normal_sampler,
                log_likelihood_fn=normal_log_likelihood,
                seed=seed),
            trace_fn=None,
            num_burnin_steps=150,
            parallel_iterations=1))()
        samples0_, samples1_ = self.evaluate([samples0, samples1])

        self.assertAllClose(samples0_[::2], samples1_, atol=1e-5, rtol=1e-5)
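With the seed fixed, a chain of 2 * num_results results and no thinning visits the same underlying states as a chain of num_results results with one step between results, so every other sample of samples0 must reproduce samples1.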
Example 12
  def testMultivariateNormalNd(self, event_size=32, batch_size=8, num_steps=2):
    tf.set_random_seed(3)
    with tf.device(FLAGS.test_device):
      f = run_nuts_chain(event_size, batch_size, num_steps)
      f = tf.function(f, autograph=False, jit_compile=True)
      samples, leapfrogs = self.evaluate(f())

    # TODO(axch) Figure out what the right thing to test about the leapfrog
    # count really is and test it, instead of just flailing around like this
    # does.
    print(type(samples), type(leapfrogs))
    print(samples, leapfrogs)
    ev_leapfrogs = leapfrogs[0]
    self.assertGreater(len(set(ev_leapfrogs.tolist())), 1)
    self.assertTrue(all(ev_leapfrogs > 1))
Example 13
 def test_tf_saved_model_save(self):
     base_path = os.path.join(self.get_temp_dir(), 'tf_saved_model_save')
     export_path = os.path.join(base_path, '00000123')
     root = tf.train.Checkpoint()
     root.v1 = tf.Variable(3.)
     root.v2 = tf.Variable(2.)
     root.f = tf.function(lambda x: {'y': root.v1 * root.v2 * x})
     to_save = root.f.get_concrete_function(tf.TensorSpec(None, tf.float32))
     tf.saved_model.experimental.save(root, export_path, to_save)
     _, model_server_address, _ = TensorflowModelServerTest.RunServer(
         'default', base_path)
     expected_version = self._GetModelVersion(base_path)
     self.VerifyPredictRequest(model_server_address,
                               expected_output=12.0,
                               specify_output=False,
                               expected_version=expected_version)
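The expected output 12.0 is v1 * v2 * x = 3 * 2 * x, implying the default predict request issued by VerifyPredictRequest feeds x = 2.0.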
Example 14
    def _tfTestHelper(self, run_asserts_fn, execute_program_fn):
        # Note: test_device is 'cpu', 'gpu', etc.

        # Various int32 and int64 kernels are missing for GPU, so we skip direct
        # tests on the GPU device, but test XLA on GPU below.
        if 'cpu' in FLAGS.test_device.lower():
            # Make sure everything works with no XLA compilation.
            with tf.device('CPU:0'):
                run_asserts_fn(
                    functools.partial(execute_program_fn, backend=TF_BACKEND))

        # Force XLA compilation using tf.function.
        backend = TF_BACKEND_NO_ASSERTS
        f = functools.partial(execute_program_fn, backend=backend)
        f = tf.function(f, autograph=False, experimental_compile=True)
        with tf.device(FLAGS.test_device):
            run_asserts_fn(f)
Example 15
    def testNormalNormalSampleMultipleDatapoints(self):
        # Two independent chains, of states of shape [3].
        prior_stddev = self.dtype(np.exp(np.random.rand(2, 3)))

        likelihood_stddev = self.dtype(np.exp(np.random.rand(2, 3)))
        # 10 data points.
        data = self.dtype(np.random.randn(10, 2, 3))

        # Standard normal prior.
        normal_prior = tfd.Normal(self.dtype(0.), prior_stddev)

        def normal_sampler(seed):
            return normal_prior.sample(seed=seed)

        # 10 samples at 2 chains.
        def normal_log_likelihood(state):
            return tf.reduce_sum(
                tfd.Normal(state, likelihood_stddev).log_prob(data),
                axis=[0, -1],
            )

        kernel = elliptical.EllipticalSliceSampler(
            normal_sampler_fn=normal_sampler,
            log_likelihood_fn=normal_log_likelihood,
            seed=tfp_test_util.test_seed(),
        )

        samples = tf.function(lambda: tfp.mcmc.sample_chain(  # pylint: disable=g-long-lambda
            num_results=int(3e5),
            current_state=self.dtype(np.random.randn(2, 3)),
            kernel=kernel,
            num_burnin_steps=int(1e4),
            parallel_iterations=1,
            trace_fn=None))()

        mean, variance = self.evaluate(tf.nn.moments(samples, axes=[0]))
        posterior_mean, posterior_variance = normal_normal_posterior(
            prior_mean=0.,
            prior_stddev=prior_stddev,
            likelihood_stddev=likelihood_stddev,
            data=data)
        # Computed exactly from the formula in normal-normal posterior.
        self.assertAllClose(posterior_mean, mean, rtol=2e-2, atol=6e-3)
        self.assertAllClose(posterior_variance, variance, rtol=5e-2)
Example 16
  def __init__(self, weight_path):
    helpers.ensure_lpips_weights_exist(weight_path)

    def wrap_frozen_graph(graph_def, inputs, outputs):
      def _imports_graph_def():
        tf.graph_util.import_graph_def(graph_def, name="")
      wrapped_import = tf.wrap_function(_imports_graph_def, [])
      import_graph = wrapped_import.graph
      return wrapped_import.prune(
          tf.nest.map_structure(import_graph.as_graph_element, inputs),
          tf.nest.map_structure(import_graph.as_graph_element, outputs))

    # Pack LPIPS network into a tf function
    graph_def = tf.GraphDef()
    with open(weight_path, "rb") as f:
      graph_def.ParseFromString(f.read())
    self._lpips_func = tf.function(
        wrap_frozen_graph(
            graph_def, inputs=("0:0", "1:0"), outputs="Reshape_10:0"))
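Hypothetical usage sketch: the pruned function maps the two image tensors feeding placeholders "0:0" and "1:0" to the value of "Reshape_10:0", so within the same class one would presumably compute

    distance = self._lpips_func(image_a, image_b)  # images shaped/scaled as the frozen graph expects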
Example 17
def reset_tf_function(tf_function):
    return tf.function(tf_function.python_function)
Example 18
 def reset(self):
     self.tf_function = tf.function(self.python_function)
Example 19
 def __init__(self, python_function):
     self.tf_function = tf.function(python_function)
     self.python_function = python_function
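Examples 17-19 show one pattern: keep the original Python callable next to its traced tf.function so a fresh tf.function can be created later, discarding previously cached concrete functions. A minimal sketch, using a hypothetical name for the wrapper class whose __init__ is Example 19:

    wrapped = FunctionWrapper(lambda x: x * x)
    y = wrapped.tf_function(tf.constant(2.0))  # traces and runs, y == 4.0
    wrapped.reset()                            # Example 18: rebuilds a fresh tf.function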
Example 20
def multiply2n_ragged(tensor1, tensor2):
    # This function multiplies two rank-2 ragged tensors. The outermost
    # dimensions of the two tensors must be equal.
    # Set up variables and constants.
    outerloop_counter = tf.constant(0, dtype=tf.int32)
    carry_on = tf.constant(0, dtype=tf.int32)
    taValues = tf.TensorArray(tf.float32,
                              size=0,
                              dynamic_size=True,
                              clear_after_read=False,
                              infer_shape=False)
    taL2Splits = tf.TensorArray(tf.int32,
                                size=0,
                                dynamic_size=True,
                                clear_after_read=False,
                                infer_shape=False)
    taL1Splits = tf.TensorArray(tf.int32,
                                size=0,
                                dynamic_size=True,
                                clear_after_read=False,
                                infer_shape=False)
    taL1Splits = taL1Splits.write(
        0, [0])  ## required initialization for the L1 split only
    innerloop_processing_graphed = tf.function(innerloop_processing)
    generateL1Tensor_writeback_graphed = tf.function(
        generateL1Tensor_writeback)

    def outerloop_cond(counter, input1, input2, taValues, taL2Splits,
                       taL1Splits, carry_on):
        value = tf.shape(input1[2])[0] - 1
        return counter < value  ## the length of the outermost dimension; stop once the counter reaches it

    def outloop_body(counter, input1, input2, taValues, taL2Splits, taL1Splits,
                     carry_on):
        l1_comp_begin = input1[2][
            counter]  ## begin position of the current row in the outer split (i.e. the ith value of the outer row-split tensor)
        l1_comp_end = input1[2][
            counter + 1]  ## end position of the current row in the outer split (i.e. the (i+1)th value of the outer row-split tensor)
        l1_comp2_begin = input2[2][
            counter]  ## the same for the second operand
        l1_comp2_end = input2[2][
            counter + 1]  ## the same for the second operand
        comp = innerloop_processing_graphed(
            l1_comp_begin, l1_comp_end,
            input1)  ## retrieve the data to be processed for the selected rows of tensor1
        comp2 = innerloop_processing_graphed(
            l1_comp2_begin, l1_comp2_end, input2)  ## the same for tensor2

        #comp2 = tf.transpose(comp2) ### desired operation
        multiply = tf.matmul(comp, comp2)  #### This is the desired operation

        myshape = tf.shape(
            multiply)  ## shape of the result, needed to write it back in ragged format
        offset = tf.cond(
            taValues.size() > 0, lambda: tf.shape(taValues.concat())[0],
            lambda: 0
        )  ### workaround: TensorArray.concat raises an error on an empty array, so check the size before calling it
        l2v = generateL1Tensor_writeback_graphed(
            offset, myshape[1], myshape[0]
        )  # generate the inner row split of the result for the current element
        taL2Splits = taL2Splits.write(
            counter, l2v)  # write back the inner row split to a TensorArray
        taValues = taValues.write(
            counter, tf.reshape(multiply, [-1])
        )  # write back the actual ragged-tensor elements to another TensorArray
        carry_on = carry_on + myshape[
            0]  ## needed to compute the outer row split
        taL1Splits = taL1Splits.write(
            counter + 1, [carry_on])  ## this is the outermost row split
        with tf.control_dependencies(
            [comp, comp2, myshape, l2v, carry_on, multiply]):
            counter = counter + 1
        return counter, input1, input2, taValues, taL2Splits, taL1Splits, carry_on

    with tf.name_scope("RaggedMultiply"):
        outerloop_finalcounter, _, _, ta1, ta2, ta3, _ = tf.while_loop(
            outerloop_cond,
            outloop_body, [
                outerloop_counter, tensor1, tensor2, taValues, taL2Splits,
                taL1Splits, carry_on
            ],
            back_prop=True)
    unique_ta2, _ = tf.unique(
        ta2.concat()
    )  # required since some values may be duplicated in the row split itself
    t1 = ta1.concat()
    t3 = ta3.concat()
    final_values = t1, unique_ta2, t3
    return final_values
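Hypothetical reassembly sketch: the returned triple appears to be (flat values, inner row splits, outer row splits), so the ragged product could presumably be rebuilt as below, where t1_parts and t2_parts stand for the matching three-part decompositions of the inputs.

    values, inner_splits, outer_splits = multiply2n_ragged(t1_parts, t2_parts)
    result = tf.RaggedTensor.from_nested_row_splits(
        values, (outer_splits, inner_splits))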