Example #1
def bad_input_colocation_module_fn():
  u = tf.add(42, 69, name="u")
  with tf.colocate_with(u):
    # Inputs must not reference other nodes for colocation.
    x = tf.placeholder(tf.float32, name="x")
  y = x + 1.0
  hub.add_signature(inputs=x, outputs=y)
Example #2
def unused_input_module_fn():
  x = tf.placeholder(tf.int64)
  y = tf.placeholder(tf.int64)
  result = x*x
  hub.add_signature(
      inputs={"x": x, "unused": y},
      outputs=result)
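Each of these module_fns is meant to be handed to hub.create_module_spec() and instantiated as a hub.Module inside a consumer graph. A minimal consumption sketch for the example above, using the TF1-style API seen throughout these snippets (the spec/session boilerplate is illustrative, not part of the original example):

import tensorflow as tf
import tensorflow_hub as hub

spec = hub.create_module_spec(unused_input_module_fn)
with tf.Graph().as_default():
  square = hub.Module(spec)
  # Inputs are passed as a dict keyed by the names declared in the signature.
  result = square({"x": tf.constant([2], dtype=tf.int64),
                   "unused": tf.constant([0], dtype=tf.int64)})
  with tf.Session() as sess:
    print(sess.run(result))  # [4]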
Example #3
def table_lookup_module_fn():
  x = tf.placeholder(dtype=tf.int64, name="x")
  keys = tf.constant([0, 1, 2], dtype=tf.int64)
  values = tf.constant(["index0", "hello", "world"])
  tbl_init = tf.contrib.lookup.KeyValueTensorInitializer(keys, values)
  table = tf.contrib.lookup.HashTable(tbl_init, "UNK")
  hub.add_signature(inputs=x, outputs=table.lookup(x))
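The HashTable above is only usable once its initializer has run; a consumer would typically run tf.tables_initializer() after instantiating the module. A sketch under that assumption (the consumer-side boilerplate is not part of the original example):

spec = hub.create_module_spec(table_lookup_module_fn)
with tf.Graph().as_default():
  lookup = hub.Module(spec)
  out = lookup(tf.constant([1, 2, 9], dtype=tf.int64))
  with tf.Session() as sess:
    # The module's table initializer is imported into the consumer graph's
    # TABLE_INITIALIZERS collection, which this op runs.
    sess.run(tf.tables_initializer())
    print(sess.run(out))  # [b'hello' b'world' b'UNK']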
Example #4
def while_module_fn():
  """Compute x^n with while_loop."""
  x = tf.placeholder(dtype=tf.float32, name="x", shape=[])
  n = tf.placeholder(dtype=tf.int32, name="n")
  _, pow_x = tf.while_loop(
      lambda i, ix: i < n, lambda i, ix: [tf.add(i, 1), ix * x],
      [tf.constant(0), tf.constant(1.0)])
  hub.add_signature(inputs={"x": x, "n": n}, outputs=pow_x)
Example #5
def text_module_fn():
  weights = tf.get_variable(
      "weights", dtype=tf.float32, shape=[100, 10])
  #      initializer=tf.random_uniform_initializer())
  text = tf.placeholder(tf.string, shape=[None])
  hash_buckets = tf.string_to_hash_bucket_fast(text, weights.get_shape()[0])
  embeddings = tf.gather(weights, hash_buckets)
  hub.add_signature(inputs=text, outputs=embeddings)
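A module_fn like this only describes the graph; to get a reusable module on disk, the spec is instantiated, the variables are initialized (or trained), and Module.export() is called. A sketch under those assumptions (the export path is hypothetical):

spec = hub.create_module_spec(text_module_fn)
with tf.Graph().as_default():
  embed = hub.Module(spec)
  embeddings = embed(tf.constant(["hello world", "tf hub"]))  # shape [2, 10]
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    embed.export("/tmp/text_module", sess)  # hypothetical export path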
Example #6
def bad_state_colocation_module_fn():
  u = tf.add(42, 69, name="u")
  with tf.colocate_with(u):
    # State-holding nodes must not reference other nodes for colocation.
    v = tf.Variable(1.0, name="v")
  x = tf.placeholder(dtype=tf.float32)
  y = x + v
  hub.add_signature(inputs=x, outputs=y)
Example #7
 def module_fn():
     graph_def = tf.GraphDef()
     with tf.gfile.GFile(frozen_graph_file, 'rb') as f:
         graph_def.ParseFromString(f.read())
     tf.import_graph_def(graph_def, name='')
     graph = tf.get_default_graph()
     x = graph.get_tensor_by_name('x:0')
     y = graph.get_tensor_by_name('y:0')
     hub.add_signature('default', inputs={'x': x}, outputs={'y': y})
Example #8
 def module_fn():
   """A module summing one normal and one partitioned variable."""
   partitioner = tf.fixed_size_partitioner(partitions)
   var_1 = tf.get_variable(
       initializer=tf.ones(shape),
       name="partitioned_variable",
       partitioner=partitioner)
   var_2 = tf.get_variable(
       initializer=tf.ones(shape), name="normal_variable")
   hub.add_signature(outputs=var_1 + var_2)
Example #9
def stateful_module_fn_with_colocation():
  v = tf.get_variable(
      "var123", shape=[],
      initializer=tf.constant_initializer(1.0),
      use_resource=False)
  v_value = v.value()
  x = tf.placeholder(dtype=tf.float32, name="x")
  with tf.colocate_with(v), tf.colocate_with(x):
    y = tf.add(v_value, x, name="y")
  hub.add_signature(inputs=x, outputs=y)
Example #10
def multiple_signature_module_fn():
  """Stateful module with multiple signatures."""
  weight = tf.Variable([3.0])

  x_input = tf.placeholder(dtype=tf.float32)
  x_output = tf.multiply(x_input, weight)
  hub.add_signature("mul", inputs=x_input, outputs=x_output)

  y_input = tf.placeholder(dtype=tf.float32)
  y_output = tf.divide(y_input, weight)
  hub.add_signature("div", inputs=y_input, outputs=y_output)
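With multiple signatures, a consumer selects one by name when calling the module; roughly (the consumer-side code is an assumption for illustration):

spec = hub.create_module_spec(multiple_signature_module_fn)
with tf.Graph().as_default():
  m = hub.Module(spec)
  mul = m(tf.constant([6.0]), signature="mul")
  div = m(tf.constant([6.0]), signature="div")
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run([mul, div]))  # [18.], [2.]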
Example #11
def module_with_variables():
  tf.get_variable(
      name="weights",
      shape=[3],
      initializer=tf.zeros_initializer())
  tf.get_variable(
      name="partition",
      shape=[4],
      initializer=tf.zeros_initializer(),
      partitioner=tf.fixed_size_partitioner(3))
  hub.add_signature(outputs=tf.constant(1.0))
Example #12
def good_colocation_module_fn():
  w = tf.Variable(42 + 69, name="w")
  with tf.colocate_with(w):
    # Colocation references among state nodes are ok.
    v = tf.Variable(1.0, name="v")
    assert v.op.colocation_groups() == [tf.compat.as_bytes("loc:@w")]
  x = tf.placeholder(dtype=tf.float32, name="x")
  with tf.colocate_with(x):
    # Colocation references from other nodes to state nodes are ok.
    y = x + v
    assert sorted(y.op.colocation_groups()) == [tf.compat.as_bytes("loc:@x")]
  hub.add_signature(inputs=x, outputs=y)
Example #13
def nested_control_flow_module_fn():
  """Compute the sum of elements greater than 'a' with nested control flow."""
  elems = tf.placeholder(
      dtype=tf.float32, name="elems", shape=[None])
  a = tf.placeholder(dtype=tf.float32, name="a")

  def sum_above_a(acc, x):
    return acc + tf.cond(x > a, lambda: x, lambda: 0.0)

  hub.add_signature(
      inputs={"elems": elems, "a": a},
      outputs=tf.foldl(sum_above_a, elems, initializer=tf.constant(0.0)))
Example #14
def layers_module_fn():
  """Module that exercises the use of layers."""
  # This is a plain linear map Mx+b regularized by the sum of the squares
  # of the coefficients in M and b.
  x = tf.placeholder(dtype=tf.float32, shape=[None, 2], name="x")
  # Cancels internal factor 0.5.
  l2_reg = tf.contrib.layers.l2_regularizer(scale=2.0)
  h = tf.layers.dense(
      x, 2,
      activation=None,
      kernel_regularizer=l2_reg,
      bias_regularizer=l2_reg)
  hub.add_signature(inputs=x, outputs=h)
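When such a module is instantiated with trainable=True, TF-Hub recreates its regularization losses in the consumer graph, where they can be folded into the training loss as usual. A sketch under that assumption:

spec = hub.create_module_spec(layers_module_fn)
with tf.Graph().as_default():
  m = hub.Module(spec, trainable=True)
  predictions = m(tf.zeros([4, 2]))
  # The module's kernel/bias regularizers appear in this collection.
  reg_loss = tf.add_n(tf.losses.get_regularization_losses())
  loss = tf.losses.mean_squared_error(tf.ones([4, 2]), predictions) + reg_loss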
Example #15
 def module_with_duplicate_asset():
   vocabulary_file = self.create_vocab_file("tokens2.txt", ["1", "2", "3"])
   indices1 = tf.placeholder(dtype=tf.int64, name="indices1")
   indices2 = tf.placeholder(dtype=tf.int64, name="indices2")
   hub.add_signature(
       inputs={
           "indices_1": indices1,
           "indices_2": indices2,
       },
       outputs={
           "x": do_table_lookup(indices1, vocabulary_file),
           "y": do_table_lookup(indices2, vocabulary_file),
       })
Example #16
def nested_cond_module_fn():
  """Computes relu(x) with nested conditionals."""
  x = tf.placeholder(dtype=tf.float32, name="x", shape=[])
  # pylint: disable=g-long-lambda
  result = tf.cond(
      0 < x,
      lambda: tf.cond(3 < x,
                      lambda: tf.identity(x),
                      lambda: tf.multiply(x, 1.0)),
      lambda: tf.cond(x < -3,
                      lambda: tf.constant(0.0),
                      lambda: tf.multiply(0.0, 1.0)))
  # pylint: enable=g-long-lambda
  hub.add_signature(inputs=x, outputs=result)
Example #17
def batch_norm_module_fn(is_training):
  """Module that exercises batch normalization, incl. UPDATE_OPS."""
  x = tf.placeholder(dtype=tf.float32, shape=[None, 1], name="x")
  y = tf.contrib.layers.batch_norm(
      x,
      center=True,
      scale=True,
      is_training=is_training,
      # Let the moving_mean (aggregated for normalization at serving time)
      # decay quickly for more accurate values after few iterations.
      decay=0.6,
      # TODO(b/38416827): re-enable after the tests are updated.
      fused=False)
  hub.add_signature(inputs=x, outputs=y)
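Because this module_fn takes an is_training argument, it is usually exported with two graph variants selected by tags, following the standard TF1-Hub pattern (the tag names below are the conventional ones, not taken from the snippet):

spec = hub.create_module_spec(
    batch_norm_module_fn,
    tags_and_args=[({"train"}, {"is_training": True}),
                   (set(), {"is_training": False})])
# Training-time consumers instantiate hub.Module(spec, tags={"train"}, trainable=True);
# serving-time consumers use the default (empty) tag set, which builds the graph
# with is_training=False.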
Example #18
  def hub_module_fn():
    """Creates the TF graph for the hub module."""
    model_fn = t2t_model.T2TModel.make_estimator_model_fn(
        model_name,
        hparams,
        decode_hparams=decode_hparams,
        use_tpu=FLAGS.use_tpu)
    features = problem.serving_input_fn(hparams).features

    # We must copy the features, as the model_fn can add additional
    # entries to them (like hyperparameter settings etc.).
    original_features = features.copy()
    spec = model_fn(features, labels=None, mode=tf.estimator.ModeKeys.PREDICT)

    hub.add_signature(
        inputs=original_features,
        outputs=spec.export_outputs["serving_default"].outputs)
Example #19
  def module_fn():
    """Spec function for a token embedding module."""
    tokens = tf.placeholder(shape=[None], dtype=tf.string, name="tokens")

    embeddings_var = tf.get_variable(
        initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
        name=EMBEDDINGS_VAR_NAME,
        dtype=tf.float32)

    lookup_table = tf.contrib.lookup.index_table_from_file(
        vocabulary_file=vocabulary_file,
        num_oov_buckets=num_oov_buckets,
    )
    ids = lookup_table.lookup(tokens)
    combined_embedding = tf.nn.embedding_lookup(params=embeddings_var, ids=ids)
    hub.add_signature("default", {"tokens": tokens},
                      {"default": combined_embedding})
Example #20
  def module_fn_with_preprocessing():
    """Spec function for a full-text embedding module with preprocessing."""
    sentences = tf.placeholder(shape=[None], dtype=tf.string, name="sentences")
    # Perform a minimalistic text preprocessing by removing punctuation and
    # splitting on spaces.
    normalized_sentences = tf.regex_replace(
        input=sentences, pattern=r"\pP", rewrite="")
    tokens = tf.string_split(normalized_sentences, " ")

    # In case some of the input sentences are empty before or after
    # normalization, we will end up with empty rows. We do, however, want to
    # return an embedding for every row, so we have to fill in the empty rows
    # with a default.
    tokens, _ = tf.sparse_fill_empty_rows(tokens, "")
    # In case all of the input sentences are empty before or after
    # normalization, we will end up with a SparseTensor with shape [?, 0]. After
    # filling in the empty rows we must ensure the shape is set properly to
    # [?, 1].
    tokens = tf.sparse_reset_shape(tokens)

    embeddings_var = tf.get_variable(
        initializer=tf.zeros([vocab_size + num_oov_buckets, embeddings_dim]),
        name=EMBEDDINGS_VAR_NAME,
        dtype=tf.float32)
    lookup_table = tf.contrib.lookup.index_table_from_file(
        vocabulary_file=vocabulary_file,
        num_oov_buckets=num_oov_buckets,
    )
    sparse_ids = tf.SparseTensor(
        indices=tokens.indices,
        values=lookup_table.lookup(tokens.values),
        dense_shape=tokens.dense_shape)

    combined_embedding = tf.nn.embedding_lookup_sparse(
        params=embeddings_var,
        sp_ids=sparse_ids,
        sp_weights=None,
        combiner="sqrtn")

    hub.add_signature("default", {"sentences": sentences},
                      {"default": combined_embedding})
Example #21
def image_module_fn():
  """Maps 1x2 images to sums of each color channel."""
  images = tf.placeholder(dtype=tf.float32, shape=[None, 1, 2, 3])
  sum_channels = tf.reduce_sum(images, axis=[1, 2])
  hub.add_signature(inputs={"images": images}, outputs=sum_channels)
Example #22
 def make_model_spec():
   input_layer = tf.placeholder(tf.float32, shape=input_shape)
   x = self.embed(tf.expand_dims(input_layer, -1))
   x, encoder_layers = self.encoder(x)
   b, b_loss = self.bottleneck(x)
   hub.add_signature(inputs=input_layer, outputs=b)
Example #23
def stateful_rv_module_fn():
  r = tf.get_variable(
      "rv_var123", shape=[],
      initializer=tf.constant_initializer(10.0),
      use_resource=True)
  hub.add_signature(outputs=r.value())
Example #24
    def module_fn(training):
      """Hub module definition."""
      image_input = tf.placeholder(
          shape=[None] + self._observations_shape[1:], dtype=tf.float32)
      if self._extra_shape:
        inputs_extra = tf.placeholder(
            shape=[None, self._extra_shape[-1]], dtype=tf.float32)
      else:
        inputs_extra = None
      noise_shape = ([None] * (len(self._latent_shape) - 1)
                     + [self._latent_shape[-1]])
      noise_input = tf.placeholder(shape=noise_shape, dtype=tf.float32)
      with tf.variable_scope("encoder", reuse=tf.AUTO_REUSE):
        latents, endpoints_encoder = self._encoder(image_input, inputs_extra,
                                                   training=training)
      latent_means = latents[Ellipsis, :self._latent_shape[-1]]
      latent_log_sigmas = latents[Ellipsis, self._latent_shape[-1]:]
      with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
        outputs, endpoints_decoder = self._decoder(latent_means, inputs_extra,
                                                   training=training)
      outputs_channels = outputs.get_shape().as_list()[-1] // 2
      if self._output_distribution == "gaussian":
        reconstructions = outputs[Ellipsis, :outputs_channels]
        reconstruction_log_sigmas = outputs[Ellipsis, outputs_channels:]
      elif self._output_distribution == "bernoulli":
        reconstructions = outputs
        reconstruction_log_sigmas = outputs
      recon_inputs = {"image": image_input}
      if self._extra_shape:
        recon_inputs.update({"inputs_extra": inputs_extra})
      hub.add_signature(inputs=recon_inputs, outputs=reconstructions,
                        name="reconstruct")
      outputs = {"reconstructions": reconstructions,
                 "reconstruction_log_sigmas": reconstruction_log_sigmas,
                 "latent_means": latent_means,
                 "latent_log_sigmas": latent_log_sigmas}
      outputs.update(endpoints_encoder)
      outputs.update(endpoints_decoder)
      hub.add_signature(inputs=recon_inputs, outputs=outputs,
                        name="reconstruct_with_extras")

      with tf.variable_scope("decoder", reuse=tf.AUTO_REUSE):
        outputs, endpoints_decoder = self._decoder(noise_input, inputs_extra,
                                                   training=training)
      if self._output_distribution == "gaussian":
        image_samples = outputs[Ellipsis, :outputs_channels]
        image_samples_log_sigmas = outputs[Ellipsis, outputs_channels:]
      elif self._output_distribution == "bernoulli":
        image_samples = outputs
        image_samples_log_sigmas = outputs

      sample_inputs = {"noise": noise_input}
      if self._extra_shape:
        sample_inputs.update({"inputs_extra": inputs_extra})
      hub.add_signature(inputs=sample_inputs, outputs=image_samples,
                        name="sample")
      outputs = {"image_samples": image_samples,
                 "image_samples_log_sigmas": image_samples_log_sigmas}
      outputs.update(endpoints_decoder)
      hub.add_signature(inputs=sample_inputs, outputs=outputs,
                        name="sample_with_extras")
Example #25
def module_fn(is_training):
    """Module function."""
    input_ids = tf.placeholder(tf.int32, [None, None], "input_ids")
    input_mask = tf.placeholder(tf.int32, [None, None], "input_mask")
    segment_ids = tf.placeholder(tf.int32, [None, None], "segment_ids")
    mlm_positions = tf.placeholder(tf.int32, [None, None], "mlm_positions")

    albert_config_path = os.path.join(FLAGS.albert_directory,
                                      "albert_config.json")
    albert_config = modeling.AlbertConfig.from_json_file(albert_config_path)
    model = modeling.AlbertModel(config=albert_config,
                                 is_training=is_training,
                                 input_ids=input_ids,
                                 input_mask=input_mask,
                                 token_type_ids=segment_ids,
                                 use_one_hot_embeddings=False)

    mlm_logits = get_mlm_logits(model, albert_config, mlm_positions)

    vocab_model_path = os.path.join(FLAGS.albert_directory, "30k-clean.model")
    vocab_file_path = os.path.join(FLAGS.albert_directory, "30k-clean.vocab")

    config_file = tf.constant(value=albert_config_path,
                              dtype=tf.string,
                              name="config_file")
    vocab_model = tf.constant(value=vocab_model_path,
                              dtype=tf.string,
                              name="vocab_model")
    # This is only for visualization purposes.
    vocab_file = tf.constant(value=vocab_file_path,
                             dtype=tf.string,
                             name="vocab_file")

    # By adding `config_file`, `vocab_model` and `vocab_file`
    # to the ASSET_FILEPATHS collection, TF-Hub will
    # rewrite these tensors so that the assets are portable.
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, config_file)
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, vocab_model)
    tf.add_to_collection(tf.GraphKeys.ASSET_FILEPATHS, vocab_file)

    hub.add_signature(name="tokens",
                      inputs=dict(input_ids=input_ids,
                                  input_mask=input_mask,
                                  segment_ids=segment_ids),
                      outputs=dict(sequence_output=model.get_sequence_output(),
                                   pooled_output=model.get_pooled_output()))

    hub.add_signature(name="mlm",
                      inputs=dict(input_ids=input_ids,
                                  input_mask=input_mask,
                                  segment_ids=segment_ids,
                                  mlm_positions=mlm_positions),
                      outputs=dict(sequence_output=model.get_sequence_output(),
                                   pooled_output=model.get_pooled_output(),
                                   mlm_logits=mlm_logits))

    hub.add_signature(name="tokenization_info",
                      inputs={},
                      outputs=dict(vocab_file=vocab_model,
                                   do_lower_case=tf.constant(
                                       FLAGS.do_lower_case)))
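The extra tokenization_info signature is the standard way BERT/ALBERT-style modules expose their vocabulary assets; a consumer can query it with an empty input dict, e.g. (a sketch, assuming the FLAGS/modeling context of the snippet above):

spec = hub.create_module_spec(
    module_fn, tags_and_args=[(set(), {"is_training": False})])
with tf.Graph().as_default():
  m = hub.Module(spec)
  tokenization_info = m(signature="tokenization_info", as_dict=True)
  with tf.Session() as sess:
    vocab_file, do_lower_case = sess.run(
        [tokenization_info["vocab_file"], tokenization_info["do_lower_case"]])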
Example #26
 def flow_module_spec():
     cond_layer = tf.placeholder(tf.float32, shape=[None, n_cond])
     flow = params['flow_fn'](cond_layer, is_training)
     hub.add_signature(inputs=cond_layer,
                       outputs=flow.sample(tf.shape(cond_layer)[0]))
Example #27
 def plus_one():
     x = tf.compat.v1.sparse.placeholder(dtype=tf.float32, name='x')
     y = tf.identity(tf.SparseTensor(x.indices, x.values + 1,
                                     x.dense_shape))
     hub.add_signature(inputs=x, outputs=y)
Example #28
    def _module_fn():
        """
        Function building the module
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')
        mask_layer = tf.clip_by_value(obs_layer, 0, 0.001) * 1000
        #
        # Builds the neural network
        if pad == 0:
            net = slim.conv3d(feature_layer,
                              16,
                              5,
                              activation_fn=tf.nn.leaky_relu,
                              padding='same')
        elif pad == 2:
            net = slim.conv3d(feature_layer,
                              16,
                              5,
                              activation_fn=tf.nn.leaky_relu,
                              padding='valid')
        #net = wide_resnet(feature_layer, 8, activation_fn=tf.nn.leaky_relu, is_training=is_training)
        net = wide_resnet(net,
                          16,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        if distribution == 'logistic':
            net = slim.conv3d(net, 32, 3, activation_fn=tf.nn.tanh)
        else:
            net = slim.conv3d(net, 32, 3, activation_fn=tf.nn.leaky_relu)

        #Predicted mask
        masknet = slim.conv3d(net, 8, 1, activation_fn=tf.nn.leaky_relu)
        out_mask = slim.conv3d(masknet, 1, 1, activation_fn=None)
        pred_mask = tf.nn.sigmoid(out_mask)

        # Define the probabilistic layer
        likenet = slim.conv3d(net, 64, 1, activation_fn=tf.nn.leaky_relu)
        net = slim.conv3d(likenet, n_mixture * 3 * n_y, 1, activation_fn=None)
        cube_size = tf.shape(obs_layer)[1]
        net = tf.reshape(
            net, [-1, cube_size, cube_size, cube_size, n_y, n_mixture * 3])
        #         net = tf.reshape(net, [None, None, None, None, n_y, n_mixture*3])
        loc, unconstrained_scale, logits = tf.split(net,
                                                    num_or_size_splits=3,
                                                    axis=-1)
        scale = tf.nn.softplus(unconstrained_scale) + 1e-3

        # Form mixture of discretized logistic distributions. Note we shift the
        # logistic distribution by -0.5. This lets the quantization capture "rounding"
        # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
        if distribution == 'logistic':
            discretized_logistic_dist = tfd.QuantizedDistribution(
                distribution=tfd.TransformedDistribution(
                    distribution=tfd.Logistic(loc=loc, scale=scale),
                    bijector=tfb.AffineScalar(shift=-0.5)),
                low=0.,
                high=2.**3 - 1)

            mixture_dist = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(logits=logits),
                components_distribution=discretized_logistic_dist)

        elif distribution == 'normal':

            mixture_dist = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(logits=logits),
                components_distribution=tfd.Normal(loc=loc, scale=scale))

        # Define a function for sampling, and a function for estimating the log likelihood
        #sample = tf.squeeze(mixture_dist.sample())
        rawsample = mixture_dist.sample()
        sample = rawsample * pred_mask
        rawloglik = mixture_dist.log_prob(obs_layer)
        print(rawloglik)
        print(out_mask)
        print(mask_layer)

        #loss1 = - rawloglik* out_mask #This can be constant mask as well if we use mask_layer instead
        if masktype == 'constant': loss1 = -rawloglik * mask_layer
        elif masktype == 'vary': loss1 = -rawloglik * pred_mask
        loss2 = tf.nn.sigmoid_cross_entropy_with_logits(logits=out_mask,
                                                        labels=mask_layer)
        loglik = -(loss1 + loss2)

        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik,
                              'loc': loc,
                              'scale': scale,
                              'logits': logits,
                              'rawsample': rawsample,
                              'pred_mask': pred_mask,
                              'out_mask': out_mask,
                              'rawloglik': rawloglik,
                              'loss1': loss1,
                              'loss2': loss2
                          })
Example #29
    def _module_fn():
        """
        Function building the module
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')
        shift = tf.Variable(1., dtype=tf.float32, name='shift')
        scale = tf.Variable(1., dtype=tf.float32, name='scale')

        # Builds the neural network
        # ! ny and nchannel need to be the same

        cube_size = tf.shape(feature_layer)[1]
        #         print(cube_size)
        chain = tfb.Chain([
            tfp.bijectors.Affine(shift=shift, scale_identity_multiplier=scale),
            tfb.Invert(
                Squeeze3d(event_shape_in=[
                    cube_size, cube_size, cube_size, nchannels
                ])),
            iRevNetsimple(name='layer1', h=h),
            iRevNetsimple(name='layer1b', h=h),
            iRevNetsimple(name='layer2', h=h),
            iRevNetsimple(name='layer2b', h=h),
            #tfb.Permute(np.arange(8)[::-1],axis=-1),
            tfb.Permute(np.arange(8)[::-1], axis=-1),
            iRevNetsimple(name='layer3', h=h),
            iRevNetsimple(name='layer3b', h=h),
            iRevNetsimple(name='layer4', h=h),
            iRevNetsimple(name='layer4b', h=h),
            tfb.Invert(
                Squeeze3d(event_shape_in=[
                    cube_size // 2, cube_size // 2, cube_size // 2, nchannels *
                    8
                ])),
            iRevNetsimple(name='layer5', h=h),
            iRevNetsimple(name='layer5b', h=h),
            iRevNetsimple(name='layer6', h=h),
            iRevNetsimple(name='layer6b', h=h),
            tfb.Permute(np.arange(64)[::-1], axis=-1),
            iRevNetsimple(name='layer7', h=h),
            iRevNetsimple(name='layer7b', h=h),
            iRevNetsimple(name='layer8', h=h),
            iRevNetsimple(name='layer8b', h=h),
            tfb.Invert(
                Squeeze3d(event_shape_in=[
                    cube_size // 4, cube_size // 4, cube_size // 4, nchannels *
                    64
                ])),
            iRevNetsimple(name='layer9', h=h, kernel_size=1),
            iRevNetsimple(name='layer9b', h=h, kernel_size=1),
            iRevNetsimple(name='layer10', h=h, kernel_size=1),
            iRevNetsimple(name='layer10b', h=h, kernel_size=1),
            tfb.Permute(np.arange(64 * 8)[::-1], axis=-1),
            iRevNetsimple(name='layer11', h=h, kernel_size=1),
            iRevNetsimple(name='layer11b', h=h, kernel_size=1),
            iRevNetsimple(name='layer12', h=h, kernel_size=1),
            iRevNetsimple(name='layer12b', h=h, kernel_size=1),
            Squeeze3d(event_shape_in=[
                cube_size // 4, cube_size // 4, cube_size // 4, nchannels * 64
            ]),
            iRevNetsimple(name='layer13', h=h),
            iRevNetsimple(name='layer13b', h=h),
            iRevNetsimple(name='layer14', h=h),
            iRevNetsimple(name='layer14b', h=h),
            tfb.Permute(np.arange(64)[::-1], axis=-1),
            iRevNetsimple(name='layer15', h=h),
            iRevNetsimple(name='layer15b', h=h),
            iRevNetsimple(name='layer16', h=h),
            iRevNetsimple(name='layer16b', h=h),
            Squeeze3d(event_shape_in=[
                cube_size // 2, cube_size // 2, cube_size // 2, nchannels * 8
            ]),
            iRevNetsimple(name='layer17', h=h),
            iRevNetsimple(name='layer17b', h=h),
            iRevNetsimple(name='layer18', h=h),
            iRevNetsimple(name='layer18b', h=h),
            tfb.Permute(np.arange(8)[::-1], axis=-1),
            iRevNetsimple(name='layer19', h=h),
            iRevNetsimple(name='layer19b', h=h),
            iRevNetsimple(name='layer20', h=h),
            iRevNetsimple(name='layer20b', h=h),
            Squeeze3d(
                event_shape_in=[cube_size, cube_size, cube_size, nchannels])
        ])

        bijection = chain

        # Define the probabilistic layer
        net = bijection.forward(feature_layer, name='lambda')
        if softplus:
            net = tf.nn.softplus(net, name='lambda')
        dist = tfd.Poisson(net + 1e-3)

        sample = tf.squeeze(dist.sample())
        #         loglik = dist.log_prob(obs_layer+1)
        loglik = dist.log_prob(obs_layer)

        #l2 = tf.losses.mean_squared_error(obs_layer, net)
        l2 = (tf.square(tf.subtract(obs_layer, net)))
        l1 = (tf.abs(tf.subtract(obs_layer, net)))

        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik,
                              'lambda': net,
                              'l2': l2,
                              'l1': l1
                          })
Example #30
    def _module_fn():
        """
        Function building the module
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')

        # Builds the neural network
        net = slim.conv3d(feature_layer,
                          16,
                          5,
                          activation_fn=tf.nn.leaky_relu,
                          padding='same')
        #net = wide_resnet(feature_layer, 8, activation_fn=tf.nn.leaky_relu, is_training=is_training)
        net = wide_resnet(net,
                          16,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = slim.conv3d(net, 32, 3, activation_fn=tf.nn.leaky_relu)

        # Define the probabilistic layer
        net = slim.conv3d(net,
                          3 * n_mixture * nchannels,
                          1,
                          activation_fn=None)
        cube_size = tf.shape(obs_layer)[1]
        net = tf.reshape(
            net,
            [-1, cube_size, cube_size, cube_size, nchannels, n_mixture * 3])

        logits, loc, unconstrained_scale = tf.split(net,
                                                    num_or_size_splits=3,
                                                    axis=-1)
        print('\nloc :\n', loc)
        scale = tf.nn.softplus(unconstrained_scale[...]) + 1e-3

        distribution = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=logits[...]),
            #components_distribution=tfd.MultivariateNormalDiag(loc=loc[...,0], scale_diag=scale))
            components_distribution=tfd.Normal(loc=loc[...], scale=scale))
        print('\ngmm\n', distribution)

        # Define a function for sampling, and a function for estimating the log likelihood
        if log:
            print('Logged it')
            sample = tf.exp(distribution.sample()) - logoffset
            print('\ninf dist sample :\n', distribution.sample())
            logfeature = tf.log(tf.add(logoffset, obs_layer), 'logfeature')
            print('\nlogfeature :\n', logfeature)
            prob = distribution.prob(logfeature[...])
            loglik = distribution.log_prob(logfeature[...])
        else:
            print('UnLogged it')
            sample = distribution.sample()
            print('\ninf dist sample :\n', distribution.sample())
            loglik = distribution.log_prob(obs_layer[...])

        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik,
                              'sigma': scale,
                              'mean': loc,
                              'logits': logits
                          })
Example #31
    def _module_fn():
        """
        Function building the module
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')

        # Builds the neural network

        if pad == 0:
            d00 = slim.conv3d(feature_layer,
                              fsize,
                              5,
                              activation_fn=tf.nn.leaky_relu,
                              padding='same')
        elif pad == 2:
            d00 = slim.conv3d(feature_layer,
                              fsize,
                              5,
                              activation_fn=tf.nn.leaky_relu,
                              padding='valid')
        if pad == 4:
            d00 = slim.conv3d(feature_layer,
                              fsize,
                              5,
                              activation_fn=tf.nn.leaky_relu,
                              padding='valid')
            d00 = slim.conv3d(d00,
                              fsize * 2,
                              5,
                              activation_fn=tf.nn.leaky_relu,
                              padding='valid')
##        #downsample
##        dd = [[d00]]
##        cfsize = fsize
##        for i in range(nsub):
##            d0 = dd[-1][-1]
##            d1 = wide_resnet(d0, cfsize, activation_fn=tf.nn.leaky_relu)
##            d2 = wide_resnet(d1, cfsize, activation_fn=tf.nn.leaky_relu)
##            dsub = slim.max_pool3d(d2, kernel_size=3, stride=2, padding='SAME')
##            dd.append([d1, d2, dsub])
##            cfsize  *= 2
##
##        #lower layer
##        d0 = dd[-1][-1]
##        d1 = wide_resnet(d0, cfsize, activation_fn=tf.nn.leaky_relu)
##        d2 = wide_resnet(d1, cfsize, activation_fn=tf.nn.leaky_relu)
##
##        up = [[d1, d2]]
##        #upsample
##        for i in range(nsub):
##            cfsize = cfsize // 2
##            usub = up[-1][-1]
##            dup = dd.pop()
##            u0 = dynamic_deconv3d('up%d'%i, usub, shape=[3,3,3,cfsize], activation=tf.nn.leaky_relu)
##            #u0 = slim.conv3d_transpose(usub, fsize, kernel_size=3, stride=2)
##            uc = tf.concat([u0, dup[1]], axis=-1)
##            u1 = wide_resnet(uc, cfsize, activation_fn=tf.nn.leaky_relu)
##            u2 = wide_resnet(u1, cfsize, activation_fn=tf.nn.leaky_relu)
##            up.append([u0, u1, u1c, u2])
##
##        u0 = up[-1][-1]
##        net = slim.conv3d(u0, 1, 3, activation_fn=tf.nn.tanh)
##
        # downsample: restructure code while doubling filter size
        cfsize = fsize
        d1 = wide_resnet(d00, cfsize, activation_fn=tf.nn.leaky_relu)
        d2 = wide_resnet(d1, cfsize, activation_fn=tf.nn.leaky_relu)
        dd = [d2]
        for i in range(nsub):
            cfsize *= 2
            print(i, cfsize)
            dsub = slim.max_pool3d(dd[-1],
                                   kernel_size=3,
                                   stride=2,
                                   padding='SAME')
            d1 = wide_resnet(dsub, cfsize, activation_fn=tf.nn.leaky_relu)
            d2 = wide_resnet(d1, cfsize, activation_fn=tf.nn.leaky_relu)
            dd.append(d2)

        print(len(dd))
        #upsample
        usub = dd.pop()
        for i in range(nsub):
            u0 = dynamic_deconv3d('up%d' % i,
                                  usub,
                                  shape=[3, 3, 3, cfsize],
                                  activation=tf.identity)
            cfsize = cfsize // 2
            print(i, cfsize)
            u0 = slim.conv3d(u0,
                             cfsize,
                             1,
                             activation_fn=tf.identity,
                             padding='same')
            #u0 = slim.conv3d_transpose(usub, fsize, kernel_size=3, stride=2)
            uc = tf.concat([u0, dd.pop()], axis=-1)
            u1 = wide_resnet(uc, cfsize, activation_fn=tf.nn.leaky_relu)
            u2 = wide_resnet(u1, cfsize, activation_fn=tf.nn.leaky_relu)
            usub = u2

        print(len(dd))
        net = slim.conv3d(usub, 1, 3, activation_fn=tf.nn.tanh)

        # Define the probabilistic layer
        net = slim.conv3d(net, n_mixture * 3 * n_y, 1, activation_fn=None)
        cube_size = tf.shape(obs_layer)[1]
        net = tf.reshape(
            net, [-1, cube_size, cube_size, cube_size, n_y, n_mixture * 3])
        #         net = tf.reshape(net, [None, None, None, None, n_y, n_mixture*3])
        loc, unconstrained_scale, logits = tf.split(net,
                                                    num_or_size_splits=3,
                                                    axis=-1)
        scale = tf.nn.softplus(unconstrained_scale) + 1e-3

        # Form mixture of discretized logistic distributions. Note we shift the
        # logistic distribution by -0.5. This lets the quantization capture "rounding"
        # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
        if distribution == 'logistic':
            discretized_logistic_dist = tfd.QuantizedDistribution(
                distribution=tfd.TransformedDistribution(
                    distribution=tfd.Logistic(loc=loc, scale=scale),
                    bijector=tfb.AffineScalar(shift=-0.5)),
                low=0.,
                high=2.**3 - 1)

            mixture_dist = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(logits=logits),
                components_distribution=discretized_logistic_dist)

        elif distribution == 'normal':

            mixture_dist = tfd.MixtureSameFamily(
                mixture_distribution=tfd.Categorical(logits=logits),
                components_distribution=tfd.Normal(loc=loc, scale=scale))

        # Define a function for sampling, and a function for estimating the log likelihood
        #sample = tf.squeeze(mixture_dist.sample())
        sample = mixture_dist.sample()
        loglik = mixture_dist.log_prob(obs_layer)
        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik,
                              'loc': loc,
                              'scale': scale,
                              'logits': logits
                          })
Example #32
def stateful_non_rv_module_fn():
  v = tf.get_variable(
      "var123", shape=[],
      initializer=tf.constant_initializer(10.0),
      use_resource=False)
  hub.add_signature(outputs=v.value())
Example #33
 def make_model_spec():
     input_layer = tf.placeholder(
         tf.float32, shape=features["inputs"].get_shape())
     outputs = pixel_cnn_fn(input_layer)
     hub.add_signature(inputs=input_layer, outputs=outputs)
Example #34
 def double_module_fn():
   w = tf.Variable([2.0, 4.0])
   x = tf.compat.v1.placeholder(dtype=tf.float32)
   hub.add_signature(inputs=x, outputs=x*w)
Example #35
def module_fn():
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, 50])
    layer1 = tf.layers.dense(inputs, 200)
    layer2 = tf.layers.dense(layer1, 100)
    outputs = dict(default=layer2, hidden_activations=layer1)
    hub.add_signature(inputs=inputs, outputs=outputs)
Example #36
 def decoder_spec():
     z = tf.compat.v1.placeholder(tf.float32,
                                  shape=[None, params['latent_size']])
     x = decoder_(z)
     hub.add_signature(inputs={'z': z}, outputs={'x': x})
Example #37
    def module_fn():
        """Spec function for a token embedding module."""
        # init
        _bos_id = 256
        _eos_id = 257
        _bow_id = 258
        _eow_id = 259
        _pad_id = 260

        _max_word_length = 50
        _parallel_iterations = 10
        _max_batch_size = 1024

        id_dtype = tf.int32
        id_nptype = np.int32
        max_word_length = tf.constant(_max_word_length,
                                      dtype=id_dtype,
                                      name='max_word_length')

        version = tf.constant('from_dp_1', dtype=tf.string, name='version')

        # The character representation of the begin/end of sentence characters.
        def _make_bos_eos(c):
            r = np.zeros([_max_word_length], dtype=id_nptype)
            r[:] = _pad_id
            r[0] = _bow_id
            r[1] = c
            r[2] = _eow_id
            return tf.constant(r, dtype=id_dtype)

        bos_ids = _make_bos_eos(_bos_id)
        eos_ids = _make_bos_eos(_eos_id)

        def token2ids(token):
            with tf.name_scope("token2ids_preprocessor"):
                char_ids = tf.decode_raw(token,
                                         tf.uint8,
                                         name='decode_raw2get_char_ids')
                char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
                char_ids = tf.strided_slice(char_ids, [0],
                                            [max_word_length - 2], [1],
                                            name='slice2resized_token')
                ids_num = tf.shape(char_ids)[0]
                fill_ids_num = (_max_word_length - 2) - ids_num
                pads = tf.fill([fill_ids_num], _pad_id)
                bow_token_eow_pads = tf.concat(
                    [[_bow_id], char_ids, [_eow_id], pads],
                    0,
                    name='concat2bow_token_eow_pads')
                return bow_token_eow_pads

        def sentence_tagging_and_padding(sen_dim):
            with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
                sen = sen_dim[0]
                dim = sen_dim[1]
                extra_dim = tf.shape(sen)[0] - dim
                sen = tf.slice(sen, [0, 0], [dim, max_word_length],
                               name='slice2sen')

                bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]],
                                        0,
                                        name='concat2bos_sen_eos')
                bos_sen_eos_plus_one = bos_sen_eos + 1
                bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one,
                                          [[0, extra_dim], [0, 0]],
                                          "CONSTANT",
                                          name='pad2bos_sen_eos_pads')
                return bos_sen_eos_pads

        # Input placeholders to the biLM.
        tokens = tf.placeholder(shape=(None, None),
                                dtype=tf.string,
                                name='ph2tokens')
        sequence_len = tf.placeholder(shape=(None, ),
                                      dtype=tf.int32,
                                      name='ph2sequence_len')

        tok_shape = tf.shape(tokens)
        line_tokens = tf.reshape(tokens,
                                 shape=[-1],
                                 name='reshape2line_tokens')

        with tf.device('/cpu:0'):
            tok_ids = tf.map_fn(token2ids,
                                line_tokens,
                                dtype=tf.int32,
                                back_prop=False,
                                parallel_iterations=_parallel_iterations,
                                name='map_fn2get_tok_ids')

        tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1],
                             name='reshape2tok_ids')
        with tf.device('/cpu:0'):
            sen_ids = tf.map_fn(sentence_tagging_and_padding,
                                (tok_ids, sequence_len),
                                dtype=tf.int32,
                                back_prop=False,
                                parallel_iterations=_parallel_iterations,
                                name='map_fn2get_sen_ids')

        # Build the biLM graph.
        bilm = BidirectionalLanguageModel(options,
                                          str(weight_file),
                                          max_batch_size=_max_batch_size)

        embeddings_op = bilm(sen_ids)

        # Get an op to compute ELMo (weighted average of the internal biLM layers)
        elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)

        weighted_op = elmo_output['weighted_op']
        mean_op = elmo_output['mean_op']
        word_emb = elmo_output['word_emb']
        lstm_outputs1 = elmo_output['lstm_outputs1']
        lstm_outputs2 = elmo_output['lstm_outputs2']

        hub.add_signature("tokens", {
            "tokens": tokens,
            "sequence_len": sequence_len
        }, {
            "elmo": weighted_op,
            "default": mean_op,
            "word_emb": word_emb,
            "lstm_outputs1": lstm_outputs1,
            "lstm_outputs2": lstm_outputs2,
            "version": version
        })

        # #########################Next signature############################# #

        # Input placeholders to the biLM.
        def_strings = tf.placeholder(shape=(None), dtype=tf.string)
        def_tokens_sparse = tf.string_split(def_strings)
        def_tokens_dense = tf.sparse_to_dense(
            sparse_indices=def_tokens_sparse.indices,
            output_shape=def_tokens_sparse.dense_shape,
            sparse_values=def_tokens_sparse.values,
            default_value='')
        def_mask = tf.not_equal(def_tokens_dense, '')
        def_int_mask = tf.cast(def_mask, dtype=tf.int32)
        def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)

        def_tok_shape = tf.shape(def_tokens_dense)
        def_line_tokens = tf.reshape(def_tokens_dense,
                                     shape=[-1],
                                     name='reshape2line_tokens')

        with tf.device('/cpu:0'):
            def_tok_ids = tf.map_fn(token2ids,
                                    def_line_tokens,
                                    dtype=tf.int32,
                                    back_prop=False,
                                    parallel_iterations=_parallel_iterations,
                                    name='map_fn2get_tok_ids')

        def_tok_ids = tf.reshape(def_tok_ids,
                                 [def_tok_shape[0], def_tok_shape[1], -1],
                                 name='reshape2tok_ids')
        with tf.device('/cpu:0'):
            def_sen_ids = tf.map_fn(sentence_tagging_and_padding,
                                    (def_tok_ids, def_sequence_len),
                                    dtype=tf.int32,
                                    back_prop=False,
                                    parallel_iterations=_parallel_iterations,
                                    name='map_fn2get_sen_ids')

        # Get ops to compute the LM embeddings.
        def_embeddings_op = bilm(def_sen_ids)

        # Get an op to compute ELMo (weighted average of the internal biLM layers)
        def_elmo_output = weight_layers('elmo_output',
                                        def_embeddings_op,
                                        l2_coef=0.0,
                                        reuse=True)

        def_weighted_op = def_elmo_output['weighted_op']
        def_mean_op = def_elmo_output['mean_op']
        def_word_emb = def_elmo_output['word_emb']
        def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
        def_lstm_outputs2 = def_elmo_output['lstm_outputs2']

        hub.add_signature("default", {"strings": def_strings}, {
            "elmo": def_weighted_op,
            "default": def_mean_op,
            "word_emb": def_word_emb,
            "lstm_outputs1": def_lstm_outputs1,
            "lstm_outputs2": def_lstm_outputs2,
            "version": version
        })
Example #38
def half_plus_two():
    a = tf.get_variable("a", shape=[])
    b = tf.get_variable("b", shape=[])
    x = tf.placeholder(tf.float32)
    y = a * x + b
    hub.add_signature(inputs=x, outputs=y)
Example #39
 def _stateless_module_fn(self):
   """Simple module that squares an input."""
   x = tf.placeholder(tf.int64)
   y = x*x
   hub.add_signature(inputs=x, outputs=y)
Example #40
    def _module_fn(is_training):
        """A module_fn for use with hub.create_module_spec().

    Args:
      is_training: a boolean, passed to the config.network_fn.
          This is meant to control whether batch norm, dropout etc. are built
          in training or inference mode for this graph version.

    Raises:
      ValueError: if network_fn outputs are not as expected.
    """
        # Set up the module input, and attach an ImageModuleInfo about it.
        with tf.name_scope('hub_input'):
            default_size = (hparams.image_size, ) * 2
            image_module_info = hub.ImageModuleInfo()
            size_info = image_module_info.default_image_size
            size_info.height, size_info.width = default_size
            # TODO(b/72731449): Support variable input size.
            shape = (None, ) + default_size + (3, )
            images = tf.placeholder(dtype=tf.float32,
                                    shape=shape,
                                    name='images')
            hub.attach_image_module_info(image_module_info)
            # The input is expected to have RGB color values in the range [0,1]
            # and gets converted for AmoebaNet to the Inception-style range [-1,+1].
            scaled_images = tf.multiply(images, 2.0)
            scaled_images = tf.subtract(scaled_images, 1.0)

        # Build the net.
        logits, end_points = model_builder.build_network(
            scaled_images, num_classes, is_training, hparams)

        with tf.name_scope('hub_output'):
            # Extract the feature_vectors output.
            try:
                feature_vectors = end_points['global_pool']
            except KeyError:
                tf.logging.error('Valid keys of end_points are:',
                                 ', '.join(end_points))
                raise
            with tf.name_scope('feature_vector'):
                if feature_vectors.shape.ndims != 2:
                    raise ValueError(
                        'Wrong rank (expected 2 after squeeze) '
                        'in feature_vectors:', feature_vectors)
            # Extract the logits output (if applicable).
            if num_classes:
                with tf.name_scope('classification'):
                    if logits.shape.ndims != 2:
                        raise ValueError('Wrong rank (expected 2) in logits:',
                                         logits)

        # Add named signatures.
        hub.add_signature('image_feature_vector', dict(images=images),
                          dict(end_points, default=feature_vectors))
        if num_classes:
            hub.add_signature('image_classification', dict(images=images),
                              dict(end_points, default=logits))
        # Add the default signature.
        if num_classes:
            hub.add_signature('default', dict(images=images),
                              dict(default=logits))
        else:
            hub.add_signature('default', dict(images=images),
                              dict(default=feature_vectors))
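Since the module_fn above attaches an ImageModuleInfo, consumers do not have to hard-code the input resolution; they can query it from the module spec (a sketch; the module path is hypothetical):

module_spec = hub.load_module_spec("/tmp/image_module")  # hypothetical path
height, width = hub.get_expected_image_size(module_spec)
images = tf.placeholder(tf.float32, shape=[None, height, width, 3])
features = hub.Module(module_spec)(dict(images=images),
                                   signature="image_feature_vector")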
Example #41
  def _module_fn(self, model, batch_size):
    """Module Function to create a TF Hub module spec.

    Args:
      model: which network to build; one of "gen" or "disc".
      batch_size: batch size.
    """
    if model not in {"gen", "disc"}:
      raise ValueError("Model {} not supported in module_fn()".format(model))
    placeholder_fn = tf.placeholder if batch_size is None else tf.zeros
    is_training = False
    inputs = {}
    y = None
    if model == "gen":
      inputs["z"] = placeholder_fn(
          shape=(batch_size, self._z_dim),
          dtype=tf.float32,
          name="z_for_eval")
    elif model == "disc":
      inputs["images"] = placeholder_fn(
          shape=[batch_size] + list(self._dataset.image_shape),
          dtype=tf.float32,
          name="images_for_eval")
    if self.conditional:
      inputs["labels"] = placeholder_fn(
          shape=(batch_size,),
          dtype=tf.int32,
          name="labels_for_eval")
      y = self._get_one_hot_labels(inputs["labels"])
    else:
      y = None

    logging.info("Creating module for model %s with inputs %s and y=%s",
                 model, inputs, y)
    outputs = {}
    if model == "disc":
      outputs["prediction"], _, _ = self.discriminator(
          inputs["images"], y=y, is_training=is_training)
    else:
      z = inputs["z"]
      generated = self.generator(z=z, y=y, is_training=is_training)
      if self._g_use_ema and not is_training:
        g_vars = [var for var in tf.trainable_variables()
                  if "generator" in var.name]
        ema = tf.train.ExponentialMovingAverage(decay=self._ema_decay)
        # Create the variables that will be loaded from the checkpoint.
        ema.apply(g_vars)
        def ema_getter(getter, name, *args, **kwargs):
          var = getter(name, *args, **kwargs)
          ema_var = ema.average(var)
          if ema_var is None:
            var_names_without_ema = {"u_var", "accu_mean", "accu_variance",
                                     "accu_counter", "update_accus"}
            if name.split("/")[-1] not in var_names_without_ema:
              logging.warning("Could not find EMA variable for %s.", name)
            return var
          return ema_var
        with tf.variable_scope("", values=[z, y], reuse=True,
                               custom_getter=ema_getter):
          generated = self.generator(z, y=y, is_training=is_training)
      outputs["generated"] = generated

    hub.add_signature(inputs=inputs, outputs=outputs)
Example #42
 def make_generator_spec():
     input_layer = tf.placeholder(
         tf.float32,
         shape=[None] + common_layers.shape_list(generator_inputs)[1:])
     gen_output = self.generator(input_layer, mode)
     hub.add_signature(inputs=input_layer, outputs=gen_output)
Example #43
  def _build_graph(self, batch_dim: Optional[int] = None):
    """Builds the TensorFlow graph for evaluating the functional.

    Args:
      batch_dim: the batch dimension of the grid to use in the model. Default:
        None (determine at runtime). This should only be set if building a model
        in order to export and ahead-of-time compile it into a standalone
        library.
    """

    self._functional = hub.Module(spec=self._model_path)

    grid_coords = tf.placeholder(
        tf.float32, shape=[batch_dim, 3], name='grid_coords')
    grid_weights = tf.placeholder(
        tf.float32, shape=[batch_dim], name='grid_weights')

    # Density information.
    rho_a = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_a')
    rho_b = tf.placeholder(tf.float32, shape=[6, batch_dim], name='rho_b')

    # Split into corresponding terms.
    rho_only_a, grad_a_x, grad_a_y, grad_a_z, _, tau_a = tf.unstack(
        rho_a, axis=0)
    rho_only_b, grad_b_x, grad_b_y, grad_b_z, _, tau_b = tf.unstack(
        rho_b, axis=0)

    # Evaluate |\del \rho|^2 for each spin density and for the total density.
    norm_grad_a = (grad_a_x**2 + grad_a_y**2 + grad_a_z**2)
    norm_grad_b = (grad_b_x**2 + grad_b_y**2 + grad_b_z**2)
    grad_x = grad_a_x + grad_b_x
    grad_y = grad_a_y + grad_b_y
    grad_z = grad_a_z + grad_b_z
    norm_grad = (grad_x**2 + grad_y**2 + grad_z**2)

    # The local Hartree-Fock energy densities at each grid point for the alpha-
    # and beta-spin densities for each value of omega.
    # Note an omega of 0 indicates no screening of the Coulomb potential.
    hfxa = tf.placeholder(
        tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxa')
    hfxb = tf.placeholder(
        tf.float32, shape=[batch_dim, len(self._omega_values)], name='hfxb')

    # Make all features 2D arrays on input for ease of handling inside the
    # functional.
    features = {
        'grid_coords': grid_coords,
        'grid_weights': tf.expand_dims(grid_weights, 1),
        'rho_a': tf.expand_dims(rho_only_a, 1),
        'rho_b': tf.expand_dims(rho_only_b, 1),
        'tau_a': tf.expand_dims(tau_a, 1),
        'tau_b': tf.expand_dims(tau_b, 1),
        'norm_grad_rho_a': tf.expand_dims(norm_grad_a, 1),
        'norm_grad_rho_b': tf.expand_dims(norm_grad_b, 1),
        'norm_grad_rho': tf.expand_dims(norm_grad, 1),
        'hfxa': hfxa,
        'hfxb': hfxb,
    }
    tensor_dict = {f'tensor_dict${k}': v for k, v in features.items()}

    predictions = self._functional(tensor_dict, as_dict=True)
    local_xc = predictions['grid_contribution']
    weighted_local_xc = local_xc * grid_weights
    unweighted_xc = tf.reduce_sum(local_xc, axis=0)
    xc = tf.reduce_sum(weighted_local_xc, axis=0)

    # The potential is the local exchange correlation divided by the
    # total density. Add a small constant to deal with zero density.
    self._vxc = local_xc / (rho_only_a + rho_only_b + 1E-12)

    # The derivatives of the exchange-correlation (XC) energy with respect to
    # input features.  PySCF weights the (standard) derivatives by the grid
    # weights, so we need to compute this with respect to the unweighted sum
    # over grid points.
    self._vrho = tf.gradients(
        unweighted_xc, [features['rho_a'], features['rho_b']],
        name='GRAD_RHO',
        unconnected_gradients=tf.UnconnectedGradients.ZERO)
    self._vsigma = tf.gradients(
        unweighted_xc, [
            features['norm_grad_rho_a'], features['norm_grad_rho_b'],
            features['norm_grad_rho']
        ],
        name='GRAD_SIGMA',
        unconnected_gradients=tf.UnconnectedGradients.ZERO)
    self._vtau = tf.gradients(
        unweighted_xc, [features['tau_a'], features['tau_b']],
        name='GRAD_TAU',
        unconnected_gradients=tf.UnconnectedGradients.ZERO)
    # Standard meta-GGAs do not have a dependency on local HF, so we need to
    # compute the contribution to the Fock matrix ourselves. Just use the
    # weighted XC energy to avoid having to weight this later.
    self._vhf = tf.gradients(
        xc, [features['hfxa'], features['hfxb']],
        name='GRAD_HFX',
        unconnected_gradients=tf.UnconnectedGradients.ZERO)

    self._placeholders = FunctionalInputs(
        rho_a=rho_a,
        rho_b=rho_b,
        hfx_a=hfxa,
        hfx_b=hfxb,
        grid_coords=grid_coords,
        grid_weights=grid_weights)

    outputs = {
        'vxc': self._vxc,
        'vrho': tf.stack(self._vrho),
        'vsigma': tf.stack(self._vsigma),
        'vtau': tf.stack(self._vtau),
        'vhf': tf.stack(self._vhf),
    }
    # Create the signature for TF-Hub, including both the energy and functional
    # derivatives.
    # This is a no-op if _build_graph is called outside of
    # hub.create_module_spec.
    hub.add_signature(
        inputs=attr.asdict(self._placeholders), outputs=outputs)
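
# The derivative outputs above rely on tf.gradients returning zeros (rather
# than None) for placeholders the energy does not depend on. A toy sketch of
# that behaviour, independent of the functional itself:
import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[None])
unused = tf.placeholder(tf.float32, shape=[None])
energy = tf.reduce_sum(x ** 2)

# With the default UnconnectedGradients.NONE the second entry would be None;
# ZERO yields a zero tensor instead, which keeps the later tf.stack() valid.
grads = tf.gradients(
    energy, [x, unused],
    unconnected_gradients=tf.UnconnectedGradients.ZERO)

with tf.Session() as sess:
  gx, gu = sess.run(grads, {x: [1.0, 2.0], unused: [3.0, 4.0]})
  # gx == [2., 4.], gu == [0., 0.]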
Example #44
0
def stateful_module_fn():
  v = tf.get_variable(
      "var123", shape=[3],
      initializer=tf.constant_initializer([1.0, 2.0, 3.0]))
  hub.add_signature(outputs=v.value())
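
# A hedged usage sketch for the stateful module above: wrap the module_fn with
# hub.create_module_spec, instantiate it, and initialise variables before
# reading the output. Session handling here is illustrative.
import tensorflow as tf
import tensorflow_hub as hub

spec = hub.create_module_spec(stateful_module_fn)
module = hub.Module(spec)   # creates "var123" in the current graph
value = module()            # the default signature takes no inputs

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(value))    # expected: [1. 2. 3.]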
Example #45
0
 def make_discriminator_spec():
     input_layer = tf.placeholder(tf.float32, shape=[None] + img_shape)
     disc_output = self.discriminator(input_layer, None, mode)
     hub.add_signature(inputs=input_layer, outputs=disc_output)
Example #46
0
 def assets_module_fn():
   indices = tf.placeholder(dtype=tf.int64, name="indices")
   outputs = do_table_lookup(indices, vocabulary_file)
   hub.add_signature(inputs=indices, outputs=outputs)
Example #47
0
 def _stateless_module_fn():
     """Simple module that squares an input."""
     x = tf.compat.v1.placeholder(tf.int64)
     y = x * x
     hub.add_signature(inputs=x, outputs=y)
Example #48
0
 def consumer_module_fn():
   indices = tf.placeholder(dtype=tf.int64, name="indices")
   inner_module = hub.Module(exported_hub_module)
   inner_module_output = inner_module(indices)
   output = tf.identity(inner_module_output)
   hub.add_signature(inputs=indices, outputs=output)
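
# A hedged sketch of how `exported_hub_module` above might be produced and how
# the consumer spec is built on top of it; the inner module and the path
# "/tmp/inner_module" are illustrative.
import tensorflow as tf
import tensorflow_hub as hub

def double_module_fn():
  x = tf.placeholder(dtype=tf.int64, name="indices")
  hub.add_signature(inputs=x, outputs=x * 2)

with tf.Graph().as_default():
  inner = hub.Module(hub.create_module_spec(double_module_fn))
  with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    inner.export("/tmp/inner_module", sess)  # plays the role of exported_hub_module

# The consumer module_fn can then wrap the exported path; the resulting spec
# embeds the inner module's graph and state.
consumer_spec = hub.create_module_spec(consumer_module_fn)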
Example #49
0
    def module_fn():
        """Spec function for a token embedding module."""
        # Special character ids and size limits used by the biLM preprocessor.
        _bos_id = 256
        _eos_id = 257
        _bow_id = 258
        _eow_id = 259
        _pad_id = 260

        _max_word_length = 50
        _parallel_iterations = 10
        _max_batch_size = 1024

        id_dtype = tf.int32
        id_nptype = np.int32
        max_word_length = tf.constant(_max_word_length, dtype=id_dtype, name='max_word_length')

        version = tf.constant('from_dp_1', dtype=tf.string, name='version')

        # The character representation of the begin/end-of-sentence characters.
        def _make_bos_eos(c):
            r = np.zeros([_max_word_length], dtype=id_nptype)
            r[:] = _pad_id
            r[0] = _bow_id
            r[1] = c
            r[2] = _eow_id
            return tf.constant(r, dtype=id_dtype)

        bos_ids = _make_bos_eos(_bos_id)
        eos_ids = _make_bos_eos(_eos_id)

        def token2ids(token):
            with tf.name_scope("token2ids_preprocessor"):
                char_ids = tf.decode_raw(token, tf.uint8, name='decode_raw2get_char_ids')
                char_ids = tf.cast(char_ids, tf.int32, name='cast2int_token')
                char_ids = tf.strided_slice(char_ids, [0], [max_word_length - 2],
                                            [1], name='slice2resized_token')
                ids_num = tf.shape(char_ids)[0]
                fill_ids_num = (_max_word_length - 2) - ids_num
                pads = tf.fill([fill_ids_num], _pad_id)
                bow_token_eow_pads = tf.concat([[_bow_id], char_ids, [_eow_id], pads],
                                               0, name='concat2bow_token_eow_pads')
                return bow_token_eow_pads

        def sentence_tagging_and_padding(sen_dim):
            with tf.name_scope("sentence_tagging_and_padding_preprocessor"):
                sen = sen_dim[0]
                dim = sen_dim[1]
                extra_dim = tf.shape(sen)[0] - dim
                sen = tf.slice(sen, [0, 0], [dim, max_word_length], name='slice2sen')

                bos_sen_eos = tf.concat([[bos_ids], sen, [eos_ids]], 0, name='concat2bos_sen_eos')
                bos_sen_eos_plus_one = bos_sen_eos + 1
                bos_sen_eos_pads = tf.pad(bos_sen_eos_plus_one, [[0, extra_dim], [0, 0]],
                                          "CONSTANT", name='pad2bos_sen_eos_pads')
                return bos_sen_eos_pads

        # Input placeholders to the biLM.
        tokens = tf.placeholder(shape=(None, None), dtype=tf.string, name='ph2tokens')
        sequence_len = tf.placeholder(shape=(None, ), dtype=tf.int32, name='ph2sequence_len')

        tok_shape = tf.shape(tokens)
        line_tokens = tf.reshape(tokens, shape=[-1], name='reshape2line_tokens')

        with tf.device('/cpu:0'):
            tok_ids = tf.map_fn(
                token2ids,
                line_tokens,
                dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
                name='map_fn2get_tok_ids')

        tok_ids = tf.reshape(tok_ids, [tok_shape[0], tok_shape[1], -1], name='reshape2tok_ids')
        with tf.device('/cpu:0'):
            sen_ids = tf.map_fn(
                sentence_tagging_and_padding,
                (tok_ids, sequence_len),
                dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
                name='map_fn2get_sen_ids')

        # Build the biLM graph.
        bilm = BidirectionalLanguageModel(options, str(weight_file),
                                          max_batch_size=_max_batch_size)

        embeddings_op = bilm(sen_ids)

        # Get an op to compute ELMo (weighted average of the internal biLM layers)
        elmo_output = weight_layers('elmo_output', embeddings_op, l2_coef=0.0)

        weighted_op = elmo_output['weighted_op']
        mean_op = elmo_output['mean_op']
        word_emb = elmo_output['word_emb']
        lstm_outputs1 = elmo_output['lstm_outputs1']
        lstm_outputs2 = elmo_output['lstm_outputs2']

        hub.add_signature("tokens", {"tokens": tokens, "sequence_len": sequence_len},
                          {"elmo": weighted_op,
                           "default": mean_op,
                           "word_emb": word_emb,
                           "lstm_outputs1": lstm_outputs1,
                           "lstm_outputs2": lstm_outputs2,
                           "version": version})

        # #########################Next signature############################# #

        # Input placeholders to the biLM.
        def_strings = tf.placeholder(shape=[None], dtype=tf.string)
        def_tokens_sparse = tf.string_split(def_strings)
        def_tokens_dense = tf.sparse_to_dense(sparse_indices=def_tokens_sparse.indices,
                                              output_shape=def_tokens_sparse.dense_shape,
                                              sparse_values=def_tokens_sparse.values,
                                              default_value=''
                                              )
        def_mask = tf.not_equal(def_tokens_dense, '')
        def_int_mask = tf.cast(def_mask, dtype=tf.int32)
        def_sequence_len = tf.reduce_sum(def_int_mask, axis=-1)

        def_tok_shape = tf.shape(def_tokens_dense)
        def_line_tokens = tf.reshape(def_tokens_dense, shape=[-1], name='reshape2line_tokens')

        with tf.device('/cpu:0'):
            def_tok_ids = tf.map_fn(
                token2ids,
                def_line_tokens,
                dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
                name='map_fn2get_tok_ids')

        def_tok_ids = tf.reshape(def_tok_ids, [def_tok_shape[0], def_tok_shape[1], -1], name='reshape2tok_ids')
        with tf.device('/cpu:0'):
            def_sen_ids = tf.map_fn(
                sentence_tagging_and_padding,
                (def_tok_ids, def_sequence_len),
                dtype=tf.int32, back_prop=False, parallel_iterations=_parallel_iterations,
                name='map_fn2get_sen_ids')

        # Get ops to compute the LM embeddings.
        def_embeddings_op = bilm(def_sen_ids)

        # Get an op to compute ELMo (weighted average of the internal biLM layers)
        def_elmo_output = weight_layers('elmo_output', def_embeddings_op, l2_coef=0.0, reuse=True)

        def_weighted_op = def_elmo_output['weighted_op']
        def_mean_op = def_elmo_output['mean_op']
        def_word_emb = def_elmo_output['word_emb']
        def_lstm_outputs1 = def_elmo_output['lstm_outputs1']
        def_lstm_outputs2 = def_elmo_output['lstm_outputs2']

        hub.add_signature("default", {"strings": def_strings},
                          {"elmo": def_weighted_op,
                           "default": def_mean_op,
                           "word_emb": def_word_emb,
                           "lstm_outputs1": def_lstm_outputs1,
                           "lstm_outputs2": def_lstm_outputs2,
                           "version": version})
Example #50
0
    def _module_fn():
        """
        Function building the module
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')

        # Builds the neural network
        net = slim.conv3d(feature_layer,
                          16,
                          5,
                          activation_fn=tf.nn.leaky_relu,
                          padding='valid')
        #net = wide_resnet(feature_layer, 8, activation_fn=tf.nn.leaky_relu, is_training=is_training)
        net = wide_resnet(net,
                          16,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = wide_resnet(net,
                          32,
                          activation_fn=tf.nn.leaky_relu,
                          keep_prob=dropout,
                          is_training=is_training)
        net = slim.conv3d(net, 32, 3, activation_fn=tf.nn.tanh)

        # Define the probabilistic layer
        net = slim.conv3d(net, n_mixture * 3 * n_y, 1, activation_fn=None)
        cube_size = tf.shape(obs_layer)[1]
        net = tf.reshape(
            net, [-1, cube_size, cube_size, cube_size, n_y, n_mixture * 3])
        #         net = tf.reshape(net, [None, None, None, None, n_y, n_mixture*3])
        loc, unconstrained_scale, logits = tf.split(net,
                                                    num_or_size_splits=3,
                                                    axis=-1)
        scale = tf.nn.softplus(unconstrained_scale)

        # Form mixture of discretized logistic distributions. Note we shift the
        # logistic distribution by -0.5. This lets the quantization capture "rounding"
        # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
        discretized_logistic_dist = tfd.QuantizedDistribution(
            distribution=tfd.TransformedDistribution(
                distribution=tfd.Logistic(loc=loc, scale=scale),
                bijector=tfb.AffineScalar(shift=-0.5)),
            low=0.,
            high=2.**3 - 1)

        mixture_dist = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=logits),
            components_distribution=discretized_logistic_dist)

        # Define a function for sampling, and a function for estimating the log likelihood
        sample = tf.squeeze(mixture_dist.sample())
        loglik = mixture_dist.log_prob(obs_layer)
        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik
                          })
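
# A standalone toy sketch of the probabilistic head used above: a mixture of
# discretized logistic distributions over 2**3 = 8 levels. The scalar values
# are illustrative; tfd/tfb follow the aliases used in the surrounding code.
import tensorflow as tf
import tensorflow_probability as tfp

tfd = tfp.distributions
tfb = tfp.bijectors

loc = tf.constant([0.5, 3.0, 6.5])                    # 3 component locations
scale = tf.nn.softplus(tf.constant([0.1, 0.2, 0.3]))  # positive scales
logits = tf.constant([0.2, 0.5, 0.3])                 # mixture weights

# Shifting by -0.5 makes quantization use "rounding" intervals (x-0.5, x+0.5].
components = tfd.QuantizedDistribution(
    distribution=tfd.TransformedDistribution(
        distribution=tfd.Logistic(loc=loc, scale=scale),
        bijector=tfb.AffineScalar(shift=-0.5)),
    low=0., high=2.**3 - 1)

mixture = tfd.MixtureSameFamily(
    mixture_distribution=tfd.Categorical(logits=logits),
    components_distribution=components)

sample = mixture.sample()        # scalar draw in {0, ..., 7}
log_prob = mixture.log_prob(4.)  # log-likelihood of an observed level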
Example #51
0
def invalid_text_module_fn():
  text = tf.placeholder(tf.string, shape=[10])
  hub.add_signature(inputs=text, outputs=tf.zeros([10, 3]))
Example #52
0
 def assets_module_fn():
   indices = tf.placeholder(dtype=tf.int64, name="indices")
   table = tf.contrib.lookup.index_to_string_table_from_file(
       vocabulary_file=vocab_filename, default_value="UNKNOWN")
   outputs = table.lookup(indices)
   hub.add_signature(inputs=indices, outputs=outputs)
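
# A hedged sketch of exporting a module_fn like the one above so that the
# vocabulary file is bundled as a module asset; the export path is illustrative.
import tensorflow as tf
import tensorflow_hub as hub

spec = hub.create_module_spec(assets_module_fn)

with tf.Graph().as_default():
  module = hub.Module(spec)
  with tf.Session() as sess:
    sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
    # The file behind vocab_filename is copied into the exported module's
    # assets/ directory, so the lookup table keeps working after export.
    module.export("/tmp/lookup_module", sess)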
Example #53
0
    def _module_fn():
        """                                                                                                                     
        Function building the module                                                                                            
        """

        feature_layer = tf.placeholder(
            tf.float32,
            shape=[None, None, None, None, nchannels],
            name='input')
        obs_layer = tf.placeholder(tf.float32,
                                   shape=[None, None, None, None, n_y],
                                   name='observations')

        conditional_im = wide_resnet(feature_layer,
                                     16,
                                     activation_fn=tf.nn.leaky_relu,
                                     keep_prob=dropout,
                                     is_training=is_training)
        conditional_im = wide_resnet(conditional_im,
                                     16,
                                     activation_fn=tf.nn.leaky_relu,
                                     keep_prob=dropout,
                                     is_training=is_training)
        conditional_im = wide_resnet(conditional_im,
                                     1,
                                     activation_fn=tf.nn.leaky_relu,
                                     keep_prob=dropout,
                                     is_training=is_training)
        conditional_im = tf.concat((feature_layer, conditional_im), -1)

        # Builds the neural network
        ul = [[obs_layer]]
        for i in range(10):
            ul.append(
                PixelCNN3Dlayer(i,
                                ul[i],
                                f_map=f_map,
                                full_horizontal=True,
                                h=None,
                                conditional_im=conditional_im,
                                cfilter_size=cfilter_size,
                                gatedact='sigmoid'))

        h_stack_in = ul[-1][-1]

        with tf.variable_scope("fc_1"):
            fc1 = GatedCNN([1, 1, 1, 1],
                           h_stack_in,
                           orientation=None,
                           gated=False,
                           mask='b').output()

        with tf.variable_scope("fc_2"):
            fc2 = GatedCNN([1, 1, 1, n_mixture * 3 * n_y],
                           fc1,
                           orientation=None,
                           gated=False,
                           mask='b',
                           activation=False).output()

        cube_size = tf.shape(obs_layer)[1]
        net = tf.reshape(
            fc2, [-1, cube_size, cube_size, cube_size, n_y, n_mixture * 3])

        loc, unconstrained_scale, logits = tf.split(net,
                                                    num_or_size_splits=3,
                                                    axis=-1)
        scale = tf.nn.softplus(unconstrained_scale) + 1e-3

        # Form mixture of discretized logistic distributions. Note we shift the
        # logistic distribution by -0.5. This lets the quantization capture "rounding"
        # intervals, `(x-0.5, x+0.5]`, and not "ceiling" intervals, `(x-1, x]`.
        #         discretized_logistic_dist = tfd.QuantizedDistribution(
        #             distribution=tfd.TransformedDistribution(
        #                 distribution=tfd.Logistic(loc=loc, scale=scale),
        #                 bijector=tfb.AffineScalar(shift=-0.5)),
        #             low=0.,
        #             high=2.**3-1)

        mixture_dist = tfd.MixtureSameFamily(
            mixture_distribution=tfd.Categorical(logits=logits),
            components_distribution=tfd.Normal(loc, scale))

        # Define a function for sampling, and a function for estimating the log likelihood
        #sample = tf.squeeze(mixture_dist.sample())
        sample = mixture_dist.sample()
        loglik = mixture_dist.log_prob(obs_layer)
        hub.add_signature(inputs={
            'features': feature_layer,
            'labels': obs_layer
        },
                          outputs={
                              'sample': sample,
                              'loglikelihood': loglik,
                              'loc': loc,
                              'scale': scale,
                              'logits': logits
                          })
Example #54
0
 def make_decoder_spec():
     code = tf.placeholder(tf.float32, shape=[None, latent_size])
     output = decoder_model(code)
     if not tf.contrib.framework.is_tensor(output):
         output = output.sample()
     hub.add_signature(inputs=code, outputs=output)
Example #55
0
 def encoder_spec():
     x = tf.compat.v1.placeholder(tf.float32, shape=params['full_size'])
     z = encoder_(x)
     hub.add_signature(inputs={'x': x}, outputs={'z': z})
Example #56
0
def update_ops_module_fn():
  counter = tf.Variable(0, trainable=False)
  tf.add_to_collection(tf.GraphKeys.UPDATE_OPS, counter.assign_add(1))
  hub.add_signature(inputs=None, outputs=counter.value())
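
# A hedged consumption sketch, assuming (as this fixture appears designed to
# exercise) that ops the module adds to tf.GraphKeys.UPDATE_OPS surface in the
# consumer graph's collection when the module is applied.
import tensorflow as tf
import tensorflow_hub as hub

module = hub.Module(hub.create_module_spec(update_ops_module_fn))
counter = module()

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  sess.run(update_ops)       # runs the assign_add registered by the module
  print(sess.run(counter))   # 1 after a single update step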
Example #57
0
 def plus_one():
     x = tf.compat.v1.placeholder(dtype=tf.float32, name='x')
     y = x + 1
     hub.add_signature(inputs=x, outputs=y)
Example #58
0
def multiple_outputs_module_fn():
  x = tf.placeholder(dtype=tf.float32)
  v = tf.Variable([3.0])
  hub.add_signature(
      inputs={"x": x},
      outputs={"y": v * x, "z": v * v * x})
Example #59
0
 def assets_module_fn():
   indices = tf_v1.placeholder(dtype=tf.int64, name="indices")
   table = index_to_string_table_from_file(
       vocabulary_file=vocab_filename, default_value="UNKNOWN")
   outputs = table.lookup(indices)
   hub.add_signature(inputs=indices, outputs=outputs)
Example #60
0
def AutoEncoder_module():
    inputs = tf.placeholder(tf.float32, shape=[None, IMAGE_DIM])
    output = tf.reshape(inputs, [-1, 1, 96, 96])

    # Compression
    # 96x96
    output = lib.ops.conv2d.Conv2D('AutoEncoder.1',
                                   1,
                                   DIM,
                                   5,
                                   output,
                                   stride=2)
    output = tf.nn.relu(output)
    #48x48
    output = lib.ops.conv2d.Conv2D('AutoEncoder.2',
                                   DIM,
                                   2 * DIM,
                                   5,
                                   output,
                                   stride=2)
    output = tf.nn.relu(output)
    #24x24
    output = lib.ops.conv2d.Conv2D('AutoEncoder.3',
                                   2 * DIM,
                                   4 * DIM,
                                   5,
                                   output,
                                   stride=2)
    output = tf.nn.relu(output)
    #12x12
    output = lib.ops.conv2d.Conv2D('AutoEncoder.4',
                                   4 * DIM,
                                   8 * DIM,
                                   5,
                                   output,
                                   stride=2)
    output = tf.nn.relu(output)
    #6x6
    #output = lib.ops.conv2d.Conv2D('AutoEncoder.5', 8*DIM, int(DIM/64), 5, output)
    #output = tf.nn.relu(output)

    #output = tf.reshape(tf.layers.flatten(output), (-1, LATENT_DIM))
    output = tf.layers.flatten(output)
    output_latent = tf.layers.dense(output, LATENT_DIM, activation=None)

    #tf.Print(output_latent)
    print(output_latent)
    hub.add_signature(inputs=inputs, outputs=output_latent, name='latent')

    # Decompression
    #output = tf.reshape(
    activation = None
    output = tf.layers.dense(output_latent,
                             DIM * 6 * 6,
                             use_bias=False,
                             activation=activation)
    output = tf.reshape(output, [-1, DIM, 6, 6])

    output = lib.ops.deconv2d.Deconv2D('AutoEncoder.6', DIM, 4 * DIM, 5,
                                       output)
    output = tf.nn.relu(output)

    output = lib.ops.deconv2d.Deconv2D('AutoEncoder.7', 4 * DIM, 2 * DIM, 5,
                                       output)
    output = tf.nn.relu(output)

    output = lib.ops.deconv2d.Deconv2D('AutoEncoder.8', 2 * DIM, DIM, 5,
                                       output)
    output = tf.nn.relu(output)

    output = lib.ops.deconv2d.Deconv2D('AutoEncoder.9', DIM, 1, 5, output)
    output = tf.nn.relu(output)

    output = tf.reshape(output, [-1, IMAGE_DIM])

    hub.add_signature(inputs=output_latent,
                      outputs=output,
                      name='decode_latent')
    hub.add_signature(inputs=inputs, outputs=output)

    return output
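
# A hedged sketch of consuming the three signatures registered above; IMAGE_DIM
# and LATENT_DIM come from the surrounding (not shown) configuration.
import tensorflow as tf
import tensorflow_hub as hub

ae = hub.Module(hub.create_module_spec(AutoEncoder_module))

images = tf.placeholder(tf.float32, shape=[None, IMAGE_DIM])
codes = ae(images, signature='latent')           # encoder half only
decoded = ae(codes, signature='decode_latent')   # decoder half only
reconstruction = ae(images)                      # default: full round trip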