Code Example #1
def make_model_spec():
  """module_fn for hub.create_module_spec(): rebuilds an image from a bottleneck.

  `b_shape`, `res_size`, `hparams` and `output_activation` come from the
  enclosing scope of the original source; `self` is the autoencoder model.
  """
  input_layer = tf.placeholder(tf.float32, shape=b_shape)
  x = self.unbottleneck(input_layer, res_size)
  x = self.decoder(x, None)
  reconstr = tf.layers.dense(x, self.num_channels, name="autoencoder_final",
                             activation=output_activation)
  hub.add_signature(inputs=input_layer, outputs=reconstr)
  # Attach metadata that consumers can read back with get_attached_message().
  hub.attach_message(
      "stamp_size", tf.train.Int64List(value=[hparams.problem_hparams.img_len]))
  hub.attach_message(
      "pixel_size", tf.train.FloatList(value=[hparams.problem_hparams.pixel_scale]))
Code Example #2
def generative_model_fn():
  """module_fn chaining a latent-flow module and a decoder module into one model.

  `FLAGS.flow_module` and `FLAGS.decoder_module` are paths to previously
  exported hub modules, defined in the enclosing script.
  """
  code = hub.Module(FLAGS.flow_module)
  decoder = hub.Module(FLAGS.decoder_module)
  # Build one placeholder per input expected by the flow module's signature.
  input_info = code.get_input_info_dict()
  inputs = {
      k: tf.placeholder(tf.float32, shape=input_info[k].get_shape())
      for k in input_info.keys()
  }
  # The composed signature maps the flow inputs straight to decoded images.
  hub.add_signature(inputs=inputs, outputs=decoder(code(inputs)))
  # Forward the decoder's metadata so the composed module is self-describing.
  hub.attach_message(
      "stamp_size",
      decoder.get_attached_message("stamp_size", tf.train.Int64List))
  hub.attach_message(
      "pixel_size",
      decoder.get_attached_message("pixel_size", tf.train.FloatList))
Code Example #3
def module_fn():
  """A module_fn for use with hub.create_module_spec().

  `self._object_factory`, `self._captured_calls`, `self._captured_attrs`,
  `_to_placeholder` and `nest` come from the enclosing class in the
  original source.
  """
  # We will use a copy of the original object to build the graph.
  wrapped_object = self._object_factory()

  # Per-method metadata collected while building signatures (initialized here;
  # the definition is outside the excerpt in the original source).
  method_specs = {}

  for method_name, method_info in self._captured_calls.items():
    captured_inputs, captured_specs = method_info
    # Re-create each captured call with placeholders standing in for inputs.
    tensor_inputs = nest.map_structure(_to_placeholder, captured_inputs)
    method_to_call = getattr(wrapped_object, method_name)
    tensor_outputs = method_to_call(**tensor_inputs)

    # hub signatures take flat dicts of tensors, so flatten the possibly
    # nested inputs/outputs and key them by their flattened index.
    flat_tensor_inputs = nest.flatten(tensor_inputs)
    flat_tensor_inputs = {
        str(i): v for i, v in enumerate(flat_tensor_inputs)
    }
    flat_tensor_outputs = nest.flatten(tensor_outputs)
    flat_tensor_outputs = {
        str(i): v for i, v in enumerate(flat_tensor_outputs)
    }

    # Remember the specs and the nested structures (tensors replaced by None)
    # so consumers can restore the original nesting.
    method_specs[method_name] = dict(
        specs=captured_specs,
        inputs=nest.map_structure(lambda _: None, tensor_inputs),
        outputs=nest.map_structure(lambda _: None, tensor_outputs))

    # "__call__" becomes the module's default signature.
    signature_name = ("default"
                      if method_name == "__call__" else method_name)
    hub.add_signature(signature_name, flat_tensor_inputs,
                      flat_tensor_outputs)

  # Pickle the structural metadata and attach it to the module.
  hub.attach_message(
      "methods", tf.train.BytesList(value=[pickle.dumps(method_specs)]))
  hub.attach_message(
      "properties",
      tf.train.BytesList(value=[pickle.dumps(self._captured_attrs)]))
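The pickled "methods" and "properties" messages above only make sense together with a consumer that unpickles them after loading the exported module. The lines below are a minimal sketch of that read-back, not taken from the original source; the module path is a placeholder.

import pickle
import tensorflow as tf
import tensorflow_hub as hub

module = hub.Module("/path/to/exported_module")  # placeholder path
# get_attached_message returns the protobuf stored under the given key.
methods_msg = module.get_attached_message("methods", tf.train.BytesList)
method_specs = pickle.loads(methods_msg.value[0])
properties = pickle.loads(
    module.get_attached_message("properties", tf.train.BytesList).value[0])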
Code Example #4
def make_model_spec():
  """Variant of Code Example #1: the output width comes from `input_shape[-1]`
  and `pixel_scale` may be a per-resolution mapping instead of a scalar."""
  input_layer = tf.placeholder(tf.float32, shape=b_shape)
  x = self.unbottleneck(input_layer, res_size)
  x = self.decoder(x, None)
  reconstr = tf.layers.dense(x, input_shape[-1], name="autoencoder_final",
                             activation=output_activation)
  hub.add_signature(inputs=input_layer, outputs=reconstr)
  hub.attach_message(
      "stamp_size", tf.train.Int64List(value=[hparams.problem_hparams.img_len]))
  try:
    # Multi-resolution problems store one pixel scale per resolution.
    hub.attach_message(
        "pixel_size",
        tf.train.FloatList(value=[hparams.problem_hparams.pixel_scale[res]
                                  for res in hparams.problem_hparams.resolutions]))
  except AttributeError:
    # Single-resolution problems expose a single scalar pixel_scale.
    hub.attach_message(
        "pixel_size",
        tf.train.FloatList(value=[hparams.problem_hparams.pixel_scale]))