Code Example #1
 def testConflictingUnboundVariables(self):
   """Two unbound_vars with the same name are considered conflicting."""
   input_pt = self.Wrap(self.input)
   with self.assertRaises(ValueError):
     (input_pt.flatten()
      .fully_connected(prettytensor.UnboundVariable('width'))
      .fully_connected(prettytensor.UnboundVariable('width')))
Code Example #2
 def testMultipleUnboundVariables(self):
   input_pt = self.Wrap(self.input)
   template = (input_pt.flatten()
               .fully_connected(prettytensor.UnboundVariable('width'))
               .fully_connected(prettytensor.UnboundVariable('width2')))
   out = self.RunTensor(template.construct(width=200, width2=100))
   self.assertSequenceEqual([2, 100], out.shape)
Code Example #3
 def testUnboundVariableForParameter(self):
   input_pt = self.Wrap(self.input)
   template = input_pt.flatten().fully_connected(prettytensor.UnboundVariable(
       'width'))
   self.assertTrue(isinstance(template, pretty_tensor_class._DeferredLayer))
   out = self.RunTensor(template.construct(width=200))
   self.assertSequenceEqual([2, 200], out.shape)
Code Example #4
File: test1.py  Project: nayem/RSGAN
def sub_temp():
    matrix = tf.constant([[2.]])
    template = (pt.template("input").  # 128*9*4*4
                custom_add(pt.UnboundVariable("hidden"), matrix))
    return template
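
The template returned by sub_temp() adds nothing to the graph on its own; as in the test examples above, construct() has to bind both the "input" placeholder and the "hidden" unbound variable. A minimal sketch under that assumption (the helper name and its arguments are hypothetical, not part of the original project):

def build_sub(input_tensor, hidden_tensor):
    # Sketch only: no ops are created until construct() binds the "input"
    # template argument and the "hidden" unbound variable together.
    return sub_temp().construct(input=input_tensor, hidden=hidden_tensor)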
Code Example #5
    def _make_decoder_template(self):
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        image_size = self.image_shape[0]
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("decoder"):
            if self.network_type == "fully-connected":
                self.decoder_template = (pt.template("z_in").
                                         custom_fully_connected(1000).
                                         apply(tf.nn.relu).
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.relu).
                                         custom_fully_connected(self.image_dim))

            elif self.network_type == "convolutional":
                self.decoder_template = \
                    (pt.template("z_in").
                     custom_fully_connected(1024).
                     batch_normalize().
                     apply(tf.nn.relu).
                     custom_fully_connected((image_size // 4) * (image_size // 4) * 128).
                     batch_normalize().
                     apply(tf.nn.relu).
                     reshape([-1, image_size // 4, image_size // 4, 128]).
                     custom_deconv2d([0, image_size // 2, image_size // 2, 64],
                                     k_h=4, k_w=4).
                     batch_normalize().
                     apply(tf.nn.relu).
                     custom_deconv2d([0] + list(self.image_shape),
                                     k_h=4, k_w=4).
                     flatten())
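
Because 'phase' is declared above as pt.UnboundVariable('phase', default=pt.Phase.train), it behaves like the construct-time bindings in the test examples: it may be passed to construct() alongside the template's z_in placeholder, or omitted to fall back on the default. A rough sketch (the method name and the latent tensor z are hypothetical, not part of the snippet):

    def _decode_sketch(self, z):
        # Sketch only: omitting phase uses the Phase.train default declared
        # above; passing phase=pt.Phase.test would bind the unbound variable.
        return self.decoder_template.construct(z_in=z)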
Code Example #6
    def _make_encoder_template(self):
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("encoder"):
            if self.network_type == "fully-connected":
                z_dim = self.latent_dist.dist_flat_dim
                self.encoder_template = (pt.template("x_in").
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(z_dim))

            elif self.network_type == "convolutional":
                z_dim = self.latent_dist.dist_flat_dim
                self.encoder_template = (pt.template("x_in").
                                         reshape([-1] + list(self.image_shape)).
                                         custom_conv2d(64, k_h=4, k_w=4).
                                         apply(tf.nn.elu).
                                         custom_conv2d(128, k_h=4, k_w=4).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(1024).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(z_dim))
Code Example #7
 def testUnboundVariableReused(self):
   """The same unbound_var can be used multiple times in a graph."""
   input_pt = self.Wrap(self.input)
   unbound_var = prettytensor.UnboundVariable('width')
   template = (input_pt.flatten().fully_connected(unbound_var)
               .fully_connected(unbound_var))
   out = self.RunTensor(template.construct(width=200))
   self.assertSequenceEqual([2, 200], out.shape)
Code Example #8
 def testUnboundVariableAsDefault(self):
   """The same unbound_var can be used multiple times in a graph."""
   input_pt = self.Wrap(self.input)
   with prettytensor.defaults_scope(
       value=prettytensor.UnboundVariable('key')):
     x = input_pt.ValidateMethod(self)
   self.assertTrue(isinstance(x, pretty_tensor_class._DeferredLayer))
   x.construct(key=KEY)
Code Example #9
File: model.py  Project: nayem/RSGAN
    def discriminator(self):
        template = \
            (pt.template("input").  # 128*9*4*4
             custom_conv2d(self.df_dim * 8, k_h=1, k_w=1, d_h=1, d_w=1).  # 128*8*4*4
             conv_batch_norm().
             apply(leaky_rectify, leakiness=0.2).
             # custom_fully_connected(1))
             #custom_conv2d(1, k_h=self.s16, k_w=self.s16, d_h=self.s16, d_w=self.s16).
             custom_conv_lstm(self.df_dim * 8, pt.UnboundVariable("hidden"), k_h=1, k_w=1, d_h=1, d_w=1))

        return template
Code Example #10
  def testIncompatibleUnboundVariableValues(self):
    """Ensures that an error is thrown if a var is given incompatible values.

    Since the primary use case of templates is parameter sharing, it is
    important that substitutions don't conflict.
    """
    input_pt = self.Wrap(self.input)
    full = input_pt.flatten().fully_connected(prettytensor.UnboundVariable(
        'width'))
    full.construct(width=100)
    with self.assertRaises(ValueError):
      full.construct(width=200)
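
The conflict check applies to this one deferred layer: once full has been constructed with width=100 its parameters are fixed, so a different width has to come from differently named unbound variables (as in Code Example #2) or from a separate template. A brief sketch of the latter, with the name other chosen purely for illustration:

    # Sketch only: an independent template owns its own deferred layer and
    # parameters, so binding width=200 here does not conflict with full above.
    other = input_pt.flatten().fully_connected(prettytensor.UnboundVariable('width'))
    other.construct(width=200)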
Code Example #11
 def _make_discriminator_template(self):
     defaults_scope = {
         'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
         'scale_after_normalization': True,
         }
     with pt.defaults_scope(**defaults_scope):
       with tf.variable_scope("discriminator"):
         self.discriminator_template = (pt.template("z_in").
                                        custom_fully_connected(1000).
                                        apply(tf.nn.relu).
                                        custom_fully_connected(1000).
                                        batch_normalize().
                                        apply(tf.nn.relu).
                                        custom_fully_connected(1))
Code Example #12
 def testMissingUnboundVariable(self):
   input_pt = self.Wrap(self.input)
   template = input_pt.flatten().fully_connected(prettytensor.UnboundVariable(
       'width'))
   with self.assertRaises(ValueError):
     template.construct()
Code Example #13
 def testExtraValues(self):
   input_pt = self.Wrap(self.input)
   template = (input_pt.flatten()
               .fully_connected(prettytensor.UnboundVariable('width')))
   with self.assertRaises(ValueError):
     template.construct(width=200, width2=100)
Code Example #14
    sampled_tensors = []
    glimpse_tensors = []
    write_tensors = []
    params_tensors = []

    loss = 0.0
    with tf.variable_scope("model"):
        with pt.defaults_scope(activation_fn=tf.nn.elu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.1,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            # Encoder RNN (Eq. 5)
            encoder_template = (pt.template('input').gru_cell(
                num_units=FLAGS.rnn_size, state=pt.UnboundVariable('state')))

            # Projection of encoder RNN output (Eq. 1-2)
            encoder_proj_template = (pt.template('input').fully_connected(
                FLAGS.hidden_size * 2, activation_fn=None))

            # Params of read from decoder RNN output (Eq. 21)
            decoder_read_params_template = (
                pt.template('input').fully_connected(5, activation_fn=None))

            # Decoder RNN (Eq. 7)
            decoder_template = (pt.template('input').gru_cell(
                num_units=FLAGS.rnn_size, state=pt.UnboundVariable('state')))

            # Projection of decoder RNN output (Eq. 18)
            decoder_proj_template = (pt.template('input').fully_connected(