Example #1
    def __init__(self):
        self.data_directory = os.path.join(FLAGS.working_directory, "MNIST")
        if not os.path.exists(self.data_directory):
            os.makedirs(self.data_directory)
        self.save_path = FLAGS.working_directory + '/save.ckpt'
        self.mnist = read_data_set("/tmp/vae/converted_java.npy")

        self.input_tensor = tf.placeholder(tf.float32, [FLAGS.batch_size, 28 * 28])

        with pt.defaults_scope(activation_fn=tf.nn.elu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            with pt.defaults_scope(phase=pt.Phase.train):
                with tf.variable_scope("model") as scope:
                    self.output_tensor, self.mean, self.stddev = decoder(encoder(self.input_tensor))

            with pt.defaults_scope(phase=pt.Phase.test):
                with tf.variable_scope("model", reuse=True) as scope:
                    self.sampled_tensor, _, _ = decoder()

        self.vae_loss = get_vae_cost(self.mean, self.stddev)
        self.rec_loss = get_reconstruction_cost(self.output_tensor, self.input_tensor)

        self.loss = self.vae_loss + self.rec_loss

        self.optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate, epsilon=1.0)
        self.train = pt.apply_optimizer(self.optimizer, losses=[self.loss])

        self.init = tf.initialize_all_variables()
        
        self.saver = tf.train.Saver()
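A minimal sketch of how such a class might be driven once constructed. The class name `VAE`, the flags `FLAGS.max_epoch` and `FLAGS.updates_per_epoch`, and the `next_batch` call are assumptions for illustration, not part of the snippet above:

# Hypothetical training loop for the snippet above (names are assumptions).
import tensorflow as tf

model = VAE()  # assumed name of the class whose __init__ is shown above
with tf.Session() as sess:
    sess.run(model.init)
    for epoch in range(FLAGS.max_epoch):          # assumed flag
        for _ in range(FLAGS.updates_per_epoch):  # assumed flag
            x, _ = model.mnist.train.next_batch(FLAGS.batch_size)  # assumed API
            _, loss_value = sess.run([model.train, model.loss],
                                     {model.input_tensor: x})
        model.saver.save(sess, model.save_path)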
Example #2
    def init_opt(self):
        self.build_placeholder()

        with pt.defaults_scope(phase=pt.Phase.train):
            with tf.variable_scope("g_net"):
                # ####get output from G network################################
                c, kl_loss = self.sample_encoded_context(self.embeddings)
                z = tf.random_normal([self.batch_size, cfg.Z_DIM])
                self.log_vars.append(("hist_c", c))
                self.log_vars.append(("hist_z", z))
                fake_images = self.model.get_generator(tf.concat(1, [c, z]))

            # ####get discriminator_loss and generator_loss ###################
            discriminator_loss, generator_loss =\
                self.compute_losses(self.images,
                                    self.wrong_images,
                                    fake_images,
                                    self.embeddings)
            generator_loss += kl_loss
            self.log_vars.append(("g_loss_kl_loss", kl_loss))
            self.log_vars.append(("g_loss", generator_loss))
            self.log_vars.append(("d_loss", discriminator_loss))

            # #######Total loss for build optimizers###########################
            self.prepare_trainer(generator_loss, discriminator_loss)
            # #######define self.g_sum, self.d_sum,....########################
            self.define_summaries()

        with pt.defaults_scope(phase=pt.Phase.test):
            with tf.variable_scope("g_net", reuse=True):
                self.sampler()
            self.visualization(cfg.TRAIN.NUM_COPY)
            print("success")
Example #3
    def init_opt(self):
        self.build_placeholder()

        with pt.defaults_scope(phase=pt.Phase.train):
            with tf.variable_scope("g_net"):
                # ####get output from G network################################
                c, kl_loss = self.sample_encoded_context(self.embeddings)
                z = tf.random_normal([self.batch_size, cfg.Z_DIM])
                self.log_vars.append(("hist_c", c))
                self.log_vars.append(("hist_z", z))
                fake_images = self.model.get_generator(tf.concat(1, [c, z]))

            # ####get discriminator_loss and generator_loss ###################
            discriminator_loss, generator_loss =\
                self.compute_losses(self.images,
                                    self.wrong_images,
                                    fake_images,
                                    self.embeddings)
            generator_loss += kl_loss
            self.log_vars.append(("g_loss_kl_loss", kl_loss))
            self.log_vars.append(("g_loss", generator_loss))
            self.log_vars.append(("d_loss", discriminator_loss))

            # #######Total loss for build optimizers###########################
            self.prepare_trainer(generator_loss, discriminator_loss)
            # #######define self.g_sum, self.d_sum,....########################
            self.define_summaries()

        with pt.defaults_scope(phase=pt.Phase.test):
            with tf.variable_scope("g_net", reuse=True):
                self.sampler()
            self.visualization(cfg.TRAIN.NUM_COPY)
            print("success")
Example #4
 def testNestedDefaultScope(self):
     pretty_tensor_class._defaults['l2loss'] = 0
     with prettytensor.defaults_scope(l2loss=0.001):
         self.assertEqual(0.001, pretty_tensor_class._defaults['l2loss'])
         with prettytensor.defaults_scope(l2loss=5):
             self.assertEqual(5, pretty_tensor_class._defaults['l2loss'])
         self.assertEqual(0.001, pretty_tensor_class._defaults['l2loss'])
     self.assertEqual(0, pretty_tensor_class._defaults['l2loss'])
Example #5
 def testNestedDefaultScope(self):
   pretty_tensor_class._defaults['l2loss'] = 0
   with prettytensor.defaults_scope(l2loss=0.001):
     self.assertEqual(0.001, pretty_tensor_class._defaults['l2loss'])
     with prettytensor.defaults_scope(l2loss=5):
       self.assertEqual(5, pretty_tensor_class._defaults['l2loss'])
     self.assertEqual(0.001, pretty_tensor_class._defaults['l2loss'])
   self.assertEqual(0, pretty_tensor_class._defaults['l2loss'])
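Taken together, the two tests above pin down the contract: `defaults_scope` is a nestable context manager that restores the previous default value on exit. In normal use the default is consumed by any layer that declares it; a minimal sketch:

import tensorflow as tf
import prettytensor as pt

images = tf.zeros([4, 784])
with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    # Both layers pick up the relu activation and l2 penalty from the scope.
    hidden = pt.wrap(images).fully_connected(100).fully_connected(100)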
Example #6
 def build_model(self):
   with tf.device('/cpu:0'):
     tf.reset_default_graph()
     self._current_step = tf.Variable(0, trainable=False, name='global_step')
     self._step = tf.assign(self._current_step, self._current_step + 1)
     with pt.defaults_scope(activation_fn=self._activation.func):
       with pt.defaults_scope(phase=pt.Phase.train):
         with tf.variable_scope(self.encoder_scope):
           self._build_encoder()
         with tf.variable_scope(self.decoder_scope):
           self._build_decoder()
Example #7
 def build_model(self):
   tf.reset_default_graph()
   self._batch_shape = inp.get_batch_shape(FLAGS.batch_size, FLAGS.input_path)
   self._current_step = tf.Variable(0, trainable=False, name='global_step')
   self._step = tf.assign(self._current_step, self._current_step + 1)
   with pt.defaults_scope(activation_fn=self._activation.func):
     with pt.defaults_scope(phase=pt.Phase.train):
       with tf.variable_scope(self.encoder_scope):
         self._build_encoder()
       with tf.variable_scope(self.decoder_scope):
         self._build_decoder()
Example #8
    def init_opt(self):
        self.build_placeholder()

        with tf.variable_scope("g_net"):
            with pt.defaults_scope(phase=pt.Phase.train):
                # ####get output from G network################################
                # c, kl_loss = self.sample_encoded_context(self.conditions)
                #c_text = self.model.generate_text_condition(self.embeddings)
                #c, c_vector = self.model.generate_condition(self.conditions)
                c_text = self.embeddings
                c_mask = self.conditions
                z = tf.random_normal([self.batch_size, cfg.Z_DIM])
                self.log_vars.append(("hist_c", c_text))
                self.log_vars.append(("hist_z", z))

                #G_input = tf.concat(1, [c, c_text])
                #G_input = tf.concat(1, [G_input, z])
                # print('#######################')
                # print('c_text, c_mask shape is:', c_text, c_mask)
                # print('#########################')
                G_input = self.model.generate_condition(
                    tf.concat(1, [c_text, z]), c_mask)
                # update by zhaoyang
                self.debug_g_input = G_input
                ###################

                fake_images = self.model.get_generator(G_input)

                # update by kristy
                self.debug_fake_images = fake_images

            # ####get discriminator_loss and generator_loss ###################
            discriminator_loss, generator_loss =\
                self.compute_losses(self.images,
                                    self.wrong_images,
                                    fake_images,
                                    self.embeddings,
                                    self.conditions)
            #generator_loss += kl_loss
            #self.log_vars.append(("g_loss_kl_loss", kl_loss))
            self.log_vars.append(("g_loss", generator_loss))
            self.log_vars.append(("d_loss", discriminator_loss))

            # #######Total loss for build optimizers###########################
            self.prepare_trainer(generator_loss, discriminator_loss)
            # #######define self.g_sum, self.d_sum,....########################
            self.define_summaries()

            with pt.defaults_scope(phase=pt.Phase.test):
                #with tf.variable_scope("g_net", reuse=True):
                self.sampler()
            self.visualization(cfg.TRAIN.NUM_COPY)
            print("success")
Example #9
    def init_opt(self):
        self.build_placeholder()

        with pt.defaults_scope(phase=pt.Phase.train):
            # ####get output from G network####################################
            with tf.variable_scope("g_net"):
                c, kl_loss = self.sample_encoded_context(self.embeddings)
                z = tf.random_normal([self.batch_size, cfg.Z_DIM])
                self.log_vars.append(("hist_c", c))
                self.log_vars.append(("hist_z", z))
                fake_images = self.model.get_generator(
                    tf.concat(axis=1, values=[c, z]))

            # ####get discriminator_loss and generator_loss ###################
            discriminator_loss, generator_loss =\
                self.compute_losses(self.images,
                                    self.wrong_images,
                                    fake_images,
                                    self.embeddings,
                                    self.style_indices, #but we don't need to use style indices for discriminator loss
                                    flag='lr')
            generator_loss += kl_loss
            self.log_vars.append(("g_loss_kl_loss", kl_loss))
            self.log_vars.append(("g_loss", generator_loss))
            self.log_vars.append(("d_loss", discriminator_loss))

            # #### For hr_g and hr_d #########################################
            with tf.variable_scope("hr_g_net"):
                hr_c, hr_kl_loss = self.sample_encoded_context(self.embeddings)
                self.log_vars.append(("hist_hr_c", hr_c))
                hr_fake_images = self.model.hr_get_generator(fake_images, hr_c)
            # get losses
            hr_discriminator_loss, hr_generator_loss =\
                self.compute_losses(self.hr_images,
                                    self.hr_wrong_images,
                                    hr_fake_images,
                                    self.embeddings,
                                    self.style_indices,
                                    flag='hr')
            hr_generator_loss += hr_kl_loss
            self.log_vars.append(("hr_g_loss", hr_generator_loss))
            self.log_vars.append(("hr_d_loss", hr_discriminator_loss))

            # #######define self.g_sum, self.d_sum,....########################
            self.prepare_trainer(discriminator_loss, generator_loss,
                                 hr_discriminator_loss, hr_generator_loss)
            self.define_summaries()

        with pt.defaults_scope(phase=pt.Phase.test):
            self.sampler()
            self.visualization(cfg.TRAIN.NUM_COPY)
            print("success")
Example #10
 def build_model(self):
     tf.reset_default_graph()
     self._batch_shape = inp.get_batch_shape(FLAGS.batch_size,
                                             FLAGS.input_path)
     self._current_step = tf.Variable(0,
                                      trainable=False,
                                      name='global_step')
     self._step = tf.assign(self._current_step, self._current_step + 1)
     with pt.defaults_scope(activation_fn=self._activation.func):
         with pt.defaults_scope(phase=pt.Phase.train):
             with tf.variable_scope(self.encoder_scope):
                 self._build_encoder()
             with tf.variable_scope(self.decoder_scope):
                 self._build_decoder()
Example #11
def calc_pc():
    '''Calculate the prior probability of each private attribute from counts
    over a binary dataset of values in {1, -1} (loaded from the attributes
    file below).
    returns p_c: M x 1 vector of p(c=1)
    '''
    data_directory = os.path.join(FLAGS.working_directory, "celebA")
    private_tensor = tf.placeholder(tf.float32, [FLAGS.batch_size, FLAGS.private_size])
    attrs = np.loadtxt(FLAGS.working_directory + FLAGS.attrs_directory + 'list_attr_celeba.txt', skiprows=2, usecols=range(1,FLAGS.output_size + FLAGS.private_size+1))
    def get_feed(batch_no, test_phase):
        #xs = dvib.read_imgs(batch_no, test_phase)
        ys = dvib.read_attrs(attrs, batch_no, test_phase)
        #xs = xs/PX_MAX
        #ys = ys * PX_MAX / 2
        return {private_tensor: ys[:, FLAGS.output_size:]}
    with pt.defaults_scope(activation_fn=tf.nn.relu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):
        with pt.defaults_scope(phase=pt.Phase.train):
            with tf.variable_scope("prior") as scope:
                # private data is in {-1, 1}
                p_c = tf.reduce_mean((private_tensor+1.)/2., axis=0)

    #pdb.set_trace()
    init = tf.global_variables_initializer()
    # Config session for memory
    config = tf.ConfigProto()
    #config.gpu_options.allow_growth = True
    #config.gpu_options.per_process_gpu_memory_fraction = 0.8
    config.log_device_placement=False
    sess = tf.Session()
    sess.run(init)
    # calculating prior p_c
    widgets = ["Calculating priors |", Percentage(), Bar(), ETA()]
    pbar = ProgressBar(maxval = FLAGS.updates_per_epoch, widgets=widgets)
    pbar.start()


    p_cv = 0
    for i in range(FLAGS.updates_per_epoch):
        pbar.update(i)
        feeds = get_feed(i, False)
        p_cv += sess.run(p_c, feeds)
    p_cv /= FLAGS.updates_per_epoch
    print("prior p_c")
    print(p_cv)
    sess.close()
    return p_cv
Example #12
 def inference_io(self, reuse):
     with tf.variable_scope(self.name, reuse=reuse), pt.defaults_scope(
             summary_collections=['INFERENCE_SUMMARIES']):
         inf_input = tf.placeholder(tf.int32, [])
         inf_logits = self.create(
             pt.wrap(inf_input).reshape([1, 1]), pt.Phase.infer)
         return inf_input, inf_logits
Example #13
    def create(self, input_placeholder, phase):
        """Creates a 2 layer LSTM model with dropout.

        Args:
          input_placeholder: placeholder of timesteps x sequences

          phase: Phase controls whether or not dropout is active.  In
            training mode we want to perform dropout, but in test we
            want to disable it.

        Returns: The logits layer.

        """
        timesteps = input_placeholder.get_shape()[1].value
        text_in = integerize.reshape_cleavable(input_placeholder)
        with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
            # The embedding lookup must be placed on a cpu.
            with tf.device('/cpu:0'):
                embedded = text_in.embedding_lookup(integerize.CHARS,
                                                    [self.embedding])
        # Because the sequence LSTM expects each timestep to be its
        # own Tensor, we need to cleave the sequence.  Below we can
        # build a stacked 2 layer LSTM by just chaining them together.
        # You can stack as many layers as you want.
        lstm = embedded.cleave_sequence(timesteps)
        assert len(self.lstms)
        for lstm_size in self.lstms:
            lstm = lstm.sequence_lstm(lstm_size)

        # The classifier is much more efficient if it runs across the entire
        # dataset at once, so we want to squash (i.e. uncleave).
        # Note: if phase is test, dropout is a noop.
        return (lstm.squash_sequence().dropout(
            keep_prob=0.8, phase=phase).fully_connected(integerize.CHARS,
                                                        activation_fn=None))
Example #14
def network(): 
    dim_0, dim_1, dim_2 = grid_size
    gt = tf.placeholder(tf.float32, [dim_0, dim_1])	
    conflict_grids = tf.placeholder(tf.float32, [num_timesteps, dim_0, dim_1, dim_2])
    mask = tf.placeholder(tf.float32, [dim_0, dim_1])
    #poverty_grid = tf.placeholder(tf.float32, [1, dim_0, dim_1])

    assert(num_timesteps > 1)
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc_conflicts = network_conflict(conflict_grids)
            rnn_output = network_rnn(enc_conflicts)

    rnn_output = tf.reshape(rnn_output, [1, 64])
    '''
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc_poverty = network_poverty(poverty_grid)

    feats = tf.concat(0, [mean_conflict, enc_poverty])
    '''
    feats = rnn_output
    pred = fc_layers(feats, dim_0)

    return conflict_grids, pred, gt, mask
Example #15
def build_model(sess, image_shape, embedding_dim, batch_size):
    model = CondALI(image_shape=image_shape)

    embeddings = tf.placeholder(tf.float32, [batch_size, embedding_dim],
                                name='conditional_embeddings')

    images = tf.placeholder(tf.float32, [batch_size] + image_shape,
                            name='real_images')

    bottlenecks = prepare_bottlenecks(images)

    with tf.variable_scope("e_net"):
        #Add Cond_AUG to fake latents
        fake_latents = model.get_encoder(bottlenecks)

    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            latents = sample_conditionned_latent_variable(
                embeddings, batch_size, model)
            fake_latents = sample_conditionned_latent_variable(
                fake_latents, batch_size, model)
            fake_images = model.get_generator(latents)
            fake_images_2 = model.get_generator(fake_latents)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if ckt_path.find('.ckpt') != -1:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.global_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, fake_images_2
Example #16
def network(): 
    dim_0, dim_1, dim_2 = grid_size
    gt = tf.placeholder(tf.float32, [dim_0, dim_1])	
    conflict_grids = tf.placeholder(tf.float32, [num_timesteps, dim_0, dim_1, dim_2])
    mask = tf.placeholder(tf.float32, [dim_0, dim_1])
    #poverty_grid = tf.placeholder(tf.float32, [1, dim_0, dim_1])

    assert(num_timesteps > 1)
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc_conflicts = network_conflict(conflict_grids)
    
    mean_conflict = tf.reduce_mean(enc_conflicts, 0)
    mean_conflict = tf.reshape(mean_conflict, [1, 128])
    '''
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc_poverty = network_poverty(poverty_grid)

    feats = tf.concat(0, [mean_conflict, enc_poverty])
    '''
    feats = mean_conflict
    pred = fc_layers(feats, dim_0)

    return conflict_grids, pred, gt, mask
Example #17
def multilayer_fully_connected(images, labels):
  images = pt.wrap(images)
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    return (images
      .fully_connected(100)
      .fully_connected(100)
      .softmax_classifier(10, labels))
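As a usage note: `softmax_classifier` returns a (softmax, loss) pair, and the loss can be handed straight to `pt.apply_optimizer`. A minimal sketch; the placeholder shapes and learning rate are assumptions:

import tensorflow as tf
import prettytensor as pt

image_placeholder = tf.placeholder(tf.float32, [None, 28 * 28])
labels_placeholder = tf.placeholder(tf.float32, [None, 10])
result = multilayer_fully_connected(image_placeholder, labels_placeholder)
# Train on the loss component of the returned pair.
train_op = pt.apply_optimizer(tf.train.GradientDescentOptimizer(0.01),
                              losses=[result.loss])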
Example #18
    def _make_encoder_template(self):
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("encoder"):
            if self.network_type=="fully-connected":
                z_dim = self.latent_dist.dist_flat_dim
                self.encoder_template = (pt.template("x_in").
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(z_dim))

            elif self.network_type=="convolutional":
                z_dim = self.latent_dist.dist_flat_dim
                self.encoder_template = (pt.template("x_in").
                                         reshape([-1] + list(self.image_shape)).
                                         custom_conv2d(64, k_h=4, k_w=4).
                                         apply(tf.nn.elu).
                                         custom_conv2d(128, k_h=4, k_w=4).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(1024).
                                         batch_normalize().
                                         apply(tf.nn.elu).
                                         custom_fully_connected(z_dim))
Example #19
    def _make_decoder_template(self):
        defaults_scope = {
            'phase': pt.UnboundVariable('phase', default=pt.Phase.train),
            'scale_after_normalization': True,
            }
        image_size = self.image_shape[0]
        with pt.defaults_scope(**defaults_scope):
          with tf.variable_scope("decoder"):
            if self.network_type=="fully-connected":
                self.decoder_template = (pt.template("z_in").
                                         custom_fully_connected(1000).
                                         apply(tf.nn.relu).
                                         custom_fully_connected(1000).
                                         batch_normalize().
                                         apply(tf.nn.relu).
                                         custom_fully_connected(self.image_dim))

            elif self.network_type=="convolutional":
                self.decoder_template = \
                    (pt.template("z_in").
                     custom_fully_connected(1024).
                     batch_normalize().
                     apply(tf.nn.relu).
                     custom_fully_connected(image_size/4 * image_size/4 * 128).
                     batch_normalize().
                     apply(tf.nn.relu).
                     reshape([-1, image_size/4, image_size/4, 128]).
                     custom_deconv2d([0, image_size/2, image_size/2, 64],
                                     k_h=4, k_w=4).
                     batch_normalize().
                     apply(tf.nn.relu).
                     custom_deconv2d([0] + list(self.image_shape),
                                     k_h=4, k_w=4).
                     flatten())
Example #20
    def image2feature(self, image_tensor):

        if self.patch_feature_dim == 0:
            return None

        mid_tensor = (
            pt.wrap(image_tensor).
            conv2d(3, 32).
            max_pool(2, 2)
        ).tensor  # 64x64

        hgd = [
            {"type": "conv2d", "depth": 64},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 32 x 32
            {"type": "conv2d", "depth": 128},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 16 x 16
            {"type": "conv2d", "depth": 256},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 8 x 8
            {"type": "conv2d", "depth": 512},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 4 x 4
            {"type": "conv2d", "depth": 512},
        ]

        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            feature_map = hourglass(
                mid_tensor, hgd,
                net_type=self.options["hourglass_type"] if "hourglass_type" in self.options else None
            )

        return feature_map
Example #21
def build_model(sess, embedding_dim, batch_size):
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE/cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')
    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            c = sample_encoded_context(embeddings, model)
            z = tf.random_normal([batch_size, cfg.Z_DIM])
            fake_images = model.get_generator(tf.concat(1, [c, z]))
        with tf.variable_scope("hr_g_net"):
            hr_c = sample_encoded_context(embeddings, model)
            hr_fake_images = model.hr_get_generator(fake_images, hr_c)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if ckt_path.find('.ckpt') != -1:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images
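Once the checkpoint is restored, the returned placeholder and output tensors can be evaluated directly. A minimal sketch; the sizes and the random stand-in embeddings below are illustrative:

import numpy as np
import tensorflow as tf

sess = tf.Session()
embeddings, fake_images, hr_fake_images = build_model(
    sess, embedding_dim=1024, batch_size=64)  # illustrative sizes
cond = np.random.randn(64, 1024).astype(np.float32)  # stand-in embeddings
lr_samples, hr_samples = sess.run([fake_images, hr_fake_images],
                                  {embeddings: cond})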
Example #22
def fc():
	with tf.variable_scope('fc'):
		with pt.defaults_scope(activation_fn=tf.nn.relu):
			fc_seq = pt.wrap(x).sequential()
			fc_seq.fully_connected(10, activation_fn=None)
			fc_seq.softmax()
			return tf.nn.softmax(fc_seq)
Example #23
def main_network(images, training):
    # Wrap the input images as a Pretty Tensor object.
    x_pretty = pt.wrap(images)

    # Pretty Tensor uses special numbers to distinguish between
    # the training and testing phases.
    if training:
        phase = pt.Phase.train
    else:
        phase = pt.Phase.infer

    # Create the convolutional neural network using Pretty Tensor.
    # It is very similar to the previous tutorials, except
    # the use of so-called batch-normalization in the first layer.
    with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):
        y_pred, loss = x_pretty.\
            conv2d(kernel=5, depth=64, name='layer_conv1', batch_normalize=True).\
            max_pool(kernel=2, stride=2).\
            conv2d(kernel=5, depth=64, name='layer_conv2').\
            max_pool(kernel=2, stride=2).\
            flatten().\
            fully_connected(size=256, name='layer_fc1').\
            fully_connected(size=128, name='layer_fc2').\
            softmax_classifier(num_classes=num_classes, labels=y_true)

    return y_pred, loss
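Callers typically build this network twice so that the training and testing graphs share weights; a minimal sketch of that wiring (the scope name is illustrative, and `images` is assumed to be defined as in the snippet above):

with tf.variable_scope('network'):
    y_pred_train, loss = main_network(images, training=True)
with tf.variable_scope('network', reuse=True):
    y_pred_test, _ = main_network(images, training=False)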
Example #24
    def feature2image(self, feature_tensor):

        output_channels = 3*self.recon_dist_param_num
        hgd = [
            {"type": "conv2d", "depth": 64, "decoder_depth": output_channels, "decoder_activation_fn": None},
            {"type": "conv2d", "depth": 64, "decoder_depth": 32},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 40 x 40
            {"type": "conv2d", "depth": 128, "decoder_depth": 64},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 20x20
            {"type": "conv2d", "depth": 256},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 10x10
            {"type": "conv2d", "depth": 512},
            {"type": "skip", "layer_num": 2},
            {"type": "pool", "pool": "max", "kernel": 2, "stride": 2},  # 5x5
            {"type": "conv2d", "depth": 512},
        ]

        with pt.defaults_scope(**self.pt_defaults_scope_value()):
            output_tensor = hourglass(
                feature_tensor, hgd,
                net_type=self.options["hourglass_type"] if "hourglass_type" in self.options else None,
                extra_highlevel_feature=None
            )

        return output_tensor
Example #25
    def posterior_net(self, data_tensor, cond_info_dict):

        with pt.defaults_scope(phase=pt.Phase.test):
            with tf.variable_scope(self._name, reuse=True) as scope:
                condition_tensor = self.condition_subnet(cond_info_dict)
                enc_outputs, enc_extra_outputs = self.encoding_subnet(
                    data_tensor, condition_tensor=condition_tensor)
                reconstructed, dec_extra_outputs = self.decoding_subnet(
                    enc_outputs,
                    condition_tensor=condition_tensor,
                    extra_inputs=enc_extra_outputs['for_decoder'])
                decoded_vis, decoded_param_tensor = self.decoded_vis_subnet(
                    reconstructed)

        decoded_out = dict()
        decoded_out["vis"] = decoded_vis
        decoded_out["param"] = decoded_param_tensor
        aux_out = dict()
        aux_out[
            "decoded"] = decoded_out  # extracts "save" field in self.reconstructed_base_aux_out
        aux_out["encoded"] = enc_extra_outputs["save"]
        aux_out["data"] = data_tensor

        latent_param_dict = dict(value=enc_outputs)

        return latent_param_dict, aux_out
Example #26
def mapping(self, x):
    """
    Inference network to parameterize variational family. It takes
    data x as input and outputs the variational parameters lambda.

    lambda = phi(x)
    """
    with pt.defaults_scope(activation_fn=tf.nn.elu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):
        params = (pt.wrap(x).
                reshape([FLAGS.n_data, 28, 28, 1]).
                conv2d(5, 32, stride=2).
                conv2d(5, 64, stride=2).
                conv2d(5, 128, edges='VALID').
                dropout(0.9).
                flatten().
                fully_connected(self.num_local_vars * 2, activation_fn=None)).tensor

    # Return list of vectors where mean[i], stddev[i] are the
    # parameters of the local variational factor for data point i.
    mean = tf.reshape(params[:, :self.num_local_vars], [-1])
    stddev = tf.reshape(tf.sqrt(tf.exp(params[:, self.num_local_vars:])), [-1])
    return [mean, stddev]
Example #27
def neural_network(x):
    """
    Inference network to parameterize variational family. It takes
    data as input and outputs the variational parameters.

    loc, scale = neural_network(x)
    """
    num_vars = 10
    with pt.defaults_scope(activation_fn=tf.nn.elu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):
        params = (pt.wrap(x).
                reshape([N_DATA, 28, 28, 1]).
                conv2d(5, 32, stride=2).
                conv2d(5, 64, stride=2).
                conv2d(5, 128, edges='VALID').
                dropout(0.9).
                flatten().
                fully_connected(num_vars * 2, activation_fn=None)).tensor

    # Return list of vectors where mean[i], stddev[i] are the
    # parameters of the local variational factor for data point i.
    loc = tf.reshape(params[:, :num_vars], [-1])
    scale = tf.reshape(tf.sqrt(tf.exp(params[:, num_vars:])), [-1])
    return [loc, scale]
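The returned `loc`/`scale` parameterize a fully factorized Gaussian variational family, so a differentiable sample follows from the reparameterization trick. A minimal sketch, assuming `x` is already defined:

loc, scale = neural_network(x)
eps = tf.random_normal(tf.shape(loc))
z_sample = loc + scale * eps  # differentiable w.r.t. loc and scale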
Example #28
def build_model(sess, embedding_dim, batch_size):
    model = CondGAN(
        lr_imsize=cfg.TEST.LR_IMSIZE,
        hr_lr_ratio=int(cfg.TEST.HR_IMSIZE / cfg.TEST.LR_IMSIZE))

    embeddings = tf.placeholder(
        tf.float32, [batch_size, embedding_dim],
        name='conditional_embeddings')
    with pt.defaults_scope(phase=pt.Phase.test):
        with tf.variable_scope("g_net"):
            c = sample_encoded_context(embeddings, model)
            z = tf.random_normal([batch_size, cfg.Z_DIM])
            fake_images = model.get_generator(tf.concat(1, [c, z]))
        with tf.variable_scope("hr_g_net"):
            hr_c = sample_encoded_context(embeddings, model)
            hr_fake_images = model.hr_get_generator(fake_images, hr_c)

    ckt_path = cfg.TEST.PRETRAINED_MODEL
    if ckt_path.find('.ckpt') != -1:
        print("Reading model parameters from %s" % ckt_path)
        saver = tf.train.Saver(tf.all_variables())
        saver.restore(sess, ckt_path)
    else:
        print("Input a valid model path.")
    return embeddings, fake_images, hr_fake_images
Example #29
def inference(x):
    """
    Runs the model. Named "inference" to match the convention in TensorFlow's CIFAR tutorial.
    """
    z_p = tf.random_normal((batch_size, hidden_size), 0,
                           1)  # normal dist for GAN
    eps = tf.random_normal((batch_size, hidden_size), 0,
                           1)  # normal dist for VAE

    with pt.defaults_scope(activation_fn=tf.nn.elu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):

        with tf.variable_scope("enc"):
            z_x_mean, z_x_log_sigma_sq = encoder(x)  # get z from the input
        with tf.variable_scope("gen"):
            z_x = tf.add(z_x_mean,
                         tf.mul(tf.sqrt(tf.exp(z_x_log_sigma_sq)),
                                eps))  # grab our actual z
            x_tilde = generator(z_x)
        with tf.variable_scope("dis"):
            _, l_x_tilde = discriminator(x_tilde)
        with tf.variable_scope("gen", reuse=True):
            x_p = generator(z_p)
        with tf.variable_scope("dis", reuse=True):
            d_x, l_x = discriminator(x)  # positive examples
        with tf.variable_scope("dis", reuse=True):
            d_x_p, _ = discriminator(x_p)
        return z_x_mean, z_x_log_sigma_sq, z_x, x_tilde, l_x_tilde, x_p, d_x, l_x, d_x_p, z_p
Example #30
def mapping(self, x):
    """
    lambda = phi(x)
    """
    with pt.defaults_scope(
        activation_fn=tf.nn.elu,
        batch_normalize=True,
        learned_moments_update_rate=0.0003,
        variance_epsilon=0.001,
        scale_after_normalization=True,
    ):
        params = (
            pt.wrap(x)
            .reshape([FLAGS.n_data, 28, 28, 1])
            .conv2d(5, 32, stride=2)
            .conv2d(5, 64, stride=2)
            .conv2d(5, 128, edges="VALID")
            .dropout(0.9)
            .flatten()
            .fully_connected(self.num_vars * 2, activation_fn=None)
        ).tensor

    mean = params[:, : self.num_vars]
    stddev = tf.sqrt(tf.exp(params[:, self.num_vars :]))
    return [mean, stddev]
Example #31
 def testVariableCollections(self):
     with prettytensor.defaults_scope(variable_collections=['a']):
         self.MultiLayer()
     self.assertTrue(tf.get_collection('a'))
     self.assertEqual(tf.get_collection('a'),
                      tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
     self.assertTrue(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
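The test above shows that the `variable_collections` default routes every variable a layer creates into the named collection (in addition to `TRAINABLE_VARIABLES`), which is useful for optimizing sub-networks separately. A minimal sketch with illustrative names and a dummy loss:

import tensorflow as tf
import prettytensor as pt

z = tf.zeros([8, 100])
with pt.defaults_scope(variable_collections=['g_vars']):
    fake = pt.wrap(z).fully_connected(784)

g_vars = tf.get_collection('g_vars')  # only the variables created above
loss = tf.reduce_mean(tf.square(fake.tensor))
g_train = tf.train.AdamOptimizer(1e-4).minimize(loss, var_list=g_vars)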
Example #32
def build_model(image_size, num_channels, num_classes):
    x = tf.placeholder(tf.float32,
                       shape=[None, image_size * image_size],
                       name="x")
    x_image = tf.reshape(x, [-1, image_size, image_size, num_channels])
    y_true = tf.placeholder(tf.float32, shape=[None, 6], name="y_true")
    y_true_cls = tf.argmax(y_true, axis=1)

    # create wrapper tensor
    x_pretty = pt.wrap(x_image)
    with pt.defaults_scope(activation_fn=tf.nn.elu):
        y_pred, loss = x_pretty. \
            conv2d(5, 16, name='layer_conv1'). \
            max_pool(2, 2). \
            conv2d(3, 32, name='layer_conv2'). \
            max_pool(2, 2). \
            conv2d(3, 64, name= 'layer_conv3'). \
            max_pool(2, 2).\
            flatten(). \
            fully_connected(128, name='layer_fc1'). \
            fully_connected(32, name= 'layer_fc2'). \
            softmax_classifier(num_classes=num_classes, labels=y_true)
    optimizer = tf.train.AdamOptimizer(1e-4).minimize(loss)
    y_pred_cls = tf.argmax(y_pred, axis=1)
    prediction_accuracy = tf.reduce_mean(
        tf.cast(tf.equal(y_true_cls, y_pred_cls), tf.float32))

    return optimizer, x, y_true, prediction_accuracy
Example #33
def main(argv=None):
    input.init_dataset_constants()
    num_images = GRID[0] * GRID[1]
    FLAGS.batch_size = num_images
    with tf.Graph().as_default():
        g_template = model.generator_template()
        z = tf.placeholder(tf.float32, shape=[FLAGS.batch_size, FLAGS.z_size])
        #np.random.seed(1337) # generate same random numbers each time
        noise = np.random.normal(size=(FLAGS.batch_size, FLAGS.z_size))
        with pt.defaults_scope(phase=pt.Phase.test):
            gen_images_op, _ = pt.construct_all(g_template, input=z)

        sess = tf.Session()
        init_variables(sess)
        gen_images, = sess.run([gen_images_op], feed_dict={z: noise})
        gen_images = (gen_images + 1) / 2

        sess.close()

        fig = plt.figure(1)
        grid = ImageGrid(fig, 111, nrows_ncols=GRID, axes_pad=0.1)
        for i in xrange(num_images):
            im = gen_images[i]
            axis = grid[i]
            axis.axis('off')
            axis.imshow(im)

        plt.show()
        fig.savefig('montage.png', dpi=100, bbox_inches='tight')
Example #34
def create_model(text_in, timesteps, phase):
  """Creates a 2 layer LSTM model with dropout.

  Args:
    text_in: The input text as ASCII ordinals in a Tensor.
    timesteps: The number of timesteps in the sequence.
    phase: Phase controls whether or not dropout is active.  In training mode
      we want to perform dropout, but in test we want to disable it.
  Returns:
    The logits.
  """
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    # The embedding lookup must be placed on a cpu.
    with tf.device('/cpu:0'):
      embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
    # Because the sequence LSTM expects each timestep to be its own Tensor,
    # we need to cleave the sequence.
    # Below we can build a stacked 2 layer LSTM by just chaining them together.
    # You can stack as many layers as you want.
    lstm = (embedded
            .cleave_sequence(timesteps)
            .sequence_lstm(LOWER)
            .sequence_lstm(UPPER))

    # The classifier is much more efficient if it runs across the entire
    # dataset at once, so we want to squash (i.e. uncleave).
    # Note: if phase is test, dropout is a noop.
    return (lstm.squash_sequence()
            .dropout(keep_prob=0.8, phase=phase)
            .fully_connected(CHARS, activation_fn=None))
Example #35
def multilayer_fully_connected(images, labels):
    images = pt.wrap(images)
    with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
        return (images.flatten().\
            fully_connected(100).\
            fully_connected(100).\
            softmax_classifier(10, labels))
Example #36
 def init_op(self):
     self.build_placeholder()
     with pt.defaults_scope(phase=pt.Phase.train):
         with tf.variable_scope("vae_mnist_model"):
             vae_loss = self.compute_losses(self.input_images)
             print("VAE losses computed")
             self.prepare_trainer(loss=vae_loss)
Example #37
def fc():
    with tf.variable_scope('fc'):
        with pt.defaults_scope(activation_fn=tf.nn.relu):
            fc_seq = pt.wrap(x).sequential()
            fc_seq.fully_connected(10, activation_fn=None)
            fc_seq.softmax()
            return tf.nn.softmax(fc_seq)
Example #38
def inference(x224,x64):
    z_p = tf.random_normal((batch_size, hidden_size), 0, 1)
    eps = tf.random_normal((batch_size, hidden_size), 0, 1)  # normal dist for VAE
    with pt.defaults_scope(activation_fn=tf.nn.elu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):
        with tf.variable_scope("enc"):
                vgg_net = Vgg19('./vgg19.npy', codelen=hidden_size)
                vgg_net.build(x224, beta_nima, train_model)
                z_x_mean = vgg_net.fc9
                z_x_log_sigma_sq = vgg_net.fc10

        with tf.variable_scope("gen"):
            z_x = tf.add(z_x_mean,
                         tf.multiply(tf.sqrt(tf.exp(z_x_log_sigma_sq)), eps))  # grab our actual z
            x_tilde = generator(z_x)
            # x_tilde = generator(z_x_mean)
        with tf.variable_scope("dis"):
            _, l_x_tilde = discriminator(x_tilde)
        with tf.variable_scope("gen", reuse=True):
            x_p = generator(z_p)
        with tf.variable_scope("dis", reuse=True):
            d_x, l_x = discriminator(x64)
        with tf.variable_scope("dis", reuse=True):
            d_x_p, _ = discriminator(x_p)
        return z_x_mean,z_x_log_sigma_sq,x_tilde, l_x_tilde, z_x,x_p, d_x, l_x, d_x_p, z_p
Example #39
def main_network(images, training):
    # Wrap the input images as a Pretty Tensor object.
    x_pretty = pt.wrap(images)

    # Pretty Tensor uses special numbers to distinguish between
    # the training and testing phases.
    if training:
        phase = pt.Phase.train
    else:
        phase = pt.Phase.infer

    # CNN using Pretty Tensor.
    #we can now quickly chain any number of layers to define neural networks
    with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):
        y_pred, loss = x_pretty.\
            conv2d(kernel=5, depth=64, name='layer_conv1', batch_normalize=True).\
            max_pool(kernel=2, stride=2).\
            conv2d(kernel=5, depth=64, name='layer_conv2').\
            max_pool(kernel=2, stride=2).\
            flatten().\
            fully_connected(size=256, name='layer_fc1').\
            fully_connected(size=128, name='layer_fc2').\
            softmax_classifier(num_classes=num_classes, labels=y_true)

    return y_pred, loss
Example #40
def neural_network(x):
    """
    Inference network to parameterize variational family. It takes
    data as input and outputs the variational parameters.

    loc, scale = neural_network(x)
    """
    n_vars = 10
    with pt.defaults_scope(activation_fn=tf.nn.elu,
                           batch_normalize=True,
                           learned_moments_update_rate=0.0003,
                           variance_epsilon=0.001,
                           scale_after_normalization=True):
        params = (pt.wrap(x).
                reshape([N_MINIBATCH, 28, 28, 1]).
                conv2d(5, 32, stride=2).
                conv2d(5, 64, stride=2).
                conv2d(5, 128, edges='VALID').
                dropout(0.9).
                flatten().
                fully_connected(n_vars * 2, activation_fn=None)).tensor

    # Return list of vectors where mean[i], stddev[i] are the
    # parameters of the local variational factor for data point i.
    loc = tf.reshape(params[:, :n_vars], [-1])
    scale = tf.reshape(tf.sqrt(tf.exp(params[:, n_vars:])), [-1])
    return [loc, scale]
Example #41
def build_alexnet(ims, tr):
    x_pretty = pt.wrap(ims)

    if tr:
        state = pt.Phase.train
    else:
        state = pt.Phase.infer

    with pt.defaults_scope(activation_fn=tf.nn.relu, phase=state):
        prediction, cost = x_pretty.\
            conv2d(kernel=5, depth=64, name='conv1', batch_normalize=True).\
            max_pool(kernel=2, stride=2).\
            conv2d(kernel=5, depth=64, name='conv2', batch_normalize=True).\
            max_pool(kernel=2, stride=2).\
            conv2d(kernel=5, depth=64, name='conv3').\
            conv2d(kernel=5, depth=64, name='conv4').\
            conv2d(kernel=5, depth=64, name='conv5').\
            flatten().\
            fully_connected(size=256, name='fc1').\
            fully_connected(size=128, name='fc2').\
            fully_connected(size=64, name='fc3').\
            softmax_classifier(num_classes=classes_of_image, labels=im_true_lb)

    return prediction, cost
Example #42
def create_model(text_in, timesteps, phase):
  """Creates a 2 layer LSTM model with dropout.

  Args:
    text_in: The input text as ASCII ordinals in a Tensor.
    timesteps: The number of timesteps in the sequence.
    phase: Phase controls whether or not dropout is active.  In training mode
      we want to perform dropout, but in test we want to disable it.
  Returns:
    The logits.
  """
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    # The embedding lookup must be placed on a cpu.
    with tf.device('/cpu:0'):
      embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
    # Because the sequence LSTM expects each timestep to be its own Tensor,
    # we need to cleave the sequence.
    # Below we can build a stacked 2 layer LSTM by just chaining them together.
    # You can stack as many layers as you want.
    lstm = (embedded
            .cleave_sequence(timesteps)
            .sequence_lstm(LOWER)
            .sequence_lstm(UPPER))

    # The classifier is much more efficient if it runs across the entire
    # dataset at once, so we want to squash (i.e. uncleave).
    # Note: if phase is test, dropout is a noop.
    return (lstm.squash_sequence()
            .dropout(keep_prob=0.8, phase=phase)
            .fully_connected(CHARS, activation_fn=None))
Example #43
def getConv_features(images, training):

    # Wrap the input images as a Pretty Tensor object.
    x_pretty = pt.wrap(images)

    # Pretty Tensor uses special numbers to distinguish between
    # the training and testing phases.
    if training:
        phase = pt.Phase.train
    else:
        phase = pt.Phase.infer

    # Create the convolutional neural network using Pretty Tensor.
    # It is very similar to the previous tutorials, except
    # the use of so-called batch-normalization in the first layer.
    with pt.defaults_scope(activation_fn=tf.nn.relu, phase=phase):
        p_input_flatten = x_pretty. \
            conv2d(kernel=5, depth=64, name='layer_conv1_feature_extraction', batch_normalize=True). \
            max_pool(kernel=2, stride=2). \
            conv2d(kernel=5, depth=64, name='layer_conv2_feature_extraction'). \
            max_pool(kernel=2, stride=2). \
            flatten(). \
            fully_connected(size=256, name='layer_fc1_feature_extraction')

    return p_input_flatten
Example #44
 def testVariableCollections(self):
   with prettytensor.defaults_scope(variable_collections=['a']):
     self.MultiLayer()
   self.assertTrue(tf.get_collection('a'))
   self.assertEqual(
       tf.get_collection('a'),
       tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
   self.assertTrue(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES))
Example #45
	def output( self, inputs, phase = pt.Phase.train ):

		inputs = pt.wrap( inputs )
		with pt.defaults_scope( phase = phase, activation_fn = self.nonlinearity, l2loss = self.l2loss ):
			for layer in self.hidden_layers:
				inputs = inputs.fully_connected( layer )

			return inputs.fully_connected( self.dim_output, activation_fn = None )
Example #46
 def testUnboundVariableAsDefault(self):
   """The same unbound_var can be used multiple times in a graph."""
   input_pt = self.Wrap(self.input)
   with prettytensor.defaults_scope(
       value=prettytensor.UnboundVariable('key')):
     x = input_pt.ValidateMethod(self)
   self.assertTrue(isinstance(x, pretty_tensor_class._DeferredLayer))
   x.construct(key=KEY)
Example #47
 def testConvBatchNorm(self):
   st = self.input_layer.sequential()
   st.reshape([DIM_SAME, DIM_SAME, DIM_SAME, 1])
   with prettytensor.defaults_scope(batch_normalize=True,
                                    learned_moments_update_rate=0.0003,
                                    variance_epsilon=0.001,
                                    scale_after_normalization=False):
     st.conv2d(3, 2)
   self.assertEqual(
       2, len(tf.get_collection(prettytensor.GraphKeys.UPDATE_OPS)))
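The assertion is the important detail: with `batch_normalize=True`, PrettyTensor registers the moving-moment updates under `pt.GraphKeys.UPDATE_OPS`. When assembling a train op by hand, one way to make sure they run is to group them in (here `train_step` is an assumed, pre-existing op):

update_ops = tf.get_collection(pt.GraphKeys.UPDATE_OPS)
train_op = tf.group(train_step, *update_ops)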
Example #48
 def test_default_parameter_modifier(self):
   called = set()
   def parameter_modifier(var_name, variable, unused_phase):
     called.add(var_name)
     return variable
   with prettytensor.defaults_scope(parameter_modifier=parameter_modifier):
     st = self.input_layer.sequential()
     st.flatten()
     st.fully_connected(5)
   self.assertEqual({'weights', 'bias'}, called)
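The signature exercised above is `parameter_modifier(var_name, variable, phase)`: it receives each parameter as it is created and must return the tensor to use in its place. A minimal sketch with an illustrative modifier that adds weight noise during training (the phase check is an assumption about how callers pass the phase):

import tensorflow as tf
import prettytensor as pt

def noisy_weights(var_name, variable, phase):
    # Perturb only the weights, and only during training (illustrative).
    if var_name == 'weights' and phase == pt.Phase.train:
        return variable + tf.random_normal(tf.shape(variable), stddev=0.01)
    return variable

images = tf.zeros([4, 28, 28, 1])
with pt.defaults_scope(parameter_modifier=noisy_weights):
    logits = pt.wrap(images).flatten().fully_connected(10)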
Example #49
def network(): 
    gt = tf.placeholder(tf.float32, [conflict_grid_size[0], conflict_grid_size[1]])	
    conflict_grids = tf.placeholder(tf.float32, [num_timesteps,
                                                 conflict_grid_size[0],
                                                 conflict_grid_size[1],
                                                 conflict_grid_size[2]])
    poverty_grid = tf.placeholder(tf.float32, [poverty_grid_size[0],
                                               poverty_grid_size[1],
                                               poverty_grid_size[2],
                                               poverty_grid_size[3]])
    climate_grids = tf.placeholder(tf.float32, [num_timesteps,
                                                climate_grid_size[0],
                                                climate_grid_size[1],
                                                climate_grid_size[2]])
    
    assert(num_timesteps > 1)
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc_conflicts = network_conflict(conflict_grids)
            enc_climate = network_climate(climate_grids)
            enc_conflict_climate = tf.concat(1, [enc_conflicts, enc_climate])
            rnn_output = network_rnn(enc_conflict_climate)
    
    rnn_output = tf.reshape(rnn_output, [1, 128])

    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc_poverty = network_poverty(poverty_grid)
    
    feats = tf.concat(1, [rnn_output, enc_poverty])

    pred = fc_layers(feats, conflict_grid_size[0])

    return conflict_grids, climate_grids, poverty_grid, pred, gt
Example #50
def lenet5(images, labels):
  images = pt.wrap(images)
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    return (images
      .reshape([-1, 28, 28, 1])
      .conv2d(5, 20)
      .max_pool(2, 2)
      .conv2d(5, 50)
      .max_pool(2, 2)
      .flatten()
      .fully_connected(500)
      .softmax_classifier(10, labels))
Example #51
def create_model(text_in, labels, timesteps, per_example_weights, phase=pt.Phase.train):
  with pt.defaults_scope(phase=phase, l2loss=0.00001):
    with tf.device('/cpu:0'):
      embedded = text_in.embedding_lookup(CHARS, [EMBEDDING_SIZE])
    lstm = (embedded
      .cleave_sequence(timesteps)
      .sequence_lstm(CHARS))
    return (lstm
      .squash_sequence()
      .fully_connected(32, activation_fn=tf.nn.relu)
      .dropout(0.7)
      .softmax_classifier(SEXES, labels, per_example_weights=per_example_weights))
Example #52
def cnn():
	with tf.variable_scope('cnn'):
		with pt.defaults_scope(activation_fn=tf.nn.relu):
			x_reshape = tf.reshape(x, [-1, 28, 28, 1])
			cnn_seq = pt.wrap(x_reshape).sequential()
			cnn_seq.conv2d(7, 16)
			cnn_seq.max_pool(2, 2)
			cnn_seq.conv2d(7, 16)
			cnn_seq.max_pool(2, 2)
			cnn_seq.flatten()
			cnn_seq.fully_connected(32, activation_fn=tf.nn.relu)
			cnn_seq.fully_connected(10, activation_fn=None)
			return tf.nn.softmax(cnn_seq)
Example #53
 def neural_network(self, z):
     """p = neural_network(z)"""
     with pt.defaults_scope(activation_fn=tf.nn.elu,
                            batch_normalize=True,
                            learned_moments_update_rate=0.0003,
                            variance_epsilon=0.001,
                            scale_after_normalization=True):
         return (pt.wrap(z).
                 reshape([N_DATA, 1, 1, self.num_vars]).
                 deconv2d(3, 128, edges='VALID').
                 deconv2d(5, 64, edges='VALID').
                 deconv2d(5, 32, stride=2).
                 deconv2d(5, 1, stride=2, activation_fn=tf.nn.sigmoid).
                 flatten()).tensor
Example #54
  def testMethodRegistrationWithDefaults(self):
    # pylint: disable=unused-variable,invalid-name
    @prettytensor.Register(assign_defaults='funny_name')
    def test_method3(_, funny_name='not none'):
      return tf.constant(funny_name)

    result = self.RunTensor(self.input_layer.test_method3())
    self.assertEqual(b'not none', result)
    result = self.RunTensor(self.input_layer.test_method3(funny_name='other'))
    self.assertEqual(b'other', result)
    with prettytensor.defaults_scope(funny_name='something'):
      result = self.RunTensor(self.input_layer.test_method3())
      self.assertEqual(b'something', result)
      result = self.RunTensor(self.input_layer.test_method3(funny_name='other'))
      self.assertEqual(b'other', result)
Example #55
def generative_network(z):
  """Generative network to parameterize generative model. It takes
  latent variables as input and outputs the likelihood parameters.

  logits = neural_network(z)
  """
  with pt.defaults_scope(activation_fn=tf.nn.elu,
                         batch_normalize=True,
                         scale_after_normalization=True):
    return (pt.wrap(z).
            reshape([M, 1, 1, d]).
            deconv2d(3, 128, edges='VALID').
            deconv2d(5, 64, edges='VALID').
            deconv2d(5, 32, stride=2).
            deconv2d(5, 1, stride=2, activation_fn=None).
            flatten()).tensor
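Because the last deconvolution uses `activation_fn=None`, the flattened output is logits; a common use is as parameters of pixel-wise Bernoullis in the reconstruction term. A sketch, where `x` (flattened binary data of shape `[M, 28 * 28]`) is an assumption:

logits = generative_network(z)
log_lik = -tf.reduce_sum(
    tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=x))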
Example #56
def lenet5(images, labels):
  """Creates a multi layer convolutional network.

  The architecture is similar to that defined in LeNet 5.
  Please change this to experiment with architectures.

  Args:
    images: The input images.
    labels: The labels as dense one-hot vectors.
  Returns:
    A softmax result.
  """
  images = pt.wrap(images)
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    return (images.conv2d(5, 20).max_pool(2, 2).conv2d(5, 50).max_pool(2, 2)
            .flatten().fully_connected(500).softmax_classifier(10, labels))
Example #57
def network(): 
    gt = tf.placeholder(tf.float32, 1)
    input_tensor = tf.placeholder(tf.float32,
                                  [num_timesteps, len_feats])

    assert(num_timesteps > 1)
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            rnn_feats = rnn(input_tensor) # dim = [batch_size, hidden]
            rnn_feats = tf.reshape(rnn_feats, [batch_size, -1])
    pred = fc_layers(rnn_feats)

    return input_tensor, pred, gt
Example #58
 def testConvBatchNormArgumentOverride(self):
   st = self.input_layer.sequential()
   st.reshape([DIM_SAME, DIM_SAME, DIM_SAME, 1])
   with prettytensor.defaults_scope(batch_normalize=True,
                                    learned_moments_update_rate=0.0003,
                                    variance_epsilon=0.001,
                                    scale_after_normalization=True):
     st.conv2d(3, 2,
               batch_normalize=prettytensor.BatchNormalizationArguments(
                   scale_after_normalization=False))
   self.assertEqual(2,
                    len(tf.get_collection(prettytensor.GraphKeys.UPDATE_OPS)))
   self.assertTrue(tf.get_collection(tf.GraphKeys.VARIABLES, '.*/beta'))
   self.assertFalse(tf.get_collection(tf.GraphKeys.VARIABLES, '.*/gamma'))
   self.assertTrue(tf.get_collection(
       tf.GraphKeys.VARIABLES, '.*/moving_variance'))
   self.assertTrue(tf.get_collection(tf.GraphKeys.VARIABLES, '.*/moving_mean'))
Example #59
def multilayer_fully_connected(images, labels):
  """Creates a multi layer network of fully_connected layers.

  Each layer is 100 neurons.  Please change this to experiment with
  architectures.

  Args:
    images: The input images.
    labels: The labels as dense one-hot vectors.
  Returns:
    A softmax result.
  """
  # Pretty Tensor is a thin wrapper on Tensors.
  # Change this method to experiment with other architectures
  images = pt.wrap(images)
  with pt.defaults_scope(activation_fn=tf.nn.relu, l2loss=0.00001):
    return (images.flatten().fully_connected(100).fully_connected(100)
            .softmax_classifier(10, labels))
Example #60
def network(): 
    dim_0, dim_1, dim_2 = grid_size
    gt = tf.placeholder(tf.float32, [dim_0, dim_1])
    input_tensor = tf.placeholder(tf.float32, [num_timesteps, dim_0, dim_1, dim_2])
    mask = tf.placeholder(tf.float32, [dim_0, dim_1])

    assert(num_timesteps > 1)
    with tf.variable_scope("model") as scope:
        with pt.defaults_scope(activation_fn=tf.nn.relu,
                               batch_normalize=True,
                               learned_moments_update_rate=0.0003,
                               variance_epsilon=0.001,
                               scale_after_normalization=True):
            enc = network_grid(input_tensor)
            rnn_feats = network_rnn(enc)
            rnn_feats = tf.reshape(rnn_feats, [1, 64])
            pred = fc_layers(rnn_feats, dim_0)

    return input_tensor, pred, gt, mask