Example #1
  def train(self, epochs_to_train=5):
    meta = self.get_meta()
    ut.print_time('train started: \n%s' % ut.to_file_name(meta))
    ut.configure_folders(FLAGS, meta)

    self.fetch_datasets(self._activation)
    self.build_model()
    self._register_training_start()

    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      self._saver = tf.train.Saver()

      if FLAGS.load_state and os.path.exists(self.get_checkpoint_path()):
        self._saver.restore(sess, self.get_checkpoint_path())
        ut.print_info('Restore requested. Previous epoch: %d' % self.get_past_epochs(), color=31)

      # MAIN LOOP
      for current_epoch in xrange(epochs_to_train):
        start = time.time()
        feed = self._get_epoch_dataset()
        for batch in feed:

          encoding, reconstruction, loss, _, _ = sess.run(
            [self._encode, self._decode, self._reco_loss, self._train, self._step],
            feed_dict={self._input: batch[0], self._reconstruction: batch[0]})
          self._register_batch(loss)
        self._register_epoch(current_epoch, epochs_to_train, time.time()-start, sess)
      self._writer = tf.train.SummaryWriter(FLAGS.logdir, sess.graph)
      meta = self._register_training()
    return meta, self._stats['epoch_accuracy']
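Note: these examples target Python 2 and the pre-1.0 TensorFlow API. `xrange` becomes `range` under Python 3, and the two deprecated calls above map to TensorFlow 1.x like this:

  sess.run(tf.global_variables_initializer())                     # replaces tf.initialize_all_variables()
  self._writer = tf.summary.FileWriter(FLAGS.logdir, sess.graph)  # replaces tf.train.SummaryWriter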
Example #2
    def save_meta(self, meta=None):
        if meta is None:
            meta = self.get_meta()

        ut.configure_folders(FLAGS, meta)
        meta['a'] = 's'
        meta['opt'] = str(meta['opt']).split('.')[-1][:-2]  # keep only the class name, e.g. 'AdamOptimizer'
        meta['input_path'] = FLAGS.input_path
        path = os.path.join(FLAGS.save_path, 'meta.txt')
        with open(path, 'w') as f:
            json.dump(meta, f)
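The meta file written above is plain JSON, so it can be read back directly. A minimal sketch (the helper name `read_meta` is hypothetical; the `meta.txt` filename and `FLAGS.save_path` come from `save_meta` above):

  import json
  import os

  def read_meta(save_path):
      # Load the dict that save_meta serialized to <save_path>/meta.txt.
      with open(os.path.join(save_path, 'meta.txt')) as f:
          return json.load(f)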
Example #3
 def load_meta(self, save_path):
   meta = super(FF_model, self).load_meta(save_path)
   self._weight_init = meta['init']
   self._optimizer = tf.train.AdamOptimizer \
     if 'Adam' in meta['opt'] \
     else tf.train.AdadeltaOptimizer
   self._activation = act.sigmoid
   self.set_layer_sizes(meta['h'])
   # FLAGS.stride = int(meta['str']) if 'str' in meta else 2
   ut.configure_folders(FLAGS, self.get_meta())
   return meta
Example #4
 def load_meta(self, save_path):
     meta = super(IGNModel, self).load_meta(save_path)
     self._weight_init = meta['init']
     self._optimizer = tf.train.AdamOptimizer \
       if 'Adam' in meta['opt'] \
       else tf.train.AdadeltaOptimizer
     self._activation = act.sigmoid
     self.layer_encoder = meta['h'][0]
     self.layer_narrow = meta['h'][1]
     self.layer_decoder = meta['h'][2]
     FLAGS.gradient_proportion = float(meta['div'])
     ut.configure_folders(FLAGS, self.get_meta())
     return meta
Example #5
 def load_meta(self, save_path):
   meta = super(IGN_model, self).load_meta(save_path)
   self._weight_init = meta['init']
   self._optimizer = tf.train.AdamOptimizer \
     if 'Adam' in meta['opt'] \
     else tf.train.AdadeltaOptimizer
   self._activation = act.sigmoid
   self.layer_encoder = meta['h'][0]
   self.layer_narrow = meta['h'][1]
   self.layer_decoder = meta['h'][2]
   FLAGS.gradient_proportion = float(meta['div'])
   ut.configure_folders(FLAGS, self.get_meta())
   return meta
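The `'Adam' in meta['opt']` test above works because `save_meta` (Example #2) stores only the optimizer's class name. A small illustration of that round trip, assuming the TF 1.x class path:

  s = str(tf.train.AdamOptimizer)   # "<class 'tensorflow.python.training.adam.AdamOptimizer'>"
  name = s.split('.')[-1][:-2]      # "AdamOptimizer" -- what save_meta stores in meta['opt']
  opt = tf.train.AdamOptimizer if 'Adam' in name else tf.train.AdadeltaOptimizer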
Example #6
  def train(self, epochs_to_train=5):
    meta = self.get_meta()
    ut.print_time('train started: \n%s' % ut.to_file_name(meta))
    # return meta, np.random.randn(epochs_to_train)
    ut.configure_folders(FLAGS, meta)

    self._dataset, self._filters = self.fetch_datasets(self._activation)
    self.build_model()
    self._register_training_start()

    with tf.Session() as sess:
      sess.run(tf.initialize_all_variables())
      self._saver = tf.train.Saver()

      if FLAGS.load_state and os.path.exists(self.get_checkpoint_path()):
        self._saver.restore(sess, self.get_checkpoint_path())
        ut.print_info('Restore requested. Previous epoch: %d' % self.get_past_epochs(), color=31)

      # MAIN LOOP
      for current_epoch in xrange(epochs_to_train):

        feed, permutation = self._get_epoch_dataset()
        for batch in feed:
          filter_ = batch[1][0]
          assert batch[1][0, 0] == batch[1][-1, 0]  # the whole batch carries a single filter id
          encoding, = sess.run([self._encode], feed_dict={self._input: batch[0]})   # 1.1 encode forward
          clamped_enc, vae_grad = _clamp(encoding, filter_)                         # 1.2 clamp

          sess.run(self._assign_clamped, feed_dict={self._clamped: clamped_enc})
          reconstruction, loss, clamped_gradient, _ = sess.run(          # 2.1 decode forward+backward
            [self._decode, self._decoder_loss, self._clamped_grad, self._train_decoder],
            feed_dict={self._clamped: clamped_enc, self._reconstruction: batch[0]})

          declamped_grad = _declamp_grad(vae_grad, clamped_gradient, filter_)  # 2.2 prepare gradient
          _, step = sess.run(                                            # 3.0 encode backward path
            [self._train_encoder, self._step],
            feed_dict={self._input: batch[0], self._encoding: encoding-declamped_grad})          # Profit

          self._register_batch(batch, encoding, reconstruction, loss)
        self._register_epoch(current_epoch, epochs_to_train, permutation, sess)
      self._writer = tf.train.SummaryWriter(FLAGS.logdir, sess.graph)
      meta = self._register_training()
    return meta, self._stats['epoch_accuracy']
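`_clamp` and `_declamp_grad` are not shown on this page. From the call sites alone one can infer the shape contract: `_clamp` returns a partially fixed copy of the encoding plus the displacement the clamping introduced, and `_declamp_grad` merges that displacement with the gradient the decoder pass produced at the clamped encoding. The sketch below only illustrates that contract; the actual clamping rule (which units are pinned, and to what) is an assumption, not the source's:

  import numpy as np

  def _clamp(encoding, filter_):
      # Assumed: pin one latent unit, chosen by the batch's filter id, and
      # record how far the encoding moved so the encoder can be corrected.
      clamped = encoding.copy()
      clamped[:, int(filter_)] = 0.0   # hypothetical clamping rule
      vae_grad = clamped - encoding    # displacement introduced by clamping
      return clamped, vae_grad

  def _declamp_grad(vae_grad, clamped_gradient, filter_):
      # Assumed: combine the clamping displacement with the gradient that
      # reached the clamped encoding during the decoder's backward pass.
      return vae_grad + clamped_gradient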
Example #7
 def __init__(self, optimizer=tf.train.AdamOptimizer, need_folders=True):
     self.optimizer_constructor = optimizer
     FLAGS.input_name = inp.get_input_name(FLAGS.input_path)
     if need_folders:
         ut.configure_folders(FLAGS)
     ut.print_flags(FLAGS)
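Assuming a class exposing this constructor (the `Model` name below is hypothetical), a call site passes the optimizer as a class rather than an instance, since it is stored in `optimizer_constructor`:

  # Hypothetical call site: choose the optimizer up front, skip folder setup.
  model = Model(optimizer=tf.train.GradientDescentOptimizer, need_folders=False)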