Example #1 — a unit test exercising nql.nonneg_softmax and nql.nonneg_crossentropy on a two-row minibatch of grid cells.
  def test_tf_utilities(self):
    # make a minibatch of 2 copies of cell_2_2, to
    # test broadcasting
    row = self.context.one_hot_numpy_array(cell(2, 2), 'place_t')
    x = self.context.constant(np.vstack([row, row]), 'place_t')

    def near(x):
      return x.follow('n') + x.follow('s') + x.follow('e') + x.follow('w')

    def lr_near(x):
      return near(x).weighted_by('distance_to', 'ul')

    # softmax should split most of the weight between cells (2, 3) and (3, 2)

    sm = nql.nonneg_softmax(lr_near(x).tf)
    sm_list = self.context.as_dicts(self.session.run(sm), 'place_t')
    for sm_dict in sm_list:
      self.assertAllInRange(sm_dict[cell(2, 3)], 0.4, 0.5)
      self.assertAllInRange(sm_dict[cell(3, 2)], 0.4, 0.5)
    # construct a target
    target_nq = (self.context.one(cell(2, 3), 'place_t')
                 | self.context.one(cell(3, 2), 'place_t')) * 0.5
    low_loss = nql.nonneg_crossentropy(sm, target_nq.tf)
    offtarget_nq = (self.context.one(cell(2, 1), 'place_t')
                    | self.context.one(cell(2, 2), 'place_t')) * 0.5
    high_loss = nql.nonneg_crossentropy(sm, offtarget_nq.tf)
    lo = self.session.run(low_loss)
    hi = self.session.run(high_loss)
    self.assertAllInRange(lo, 0.5, 2.0)
    self.assertAllInRange(hi, 5.0, 15.0)
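
For intuition, cross-entropy between two row-normalized nonnegative distributions reduces to the familiar -Σ target·log(prediction). The sketch below only illustrates that formula in NumPy; it is not the nql implementation, and the epsilon clamp plus the toy four-cell distributions are assumptions chosen to mirror the low-loss/high-loss contrast in the test above.

import numpy as np

def crossentropy_sketch(pred, target, eps=1e-8):
  # -sum(target * log(pred)) per row, averaged over the minibatch.
  pred = np.clip(pred, eps, 1.0)
  return float(np.mean(-np.sum(target * np.log(pred), axis=1)))

# A prediction concentrated on the target cells gives a low loss;
# the same prediction scored against off-target cells gives a high one.
pred = np.array([[0.05, 0.45, 0.45, 0.05]])
on_target = np.array([[0.0, 0.5, 0.5, 0.0]])
off_target = np.array([[0.5, 0.0, 0.0, 0.5]])
print(crossentropy_sketch(pred, on_target))   # ~0.8
print(crossentropy_sketch(pred, off_target))  # ~3.0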
Example #2 — a config_model_training hook that builds a nonneg_crossentropy loss and an Adagrad training op.
  def config_model_training(self, model, labels_ph, params=None):
    # Nonnegative cross-entropy between the model's predictions and the
    # labels fed through labels_ph, minimized with Adagrad.
    model.labels = model.context.as_tf(labels_ph)
    model.loss = nql.nonneg_crossentropy(model.predicted_y.tf, model.labels)
    optimizer = tf1.train.AdagradOptimizer(1.0)
    model.train_op = optimizer.minimize(
        loss=model.loss, global_step=tf1.train.get_global_step())
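
The hook above only builds graph pieces (labels, loss, train_op); something else has to run them. A minimal driver loop under TF1 graph-mode semantics might look like the sketch below; model.input_ph and the batches iterable are assumptions of this sketch, not part of the nql API.

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

def run_training(session, model, labels_ph, batches):
  # batches: an iterable of (x_batch, y_batch) numpy arrays you supply.
  session.run(tf1.global_variables_initializer())
  for x_batch, y_batch in batches:
    _, loss_value = session.run(
        [model.train_op, model.loss],
        feed_dict={model.input_ph: x_batch, labels_ph: y_batch})
    print('loss', loss_value)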
Example #3 — a config_model_training hook that adds optional global-norm gradient clipping around an Adam optimizer.
  def config_model_training(self, model, labels_ph, params=None):
    model.loss = nql.nonneg_crossentropy(model.predicted_y, labels_ph)
    logging.info('learning rate %f', FLAGS.learning_rate)
    optimizer = tf.train.AdamOptimizer(FLAGS.learning_rate)
    if FLAGS.gradient_clip > 0:
      logging.info('clipping gradients to %f', FLAGS.gradient_clip)
      # Clip to the configured norm so the behavior matches the log message.
      gradients, variables = zip(*optimizer.compute_gradients(loss=model.loss))
      gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.gradient_clip)
      model.train_op = optimizer.apply_gradients(
          zip(gradients, variables),
          global_step=tf.train.get_global_step())
    else:
      logging.info('no gradient clipping')
      model.train_op = optimizer.minimize(
          loss=model.loss, global_step=tf.train.get_global_step())
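
The clipping branch follows the standard TF1 three-step pattern: compute_gradients, clip_by_global_norm, apply_gradients. Below is a self-contained toy version of that pattern; the variable and loss are made up for illustration, and only the TF1 calls mirror the snippet above.

import tensorflow.compat.v1 as tf1
tf1.disable_eager_execution()

w = tf1.Variable([10.0, -10.0])
loss = tf1.reduce_sum(tf1.square(w))           # deliberately large gradients (2*w)
optimizer = tf1.train.AdamOptimizer(0.01)
gradients, variables = zip(*optimizer.compute_gradients(loss))
clipped, raw_norm = tf1.clip_by_global_norm(gradients, 1.0)
train_op = optimizer.apply_gradients(zip(clipped, variables))

with tf1.Session() as session:
  session.run(tf1.global_variables_initializer())
  print(session.run(raw_norm))                 # ~28.3, well above the 1.0 cap
  session.run(train_op)                        # one step with the rescaled gradients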