def test_l2_penalty():
    """Smoke-test modeling.l2_penalty on a graph with two weights and a bias.

    With all-ones variables, 0.5 * (4*4 + 3*3) = 12.5 — matching the expected
    value only if the variable named 'bias' is excluded from the penalty
    (presumably l2_penalty skips bias-named variables; verify in modeling).
    Relies on module-level `init()` and `sess` from the surrounding test file.
    """
    graph = tf.Graph()
    with graph.as_default():
        weight_a = tf.Variable(tf.ones((4, 4)))
        weight_b = tf.Variable(tf.ones((3, 3)))
        bias = tf.Variable(tf.ones((3, 3)), name='bias')
        penalty_op = modeling.l2_penalty(0.5)
        init()
        penalty = sess.run(penalty_op)
        assert penalty == 12.5
def cost(y, logits, regularize=False, l2_weight=0.01):
    """Build the training cost op: mean softmax cross-entropy over the batch,
    optionally plus an L2 weight penalty.

    Args:
        y: integer class labels; one-hot encoded via modeling.onehot with
            NUM_CLASSES (module-level constant).
        logits: unnormalized class scores, shape (batch, NUM_CLASSES) —
            TODO confirm against the model that produces them.
        regularize: if True, add modeling.l2_penalty(l2_weight) to the loss.
        l2_weight: coefficient passed to modeling.l2_penalty.

    Returns:
        A scalar tensor for the total cost; also emits a 'cost' scalar summary.
    """
    # NOTE(review): the original computed batch_size from logits but never
    # used it — removed as dead code.
    with tf.name_scope('cost'):
        with tf.name_scope('xentropy'):
            # Legacy TF 0.x positional order: (logits, labels).
            xentropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
                logits, tf.cast(modeling.onehot(y, NUM_CLASSES), 'float')))
        # Single regularization branch (original tested `regularize` twice).
        tot_cost = xentropy
        if regularize:
            tot_cost = tf.add(xentropy, modeling.l2_penalty(l2_weight))
        tf.scalar_summary('cost', tot_cost)
        return tot_cost