Example #1
# read training and validation batches
train_label, train_sparse_id, train_sparse_val, \
train_linear_id, train_linear_val, train_continuous_val \
    = data.ReadBatch(FLAGS.train_file,
                     FLAGS.max_epoch,
                     FLAGS.batch_size,
                     FLAGS.thread_num,
                     FLAGS.min_after_dequeue)
valid_label, valid_sparse_id, valid_sparse_val, \
valid_linear_id, valid_linear_val, valid_continuous_val \
    = data.ReadBatch(FLAGS.valid_file,
                     FLAGS.max_epoch,
                     FLAGS.batch_size,
                     FLAGS.thread_num,
                     FLAGS.min_after_dequeue)

# define model
model = Model(FLAGS.embedding_size, data.Dict(), FLAGS.sparse_fields,
              FLAGS.continuous_fields, FLAGS.linear_fields, FLAGS.hidden_layer)

# define loss
logits, all_parameter = model.forward(train_sparse_id, train_sparse_val,
                                      train_linear_id, train_linear_val,
                                      train_continuous_val)
train_label = tf.to_int64(train_label)
cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
    logits=logits, labels=train_label, name='cross_entropy')
loss = tf.reduce_mean(cross_entropy, name='loss')
l1_regularizer = tf.contrib.layers.l1_regularizer(scale=FLAGS.l1, scope=None)
l2_regularizer = tf.contrib.layers.l2_regularizer(scale=FLAGS.l2, scope=None)
l1_penalty = tf.contrib.layers.apply_regularization(l1_regularizer,
                                                    all_parameter)
l2_penalty = tf.contrib.layers.apply_regularization(l2_regularizer,
                                                    all_parameter)
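
The L1 and L2 penalties are then typically folded into the data loss before optimization. A minimal continuation sketch (not from the original source; `FLAGS.learning_rate` is a hypothetical flag name used here for illustration):

# Assumed continuation: combine the cross-entropy loss with both penalties
# and minimize the total with plain gradient descent (TF 1.x API).
total_loss = loss + l1_penalty + l2_penalty
global_step = tf.Variable(0, name='global_step', trainable=False)
train_op = tf.train.GradientDescentOptimizer(FLAGS.learning_rate).minimize(
    total_loss, global_step=global_step)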