Exemplo n.º 1
0
# Command-line flags for the sparse-feature DNN example.
flags.DEFINE_string('sparse_fields', '', 'sparse fields. example 0,1,2')
flags.DEFINE_string('hidden_layer', '100,100,50', 'hidden size for each layer')
flags.DEFINE_integer('embedding_size', 32, 'embedding size')

# Create output directories up front. exist_ok=True is atomic and avoids the
# check-then-create race of the os.path.exists() + os.makedirs() pattern.
for _out_dir in (FLAGS.model_dir, FLAGS.checkpoint_dir, FLAGS.tensorboard_dir):
    os.makedirs(_out_dir, exist_ok=True)

with tf.device('/cpu:0'):
    # Input pipeline: queue-based batch readers for the train and
    # validation files (TF1-style).
    data = Data(FLAGS.sparse_fields)
    train_label, train_sparse_id, train_sparse_val = data.ReadBatch(
        FLAGS.train_file, FLAGS.max_epoch, FLAGS.batch_size, FLAGS.thread_num,
        FLAGS.min_after_dequeue)
    valid_label, valid_sparse_id, valid_sparse_val = data.ReadBatch(
        FLAGS.valid_file, FLAGS.max_epoch, FLAGS.batch_size, FLAGS.thread_num,
        FLAGS.min_after_dequeue)

    # Model: embedding lookup over the sparse fields followed by the
    # configured hidden layers.
    model = Model(FLAGS.embedding_size, FLAGS.sparse_fields,
                  FLAGS.hidden_layer)

    # Loss: mean sparse softmax cross-entropy over the training batch.
    logits, all_parameter = model.forward(train_sparse_id, train_sparse_val)
    # tf.cast replaces the deprecated tf.to_int64 alias; same result.
    train_label = tf.cast(train_label, tf.int64)
    cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
        logits=logits, labels=train_label, name='cross_entropy')
    loss = tf.reduce_mean(cross_entropy, name='loss')
Exemplo n.º 2
0
# Command-line flags for the wide-and-deep style example.
# Default is the float 0.001 directly — the original passed the string
# '0.001', which only worked because the flag parser coerces it.
flags.DEFINE_float('l2', 0.001, 'l2 regularization')
flags.DEFINE_integer('embedding_size', 10, 'embedding size')

# Create output directories up front. exist_ok=True is atomic and avoids the
# check-then-create race of the os.path.exists() + os.makedirs() pattern.
for _out_dir in (FLAGS.model_dir, FLAGS.tensorboard_dir):
    os.makedirs(_out_dir, exist_ok=True)

# Input pipeline: queue-based batch readers over the continuous, sparse
# and linear field groups for the train and validation files (TF1-style).
data = Data(FLAGS.dict, FLAGS.continuous_fields, FLAGS.sparse_fields,
            FLAGS.linear_fields)
train_label, train_sparse_id, train_sparse_val, \
train_linear_id, train_linear_val, train_continuous_val \
    = data.ReadBatch(FLAGS.train_file,
                     FLAGS.max_epoch,
                     FLAGS.batch_size,
                     FLAGS.thread_num,
                     FLAGS.min_after_dequeue)
valid_label, valid_sparse_id, valid_sparse_val, \
valid_linear_id, valid_linear_val, valid_continuous_val \
    = data.ReadBatch(FLAGS.valid_file,
                     FLAGS.max_epoch,
                     FLAGS.batch_size,
                     FLAGS.thread_num,
                     FLAGS.min_after_dequeue)

# Model: embeddings plus the configured hidden layers over all field groups.
model = Model(FLAGS.embedding_size, data.Dict(), FLAGS.sparse_fields,
              FLAGS.continuous_fields, FLAGS.linear_fields, FLAGS.hidden_layer)

# define loss