Code Example #1
import tensorflow as tf  # assumed: all three excerpts target the TF 1.x API

#x_val, y_val = data_deepqa.load_data_val()

# Training
# ==================================================

with tf.Graph().as_default():
    with tf.device("/gpu:1"):
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = InsQACNN(sequence_length=x_train_1.shape[1],
                           batch_size=FLAGS.batch_size,
                           vocab_size=len(vocab),
                           embedding_size=FLAGS.embedding_dim,
                           filter_sizes=list(
                               map(int, FLAGS.filter_sizes.split(","))),
                           num_filters=FLAGS.num_filters,
                           l2_reg_lambda=FLAGS.l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(1e-1)
            #optimizer = tf.train.GradientDescentOptimizer(1e-2)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            train_op = optimizer.apply_gradients(grads_and_vars,
                                                 global_step=global_step)

            # Keep track of gradient values and sparsity (optional)
            grad_summaries = []
            for g, v in grads_and_vars:
                # (assumed continuation: the excerpt is cut off here; this is
                # the standard TF 1.x gradient histogram/sparsity pattern)
                if g is not None:
                    grad_hist_summary = tf.summary.histogram(
                        "{}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar(
                        "{}/grad/sparsity".format(v.name),
                        tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
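In the TF 1.x scripts this excerpt comes from, the collected summaries are typically merged and training is driven by a per-batch step function. The following is a minimal sketch of that continuation, not code from the excerpt: `cnn.input_x_1/2/3`, the batch arguments, and the (question, positive answer, negative answer) triplet layout are all assumptions.

# Hedged sketch of the usual continuation (runs inside the session scope above).
grad_summaries_merged = tf.summary.merge(grad_summaries)  # assumed follow-up

def train_step(x_batch_1, x_batch_2, x_batch_3):
    # Placeholder names are assumptions; the excerpt does not show them.
    feed_dict = {
        cnn.input_x_1: x_batch_1,
        cnn.input_x_2: x_batch_2,
        cnn.input_x_3: x_batch_3,
    }
    _, step, loss = sess.run([train_op, global_step, cnn.loss], feed_dict)
    return step, loss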
Code Example #2
# Training
# ==================================================

with tf.Graph().as_default():
  with tf.device("/gpu:1"):
    session_conf = tf.ConfigProto(
      allow_soft_placement=FLAGS.allow_soft_placement,
      log_device_placement=FLAGS.log_device_placement)
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        cnn = InsQACNN(
            sequence_length=max_seq_len,
            batch_size=FLAGS.batch_size,
            vocab_size=len(vocab),
            embedding_size=FLAGS.embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda,
            embedding_type=FLAGS.embedding_type)
        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-1)
        #optimizer = tf.train.GradientDescentOptimizer(1e-2)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step)

        # Keep track of gradient values and sparsity (optional)
        grad_summaries = []
        for g, v in grads_and_vars:
            if g is not None:
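Whatever else follows, a TF 1.x graph like this one cannot run `train_op` until its variables are initialized, and these training scripts usually create a checkpoint saver at the same point. A minimal sketch, assuming only the standard `tf.train.Saver` API; `checkpoint_prefix` is a made-up name:

# Variables must be initialized before the first sess.run of train_op.
sess.run(tf.global_variables_initializer())

# Standard TF 1.x checkpointing; checkpoint_prefix is hypothetical.
saver = tf.train.Saver(tf.global_variables(), max_to_keep=5)
# Later, inside the training loop:
#     saver.save(sess, checkpoint_prefix, global_step=current_step)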
Code Example #3
# Training
# ==================================================

prev_auc = 0
with tf.Graph().as_default():
    with tf.device("/gpu:1"):
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            cnn = InsQACNN(_margin=FLAGS.margin,
                           sequence_length=FLAGS.sequence_length,
                           batch_size=FLAGS.batch_size,
                           vocab_size=len(vocab),
                           embedding_size=FLAGS.embedding_dim,
                           filter_sizes=list(
                               map(int, FLAGS.filter_sizes.split(","))),
                           num_filters=FLAGS.num_filters,
                           l2_reg_lambda=FLAGS.l2_reg_lambda)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(1e-1)
            #optimizer = tf.train.GradientDescentOptimizer(1e-2)
            grads_and_vars = optimizer.compute_gradients(cnn.loss)
            train_op = optimizer.apply_gradients(grads_and_vars,
                                                 global_step=global_step)

            # Keep track of gradient values and sparsity (optional)
            grad_summaries = []
            # (the excerpt ends here; it continues with the same gradient
            # histogram/sparsity loop completed in Example #1)
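Example #3 differs from the first two in passing `_margin=FLAGS.margin`, presumably the margin of a pairwise ranking loss of the form max(0, margin - cos(q, a+) + cos(q, a-)) used in InsuranceQA-style CNNs, and in initializing `prev_auc = 0`, which suggests the script checkpoints only when validation AUC improves. A minimal sketch of that gate; `evaluate_auc`, `saver`, `checkpoint_prefix`, and `current_step` are assumed names, not shown in the excerpt:

# Hypothetical AUC gate implied by prev_auc above: save only on improvement.
auc = evaluate_auc(sess, cnn)  # assumed validation helper
if auc > prev_auc:
    prev_auc = auc
    path = saver.save(sess, checkpoint_prefix, global_step=current_step)
    print("New best AUC {:.4f}; saved model to {}".format(auc, path))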