Example no. 1
    session_conf = tf.ConfigProto(
        allow_soft_placement=FLAGS.allow_soft_placement,
        log_device_placement=FLAGS.log_device_placement,
        gpu_options=tf.GPUOptions(per_process_gpu_memory_fraction=0.8))
    sess = tf.Session(config=session_conf)
    with sess.as_default():
        cnn = TextCNN(
            sequence_length=FLAGS.max_sequence_length,  # number of words in the sentence
            num_classes=y_train_shuffled.shape[1],
            vocab_size=len(vocabulary),
            embedding_size=embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)
        if FLAGS.use_pretrain:
            cnn.assign_embedding(sess, pretrained_embedding)
        # Define Training procedure

        global_step = tf.Variable(0, name="global_step",
                                  trainable=False)  # record the global step
        # learning_rate = tf.train.exponential_decay(FLAGS.learning_rate, global_step=global_step, decay_steps=FLAGS.num_epochs, decay_rate=0.03)
        optimizer = tf.train.AdamOptimizer(1e-3)

        grads_and_vars = optimizer.compute_gradients(
            cnn.loss)  # compute gradients of the loss
        train_op = optimizer.apply_gradients(grads_and_vars,
                                             global_step=global_step,
                                             name="train_op")

        # Output directory for models and summaries
        timestamp = str(int(time.time()))
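
For context, here is a minimal sketch of how the FLAGS referenced in Example no. 1 might be defined. The flag names come from the snippet itself; every default value below is an assumption:

import tensorflow as tf

tf.flags.DEFINE_integer("max_sequence_length", 100, "Max sentence length (assumed default)")
tf.flags.DEFINE_string("filter_sizes", "3,4,5", "Comma-separated filter sizes (assumed default)")
tf.flags.DEFINE_integer("num_filters", 128, "Filters per filter size (assumed default)")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda (assumed default)")
tf.flags.DEFINE_boolean("use_pretrain", False, "Whether to load a pretrained embedding")
tf.flags.DEFINE_boolean("allow_soft_placement", True, "Allow soft device placement")
tf.flags.DEFINE_boolean("log_device_placement", False, "Log op device placement")

FLAGS = tf.flags.FLAGS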
Example no. 2
    sess = tf.Session(config=session_conf)
    print(np.array(y_train))  # debug: inspect the label matrix
    with sess.as_default():
        cnn = TextCNN(
            sequence_length_arg1=x_train_arg1_word.shape[1],
            sequence_length_arg2=x_train_arg2_word.shape[1],
            num_classes=np.array(y_train).shape[1],
            pos_vocab_size=len(pos_vocab),
            word_vocab_size=len(word_vocab),
            pos_embedding_size=pos_embedding_dim,
            word_embedding_size=word_embedding_dim,
            filter_sizes=list(map(int, FLAGS.filter_sizes.split(","))),
            num_filters=FLAGS.num_filters,
            l2_reg_lambda=FLAGS.l2_reg_lambda)
        if FLAGS.use_pretrain:
            cnn.assign_embedding(sess, pretrained_pos_embedding, pretrained_word_embedding)
            # cnn.assign_word_embedding(sess, pretrained_word_embedding)

        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(learning_rate=1e-3)
        grads_and_vars = optimizer.compute_gradients(cnn.loss)
        train_op = optimizer.apply_gradients(grads_and_vars, global_step=global_step, name="train_op")
        # Alternative (commented out): use separate learning rates for the word
        # embedding and the rest of the graph by splitting the trainable
        # variables across two optimizers; see the sketch below.
        # var_list1 = [var for var in tf.trainable_variables()
        #              if 'word_embedding' in var.op.name]
        # var_list2 = [var for var in tf.trainable_variables()
        #              if 'word_embedding' not in var.op.name]
        # opt1 = tf.train.AdamOptimizer(0.001)
        # opt2 = tf.train.AdamOptimizer(0.004)
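
The commented-out block above only sketches the two-optimizer idea. A hypothetical completion, assuming the variable names from those comments and the cnn and global_step objects defined earlier (an illustration, not part of the original code):

# Split trainable variables: the word embedding gets its own optimizer.
var_list1 = [var for var in tf.trainable_variables()
             if 'word_embedding' in var.op.name]
var_list2 = [var for var in tf.trainable_variables()
             if 'word_embedding' not in var.op.name]
opt1 = tf.train.AdamOptimizer(0.001)  # smaller rate for the word embedding
opt2 = tf.train.AdamOptimizer(0.004)  # larger rate for everything else
grads = tf.gradients(cnn.loss, var_list1 + var_list2)
grads1, grads2 = grads[:len(var_list1)], grads[len(var_list1):]
train_op = tf.group(
    opt1.apply_gradients(zip(grads1, var_list1)),
    opt2.apply_gradients(zip(grads2, var_list2), global_step=global_step))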