Code Example #1
# sys.exit(0)

# for index, w in enumerate(vocab_processor.vocabulary_._mapping):
#     print('vocab-{}:{}'.format(index, w))
# sys.exit(0)

with tf.Graph().as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=ALLOW_SOFT_PLACEMENT,
                                  log_device_placement=LOG_DEVICE_PLACEMENT)
    sess = tf.Session(config=session_conf)

    with sess.as_default():
        siameseModel = SiameseLSTMw2v(sequence_length=MAX_DOCUMENT_LENGTH,
                                      vocab_size=len(
                                          vocab_processor.vocabulary_),
                                      embedding_size=EMBEDDING_DIM,
                                      hidden_units=HIDDEN_UNITS,
                                      l2_reg_lambda=L2_REG_LAMBDA,
                                      batch_size=BATCH_SIZE,
                                      trainableEmbeddings=False)
        # Define Training procedure
        global_step = tf.Variable(0, name="global_step", trainable=False)
        optimizer = tf.train.AdamOptimizer(1e-3)

    grads_and_vars = optimizer.compute_gradients(siameseModel.loss)
    tr_op_set = optimizer.apply_gradients(grads_and_vars,
                                          global_step=global_step)
    print("defined training_ops")
    # Keep track of gradient values and sparsity (optional)
    grad_summaries = []
    for g, v in grads_and_vars:
        if g is not None:
            grad_hist_summary = tf.summary.histogram(
                "{}/grad/hist".format(v.name), g)
            sparsity_summary = tf.summary.scalar(
                "{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
            grad_summaries.append(grad_hist_summary)
            grad_summaries.append(sparsity_summary)
    grad_summaries_merged = tf.summary.merge(grad_summaries)
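
The excerpt above only builds the graph; MAX_DOCUMENT_LENGTH, EMBEDDING_DIM, the other constants and vocab_processor are assumed to be defined earlier in the script. As a rough, hypothetical sketch of how the resulting tr_op_set could be driven (placeholder names borrowed from Code Example #3, batch variables purely illustrative):

sess.run(tf.global_variables_initializer())  # initialize weights before training
feed_dict = {
    siameseModel.input_x1: x1_batch,          # left sentences, ids of shape [batch, MAX_DOCUMENT_LENGTH]
    siameseModel.input_x2: x2_batch,          # right sentences
    siameseModel.input_y: y_batch,            # similarity labels
    siameseModel.dropout_keep_prob: 1.0,      # dropout keep probability
}
_, step, loss = sess.run([tr_op_set, global_step, siameseModel.loss], feed_dict)
print("step {}, loss {:g}".format(step, loss))
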
Code Example #2
 with sess.as_default():
     if FLAGS.is_char_based:
         siameseModel = SiameseLSTM(
             sequence_length=max_document_length,
             vocab_size=len(vocab_processor.vocabulary_),
             embedding_size=FLAGS.embedding_dim,
             hidden_units=FLAGS.hidden_units,
             l2_reg_lambda=FLAGS.l2_reg_lambda,
             batch_size=FLAGS.batch_size
         )
     else:
         siameseModel = SiameseLSTMw2v(
             sequence_length=max_document_length,
             vocab_size=len(vocab_processor.vocabulary_),
             embedding_size=FLAGS.embedding_dim,
             hidden_units=FLAGS.hidden_units,
             l2_reg_lambda=FLAGS.l2_reg_lambda,
             batch_size=FLAGS.batch_size,
             trainableEmbeddings=trainableEmbeddings
         )
     # Define Training procedure
     global_step = tf.Variable(0, name="global_step", trainable=False)
     optimizer = tf.train.AdamOptimizer(1e-3)
     print("initialized siameseModel object")
 
 grads_and_vars = optimizer.compute_gradients(siameseModel.loss)
 tr_op_set = optimizer.apply_gradients(grads_and_vars, global_step=global_step)
 print("defined training_ops")
 # Keep track of gradient values and sparsity (optional)
 grad_summaries = []
 for g, v in grads_and_vars:
     if g is not None:
         grad_hist_summary = tf.summary.histogram("{}/grad/hist".format(v.name), g)
         sparsity_summary = tf.summary.scalar("{}/grad/sparsity".format(v.name), tf.nn.zero_fraction(g))
         grad_summaries.append(grad_hist_summary)
         grad_summaries.append(sparsity_summary)
 grad_summaries_merged = tf.summary.merge(grad_summaries)
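
This variant reads every hyperparameter from tf.flags. The flag names referenced above imply definitions roughly like the following (an assumption; the defaults shown are only illustrative and the real ones live in the training script):

import tensorflow as tf

tf.flags.DEFINE_boolean("is_char_based", True, "use the character-level SiameseLSTM instead of the word2vec-based model")
tf.flags.DEFINE_integer("embedding_dim", 300, "dimensionality of the character/word embeddings")
tf.flags.DEFINE_integer("hidden_units", 50, "number of hidden units in each LSTM")
tf.flags.DEFINE_float("l2_reg_lambda", 0.0, "L2 regularization lambda")
tf.flags.DEFINE_integer("batch_size", 64, "training batch size")

FLAGS = tf.flags.FLAGS
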
Code Example #3
    def __launch_from_build(self, vocab_processor, trainableEmbeddings,
                            out_dir, checkpoint_dir_abs, initW):
        # ==================================================
        print("starting graph def")
        graph = tf.Graph()

        with graph.as_default():
            # the session is bound to `graph`, which is the current default graph here
            sess = tf.Session(graph=graph, config=self.session_conf)
            print("started session")
            with sess.as_default():
                if self.FLAGS.is_char_based:
                    siameseModel = SiameseLSTM(
                        sequence_length=self.FLAGS.max_document_length,
                        vocab_size=len(vocab_processor.vocabulary_),
                        embedding_size=self.FLAGS.embedding_dim,
                        hidden_units=self.FLAGS.hidden_units,
                        l2_reg_lambda=self.FLAGS.l2_reg_lambda,
                        batch_size=self.FLAGS.batch_size)
                else:
                    siameseModel = SiameseLSTMw2v(
                        sequence_length=self.FLAGS.max_document_length,
                        vocab_size=len(vocab_processor.vocabulary_),
                        embedding_size=self.FLAGS.embedding_dim,
                        hidden_units=self.FLAGS.hidden_units,
                        l2_reg_lambda=self.FLAGS.l2_reg_lambda,
                        batch_size=self.FLAGS.batch_size,
                        trainableEmbeddings=trainableEmbeddings)

            # Define Training procedure
            global_step = tf.Variable(0, name="global_step", trainable=False)
            optimizer = tf.train.AdamOptimizer(1e-3)
            print("initialized siameseModel object")

            grads_and_vars = optimizer.compute_gradients(siameseModel.loss)
            tr_op_set = optimizer.apply_gradients(grads_and_vars,
                                                  global_step=global_step,
                                                  name='tr_op_set')
            print("defined training_ops")
            # Keep track of gradient values and sparsity (optional)
            grad_summaries = []
            for g, v in grads_and_vars:
                if g is not None:
                    grad_hist_summary = tf.summary.histogram(
                        "{}/grad/hist".format(v.name), g)
                    sparsity_summary = tf.summary.scalar(
                        "{}/grad/sparsity".format(v.name),
                        tf.nn.zero_fraction(g))
                    grad_summaries.append(grad_hist_summary)
                    grad_summaries.append(sparsity_summary)
            grad_summaries_merged = tf.summary.merge(grad_summaries)
            print("defined gradient summaries")

            # Summaries for loss and accuracy
            loss_summary = tf.summary.scalar("loss", siameseModel.loss)
            acc_summary = tf.summary.scalar("accuracy", siameseModel.accuracy)

            # Train Summaries
            train_summary_op = tf.summary.merge(
                [loss_summary, acc_summary, grad_summaries_merged])
            train_summary_op = tf.identity(train_summary_op,
                                           'train_summary_op')
            train_summary_dir = os.path.join(out_dir, "summaries", "train")
            train_summary_writer = tf.summary.FileWriter(
                train_summary_dir, sess.graph)

            # Dev summaries
            dev_summary_op = tf.summary.merge([loss_summary, acc_summary])
            dev_summary_op = tf.identity(dev_summary_op, 'dev_summary_op')
            dev_summary_dir = os.path.join(out_dir, "summaries", "dev")
            dev_summary_writer = tf.summary.FileWriter(dev_summary_dir,
                                                       sess.graph)

            saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
            sess.run(tf.global_variables_initializer())
            if initW is not None:
                sess.run(siameseModel.W.assign(initW))

            graphpb_txt = str(graph.as_graph_def())
            with open(os.path.join(checkpoint_dir_abs, "graphpb.txt"),
                      'w') as f:
                f.write(graphpb_txt)

        input_tensors = InputTensors(siameseModel.input_x1,
                                     siameseModel.input_x2,
                                     siameseModel.input_y,
                                     siameseModel.dropout_keep_prob)
        result_tensors = ResultTensors(global_step, siameseModel.loss,
                                       siameseModel.accuracy,
                                       siameseModel.distance,
                                       siameseModel.temp_sim)
        metric_ops = MetricOps(tr_op_set, train_summary_op, dev_summary_op,
                               train_summary_writer, dev_summary_writer)
        return saver, sess, input_tensors, result_tensors, metric_ops
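
__launch_from_build hands back everything the caller needs to run training. Assuming InputTensors, ResultTensors and MetricOps are plain named tuples whose field names mirror the constructor arguments above (that naming is an assumption), a caller inside the same class might drive one training step and a periodic checkpoint roughly like this:

saver, sess, inputs, results, metrics = self.__launch_from_build(
    vocab_processor, trainableEmbeddings, out_dir, checkpoint_dir_abs, initW)

feed_dict = {
    inputs.input_x1: x1_batch,                 # batch variables are illustrative
    inputs.input_x2: x2_batch,
    inputs.input_y: y_batch,
    inputs.dropout_keep_prob: 0.8,
}
_, step, loss, accuracy, summaries = sess.run(
    [metrics.tr_op_set, results.global_step, results.loss,
     results.accuracy, metrics.train_summary_op], feed_dict)
metrics.train_summary_writer.add_summary(summaries, step)

if step % 1000 == 0:                           # checkpoint occasionally
    saver.save(sess, os.path.join(checkpoint_dir_abs, "model"), global_step=step)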