Example #1
attMetricAll = []
labelList = [[] for _ in range(1000)]    # independent lists per slot; [[]]*1000 would alias a single list
Q_value = [[] for _ in range(1000)]
allAccList = []
finalNum = 10
diceAllList = []
for preNum in range(0, finalNum + 1):
    tokenizer = tflearn.data_utils.VocabularyProcessor(
        word_pad_length,
        tokenizer_fn=lambda tokens: [token_parse(x) for x in tokens])
    label_dict = VocabDict()
    reset_default_graph()
    model = SelfAttentive()
    with tf.Session() as sess:
        # load train data
        print('load train data')
        words, tags = load_csv('../data/trainTCM/TCM_train_%s.csv' % preName,
                               target_columns=[0], columns_to_ignore=None,
                               target_dict=label_dict, usePreVector=usePreVector)
        vocab_list = {}
        words = string_parser(words, fit=True)
        if FLAGS.shuffle == True:
            words, tags = shuffle(words, tags)
        word_input = tflearn.data_utils.pad_sequences(words, maxlen=word_pad_length)
        # build graph
        model.build_graph(n=word_pad_length, usePreVector=usePreVector, vectors=word_vecs)
        # Downstream Application
        with tf.variable_scope('DownstreamApplication'):
            global_step = tf.Variable(0, trainable=False, name='global_step')
            learn_rate = tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)
            labels = tf.placeholder('float32', shape=[None, tag_size])
            net = tflearn.fully_connected(model.M, 50, activation='relu')
            logits = tflearn.fully_connected(net, tag_size, activation=None)
            loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(labels=labels, logits=logits), axis=1)
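The downstream head above treats prescription tagging as multi-label classification: the attended sentence matrix `model.M` feeds a 50-unit ReLU layer, then a linear layer with one logit per tag, and sigmoid cross-entropy is summed over the tag axis. A minimal NumPy sketch of that loss, with illustrative shapes only (not the repository's code):

import numpy as np

def multilabel_sigmoid_ce(logits, labels):
    # numerically stable per-tag form of -[y*log(sigmoid(x)) + (1-y)*log(1-sigmoid(x))],
    # the same quantity tf.nn.sigmoid_cross_entropy_with_logits computes
    per_tag = np.maximum(logits, 0) - logits * labels + np.log1p(np.exp(-np.abs(logits)))
    return per_tag.sum(axis=1)                     # sum over tags, like axis=1 above

logits = np.array([[2.0, -1.0, 0.5]])              # one example, tag_size = 3
labels = np.array([[1.0, 0.0, 1.0]])               # multi-hot target vector
print(multilabel_sigmoid_ce(logits, labels))       # per-example loss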
Example #2
# preName = 'MM'  #11
# preName = 'JP' #124
# preName = 'ZK' #27
# preName = 'AS' #55
# preName = 'HXZT' #1
# preName = 'ZY' #6
# preName = 'TL' #13

allList = []
model = SelfAttentive()
with tf.Session() as sess:
    #load train data
    print('load train data')
    words, tags = load_csv('../data/trainTCM/TCM_train_%s.csv' % preName,
                           target_columns=[0],
                           columns_to_ignore=None,
                           target_dict=label_dict,
                           usePreVector=usePreVector)
    # print('zz',words)
    vocab_list = {}
    #  zsy: branch on whether pretrained word vectors are used (start)
    if usePreVector == True:
        input_iter = encode_window.create_document_iter(words)
        vocab = encode_window.encode_dictionary(input_iter)
        vocab_list = vocab.vocabulary_._mapping
        word_vecs = encode_window.load_bin_vec(
            "../medicalVector/model/medicalCorpus_50d.model", vocab_list)
        word_input = encode_window.encode_word(words, vocab, word_pad_length)
    else:
        words = string_parser(words, fit=True)
        if FLAGS.shuffle == True:
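`encode_window` is a project-specific helper that is not shown in this snippet; loading '../medicalVector/model/medicalCorpus_50d.model' suggests it builds a table of 50-dimensional pretrained vectors keyed by the vocabulary mapping. A rough sketch of that usual pattern with gensim, where the function name, model format, and zero-filling of unseen words are assumptions:

import numpy as np
import gensim

def load_pretrained_vectors(model_path, vocab_mapping, dim=50):
    # vocab_mapping is a word -> index dict such as vocab.vocabulary_._mapping above
    w2v = gensim.models.Word2Vec.load(model_path)
    vectors = np.zeros((len(vocab_mapping) + 1, dim), dtype=np.float32)
    for word, idx in vocab_mapping.items():
        if word in w2v.wv:                 # words missing from the model keep zero vectors
            vectors[idx] = w2v.wv[word]
    return vectors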
Example #3
                loss = tf.reduce_sum(tf.nn.sigmoid_cross_entropy_with_logits(
                    labels=labels, logits=logits),
                                     axis=1)
                loss = tf.reduce_mean(loss)
                params = tf.trainable_variables()
                optimizer = tf.train.AdamOptimizer(learn_rate)
                grad_and_vars = tf.gradients(loss, params)
                opt = optimizer.apply_gradients(zip(grad_and_vars, params),
                                                global_step=global_step)

            # Start Training
            attMetric = []
            sess.run(tf.global_variables_initializer())
            words, tags = load_csv('../data/trainTCM/TCM_train_%s.csv' %
                                   preName,
                                   target_columns=[0],
                                   columns_to_ignore=None,
                                   target_dict=label_dict)

            words = string_parser(words, fit=True)
            if FLAGS.shuffle == True:
                words, tags = shuffle(words, tags)
            word_input = tflearn.data_utils.pad_sequences(
                words, maxlen=word_pad_length)
            total = len(word_input)
            step_print = int((total / batch_size) / 20)
            if FLAGS.train == True:
                print('start training')
                for epoch_num in range(num_epochs):
                    epoch_loss = 0
                    step_loss = 0
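The `learn_rate` passed to `AdamOptimizer` here is the staircase schedule built in Example #1 with `tf.train.exponential_decay(lr, global_step, FLAGS.decay_step, 0.95, staircase=True)`. With staircase=True the rate drops in discrete steps; a short sketch of the value it takes at a given global step (lr and decay_step stand in for the FLAGS values):

def staircase_lr(lr, global_step, decay_step, decay_rate=0.95):
    # tf.train.exponential_decay with staircase=True: lr * decay_rate ** floor(step / decay_step)
    return lr * decay_rate ** (global_step // decay_step)

print(staircase_lr(0.01, global_step=2500, decay_step=1000))   # 0.01 * 0.95**2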
Example #4
      p_coef = 0.004
      p_loss = p_coef * model.P
      loss = loss + p_loss
      p_loss = tf.reduce_mean(p_loss)
    loss = tf.reduce_mean(loss)
    params = tf.trainable_variables()
    #clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
    optimizer = tf.train.AdamOptimizer(learn_rate)
    grad_and_vars = tf.gradients(loss, params)
    clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)
    opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)

  # Start Training
  sess.run(tf.global_variables_initializer())
# Everything from here down builds the sentence (word-index) matrix
  words, tags = load_csv('./data/train.csv', target_columns=[0], target_dict=label_dict, columns_to_ignore=[1])
  #words = string_parser(words, fit=True)  # maps each sentence's words to their vocabulary indices
  if FLAGS.shuffle == True:
    word_input, tags = shuffle(words, tags)

  # To use word2vec embeddings, word2vec.txt must first be pretrained with word2vec.py
  '''
  w2v_model = gensim.models.KeyedVectors.load_word2vec_format('./data/pw_20_csv/word2vec.txt', binary=False)
  embedding_matrix = np.zeros((len(tokenizer.vocabulary_._mapping) + 1, 100))
  for word, i in tokenizer.vocabulary_._mapping.items():
    try:
      embedding_matrix[i] = w2v_model.wv[word]  # fill this word's row of the embedding matrix
    except:
      # words not found in embedding index will be all-zeros.
      # print(str(i)+': '+word+' not in w2v_model.')
      pass
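Both this example and the next add `p_coef * model.P` to the loss. `model.P` is defined inside `SelfAttentive` and not shown here; in the self-attentive sentence-embedding model this penalization is usually the squared Frobenius norm of (A·Aᵀ − I) for the attention matrix A, which discourages the attention hops from all focusing on the same tokens. A NumPy sketch of that assumed form:

import numpy as np

def attention_penalization(A):
    # squared Frobenius norm of (A @ A.T - I) for an attention matrix A of shape [hops, seq_len]
    hops = A.shape[0]
    diff = A @ A.T - np.eye(hops)
    return np.sum(diff ** 2)

A = np.array([[0.7, 0.2, 0.1],
              [0.1, 0.3, 0.6]])            # 2 attention hops over 3 tokens
print(attention_penalization(A))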
Example #5
      p_coef = 0.004
      p_loss = p_coef * model.P
      loss = loss + p_loss
      p_loss = tf.reduce_mean(p_loss)
    loss = tf.reduce_mean(loss)
    params = tf.trainable_variables()
    #clipped_gradients = [tf.clip_by_value(x, -0.5, 0.5) for x in gradients]
    optimizer = tf.train.AdamOptimizer(learn_rate)
    grad_and_vars = tf.gradients(loss, params)
    clipped_gradients, _ = tf.clip_by_global_norm(grad_and_vars, 0.5)
    opt = optimizer.apply_gradients(zip(clipped_gradients, params), global_step=global_step)

  # Start Training
  sess.run(tf.global_variables_initializer())

  words, tags = load_csv('./data/ag_news_csv/train.csv', target_columns=[0], columns_to_ignore=[1], target_dict=label_dict)
  words = string_parser(words, fit=True)
  if FLAGS.shuffle == True:
    words, tags = shuffle(words, tags)
  word_input = tflearn.data_utils.pad_sequences(words, maxlen=word_pad_length)
  total = len(word_input)
  step_print = int((total/batch_size) / 13)

  if FLAGS.train == True:
    print('start training')
    for epoch_num in range(num_epochs):
      epoch_loss = 0
      step_loss = 0
      for i in range(int(total/batch_size)):
        batch_input, batch_tags = (word_input[i*batch_size:(i+1)*batch_size], tags[i*batch_size:(i+1)*batch_size])
        train_ops = [opt, loss, learn_rate, global_step]
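Examples #4 and #5 clip gradients with `tf.clip_by_global_norm(grad_and_vars, 0.5)` before `apply_gradients`. A small NumPy illustration of what global-norm clipping does (the gradient values are made up):

import numpy as np

def clip_by_global_norm(grads, clip_norm=0.5):
    # global_norm = sqrt(sum_i ||g_i||^2); if it exceeds clip_norm, every gradient
    # is rescaled by clip_norm / global_norm, so their relative directions are kept
    global_norm = np.sqrt(sum(np.sum(g ** 2) for g in grads))
    scale = clip_norm / max(global_norm, clip_norm)
    return [g * scale for g in grads], global_norm

grads = [np.array([3.0, 4.0]), np.array([0.0, 1.0])]   # global norm = sqrt(26)
clipped, norm = clip_by_global_norm(grads)
print(norm, clipped)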
Example #6
 allAccList = []
 finalNum = 10
 diceAllList = []
 for preNum in range(0, 11):
     tokenizer = tflearn.data_utils.VocabularyProcessor(
         word_pad_length,
         tokenizer_fn=lambda tokens: [token_parse(x) for x in tokens])
     label_dict = VocabDict()
     reset_default_graph()
     model = SelfAttentive()
     with tf.Session() as sess:
         #load train data
         print('load train data')
         words, tags = load_csv('../data/trainTCM/TCM_train_%s.csv' %
                                preName,
                                target_columns=[0],
                                columns_to_ignore=None,
                                target_dict=label_dict,
                                usePreVector=usePreVector)
         vocab_list = {}
         words = string_parser(words, fit=True)
         if FLAGS.shuffle == True:
             words, tags = shuffle(words, tags)
         word_input = tflearn.data_utils.pad_sequences(
             words, maxlen=word_pad_length)
         # build graph
         model.build_graph(n=word_pad_length,
                           usePreVector=usePreVector,
                           vectors=word_vecs)
         # Downstream Application
         with tf.variable_scope('DownstreamApplication'):
             global_step = tf.Variable(0,
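This example repeats the per-fold setup from Example #1: each pass through the loop rebuilds the tokenizer, label dictionary, graph, and session. The `reset_default_graph()` call is what makes that legal in TF1, because recreating the same variables twice in one default graph would otherwise collide. A minimal TF1-style illustration (not the project's code):

import tensorflow as tf

for fold in range(3):
    tf.reset_default_graph()                      # start each fold from an empty graph
    x = tf.placeholder(tf.float32, shape=[None, 4])
    w = tf.get_variable('w', shape=[4, 2])        # the name 'w' is free again after the reset
    y = tf.matmul(x, w)
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        print(sess.run(y, feed_dict={x: [[1.0, 2.0, 3.0, 4.0]]}).shape)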