Example #1
    def __init__(self):
        self.du = DU()
        self.vocab, self.recab = self.du.initialize_vocabulary()
        self.ids_arr = []
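        # ids_path holds one utterance per line as space-separated token ids;
        # blank lines become empty utterances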
        for line in open(self.du.ids_path):
            line = line.strip()
            if len(line) > 0:
                self.ids_arr.append([int(x) for x in line.split(' ')])
            else:
                self.ids_arr.append([])

        self.mark = json.load(open(self.du.mark_path))
        self.train = json.load(open(self.du.train_path))
        self.dev = json.load(open(self.du.dev_path))
        self.test = json.load(open(self.du.test_path))

        self.model = Ranker(
            vocab_size=FLAGS.vocab_size,
            embedding_size=FLAGS.emd_size,
            memory_size=FLAGS.mem_size,
            batch_size=FLAGS.batch_size,
            max_dialogue_size=FLAGS.max_dialogue_size,
            max_sentence_size=FLAGS.max_sentence_size,
            margin=FLAGS.margin,
            max_gradient_norm=FLAGS.max_gradient_norm,
            learning_rate=FLAGS.learning_rate,
            learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
            use_lstm=False,
            train_mode=FLAGS.train,
            # drop_out=FLAGS.drop_out,
            # layer=FLAGS.layer,
        )
Example #2
    def __init__(self):
        self.du = DU()
        self.vocab, self.recab = self.du.initialize_vocabulary()
        self.tag, self.retag = self.du.init_tag()  # load the tags and their corresponding ids
        self.ids_arr = []
        for line in open(self.du.ids_path):
            line = line.strip()
            if len(line) > 0:
                temp = line.split(' ')
                for i in range(len(temp)):
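                    # malformed entries fall back to id 3, presumably the UNK token (assumption)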
                    try:
                        temp[i] = int(temp[i])
                    except Exception:
                        temp[i] = 3
                self.ids_arr.append(temp)
            else:
                self.ids_arr.append([])

        # self.train = json.load(open(self.du.train_path))
        # self.dev = json.load(open(self.du.dev_path))
        # self.test = json.load(open(self.du.test_path))

        self.model = Marker(
            vocab_size=FLAGS.vocab_size,
            embedding_size=FLAGS.emd_size,
            memory_size=FLAGS.mem_size,
            label_size=FLAGS.tag_size,
            batch_size=FLAGS.batch_size,
            max_ut_size=FLAGS.max_ut_size,
            max_gradient_norm=FLAGS.max_gradient_norm,
            learning_rate=FLAGS.learning_rate,
            learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
            use_lstm=False,
            train_mode=FLAGS.train)
Example #3
class Robot:
    def __init__(self):
        self.du = DU()
        self.vocab, self.recab = self.du.initialize_vocabulary()
        self.ids_arr = []
        for line in open(self.du.ids_path):
            line = line.strip()
            if len(line) > 0:
                self.ids_arr.append([int(x) for x in line.split(' ')])
            else:
                self.ids_arr.append([])

        self.mark = json.load(open(self.du.mark_path))
        self.train = json.load(open(self.du.train_path))
        self.dev = json.load(open(self.du.dev_path))
        self.test = json.load(open(self.du.test_path))

        self.model = Ranker(
            vocab_size=FLAGS.vocab_size,
            embedding_size=FLAGS.emd_size,
            memory_size=FLAGS.mem_size,
            batch_size=FLAGS.batch_size,
            max_dialogue_size=FLAGS.max_dialogue_size,
            max_sentence_size=FLAGS.max_sentence_size,
            margin=FLAGS.margin,
            max_gradient_norm=FLAGS.max_gradient_norm,
            learning_rate=FLAGS.learning_rate,
            learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
            use_lstm=False,
            train_mode=FLAGS.train,
            # drop_out=FLAGS.drop_out,
            # layer=FLAGS.layer,
        )

    def build_model(self, session):
        self.model.build_model()
        ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt_dir)
        # try to restore model parameters from a checkpoint
        if ckpt:  # and tf.gfile.Exists(ckpt.model_checkpoint_path):
            print("Reading model parameters from %s" %
                  ckpt.model_checkpoint_path)
            self.model.saver.restore(session, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            session.run(tf.initialize_all_variables())

            #emd_weight = tf.random_normal([FLAGS.vocab_size,FLAGS.emd_size],-0.08,0.08)
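            # start from small uniform noise, then overwrite rows for vocabulary words
            # found in the pretrained file (apparently word2vec text format: a header
            # line followed by "word v1 v2 ..." per line)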
            emd_weight = np.random.rand(FLAGS.vocab_size,
                                        FLAGS.emd_size) * 0.16 - 0.08
            #f = open('../data/emd/ylemd-128.bin','r')
            f = open('../data/emd/ylemd.bin', 'r')
            first = True
            for line in f:
                if first:
                    first = False
                    continue
                box = line.split(' ')
                word = box[0]
                box = box[1:FLAGS.emd_size + 1]
                if word in self.vocab:
                    index = self.vocab[word]
                    one_emd = np.array([float(x) for x in box])
                    emd_weight[index, :] = one_emd
            load = self.model.embedding_weight.assign(emd_weight)
            session.run(load)
            print('word embeddings loaded')

        self.train_writer = tf.train.SummaryWriter(FLAGS.summary_dir,
                                                   session.graph)

    def ut2ids(self, ut):  # convert utterance indices into lists of word ids
        # returns the id representation of a single dialogue and its candidates
        if ut is None or len(ut) == 0:
            return None
        result = []
        for i in range(len(ut)):
            cache = []
            for j in range(len(ut[i])):
                cache.append(self.ids_arr[ut[i][j]])
            result.append(cache)
        return result

    def ids2ut(self, ids):  # convert word ids back into a text sentence
        if ids is None or len(ids) == 0:
            return None
        ut = []
        for i in range(len(ids)):
            ut.append(self.recab[ids[i]])
        return ' '.join(ut)

    def run_train(self):
        print('running train op')
        train_set = json.load(open(self.du.train_path, 'r'))
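        # keep the first 100 training dialogues aside to also report the score on training data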
        train_set2 = train_set[:100]
        if len(train_set) > FLAGS.max_trainset_size and FLAGS.max_trainset_size != 0:
            train_set = train_set[:FLAGS.max_trainset_size]

        dev_set = json.load(open(self.du.dev_path, 'r'))
        if len(dev_set) > FLAGS.max_devset_size and FLAGS.max_devset_size != 0:
            dev_set = dev_set[:FLAGS.max_devset_size]

        with tf.Session() as sess:
            self.build_model(sess)
            step_time, loss = 0.0, 0.0
            step_count = self.model.global_step.eval()
            previous_losses = []
            previous_score = []
            print('P@1\tP@3\tMAP\tStep\tLR\tTime\tLoss')
            while True:
                # Get a batch and make a step.
                start_time = time.time()
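                # assemble one batch: every host reply (odd positions) keeps its
                # true answer and gets a freshly sampled negative candidate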
                dialogs = []
                for i in range(FLAGS.batch_size):
                    temp = random.choice(train_set)
                    for j in range(len(temp)):  # walk every utterance of the sampled dialogue
                        if j % 2 == 0:
                            continue
                        true_candidate = temp[j][0]  # the ground-truth answer
                        seed = int(random.uniform(0, len(self.mark) - 1))  # candidate negative answer
                        while self.mark[seed] == False or true_candidate == self.ids_arr[seed]:
                            # resample if the utterance came from the customer or duplicates the correct answer
                            seed = int(random.uniform(0, len(self.mark) - 1))
                        temp[j][1] = seed  # assign the negative answer
                    dialogs.append(self.ut2ids(temp))
                history_batch, true_batch, false_batch = self.model.train2vec(
                    dialogs, step_count)
                # get the id representation of the batch

                step_loss, summary = self.model.step_train(
                    sess, history_batch, true_batch, false_batch)
                if step_count % 5 == 0:
                    self.train_writer.add_summary(summary, step_count)

                # one training step done
                step_count += 1
                step_time += (time.time() -
                              start_time) / FLAGS.steps_per_checkpoint
                loss += step_loss / FLAGS.steps_per_checkpoint

                if step_count % FLAGS.steps_per_checkpoint == 0:  # gather statistics and save the model
                    # track the average step time and loss over the checkpoint window
                    pat1, pat3, MAP, count = 0.0, 0.0, 0.0, 0.0
                    for i in range(len(dev_set)):
                        #print(dev_set[i])
                        dialog = self.ut2ids(dev_set[i])
                        dialog1 = dialog[:]
                        dialog, candidates = self.model.test2vec(dialog)
                        scores = self.model.step_test(sess, dialog, candidates)
                        '''
                        for j in range(min(len(scores) * 2, len(dialog1))):
                            if j % 2 == 0:
                                print('user', self.ids2ut(dialog1[j][0]))
                            else:
                                # print(dialog1[i])
                                print('host', scores[int(j / 2)][0],
                                      self.ids2ut(dialog1[j][0]))
                                max_index = np.argmax(scores[int(j / 2)])
                                print('max_cand', max_index,
                                      scores[int(j / 2)][max_index],
                                      self.ids2ut(dialog1[j][max_index]))
                                print('')
                        print('############ new dialogue ################')
                        '''
                        scores = scores[:int(len(dev_set[i]) / 2)]
                        a, b, c, d = self.cal_score(scores)
                        pat1 += a
                        pat3 += b
                        MAP += c
                        count += d
                    length = count
                    #print('P@1\tP@3\tMAP\tStep\tLR\tTime\tLoss')
                    print('V100: %.4f\t%.4f\t%.4f\t%.0f\t%.4f\t%.4f\t%.4f' %
                          (pat1 / length, pat3 / length, MAP / length,
                           self.model.global_step.eval(),
                           self.model.learning_rate.eval(), step_time, loss))
                    pat1, pat3, MAP, count = 0.0, 0.0, 0.0, 0.0
                    for i in range(len(train_set2)):
                        dialog = self.ut2ids(train_set2[i])
                        dialog1 = dialog[:]
                        dialog, candidates = self.model.test2vec(dialog)
                        scores = self.model.step_test(sess, dialog, candidates)
                        scores = scores[:int(len(train_set2[i]) / 2)]
                        a, b, c, d = self.cal_score(scores)
                        pat1 += a
                        pat3 += b
                        MAP += c
                        count += d
                    length = count
                    print('T100: %.4f\t%.4f\t%.4f\t%.0f\t%.4f\t%.4f\t%.4f' %
                          (pat1 / length, pat3 / length, MAP / length,
                           self.model.global_step.eval(),
                           self.model.learning_rate.eval(), step_time, loss))
                    score = MAP / length
                    #if len(previous_losses) > 2 and loss > max(previous_losses[-3:]):
                    if len(previous_score) > 2 and score < min(
                            previous_score[-3:]):
                        sess.run(self.model.learning_rate_decay_op)
                    previous_losses.append(loss)
                    previous_score.append(score)
                    if len(previous_losses) % 4 == 0:  # save the model every fourth evaluation
                        checkpoint_path = os.path.join(FLAGS.ckpt_dir,
                                                       "hred.ckpt")
                        self.model.saver.save(
                            sess,
                            checkpoint_path,
                            global_step=self.model.global_step)
                    step_time, loss = 0.0, 0.0

    def cal_score(self, scores):
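        # scores[i][0] is the true candidate's score; its rank is 1 plus the number
        # of candidates scoring strictly higher. P@1/P@3 count top-1/top-3 hits, and
        # MAP here reduces to the mean reciprocal rank of the true candidate.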
        pat1 = 0.0
        pat3 = 0.0
        MAP = 0.0
        length = len(scores)
        for i in range(length):
            #print('scores:',i,scores[i])
            rank = 1.0
            for j in range(len(scores[i])):
                if scores[i][0] < scores[i][j]:
                    rank += 1
            if rank == 1:
                pat1 += 1

            if rank <= 3:
                pat3 += 1

            MAP += 1 / rank

        return pat1, pat3, MAP, length

    def run_test(self):
        print('Running Test Op')
        # evaluate the model on the test set
        with tf.Session() as sess:
            self.build_model(sess)
            test_set = json.load(open(self.du.test_path, 'r'))
            #test_set = json.load(open(self.du.train_path,'r'))
            pat1, pat3, MAP, count = 0.0, 0.0, 0.0, 0.0
            for i in range(len(test_set)):
                #print(test_set[i])
                dialog = self.ut2ids(test_set[i])
                dialog1 = dialog[:]
                dialog, candidates = self.model.test2vec(dialog)
                scores = self.model.step_test(sess, dialog, candidates)

                for j in range(min(len(scores) * 2, len(dialog1))):
                    if j % 2 == 0:
                        print('user', self.ids2ut(dialog1[j][0]))
                    else:
                        #print(dialog1[i])
                        print('host', scores[int(j / 2)][0],
                              self.ids2ut(dialog1[j][0]))
                        max_index = np.argmax(scores[int(j / 2)])
                        print('max_cand', max_index,
                              scores[int(j / 2)][max_index],
                              self.ids2ut(dialog1[j][max_index]))
                        print('')
                print('############ new dialogue ################')

                scores = scores[:int(len(test_set[i]) / 2)]
                a, b, c, d = self.cal_score(scores)
                pat1 += a
                pat3 += b
                MAP += c
                count += d
            length = count
            print('Test Set Result: P@1 %.4f, P@3 %.4f MAP %.4f' %
                  (pat1 / length, pat3 / length, MAP / length))
Example #4
#coding=utf-8
import tensorflow as tf
import numpy as np
from data_utils import DU
if __name__ == '__main__':
    du = DU()
    vocab, recab = du.initialize_vocabulary()
    vocab_size = len(vocab)
    embedding_size = 300
    emd = tf.Variable(tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),
                      name="W")
    weight = np.random.rand(vocab_size, embedding_size) * 2 - 1
    f = open('../data/emd/cyemd.bin', 'r')
    first = True
    for line in f:
        if first:
            first = False
            continue
        box = line.split(' ')
        word = box[0]
        box = box[1:embedding_size + 1]
        if word in vocab:
            index = vocab[word]
            print(word, index)
            #print(box)
            one_emd = np.array([float(x) for x in box])
            #print(one_emd)
            weight[index, :] = one_emd
    load = emd.assign(weight)
    with tf.Session() as sess:
        sess.run(load)  # run the assign op to load the pretrained embeddings into emd
Example #5
class Robot:
    def __init__(self):
        self.du = DU()
        self.vocab, self.recab = self.du.initialize_vocabulary()
        self.tag, self.retag = self.du.init_tag()  # load the tags and their corresponding ids
        self.ids_arr = []
        for line in open(self.du.ids_path):
            line = line.strip()
            if len(line) > 0:
                temp = line.split(' ')
                for i in range(len(temp)):
                    try:
                        temp[i] = int(temp[i])
                    except Exception:
                        temp[i] = 3
                self.ids_arr.append(temp)
            else:
                self.ids_arr.append([])

        # self.train = json.load(open(self.du.train_path))
        # self.dev = json.load(open(self.du.dev_path))
        # self.test = json.load(open(self.du.test_path))

        self.model = Marker(
            vocab_size=FLAGS.vocab_size,
            embedding_size=FLAGS.emd_size,
            memory_size=FLAGS.mem_size,
            label_size=FLAGS.tag_size,
            batch_size=FLAGS.batch_size,
            max_ut_size=FLAGS.max_ut_size,
            max_gradient_norm=FLAGS.max_gradient_norm,
            learning_rate=FLAGS.learning_rate,
            learning_rate_decay_factor=FLAGS.learning_rate_decay_factor,
            use_lstm=False,
            train_mode=FLAGS.train)

    def build_model(self, session):
        self.model.build_model()
        ckpt = tf.train.get_checkpoint_state(FLAGS.ckpt_dir)
        # try to restore model parameters from a checkpoint
        if ckpt and tf.gfile.Exists(ckpt.model_checkpoint_path):
            print("Reading model parameters from %s" %
                  ckpt.model_checkpoint_path)
            self.model.saver.restore(session, ckpt.model_checkpoint_path)
        else:
            print("Created model with fresh parameters.")
            session.run(tf.initialize_all_variables())
            emd_weight = np.random.rand(FLAGS.vocab_size,
                                        FLAGS.emd_size) * 0.16 - 0.08
            f = open(self.du.emd_path, 'r')
            first = True
            for line in f:
                if first:
                    first = False
                    continue
                box = line.split(' ')
                word = box[0]
                box = box[1:FLAGS.emd_size + 1]
                if word in self.vocab:
                    index = self.vocab[word]
                    one_emd = np.array([float(x) for x in box])
                    emd_weight[index, :] = one_emd
            load = self.model.embedding_weight.assign(emd_weight)
            session.run(load)
            print('word embeddings loaded')
        self.train_writer = tf.train.SummaryWriter('../summary', session.graph)

    def ut2ids(self, ut):  # convert utterance indices into lists of word ids
        # returns the id representation of a single dialogue and its candidates, including the batch
        # print(ut)
        if ut is None or len(ut) == 0:
            return None
        result = []
        for i in range(len(ut)):
            temp = []
            temp.append(self.ids_arr[ut[i][0]])
            temp.append(ut[i][1])
            result.append(temp)
        return result

    def ids2ut(self, ids):  # convert word ids back into a text sentence
        if ids is None or len(ids) == 0:
            return None
        ut = []
        for i in range(len(ids)):
            ut.append(self.recab[ids[i]])
        return ' '.join(ut)

    def run_train(self):
        print('Running train op')
        train_set = json.load(open(self.du.train_path, 'r'))
        train_set2 = train_set[:100]  # used to measure the model's score on the training set
        if len(train_set) > FLAGS.max_trainset_size and FLAGS.max_trainset_size != 0:
            train_set = train_set[:FLAGS.max_trainset_size]
        dev_set = json.load(open(self.du.dev_path, 'r'))
        if len(dev_set) > FLAGS.max_devset_size and FLAGS.max_devset_size != 0:
            dev_set = dev_set[:FLAGS.max_devset_size]


        # with tf.device('/cpu:0'):
        #     sess = tf.Session()
        with tf.Session() as sess:
            self.build_model(sess)
            step_time, loss = 0.0, 0.0
            step_count = self.model.global_step.eval()
            previous_losses = []
            while True:
                start_time = time.time()
                samples = []
                for i in range(FLAGS.batch_size):
                    temp = random.choice(train_set)  # randomly pick one sample
                    samples.append(temp)
                sample = self.ut2ids(samples)
                ut_arr, labels = self.model.sample2vec(sample)
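                # sample2vec presumably pads the utterances and builds the label vectors for one batch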
                step_loss, summary = self.model.step_train(
                    sess, ut_arr, labels)
                self.train_writer.add_summary(summary, step_count)

                step_count += 1
                step_time += (time.time() -
                              start_time) / FLAGS.steps_per_checkpoint
                loss += step_loss / FLAGS.steps_per_checkpoint

                if step_count % FLAGS.steps_per_checkpoint == 0:  # gather statistics and save the model
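                    # evaluate on both the dev set and the held-out training slice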
                    for data_set in [dev_set, train_set2]:
                        pat1, pat3, MAP, count = 0.0, 0.0, 0.0, 0.0
                        samples = []
                        labels_cache = []
                        for i in range(len(data_set)):
                            samples.append(data_set[i])
                            labels_cache.append(data_set[i][1])
                        samples = self.ut2ids(samples)
                        ut_arr, labels = self.model.sample2vec(samples)
                        scores = self.model.step_test(sess, ut_arr, labels)
                        hit_count, all_count = self.cal_score(
                            scores, labels_cache)
                        print('top2 hit:\t%.4f' % (hit_count / all_count))
                    print(
                        "global step %d learning rate %.4f step-time %.4f average loss %.4f"
                        % (self.model.global_step.eval(),
                           self.model.learning_rate.eval(), step_time, loss))
                    if len(previous_losses) > 2 and loss > max(
                            previous_losses[-3:]):
                        sess.run(self.model.learning_rate_decay_op)
                    previous_losses.append(loss)
                    checkpoint_path = os.path.join(FLAGS.ckpt_dir, "hred.ckpt")
                    self.model.saver.save(sess,
                                          checkpoint_path,
                                          global_step=self.model.global_step)
                    step_time, loss = 0.0, 0.0

    def cal_score(self, scores, labels):
        # compute the average top-2 accuracy
        #print(scores.shape)
        #print(labels)
        batch_size = scores.shape[0]
        hit_count = 0.0
        all_count = 0.0
        for i in range(batch_size):
            logit = scores[i, :]
            tops = logit.argsort()[-2:][::-1]
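            # argsort is ascending, so the last two indices (reversed) are the top-2 predicted tags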
            #print('logit',tops)
            #print('labels',labels[i])
            tags = set(labels[i])
            for j in range(len(tops)):
                #print(tops[j],logit[tops[j]])
                if tops[j] in tags:
                    hit_count = hit_count + 1
            #for k in tags:
            #print(k,logit[k])
            all_count = all_count + len(tags)
        return hit_count, all_count

    def run_test(self):
        print('Running Test Op')
        # evaluate the model on the test set
        with tf.Session() as sess:
            self.build_model(sess)
            test_set = json.load(open(self.du.test_path, 'r'))
            #test_set = json.load(open(self.du.train_path,'r'))
            cache = []
            label_cache = []
            hit_count, all_count = 0.0, 0.0
            print('length of test_set:', len(test_set))
            for i in range(len(test_set)):
                cache.append(test_set[i])
                label_cache.append(test_set[i][1])
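                # flush a chunk once 200 samples have accumulated (and the remainder at the end)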
                if (i + 1) % 200 == 0 or i == len(test_set) - 1:
                    samples = self.ut2ids(cache)
                    ut_arr, labels = self.model.sample2vec(samples)
                    scores = self.model.step_test(sess, ut_arr, labels)
                    h, a = self.cal_score(scores, label_cache)
                    hit_count += h
                    all_count += a
                    cache, label_cache = [], []
            print('Test Set Top2 Score:\t%.4f' % (hit_count / all_count))
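
A minimal standalone sketch (not part of any project above) of the ranking metric
computed by cal_score in Example #3, assuming, as that code does, that the true
candidate's score sits at index 0 of each row:

# toy re-implementation of the P@1 / P@3 / reciprocal-rank MAP in cal_score
def cal_score(scores):
    pat1, pat3, MAP = 0.0, 0.0, 0.0
    for row in scores:
        rank = 1 + sum(1 for s in row[1:] if s > row[0])  # 1 + negatives scoring higher
        pat1 += 1 if rank == 1 else 0
        pat3 += 1 if rank <= 3 else 0
        MAP += 1.0 / rank
    return pat1, pat3, MAP, len(scores)

print(cal_score([[0.9, 0.1, 0.3],    # truth ranked 1st
                 [0.2, 0.5, 0.1],    # truth ranked 2nd
                 [0.1, 0.5, 0.9]]))  # truth ranked 3rd
# -> (1.0, 3.0, 1.8333..., 3)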