Example 1
def runtest():
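    # run the slot model on the test set, strip padding, and map label ids back to strings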
    test_predicts = model_slots.predict([X_test])
    testPred = predict_classes(test_predicts)
    testPred = [
        [idx2labels[x] for x in remove_padding(c, d)]
        for c, d in zip(testPred, X_test)
    ]
    print(conlleval(testPred, testAct, wordsTest,
                    output_dir + 'current.test.txt'))

    intentPred = predict_classes(model_intent.predict([X_test])[0])
    print "Test Accuracy : " + str(accuracy(intentPred, testL))
Example 2
 def evaluate(self, label_list, seq_len_list, data, epoch=None):
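     # invert tag2label so numeric labels map back to tag strings (label 0 is deliberately kept as-is)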
     label2tag = {}
     for tag, label in self.tag2label.items():
         label2tag[label] = tag if label != 0 else label
     model_predict = []
     for label_, (sent, tag) in zip(label_list, data):
         tag_ = [label2tag[label__] for label__ in label_]
         sent_res = []
         if len(label_) != len(sent):  # debug: prediction and sentence lengths should match
             print(sent)
             print(len(label_))
             print(tag)
         for i in range(len(sent)):
             sent_res.append(
                 [sent[i], tag[i], tag_[i]]
             )  # each sent_res row is [char, gold tag, predicted tag], e.g. [['中','B-LOC','0'],['国','I-LOC','I-LOC'],...]
         model_predict.append(sent_res)
     epoch_num = str(epoch + 1) if epoch is not None else 'test'
     # write the model's predictions to this file; a new one is written each epoch
     label_path = os.path.join(self.result_path, 'label_' + epoch_num)
     # write the evaluation metrics to this file; a new one is written each epoch
     metric_path = os.path.join(self.result_path,
                                'result_metric_' + epoch_num)
     for line in conlleval(model_predict, label_path, metric_path):
         print(line)
Example 3
def visualize(args):
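    # gather the conlleval score after each epoch and plot accuracy over epochs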
    score = []
    x = [i+1 for i in range(args.epoch_num)]
    for eidx in x:
        score.append(conlleval(eidx))
    plt.plot(x, score, 'b')
    plt.xlabel("epoch_num")
    plt.ylabel("accuracy(%)")
    plt.title("Accuracy")
    plt.show()
Example 4
def evaluate(data, sess, model, epoch=None):
    labels_pred = []
    label_references = []
    label2tag = {label: tag for tag, label in hp.tag2label.items()}
    for seqs, labels, seqs_len in get_batch(data,
                                            hp.batch_size,
                                            hp.vocab_path,
                                            hp.tag2label,
                                            shuffle=False):
        _logits, _transition_params = sess.run(
            [logits, transition_params],
            feed_dict={
                model.sent_input: seqs,
                model.label: labels,
                model.sequence_length: seqs_len
            })
        label_references.extend(labels)
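        # Viterbi-decode each sentence's emission scores with the learned CRF transition matrix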
        for logit, seq_len in zip(_logits, seqs_len):
            viterbi_seq, _ = tf.contrib.crf.viterbi_decode(
                logit[:seq_len], _transition_params)
            labels_pred.append(viterbi_seq)
    model_pred = []
    epoch_num = str(epoch) if epoch is not None else 'test'
    if not os.path.exists(hp.result_path):
        os.mkdir(hp.result_path)
    with open(os.path.join(hp.result_path, 'results_epoch_' + epoch_num),
              'w',
              encoding='utf-8') as fw:
        for label_pred, (sent, tag) in zip(labels_pred, data):
            fw.write(''.join(sent) + '\n')
            fw.write(''.join(tag) + '\n')
            tag_pred = [label2tag[i] for i in label_pred]
            fw.write(''.join(tag_pred) + '\n')
            sent_res = []
            if len(label_pred) != len(sent):
                print(sent)
                print(len(label_pred))
                print(len(sent))
            for i in range(len(sent)):
                sent_res.append([sent[i], tag[i], tag_pred[i]])
            model_pred.append(sent_res)
    result = conlleval(model_pred)
    print(result)
Example 5
def train(args):
    logger = logging.getLogger("hit-cosem-2018")
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)
    atis_data = AtisDataSet(args.max_len, args.slot_name_file, train_files=args.train_files, test_files=args.test_files)
    logger.info('Converting text into ids...')
    atis_data.convert_to_ids(vocab)
    atis_data.dynamic_padding(vocab.token2id[vocab.pad_token])
    train_data, test_data = atis_data.get_numpy_data()
    train_data = AtisLoader(train_data)
    train_loader = DataLoader(dataset=train_data, batch_size=args.batch_size, shuffle=True)
    test_data = AtisLoader(test_data)
    test_loader = DataLoader(dataset=test_data, batch_size=args.batch_size, shuffle=False)

    model = biLSTM(args, vocab.size(), len(atis_data.slot_names))
    optimizer = model.get_optimizer(args.learning_rate, args.embed_learning_rate, args.weight_decay)
    loss_fn = torch.nn.CrossEntropyLoss()

    score = []
    losses = []
    for eidx in range(1, args.epoch_num + 1):
        for bidx, data in enumerate(train_loader, 1):
            optimizer.zero_grad()
            sentences, labels = data
            if args.has_cuda:
                sentences, labels = Variable(sentences).cuda(), Variable(labels).cuda()
            else:
                sentences, labels = Variable(sentences), Variable(labels)
            output = model(sentences)
            labels = labels.view(-1)
            loss = loss_fn(output, labels)
            loss.backward()
            optimizer.step()

            _, predicted = torch.max(output.data, 1)
            predicted = predicted.cpu().numpy()
            labels = labels.data.cpu().numpy()
            losses.append(loss.item())
            logger.info('epoch: {} batch: {} loss: {:.4f} acc: {:.4f}'.format(
                eidx, bidx, loss.item(), accuracy(predicted, labels)))
        if eidx >= args.begin_epoch:
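            # once begin_epoch is reached, unfreeze the embedding learning rate and decay both rates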
            if args.embed_learning_rate == 0:
                args.embed_learning_rate = 2e-4
            elif args.embed_learning_rate > 0:
                args.embed_learning_rate *= args.lr_decay
                if args.embed_learning_rate <= 1e-5:
                    args.embed_learning_rate = 1e-5
            args.learning_rate = args.learning_rate * args.lr_decay
            optimizer = model.get_optimizer(args.learning_rate,
                                            args.embed_learning_rate,
                                            args.weight_decay)

        logger.info('do eval on test set...')
        f = open("./result/result_epoch"+str(eidx)+".txt", 'w', encoding='utf-8')
        for data in test_loader:
            sentences, labels = data
            if args.has_cuda:
                sentences, labels = Variable(sentences).cuda(), Variable(labels).cuda()
            else:
                sentences, labels = Variable(sentences), Variable(labels)  # batch_size * max_len
            output = model(sentences)

            sentences = sentences.data.cpu().numpy().tolist()
            sentences = [vocab.recover_from_ids(remove_pad(s, vocab.token2id[vocab.pad_token]))
                         for s in sentences]
            labels = labels.data.cpu().numpy().tolist()
            _, predicted = torch.max(output.data, 1)
            predicted = predicted.view(-1, args.max_len).cpu().numpy().tolist()
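            # build (word, gold slot, predicted slot) triples in the format conlleval expects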
            rows = [zip(s,
                        map(lambda x: atis_data.slot_names[x], labels[i][:len(s)]),
                        map(lambda x: atis_data.slot_names[x], predicted[i][:len(s)]))
                    for i, s in enumerate(sentences)]
            for row in rows:
                for z in row:
                    z = list(map(str, z))
                    f.write(' '.join(z) + '\n')
        f.close()
        score.append(conlleval(eidx))
        torch.save(model.state_dict(), args.save_path+"biLSTM_epoch"+str(eidx)+".model")

    max_score_eidx = score.index(max(score))+1
    logger.info('epoch {} gets max score.'.format(max_score_eidx))
    os.system('perl ./eval/conlleval.pl < ./result/result_epoch' + str(max_score_eidx) + '.txt')
    
    x = [i + 1 for i in range(len(losses))]
    plt.plot(x, losses, 'r')
    plt.xlabel("time_step")
    plt.ylabel("loss")
    plt.title("CrossEntropyLoss")
    plt.show()
Example 6
def test_lstm(**kwargs):
    """
    Wrapper function for training and testing LSTM

    :type fold: int
    :param fold: fold index of the ATIS dataset, from 0 to 4.

    :type lr: float
    :param lr: learning rate used (factor for the stochastic gradient).

    :type nepochs: int
    :param nepochs: maximal number of epochs to run the optimizer.

    :type win: int
    :param win: number of words in the context window.

    :type nhidden: int
    :param nhidden: number of hidden units.

    :type emb_dimension: int
    :param emb_dimension: dimension of word embedding.

    :type verbose: boolean
    :param verbose: to print out epoch summary or not to.

    :type decay: boolean
    :param decay: decay on the learning rate if improvement stop.

    :type savemodel: boolean
    :param savemodel: save the trained model or not.

    :type normal: boolean
    :param normal: normalize word embeddings after each update or not.

    :type folder: string
    :param folder: path to the folder where results will be stored.

    """
    # process input arguments
    param = {
        'experiment': 'standard',
        'lr': 0.1,
        'verbose': True,
        'decay': True,
        'win': 3,
        'nhidden': 300,
        'nhidden2': 300,
        'seed': 345,
        'emb_dimension': 90,
        'nepochs': 40,
        'savemodel': False,
        'normal': True,
        'layer_norm': False,
        'minibatch_size': 4978,
        'folder': '../result'
    }

    param_diff = set(kwargs.keys()) - set(param.keys())
    if param_diff:
        raise KeyError("invalid arguments: " + str(tuple(param_diff)))
    param.update(kwargs)

    if param['verbose']:
        for k, v in param.items():
            print("%s: %s" % (k, v))

    # create result folder if not exists
    check_dir(param['folder'])

    # load the dataset
    print('... loading the dataset')
    train_set, valid_set, test_set, dic = load_data(3)  # ATIS fold 3 (hardcoded)

    train_set = list(train_set)
    valid_set = list(valid_set)

    # Add validation set to train set
    for i in range(3):
        train_set[i] += valid_set[i]

    # create mapping from index to label, and index to word
    idx2label = dict((k, v) for v, k in dic['labels2idx'].items())
    idx2word = dict((k, v) for v, k in dic['words2idx'].items())

    # unpack dataset
    train_lex, train_ne, train_y = train_set
    test_lex, test_ne, test_y = test_set

    n_trainbatches = len(train_lex) // param['minibatch_size']

    print("Sentences in train: %d, Words in train: %d" %
          (count_of_words_and_sentences(train_lex)))
    print("Sentences in test: %d, Words in test: %d" %
          (count_of_words_and_sentences(test_lex)))

    vocsize = len(dic['words2idx'])
    nclasses = len(dic['labels2idx'])
    nsentences = len(train_lex)

    groundtruth_test = [[idx2label[x] for x in y] for y in test_y]
    words_test = [[idx2word[x] for x in w] for w in test_lex]

    # instantiate the model
    numpy.random.seed(param['seed'])
    random.seed(param['seed'])

    print('... building the model')
    lstm = LSTM(n_hidden=param['nhidden'],
                n_hidden2=param['nhidden2'],
                n_out=nclasses,
                n_emb=vocsize,
                dim_emb=param['emb_dimension'],
                cwind_size=param['win'],
                normal=param['normal'],
                layer_norm=param['layer_norm'],
                experiment=param['experiment'])

    # train with early stopping on validation set
    print('... training')
    best_f1 = -numpy.inf
    param['clr'] = param['lr']
    for e in range(param['nepochs']):

        # shuffle
        shuffle([train_lex, train_ne, train_y], param['seed'])

        param['ce'] = e
        tic = timeit.default_timer()

        for minibatch_index in range(n_trainbatches):
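            # train on this minibatch, then score the test set with conlleval.pl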

            for i in range(minibatch_index * param['minibatch_size'],
                           (1 + minibatch_index) * param['minibatch_size']):
                x = train_lex[i]
                y = train_y[i]
                res = lstm.train(x, y, param['win'], param['clr'])

            predictions_test = [[
                idx2label[x] for x in lstm.classify(
                    numpy.asarray(contextwin(x, param['win'])).astype('int32'))
            ] for x in test_lex]

            # evaluate on the test set with conlleval.pl
            res_test = conlleval(predictions_test, groundtruth_test,
                                 words_test,
                                 param['folder'] + '/current.test.txt',
                                 param['folder'])

            if res_test['f1'] > best_f1:

                if param['savemodel']:
                    lstm.save(param['folder'])

                best_lstm = copy.deepcopy(lstm)
                best_f1 = res_test['f1']

                if param['verbose']:
                    print(
                        'NEW BEST: epoch %d, minibatch %d/%d, best test F1: %.3f'
                        % (e, minibatch_index + 1, n_trainbatches,
                           res_test['f1']))

                param['tf1'] = res_test['f1']
                param['tp'] = res_test['p']
                param['tr'] = res_test['r']
                param['be'] = e

                os.rename(param['folder'] + '/current.test.txt',
                          param['folder'] + '/best.test.txt')
            else:
                if param['verbose']:
                    print('')

        # learning rate decay if no improvement in 10 epochs
        if param['decay'] and abs(param['be'] - param['ce']) >= 10:
            param['clr'] *= 0.5
            print("Decay happened. New Learning Rate:", param['clr'])
            lstm = best_lstm

        if param['clr'] < 0.00001:
            break

    print('BEST RESULT: epoch', param['be'], 'best test F1', param['tf1'],
          'with the model', param['folder'])

    return lstm, dic
Example 7
        i = 0
        while True:
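            # one epoch per iteration: fit, save a checkpoint, then report valid/test conlleval scores and accuracy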
            model_slots.fit([X_train], [y_train, l_train],
                            batch_size=batch_size,
                            epochs=1,
                            validation_data=([X_valid], [y_valid, l_valid]))
            model_slots.save(output_dir + 'model_slots_intents_' + str(i) +
                             '.ckpt')

            valid_predicts = model_slots.predict([X_valid])
            validPred = predict_classes(valid_predicts[0])
            validPred = [
                [idx2labels[x] for x in remove_padding(c, d)]
                for c, d in zip(validPred, X_valid)
            ]
            print(conlleval(validPred, validAct, wordsValid,
                            output_dir + 'current.valid.txt'))

            print "Valid Accuracy : " + str(
                accuracy(predict_classes(valid_predicts[1]), validL))

            test_predicts = model_slots.predict([X_test])
            testPred = predict_classes(test_predicts[0])
            testPred = [
                [idx2labels[x] for x in remove_padding(c, d)]
                for c, d in zip(testPred, X_test)
            ]
            print(conlleval(testPred, testAct, wordsTest,
                            output_dir + 'current.test.txt'))

            print "Test Accuracy : " + str(
                accuracy(predict_classes(test_predicts[1]), testL))
Example 8
 def evaluate(self, session, data, id2char, id2tag):
     # predict first, then evaluate with conlleval
     prediction = self.predict(session, data, id2char, id2tag)
     return utils.conlleval(prediction)