def _predict():
    print('request incoming...')
    sentence = []
    try:
        # Tokenize the request text, lower-case it, and map each word to
        # its vocabulary index, terminating the sequence with EOS.
        input_string = word_tokenize(request.json["text"].lower())
        sentence.append([VOCAB[w] for w in input_string] + [VOCAB['EOS']])
        dummy_label = LongVar([0])
        sentence = LongVar(sentence)
        input_ = [0], (sentence, ), (0, )
        output, attn = model(input_)
        #print(LABELS[output.max(1)[1]], attn)
        return jsonify({
            "result": {
                'sentence': input_string,
                # Per-token attention weights; the trailing EOS weight is dropped.
                'attn': ['{:0.4f}'.format(i) for i in
                         attn.squeeze().data.cpu().numpy().tolist()[:-1]],
                # Class probabilities, recovered from log-probabilities.
                'probs': ['{:0.4f}'.format(i) for i in
                          output.exp().squeeze().data.cpu().numpy().tolist()],
                'label': LABELS[output.max(1)[1].squeeze().data.cpu().numpy()]
            }
        })

    except Exception as e:
        print(e)
        return jsonify({"result": "model failed"})
Example #2
    
    # Build the vocabulary from word frequencies; the second argument is
    # assumed to be the initial list of special tokens (EOS, PAD, ...).
    VOCAB = Vocab(vocabulary, VOCAB, freq_threshold=100)
    pprint(VOCAB.word2index)
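    # The Vocab class used above might look like the following sketch; the
    # constructor signature and threshold behaviour are inferred from the
    # call site, not taken from the source project.
    class Vocab:
        def __init__(self, counter, special_tokens, freq_threshold=100):
            # Special tokens come first, then words frequent enough to keep.
            self.word2index = {tok: i for i, tok in enumerate(special_tokens)}
            for word, freq in counter.items():
                if freq >= freq_threshold and word not in self.word2index:
                    self.word2index[word] = len(self.word2index)
            self.index2word = {i: w for w, i in self.word2index.items()}

        def __getitem__(self, word):
            # Unknown words fall back to the UNK index when one exists.
            return self.word2index.get(word, self.word2index.get('UNK', 0))

        def __len__(self):
            return len(self.word2index)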
    if 'train' in sys.argv:
        labelled_samples = [d for d in dataset if len(d.a) > 0] #[:100]
        pivot = int(Config().split_ratio * len(labelled_samples))
        random.shuffle(labelled_samples)
        train_set, test_set = labelled_samples[:pivot], labelled_samples[pivot:]
        
        # Sort by descending combined length so each batch groups
        # similarly sized samples and wastes less padding.
        train_set = sorted(train_set, key=lambda x: -len(x.a + x.story))
        test_set = sorted(test_set, key=lambda x: -len(x.a + x.story))
        exp_image = experiment(VOCAB, dataset, datapoints=[train_set, test_set])
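        # The descending-length sort above suggests length-bucketed batching;
        # a sketch (assumed helper, not from the source) of the padding step
        # such batching typically needs:
        def pad_batch(seqs, pad_index):
            maxlen = max(len(s) for s in seqs)
            return [s + [pad_index] * (maxlen - len(s)) for s in seqs]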
        
    if 'predict' in sys.argv:
        model = BiLSTMDecoderModel(Config(), len(VOCAB), len(LABELS))
        if Config().cuda: model = model.cuda()
        # Load the trained weights saved as '<SELF_NAME>.pth'.
        model.load_state_dict(torch.load('{}.{}'.format(SELF_NAME, 'pth')))
        start_time = time.time()
        strings = sys.argv[2]
        
        # Map tokens to indices and append the PAD marker; the entity marker
        # indices are passed to the model alongside the sentence.
        s = [WORD2INDEX[i] for i in word_tokenize(strings)] + [WORD2INDEX['PAD']]
        e1, e2 = [WORD2INDEX['ENTITY1']], [WORD2INDEX['ENTITY2']]
        output = model(s, e1, e2)
        output = output.data.max(dim=-1)[1].cpu().numpy()
        label = LABELS[output[0]]
        print(label)

        duration = time.time() - start_time
        print(duration)
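        # Assumed invocation: python <script> predict "some input sentence".
        # Note that WORD2INDEX[i] raises KeyError for out-of-vocabulary words;
        # if WORD2INDEX carries an UNK token, a safer lookup would be:
        #   s = [WORD2INDEX.get(i, WORD2INDEX['UNK']) for i in word_tokenize(strings)]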
        # Regenerate the experiment visualisation; note that train_set and
        # test_set are only defined when the 'train' branch above has run
        # in the same invocation.
        exp_image = experiment(
            config,
            ROOT_DIR,
            model,
            VOCAB,
            LABELS,
            datapoints=[train_set, train_set + test_set, train_set + test_set])

    if 'predict' in sys.argv:
        print('=========== PREDICTION ==============')
        model.eval()
        count = 0
        while True:
            count += 1
            sentence = []
            input_string = word_tokenize(input('?').lower())
            sentence.append([VOCAB[w] for w in input_string] + [VOCAB['EOS']])
            dummy_label = LongVar([0])
            sentence = LongVar(sentence)
            input_ = [0], (sentence, ), (0, )
            output, attn = model(input_)

            print(LABELS[output.max(1)[1]])

            if 'show_plot' in sys.argv or 'save_plot' in sys.argv:
                nwords = len(input_string)

                # matplotlib is imported lazily, only when a plot is requested.
                from matplotlib import pyplot as plt
                plt.figure(figsize=(20, 10))
                plt.bar(range(nwords + 1), attn.squeeze().data.cpu().numpy())
                plt.title('{}\n{}'.format(output.exp().tolist(),
                                          LABELS[output.max(1)[1]]))
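                # A sketch of the remaining display/save steps; the tick
                # labels and output filename are assumptions, not from the
                # original code.
                plt.xticks(range(nwords + 1), input_string + ['EOS'], rotation=45)
                if 'show_plot' in sys.argv:
                    plt.show()
                if 'save_plot' in sys.argv:
                    plt.savefig('attention_{}.png'.format(count))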