Example #1
from typing import List, Tuple, Union

import torch
import torch.nn.functional as F

# SequentialRNN, Field, Variable, and to_gpu come from fastai v0.7 / torchtext
# and are assumed to be imported elsewhere in the notebook.
def get_predictions(model: SequentialRNN, input_field: Field,
                    prepared_input: Union[List[str], List[List[str]]],
                    max_n_predictions: int) -> Tuple[Variable, Variable]:
    t = to_gpu(input_field.numericalize(prepared_input, -1))   # ids built on CPU (-1), then moved to GPU
    res, *_ = model(t)
    last_res = res[-1]                                         # logits for the final position
    n_predictions = min(max_n_predictions, last_res.size()[0])
    outputs, labels = torch.topk(last_res, n_predictions)      # top-k logits and their indices
    probs = F.softmax(outputs, dim=-1)                         # normalize the top-k logits
    return probs, labels
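# For context, a hedged usage sketch of get_predictions; `language_model` and
# `field` are hypothetical names for a trained fastai v0.7 language model and
# its torchtext field, not objects defined in this example.
prompt = 'for i in range ('.split()  # pre-tokenized prompt
probs, labels = get_predictions(language_model, field, prompt, max_n_predictions=5)
for p, idx in zip(probs.data, labels.data):
    print(field.vocab.itos[int(idx)], float(p))  # candidate token and its probability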
#     preds = model(inputs)
#     preds = preds.round()
#     num_hit+=torch.eq(preds.squeeze(),targets.squeeze()).sum().item() #data[0]

# print(num_hit/len(test_data)*100)
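# The commented-out fragment above hints at the accuracy computation; a runnable
# version, assuming `test_data` yields (inputs, targets) batches and the model
# outputs sigmoid probabilities, might look like:
num_hit = 0
for inputs, targets in test_data:
    preds = model(inputs).round()  # threshold the sigmoid output at 0.5
    num_hit += torch.eq(preds.squeeze(), targets.squeeze()).sum().item()
print(num_hit / len(test_data) * 100)  # len(test_data) must count examples, not batches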


### test
# test_inputs = ["헐 진짜 개별로다..", "진짜 너무 재밌는 영화다 오랜만에","오..이건 진짜 봐야함", "진짜 쓰레기 같은 영화","노잼","존잼","꾸울잼","핵노잼",'또 보고싶다', '꼬옥 봐야한다.. 진짜..', '나만 보기 아깝다', '돈이 아깝다', '나만 보기 억울하다', '나만 당할 수 없다', '너도 봐야한다', '혼자 본게 정말 후회된다. 이건 꼭 같이 봐야한다.', '재미없어요...', '꾸르르르르르르잼', '꾸르르르잼', '꾸르잼', '이 영화를 보고 암이 나았습니다.']
test_inputs = ['이 영화를 보고 암이 나았습니다.']  # "Watching this movie cured my cancer."


for test_input in test_inputs:
    tokenized = tagger.morphs(test_input)    # morpheme-level tokenization (KoNLPy tagger)
    tokenized = pad_under_five(tokenized)    # pad inputs shorter than five tokens
    input_ = TEXT.numericalize([tokenized], device=DEVICE)
    print(input_)                            # debug: inspect the numericalized tensor
    if USE_CUDA: input_ = input_.cuda()

    prediction = model(input_)
    prediction = prediction.round()          # threshold the sigmoid output at 0.5
    prediction = "긍정" if prediction.data[0][0] == 1 else "부정"  # "positive" / "negative"
    if prediction == "긍정":
        print(test_input, "\033[1;01;36m" + prediction + "\033[0m")  # cyan = positive
        # print(len(tokenized), tokenized)
    else:
        print(test_input, "\033[1;01;31m" + prediction + "\033[0m")  # red = negative
        # print(len(tokenized), tokenized)


# The original fragment starts mid-loop; the epoch/batch loops, zero_grad, and the
# forward pass are reconstructed here (num_epochs, train_iter, and the call signature are assumptions).
for i in range(num_epochs):
    epoch_loss = 0.0
    for src, target in train_iter:       # assumed iterator of (source, target) batches
        optimizer.zero_grad()            # clear gradients left over from the previous step
        outputs = model(src, target)     # assumed forward signature
        loss = criterion(outputs[0].view(-1, len(src_field.vocab)),
                         target.view(-1))
        loss.backward()
        # torch.nn.utils.clip_grad_norm_(model.parameters(), 2.0)
        optimizer.step()

        epoch_loss += loss.item()
    print(i, epoch_loss)

# In[ ]:

source_text = [
    "manual and gaze input cascaded ( magic ) pointing . this work explores a new direction in utilizing eye gaze for computer input . gaze tracking has long been considered as an alternative or potentially superior pointing method for computer input . we believe that many fundamental limitations exist with traditional gaze pointing . in particular , it is unnatural to overload a perceptual channel such as vision with a motor control task . we therefore propose an alternative approach , dubbed magic ( manual and gaze input cascaded ) pointing . with such an approach , pointing appears to the user to be a manual task , used for fine manipulation and selection . however , a large portion of the cursor movement is eliminated by warping the cursor to the eye gaze area , which encompasses the target . two specific magic pointing techniques , one conservative and one liberal , were designed , analyzed , and implemented with an eye tracker we developed . they were then tested in a pilot study . this early stage exploration showed that the magic pointing techniques might offer many advantages , including reduced physical effort and fatigue as compared to traditional manual pointing , greater accuracy and naturalness than traditional gaze pointing , and possibly faster speed than manual pointing . the pros and cons of the two techniques are discussed in light of both performance data and subjective reports"
]
inp = src_field.tokenize(source_text[0])
inp = src_field.numericalize([inp]).to(device)

# In[ ]:

result = []
enc_output = model.encoder(inp)

# In[ ]:

res = model.decoder.infer_rnn_auto_regressive(
    encoder_output_dict=enc_output, vocab=src_field.vocab,
    length=3).view(-1).detach().cpu().numpy()   # generate 3 token ids, flatten to numpy
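# infer_rnn_auto_regressive is specific to this model, but conceptually it is
# greedy auto-regressive decoding: each step's most likely token is fed back as
# the next input. A minimal sketch of the idea (step_fn is a hypothetical
# stand-in for one decoder step, not the model's actual API):
def greedy_decode(step_fn, start_token, hidden, length):
    tokens = []
    inp = start_token
    for _ in range(length):
        logits, hidden = step_fn(inp, hidden)  # one decoder step -> vocab logits, new state
        inp = logits.argmax(dim=-1)            # greedily pick the most likely token id
        tokens.append(int(inp))
    return tokens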

# In[2]:

for i in res:
    print(src_field.vocab.itos[i])  # assumed: map each predicted id back to its token string