Example #1
import json

def load_vocab(path):
    # load the saved vocabulary and add reverse (id -> token) mappings
    vocab = json.load(open(path))
    vocab['id2word'] = invert_dict(vocab['word2id'])
    vocab['id2entity'] = invert_dict(vocab['entity2id'])
    vocab['id2relation'] = invert_dict(vocab['relation2id'])
    # vocab['entity2name'] = invert_dict(vocab['name2entity'])
    return vocab
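All of these examples rely on an invert_dict helper defined elsewhere in their projects. A minimal sketch of what such a helper presumably looks like, assuming a one-to-one mapping:

def invert_dict(d):
    # swap keys and values, e.g. {'cat': 3} -> {3: 'cat'}
    return {v: k for k, v in d.items()}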
Example #2
import json

def load_vocab(path):
    vocab = json.load(open(path))
    vocab['word_idx_to_token'] = invert_dict(vocab['word_token_to_idx'])
    vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
    vocab['kb_idx_to_token'] = invert_dict(vocab['kb_token_to_idx'])
    vocab['predicate_idx_to_token'] = invert_dict(vocab['predicate_token_to_idx'])
    return vocab
Example #3
import json

def load_vocab(path):
    vocab = json.load(open(path))
    vocab['word_idx_to_token'] = invert_dict(vocab['word_token_to_idx'])
    vocab['function_idx_to_token'] = invert_dict(
        vocab['function_token_to_idx'])
    vocab['answer_idx_to_token'] = invert_dict(vocab['answer_token_to_idx'])
    return vocab
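For context, a hypothetical call site (the file name and index values below are made up) showing why the reverse mappings are built: model outputs are integer indices, and the *_idx_to_token dictionaries decode them back into tokens.

vocab = load_vocab('vocab.json')   # hypothetical path
predicted_indices = [5, 12, 3]     # made-up model output
answers = [vocab['answer_idx_to_token'][i] for i in predicted_indices]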
Example #4
def validate_with_david_generated_program(model, data, device, pretrained_dir):
    program_generator = load_program_generator(os.path.join(pretrained_dir, 'program_generator.pt')).to(device)
    david_vocab = json.load(open(os.path.join(pretrained_dir, 'david_vocab.json')))
    david_vocab['program_idx_to_token'] = invert_dict(david_vocab['program_token_to_idx'])
    details = { cat:[0,0] for cat in {'count', 'compare number', 'exist', 'query', 'compare attribute'}}

    count, correct = 0, 0
    model.eval()
    print('validate...')
    for batch in tqdm(data, total=len(data)):
        answers, questions, gt_programs, gt_program_inputs, features, edge_vectors = [todevice(x, device) for x in batch]
        programs, program_inputs = [], []
        # generate program using david model for each question
        for i in range(questions.size(0)):
            question_str = []
            for j in range(questions.size(1)):
                word = data.vocab['question_idx_to_token'][questions[i,j].item()]
                if word == '<START>': continue
                if word == '<END>': break
                question_str.append(word)
            question_str = ' '.join(question_str) # question string
            david_program = generate_single_program(question_str, program_generator, david_vocab, device)
            david_program = [david_vocab['program_idx_to_token'][i.item()] for i in david_program.squeeze()]
            # convert david program to ours. return two index lists
            program, program_input = convert_david_program_to_mine(david_program, data.vocab)
            programs.append(program)
            program_inputs.append(program_input)
        # padding
        max_len = max(len(p) for p in programs)
        for i in range(len(programs)):
            while len(programs[i]) < max_len:
                programs[i].append(data.vocab['program_token_to_idx']['<NULL>'])
                program_inputs[i].append(data.vocab['question_token_to_idx']['<NULL>'])
        # to tensor
        programs = torch.LongTensor(programs).to(device)
        program_inputs = torch.LongTensor(program_inputs).to(device)

        logits = model(programs, program_inputs, features, edge_vectors)
        predicts = logits.max(1)[1]
        correct += torch.eq(predicts, answers).long().sum().item()
        count += answers.size(0)
        # details
        for i in range(len(answers)):
            for j in range(len(gt_programs[i])):
                program = data.vocab['program_idx_to_token'][gt_programs[i][j].item()]
                if program in ['<NULL>', '<START>', '<END>', '<UNK>', 'unique']:
                    continue
                cat = map_program_to_cat[program]
                details[cat][0] += int(predicts[i].item()==answers[i].item())
                details[cat][1] += 1
                break
    acc = correct / count
    details = { k:(v[0]/v[1]) for k,v in details.items() }
    return acc, details
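Examples 4 and 5 also depend on several project helpers that are not shown here (load_program_generator, todevice, generate_single_program, convert_david_program_to_mine, map_program_to_cat, and the same invert_dict as above). As one illustration, a minimal sketch of a todevice helper, an assumption rather than the project's actual code, that moves tensors and nested lists of tensors onto the target device:

import torch

def todevice(x, device):
    # recursively move tensors (and lists/tuples of tensors) onto the device
    if isinstance(x, torch.Tensor):
        return x.to(device)
    if isinstance(x, (list, tuple)):
        return [todevice(item, device) for item in x]
    return x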
Example #5
def test_with_david_generated_program(model, data, device, pretrained_dir):
    program_generator = load_program_generator(os.path.join(pretrained_dir, 'program_generator.pt')).to(device)
    david_vocab = json.load(open(os.path.join(pretrained_dir, 'david_vocab.json')))
    david_vocab['program_idx_to_token'] = invert_dict(david_vocab['program_token_to_idx'])
    results = [] 
    model.eval()
    for batch in tqdm(data, total=len(data)):
        _, questions, gt_programs, gt_program_inputs, features, edge_vectors = [todevice(x, device) for x in batch]
        programs, program_inputs = [], []
        # generate program using david model for each question
        for i in range(questions.size(0)):
            question_str = []
            for j in range(questions.size(1)):
                word = data.vocab['question_idx_to_token'][questions[i,j].item()]
                if word == '<START>': continue
                if word == '<END>': break
                question_str.append(word)
            question_str = ' '.join(question_str) # question string
            david_program = generate_single_program(question_str, program_generator, david_vocab, device)
            david_program = [david_vocab['program_idx_to_token'][i.item()] for i in david_program.squeeze()]
            # convert david program to ours. return two index lists
            program, program_input = convert_david_program_to_mine(david_program, data.vocab)
            programs.append(program)
            program_inputs.append(program_input)
        # padding
        max_len = max(len(p) for p in programs)
        for i in range(len(programs)):
            while len(programs[i]) < max_len:
                programs[i].append(data.vocab['program_token_to_idx']['<NULL>'])
                program_inputs[i].append(data.vocab['question_token_to_idx']['<NULL>'])
        # to tensor
        programs = torch.LongTensor(programs).to(device)
        program_inputs = torch.LongTensor(program_inputs).to(device)

        logits = model(programs, program_inputs, features, edge_vectors)
        predicts = logits.max(1)[1]
        for predict in predicts: # note: the test questions must not be shuffled!
            results.append(data.vocab['answer_idx_to_token'][predict.item()])
    return results
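A hypothetical driver for Example 5 (model, test_loader, the pretrained directory and the output path are all assumptions): run the test pass and write one predicted answer token per line, in the same order as the un-shuffled test questions.

import torch

device = 'cuda' if torch.cuda.is_available() else 'cpu'
results = test_with_david_generated_program(model, test_loader, device, 'pretrained/')
with open('predictions.txt', 'w') as f:
    f.write('\n'.join(results) + '\n')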