Example #1
def sample_sequence(source, bert_tokenizer, model, bert_model, gpt_tokenizer, args, current_output=None):
    special_tokens_ids = gpt_tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance = build_input_from_segments(source, current_output, bert_tokenizer, gpt_tokenizer, with_eos=False)

        input_ids = torch.tensor(instance["source_ids"], device=args.device).unsqueeze(0)
        target_ids = torch.tensor(instance["target_ids"], device=args.device).unsqueeze(0)

        encoded_layers, _ = bert_model(input_ids)
        logits = model(target_ids, encoded_layers)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn("Warning: model generating special token with probability 1.")
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
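All of the snippets on this page rely on a top_filtering helper that is not shown. As a point of reference, a minimal sketch of such a helper, following the usual top-k / nucleus (top-p) filtering pattern from the huggingface transfer-learning-conv-ai examples (the exact name and signature differ between repositories, e.g. the filter_tokens variant in Example #5), could look like this:

import torch
import torch.nn.functional as F

def top_filtering(logits, top_k=0, top_p=0.9, threshold=-float('Inf'), filter_value=-float('Inf')):
    """Filter a 1-D tensor of logits using top-k and/or nucleus (top-p) filtering."""
    assert logits.dim() == 1  # the callers above pass the logits of a single position
    top_k = min(top_k, logits.size(-1))
    if top_k > 0:
        # remove every token whose logit is below the k-th largest logit
        indices_to_remove = logits < torch.topk(logits, top_k)[0][..., -1, None]
        logits[indices_to_remove] = filter_value
    if top_p > 0.0:
        # keep the smallest set of tokens whose cumulative probability exceeds top_p
        sorted_logits, sorted_indices = torch.sort(logits, descending=True)
        cumulative_probs = torch.cumsum(F.softmax(sorted_logits, dim=-1), dim=-1)
        sorted_indices_to_remove = cumulative_probs > top_p
        # shift right so that the first token above the threshold is kept
        sorted_indices_to_remove[..., 1:] = sorted_indices_to_remove[..., :-1].clone()
        sorted_indices_to_remove[..., 0] = 0
        indices_to_remove = sorted_indices[sorted_indices_to_remove]
        logits[indices_to_remove] = filter_value
    indices_to_remove = logits < threshold
    logits[indices_to_remove] = filter_value
    return logits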
def predict_next_word(personality,
                      history,
                      tokenizer,
                      model,
                      args,
                      current_output=None):

    # import pdb; pdb.set_trace()
    instance, sequence = build_input_from_segments(personality,
                                                   history,
                                                   current_output,
                                                   tokenizer,
                                                   with_eos=False)

    input_ids = torch.tensor(instance["input_ids"],
                             device=args.device).unsqueeze(0)
    token_type_ids = torch.tensor(instance["token_type_ids"],
                                  device=args.device).unsqueeze(0)

    logits = model(input_ids, token_type_ids=token_type_ids)

    if isinstance(logits, tuple):
        logits = logits[0]

    # logits = logits[0, -1, :] / args.temperature
    # migration notes: logits is a single value tuple. logits -> logits[0]
    logits = logits[0, -1, :] / args.temperature

    logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
    probs = F.softmax(logits, dim=-1)

    return probs
Example #3
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []

    for i in range(args.get("max_length")):
        instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)

        input_ids = torch.tensor(instance["input_ids"], device=args.get("device")).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.get("device")).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.get("temperature")
        logits = top_filtering(logits, top_k=args.get("top_k"), top_p=args.get("top_p"))
        probs = F.softmax(logits, dim=-1)

        prev = torch.multinomial(probs, 1)
        if i < args.get("min_length") and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn("Warning: model generating special token with probability 1.")
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):
    #SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]
    #special_tokens_ids=[2,1,32008,32009,0]
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    print(special_tokens_ids)
    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance, _ = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)

        input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn("Warning: model generating special token with probability 1.")
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
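Most of these examples also depend on a build_input_from_segments helper that packs the persona, the dialogue history and the partial reply into input_ids and token_type_ids. Its signature differs between the variants above (some take extra tokenizers or a SPECIAL_TOKENS list, some return (instance, sequence) instead of just instance), so the following is only a rough sketch of the common persona-chat version, shown to illustrate which dictionary keys the samplers expect:

from itertools import chain

SPECIAL_TOKENS = ["<bos>", "<eos>", "<speaker1>", "<speaker2>", "<pad>"]

def build_input_from_segments(persona, history, reply, tokenizer, with_eos=True):
    """Concatenate persona sentences, history utterances and the (partial) reply into one sequence."""
    bos, eos, speaker1, speaker2 = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS[:-1])
    sequence = [[bos] + list(chain(*persona))] + history + [reply + ([eos] if with_eos else [])]
    # prepend alternating <speaker1>/<speaker2> tokens to every utterance after the persona segment
    sequence = [sequence[0]] + [[speaker2 if (len(sequence) - i) % 2 else speaker1] + s
                                for i, s in enumerate(sequence[1:])]
    instance = {
        "input_ids": list(chain(*sequence)),
        # token_type_ids mark which speaker each input position belongs to
        "token_type_ids": [speaker2 if i % 2 else speaker1 for i, s in enumerate(sequence) for _ in s],
    }
    return instance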
Example #5
def sample_sequence(feature,
                    background,
                    tokenizer,
                    model,
                    current_output=None):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []
    for i in range(20):  # 20 tokens max output per line
        accumulator = build_input_from_segments(feature,
                                                background,
                                                current_output,
                                                tokenizer,
                                                with_eos=False)
        inlab = torch.tensor(accumulator["input_ids"],
                             device="cpu").unsqueeze(0)
        toklab = torch.tensor(accumulator["token_type_ids"],
                              device="cpu").unsqueeze(0)
        m = model(inlab, token_type_ids=toklab)
        if isinstance(m, tuple):
            m = m[0]
        m = m[0, -1, :] / 0.7  # temperature value
        m = filter_tokens(m, filter1=0, filter2=0.9)  # 0 means no filtering
        probs = torch.nn.functional.softmax(m, dim=-1)
        back = torch.multinomial(probs, 1)
        if i < 1 and back.item() in special_tokens_ids:
            while back.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    break
                back = torch.multinomial(probs, num_samples=1)
        if back.item() in special_tokens_ids:
            break
        current_output.append(back.item())
    return current_output
Example #6
def sample_sequence(inst, tokenizer, model, args, past):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    
    inst['original_question'] = inst['question']
    inst['question'] = []
    inst['paragraph_orig'] = inst['paragraph']
    inst['paragraph'] = []  # "past" already holds the hidden states of the paragraph tokens, so assign an empty value
    instance, _ = build_input_from_segments(inst, tokenizer, with_eos=False)
    input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
    token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)
    inst['original_context'] = instance['input_ids']
    logits, past = model(input_ids, token_type_ids=token_type_ids, past=past)

    for i in range(args.max_length):
        #instance, _ = build_input_from_segments(inst, tokenizer, with_eos=False)
        #input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        #token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)

        #logits, _ = model(input_ids, token_type_ids=token_type_ids)

        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        inst['question'].append(prev.item())
        logits, past = model(input_ids, token_type_ids=token_type_ids, past=past)
    inst['paragraph'] = inst['paragraph_orig']
    return inst
Example #7
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):

    special_tokens = {'<bos>', '<eos>', '<speaker1>', '<speaker2>'}
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance, sequence = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)

        input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)

        if isinstance(logits, tuple):  # gpt and gpt2 both return a tuple whose first element is the LM logits
            logits = logits[0]

        logits = logits[0, -1, :] / args.temperature

        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
Example #8
    def next_word_probability(self, partial_out):
        """Return probability distribution over next words given an input and
        partial true output. This is used to calculate the per-word perplexity.
        """
        partial_out_ids = self.tokenizer.encode(' '.join(partial_out))
        instance = build_input_from_segments(self.persona,
                                             self.history,
                                             partial_out_ids,
                                             self.tokenizer,
                                             with_eos=False)

        input_ids = torch.tensor(instance["input_ids"],
                                 device=self.args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=self.args.device).unsqueeze(0)

        with torch.no_grad():
            logits = self.model_checkpoint(input_ids,
                                           token_type_ids=token_type_ids)

        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        probs = F.softmax(logits[0, -1], dim=0)

        dist = {}
        for prefix_id, words in self.prefix2words.items():
            for word, ratio in words.items():
                dist[word] = probs[prefix_id].item() * ratio
        return dist
Example #9
def sample_sequence(personality,
                    history,
                    tokenizer,
                    model,
                    args,
                    current_output=None):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []
    # PPPPP = open('UNsuccessfulOutput.txt', 'w')
    #print('\n'.join([f'{i}: {tokenizer.decode(chain(*p))}' for (i, p) in enumerate(personalities)]), file=PPPPP)
    for i in range(args.max_length):

        instance = build_input_from_segments(personality,
                                             history,
                                             current_output,
                                             tokenizer,
                                             with_eos=False)
        # print("Args Device", args.device)
        # print("Type Args Device", type(args.device))
        # print("Instance input ids ", instance['input_ids'])
        # print("Type Instance input ids ", type(instance['input_ids']))

        input_ids = torch.tensor(instance["input_ids"],
                                 device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(
            probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn(
                        "Warning: model generating special token with probability 1."
                    )
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
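For context, sample_sequence functions like the ones above are usually driven by a small interaction loop similar to the one in the reference interact.py. A hedged sketch (assuming an args.max_history option as in that script; the variable names are illustrative):

history = []
while True:
    raw_text = input(">>> ")
    history.append(tokenizer.encode(raw_text))
    with torch.no_grad():
        out_ids = sample_sequence(personality, history, tokenizer, model, args)
    history.append(out_ids)
    history = history[-(2 * args.max_history + 1):]  # keep only the most recent turns
    print(tokenizer.decode(out_ids, skip_special_tokens=True))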
Example #10
def sample_sequence(topic,
                    source,
                    vocab,
                    tokenizer,
                    model,
                    args,
                    current_output=None):
    bos, eos, speaker1, speaker2 = vocab[vocab.bos_token], vocab[
        vocab.eos_token], vocab[vocab.cls_token], vocab[vocab.sep_token]

    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance = build_input_from_segments(topic,
                                             source,
                                             current_output,
                                             vocab,
                                             tokenizer,
                                             with_eos=False)

        input_ids = torch.tensor(instance["input_ids"], device=args.device)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args.device)

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[-1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(
            probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in [
                bos, eos, speaker1, speaker2
        ]:
            while prev.item() in [bos, eos, speaker1, speaker2]:
                if probs.max().item() == 1:
                    warnings.warn(
                        "Warning: model generating special token with probability 1."
                    )
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)
        elif prev[0].item() in [bos, eos, speaker1, speaker2]:
            break
        current_output.append(prev[0].item())

    return current_output
def sample_sequence(personality,
                    history,
                    tokenizer,
                    model,
                    args,
                    current_output=None):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        authors = [str(i % 2) for i in range(len(history))]
        instance, sequence = build_input_from_segments(personality,
                                                       history,
                                                       current_output,
                                                       authors,
                                                       tokenizer,
                                                       with_eos=False,
                                                       max_len=1024)

        input_ids = torch.tensor(instance["input_ids"],
                                 device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)

        if "gpt2" == args.model:
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = (torch.topk(probs, 1)[1]
                if args.no_sample else torch.multinomial(probs, 1))
        if i < args.min_length and prev.item() in special_tokens_ids:
            # Sometimes the model fails to abide by the min output length; let's try only 20 times to avoid an infinite loop
            for j in range(20):
                if prev.item() in special_tokens_ids:
                    prev = torch.multinomial(probs, num_samples=1)
                else:
                    break

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
Example #12
def sample_sequence(source,
                    bert_model,
                    bert_tokenizer,
                    gpt_model,
                    gpt_vocab,
                    args,
                    current_output=None):
    bos, eos = gpt_vocab[gpt_vocab.bos_token], gpt_vocab[gpt_vocab.eos_token]
    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance = build_input_from_segments(source,
                                             current_output,
                                             bert_tokenizer,
                                             gpt_vocab,
                                             with_eos=False)

        source_ids = torch.tensor([instance["source_ids"]], device=args.device)
        target_ids = torch.tensor(instance["target_ids"], device=args.device)

        #logits = model(input_ids, token_type_ids=token_type_ids)
        encoded_layers, pooled_output = bert_model(source_ids)
        logits = gpt_model(target_ids, encoded_layers[-1])
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[-1, :] / args.temperature

        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(
            probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in [bos, eos]:
            while prev.item() in [bos, eos]:
                if probs.max().item() == 1:
                    warnings.warn(
                        "Warning: model generating special token with probability 1."
                    )
                    break  # avoid infinitely looping over special token
                elif torch.multinomial(probs, 1).item() in [bos, eos]:
                    break
                prev = torch.multinomial(probs, num_samples=1)
        elif prev[0].item() in [bos, eos]:
            break
        current_output.append(prev[0].item())
    return current_output
Example #13
    def act(self):
        reply = {}

        if self.args.eval_type == "hits@1" and len(self.candidates) > 0:
            instances = defaultdict(list)
            for candidate, _ in self.candidates:
                instance, _ = build_input_from_segments(
                    self.persona, self.history, candidate, self.tokenizer)
                for input_name, input_array in instance.items():
                    instances[input_name].append(input_array)

            inputs = pad_dataset(instances,
                                 padding=self.special_tokens_ids[-1])

            tensor_inputs = {}
            for input_name in ["input_ids", "mc_token_ids", "token_type_ids"]:
                tensor = torch.tensor(inputs[input_name],
                                      device=self.args.device)
                tensor = tensor.view((-1, len(self.candidates)) +
                                     tensor.shape[1:])
                tensor_inputs[input_name] = tensor

            with torch.no_grad():
                _, mc_logits = self.model_checkpoint(**tensor_inputs)

            val, ind = torch.sort(mc_logits[0], descending=True)

            ypred = self.candidates[ind[0].item()][1]  # match
            tc = []
            for j in range(len(self.candidates)):
                tc.append(self.candidates[ind[j].item()][1])
            reply = {'text': ypred, 'text_candidates': tc}
        else:
            # We are in interactive or f1 evaluation mode => just sample
            with torch.no_grad():
                out_ids = sample_sequence(self.persona, self.history,
                                          self.tokenizer,
                                          self.model_checkpoint,
                                          self.args)  # YW: TODO: out_ids, _?
            out_text = self.tokenizer.decode(
                out_ids,
                skip_special_tokens=True,
                clean_up_tokenization_spaces=(self.args.eval_type != 'f1'))
            # print('out_text:', out_text)
            reply = {'text': out_text}

        return reply
def sample_sequence(personality,
                    history,
                    tokenizer,
                    model,
                    args,
                    SPECIAL_TOKENS,
                    current_output=None):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)

    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance, sequence = build_input_from_segments(personality,
                                                       history,
                                                       current_output,
                                                       tokenizer,
                                                       SPECIAL_TOKENS,
                                                       with_eos=False)

        input_ids = torch.tensor(instance["input_ids"],
                                 device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)

        if "gpt2" == args.model:
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        #todo: change this part like evaluate for probs=[0,,0,...1,0,0,..]
        prev = torch.topk(
            probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item(
        ) in special_tokens_ids:  #todo rooh: to remove special tokens
            while prev.item() in special_tokens_ids:
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
Example #15
def sample_sequence(personality,
                    history,
                    tokenizer,
                    model,
                    args,
                    current_output=None):
    """A function to sample the network. Taken from `interact.py`"""
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []

    for i in range(args["max_length"]):
        instance, sequence = build_input_from_segments(personality,
                                                       history,
                                                       current_output,
                                                       tokenizer,
                                                       with_eos=False)

        input_ids = torch.tensor(instance["input_ids"],
                                 device=args["device"]).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args["device"]).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)

        if "gpt2" == args["model"]:
            logits = logits[0]
        logits = logits[0, -1, :] / args["temperature"]
        logits = top_filtering(logits,
                               top_k=args["top_k"],
                               top_p=args["top_p"])
        probs = F.softmax(logits, dim=-1)

        prev = (torch.topk(probs, 1)[1]
                if args["no_sample"] else torch.multinomial(probs, 1))
        if i < args["min_length"] and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
def sample_sequence(personality, history, tokenizer, model, args, current_output=None):

    special_tokens = ['<bos>', '<eos>', '<speaker1>', '<speaker2>']
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)
    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance, sequence = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)

        input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)

        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)


        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            count = 0 
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1: 
                    break
                prev = torch.multinomial(probs, num_samples=1)
                count += 1 
                # to prevent endless looping
                if count == 10: 
                    break 

        # import pdb; pdb.set_trace()
        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
Example #17
def sample_sequence(history, tokenizer, model, args):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    current_output = []
        
    for i in range(args.max_length):
        instance = build_input_from_segments([tokenizer.convert_tokens_to_ids(history)], current_output, tokenizer, with_eos=False)
        input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)

        if prev.item() in special_tokens_ids:
            current_output.append(prev.item())
            break
        current_output.append(prev.item())

    return current_output
Example #18
    def act(self):
        reply = {}

        if self.args.eval_type == "hits@1" and len(self.candidates) > 0:
            instances = defaultdict(list)
            for candidate, _ in self.candidates:
                instance = build_input_from_segments(self.persona, self.history, candidate, self.tokenizer)
                for input_name, input_array in instance.items():
                    instances[input_name].append(input_array)

            inputs = pad_dataset(instances, padding=self.special_tokens_ids[-1])

            tensor_inputs = {}
            for input_name in ["input_ids", "mc_token_ids", "token_type_ids"]:
                tensor = torch.tensor(inputs[input_name], device=self.args.device)
                tensor = tensor.view((-1, len(self.candidates)) + tensor.shape[1:])
                tensor_inputs[input_name] = tensor

            with torch.no_grad():
                mc_logits = self.model_checkpoint(**tensor_inputs)[1]

            val, ind = torch.sort(mc_logits[0], descending=True)

            ypred = self.candidates[ind[0].item()][1] # match
            tc = []
            for j in range(len(self.candidates)):
                tc.append(self.candidates[ind[j].item()][1])
            reply = {'text': ypred, 'text_candidates': tc}


        elif self.args.eval_type == 'f1':
            # We are in interactive or f1 evaluation mode => just sample
            with torch.no_grad():
                out_ids = sample_sequence(self.persona, self.history, self.history_wd, self.tokenizer, self.model_checkpoint, self.args)
            out_text = self.tokenizer.decode(out_ids, skip_special_tokens=True,
                                             clean_up_tokenization_spaces=(self.args.eval_type != 'f1'))
            reply = {'text': out_text}

            # LSM metrics are integrated here, during sampling with the F1 eval type (this avoids separate validation commands).
            # For every response to a given dialogue history, we calculate the LSM score.
            # This score is added to a running list of scores and averaged at the end.
            # We score the model response and the label response separately, which yields 2 LSM scores.
            response = reply['text']
            dialogue_hist = self.history_wd

            table = str.maketrans(dict.fromkeys(string.punctuation))

            # split history into history of speaker 2 and speaker 1
            speaker2_hist_string, speaker1_chatbot_hist_list = split_history(dialogue_hist, self.tokenizer)

            # use deepcopy to avoid variable troubles
            label_c_hist = deepcopy(speaker1_chatbot_hist_list)

            # add response generated by model
            speaker1_chatbot_hist_list.append(response)

            # add label to chatbot hist
            label_c_hist.append(self.tokenizer.decode(self.labels))

            # convert to strings
            pred_c_hist_string = (' ' + ' '.join(speaker1_chatbot_hist_list).lower() + ' ').replace("'", ' ').translate(
                table)
            label_c_hist_string = (' ' + ' '.join(label_c_hist).lower() + ' ').replace("'", ' ').translate(table)

            # results in two vectors containing the function word usage percentage for each category
            # we use prediction and labeled, so we get 2 LSM score eventually at the end of evaluation
            _, p1_model, p2_model = calc_fw_perc_diffs(self.d, pred_c_hist_string, speaker2_hist_string)
            _, p1_human, p2_human = calc_fw_perc_diffs(self.d, label_c_hist_string, speaker2_hist_string)

            # calculate LSM score for the model response and calculate LSM score for the label response
            LSMs_model = torch.tensor([1-(abs(p1 - p2_model[i]) / (p1 + p2_model[i] + 0.00000001)) for i, p1 in enumerate(p1_model)]).cuda()
            LSMs_human = torch.tensor([1-(abs(p1 - p2_human[i]) / (p1 + p2_human[i] + 0.00000001)) for i, p1 in enumerate(p1_human)]).cuda()

            lsm_model_list.append(torch.mean(LSMs_model))
            lsm_human_list.append(torch.mean(LSMs_human))

        else:
            # We are in interactive or f1 evaluation mode => just sample
            with torch.no_grad():
                out_ids = sample_sequence(self.persona, self.history, self.history_wd, self.tokenizer,
                                          self.model_checkpoint, self.args)
            out_text = self.tokenizer.decode(out_ids, skip_special_tokens=True,
                                             clean_up_tokenization_spaces=(self.args.eval_type != 'f1'))
            reply = {'text': out_text}


        return reply
Example #19
def run():
    parser = ArgumentParser()
    parser.add_argument("--model_type", type=str, default="gpt", help="gpt or gpt2")
    parser.add_argument("--model_checkpoint", type=str, default="", help="Path, url or short name of the model")
    parser.add_argument("--device", type=str, default="cuda" if torch.cuda.is_available() else "cpu", help="Device (cuda or cpu)")
    parser.add_argument("--filename", type=str, default="data/instances_dev.pkl", help="File to use for decoding")
    parser.add_argument("--no_sample", action='store_true', help="Set to use greedy decoding instead of sampling")
    parser.add_argument("--max_length", type=int, default=50, help="Maximum length of the output utterances")
    parser.add_argument("--min_length", type=int, default=1, help="Minimum length of the output utterances")
    parser.add_argument("--seed", type=int, default=42, help="Seed")
    parser.add_argument("--temperature", type=int, default=0.7, help="Sampling softmax temperature")
    parser.add_argument("--top_k", type=int, default=0, help="Filter top-k tokens before sampling (<=0: no filtering)")
    parser.add_argument("--top_p", type=float, default=0.9, help="Nucleus filtering (top-p) before sampling (<=0.0: no filtering)")
    args = parser.parse_args()

    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger(__file__)
    logger.info(pformat(args))

    random.seed(args.seed)
    torch.random.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    logger.info("Get pretrained model and tokenizer")

    if args.model_type == 'gpt2':
        tokenizer = GPT2Tokenizer.from_pretrained(args.model_checkpoint)
        model = GPT2LMHeadModel.from_pretrained(args.model_checkpoint)
    else:
        tokenizer = OpenAIGPTTokenizer.from_pretrained(args.model_checkpoint)
        model = OpenAIGPTLMHeadModel.from_pretrained(args.model_checkpoint)

    model.to(args.device)
    model.eval()

    data = get_dataset_from_file(tokenizer, args.filename)
    final_output_dict = {
        "version": "squash-2.0",
        "data": [{
            "paragraphs": []
        }]
    }
    question_number = 0
    # For all the instances corresponding to one paragraph, the model input format is: paragraph + answer + question.
    # The paragraph is common across all these instances.
    # "past" can be used to reuse the precomputed hidden states of the paragraph in subsequent predictions.

    import copy

    previous_para_index = None
    past = None
    for inst in tqdm.tqdm(data):
        with torch.no_grad():
            current_para_index = inst['para_index']
            if current_para_index != previous_para_index:
                past = None
                current_inst = copy.deepcopy(inst)
                # only keep the paragraph details in the instance to get its hidden states
                current_inst['question'] = []
                current_inst['answer'] = []
                instance, _ = build_input_from_segments(current_inst, tokenizer, with_eos=False)
                input_ids = torch.tensor(instance['input_ids'][:-2], device=args.device).unsqueeze(0)
                token_type_ids = torch.tensor(instance['token_type_ids'][:-2], device=args.device).unsqueeze(0)
                _, past = model(input_ids, token_type_ids=token_type_ids, past=past)  # "past" now holds the paragraph hidden states
                previous_para_index = current_para_index
            output = sample_sequence(inst, tokenizer, model, args, past)

        original_paragraph = tokenizer.decode(output['paragraph'])
        generated_question = tokenizer.decode(output['question'], skip_special_tokens=True)
        original_answer = tokenizer.decode(output['answer'], skip_special_tokens=True)
        para_index = inst['para_index']

        # Output in a SQUAD-like format with questions clumped together under their parent paragraph
        if len(final_output_dict["data"][0]["paragraphs"]) > para_index:
            # verify whether the paragraph text is identical
            assert original_paragraph == final_output_dict["data"][0]["paragraphs"][para_index]['context']
            # append the question answer pair
            final_output_dict["data"][0]["paragraphs"][para_index]['qas'].append({
                'id': 'question_%d' % question_number,
                'question': generated_question,
                'answers': [{
                    'text': original_answer,
                    'answer_start': original_paragraph.index(original_answer)
                }],
                'class': output['class'],
                'algorithm': output['algorithm'],
                'is_impossible': False
            })
        else:
            # add a new question to the list of QA pairs
            final_output_dict['data'][0]['paragraphs'].append({
                'context': original_paragraph,
                'qas': [{
                    'id': 'question_%d' % question_number,
                    'question': generated_question,
                    'answers': [{
                        'text': original_answer,
                        'answer_start': original_paragraph.index(original_answer)
                    }],
                    'class': output['class'],
                    'algorithm': output['algorithm'],
                    'is_impossible': False
                }]
            })

        question_number += 1

    with open("squash/temp/generated_questions.json", "w") as f:
        f.write(json.dumps(final_output_dict))
def sample_sequence(personality,
                    history,
                    tokenizer,
                    model,
                    args,
                    words,
                    weight4ind,
                    We,
                    current_output=None):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)
    if current_output is None:
        current_output = []
    last_utt = history[-1]
    last = tokenizer.decode(last_utt, skip_special_tokens=True)
    sentences = [last]
    # load sentences
    x, m = data_io.sentences2idx(
        sentences, words
    )  # x is the array of word indices, m is the binary mask indicating whether there is a word in that location
    w = data_io.seq2weight(x, m, weight4ind)  # get word weights

    rmpc = 1  # number of principal components to remove in SIF weighting scheme

    # set parameters
    global params
    params.rmpc = rmpc
    # get SIF embedding
    embedding1 = SIF_embedding.SIF_embedding(
        We, x, w, params)  # embedding[i,:] is the embedding for sentence i

    for i in range(args.max_length):
        instance, _ = build_input_from_segments(personality,
                                                history,
                                                current_output,
                                                tokenizer,
                                                with_eos=False)

        input_ids = torch.tensor(instance["input_ids"],
                                 device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args.device).unsqueeze(0)
        temperature = 1.0
        top_k = 0
        top_p = 0.9

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature
        logits = top_filtering(logits,
                               words,
                               weight4ind,
                               We,
                               tokenizer,
                               history,
                               args,
                               params,
                               embedding1,
                               top_k=top_k,
                               top_p=top_p,
                               current_output=current_output)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(
            probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn(
                        "Warning: model generating special token with probability 1."
                    )
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

    return current_output
    def act(self):
        reply = {}

        if self.args.eval_type == "hits@1" and len(self.candidates) > 0:
            instances = defaultdict(list)
            for candidate, _ in self.candidates:
                instance, _ = build_input_from_segments(self.persona, self.history, candidate, self.tokenizer)
                for input_name, input_array in instance.items():
                    instances[input_name].append(input_array)

            inputs = pad_dataset(instances, padding=self.special_tokens_ids[-1])

            tensor_inputs = {}
            for input_name in ["input_ids", "mc_token_ids", "token_type_ids"]:
                tensor = torch.tensor(inputs[input_name], device=self.args.device)
                tensor = tensor.view((-1, len(self.candidates)) + tensor.shape[1:])
                tensor_inputs[input_name] = tensor

            with torch.no_grad():
                _, mc_logits = self.model_checkpoint(**tensor_inputs)

            val, ind = torch.sort(mc_logits[0], descending=True)

            ypred = self.candidates[ind[0].item()][1] # match
            tc = []
            for j in range(len(self.candidates)):
                tc.append(self.candidates[ind[j].item()][1])
            reply = {'text': ypred, 'text_candidates': tc}
        else:
            # We are in interactive or f1 evaluation mode => just sample
            with torch.no_grad():
                out_ids = sample_sequence(self.persona, self.history, self.tokenizer, self.model_checkpoint, self.args)   # YW: TODO: out_ids, _?
            # Get a generated response
            out_text = self.tokenizer.decode(out_ids, skip_special_tokens=True,
                                             clean_up_tokenization_spaces=(self.args.eval_type != 'f1'))
            out_text_org = out_text
            out_text = out_text.replace(' \' ', '\'')   # TODO: tbd
            out_text = out_text.replace(' \'', '\'')
            # persona NLI
            profiles = []
            for profile in self.persona:
                profile_text = self.tokenizer.decode(profile, skip_special_tokens=True, clean_up_tokenization_spaces=False)
                profile_text = profile_text.replace(' \' ', '\'')   # TODO: tbd
                profile_text = profile_text.replace(' \'', '\'')
                profiles.append(profile_text)
            nli_score, reward_score, c_score, current_con_en = nli_engine(out_text, profiles, self.nli_tokenizer, self.nli_model, eval=True)
            self.nli_scores += nli_score   # persona NLI
            self.reward_scores += reward_score   # reward function
            self.c_scores += c_score   # C score
            self.sample_num += 1
            self.con_en += current_con_en   # if this persona contains a contradicted/entail profile or not (not applied)

            # internal repetition
            response_tok = out_text_org.split()
            intrep_1gram = intrep_frac(response_tok)
            # if 2-gram or 3-gram are going to be used:
            '''
            # intrep_2gram
            response_tok_2gram = get_ngrams(out_text, 2)
            intrep_2gram = intrep_frac(response_tok_2gram)
            # intrep_3gram
            response_tok_3gram = get_ngrams(out_text, 3)
            intrep_3gram = intrep_frac(response_tok_3gram)
            '''
            intern_rep_reward = intrep_1gram
            self.intrep_scores += intern_rep_reward

            # bleu
            label_text = self.tokenizer.decode(self.labels, skip_special_tokens=True, clean_up_tokenization_spaces=False)
            current_bleu = bleu_rewarder(out_text_org, label_text)
            self.bleu_scores += current_bleu

            # fine-tuned GPT-based language model
            lm_tokenize_input = self.lm_tokenizer.tokenize(out_text)
            # lm_tensor_input = torch.tensor([lm_tokenizer.convert_tokens_to_ids(lm_tokenize_input)]).to(args.device)
            lm_tensor_input = torch.tensor([[self.special_tokens_ids[0]] + self.lm_tokenizer.convert_tokens_to_ids(lm_tokenize_input) + [self.special_tokens_ids[-1]]]).to(self.args.device)
            lm_loss = self.lm_model(lm_tensor_input, lm_labels=lm_tensor_input)
            lm_ppl = math.exp(lm_loss.item())
            self.lm_ppl_scores += lm_ppl

            print('out_text:', out_text)
            print('current nli:', self.nli_scores)
            print('current score:', self.reward_scores / self.sample_num)
            print('current c_score_macro:', self.c_scores / self.sample_num)
            current_c_score_micro = (self.nli_scores[1] - self.nli_scores[0]) / sum(self.nli_scores)
            cn_res = nli_score[1] - nli_score[0]   # cn: C_new (persona level)
            # C_new calculation
            if cn_res > 0:
                current_cn = 1
            elif cn_res < 0:
                current_cn = -1
            else:
                current_cn = 0
            self.cnm += current_cn
            print('current c_new:', self.cnm / self.sample_num)
            print('current c_score_micro:', current_c_score_micro)
            print('current con_en:', self.con_en)
            print('current intrep score:', self.intrep_scores / self.sample_num)
            print('current BLEU:', self.bleu_scores / self.sample_num)
            print('current PPL:', self.lm_ppl_scores / self.sample_num)
            reply = {'text': out_text}

        return reply
Example #22
def sample_sequence(tokenizer,
                    model,
                    args,
                    background=None,
                    personality=None,
                    utterances=(),
                    utterance_types=(),
                    current_output=None,
                    explain=False,
                    replace_unknown=False):
    max_sequence_length = args.max_sequence_length if args.max_sequence_length > 0 else model.config.n_ctx
    assert max_sequence_length <= model.config.n_ctx, 'max_sequence_length [%i] was set to a value higher than ' \
                                                      'supported by the model (config.n_ctx [%i]). Please use a lower ' \
                                                      'value or do not set it [-1] to use the highest supported one.' \
                                                      % (max_sequence_length, model.config.n_ctx)

    special_tokens_ids = tokenizer.all_special_ids
    bot_token_id = tokenizer.convert_tokens_to_ids(TYPE_BOT)
    user_token_id = tokenizer.convert_tokens_to_ids(TYPE_USER)
    #eos_token_id = tokenizer.eos_token_id
    # default to speaker2 if background is not present in model
    #background_token_id = tokenizer.special_tokens.get(TYPE_BACKGROUND, user_token_id)
    background_token_id = tokenizer.convert_tokens_to_ids(TYPE_BACKGROUND)
    #logger.debug('expected sequence length (without prediction): %i; max_allowed: %i (inclusive prediction)'
    #             % (len(list(chain(*(context + history)))) + len(history) + 1, max_sequence_length))
    context = []
    if background is not None:
        if isinstance(background, list) or isinstance(background, tuple):
            context.extend([(background_token_id, b) for b in background])
        else:
            context.append((background_token_id, background))
    if personality is not None:
        context.append((bot_token_id, personality))
    if current_output is None:
        current_output = []
    assert len(utterances) == len(utterance_types), f'length of utterances [{len(utterances)}] has to be the ' \
                                                    f'same as length of utterance_types [{len(utterance_types)}], ' \
                                                    f'if that is provided'
    utterances_with_types = list(zip(utterance_types, utterances))
    eos = None
    explanations = []
    last_ids = None
    for i in range(args.max_length):
        instance, sequence = build_input_from_segments(
            context=context,
            history=utterances_with_types,
            reply=(bot_token_id, current_output),
            tokenizer=tokenizer,
            eos=None,
            max_sequence_length=max_sequence_length)
        l_trunc = len(list(chain(*sequence))) - len(instance['input_ids'])
        assert l_trunc <= 0, 'The sequence was truncated. Please provide less context + history + question!'

        if torch.is_grad_enabled():
            model.zero_grad()

        input_ids = torch.tensor(instance["input_ids"],
                                 device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"],
                                      device=args.device).unsqueeze(0)
        if explain:
            position_ids = torch.arange(0,
                                        input_ids.size(-1),
                                        dtype=torch.long,
                                        device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        else:
            position_ids = None

        capture_at_module = None
        if explain:
            if "gpt2" == args.model:
                capture_at_module = model.transformer.drop
            else:
                raise NotImplementedError(
                    'explain is implemented only for gpt2 model')

        # this captures only if capture_at_module != None
        with capture_inputs_with_gradients(capture_at_module) as captured:
            logits = model(input_ids=input_ids,
                           token_type_ids=token_type_ids,
                           position_ids=position_ids)

            logits = logits[0]
            logits_all = logits[0, -1, :] / args.temperature
            logits = top_filtering(logits_all.clone(),
                                   top_k=args.top_k,
                                   top_p=args.top_p)
            probs = F.softmax(logits, dim=-1)

            #logger.debug('nbr of non zeros in filtered probs_top: %i (of %i)' % (torch.nonzero(probs.data).size(0), len(probs)))

            prev = torch.topk(probs,
                              1)[1] if args.no_sample else torch.multinomial(
                                  probs, 1)
            if i < args.min_length and prev.item() in special_tokens_ids:
                while prev.item() in special_tokens_ids:
                    logger.debug('resample because of special token...')
                    prev = torch.multinomial(probs, num_samples=1)

            if explain:
                probs_all = F.softmax(logits_all, dim=-1)
                #logger.debug('nbr of non zeros in filtered probs_all: %i (of %i)'
                #             % (torch.nonzero(probs_all.data).size(0), len(probs)))
                #logger.debug('probs_all min: %f, max: %f; logits_all min: %f, max %f'
                #             % (torch.min(probs_all).item(), torch.max(probs_all).item(),
                #                torch.min(probs_all).item(), torch.max(probs_all).item()))
                #logger.debug('probs_top min: %f, max: %f; logits_top min: %f, max %f'
                #             % (torch.min(probs).item(), torch.max(probs).item(),
                #                torch.min(logits).item(), torch.max(logits).item()))
                prev_prob = probs_all[prev]
                #logger.debug('prob for current sample [%s]: %f' % (tokenizer.decode([prev.item()]), prev_prob.item()))
                prev_prob.backward()

        if explain:
            assert captured is not None, \
                f'no captured inputs and gradients available to create explanations (generated token position: ' \
                f'{len(current_output) + 1})'
            expl_internal = (torch.abs(captured['grads'][0][0]) *
                             torch.abs(captured['inputs'][0][0])).sum(dim=-1)

            last_ids = (instance["input_ids"], instance["token_type_ids"])
            explanations.append(
                {'internal': expl_internal.detach().cpu().numpy()})

        if prev.item() in special_tokens_ids:
            eos = prev.item()
            break
        current_output.append(prev.item())

    if current_output == tokenizer.encode('unknown'):
        current_output = tokenizer.encode('i don\'t know')

    if explain:
        return current_output, eos, last_ids, explanations

    return current_output, eos
Example #23
def calculate_metrics(args, model, tokenizer, dataset, special_tokens):
    special_tokens_ids = tokenizer.convert_tokens_to_ids(special_tokens)

    all_blues = []
    all_f1_scores = []
    all_true_sentences = []
    all_predicted_sentences = []
    for data in tqdm(dataset['valid']):
        personality = data['personality']
        utterances = data['utterances']

        #utterance = utterances[-1]  # only the longest conversation
        for utterance in utterances:
            true_label = utterance['candidates'][-1]
            history = utterance['history']
            predicted_output = []
            for i in range(args.max_length):
                instance, _ = build_input_from_segments(personality,
                                                        history,
                                                        predicted_output,
                                                        tokenizer,
                                                        special_tokens,
                                                        with_eos=False)

                try:

                    if len(instance["input_ids"]) > 310:
                        truncated_history = [hist[:5] for hist in history]
                        instance, _ = build_input_from_segments(
                            personality,
                            truncated_history,
                            predicted_output,
                            tokenizer,
                            special_tokens,
                            with_eos=False)

                    input_ids = torch.tensor(instance["input_ids"],
                                             device=args.device).unsqueeze(0)
                    token_type_ids = torch.tensor(
                        instance["token_type_ids"],
                        device=args.device).unsqueeze(0)

                    logits = model(input_ids, token_type_ids=token_type_ids)
                except:
                    print("exception")
                    continue

                if "gpt2" == args.model:
                    logits = logits[0]
                logits = logits[0, -1, :] / args.temperature
                logits = top_filtering(logits,
                                       top_k=args.top_k,
                                       top_p=args.top_p)
                probs = F.softmax(logits, dim=-1)

                prev = torch.topk(
                    probs, 1)[1] if args.no_sample else torch.multinomial(
                        probs, 1)
                # if i < args.min_length and prev.item() in special_tokens_ids:
                #     k=0
                #     while prev.item() in special_tokens_ids and k < 100:
                #         prev = torch.multinomial(probs, num_samples=1)
                #         k+=1

                if i < args.min_length:
                    prev = torch.multinomial(probs, num_samples=1)

                # if prev.item() in special_tokens_ids:
                #     break
                predicted_output.append(prev.item())

            predicted_sentence = tokenizer.decode(predicted_output,
                                                  skip_special_tokens=True)
            true_sentence = tokenizer.decode(true_label,
                                             skip_special_tokens=True)
            #looks like zero gives the best results

            all_predicted_sentences.append(predicted_sentence)
            all_true_sentences.append(true_sentence)

            bleus = [
                _bleu(predicted_sentence, [true_sentence],
                      method="method" + str(i)) for i in [0, 1, 2, 3, 5]
            ]
            #bleu = _bleu(predicted_sentence, [true_sentence])
            f1_score = _f1_score(predicted_sentence, [true_sentence])
            #print(f1_score)
            all_blues.append(bleus)
            all_f1_scores.append(f1_score)
            #compare predicted and label with bleu

    with open(
            "/home/rohola/codes/transfer-learning-conv-ai/out/emotion_correlation_input.txt",
            'w') as fw:
        for predicted_sentence, true_sentence in zip(all_predicted_sentences,
                                                     all_true_sentences):
            fw.write(predicted_sentence + "\t" + true_sentence + "\n")

    print("avg bleu", np.array(all_blues).mean(axis=0))
    print("avg f1 score", np.mean(all_f1_scores))
    print("max bleu", np.array(all_blues).max(axis=0))
Example #24
def sample_sequence(personality, history, history_wd, tokenizer, model, args, current_output=None):
    """
    This function not only samples a sequence, but additionally calculates the lsm metrics

    :return: samples sequence
    """
    special_tokens_ids = tokenizer.convert_tokens_to_ids(SPECIAL_TOKENS)

    # use weighted decoding
    if args.wd:
        d = get_fw_dict()
        table = str.maketrans(dict.fromkeys(string.punctuation))
        hist_p_string, hist_c = split_history(history_wd, tokenizer)

    if current_output is None:
        current_output = []

    for i in range(args.max_length):
        instance = build_input_from_segments(personality, history, current_output, tokenizer, with_eos=False)

        input_ids = torch.tensor(instance["input_ids"], device=args.device).unsqueeze(0)
        token_type_ids = torch.tensor(instance["token_type_ids"], device=args.device).unsqueeze(0)

        logits = model(input_ids, token_type_ids=token_type_ids)
        if isinstance(logits, tuple):  # for gpt2 and maybe others
            logits = logits[0]
        logits = logits[0, -1, :] / args.temperature

        # use pretrained tokenizer for Weighted decoding testing (otherwise latin-1 error)
        if args.wd:
            dec_curr_output = tokenizer.decode(current_output)
            hist_c.append(dec_curr_output)
            hist_c_string = (' ' + ' '.join(hist_c).lower() + ' ').replace("'", ' ').translate(table)

            cat_diffs, _, _ = calc_fw_perc_diffs(d, hist_p_string, hist_c_string)

            w = args.wd_weight
            logits = wd_logits(logits, cat_diffs, w, d, tokenizer)

        logits = top_filtering(logits, top_k=args.top_k, top_p=args.top_p)
        probs = F.softmax(logits, dim=-1)

        prev = torch.topk(probs, 1)[1] if args.no_sample else torch.multinomial(probs, 1)
        if i < args.min_length and prev.item() in special_tokens_ids:
            while prev.item() in special_tokens_ids:
                if probs.max().item() == 1:
                    warnings.warn("Warning: model generating special token with probability 1.")
                    break  # avoid infinitely looping over special token
                prev = torch.multinomial(probs, num_samples=1)

        if prev.item() in special_tokens_ids:
            break
        current_output.append(prev.item())

        if args.wd:
            # reset hist
            hist_c = hist_c[:-1]

    if args.wd:
        hist_c.append(current_output)
    return current_output