Example #1
import numpy as np
from electra_diet.dataset import ElectraDataset
# file_path ='/Users/digit82_mac/git_repo/nlu_dataset/nlu_goldenset.md'
file_path ='/Users/digit82_mac/git_repo/nlu_dataset/nlu.md'

data = ElectraDataset(file_path)

from tqdm import trange

# iterate once over the whole dataset to check that every example encodes cleanly
for idx in trange(len(data)):
    o = data[idx]


from electra_diet.tokenizer import get_tokenizer

tokenizer = get_tokenizer()

# idx =30
idx = 24325
dataset = data.dataset
text = dataset[idx]['text']
print(text)

data[idx]

# print each wordpiece with its position in the encoded sequence
text_token = data.tokenizer.encode(text)
for i, t in enumerate(text_token):
    print("i:{} token:{}".format(i, data.tokenizer.ids_to_tokens[t]))

entity_dict_bio = data.entity_dict_bio

def find_sub_list(sub_list, this_list):
    # body truncated in the source; a common reconstruction that returns the
    # (start, end) index pair of every occurrence of sub_list inside this_list
    results = []
    n = len(sub_list)
    for ind in (i for i, e in enumerate(this_list) if e == sub_list[0]):
        if this_list[ind:ind + n] == sub_list:
            results.append((ind, ind + n - 1))
    return results
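
A quick sanity check of the reconstructed helper; the token ids below are made up for illustration:

tokens = [2, 7821, 417, 5608, 3]   # hypothetical encoded sentence
sub = [417, 5608]                  # hypothetical entity token span
print(find_sub_list(sub, tokens))  # -> [(2, 3)]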
Example #2
    def inference(self, text: str, intent_topk=5):
        if self.model is None:
            raise ValueError(
                "model is not loaded, first call load_model(checkpoint_path)"
            )

        # lower-case the input when the model was trained on lower-cased text
        try:
            lower_text = self.model.hparams.lower_text
            if lower_text:
                text = text.lower()
        except AttributeError:
            lower_text = False

        # optional spacing correction before tokenization
        if self.space_cor:
            text = self.spacer.space(text)
        tokenizer = get_tokenizer()
        tokens_tmp = tokenize(text, self.model.hparams.seq_len, lower_text=lower_text)
        # add a batch dimension to each tensor returned by tokenize()
        tokens = tuple(t.unsqueeze(0) for t in tokens_tmp)

        intent_result, entity_result = self.model.forward(*tokens)

        # mapping intent result
        rank_values, rank_indices = torch.topk(
            nn.Softmax(dim=1)(intent_result)[0], k=intent_topk
        )
        intent = {}
        intent_ranking = []
        for i, (value, index) in enumerate(
            zip(rank_values.tolist(), rank_indices.tolist())
        ):
            intent_ranking.append({"confidence": value, "name": self.intent_dict[index]})

            if i == 0:
                intent["name"] = self.intent_dict[index]
                intent["confidence"] = value

        # mapping entity result
        entities = []

        # except the first sequence token, which is the BOS token
        _, entity_indices = torch.max(entity_result[0], dim=1)
        entity_indices = entity_indices.tolist()

        input_token, _ = tokens_tmp
        input_token = input_token.numpy()

        entity_val = []
        entity_typ = ''
        entity_pos = dict()

        for i, e in enumerate(entity_indices):
            e = int(e)

            if e > 0:
                ##get index info
                entity_label = self.entity_dict[e]
                pos, typ = entity_label.split('-')
#                 print("pos:{} typ:{}".format(pos, typ))
                if pos == 'B':
                    ## first occurrence of a B- entity
                    if len(entity_val) == 0:
                        entity_val.append(input_token[i])
                        entity_typ = typ

                    ## a previous B- entity is still open
                    else:
                        ##update the previous entity
                        value = tokenizer.decode(entity_val)
                        value = delete_josa(value).replace('#', '')
                        entity_pos[value] = entity_typ
                        entity_val = []
                        ##add the current entity
                        entity_val.append(input_token[i])
                        entity_typ = typ

                ## I- label continuing the same entity type
                elif pos == 'I' and typ == entity_typ:
                    entity_val.append(input_token[i])
            
            ## O token: flush any entity collected so far
            else:
                if len(entity_val) > 0:
                    value = tokenizer.decode(entity_val)
                    value = delete_josa(value).replace('#', '')
                    entity_pos[value] = entity_typ
                    entity_val = []

        # ## For debug type
        # print(entity_pos)
        for value, typ in entity_pos.items():
            # escape the decoded value so any regex metacharacters are matched literally
            m = re.search(re.escape(value), text)
            if m is not None:
                start_idx, end_idx = m.span()
                entities.append(
                    {
                        "start": start_idx,
                        "end": end_idx,
                        "value": value,
                        "entity": typ
                    }
                )

        ##post processor
        # intent, intent_ranking, entities = post_process(intent, intent_ranking, entities)

        return {
            "text": text,
            "intent": intent,
            "intent_ranking": intent_ranking,
            "entities": entities,
        }
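
A hedged usage sketch for this method: the owner class name and checkpoint path are assumptions, but load_model(checkpoint_path) is the call named in the error message above.

inferencer = Inferencer()            # hypothetical class that owns inference()
inferencer.load_model("model.ckpt")  # illustrative checkpoint path
result = inferencer.inference("what is the weather in seoul", intent_topk=5)
print(result["intent"], result["entities"])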

Example #3
    def __init__(
        self,
        file_path: str,
        seq_len=128,
        tokenizer=None,
        intent_dict=None,
        entity_dict=None,
        lower_text=True,
        tag_type='bio'
    ):  
        self.lower_text = lower_text
        self.intent_dict = {}
        self.entity_dict_bio = {}
        self.entity_dict_bio["O"] = 0  # XO tagging: each entity type is assigned to one class

        self.entity_dict = {}
        self.entity_dict["O"] = 0  # XO tagging: each entity type is assigned to one class

        self.dataset = []
        self.seq_len = seq_len
        self.tag_type = tag_type.lower()

        if tokenizer is None:
            self.tokenizer = get_tokenizer()
        else:
            self.tokenizer = tokenizer
        
        self.pad_token_id = self.tokenizer.pad_token_id
        
        with open(file_path, encoding="utf-8") as f:
            markdown_lines = f.readlines()

        intent_value_list = []
        entity_type_list = []
        current_intent_focus = ""

        for line in tqdm(
            markdown_lines,
            desc="Organizing Intent & Entity dictionary in NLU markdown file ...",
        ):
            if len(line.strip()) < 2:
                current_intent_focus = ""
                continue

            if "## " in line:
                if "intent:" in line:
                    intent_value_list.append(line.split(":")[1].strip())
                    current_intent_focus = line.split(":")[1].strip()
                else:
                    current_intent_focus = ""

            else:
                if current_intent_focus != "":
                    text = line[2:].strip()

                    for type_str in re.finditer(r"\([a-zA-Z_1-2]+\)", text):
                        entity_type = (
                            text[type_str.start() + 1 : type_str.end() - 1]
                            .replace("(", "")
                            .replace(")", "")
                        )
                        entity_type_list.append(entity_type)

        intent_value_list = sorted(intent_value_list)
        for intent_value in intent_value_list:
            if intent_value not in self.intent_dict.keys():
                self.intent_dict[intent_value] = len(self.intent_dict)

        entity_type_list = sorted(list(set(entity_type_list)))
        for entity_type in entity_type_list:
            self.entity_dict_bio['B-'+entity_type] = len(self.entity_dict_bio)
            self.entity_dict_bio['I-'+entity_type] = len(self.entity_dict_bio)
            self.entity_dict[entity_type] = len(self.entity_dict)
        
        ##use existing intent & entity dictionaries when provided
        if intent_dict is not None:
            self.intent_dict = intent_dict
        if entity_dict is not None:
            if tag_type == 'bio':
                self.entity_dict_bio = entity_dict
            else:
                self.entity_dict = entity_dict

        current_intent_focus = ""

        for line in tqdm(
            markdown_lines, desc="Extracting Intent & Entity in NLU markdown files...",
        ):
            if len(line.strip()) < 2:
                current_intent_focus = ""
                continue

            if "## " in line:
                if "intent:" in line:
                    current_intent_focus = line.split(":")[1].strip()
                else:
                    current_intent_focus = ""
            else:
                if current_intent_focus != "":  # a sentence with intent & entity annotations
                    text = line[2:]

                    entity_value_list = []
                    for value in re.finditer(r"\[(.*?)\]", text):
                        entity_value_list.append(
                            text[value.start() + 1 : value.end() - 1]
                            .replace("[", "")
                            .replace("]", "")
                        )

                    entity_type_list = []
                    for type_str in re.finditer(r"\([a-zA-Z_1-2]+\)", text):
                        entity_type = (
                            text[type_str.start() + 1 : type_str.end() - 1]
                            .replace("(", "")
                            .replace(")", "")
                        )
                        entity_type_list.append(entity_type)

                    text = re.sub(r"\([a-zA-Z_1-2]+\)", "", text)
                    text = text.replace("[", "").replace("]", "")
                    each_data_dict = {}
                    if len(text.strip()) > 1:
                        each_data_dict["text"] = text.strip()
                        each_data_dict["intent"] = current_intent_focus
                        each_data_dict["intent_idx"] = self.intent_dict[
                            current_intent_focus
                        ]
                        each_data_dict["entities"] = []
    
                        for value, type_str in zip(entity_value_list, entity_type_list):
                            try:
                                for entity in re.finditer(value, text):
                                    each_data_dict["entities"].append(
                                        {
                                            "start": entity.start(),
                                            "end": entity.end(),
                                            "entity": type_str,
                                            # "entity_idx": self.entity_dict[type_str],
                                        }
                                    )
                            except Exception as ex:
                                print(f"error occured : {ex}")
                                print(f"value: {value}")
                                print(f"text: {text}")
    
                        self.dataset.append(each_data_dict)

        print(f"Intents: {self.intent_dict}")
        if self.tag_type == 'bio':
            print(f"Entities: {self.entity_dict_bio}")
        else:
            print(f"Entities: {self.entity_dict}")
Example #4
    def __init__(self, entity_dict: dict, tokenizer=None):
        self.entity_dict = entity_dict
        if tokenizer is None:
            self.tokenizer = get_tokenizer()
        else:
            self.tokenizer = tokenizer  # else-branch restored; mirrors the fallback in Example #3
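
A hedged sketch, assuming this constructor belongs to the NERDecoder used in Example #5; the label ids are illustrative (there they come from pl_module.entity_dict):

label_dict = {0: "O", 1: "B-location", 2: "I-location"}  # illustrative labels
decoder = NERDecoder(label_dict, None)  # tokenizer=None falls back to get_tokenizer()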
Example #5
def show_entity_report(dataset,
                       pl_module,
                       file_name=None,
                       output_dir=None,
                       cuda=True):

    ##generate rasa performance metrics
    tokenizer = get_tokenizer()
    text = []
    label_dict = dict()
    pl_module.model.eval()
    for k, v in pl_module.entity_dict.items():
        label_dict[int(k)] = v

    decoder = NERDecoder(label_dict, None)
    dataloader = DataLoader(dataset, batch_size=32)

    preds = list()
    targets = list()
    labels = set()

    for batch in tqdm(dataloader, desc="load entity dataset"):
        inputs, intent_idx, entity_idx = batch
        (input_ids, token_type_ids) = inputs
        token = get_token_to_text(tokenizer, input_ids)
        text.extend(token)
        model = pl_module.model
        if cuda:
            input_ids = input_ids.cuda()
            token_type_ids = token_type_ids.cuda()
            model = model.cuda()
        _, entity_result = model.forward(input_ids, token_type_ids)

        entity_result = entity_result.detach().cpu()
        _, entity_indices = torch.max(entity_result, dim=-1)

        for i in range(entity_idx.shape[0]):
            decode_original = decoder.process(input_ids[i].cpu().numpy(),
                                              entity_idx[i].numpy())
            decode_pred = decoder.process(input_ids[i].cpu().numpy(),
                                          entity_indices[i].numpy())
            targets.append(decode_original)
            preds.append(decode_pred)

            # for origin in decode_original:
            #     labels.add(origin['entity'])
            #     find_idx = 0
            #     for pred in decode_pred:
            #         if origin['start'] == pred['start'] and origin['end'] == pred['end']:
            #             preds.append(origin['entity'])
            #             targets.append(origin['entity'])
            #             find_idx += 1
            #     if find_idx == 0:
            #          preds.append('No_Entity')
            #          targets.append(origin['entity'])

    report = show_entity_metrics(pred=preds,
                                 label=targets,
                                 file_name=file_name,
                                 output_dir=output_dir)
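
A hedged call sketch; dataset and pl_module come from the training pipeline, and the file name is illustrative:

show_entity_report(dataset, pl_module,
                   file_name="entity_report.json",
                   output_dir="./reports",
                   cuda=torch.cuda.is_available())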
Example #6
def show_intent_report(dataset,
                       pl_module,
                       file_name=None,
                       output_dir=None,
                       cuda=True):
    ##generate rasa performance metrics
    tokenizer = get_tokenizer()
    text = []
    preds = np.array([])
    targets = np.array([])
    logits = np.array([])
    label_dict = dict()
    pl_module.model.eval()
    for k, v in pl_module.intent_dict.items():
        label_dict[int(k)] = v
    dataloader = DataLoader(dataset, batch_size=32)

    for batch in tqdm(dataloader, desc="load intent dataset"):
        inputs, intent_idx, entity_idx = batch
        (input_ids, token_type_ids) = inputs
        token = get_token_to_text(tokenizer, input_ids)
        text.extend(token)
        model = pl_module.model
        if cuda:
            input_ids = input_ids.cuda()
            token_type_ids = token_type_ids.cuda()
            model = model.cuda()
        intent_pred, entity_pred = model.forward(input_ids, token_type_ids)
        y_label = intent_pred.argmax(1).cpu().numpy()
        preds = np.append(preds, y_label)
        targets = np.append(targets, intent_idx.cpu().numpy())

        logit = intent_pred.detach().cpu()
        softmax = torch.nn.Softmax(dim=-1)
        logit = softmax(logit).numpy()
        logits = np.append(logits, logit.max(-1))

    preds = preds.astype(int)
    targets = targets.astype(int)

    labels = list(label_dict.keys())
    target_names = list(label_dict.values())

    report = show_rasa_metrics(pred=preds,
                               label=targets,
                               labels=labels,
                               target_names=target_names,
                               file_name=file_name,
                               output_dir=output_dir)
    ##generate confusion matrix
    inequal_index = np.where(preds != targets)[0]
    inequal_dict = dict()
    for i in range(inequal_index.shape[0]):
        idx = inequal_index[i].item()
        pred = preds[idx]
        if label_dict[pred] not in inequal_dict.keys():
            inequal_dict[label_dict[pred]] = []
        tmp_dict = dict()
        tmp_dict['target'] = label_dict[targets[idx]]
        tmp_dict['prob'] = round(logits[idx], 3)
        tmp_dict['text'] = text[idx]
        inequal_dict[label_dict[pred]].append(tmp_dict)

    cm_file_name = file_name.replace('.', '_cm.')
    cm_matrix = confusion_matrix(pred=preds,
                                 label=targets,
                                 label_index=label_dict,
                                 file_name=cm_file_name,
                                 output_dir=None)

    pred_report(inequal_dict,
                cm_matrix,
                file_name=cm_file_name.replace('.json', '.md'),
                output_dir=output_dir)
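
A hedged call sketch mirroring the entity report above; file_name must be set here because the confusion-matrix file name is derived from it (values are illustrative):

show_intent_report(dataset, pl_module,
                   file_name="intent_report.json",
                   output_dir="./reports",
                   cuda=torch.cuda.is_available())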