def get_persona_faiss_selected(args, tokenizer):
    """ Select a persona sentence for every training example by FAISS
    nearest-neighbour search over a fixed pool of persona sentences. """
    personachat = get_dataset_with_no_tokenizer(tokenizer, args.dataset_path,
                                                args.dataset_cache)
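    # `personachat` maps a split name ("train"/"valid") to a list of dialogs;
    # each dialog provides untokenized "persona_info" sentences and
    # "utterances" with their "history" and "candidates".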

    logger.info("Build inputs and labels")
    datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    persona_faiss_selected = []
    history_faiss_selected = []
    persona_faiss_index = []
    history_faiss_index = []
    # data_train: list of all personalities (not duplicated); only the first
    # 20 entries are kept here.
    persona_complete = parse_data('./Dataset/train_self_original.txt')
    persona_complete = persona_complete[:20]

    # The persona pool is fixed, so the sentence encoder and the FAISS index
    # only need to be built once, before iterating over the dialogs.
    # Alternative encoders: 'bert-large-nli-mean-tokens',
    # 'sentence-transformers/distiluse-base-multilingual-cased'.
    model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
    embeddings_persona = model.encode(persona_complete, show_progress_bar=True)
    # FAISS expects float32 vectors.
    embeddings_persona = np.asarray(embeddings_persona, dtype="float32")

    # Exact L2 index wrapped in IndexIDMap so each hit maps back to its
    # position in persona_complete.
    index = faiss.IndexFlatL2(embeddings_persona.shape[1])
    index = faiss.IndexIDMap(index)
    index.add_with_ids(
        embeddings_persona,
        np.arange(embeddings_persona.shape[0], dtype="int64"))

    for dataset_name, dataset in personachat.items():
        num_candidates = len(dataset[0]["utterances"][0]["candidates"])
        if args.num_candidates > 0 and dataset_name == 'train':
            num_candidates = min(args.num_candidates, num_candidates)
        for dialog in dataset:
            # The dialog's own persona is not used by the retrieval below.
            persona = dialog["persona_info"]

            # Retrieve one persona sentence per (utterance, candidate)
            # training example in this dialog.
            for _ in range(args.personality_permutations):
                for utterance in dialog["utterances"]:
                    history = utterance["history"][-(2 * args.max_history +
                                                     1):]
                    # The query does not depend on the candidate, so encode the
                    # history and search the index once per utterance.
                    history_encoded = model.encode(history,
                                                   show_progress_bar=True)
                    # I holds one row of neighbour IDs per history turn; the
                    # persona second-nearest to the first turn is kept.
                    D, I = index.search(np.array(history_encoded), k=5)
                    for j, candidate in enumerate(
                            utterance["candidates"][-num_candidates:]):
                        history_faiss_selected.append(history)
                        persona_faiss_selected.append(
                            persona_complete[I[0][1]])
                #persona = [persona[-1]] + persona[:-1]  # permuted personalities
    return persona_faiss_selected


def get_data_loaders(args, tokenizer):
    """ Prepare the dataset for training and evaluation """
    #The dataset is loaded here; it is already tokenized.
    personachat = get_dataset(tokenizer, args.dataset_path, args.dataset_cache)
    personachat_raw = get_dataset_with_no_tokenizer(tokenizer,
                                                    args.dataset_path,
                                                    args.dataset_cache)
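    # Precomputed FAISS persona selections; one entry is consumed per
    # (utterance, candidate) pair, in the same order as the loops below.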
    with open('data_faiss_fase1_opcion2_chatbot_personalidad_reducida.pkl',
              'rb') as f:
        persona_selected_list = pickle.load(f)
    count_persona = 0
    #personachat_personalities = get_dataset_personalities(tokenizer,args.dataset_path,args.dataset_cache)
    logger.info("Build inputs and labels")
    # Nested dict: split name -> model-input name -> list of feature arrays.
    datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    persona_info_raw = {"train": [], "valid": []}
    personas = {
        "persona_tokens": personachat,
        "persona_history": personachat_raw
    }
    # Collect the raw (untokenized) persona sentences once per split.
    for persona_index, personaset in personachat_raw.items():
        for dialog in personaset:
            persona_info_raw[persona_index].append(
                dialog["persona_info"].copy())
    for dataset_name, dataset in personachat.items():
        num_candidates = len(dataset[0]["utterances"][0]["candidates"])
        if args.num_candidates > 0 and dataset_name == 'train':
            num_candidates = min(args.num_candidates, num_candidates)
        count = 0
        for dialog in dataset:
            #persona1_raw = persona_info_raw[dataset_name][count].copy()
            #persona1 = dialog["persona_info"].copy()
            #persona_selected = get_persona_faiss_selected(args,tokenizer)
            #persona2 = dialog["persona_info2"].copy()
            #persona_selected = faiss(replyanddialog)
            #index: all persona1 sentences or all personalities
            #for _ in range(args.personality_permutations):
            for utterance in dialog["utterances"]:
                history = utterance["history"][-(2 * args.max_history + 1):]
                for j, candidate in enumerate(
                        utterance["candidates"][-num_candidates:]):
                    lm_labels = bool(j == num_candidates - 1)
                    #D, I = index.search(np.array([history]), k=10)
                    #print(f'L2 distance: {D.flatten().tolist()}\n\nMAG paper IDs: {I.flatten().tolist()}')
                    persona_selected = persona_selected_list[count_persona]
                    persona_selected_tokenized = tokenize(
                        tokenizer, persona_selected)
                    instance = build_input_from_segments(
                        persona_selected_tokenized, history, candidate,
                        tokenizer, lm_labels)
                    for input_name, input_array in instance.items():
                        datasets[dataset_name][input_name].append(input_array)
                        count_persona += 1
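                # The gold response is always the last candidate, so its index
                # serves as the multiple-choice label.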
                datasets[dataset_name]["mc_labels"].append(num_candidates - 1)
                datasets[dataset_name]["n_candidates"] = num_candidates
                #count_persona = count_persona + 1
                #persona1 = [persona1[-1]] + persona1[:-1]  # permuted personalities
                #persona2 = [persona2[-1]] + persona2[:-1]  # permuted personalities
            count += 1
    logger.info("Pad inputs and convert to Tensor")
    tensor_datasets = {"train": [], "valid": []}
    for dataset_name, dataset in datasets.items():
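        # Pad every input sequence to a common length with the id of the last
        # SPECIAL_TOKENS_2 entry (assumed to be the pad token).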
        dataset = pad_dataset(dataset,
                              padding=tokenizer.convert_tokens_to_ids(
                                  SPECIAL_TOKENS_2[-1]))
        for input_name in MODEL_INPUTS:
            tensor = torch.tensor(dataset[input_name])
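            # Inputs other than mc_labels are grouped as
            # (num_examples, n_candidates, ...), one row per utterance.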
            if input_name != "mc_labels":
                tensor = tensor.view((-1,
                                      datasets[dataset_name]["n_candidates"]) +
                                     tensor.shape[1:])
            tensor_datasets[dataset_name].append(tensor)

    logger.info("Build train and validation dataloaders")
    train_dataset, valid_dataset = TensorDataset(
        *tensor_datasets["train"]), TensorDataset(*tensor_datasets["valid"])
    train_sampler = torch.utils.data.distributed.DistributedSampler(
        train_dataset) if args.distributed else None
    valid_sampler = torch.utils.data.distributed.DistributedSampler(
        valid_dataset) if args.distributed else None
    train_loader = DataLoader(train_dataset,
                              sampler=train_sampler,
                              batch_size=args.train_batch_size,
                              shuffle=(not args.distributed))
    valid_loader = DataLoader(valid_dataset,
                              sampler=valid_sampler,
                              batch_size=args.valid_batch_size,
                              shuffle=False)

    logger.info("Train dataset (Batch, Candidates, Seq length): {}".format(
        train_dataset.tensors[0].shape))
    logger.info("Valid dataset (Batch, Candidates, Seq length): {}".format(
        valid_dataset.tensors[0].shape))
    return train_loader, valid_loader, train_sampler, valid_sampler


def get_persona_faiss_selected(args):
    """ Select one persona sentence per training example via FAISS search over
    the dialog's own persona. NOTE: this definition shadows
    get_persona_faiss_selected(args, tokenizer) defined above. """
    # A placeholder tokenizer suffices for the raw (untokenized) dataset loader.
    tokenizer = ""
    personachat = get_dataset_with_no_tokenizer(tokenizer, args.dataset_path,
                                                args.dataset_cache)

    logger.info("Build inputs and labels")
    datasets = {"train": defaultdict(list), "valid": defaultdict(list)}
    persona_faiss_selected = []
    history_faiss_selected = []
    persona_faiss_index = []
    history_faiss_index = []
    persona_complete = parse_data('./Dataset/train_self_original.txt')
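    # Sentence encoder shared by the persona sentences and the dialog history.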
    model = SentenceTransformer('distilbert-base-nli-stsb-mean-tokens')
    count = 0
    #embeddings_persona = model.encode(persona_complete, show_progress_bar=False)
    # Step 1: Change data type
    #embeddings_persona = np.array([embedding for embedding in embeddings_persona]).astype("float32")

    # Step 2: Instantiate the index
    #index = faiss.IndexFlatL2(embeddings_persona.shape[1])

    # Step 3: Pass the index to IndexIDMap
    #index = faiss.IndexIDMap(index)

    # Step 4: Add vectors and their IDs
    #index.add_with_ids(embeddings_persona, np.array(list(range(0,embeddings_persona.shape[0]))))
    for dataset_name, dataset in personachat.items():
        num_candidates = len(dataset[0]["utterances"][0]["candidates"])
        if args.num_candidates > 0 and dataset_name == 'train':
            num_candidates = min(args.num_candidates, num_candidates)
        for dialog in tqdm(dataset):
            #persona = dialog["personality"].copy()
            persona = dialog["persona_info"]
            #persona2 = dialog["persona_info2"].copy()
            #persona_selected = faiss(replyanddialog)
            #index: all persona1 sentences or all personalities
            #model1 = SentenceTransformer('bert-large-nli-mean-tokens')
            #model = SentenceTransformer('sentence-transformers/distiluse-base-multilingual-cased')
            embeddings_persona = model.encode(persona, show_progress_bar=False)
            # FAISS expects float32 vectors.
            embeddings_persona = np.asarray(embeddings_persona,
                                            dtype="float32")

            # Exact L2 index over this dialog's persona sentences, wrapped in
            # IndexIDMap so search results map back to positions in `persona`.
            index = faiss.IndexFlatL2(embeddings_persona.shape[1])
            index = faiss.IndexIDMap(index)
            index.add_with_ids(
                embeddings_persona,
                np.arange(embeddings_persona.shape[0], dtype="int64"))
            #if count==4:
            #    break
            count += 1
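            # For each utterance: find the persona sentence closest to the
            # user's last turn, drop it, and keep the best match among the rest.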
            for _ in range(args.personality_permutations):
                for utterance in dialog["utterances"]:
                    history = utterance["history"][-(2 * args.max_history +
                                                     1):]
                    for j, candidate in enumerate(
                            utterance["candidates"][-num_candidates:]):
                        #historysplitted = " ".join(history)
                        # Query with the user's most recent turn.
                        history_encoded_user = model.encode(
                            [history[-1]], show_progress_bar=False)
                        D, I = index.search(np.array(history_encoded_user),
                                            k=len(persona))
                        history_faiss_selected.append(history)
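                        # Drop the persona sentence closest to the user's last
                        # turn, then search again among the remaining sentences.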

                        index_to_be_removed = I[0][0]

                        persona2 = persona[:index_to_be_removed] + persona[
                            index_to_be_removed + 1:]

                        embeddings_persona2 = model.encode(
                            persona2, show_progress_bar=False)
                        # FAISS expects float32 vectors.
                        embeddings_persona2 = np.asarray(
                            embeddings_persona2, dtype="float32")

                        # Second index over the remaining persona sentences.
                        index2 = faiss.IndexFlatL2(
                            embeddings_persona2.shape[1])
                        index2 = faiss.IndexIDMap(index2)
                        index2.add_with_ids(
                            embeddings_persona2,
                            np.arange(embeddings_persona2.shape[0],
                                      dtype="int64"))
                        # Record the intermediate neighbour IDs (all hits but
                        # the first and the last).
                        persona_faiss_index.append([I[0][1:-1].tolist()])
                        # The corresponding sentences (not used further below).
                        persona_list = [persona[i] for i in I[0][1:-1]]
                        # Embed the chatbot's previous turn when available,
                        # otherwise fall back to the user's turn.
                        if len(history) > 1:
                            history_encoded_chatbot = model.encode(
                                [history[-2]], show_progress_bar=False)
                        else:
                            history_encoded_chatbot = model.encode(
                                [history[-1]], show_progress_bar=False)
                        # NOTE: the second search still queries with the user
                        # embedding; history_encoded_chatbot is not used here.
                        T, J = index2.search(np.array(history_encoded_user),
                                             k=len(persona2))
                        persona_faiss_selected.append(persona2[J[0][0]])
                #persona = [persona[-1]] + persona[:-1]  # permuted personalities
        #break
    return persona_faiss_selected, persona_faiss_index
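

# Typical offline usage (a sketch, assuming the pickle filename expected by
# get_data_loaders above): run the FAISS selection once and dump the result.
#
#   persona_selected, persona_index = get_persona_faiss_selected(args)
#   with open('data_faiss_fase1_opcion2_chatbot_personalidad_reducida.pkl',
#             'wb') as f:
#       pickle.dump(persona_selected, f)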