Example #1
from datasets import load_from_disk
from torch.utils.data import DataLoader
from transformers import DataCollatorWithPadding

# DataProcessor, run_elasticsearch and run_concat_dense_retrival are
# project-local helpers; they are assumed to be importable from the repo's own modules.


def get_data(
        model_args,
        training_args,
        tokenizer,
        text_data_path="../data/test_dataset"):  # path changed to ../data/test_dataset
    """
    get data

    Args:
        model_args: model arguments
        training_args: training arguments
        tokenizer: tokenizer
        text_data_path: Defaults to "../data/test_dataset"

    Returns:
        text_data, val_iter, val_dataset, scores
    """
    text_data = load_from_disk(text_data_path)
    scores = None  # keep the return value defined if no retrieval branch runs

    # run Elasticsearch retrieval
    if "elastic" in model_args.retrieval_type:
        is_sentence_trainformer = False
        if "sentence_trainformer" in model_args.retrieval_type:
            is_sentence_trainformer = True
        # number of passages to concatenate
        concat_num = model_args.retrieval_elastic_num
        text_data, scores = run_elasticsearch(text_data, concat_num,
                                              model_args,
                                              is_sentence_trainformer)
    elif model_args.retrieval_type == "dense":
        concat_num = model_args.retrieval_elastic_num
        text_data, scores = run_concat_dense_retrival(text_data, concat_num)

    column_names = text_data["validation"].column_names

    data_collator = DataCollatorWithPadding(
        tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    # tokenize the data so it can be fed into the MRC model
    data_processor = DataProcessor(tokenizer)
    val_text = text_data["validation"]
    val_dataset = data_processor.val_tokenzier(val_text, column_names)
    val_iter = DataLoader(val_dataset, collate_fn=data_collator, batch_size=1)

    return text_data, val_iter, val_dataset, scores
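
A minimal usage sketch for this example. The argument objects below are hypothetical SimpleNamespace stand-ins for the project's own argument dataclasses (only the attributes get_data actually reads are filled in), and the tokenizer checkpoint is an assumption; the "elastic" setting additionally requires a running Elasticsearch instance for run_elasticsearch.

from types import SimpleNamespace

from transformers import AutoTokenizer

# Hypothetical stand-ins: only retrieval_type, retrieval_elastic_num and fp16
# are read by get_data above.
model_args = SimpleNamespace(retrieval_type="elastic", retrieval_elastic_num=5)
training_args = SimpleNamespace(fp16=False)
tokenizer = AutoTokenizer.from_pretrained("klue/bert-base")  # assumed checkpoint

text_data, val_iter, val_dataset, scores = get_data(
    model_args, training_args, tokenizer)
for batch in val_iter:
    pass  # each batch is a single tokenized validation example for the MRC model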
Example #2
import os

from datasets import load_dataset, load_from_disk
from torch.utils.data import DataLoader
from transformers import DataCollatorWithPadding

# DataProcessor, get_pickle, make_custom_dataset and make_mask_dataset are
# project-local helpers; they are assumed to be importable from the repo's own modules.


def get_data(data_args, training_args, tokenizer):
    '''Return the train/validation dataloaders and tokenized datasets.'''
    if data_args.dataset_name == 'basic':
        if os.path.isdir("../data/train_dataset"):
            dataset = load_from_disk("../data/train_dataset")
        else:
            raise Exception("Set the data path to 'p3-mrc-team-ikyo/data/.'")
    elif data_args.dataset_name == 'preprocessed':
        if os.path.isfile("../data/preprocess_train.pkl"):
            dataset = get_pickle("../data/preprocess_train.pkl")
        else:
            dataset = make_custom_dataset("../data/preprocess_train.pkl")
    elif data_args.dataset_name == 'concat':
        if os.path.isfile("../data/concat_train.pkl"):
            dataset = get_pickle("../data/concat_train.pkl")
        else:
            dataset = make_custom_dataset("../data/concat_train.pkl")
    elif data_args.dataset_name == 'korquad':
        if os.path.isfile("../data/korquad_train.pkl"):
            dataset = get_pickle("../data/korquad_train.pkl")
        else:
            dataset = make_custom_dataset("../data/korquad_train.pkl")
    elif data_args.dataset_name == "question_type":
        if os.path.isfile("../data/question_type.pkl"):
            dataset = get_pickle("../data/question_type.pkl")
        else:
            dataset = make_custom_dataset("../data/question_type.pkl")
    elif data_args.dataset_name == "ai_hub":
        if os.path.isfile("../data/ai_hub_dataset.pkl"):
            dataset = get_pickle("../data/ai_hub_dataset.pkl")
        else:
            dataset = make_custom_dataset("../data/ai_hub_dataset.pkl")
    elif data_args.dataset_name == "only_korquad":
        dataset = load_dataset("squad_kor_v1")
    elif data_args.dataset_name == "random_masking":
        if os.path.isfile("../data/random_mask_train.pkl"):
            dataset = get_pickle("../data/random_mask_train.pkl")
        else:
            dataset = make_custom_dataset("../data/random_mask_train.pkl")
    elif data_args.dataset_name == "token_masking":
        if os.path.isfile("../data/concat_token_mask_top_3.pkl"):
            dataset = get_pickle("../data/concat_token_mask_top_3.pkl")
        else:
            dataset = make_mask_dataset("../data/concat_token_mask_top_3.pkl",
                                        tokenizer)
        train_dataset = dataset['train']
        val_dataset = dataset['validation']
    else:
        raise Exception(
            "dataset_name have to be one of ['basic', 'preprocessed', 'concat', 'korquad', 'only_korquad', 'question_type', 'ai_hub', 'random_masking', 'token_masking']"
        )

    if data_args.dataset_name != "token_masking":
        train_dataset = dataset['train']
        val_dataset = dataset['validation']
        train_column_names = train_dataset.column_names
        val_column_names = val_dataset.column_names

        data_processor = DataProcessor(tokenizer, data_args.max_seq_length,
                                       data_args.doc_stride)
        train_dataset = data_processor.train_tokenizer(train_dataset,
                                                       train_column_names)
        val_dataset = data_processor.val_tokenzier(val_dataset,
                                                   val_column_names)

    data_collator = DataCollatorWithPadding(
        tokenizer, pad_to_multiple_of=8 if training_args.fp16 else None)
    train_iter = DataLoader(
        train_dataset,
        collate_fn=data_collator,
        batch_size=training_args.per_device_train_batch_size)
    val_iter = DataLoader(val_dataset,
                          collate_fn=data_collator,
                          batch_size=training_args.per_device_eval_batch_size)

    return dataset, train_iter, val_iter, train_dataset, val_dataset
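
A minimal usage sketch for this example as well. data_args is a hypothetical stand-in holding only the fields get_data reads; the output_dir, batch sizes and tokenizer checkpoint are assumptions, and dataset_name="basic" expects "../data/train_dataset" to exist on disk.

from types import SimpleNamespace

from transformers import AutoTokenizer, TrainingArguments

# Hypothetical stand-in for the project's data arguments dataclass.
data_args = SimpleNamespace(dataset_name="basic",
                            max_seq_length=384,
                            doc_stride=128)
training_args = TrainingArguments(output_dir="./outputs",  # assumed path
                                  per_device_train_batch_size=16,
                                  per_device_eval_batch_size=16,
                                  fp16=False)
tokenizer = AutoTokenizer.from_pretrained("klue/bert-base")  # assumed checkpoint

dataset, train_iter, val_iter, train_dataset, val_dataset = get_data(
    data_args, training_args, tokenizer)
print(len(train_dataset), len(val_dataset))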