Code Example #1
File: eval_specific.py  Project: DReiser7/w2v_did
 def __init__(self, path=None):
     # Load the published dialect-classification checkpoint by default;
     # otherwise load the classifier weights from the given local path.
     # (Renamed `dir` to `model_dir` to avoid shadowing the builtin.)
     if path is None:
         model_dir = 'Zaid/wav2vec2-large-xlsr-dialect-classification'
         self.model = Wav2Vec2KlaamModel.from_pretrained(model_dir).to("cuda")
     else:
         model_dir = path
         self.model = Wav2VecClassifierModel.from_pretrained(model_dir).to("cuda")
     self.processor = CustomWav2Vec2Processor.from_pretrained(model_dir)
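A minimal usage sketch for the loader above. The wrapper class name `DialectPredictor`, the `.logits` field on the model output, and the sample file path are assumptions, not part of the project:

import torch
import torchaudio

predictor = DialectPredictor()  # hypothetical name for the class above
speech, sr = torchaudio.load("sample.wav")  # assumed mono 16 kHz input
inputs = predictor.processor(speech[0].numpy(),
                             sampling_rate=16_000,
                             return_tensors="pt",
                             padding=True)
with torch.no_grad():
    logits = predictor.model(inputs.input_values.to("cuda")).logits
print(logits.argmax(-1).item())  # index of the predicted class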
Code Example #2
def main():
    # See all possible arguments in src/transformers/training_args.py
    # or by passing the --help flag to this script.
    # We now keep distinct sets of args, for a cleaner separation of concerns.

    parser = HfArgumentParser(
        (ModelArguments, DataTrainingArguments, TrainingArguments))
    if len(sys.argv) == 2 and sys.argv[1].endswith(".json"):
        # If we pass only one argument to the script and it's the path to a json file,
        # let's parse it to get our arguments.
        model_args, data_args, training_args = parser.parse_json_file(
            json_file=os.path.abspath(sys.argv[1]))
    else:
        model_args, data_args, training_args = parser.parse_args_into_dataclasses(
        )
    # Detecting last checkpoint.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train
            and not training_args.overwrite_output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and os.listdir(training_args.output_dir):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank)
                    else logging.WARN)

    # Log a short summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:

    train_dataset = datasets.load_dataset("corpora/com_voice_accent_corpus",
                                          split="train",
                                          cache_dir=model_args.cache_dir)
    eval_dataset = datasets.load_dataset("corpora/com_voice_accent_corpus",
                                         split="test",
                                         cache_dir=model_args.cache_dir)

    feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1,
                                                 sampling_rate=16_000,
                                                 padding_value=0.0,
                                                 do_normalize=True,
                                                 return_attention_mask=True)
    processor = CustomWav2Vec2Processor(feature_extractor=feature_extractor)
    model = Wav2Vec2CommVoiceAccentModel.from_pretrained(
        "facebook/wav2vec2-large-xlsr-53",
        attention_dropout=0.01,
        hidden_dropout=0.01,
        feat_proj_dropout=0.0,
        mask_time_prob=0.05,
        layerdrop=0.01,
        gradient_checkpointing=True,
    )

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    if data_args.max_train_samples is not None:
        train_dataset = train_dataset.select(range(
            data_args.max_train_samples))

    if data_args.max_val_samples is not None:
        eval_dataset = eval_dataset.select(range(data_args.max_val_samples))

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and attach the class labels.
    def speech_file_to_array_fn(batch):
        stop = 10  # keep at most the first 10 seconds of each clip
        srate = 16_000
        speech_array, sampling_rate = torchaudio.load(batch["file"])
        speech_array = speech_array[0].numpy()[:stop * sampling_rate]
        # Keyword arguments are required by librosa >= 0.10.
        batch["speech"] = librosa.resample(np.asarray(speech_array),
                                           orig_sr=sampling_rate,
                                           target_sr=srate)
        batch["sampling_rate"] = srate
        batch["parent"] = batch["label"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch["input_values"] = processor(
            batch["speech"],
            sampling_rate=batch["sampling_rate"][0]).input_values
        batch["labels"] = batch["parent"]
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    from sklearn.metrics import classification_report, confusion_matrix

    def compute_metrics(pred):
        label_idx = [0, 1, 2]
        label_names = ['austria', 'germany', 'switzerland']
        labels = pred.label_ids.argmax(-1)
        preds = pred.predictions.argmax(-1)
        acc = accuracy_score(labels, preds)
        f1 = f1_score(labels, preds, average='macro')
        report = classification_report(y_true=labels,
                                       y_pred=preds,
                                       labels=label_idx,
                                       target_names=label_names)
        matrix = confusion_matrix(y_true=labels, y_pred=preds)
        print(report)
        print(matrix)

        wandb.log({
            "conf_mat":
            wandb.plot.confusion_matrix(probs=None,
                                        y_true=labels,
                                        preds=preds,
                                        class_names=label_names)
        })

        wandb.log({
            "precision_recall":
            wandb.plot.pr_curve(y_true=labels,
                                y_probas=pred.predictions,
                                labels=label_names)
        })

        return {"accuracy": acc, "f1_score": f1}

    wandb.init(name=training_args.output_dir, config=training_args)

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor,
                                               padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        # save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples
                             if data_args.max_train_samples is not None else
                             len(train_dataset))
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = (data_args.max_val_samples
                           if data_args.max_val_samples is not None
                           else len(eval_dataset))
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
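Since the parser above accepts a single path to a JSON file, here is a minimal sketch of such an argument file. Field names are taken from how `model_args`, `data_args`, and `training_args` are used in this script; all values are placeholders:

{
  "model_name_or_path": "facebook/wav2vec2-large-xlsr-53",
  "cache_dir": "cache/",
  "freeze_feature_extractor": true,
  "preprocessing_num_workers": 4,
  "output_dir": "out/accent-clf",
  "do_train": true,
  "do_eval": true,
  "per_device_train_batch_size": 8,
  "num_train_epochs": 10,
  "fp16": true,
  "seed": 42
}

The script would then be invoked as `python <script>.py args.json` (the script's file name is not shown in the source).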
Code Example #3
def main(model_args, data_args, training_args):
    # Detecting last checkpoint.
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train
            and not training_args.overwrite_output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and os.listdir(training_args.output_dir):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank)
                    else logging.WARN)

    # Log a short summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:

    train_dataset = datasets.load_dataset(CORPORA_PATH,
                                          split="train",
                                          cache_dir=model_args.cache_dir)
    eval_dataset = datasets.load_dataset(CORPORA_PATH,
                                         split="test",
                                         cache_dir=model_args.cache_dir)

    feature_extractor = Wav2Vec2FeatureExtractor(feature_size=1,
                                                 sampling_rate=16_000,
                                                 padding_value=0.0,
                                                 do_normalize=True,
                                                 return_attention_mask=True)
    processor = CustomWav2Vec2Processor(feature_extractor=feature_extractor)
    model = Wav2VecClassifierModel.from_pretrained(
        "facebook/wav2vec2-large-xlsr-53",
        attention_dropout=0.01,
        hidden_dropout=0.01,
        feat_proj_dropout=0.0,
        mask_time_prob=0.05,
        layerdrop=0.01,
        gradient_checkpointing=True,
    )

    if model_args.freeze_feature_extractor:
        model.freeze_feature_extractor()

    if data_args.max_train_samples is not None:
        train_dataset = train_dataset.select(range(
            data_args.max_train_samples))

    if data_args.max_val_samples is not None:
        max_val_samples = min(data_args.max_val_samples, len(eval_dataset))
        eval_dataset = eval_dataset.select(range(max_val_samples))

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and attach the class labels.
    def speech_file_to_array_fn(batch):
        stop = SECONDS_STOP  # clip length in seconds
        srate = S_RATE
        speech_array, sampling_rate = torchaudio.load(batch["file"])
        speech_array = speech_array[0].numpy()[:stop * sampling_rate]
        # Keyword arguments are required by librosa >= 0.10.
        batch["speech"] = librosa.resample(np.asarray(speech_array),
                                           orig_sr=sampling_rate,
                                           target_sr=srate)
        batch["sampling_rate"] = srate
        batch["parent"] = batch["label"]
        return batch

    train_dataset = train_dataset.map(
        speech_file_to_array_fn,
        remove_columns=train_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        speech_file_to_array_fn,
        remove_columns=eval_dataset.column_names,
        num_proc=data_args.preprocessing_num_workers,
    )

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch["input_values"] = processor(
            batch["speech"],
            sampling_rate=batch["sampling_rate"][0]).input_values
        batch["labels"] = batch["parent"]
        return batch

    train_dataset = train_dataset.map(
        prepare_dataset,
        remove_columns=train_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )
    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    from sklearn.metrics import classification_report, confusion_matrix

    def macro_averaged_mean_absolute_error(y_true, y_pred):
        # Average the per-class MAE so that rare classes weigh as much as
        # frequent ones (macro averaging).
        classes = np.unique(y_true)

        err = 0.0
        for c in classes:
            idx = np.where(y_true == c)[0]
            err += mean_absolute_error(y_true=np.take(y_true, idx),
                                       y_pred=np.take(y_pred, idx))

        return err / len(classes)

    def compute_metrics(pred):
        label_idx = LABEL_IDX
        label_names = LABEL_NAMES
        labels = pred.label_ids.argmax(-1)
        preds = pred.predictions.argmax(-1)
        acc = accuracy_score(labels, preds)
        f1 = f1_score(labels, preds, average='macro')
        report = classification_report(y_true=labels,
                                       y_pred=preds,
                                       labels=label_idx,
                                       target_names=label_names)
        matrix = confusion_matrix(y_true=labels, y_pred=preds)
        print(report)
        print(matrix)

        wandb.log({
            "conf_mat":
            wandb.plot.confusion_matrix(probs=None,
                                        y_true=labels,
                                        preds=preds,
                                        class_names=label_names)
        })

        wandb.log({
            "precision_recall":
            wandb.plot.pr_curve(y_true=labels,
                                y_probas=pred.predictions,
                                labels=label_names)
        })

        mse = mean_squared_error(y_true=labels, y_pred=preds)
        mae = mean_absolute_error(y_true=labels, y_pred=preds)

        maem = macro_averaged_mean_absolute_error(y_true=labels, y_pred=preds)

        return {
            "accuracy": acc,
            "f1_score": f1,
            "MSE": mse,
            "MAE": mae,
            "MAE^M": maem
        }

    wandb.init(name=training_args.output_dir, config=training_args)

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor,
                                               padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=train_dataset if training_args.do_train else None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Training
    if training_args.do_train:
        if last_checkpoint is not None:
            checkpoint = last_checkpoint
        elif os.path.isdir(model_args.model_name_or_path):
            checkpoint = model_args.model_name_or_path
        else:
            checkpoint = None
        train_result = trainer.train(resume_from_checkpoint=checkpoint)
        trainer.save_model()

        # save the feature_extractor and the tokenizer
        if is_main_process(training_args.local_rank):
            processor.save_pretrained(training_args.output_dir)

        metrics = train_result.metrics
        max_train_samples = (data_args.max_train_samples
                             if data_args.max_train_samples is not None else
                             len(train_dataset))
        metrics["train_samples"] = min(max_train_samples, len(train_dataset))

        trainer.log_metrics("train", metrics)
        trainer.save_metrics("train", metrics)
        trainer.save_state()

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = (data_args.max_val_samples
                           if data_args.max_val_samples is not None
                           else len(eval_dataset))
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
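A quick, self-contained sanity check of the `macro_averaged_mean_absolute_error` helper defined in the example above, with the arithmetic worked out by hand (toy labels, no training required):

import numpy as np
from sklearn.metrics import mean_absolute_error

y_true = np.array([0, 0, 0, 2, 2])
y_pred = np.array([0, 1, 0, 2, 0])

# Plain MAE averages over samples: (0 + 1 + 0 + 0 + 2) / 5 = 0.6
print(mean_absolute_error(y_true, y_pred))  # -> 0.6

# The macro variant averages per-class MAE instead:
# class 0 -> 1/3, class 2 -> 1.0, so (1/3 + 1.0) / 2 ≈ 0.667.
# Rare classes therefore weigh as much as frequent ones.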
Code Example #4
 def __init__(self, path):
     # `path` is required here: the original default of None would make
     # from_pretrained(None) fail.
     self.model = Wav2VecClassifierModel.from_pretrained(path).to("cuda")
     self.processor = CustomWav2Vec2Processor.from_pretrained(path)
Code Example #5
 def __init__(self, path, window_length, number_of_windows, labels):
     self.model = Wav2VecClassifierModel.from_pretrained(path).to("cuda")
     self.processor = CustomWav2Vec2Processor.from_pretrained(path)
     self.window_length = window_length
     self.number_of_windows = number_of_windows
     self.labels = labels
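A sketch of how the window parameters above could slice a clip into fixed-length evaluation segments; it mirrors the start/stop loop in code example #6 below. The helper name and the skip-empty rule are assumptions:

import numpy as np

def make_windows(speech: np.ndarray, sr: int, window_length: int,
                 number_of_windows: int):
    # Cut `speech` into consecutive windows of `window_length` seconds,
    # dropping windows that start past the end of the clip.
    windows, stop = [], 0
    for i in range(number_of_windows):
        start = 0 if i == 0 else stop
        stop = start + window_length
        segment = speech[start * sr:stop * sr]
        if segment.size:
            windows.append(segment)
    return windows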
Code Example #6
def main(model_args, data_args, training_args):
    last_checkpoint = None
    if (os.path.isdir(training_args.output_dir) and training_args.do_train
            and not training_args.overwrite_output_dir):
        last_checkpoint = get_last_checkpoint(training_args.output_dir)
        if last_checkpoint is None and os.listdir(training_args.output_dir):
            raise ValueError(
                f"Output directory ({training_args.output_dir}) already exists and is not empty. "
                "Use --overwrite_output_dir to overcome.")
        elif last_checkpoint is not None:
            logger.info(
                f"Checkpoint detected, resuming training at {last_checkpoint}. To avoid this behavior, change "
                "the `--output_dir` or add `--overwrite_output_dir` to train from scratch."
            )

    # Setup logging
    logging.basicConfig(
        format="%(asctime)s - %(levelname)s - %(name)s -   %(message)s",
        datefmt="%m/%d/%Y %H:%M:%S",
        handlers=[logging.StreamHandler(sys.stdout)],
    )
    logger.setLevel(logging.INFO if is_main_process(training_args.local_rank)
                    else logging.WARN)

    # Log a short summary on each process:
    logger.warning(
        f"Process rank: {training_args.local_rank}, device: {training_args.device}, n_gpu: {training_args.n_gpu}, "
        f"distributed training: {bool(training_args.local_rank != -1)}, 16-bits training: {training_args.fp16}"
    )
    # Set the verbosity to info of the Transformers logger (on main process only):
    if is_main_process(training_args.local_rank):
        transformers.utils.logging.set_verbosity_info()

    # Set seed before initializing model.
    set_seed(training_args.seed)

    # Get the datasets:
    eval_dataset = datasets.load_dataset(CORPORA_PATH,
                                         split="test",
                                         cache_dir=model_args.cache_dir)

    processor = CustomWav2Vec2Processor.from_pretrained(
        model_args.model_name_or_path)
    model = Wav2VecClassifierModel.from_pretrained(
        model_args.model_name_or_path,
        attention_dropout=0.01,
        hidden_dropout=0.01,
        feat_proj_dropout=0.0,
        mask_time_prob=0.05,
        layerdrop=0.01,
        gradient_checkpointing=True,
    )

    if data_args.max_val_samples is not None:
        max_val_samples = min(data_args.max_val_samples, len(eval_dataset))
        eval_dataset = eval_dataset.select(range(max_val_samples))

    # Preprocessing the datasets.
    # We need to read the audio files as arrays and attach the class labels.
    def speech_file_to_array_fn(batch, start_param, stop_param):
        speech_array, sampling_rate = torchaudio.load(batch["file"])

        # Window starts past the end of the clip: emit a null sample so that
        # filter_null below can drop it.
        if start_param * sampling_rate >= len(speech_array[0]):
            batch["speech"] = np.asarray([0])
            batch["sampling_rate"] = S_RATE
            batch["parent"] = batch["label"]
            return batch

        speech_array = speech_array[0].numpy()[start_param * sampling_rate:
                                               stop_param * sampling_rate]
        # Keyword arguments are required by librosa >= 0.10.
        batch["speech"] = librosa.resample(np.asarray(speech_array),
                                           orig_sr=sampling_rate,
                                           target_sr=S_RATE)
        batch["sampling_rate"] = S_RATE
        batch["parent"] = batch["label"]
        return batch

    def filter_null(batch):
        # Drop the placeholder samples produced for out-of-range windows.
        return not (batch['speech'] == np.array([0])).all()

    # Slice each file into consecutive fixed-length windows and evaluate all
    # windows as independent samples.
    eval_dataset_array = []
    stop = 0
    for i in range(WINDOW_COUNT):
        start = 0 if i == 0 else stop
        stop = start + WINDOW_LENGTH
        arguments = {'start_param': start, 'stop_param': stop}
        eval_dataset_array.append(
            eval_dataset.map(speech_file_to_array_fn,
                             remove_columns=eval_dataset.column_names,
                             num_proc=data_args.preprocessing_num_workers,
                             fn_kwargs=arguments).filter(filter_null))

    eval_dataset = datasets.concatenate_datasets(eval_dataset_array)

    def prepare_dataset(batch):
        # check that all files have the correct sampling rate
        assert (
            len(set(batch["sampling_rate"])) == 1
        ), f"Make sure all inputs have the same sampling rate of {processor.feature_extractor.sampling_rate}."
        batch["input_values"] = processor(
            batch["speech"],
            sampling_rate=batch["sampling_rate"][0]).input_values
        batch["labels"] = batch["parent"]
        return batch

    eval_dataset = eval_dataset.map(
        prepare_dataset,
        remove_columns=eval_dataset.column_names,
        batch_size=training_args.per_device_train_batch_size,
        batched=True,
        num_proc=data_args.preprocessing_num_workers,
    )

    from sklearn.metrics import classification_report, confusion_matrix

    def compute_metrics(pred):
        label_idx = LABEL_IDX
        label_names = LABEL_NAMES
        labels = pred.label_ids.argmax(-1)
        preds = pred.predictions.argmax(-1)
        acc = accuracy_score(labels, preds)
        f1 = f1_score(labels, preds, average='macro')
        report = classification_report(y_true=labels,
                                       y_pred=preds,
                                       labels=label_idx,
                                       target_names=label_names)
        matrix = confusion_matrix(y_true=labels, y_pred=preds)
        print(report)
        print(matrix)

        wandb.log({
            "conf_mat":
            wandb.plot.confusion_matrix(probs=None,
                                        y_true=labels,
                                        preds=preds,
                                        class_names=label_names)
        })

        wandb.log({
            "precision_recall":
            wandb.plot.pr_curve(y_true=labels,
                                y_probas=pred.predictions,
                                labels=label_names)
        })

        return {"accuracy": acc, "f1_score": f1}

    wandb.init(name=training_args.output_dir, config=training_args)

    # Data collator
    data_collator = DataCollatorCTCWithPadding(processor=processor,
                                               padding=True)

    # Initialize our Trainer
    trainer = CTCTrainer(
        model=model,
        data_collator=data_collator,
        args=training_args,
        compute_metrics=compute_metrics,
        train_dataset=None,
        eval_dataset=eval_dataset if training_args.do_eval else None,
        tokenizer=processor.feature_extractor,
    )

    # Evaluation
    results = {}
    if training_args.do_eval:
        logger.info("*** Evaluate ***")
        metrics = trainer.evaluate()
        max_val_samples = (data_args.max_val_samples
                           if data_args.max_val_samples is not None
                           else len(eval_dataset))
        metrics["eval_samples"] = min(max_val_samples, len(eval_dataset))

        trainer.log_metrics("eval", metrics)
        trainer.save_metrics("eval", metrics)

    return results
Code Example #7
 def __init__(self, path, window_length, number_of_windows, labels):
     self.model = Wav2Vec2CommVoice10sModel.from_pretrained(path).to("cuda")
     self.processor = CustomWav2Vec2Processor.from_pretrained(path)
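`DataCollatorCTCWithPadding` is referenced throughout these examples but its definition is not shown. A minimal sketch of what such a collator plausibly does, assuming it pads `input_values` through the processor and stacks the classification labels; the project's real class may differ:

from dataclasses import dataclass
from typing import Dict, List, Union

import torch

@dataclass
class PaddingCollatorSketch:
    processor: "CustomWav2Vec2Processor"
    padding: Union[bool, str] = True

    def __call__(self, features: List[Dict]) -> Dict[str, torch.Tensor]:
        # Pad the raw waveforms to the longest sample in the batch.
        input_features = [{"input_values": f["input_values"]}
                          for f in features]
        batch = self.processor.pad(input_features,
                                   padding=self.padding,
                                   return_tensors="pt")
        # Stack the labels alongside the padded inputs.
        batch["labels"] = torch.tensor([f["labels"] for f in features])
        return batch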