Example #1
from pathlib import Path
from typing import MutableMapping, Optional

# load_settings_file, cmd_msg, experiment and the model classes (CRNN,
# BaselineDilated, DESSED, DESSEDDilated) are project-internal imports.


def do_process(settings_path: Optional[Path] = None,
               settings: Optional[MutableMapping] = None,
               model_to_use: Optional[str] = None) \
        -> None:
    """The process of the baseline experiment.

    :param settings_path: Path of the settings file, without the
                          ``.yaml`` suffix. Defaults to None.
    :type settings_path: pathlib.Path|None, optional
    :param settings: Settings to use. Defaults to None.
    :type settings: dict|None, optional
    :param model_to_use: Model to use. Defaults to None.
    :type model_to_use: str|None, optional
    """
    if settings_path is not None:
        settings = load_settings_file(Path(f'{settings_path}.yaml'))

    if model_to_use == 'baseline':
        msg = 'Baseline experiment'
        model = CRNN
    elif model_to_use == 'baseline_dilated':
        msg = 'Baseline with dilated convolutions experiment'
        model = BaselineDilated
    elif model_to_use == 'dessed':
        msg = 'Depth-wise separable with RNN experiment'
        model = DESSED
    elif model_to_use == 'dessed_dilated':
        msg = 'Depth-wise separable with dilated convolutions experiment'
        model = DESSEDDilated
    else:
        raise ValueError(f'Unrecognized model `{model_to_use}`. '
                         f'Accepted model names are: `baseline`, '
                         '`baseline_dilated`, `dessed`, '
                         '`dessed_dilated`.')

    cmd_msg(msg, start='\n-- ')
    model_settings = settings[model_to_use]

    cmd_msg('Starting experiment', end='\n\n')
    experiment(settings=settings,
               model_settings=model_settings,
               model_class=model)
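
A hypothetical invocation of do_process, for reference; the settings path and model name below are illustrative only (note that do_process appends the .yaml suffix itself before loading):

do_process(settings_path=Path('settings/baseline'),
           model_to_use='baseline')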
Example #2
from typing import Callable, MutableMapping

from torch.cuda import is_available
from torch.nn import BCEWithLogitsLoss
from torch.optim import Adam

# InformAboutProcess, cmd_msg, get_tut_sed_data_loader, nb_examples,
# nb_parameters, device_info, f1_per_frame, error_rate_per_frame,
# training and testing are project-internal imports.


def experiment(settings: MutableMapping,
               model_settings: MutableMapping,
               model_class: Callable) \
        -> None:
    """Does the experiment with the specified settings and model.

    :param settings: General settings.
    :type settings: dict
    :param model_settings: Model settings.
    :type model_settings: dict
    :param model_class: The class of the model.
    :type model_class: callable
    """
    device = 'cuda' if is_available() else 'cpu'

    with InformAboutProcess('Creating the model'):
        model = model_class(**model_settings)
        model = model.to(device)

    with InformAboutProcess('Creating training data loader'):
        training_data = get_tut_sed_data_loader(split='training',
                                                **settings['data_loader'])

    with InformAboutProcess('Creating validation data loader'):
        validation_data = get_tut_sed_data_loader(split='validation',
                                                  **settings['data_loader'])

    with InformAboutProcess('Creating optimizer'):
        optimizer = Adam(model.parameters(), lr=settings['optimizer']['lr'])

    cmd_msg('', start='')

    common_kwargs = {
        'f1_func': f1_per_frame,
        'er_func': error_rate_per_frame,
        'device': device
    }

    nb_examples([training_data, validation_data], ['Training', 'Validation'],
                settings['data_loader']['batch_size'])

    if hasattr(model, 'dnn'):
        nb_parameters(model.dnn, 'DNN')
    if hasattr(model, 'dilated_cnn'):
        nb_parameters(model.dilated_cnn, 'Dilated CNN')
    if hasattr(model, 'rnn'):
        nb_parameters(model.rnn, 'RNN')
    nb_parameters(model.classifier, 'Classifier')
    nb_parameters(model)

    cmd_msg('', start='')
    device_info(device)

    cmd_msg('Starting training', start='\n\n-- ', end='\n\n')

    optimized_model = training(
        model=model,
        data_loader_training=training_data,
        optimizer=optimizer,
        objective=BCEWithLogitsLoss(),
        epochs=settings['training']['epochs'],
        data_loader_validation=validation_data,
        validation_patience=settings['training']['validation_patience'],
        grad_norm=settings['training']['grad_norm'],
        **common_kwargs)

    del training_data
    del validation_data

    with InformAboutProcess('Creating testing data loader'):
        testing_data = get_tut_sed_data_loader(split='testing',
                                               **settings['data_loader'])

    nb_examples([testing_data], ['Testing'],
                settings['data_loader']['batch_size'])

    cmd_msg('Starting testing', start='\n\n-- ', end='\n\n')
    testing(model=optimized_model, data_loader=testing_data, **common_kwargs)

    cmd_msg('That\'s all!', start='\n\n-- ', end='\n\n')
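
The experiment function reads only a handful of keys from settings. Below is a minimal sketch of the expected mapping, with purely illustrative values; everything under data_loader is also splatted into get_tut_sed_data_loader, which may accept further keyword arguments:

settings = {
    'data_loader': {'batch_size': 16},
    'optimizer': {'lr': 1e-4},
    'training': {'epochs': 100,
                 'validation_patience': 30,
                 'grad_norm': 1.0},
    'baseline': {},  # model kwargs; do_process looks this up as settings[model_to_use]
}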
Example #3
from copy import deepcopy
from time import time
from typing import Callable

from torch import no_grad, optim as pt_opt
from torch.nn import Module
from torch.utils.data import DataLoader

# cmd_msg, results_training and _sed_epoch are project-internal imports.


def training(model: Module,
             data_loader_training: DataLoader,
             optimizer: pt_opt.Optimizer,
             objective: Callable,
             f1_func: Callable,
             er_func: Callable,
             epochs: int,
             data_loader_validation: DataLoader,
             validation_patience: int,
             device: str,
             grad_norm: float) \
        -> Module:
    """Optimizes a model.

    :param model: Model to optimize.
    :type model: torch.nn.Module
    :param data_loader_training: Data loader to be used with
                                 the training data.
    :type data_loader_training: torch.utils.data.DataLoader
    :param optimizer: Optimizer to be used.
    :type optimizer: torch.optim.Optimizer
    :param objective: Objective function to be used.
    :type objective: callable
    :param f1_func: Function to calculate the F1 score.
    :type f1_func: callable
    :param er_func: Function to calculate the error rate.
    :type er_func: callable
    :param epochs: Maximum number of epochs for training.
    :type epochs: int
    :param data_loader_validation: Data loader to be used with
                                   the validation data.
    :type data_loader_validation: torch.utils.data.DataLoader
    :param validation_patience: Maximum number of epochs to wait
                                for an improvement of the validation loss.
    :type validation_patience: int
    :param device: Device to be used.
    :type device: str
    :param grad_norm: Maximum gradient norm.
    :type grad_norm: float
    :return: Optimized model.
    :rtype: torch.nn.Module
    """
    best_model = None
    epochs_waiting = 0
    lowest_epoch_loss = float('inf')
    best_model_epoch = -1

    for epoch in range(epochs):
        start_time = time()

        model = model.train()
        model, epoch_tr_loss, true_training, hat_training = _sed_epoch(
            model=model,
            data_loader=data_loader_training,
            objective=objective,
            optimizer=optimizer,
            device=device,
            grad_norm=grad_norm)

        epoch_tr_loss = epoch_tr_loss.mean().item()

        f1_score_training = f1_func(hat_training, true_training).mean().item()

        error_rate_training = er_func(hat_training,
                                      true_training).mean().item()

        model = model.eval()
        with no_grad():
            model, epoch_va_loss, true_validation, hat_validation = _sed_epoch(
                model=model,
                data_loader=data_loader_validation,
                objective=objective,
                optimizer=None,
                device=device)

        epoch_va_loss = epoch_va_loss.mean().item()

        f1_score_validation = f1_func(hat_validation,
                                      true_validation).mean().item()

        error_rate_validation = er_func(hat_validation,
                                        true_validation).mean().item()

        # Patience-based early stopping bookkeeping: snapshot the weights
        # whenever the validation loss improves, otherwise count the epochs
        # waited since the last improvement.
        if epoch_va_loss < lowest_epoch_loss:
            lowest_epoch_loss = epoch_va_loss
            epochs_waiting = 0
            best_model = deepcopy(model.state_dict())
            best_model_epoch = epoch
        else:
            epochs_waiting += 1

        end_time = time() - start_time

        results_training(epoch=epoch,
                         training_loss=epoch_tr_loss,
                         validation_loss=epoch_va_loss,
                         training_f1=f1_score_training,
                         training_er=error_rate_training,
                         validation_f1=f1_score_validation,
                         validation_er=error_rate_validation,
                         time_elapsed=end_time)

        if epochs_waiting >= validation_patience:
            cmd_msg(
                f'Early stopping! Lowest validation loss: {lowest_epoch_loss:7.3f} '
                f'at epoch: {best_model_epoch:3d}',
                start='\n-- ',
                end='\n\n')
            break

    # Restore the weights from the epoch with the lowest validation loss.
    if best_model is not None:
        model.load_state_dict(best_model)

    return model
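
The per-epoch work is delegated to _sed_epoch, which is not shown on this page. Below is a minimal sketch of what such a helper could look like, inferred purely from its call sites: passing optimizer=None skips the backward pass, and grad_norm needs a default value because the validation call omits it. The real implementation may differ.

from typing import Callable, Optional, Tuple

from torch import Tensor, cat, stack
from torch.nn import Module
from torch.nn.utils import clip_grad_norm_
from torch.optim import Optimizer
from torch.utils.data import DataLoader


def _sed_epoch(model: Module,
               data_loader: DataLoader,
               objective: Callable,
               optimizer: Optional[Optimizer],
               device: str,
               grad_norm: float = 1.) \
        -> Tuple[Module, Tensor, Tensor, Tensor]:
    epoch_losses, all_true, all_hat = [], [], []

    for x, y in data_loader:
        x, y = x.to(device), y.to(device)

        y_hat = model(x)           # raw logits, as BCEWithLogitsLoss expects
        loss = objective(y_hat, y)

        if optimizer is not None:  # training pass only
            optimizer.zero_grad()
            loss.backward()
            clip_grad_norm_(model.parameters(), grad_norm)
            optimizer.step()

        epoch_losses.append(loss.detach())
        all_true.append(y)
        all_hat.append(y_hat.detach())

    # Per-batch losses as a 1-D tensor (the caller takes .mean().item());
    # targets and predictions concatenated along the batch dimension.
    return model, stack(epoch_losses), cat(all_true), cat(all_hat)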