Example 1
def predict(dataset, fold):
    """Generate predictions for audio tagging.

    This function uses an ensemble of models saved at different training
    epochs to generate the predictions, combining them with an arithmetic
    mean. The averaged predictions are then saved to disk.

    Args:
        dataset: Dataset to generate predictions for.
        fold (int): The specific fold to generate predictions for. For
            the training dataset, this selects the held-out fold to
            predict. For other datasets, a non-negative value excludes
            that fold from the data used to fit the feature scaler.
    """
    import inference

    # Load input data and associated metadata
    x, df = _load_data(dataset)
    dataset_name = dataset.name
    if dataset.name == 'training':
        if fold == -1:
            raise ValueError('Invalid fold: %d' % fold)

        dataset_name += str(fold)
        mask = df.fold == fold
        tr_x = x[~mask]
        x = x[mask]
        df = df[mask]
    else:
        tr_x, tr_df = _load_data(cfg.to_dataset('training'))
        if fold >= 0:
            dataset_name += str(fold)
            tr_x = tr_x[tr_df.fold != fold]

    # Standardize the input using statistics computed on the training data
    generator = utils.fit_scaler(tr_x)
    x = generator.standardize(x)

    # Predict class probabilities for each model (epoch)
    preds = []
    for epoch in _determine_epochs(cfg.prediction_epochs, fold, n=4):
        pred = utils.timeit(lambda: _load_model(fold, epoch).predict(x),
                            '[Epoch %d] Predicted class probabilities' % epoch)

        preds.append(inference.merge_predictions(pred, df.index))

    pred_mean = pd.concat(preds).groupby(level=0).mean()

    # Ensure output directory exists and set file path format
    os.makedirs(os.path.dirname(cfg.predictions_path), exist_ok=True)
    predictions_path = cfg.predictions_path.format('%s', dataset_name)

    # Save free parameters to disk
    utils.log_parameters({'prediction_epochs': cfg.prediction_epochs},
                         os.path.join(os.path.dirname(cfg.predictions_path),
                                      'parameters.json'))

    # Write predictions to disk
    pred_mean.to_csv(predictions_path % 'predictions')
    io.write_predictions(pred_mean, predictions_path % 'submission')
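
A minimal driver sketch for the function above; `cfg.to_dataset` is taken from the body, while the fold count and the 'test' dataset name are assumptions rather than part of the original source.

# Hypothetical driver, not part of the original module.
if __name__ == '__main__':
    for fold in range(4):                      # assumed number of folds
        predict(cfg.to_dataset('training'), fold)
    predict(cfg.to_dataset('test'), fold=-1)   # 'test' name is an assumption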
Example 2
    def predict(self):
        """Predict target values of the validation data.

        The main utility of this function is to merge the predictions of
        chunks belonging to the same audio clip. The same is done for
        the ground truth target values so that dimensions match.

        Returns:
            tuple: Tuple containing:

                y_true (np.ndarray): Ground truth target values.
                y_pred (np.ndarray): Predicted target values.
        """
        x, y_true = self.validation_data[:2]

        # Predict chunk-level outputs and merge them (and the ground
        # truth) into clip-level values
        y_pred = self.model.predict(x)
        y_true = inference.merge_predictions(y_true, self.val_index, 'first')
        y_pred = inference.merge_predictions(y_pred, self.val_index)
        return y_true.values, y_pred.values
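
A hedged sketch of how the method above might be consumed, e.g. from an on_epoch_end hook of the enclosing callback class; the metric call assumes sklearn.metrics is available as `metrics` and is illustrative only.

    def on_epoch_end(self, epoch, logs=None):
        # Illustrative sketch, not part of the original class: score the
        # merged clip-level predictions after each training epoch.
        y_true, y_pred = self.predict()
        ap = metrics.average_precision_score(y_true, y_pred, average='micro')
        print('[Epoch %d] val_mAP: %.4f' % (epoch + 1, ap))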
Example 3
def _validate(loader, index, model, criterion, logger):
    """Validate the model using the given data."""
    # Compute block-level predictions
    with torch.no_grad():
        y_pred = torch.cat([model(batch_x).softmax(dim=1).data
                            for batch_x, _ in loader]).cpu().numpy()

    # Convert to clip-level predictions
    y_true = loader.dataset.y.numpy()
    y_true = inference.merge_predictions(y_true, index).values
    y_pred = inference.merge_predictions(y_pred, index).values
    y_pred_b = inference.binarize_predictions(y_pred)

    loss = criterion(torch.as_tensor(y_pred), torch.as_tensor(y_true))
    logger.log('val_loss', loss.item())

    acc = metrics.accuracy_score(y_true, y_pred_b)
    logger.log('val_acc', acc)

    ap = metrics.average_precision_score(y_true, y_pred, average='micro')
    logger.log('val_mAP', ap)
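
A hedged sketch of where `_validate` could sit in a training loop; the training step, loaders, epoch count, and logger are placeholders following the interfaces used above.

# Illustrative training-loop excerpt; names other than _validate are placeholders.
for epoch in range(n_epochs):
    train_one_epoch(model, train_loader, criterion, optimizer)  # placeholder
    model.eval()
    _validate(val_loader, val_index, model, criterion, logger)
    model.train()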
Example 4
def predict(x, df, epoch, model_path, batch_size=128, callback=None):
    """Compute predictions using a saved model.

    The model that was saved after the specified epoch is used to
    compute predictions. After block-level predictions are computed,
    they are merged to give clip-level predictions.

    Args:
        x (np.ndarray): Array of input data.
        df (pd.DataFrame): Associated metadata.
        epoch (int): Epoch number of the model to load.
        model_path (str): Path to directory containing saved models.
        batch_size (int): Number of instances to predict per batch.
        callback: Optional callable invoked as callback(model, loader)
            to compute the block-level predictions instead of the
            default inference loop.

    Returns:
        pd.DataFrame: The clip-level predictions, indexed by clip.
    """
    # Determine which device (GPU or CPU) to use
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

    # Load model from disk
    model_path = os.path.join(model_path, f'model.{epoch:02d}.pth')
    checkpoint = torch.load(model_path, map_location=device)
    model = models.create_model(*checkpoint['creation_args'])
    model.load_state_dict(checkpoint['model_state_dict'])
    model.to(device).eval()

    # Repeat data along channel dimension if applicable
    n_channels = next(model.parameters()).shape[1]
    if n_channels > 1:
        x = x.repeat(n_channels, axis=-1)

    loader = ImageLoader(x, device=device, batch_size=batch_size)
    if callback:
        y_pred = callback(model, loader)
    else:
        with torch.no_grad():
            y_pred = torch.cat([model(batch_x).softmax(dim=1).data
                                for batch_x, in loader])
    return inference.merge_predictions(y_pred.cpu().numpy(), df.index)
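
A minimal usage sketch for the function above; the feature and metadata paths, the epoch number, and the model directory are placeholders, not taken from the original source.

# Illustrative call with placeholder paths.
import numpy as np
import pandas as pd

x = np.load('features.npy')                    # placeholder feature file
df = pd.read_csv('metadata.csv', index_col=0)  # placeholder metadata
y_pred = predict(x, df, epoch=15, model_path='_models', batch_size=128)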