Example #1
def get_predictions(model,
                    dataloaders,
                    device="cpu",
                    as_dict=False,
                    per_neuron=True,
                    test_data=True,
                    **kwargs):
    predictions = {}
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        for k, v in dataloaders.items():
            if test_data:
                _, output = model_predictions_repeats(dataloader=v,
                                                      model=model,
                                                      data_key=k,
                                                      device=device)
            else:
                _, output = model_predictions(dataloader=v,
                                              model=model,
                                              data_key=k,
                                              device=device)
            predictions[k] = output.T

    if not as_dict:
        predictions = [v for v in predictions.values()]
    return predictions
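A minimal usage sketch (hypothetical variable names; `model` and a dictionary of test dataloaders are assumed to exist as in the other examples):

# Illustrative call; not part of the library.
preds = get_predictions(model, dataloaders["test"], device="cuda", as_dict=True)
for data_key, pred in preds.items():
    # Each entry is transposed, i.e. shaped [num_neurons, num_images].
    print(data_key, pred.shape)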
Example #2
def get_targets(model,
                dataloaders,
                device="cpu",
                as_dict=True,
                per_neuron=True,
                test_data=True,
                **kwargs):
    responses = {}
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        for k, v in dataloaders.items():
            if test_data:
                targets, _ = model_predictions_repeats(dataloader=v,
                                                       model=model,
                                                       data_key=k,
                                                       device=device)
                targets_per_neuron = []
                for i in range(targets[0].shape[1]):
                    neuronal_responses = []
                    for repeats in targets:
                        neuronal_responses.append(repeats[:, i])
                    targets_per_neuron.append(neuronal_responses)
                responses[k] = targets_per_neuron
            else:
                targets, _ = model_predictions(dataloader=v,
                                               model=model,
                                               data_key=k,
                                               device=device)
                responses[k] = targets.T

    if not as_dict:
        responses = [v for v in responses.values()]
    return responses
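With test_data=True the targets keep all stimulus repeats, so the returned structure is nested rather than a single array. A hedged indexing sketch, assuming `model` and `dataloaders["test"]` as above:

# Illustrative indexing; not part of the library.
targets = get_targets(model, dataloaders["test"], device="cuda", as_dict=True)
first_key = next(iter(targets))
# targets[first_key][neuron_idx][image_idx] is a 1-D array holding that neuron's
# responses across the repeated presentations of one image.
print(targets[first_key][0][0].shape)  # -> (num_repeats,)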
Example #3
def get_poisson_loss(model,
                     dataloaders,
                     device="cpu",
                     as_dict=False,
                     avg=False,
                     per_neuron=True,
                     eps=1e-12):
    poisson_loss = {}
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        for k, v in dataloaders.items():
            target, output = model_predictions(dataloader=v,
                                               model=model,
                                               data_key=k,
                                               device=device)
            loss = output - target * np.log(output + eps)
            poisson_loss[k] = np.mean(loss, axis=0) if avg else np.sum(loss,
                                                                       axis=0)
    if as_dict:
        return poisson_loss
    else:
        if per_neuron:
            return np.hstack([v for v in poisson_loss.values()])
        else:
            return (np.mean(np.hstack([v
                                       for v in poisson_loss.values()])) if avg
                    else np.sum(np.hstack([v for v in poisson_loss.values()])))
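The per-sample term `output - target * np.log(output + eps)` is the Poisson negative log-likelihood up to the additive constant `log(target!)`, which does not depend on the model. A small self-contained check of the formula on synthetic arrays:

import numpy as np

# Synthetic example: 3 stimuli, 2 neurons.
target = np.array([[2.0, 0.0], [1.0, 3.0], [0.0, 1.0]])  # observed spike counts
output = np.array([[1.5, 0.2], [0.8, 2.5], [0.1, 0.9]])  # predicted rates
eps = 1e-12
loss = output - target * np.log(output + eps)
print(np.sum(loss, axis=0))  # one summed loss value per neuron, as with avg=False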
Example #4
def get_correlations(model,
                     dataloaders,
                     device="cpu",
                     as_dict=False,
                     per_neuron=True,
                     **kwargs):
    correlations = {}
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        for k, v in dataloaders.items():
            target, output = model_predictions(dataloader=v,
                                               model=model,
                                               data_key=k,
                                               device=device)
            correlations[k] = corr(target, output, axis=0)

            if np.any(np.isnan(correlations[k])):
                warnings.warn("{}% NaNs, NaNs will be set to zero.".format(
                    np.isnan(correlations[k]).mean() * 100))
            correlations[k][np.isnan(correlations[k])] = 0

    if not as_dict:
        correlations = (np.hstack([v for v in correlations.values()])
                        if per_neuron else np.mean(
                            np.hstack([v for v in correlations.values()])))
    return correlations
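`corr` comes from the surrounding codebase and is assumed here to compute a per-neuron Pearson correlation along axis 0 (one coefficient per column). A rough equivalent sketch, for reference only:

import numpy as np

def pearson_per_column(y1, y2, eps=1e-8):
    # Z-score each column, then average the elementwise products column-wise,
    # yielding one correlation coefficient per neuron.
    z1 = (y1 - y1.mean(axis=0)) / (y1.std(axis=0) + eps)
    z2 = (y2 - y2.mean(axis=0)) / (y2.std(axis=0) + eps)
    return (z1 * z2).mean(axis=0)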
Example #5
def get_module_output(model, input_shape, use_cuda=True):
    """
    Returns the output shape of the model when fed in an array of `input_shape`.
    Note that a zero array of shape `input_shape` is fed into the model and the
    shape of the output of the model is returned.

    Args:
        model (nn.Module): PyTorch module for which to compute the output shape
        input_shape (tuple): Shape specification for the input array into the model
        use_cuda (bool, optional): If True, model will be evaluated on CUDA if available. Othewrise
            model evaluation will take place on CPU. Defaults to True.

    Returns:
        tuple: output shape of the model

    """
    # infer the original device
    initial_device = next(iter(model.parameters())).device
    device = "cuda" if torch.cuda.is_available() and use_cuda else "cpu"
    with eval_state(model):
        with torch.no_grad():
            input = torch.zeros(1, *input_shape[1:], device=device)
            output = model.to(device)(input)
    model.to(initial_device)
    return output.shape
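A usage sketch with a small, purely illustrative convolutional module; only the input shape matters for the helper:

import torch.nn as nn

# Hypothetical core: two conv layers on 36x64 single-channel inputs.
core = nn.Sequential(nn.Conv2d(1, 8, kernel_size=5), nn.ReLU(), nn.Conv2d(8, 16, kernel_size=3))
print(get_module_output(core, input_shape=(64, 1, 36, 64), use_cuda=False))
# -> torch.Size([1, 16, 30, 58]); the batch dimension of input_shape is replaced by 1.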
Example #6
def get_FEV(dataloaders, model, device='cpu', as_dict=False):
    FEV = {}
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        for k, v in dataloaders.items():
            repeated_inputs, repeated_outputs = get_repeats(v)
            FEV[k] = compute_FEV(
                repeated_inputs=repeated_inputs,
                repeated_outputs=repeated_outputs,
                model=model,
                device=device,
                data_key=k,
            )
    return FEV if as_dict else np.hstack([v for v in FEV.values()])
Example #7
def model_predictions_repeats(dataloader,
                              model,
                              data_key,
                              device='cpu',
                              broadcast_to_target=False):
    """
    Computes model predictions for dataloader that yields batches with identical inputs along the first dimension
    Unique inputs will be forwarded only once through the model
    Returns:
        target: ground truth, i.e. neuronal firing rates of the neurons as a list: [num_images][num_reaps, num_neurons]
        output: responses as predicted by the network for the unique images. If broadcast_to_target, returns repeated 
                outputs of shape [num_images][num_reaps, num_neurons] else (default) returns unique outputs of shape [num_images, num_neurons]
    """

    output = torch.empty(0)
    target = []

    # Get unique images and concatenate targets:
    unique_images = torch.empty(0)
    for images, responses in dataloader:
        if len(images.shape) == 5:
            images = images.squeeze(dim=0)
            responses = responses.squeeze(dim=0)

        assert torch.all(torch.eq(
            images[-1],
            images[0],
        )), "All images in the batch should be equal"
        unique_images = torch.cat((unique_images, images[0:1]), dim=0)
        target.append(responses.detach().cpu().numpy())

    # Forward unique images once:
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        with device_state(model, device):
            output = model(unique_images.to(device),
                           data_key=data_key).detach().cpu()

    output = output.numpy()

    if broadcast_to_target:
        output = [
            np.broadcast_to(x, target[idx].shape)
            for idx, x in enumerate(output)
        ]

    return target, output
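The function expects every batch to consist of repeats of a single image. A hedged sketch of a compatible synthetic generator (names are illustrative and not part of the library):

import torch

def toy_repeat_loader(num_images=5, num_repeats=10, num_neurons=7):
    # Each yielded batch holds `num_repeats` copies of one image plus matching
    # (random) responses, satisfying the equality assert in the function above.
    for _ in range(num_images):
        image = torch.rand(1, 1, 36, 64)
        images = image.expand(num_repeats, -1, -1, -1)
        responses = torch.rand(num_repeats, num_neurons)
        yield images, responses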
Example #8
def get_module_output(model, input_shape):
    """
    Gets the output dimensions of the convolutional core
        by passing an input image through all convolutional layers

    :param core: convolutional core of the DNN, which final dimensions
        need to be passed on to the readout layer
    :param input_shape: the dimensions of the input

    :return: output dimensions of the core
    """
    initial_device = "cuda" if next(iter(
        model.parameters())).is_cuda else "cpu"
    device = "cuda" if torch.cuda.is_available() else "cpu"
    with eval_state(model):
        with torch.no_grad():
            input = torch.zeros(1, *input_shape[1:]).to(device)
            output = model.to(device)(input)
    model.to(initial_device)
    return output.shape
Example #9
def get_FEV(model,
            dataloaders,
            device="cpu",
            as_dict=False,
            per_neuron=True,
            threshold=None):
    """
    Computes the fraction of explainable variance explained (FEVe) per Neuron, given a model and a dictionary of dataloaders.
    The dataloaders will have to return batches of identical images, with the corresponing neuronal responses.
    Args:
        model (object): PyTorch module
        dataloaders (dict): Dictionary of dataloaders, with keys corresponding to "data_keys" in the model
        device (str): 'cuda' or 'gpu
        as_dict (bool): Returns the scores as a dictionary ('data_keys': values) if set to True.
        per_neuron (bool): Returns the grand average if set to True.
        threshold (float): for the avg feve, excludes neurons with a explainable variance below threshold
    Returns:
        FEV (dict, or np.array, or float): Fraction of explainable varianced explained. Per Neuron or as grand average.
    """
    dataloaders = dataloaders["test"] if "test" in dataloaders else dataloaders
    FEV = {}
    with eval_state(model) if not isinstance(
            model, types.FunctionType) else contextlib.nullcontext():
        for data_key, dataloader in dataloaders.items():
            targets, outputs = model_predictions_repeats(
                model=model,
                dataloader=dataloader,
                data_key=data_key,
                device=device,
                broadcast_to_target=True)
            if threshold is None:
                FEV[data_key] = compute_FEV(targets=targets, outputs=outputs)
            else:
                fev, feve = compute_FEV(targets=targets,
                                        outputs=outputs,
                                        return_exp_var=True)
                FEV[data_key] = feve[fev > threshold]
    if not as_dict:
        FEV = np.hstack([v for v in FEV.values()]) if per_neuron else np.mean(
            np.hstack([v for v in FEV.values()]))
    return FEV
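`compute_FEV` itself is not shown in these examples. As a rough sketch under the usual definition (trial-to-trial noise variance is subtracted from both the residual and the total variance; the library's implementation may differ in details such as ddof handling):

import numpy as np

def feve_sketch(targets, outputs, return_exp_var=False):
    # targets/outputs: lists over images of arrays shaped [num_repeats, num_neurons],
    # with outputs broadcast to the target shape (broadcast_to_target=True above).
    resp = np.vstack(targets)
    pred = np.vstack(outputs)
    noise_var = np.mean([np.var(t, axis=0, ddof=1) for t in targets], axis=0)
    total_var = np.var(resp, axis=0, ddof=1)
    fev = (total_var - noise_var) / total_var            # explainable fraction of the variance
    mse = np.mean((resp - pred) ** 2, axis=0)
    feve = 1.0 - (mse - noise_var) / (total_var - noise_var)
    return (fev, feve) if return_exp_var else feve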