Example #1
0
def estimate_spiking_likelihood(spikes, conditional_intensity,
                                is_track_interior=None):
    '''Estimate the likelihood of each position bin given spiking activity.

    Parameters
    ----------
    spikes : ndarray, shape (n_time, n_neurons)
    conditional_intensity : ndarray, shape (n_bins, n_neurons)
    is_track_interior : None or ndarray, optional, shape (n_x_position_bins,
                                                          n_y_position_bins)
        Position bins outside the track interior are masked with NaN.
        If None, all bins are treated as interior.

    Returns
    -------
    likelihood : ndarray, shape (n_time, n_bins)
    '''
    if is_track_interior is not None:
        # Flatten in Fortran order to match how position bins are raveled
        # elsewhere in this module.
        is_track_interior = is_track_interior.ravel(order='F')
    else:
        n_bins = conditional_intensity.shape[0]
        # `np.bool` was removed in NumPy 1.24; the builtin `bool` is the
        # correct replacement.
        is_track_interior = np.ones((n_bins,), dtype=bool)

    log_likelihood = combined_likelihood(spikes, conditional_intensity)

    # NaN-mask bins outside the track so they are ignored downstream.
    # (`np.float` was also removed in NumPy 1.24; use builtin `float`.)
    mask = np.ones_like(is_track_interior, dtype=float)
    mask[~is_track_interior] = np.nan

    return scaled_likelihood(log_likelihood * mask)
    def predict(self, multiunits, time=None, is_compute_acausal=True):
        '''Decode position from multiunit activity.

        Parameters
        ----------
        multiunits : array_like, shape (n_time, n_marks, n_electrodes)
        time : None or ndarray, shape (n_time,)
        is_compute_acausal : bool, optional
            Use future information to compute the posterior.

        Returns
        -------
        results : xarray.Dataset

        '''
        multiunits = np.asarray(multiunits)
        track_interior = self.is_track_interior_.ravel(order='F')
        n_time = multiunits.shape[0]
        n_position_bins = track_interior.shape[0]
        # Index that restricts the state transition to interior-bin pairs.
        interior_ix = np.ix_(track_interior, track_interior)

        likelihood_func = _ClUSTERLESS_ALGORITHMS[
            self.clusterless_algorithm][1]
        output = {
            'likelihood': scaled_likelihood(
                likelihood_func(
                    multiunits=multiunits,
                    place_bin_centers=self.place_bin_centers_,
                    is_track_interior=track_interior,
                    **self.encoding_model_)),
        }

        # Exterior bins stay NaN; only interior bins receive posterior mass.
        causal = np.full((n_time, n_position_bins), np.nan)
        causal[:, track_interior] = _causal_decode(
            self.initial_conditions_[track_interior],
            self.state_transition_[interior_ix],
            output['likelihood'][:, track_interior])
        output['causal_posterior'] = causal

        if is_compute_acausal:
            acausal = np.full((n_time, n_position_bins, 1), np.nan)
            acausal[:, track_interior] = _acausal_decode(
                causal[:, track_interior, np.newaxis],
                self.state_transition_[interior_ix])
            output['acausal_posterior'] = acausal

        if time is None:
            time = np.arange(n_time)

        return self.convert_results_to_xarray(output, time)
Example #3
0
def estimate_multiunit_likelihood(multiunits,
                                  place_bin_centers,
                                  joint_pdf_models,
                                  ground_process_intensities,
                                  occupancy,
                                  mean_rates,
                                  is_track_interior=None):
    '''Estimate position likelihood from clusterless multiunit activity.

    Parameters
    ----------
    multiunits : ndarray, shape (n_time, n_marks, n_electrodes)
    place_bin_centers : ndarray, (n_bins, n_position_dims)
    joint_pdf_models : list of sklearn models, shape (n_electrodes,)
    ground_process_intensities : list of ndarray, shape (n_electrodes,)
    occupancy : ndarray, (n_bins, n_position_dims)
    mean_rates : ndarray, (n_electrodes,)
    is_track_interior : None or ndarray, optional, shape (n_bins,)
        Position bins outside the track interior are masked with NaN.
        If None, all bins are treated as interior.

    Returns
    -------
    log_likelihood : (n_time, n_bins)

    '''
    if is_track_interior is None:
        # `np.bool` was removed in NumPy 1.24; use the builtin `bool`.
        is_track_interior = np.ones((place_bin_centers.shape[0], ),
                                    dtype=bool)

    n_bin = place_bin_centers.shape[0]
    n_time = multiunits.shape[0]
    log_likelihood = np.zeros((n_time, n_bin))

    # Sum each electrode's log-likelihood contribution. Electrodes are the
    # last axis of `multiunits`; move it first so zip pairs per-electrode.
    zipped = zip(np.moveaxis(multiunits, -1, 0), joint_pdf_models, mean_rates,
                 ground_process_intensities)
    for multiunit, joint_model, mean_rate, ground_process_intensity in zipped:
        joint_mark_intensity = estimate_joint_mark_intensity(
            multiunit, place_bin_centers, occupancy, joint_model, mean_rate,
            is_track_interior)
        log_likelihood += poisson_mark_log_likelihood(
            joint_mark_intensity, np.atleast_2d(ground_process_intensity))

    # NaN-mask exterior bins. (`np.float` was removed in NumPy 1.24;
    # builtin `float` is the correct replacement.)
    mask = np.ones_like(is_track_interior, dtype=float)
    mask[~is_track_interior] = np.nan

    return scaled_likelihood(log_likelihood * mask)
    def predict(self, spikes, time=None, is_compute_acausal=True):
        '''Decode position from sorted-spike activity.

        Parameters
        ----------
        spikes : ndarray, shape (n_time, n_neurons)
        time : ndarray or None, shape (n_time,), optional
            Coordinate for the time axis; defaults to `np.arange(n_time)`.
        is_compute_acausal : bool, optional
            If True, also compute the acausal (smoothed) posterior.

        Returns
        -------
        results : xarray.Dataset

        '''
        spikes = np.asarray(spikes)
        # Fortran-order ravel matches the position-bin ordering used by the
        # fitted attributes.
        is_track_interior = self.is_track_interior_.ravel(order='F')
        n_time = spikes.shape[0]
        n_position_bins = is_track_interior.shape[0]
        # Restrict the state transition matrix to interior-bin pairs.
        st_interior_ind = np.ix_(is_track_interior, is_track_interior)

        results = {}
        # NOTE(review): `estimate_spiking_likelihood` appears to apply
        # `scaled_likelihood` itself, so wrapping it again may double-scale;
        # also `is_track_interior` is not forwarded here, unlike sibling
        # predict methods — confirm both against the library source.
        results['likelihood'] = scaled_likelihood(
            estimate_spiking_likelihood(spikes,
                                        np.asarray(self.place_fields_)))
        # Exterior bins remain NaN; only interior bins get posterior mass.
        results['causal_posterior'] = np.full((n_time, n_position_bins),
                                              np.nan)
        results['causal_posterior'][:, is_track_interior] = _causal_decode(
            self.initial_conditions_[is_track_interior],
            self.state_transition_[st_interior_ind],
            results['likelihood'][:, is_track_interior])

        if is_compute_acausal:
            # Acausal (smoothed) posterior uses the causal pass as input.
            results['acausal_posterior'] = np.full(
                (n_time, n_position_bins, 1), np.nan)
            results['acausal_posterior'][:, is_track_interior] = (
                _acausal_decode(
                    results['causal_posterior'][:, is_track_interior,
                                                np.newaxis],
                    self.state_transition_[st_interior_ind]))

        if time is None:
            time = np.arange(n_time)

        return self.convert_results_to_xarray(results, time)
    def predict(self, multiunits, time=None, is_compute_acausal=True,
                state_names=None):
        '''Classify discrete states and decode position from multiunits.

        Parameters
        ----------
        multiunits : array_like, shape (n_time, n_marks, n_electrodes)
        time : None or ndarray, shape (n_time,)
        is_compute_acausal : bool, optional
            Use future information to compute the posterior.
        state_names : None or array_like, shape (n_states,)

        Returns
        -------
        results : xarray.Dataset

        '''
        multiunits = np.asarray(multiunits)
        interior = self.is_track_interior_.ravel(order='F')
        n_time = multiunits.shape[0]
        n_bins = interior.shape[0]
        n_states = self.discrete_state_transition_.shape[0]
        all_states = np.ones((n_states,), dtype=bool)
        # Restrict continuous transitions to interior bins across all states.
        interior_ix = np.ix_(all_states, all_states, interior, interior)

        # Compute one likelihood per encoding group.
        algorithm = _ClUSTERLESS_ALGORITHMS[self.clusterless_algorithm][1]
        likelihood = {
            group: algorithm(
                multiunits=multiunits,
                place_bin_centers=self.place_bin_centers_,
                is_track_interior=interior,
                **params)
            for group, params in self.encoding_model_.items()
        }

        # Stack likelihoods in discrete-state order, scale jointly over
        # (state, position) axes, and add a trailing singleton axis.
        stacked = np.stack(
            [likelihood[group]
             for group in self.encoding_group_to_state_],
            axis=1)
        results = {
            'likelihood': scaled_likelihood(
                stacked, axis=(1, 2))[..., np.newaxis],
        }

        # Exterior bins stay NaN; interior bins receive the posterior.
        causal = np.full((n_time, n_states, n_bins, 1), np.nan)
        causal[:, :, interior] = _causal_classify(
            self.initial_conditions_[:, interior],
            self.continuous_state_transition_[interior_ix],
            self.discrete_state_transition_,
            results['likelihood'][:, :, interior])
        results['causal_posterior'] = causal

        if is_compute_acausal:
            acausal = np.full((n_time, n_states, n_bins, 1), np.nan)
            acausal[:, :, interior] = _acausal_classify(
                causal[:, :, interior],
                self.continuous_state_transition_[interior_ix],
                self.discrete_state_transition_)
            results['acausal_posterior'] = acausal

        if time is None:
            time = np.arange(n_time)

        return self.convert_results_to_xarray(results, time, state_names)
    def predict(self, spikes, time=None, is_compute_acausal=True,
                state_names=None):
        '''Classify discrete states and decode position from sorted spikes.

        Parameters
        ----------
        spikes : ndarray, shape (n_time, n_neurons)
        time : ndarray or None, shape (n_time,), optional
            Coordinate for the time axis; defaults to `np.arange(n_time)`.
        is_compute_acausal : bool, optional
            If True, also compute the acausal (smoothed) posterior.
        state_names : None or array_like, shape (n_states,)

        Returns
        -------
        results : xarray.Dataset

        '''
        spikes = np.asarray(spikes)
        # Fortran-order ravel matches the position-bin ordering of the
        # fitted attributes.
        is_track_interior = self.is_track_interior_.ravel(order='F')
        n_time = spikes.shape[0]
        n_position_bins = is_track_interior.shape[0]
        n_states = self.discrete_state_transition_.shape[0]
        is_states = np.ones((n_states,), dtype=bool)
        # Restrict continuous transitions to interior bins across all states.
        st_interior_ind = np.ix_(
            is_states, is_states, is_track_interior, is_track_interior)

        results = {}

        # One likelihood per encoding group present in the place fields.
        likelihood = {}
        for encoding_group in np.asarray(self.place_fields_.encoding_group):
            likelihood[encoding_group] = estimate_spiking_likelihood(
                spikes,
                np.asarray(self.place_fields_.sel(
                    encoding_group=encoding_group)),
                is_track_interior)

        # Stack in discrete-state order, scale jointly over the
        # (state, position) axes, and add a trailing singleton axis.
        results['likelihood'] = np.stack(
            [likelihood[encoding_group]
             for encoding_group in self.encoding_group_to_state_],
            axis=1)
        results['likelihood'] = scaled_likelihood(
            results['likelihood'], axis=(1, 2))[..., np.newaxis]

        # Exterior bins stay NaN; interior bins receive the posterior.
        results['causal_posterior'] = np.full(
            (n_time, n_states, n_position_bins, 1), np.nan)
        results['causal_posterior'][:, :, is_track_interior] = _causal_classify(
            self.initial_conditions_[:, is_track_interior],
            self.continuous_state_transition_[st_interior_ind],
            self.discrete_state_transition_,
            results['likelihood'][:, :, is_track_interior])

        if is_compute_acausal:
            results['acausal_posterior'] = np.full(
                (n_time, n_states, n_position_bins, 1), np.nan)
            results['acausal_posterior'][:, :, is_track_interior] = _acausal_classify(
                results['causal_posterior'][:, :, is_track_interior],
                self.continuous_state_transition_[st_interior_ind],
                self.discrete_state_transition_)

        # (Removed a redundant recomputation of n_time; `spikes` is not
        # modified after the initial assignment above.)
        if time is None:
            time = np.arange(n_time)

        return self.convert_results_to_xarray(results, time, state_names)