def preprocess(self):
        """

        :return:
        """
        self.N_particles = hypers['N_particles'].value

        # Set up initial state distribution
        # Initial state is centered around the steady state
        D = sz_dtype(self.population.latent_dtype)
        self.mu_initial = self.population.steady_state().reshape((D, 1))

        # TODO: Implement a distribution over the initial variances
        sig_initial = np.ones(1, dtype=self.population.latent_dtype)
        sig_initial.fill(np.asscalar(hypers['sig_ch_init'].value))
        for neuron in self.population.neurons:
            for compartment in neuron.compartments:
                sig_initial[neuron.name][
                    compartment.name]['V'] = hypers['sig_V_init'].value
        self.sig_initial = as_matrix(sig_initial)

        # TODO: Implement a distribution over the transition noise
        sig_trans = np.ones(1, dtype=self.population.latent_dtype)
        sig_trans.fill(np.asscalar(hypers['sig_ch'].value))
        for neuron in self.population.neurons:
            for compartment in neuron.compartments:
                sig_trans[neuron.name][
                    compartment.name]['V'] = hypers['sig_V'].value
        self.sig_trans = as_matrix(sig_trans)
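The two noise terms above are built as numpy structured arrays keyed by neuron, compartment, and state-variable name, then flattened by as_matrix into a plain (D, 1) column so the particle filter can treat the latent state as an ordinary vector. Below is a minimal, standalone sketch of that pattern, using a made-up two-field dtype and stock numpy in place of the package's as_matrix helper:

import numpy as np
from numpy.lib import recfunctions as rfn

# Hypothetical latent dtype: a voltage and a single channel gate.
toy_latent_dtype = np.dtype([('V', np.float64), ('m', np.float64)])

toy_sig = np.zeros(1, dtype=toy_latent_dtype)
toy_sig['m'] = 1e-3        # small noise on the channel state
toy_sig['V'] = 1.0         # larger noise on the voltage

# Flatten the record into a (D, 1) column; this is the role as_matrix plays above.
toy_sig_mat = rfn.structured_to_unstructured(toy_sig).reshape((-1, 1))
print(toy_sig_mat.ravel())  # column of per-field noise scales: 1.0 and 0.001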
    def update(self, model, cache=None):
        """

        :param current_state:
        :return:
        """
        population = model.population

        # Update each data sequence one at a time
        for data in model.data_sequences:
            t = data.t
            T = data.T
            latent = data.latent
            inpt = data.input
            state = data.states
            obs = data.observations

            # View the latent state as a matrix
            D = sz_dtype(latent.dtype)
            z = as_matrix(latent, D)
            x = as_matrix(obs)

            lb = population.latent_lb
            ub = population.latent_ub
            p_initial = StaticTruncatedGaussianDistribution(
                D, self.mu_initial, self.sig_initial, lb, ub)

            # The observation model gives us a noisy version of the voltage
            lkhd = ObservationLikelihood(model)

            # The transition model is a noisy Hodgkin Huxley proposal
            # prop = TruncatedHodgkinHuxleyProposal(population, t, inpt, self.sig_trans)
            prop = HodgkinHuxleyProposal(population, t, inpt, self.sig_trans)

            # Run the particle Gibbs step with ancestor sampling
            # Create a conditional particle filter with ancestor sampling
            pf = ParticleGibbsAncestorSampling(D,
                                               T,
                                               0,
                                               x[:, 0],
                                               prop,
                                               lkhd,
                                               p_initial,
                                               z,
                                               Np=self.N_particles)

            for ind in np.arange(1, T):
                x_curr = x[:, ind]
                pf.filter(ind, x_curr)

            # Sample a trajectory according to the particle weights at time T
            i = np.random.choice(np.arange(pf.Np), p=pf.trajectory_weights)
            # z_inf = pf.trajectories[:,i,:].reshape((D,T))
            z_inf = pf.get_trajectory(i).reshape((D, T))

            # Update the data sequence's latent and state
            data.latent = as_sarray(z_inf, population.latent_dtype)
            data.states = population.evaluate_state(data.latent, inpt)
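Each call to this update runs one conditional SMC sweep per data sequence and then draws a single trajectory according to the final particle weights, which is the particle-Gibbs-with-ancestor-sampling move. The sketch below shows one way these samplers could be composed in an outer Gibbs loop; the driver function and the `updates` list are assumptions for illustration, not the package's confirmed API:

import copy

def gibbs_loop(model, updates, N_iters=100):
    # `updates` is assumed to be a list of objects exposing the
    # preprocess() / update(model) interface shown in the snippets above.
    for u in updates:
        u.preprocess()              # cache particle counts and noise scales

    samples = []
    for _ in range(N_iters):
        for u in updates:
            u.update(model)         # one sweep of each conditional update
        samples.append(copy.deepcopy(model))
    return samples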
    def update(self, model, cache=None):
        """
        TODO: Handle truncated Gaussians noise for channel params
        """
        # Keep sufficient stats for a Gaussian noise model
        N = 0
        beta_V_hat = 0
        # beta_ch_hat = 0

        # Get the latent voltages
        for ds in model.data_sequences:
            latent = as_matrix(ds.latent)
            # The transition model is a noisy Hodgkin Huxley proposal
            prop = TruncatedHodgkinHuxleyProposal(
                model.population, ds.t, ds.input,
                as_matrix(np.zeros(1, dtype=model.population.latent_dtype)))
            dt = ds.t[1:] - ds.t[:-1]
            # import pdb; pdb.set_trace()
            pred = np.zeros((latent.shape[0], ds.T - 1))
            for t in np.arange(ds.T - 1):
                pt, _, _ = prop.sample_next(t, latent[:, t, np.newaxis], t + 1)
                pred[:, t] = pt.ravel()

            # pred2,_ = prop.sample_next(np.arange(ds.T-1),
            #                           latent[:,:-1],
            #                           np.arange(1,ds.T))

            # if not np.allclose(pred, pred2):
            #     import pdb; pdb.set_trace()
            for neuron in model.population.neurons:
                for compartment in neuron.compartments:
                    V = get_item_at_path(
                        as_sarray(latent, model.population.latent_dtype),
                        compartment.path)['V']
                    Vpred = get_item_at_path(
                        as_sarray(pred, model.population.latent_dtype),
                        compartment.path)['V']
                    dV = (Vpred - V[1:]) / dt

                    # Update sufficient stats
                    N += len(dV)
                    beta_V_hat += (dV**2).sum()

        # Sample a new sig_V^2 from its inverse-gamma conditional
        sig2_V = 1.0 / np.random.gamma(
            hypers['a_sig_V'].value + N / 2., 1.0 /
            (hypers['b_sig_V'].value + beta_V_hat / 2.))

        hypers['sig_V'].value = np.sqrt(sig2_V)
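The final draw is the standard conjugate update for a Gaussian variance: with an inverse-gamma prior IG(a_sig_V, b_sig_V) on sig_V^2 and N squared residuals summing to beta_V_hat, the conditional is IG(a_sig_V + N/2, b_sig_V + beta_V_hat/2), sampled here as the reciprocal of a gamma draw. A quick standalone sanity check of that mechanic, with made-up hyperparameters and residuals:

import numpy as np

a_sig_V, b_sig_V = 2.0, 2.0                  # hypothetical hyperparameters
true_sig_V = 0.5
dV = true_sig_V * np.random.randn(100000)    # stand-in residuals

N, beta_V_hat = len(dV), (dV ** 2).sum()
sig2_V = 1.0 / np.random.gamma(a_sig_V + N / 2.,
                               1.0 / (b_sig_V + beta_V_hat / 2.))
print(np.sqrt(sig2_V))                       # concentrates near 0.5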
def initialize_model(model):
    """
    Find a decent parameter regime to start the model
    Most of the model parameters have been drawn from the prior, so
    we'll just focus on the latent states

    :param model:
    :return:
    """
    for data in model.data_sequences:
        if np.allclose(as_matrix(data.latent), 0.0) or \
           np.allclose(as_matrix(data.states), 0.0):
            z, _ = simulate(model, data.t, data.stimuli)
            data.latent = z
            data.states = model.population.evaluate_state(data.latent, data.input)
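A data sequence whose latent trajectory or states are still identically zero is treated as uninitialized and replaced by a forward simulation from the model. A toy, self-contained illustration of that check, with plain numpy arrays standing in for the package's data sequences and simulate routine (all names below are hypothetical):

import numpy as np

def toy_simulate(D, T):
    # Stand-in for simulate(model, data.t, data.stimuli): a smooth random path.
    return np.cumsum(0.1 * np.random.randn(D, T), axis=1)

toy_latent = np.zeros((4, 200))       # an uninitialized (D, T) sequence
if np.allclose(toy_latent, 0.0):      # the same test initialize_model uses
    toy_latent = toy_simulate(*toy_latent.shape)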
def plot_latent_compartment_state(t, z, state, compartment, axs=None, colors=['k'], linewidth=1):
    dtype = compartment.latent_dtype
    lb = compartment.latent_lb
    ub = compartment.latent_ub
    D = sz_dtype(dtype)
    z_comp = get_item_at_path(z, compartment.path)
    z = as_matrix(z_comp, D)
    z_names = extract_names_from_dtype(dtype)

    # Compute the channel currents (computed here but not plotted below)
    s_comp = get_item_at_path(state, compartment.path)
    N_ch = len(compartment.channels)
    Is = [s_comp[ch.name]['I'] for ch in compartment.channels]

    # if fig is None:
    #     fig,axs = plt.subplots(D,1)
    # else:
    #     axs = fig.get_axes()
    # # Make sure axs is a list of axes, even if it is length 1
    # if not isinstance(axs, (list, np.ndarray)):
    #     axs = [axs]


    if axs is None:
        axs = []

        for d in np.arange(D):
            ax = plt.subplot2grid((D,3), (d,0), colspan=2)
            axs.append(ax)

        ax = plt.subplot2grid((D,3), (0,2), rowspan=D)
        axs.append(ax)

    for d in np.arange(D):
        axs[d].plot(t, z[d,:], color=colors[d % len(colors)], linewidth=linewidth)
        axs[d].set_ylabel(z_names[d])

        yl = list(axs[d].get_ylim())
        if np.isfinite(lb[d]):
            yl[0] = lb[d]
        if np.isfinite(ub[d]):
            yl[1] = ub[d]
        axs[d].set_ylim(yl)

    # Plot the channel densities
    C = len(compartment.channels)
    gs = [ch.g.value for ch in compartment.channels]
    names = [ch.name for ch in compartment.channels]


    axs[-1].bar(np.arange(C), gs, facecolor=colors[0], alpha=0.5)
    axs[-1].set_xticks(np.arange(C))
    axs[-1].set_xticklabels(['$g_{%s}$' % n for n in names])
    axs[-1].set_title('Channel densities')
    axs[-1].set_ylim([0,30])

    # if not fig_given:
    #     plt.show()

    return axs
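A hedged usage sketch for the plotting helper, wrapped as a small function that takes a fitted model; the attribute names follow the snippets above and are not a confirmed public API:

import matplotlib.pyplot as plt

def plot_first_compartment(model):
    # Plot the first compartment of the first neuron in the first data sequence.
    data = model.data_sequences[0]
    compartment = model.population.neurons[0].compartments[0]
    axs = plot_latent_compartment_state(data.t, data.latent, data.states,
                                        compartment, colors=['b'])
    plt.suptitle(compartment.name)
    plt.show()
    return axs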