Example #1
    def __init__(self, *args, **kwargs):
        """A simple matcher that prefers paths where each matched location is as close as possible to the
        observed position.

        :param avoid_goingback: Change the transition probability to be lower for the direction the path is coming
            from.
        :param kwargs: Arguments passed to :class:`BaseMatcher`.
        """
        if "matching" not in kwargs:
            kwargs['matching'] = SimpleMatching
        super().__init__(*args, **kwargs)

        self.obs_noise_dist = halfnorm(scale=self.obs_noise)
        self.obs_noise_dist_ne = halfnorm(scale=self.obs_noise_ne)
        # normalize to max 1 to simulate a prob instead of density
        self.obs_noise_logint = math.log(self.obs_noise *
                                         math.sqrt(2 * math.pi) / 2)
        self.obs_noise_logint_ne = math.log(self.obs_noise_ne *
                                            math.sqrt(2 * math.pi) / 2)

        # The transition probability is multiplied (in logprob_trans) by this factor, i.e.
        # penalized, if we move back on the current edge.
        self.avoid_goingback = kwargs.get('avoid_goingback', True)
        self.gobackonedge_factor_log = math.log(0.99)
        # The transition probability is multiplied (in logprob_trans) by this factor if the
        # next state is also the previous state, i.e. if we go back to the edge we came from.
        self.gobacktoedge_factor_log = math.log(0.5)
        # The transition probability is multiplied (in logprob_trans) by this factor whenever
        # a transition to a different state is made. This encourages staying on the same node
        # unless there is a good reason to move.
        self.transition_factor = math.log(0.9)
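A minimal sketch (not part of the matcher above) showing why adding these log-factors acts as a multiplicative penalty on the transition probability; the numbers are hypothetical:

import math

logprob = math.log(0.7)              # hypothetical transition log-probability
penalized = logprob + math.log(0.9)  # apply a 0.9 penalty factor in log-space
assert abs(math.exp(penalized) - 0.7 * 0.9) < 1e-12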
Example #2
def minimal_priors():
    return [
        lambda x: halfnorm(scale=1.0).logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0),
        lambda x: invgamma(a=5.0, scale=1.0).logpdf(np.exp(x)) + x,
        lambda x: halfnorm(scale=1.0).logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0),
    ]
Example #3
def create_priors(
    n_parameters: int,
    signal_scale: float = 4.0,
    lengthscale_lower_bound: float = 0.1,
    lengthscale_upper_bound: float = 0.5,
    noise_scale: float = 0.0006,
) -> List[Callable[[float], float]]:
    """Create a list of priors to be used for the hyperparameters of the tuning process.

    Parameters
    ----------
    n_parameters : int
        Number of parameters to be optimized.
    signal_scale : float
        Prior scale of the signal (standard deviation) which is used to parametrize a
        half-normal distribution.
    lengthscale_lower_bound : float
        Lower bound of the inverse-gamma lengthscale prior. It marks the point at which
        1 % of the cumulative density is reached.
    lengthscale_upper_bound : float
        Upper bound of the inverse-gamma lengthscale prior. It marks the point at which
        99 % of the cumulative density is reached.
    noise_scale : float
        Prior scale of the noise (standard deviation) which is used to parametrize a
        half-normal distribution.

    Returns
    -------
    list of callables
        List of priors in the following order:
         - signal prior
         - lengthscale prior (n_parameters times)
         - noise prior
    """
    if signal_scale <= 0.0:
        raise ValueError(
            f"The signal scale needs to be strictly positive. Got {signal_scale}."
        )
    if noise_scale <= 0.0:
        raise ValueError(
            f"The noise scale needs to be strictly positive. Got {noise_scale}."
        )
    signal_prior = halfnorm(scale=signal_scale)
    lengthscale_prior = make_invgamma_prior(
        lower_bound=lengthscale_lower_bound,
        upper_bound=lengthscale_upper_bound)
    noise_prior = halfnorm(scale=noise_scale)

    priors = [
        lambda x: signal_prior.logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0)
    ]
    for _ in range(n_parameters):
        priors.append(lambda x: lengthscale_prior.logpdf(np.exp(x)) + x)
    priors.append(
        lambda x: noise_prior.logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0))
    return priors
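A quick sketch (outside the library code) of the change of variables these priors encode: the hyperparameters are sampled as x = log(variance), so a half-normal prior on the standard deviation picks up the Jacobian terms x / 2 - log(2), and the resulting density in x should still integrate to one.

import numpy as np
from scipy.stats import halfnorm

scale = 4.0  # the default signal_scale above
xs = np.linspace(-20.0, 10.0, 20001)
log_p = halfnorm(scale=scale).logpdf(np.sqrt(np.exp(xs))) + xs / 2.0 - np.log(2.0)
print(np.trapz(np.exp(log_p), xs))  # ~= 1.0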
Example #4
    def test_multiple_basis_and_training_set_sizes(self):
        np.random.seed(seed)
        Ns = [50, 100]  # training set sizes
        Ms = [5, 10]  # basis set sizes

        epsilon = stats.norm(loc=0, scale=0.01)
        tfun = lambda x: np.sin(x) + np.cos(2. * x)

        init_beta = distribution_wrapper(stats.halfnorm(scale=1),
                                         size=1,
                                         single=True)
        init_alphas = distribution_wrapper(stats.halfnorm(scale=1),
                                           single=False)

        for N in Ns:
            t_est, t_err = [], []
            for M in Ms:
                x = np.linspace(0, 1, N)
                k = M

                trafo = FourierFeatures(k=k)
                base_trafo = trafo.fit_transform

                model_type = RelevanceVectorMachine
                model_kwargs = dict(n_iter=50,
                                    verbose=False,
                                    compute_score=True,
                                    init_beta=init_beta,
                                    init_alphas=init_alphas)

                runtimes, coefs = repeated_regression(
                    x,
                    base_trafo,
                    model_type,
                    t=None,
                    tfun=tfun,
                    epsilon=epsilon,
                    model_kwargs=model_kwargs,
                    Nruns=Nruns,
                    return_coefs=True)
                #print_run_stats(base_trafo,x,runtimes,coefs,Nruns)
                t_est.append(runtimes.mean())
                t_err.append(runtimes.std(ddof=1) * 2)
            print("\ntime for N = {}:".format(N))
            for est, err in zip(t_est, t_err):
                print("    estimate = {:.4f}s, 2*std = {:.4f}s".format(est, err))
Example #5
 def __init__(
     self,
     n_particles,
     prior={
         'logk': norm(loc=np.log(1 / 365), scale=2),
         's': halfnorm(loc=1, scale=2),
         'α': halfnorm(loc=0, scale=3)
     }):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {'ϵ': 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #6
 def __init__(
     self,
     n_particles,
     prior={
         'a': norm(loc=0.01, scale=0.1),
         'b': halfnorm(loc=0.001, scale=3),
         'α': halfnorm(loc=0, scale=3)
     }):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {'ϵ': 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #7
 def __init__(
     self,
     n_particles,
     prior={
         'gamma_reward': halfnorm(loc=0, scale=10),
         'gamma_delay': halfnorm(loc=0, scale=10),
         'k': norm(loc=0, scale=2),
         'α': halfnorm(loc=0, scale=3)
     }):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {'ϵ': 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #8
    def test_semimanual_hyperparameters(self):

        np.random.seed(seed)
        init_beta = stats.halfnorm(scale=1).rvs(size=1)[0]
        init_alphas = stats.halfnorm(scale=1).rvs(size=self.X.shape[1])

        model = RelevanceVectorMachine(n_iter=50,
                                       verbose=False,
                                       compute_score=True,
                                       init_beta=init_beta,
                                       init_alphas=init_alphas)
        model.fit(self.X, t)
        y, yerr = model.predict(self.X, return_std=True)
Example #9
 def __init__(
     self,
     n_particles,
     prior={
         "logk": norm(loc=np.log(1 / 365), scale=2),
         "s": halfnorm(loc=1, scale=2),
         "α": halfnorm(loc=0, scale=3),
     },
 ):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #10
 def __init__(
     self,
     n_particles,
     prior={
         "a": norm(loc=0.01, scale=0.1),
         "b": halfnorm(loc=0.001, scale=3),
         "α": halfnorm(loc=0, scale=3),
     },
 ):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #11
    def test_random_hyperparameters(self):
        np.random.seed(seed)
        init_beta = distribution_wrapper(stats.halfnorm(scale=1), single=True)
        init_alphas = distribution_wrapper(stats.halfnorm(scale=1),
                                           single=False)

        model = RelevanceVectorMachine(n_iter=50,
                                       verbose=False,
                                       compute_score=True,
                                       init_beta=init_beta,
                                       init_alphas=init_alphas)
        model.fit(self.X, t)
        y, yerr = model.predict(self.X, return_std=True)
Example #12
 def __init__(
     self,
     n_particles,
     prior={
         "gamma_reward": halfnorm(loc=0, scale=10),
         "gamma_delay": halfnorm(loc=0, scale=10),
         "k": norm(loc=0, scale=2),
         "α": halfnorm(loc=0, scale=3),
     },
 ):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #13
def _recursive_priors(kernel, prior_list):
    if hasattr(kernel, "kernel"):  # Unary operations
        _recursive_priors(kernel.kernel, prior_list)
    elif hasattr(kernel, "k1"):  # Binary operations
        _recursive_priors(kernel.k1, prior_list)
        _recursive_priors(kernel.k2, prior_list)
    elif hasattr(kernel, "kernels"):  # CompoundKernel
        for k in kernel.kernels:
            _recursive_priors(k, prior_list)
    else:
        name = type(kernel).__name__
        if name in ["ConstantKernel", "WhiteKernel"]:
            # We use a half-normal prior distribution on the signal variance and
            # noise. The input x is sampled in log-space, which is why the
            # change of variables is necessary.
            # This prior assumes that the function values are standardized.
            # Note that we do not know the structure of the kernel, which is
            # why this is only a best guess.
            prior_list.append(
                lambda x: halfnorm(scale=2.0).logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0)
            )
        elif name in ["Matern", "RBF"]:
            # Here we apply an inverse gamma distribution to any lengthscale
            # parameter we find. We assume the input variables are normalized
            # to lie in [0, 1]. The specific values for a and scale were
            # obtained by fitting the 1% and 99% quantile to 0.15 and 0.8.
            prior_list.append(
                lambda x: invgamma(a=8.286, scale=2.4605).logpdf(np.exp(x)) + x
            )
        else:
            raise NotImplementedError(
                f"Unable to guess priors for this kernel: {kernel}.")
Example #14
def mcmc_regression():
    np.random.seed(123)  # fix the seed to reproduce the plot exactly
    X, Y = generate_data(100)
    alpha_dist = beta1_dist = beta2_dist = norm(0, 10)
    sigma_dist = halfnorm(0, 1)
    dists = (alpha_dist, beta1_dist, beta2_dist, sigma_dist)

    nburnin = 50000
    niter = 50000 + nburnin
    ncomps = 4

    chain = np.zeros(shape=(niter, ncomps))
    chain[0, :] = np.abs(np.random.normal(size=ncomps))

    for i in tqdm(range(niter - 1)):
        v = chain[i]
        log_posterior_old = loglik(v, X, Y) + logprior(v, dists)
        proposal = proposalfunc(v)
        log_posterior_new = loglik(proposal, X, Y) + logprior(proposal, dists)
        a = min(0.0, log_posterior_new - log_posterior_old)
        if np.random.random() < np.exp(a):
            chain[i + 1, :] = proposal
        else:
            chain[i + 1, :] = v

    plot(chain, nburnin)
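The helpers generate_data, loglik, logprior, proposalfunc and plot are not shown. A minimal sketch of what logprior might look like, given the dists tuple above (assumed, not the original implementation):

def logprior(params, dists):
    # Sum the log-density of each parameter under its prior,
    # matching the (alpha, beta1, beta2, sigma) ordering of `dists`.
    return sum(dist.logpdf(p) for p, dist in zip(params, dists))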
Example #16
 def sample(self):
     self.std += self.dx
      # the step scale (and hence variance) grows as t -> infinity
     delta = stats.halfnorm(scale=self.std).rvs()
     state = self._clip(self.state + delta)
     self.state = state
     return state
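The enclosing class is not shown; a minimal sketch (class and attribute names assumed from the method above) of a clipped random walk that this sample method would fit into:

import numpy as np
from scipy import stats

class HalfNormalWalk:
    """Hypothetical wrapper: a state in [low, high] pushed by half-normal steps."""

    def __init__(self, low=0.0, high=1.0, std=0.1, dx=0.01):
        self.state, self.std, self.dx = low, std, dx
        self._low, self._high = low, high

    def _clip(self, x):
        return float(np.clip(x, self._low, self._high))

    def sample(self):
        self.std += self.dx  # step scale grows with every call
        delta = stats.halfnorm(scale=self.std).rvs()
        self.state = self._clip(self.state + delta)
        return self.state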
Example #17
 def __init__(
     self,
     n_particles,
     prior={"indiff": uniform(0, 1), "α": halfnorm(loc=0, scale=0.1)},
 ):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #18
 def __init__(
     self,
     n_particles,
     prior={"logk": norm(loc=-4.5, scale=1), "α": halfnorm(loc=0, scale=2)},
 ):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #19
def filter_events_array(trace_arr, scale=2):
    import numpy as np
    from scipy import stats
    filt = stats.halfnorm(loc=0, scale=scale).pdf(np.arange(20))
    filtered_arr = np.empty(trace_arr.shape)
    for ind_cell in range(trace_arr.shape[0]):
        this_trace = trace_arr[ind_cell, :]
        this_trace_filtered = np.convolve(this_trace, filt)[:len(this_trace)]
        filtered_arr[ind_cell, :] = this_trace_filtered
    return filtered_arr
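A small usage sketch (toy data, not from the source) showing the effect: each event impulse is smeared into a causal half-normal bump along the trace.

import numpy as np

traces = np.zeros((2, 100))
traces[0, 10] = 1.0   # one event in cell 0
traces[1, 50] = 2.0   # one event in cell 1
smoothed = filter_events_array(traces, scale=2)
print(smoothed.shape)               # (2, 100)
print(smoothed[0, 10:14].round(3))  # decaying response starting at the event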
Example #20
 def __init__(self,
              n_particles,
              prior={
                  "γ": beta(1, 1),
                  "α": halfnorm(loc=0, scale=3)
              }):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #21
    def testHalfNormalQuantile(self):
        batch_size = 50
        scale = self._rng.rand(batch_size) + 1.0
        p = np.linspace(0., 1.0, batch_size).astype(np.float64)

        halfnorm = tfd.HalfNormal(scale=scale)
        x = halfnorm.quantile(p)
        self._testBatchShapes(halfnorm, x)

        expected_x = sp_stats.halfnorm(scale=scale).ppf(p)
        self.assertAllClose(expected_x, self.evaluate(x), atol=0)
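For readers without TensorFlow Probability, a pure-SciPy sketch of the property being tested (the quantile function and the CDF are inverses of each other):

import numpy as np
from scipy import stats as sp_stats

scale = 1.5
p = np.linspace(0.01, 0.99, 5)
x = sp_stats.halfnorm(scale=scale).ppf(p)
np.testing.assert_allclose(sp_stats.halfnorm(scale=scale).cdf(x), p)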
Example #22
def sampling1(nsamples,
              ndim,
              scale=1.0,
              to_tensor=True,
              device="cpu",
              **kwargs):

    res = (scale * spstats.halfnorm().rvs((nsamples, 1))
           * uniform_sphere(nsamples, ndim))
    if to_tensor:
        res = torch.tensor(res.astype(np.float32), device=device)
    return res
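uniform_sphere is not defined above; a common implementation (assumed here) draws standard normals and normalizes them, giving directions uniform on the unit sphere, so the product with the half-normal radius yields points scattered around the origin.

import numpy as np

def uniform_sphere(nsamples, ndim):
    # Hypothetical helper: each row is a uniformly distributed unit vector in R^ndim.
    v = np.random.normal(size=(nsamples, ndim))
    return v / np.linalg.norm(v, axis=1, keepdims=True)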
Example #23
 def __init__(
     self,
     n_particles,
     prior={
         'm': norm(loc=-2.43, scale=2),
         'c': norm(loc=0, scale=100),
         'α': halfnorm(loc=0, scale=3)
     }):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {'ϵ': 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #24
    def __init__(self, n_particles=1000,
                 prior={'logk': norm(loc=-4.25, scale=1.5),
                        'α': halfnorm(loc=0, scale=2)}):

        self.n_particles = n_particles
        self.prior = prior
        self.θ_fixed = {'ϵ': 0.01}
        # Annoying, this is why they invented probabilistic programming
        true_alpha = np.abs(np.random.normal(loc=0., scale=2.))
        true_logk = np.random.normal(loc=-4.25, scale=1.5)
        self.θ_true = pd.DataFrame([{'α': true_alpha, 'logk': true_logk}])

        self.choiceFunction = CumulativeNormalChoiceFunc
Example #25
 def __init__(
     self,
     n_particles,
     prior={
         "m": norm(loc=-2.43, scale=2),
         "c": norm(loc=0, scale=100),
         "α": halfnorm(loc=0, scale=3),
     },
 ):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {"ϵ": 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #26
def response_xr_filtered(scale):
    filt = stats.halfnorm(loc=0, scale=scale).pdf(np.arange(20))

    filtered_arr = np.empty(trace_arr.shape)
    for ind_cell in range(trace_arr.shape[0]):
        this_trace = trace_arr[ind_cell, :]
        this_trace_filtered = np.convolve(this_trace, filt)[:len(this_trace)]
        filtered_arr[ind_cell, :] = this_trace_filtered

    response_xr_filtered = rp.get_response_xr(dataset, filtered_arr,
                                              ophys_timestamps, events,
                                              event_ids, trace_ids,
                                              response_analysis_params)
    return response_xr_filtered
Example #27
def _recursive_priors(kernel, prior_list):
    if hasattr(kernel, "kernel"):  # Unary operations
        _recursive_priors(kernel.kernel, prior_list)
    elif hasattr(kernel, "k1"):  # Binary operations
        _recursive_priors(kernel.k1, prior_list)
        _recursive_priors(kernel.k2, prior_list)
    elif hasattr(kernel, "kernels"):  # CompoundKernel
        # It seems that the skopt kernels are not compatible with the
        # CompoundKernel. This is therefore not officially supported.
        for k in kernel.kernels:
            _recursive_priors(k, prior_list)
    else:
        name = type(kernel).__name__
        if name in ["ConstantKernel", "WhiteKernel"]:
            if name == "ConstantKernel" and kernel.constant_value_bounds == "fixed":
                return
            if name == "WhiteKernel" and kernel.noise_level_bounds == "fixed":
                return
            # We use a half-normal prior distribution on the signal variance and
            # noise. The input x is sampled in log-space, which is why the
            # change of variables is necessary.
            # This prior assumes that the function values are standardized.
            # Note that we do not know the structure of the kernel, which is
            # why this is only a best guess.
            prior_list.append(
                lambda x: halfnorm(scale=2.0).logpdf(np.sqrt(np.exp(x))) + x / 2.0 - np.log(2.0)
            )
        elif name in ["Matern", "RBF"]:
            # Here we apply a round-flat prior distribution to any lengthscale
            # parameter we find. We assume the input variables are normalized
            # to lie in [0, 1].
            # For common optimization problems, we expect the lengthscales to
            # lie in the range [0.1, 0.6]. The round-flat prior allows values
            # outside the range, if supported by enough datapoints.
            if isinstance(kernel.length_scale,
                          (collections.abc.Sequence, np.ndarray)):
                n_priors = len(kernel.length_scale)
            else:
                n_priors = 1
            roundflat = make_roundflat(
                lower_bound=0.1,
                upper_bound=0.6,
                lower_steepness=2.0,
                upper_steepness=8.0,
            )
            for _ in range(n_priors):
                prior_list.append(lambda x: roundflat(np.exp(x)) + x)
        else:
            raise NotImplementedError(
                f"Unable to guess priors for this kernel: {kernel}.")
Example #28
    def testHalfNormalSurvivalFunction(self):
        batch_size = 50
        scale = self._rng.rand(batch_size) + 1.0
        x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
        halfnorm = tfd.HalfNormal(scale=scale)

        sf = halfnorm.survival_function(x)
        self._testBatchShapes(halfnorm, sf)

        log_sf = halfnorm.log_survival_function(x)
        self._testBatchShapes(halfnorm, log_sf)

        expected_logsf = sp_stats.halfnorm(scale=scale).logsf(x)
        self.assertAllClose(expected_logsf, self.evaluate(log_sf), atol=0)
        self.assertAllClose(np.exp(expected_logsf), self.evaluate(sf), atol=0)
Example #29
  def testHalfNormalLogPDFMultidimensional(self):
    batch_size = 6
    scale = tf.constant([[3.0, 1.0]] * batch_size)
    x = np.array([[-2.5, 2.5, 4.0, 0.0, -1.0, 2.0]], dtype=np.float32).T
    halfnorm = tfd.HalfNormal(scale=scale, validate_args=False)

    log_pdf = halfnorm.log_prob(x)
    self._testBatchShapes(halfnorm, log_pdf)

    pdf = halfnorm.prob(x)
    self._testBatchShapes(halfnorm, pdf)

    expected_log_pdf = sp_stats.halfnorm(scale=self.evaluate(scale)).logpdf(x)
    self.assertAllClose(expected_log_pdf, self.evaluate(log_pdf))
    self.assertAllClose(np.exp(expected_log_pdf), self.evaluate(pdf))
Example #30
 def __init__(
     self,
     n_particles,
     prior={
         'β0': norm(loc=0, scale=50),
         'β1': norm(loc=0, scale=50),
         'β2': norm(loc=0, scale=50),
         'β3': norm(loc=0, scale=50),
         'β4': norm(loc=0, scale=50),
         'α': halfnorm(loc=0, scale=3)
     }):
     self.n_particles = int(n_particles)
     self.prior = prior
     self.θ_fixed = {'ϵ': 0.01}
     self.choiceFunction = CumulativeNormalChoiceFunc
Example #31
  def testHalfNormalCDF(self):
    batch_size = 50
    scale = self._rng.rand(batch_size) + 1.0
    x = np.linspace(-8.0, 8.0, batch_size).astype(np.float64)
    halfnorm = tfd.HalfNormal(scale=scale, validate_args=False)

    cdf = halfnorm.cdf(x)
    self._testBatchShapes(halfnorm, cdf)

    log_cdf = halfnorm.log_cdf(x)
    self._testBatchShapes(halfnorm, log_cdf)

    expected_logcdf = sp_stats.halfnorm(scale=scale).logcdf(x)
    self.assertAllClose(expected_logcdf, self.evaluate(log_cdf), atol=0)
    self.assertAllClose(np.exp(expected_logcdf), self.evaluate(cdf), atol=0)
Example #32
        var_mean = dot(pdf_mean, (x-mean_mean)**2)/pdf_mean.sum()

        f1 = figure(num=1, figsize=(13.66,7.68), dpi=100)
        plot(x, pdf_actual, color='blue', label='Numerical')
        plot(x, pdf_peak, color='red', linestyle='--', label='Theoretical')
        plot(x, pdf_mean, color='green', linestyle='--', label='Theo mean')
        xlabel('Membrane potential (volt)')
        title('Theoretical and numerical pre-spike volt distributions')
        legend(loc='best')
        figname = 'tmp/%s_psvolt.png' % filehead
        savefig(figname)
        f1.clf()
        rms_mean = sqrt(mean((pdf_mean - pdf_actual)**2))
        rms_peak = sqrt(mean((pdf_peak - pdf_actual)**2))

        hn_mean = halfnorm(loc=v_th-Mt_mean, scale=sqrt(Vt_mean)/dt)
        hn_peak = halfnorm(loc=v_th-Mt_peak, scale=sqrt(Vt_peak)/dt)

        quantiles = frange(0, 1, 0.01)

        hn_mean_qs = hn_mean.ppf(quantiles)
        hn_peak_qs = hn_peak.ppf(quantiles)

        #from IPython import embed
        #embed()
        #sys.exit()

        #print "RMS mean dist: %s\nRMS peak dist: %f" % (rms_mean, rms_peak)
        #print "KS Mean: %f\nKS Peak: %f" % (ks_mean[1], ks_peak[1])
        #print "Avgs Actual, Mean, Peak: %10.9f, %10.9f, %10.9f" % (
        #        mean_actual,