from typing import Tuple

import numpy as np
from scipy.stats import truncnorm


def calc_decay_weights_normal(shape: Tuple[int, int], sigma: float = 1.0) -> np.ndarray:
    """
    Generate a matrix of weights sampled from a truncated bivariate normal
    distribution centered at (0, 0) whose covariance matrix is sigma times
    the identity matrix.

    In effect it is the product of two independent, identical normal
    distributions with mean 0 and standard deviation sigma, truncated to
    the range [-1, 1].

    A negative sigma generates an "upside-down" bivariate normal;
    sigma=0 returns a matrix of uniform weights.
    """
    h, w = shape
    if sigma == 0:
        return np.ones((h, w))
    else:
        scale = abs(sigma)
        # scipy's truncnorm takes its bounds in standardized units, so the
        # cutoffs must be divided by the scale to truncate at x = -1 and 1
        a, b = -1 / scale, 1 / scale
        h_arr = truncnorm.pdf(np.linspace(-1, 1, h), a, b, loc=0, scale=scale)
        w_arr = truncnorm.pdf(np.linspace(-1, 1, w), a, b, loc=0, scale=scale)
        # indexing='ij' keeps the output shape (h, w) rather than (w, h)
        h_arr, w_arr = np.meshgrid(h_arr, w_arr, indexing='ij')
        if sigma > 0:
            weights = h_arr * w_arr
        else:
            weights = 1 - h_arr * w_arr

        # if the sum of weights is numerically zero, fall back to uniform weights
        if abs(np.sum(weights)) > 1e-5:
            return weights
        else:
            return np.ones((h, w))
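A minimal usage sketch of the helper above (the shape and sigma values are illustrative, not from the source):

weights = calc_decay_weights_normal((5, 7), sigma=0.5)
probs = weights / weights.sum()  # normalize the weights into a probability matrix
print(probs.shape)               # (5, 7)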
Example #2
    def indiv_MHRW(self, theta, cov, epsilon, prev_gen_sample, prev_gen_eta):
        '''
        Return the Metropolis-Hastings random walk kernel update of the thetas=(phi, eta, tau)
        theta (array-like): the prior to simulate the population from
        cov ((3,3)-array): the covariance matrix of the previously computed thetas
        epsilon (float): the tolerance level of the current iteration
        prev_gen_sample (array-like): a previously generated sample
        prev_gen_eta (array-like): the eta associated with that sample
        -------------------------------------------------------------------------
        returns (array-like): the updated theta, or the original theta if rejected
        '''

        new_theta = theta + np.random.multivariate_normal([0, 0, 0], (
            (2.38**2) / 3) * cov)  # theta has dimensionality 3
        new_theta = new_theta / new_theta.sum(
        )  # reparametrization on the real line. Is it the right way to do it?

        # simulate a population from the proposed parameters
        new_pop, fail_to_gen_pop = self.simulate_population(new_theta,
                                                            verbose=False)
        if not fail_to_gen_pop:
            new_sample = np.random.choice(new_pop,
                                          self.sample_size,
                                          replace=True)
            new_gen_eta = self.compute_eta(new_sample)

            # ratio of ABC acceptance indicators; the denominator is 1
            # whenever the previous state satisfied the tolerance
            indicator_ratio = int(
                self.compute_rho(new_gen_eta) < epsilon) / int(
                    self.compute_rho(prev_gen_eta) < epsilon)
            proposal_ratio = 1  # the random walk proposal is symmetric, so q(theta*,theta)/q(theta,theta*) = 1
            new_priors = np.array([
                gamma.pdf(x=new_theta[0], a=0.1),
                uniform.pdf(x=new_theta[1], loc=0, scale=theta[0]),
                truncnorm.pdf(x=new_theta[2],
                              a=0,
                              b=10**10,
                              loc=0.198,
                              scale=0.067352)
            ])
            old_priors = np.array([
                gamma.pdf(x=theta[0], a=0.1),
                uniform.pdf(x=theta[1], loc=0, scale=theta[0]),
                truncnorm.pdf(x=theta[2],
                              a=0,
                              b=10**10,
                              loc=0.198,
                              scale=0.067352)
            ])

            acceptance_probas = np.minimum(
                np.ones(len(theta)),
                indicator_ratio * proposal_ratio * new_priors / old_priors)
            unif_draws = uniform.rvs(size=len(theta))

            new_theta_accepted = unif_draws <= acceptance_probas
            final_theta = np.where(new_theta_accepted, new_theta, theta)

        else:
            final_theta = theta

        return final_theta  # might also return acceptance_probas
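A hedged sketch of the acceptance rule the method relies on: with a symmetric random walk the proposal ratio cancels, leaving the ABC indicator ratio times the prior ratio. All names below are mine, not the source's:

import numpy as np

def mh_accept(prior_new, prior_old, indicator_new, indicator_old, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    if indicator_old == 0:  # guard the 0/0 division the method above can hit
        return False
    ratio = (indicator_new / indicator_old) * (prior_new / prior_old)
    return rng.uniform() <= min(1.0, ratio)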
Example #3
def random_walk(x_initial, sd_q, max_iteration, Range):
    """Metropolis random walk: takes the initial guess of the parameter, the
    proposal standard deviation for Markov sampling, the maximum number of
    iterations, and the range on which the truncated normal is defined."""
    x_new = list()
    x_new.append(x_initial)
    x_state = x_initial  #this state variable is updated when the generated probability is favourable
    for i in range(max_iteration):
        x_star = np.random.normal(
            x_state, sd_q)  # proposal draw centered at the current state with std sd_q
        limit = np.random.uniform(
            low=0, high=1)  #uniform draw for the accept/reject step

        # use the running mean of the chain as the target location once
        # enough samples have accumulated
        if i < 100:
            mean_ap = x_state
        else:
            mean_ap = np.mean(x_new)
        if limit < min(
                1,
                truncnorm.pdf(x_star, Range[0], Range[1], loc=mean_ap, scale=3)
                / truncnorm.pdf(
                    x_state, Range[0], Range[1], loc=mean_ap, scale=3)):
            x_state = x_star
        x_new.append(x_state)

    return x_new
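An illustrative call of the sampler above (argument values are assumed, not from the source):

import numpy as np
from scipy.stats import truncnorm  # both also required by random_walk itself

chain = random_walk(x_initial=0.0, sd_q=1.0, max_iteration=5000, Range=(-2, 2))
print(np.mean(chain), np.std(chain))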
Example #4
def calculate_decay_weights_normal(shape: tuple,
                                   sigma: float = 1) -> np.ndarray:
    """
    Generate a matrix of weights sampled from a truncated bivariate normal
    distribution centered at (0, 0) whose covariance matrix is sigma times
    the identity matrix.

    In effect it is the product of two independent, identical normal
    distributions with mean 0 and standard deviation sigma, truncated to
    the range [-1, 1].

    A negative sigma generates an "upside-down" bivariate normal.
    """
    assert sigma != 0
    from scipy.stats import truncnorm
    h, w = shape
    scale = abs(sigma)
    # scipy's truncnorm takes its bounds in standardized units, so divide
    # the cutoffs by the scale to truncate at x = -1 and 1
    a, b = -1 / scale, 1 / scale
    h_arr = truncnorm.pdf(np.linspace(-1, 1, h), a, b, loc=0, scale=scale)
    w_arr = truncnorm.pdf(np.linspace(-1, 1, w), a, b, loc=0, scale=scale)
    # indexing='ij' keeps the output shape (h, w) rather than (w, h)
    h_arr, w_arr = np.meshgrid(h_arr, w_arr, indexing='ij')
    if sigma > 0:
        weights = h_arr * w_arr
    else:
        weights = 1 - h_arr * w_arr
    return weights
Example #5
def CreateGaussianMixtureModel(image, kp, dimension=0):
    # with no keypoints, fall back to a single truncated normal over a
    # default 28-pixel axis
    if len(kp) == 0:
        myclip_a = 0
        myclip_b = 28
        my_mean = 10
        my_std = 3

        a, b = (myclip_a - my_mean) / my_std, (myclip_b - my_mean) / my_std
        x_range = np.linspace(myclip_a, myclip_b, 28)
        sampled = truncnorm.pdf(x_range, a, b, loc=my_mean, scale=my_std)

        return sampled, 28

    shape = image.shape
    if dimension == 0:
        observations = shape[1]
        index_to_use = 1
    else:
        observations = shape[0]
        index_to_use = 0
    distributions = []
    sum_of_weights = 0
    for k in kp:
        mu, sigma = int(round(k.pt[index_to_use])), k.size

        myclip_a = 0
        myclip_b = observations
        my_mean = mu
        # half the keypoint size is used as the scale; the clip bounds must
        # be standardized by that same scale for scipy's truncnorm
        my_scale = sigma / 2

        a, b = (myclip_a - my_mean) / my_scale, (myclip_b - my_mean) / my_scale
        x_range = np.linspace(myclip_a, myclip_b, observations)
        lamb = truncnorm.pdf(x_range, a, b, loc=my_mean, scale=my_scale)

        distributions.append(lamb)
        sum_of_weights += k.response
    # per-keypoint mixture weights (currently unused; see the sketch below)
    gamma = []
    for k in kp:
        gamma.append(k.response / sum_of_weights)
    A = []
    sum_of_densitys = 0
    #print("observations: %s distributions: %s "%(observations, len(distributions)))
    # here we assume that the shape returns a size and not a highest index... may be problematic
    for i in range(observations - 1):
        prob_of_observation = 0
        for d in distributions:
            prob_of_observation = prob_of_observation + d[i]
        A.append(prob_of_observation)
        sum_of_densitys = sum_of_densitys + prob_of_observation

    A = np.divide(A, np.sum(A))
    # force the discrete distribution to sum to exactly 1 by dumping the
    # floating-point remainder on the most likely bucket
    if np.sum(A) != 1:
        val_to_add = 1 - np.sum(A)
        A[np.argmax(A)] = A[np.argmax(A)] + val_to_add
    return A, observations
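The response-based weights (gamma) above are computed but never applied to the densities; this sketch shows how a weighted mixture would combine them. It is my reading of the intent, not the source's code:

import numpy as np

def weighted_mixture(distributions, weights):
    # convex combination of the per-keypoint densities, renormalized
    mix = np.zeros_like(distributions[0])
    for w, d in zip(weights, distributions):
        mix += w * d
    return mix / mix.sum()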
Example #6
    def pdf(self, x: Tuple[float]):
        """Find the PDF for a certain (x, y) point.

        Args:
            x (Tuple[float]): The (x, y) point at which the PDF is evaluated.
        """
        x_a, x_b = (self.x_lower_bound - self.x_mean) / self.x_std, (
            self.x_upper_bound - self.x_mean) / self.x_std
        y_a, y_b = (self.y_lower_bound - self.y_mean) / self.y_std, (
            self.y_upper_bound - self.y_mean) / self.y_std
        return truncnorm.pdf(x[0], x_a, x_b, self.x_mean,
                             self.x_std) * truncnorm.pdf(
                                 x[1], y_a, y_b, self.y_mean, self.y_std)
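A self-contained rendering of the same bivariate evaluation (the bounds, means, and standard deviations are assumed values):

from scipy.stats import truncnorm

x_mean, x_std, x_lo, x_hi = 0.0, 1.0, -2.0, 2.0
y_mean, y_std, y_lo, y_hi = 1.0, 0.5, 0.0, 3.0
point = (0.3, 1.2)
x_a, x_b = (x_lo - x_mean) / x_std, (x_hi - x_mean) / x_std
y_a, y_b = (y_lo - y_mean) / y_std, (y_hi - y_mean) / y_std
density = (truncnorm.pdf(point[0], x_a, x_b, x_mean, x_std)
           * truncnorm.pdf(point[1], y_a, y_b, y_mean, y_std))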
Example #7
def usrf(status, x, needF, neF, F, needG, neG, G, cu, iu, ru):
    """
    ==================================================================
    Computes the nonlinear objective and constraint terms for the
    problem.
    ==================================================================
    """

    # print('called usrfun with ' + str(len(G)) + ' non-linear variables')

    if (needF[0] != 0):
        # the second last row is for chance constraint
        F[neF[0] - 2] = 0

        if cc_var > 0:
            F[neF[0] - 2] += x[cc_var]

        for idx in range(0, int(len(G) / 2)):
            mean = prob_means[idx]
            sigma = prob_stds[idx]
            lb_var = prob_vars[2 * idx]
            ub_var = prob_vars[2 * idx + 1]
            # print("Mean: " + str(mean) + " / Sigma: " + str(sigma))

            a, b = (0 - mean) / sigma, (1e6 - mean) / sigma

            ub_survival = truncnorm.sf(x[ub_var], a, b, loc=mean, scale=sigma)
            lb_mass = truncnorm.cdf(x[lb_var], a, b, loc=mean, scale=sigma)

            F[neF[0] - 2] += ub_survival + lb_mass

            # print('Updating F['+str(neF[0] - 2)+']: ' + str(x[lb_var]) + '-' + str(x[ub_var]) + ': ' + str(lb_mass) + "+" +str(ub_survival) + "="+str(F[neF[0] - 2]))

    if (needG[0] != 0):
        # Compute the partial derivatives of the chance constraint
        # over the lower and upper bounds of the
        # probabilistic durations
        for idx in range(0, int(len(G) / 2)):
            mean = prob_means[idx]
            sigma = prob_stds[idx]
            lb_var = prob_vars[2 * idx]
            ub_var = prob_vars[2 * idx + 1]

            a, b = (0 - mean) / sigma, (1e6 - mean) / sigma

            # For the lower bound, the derivative is the Gaussian pdf
            G[2 * idx] = truncnorm.pdf(x[lb_var], a, b, loc=mean, scale=sigma)

            # For the upper bound, it is the negation of the Gaussian pdf
            G[2 * idx +
              1] = -1 * truncnorm.pdf(x[ub_var], a, b, loc=mean, scale=sigma)
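A standalone numerical check of the gradient convention used above: the derivative of the truncated-normal cdf is the pdf, and of the survival function its negation (the mean and sigma here are assumed values):

from scipy.stats import truncnorm

mean, sigma = 10.0, 2.0
a, b = (0 - mean) / sigma, (1e6 - mean) / sigma
x, h = 9.0, 1e-6
num_cdf = (truncnorm.cdf(x + h, a, b, loc=mean, scale=sigma)
           - truncnorm.cdf(x - h, a, b, loc=mean, scale=sigma)) / (2 * h)
assert abs(num_cdf - truncnorm.pdf(x, a, b, loc=mean, scale=sigma)) < 1e-4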
Example #8
import numpy as np
from scipy.stats import norm, truncnorm

def jump_dst(theta_old, j, user_std, K):
    #theta_old: previous value of vector theta
    #j: index for which the jumping distribution is unconditioned
    #user_std: step size of the jumping distribution

    dt = 0.0001  #avoid exactly taking limits of bounds

    mu = theta_old[j]
    theta = theta_old.copy()

    # q_eval_new is q(x_new | x_old).
    # q_eval_old is q(x_old | x_new).

    if j < (K*K*2):
        # unbounded entries get a plain Gaussian step
        theta[j] = norm.rvs(loc=mu, scale=user_std)
        q_eval_new = norm.pdf(theta[j], loc=mu, scale=user_std)
        q_eval_old = norm.pdf(mu, loc=theta[j], scale=user_std)
    elif (j >= (K*K*2)) and (j < (K*K*2+K)):
        # bounded between (-1, 1)
        a_new, b_new = (-1+dt - mu) / user_std, (1-dt - mu) / user_std
        theta[j] = truncnorm.rvs(a=a_new, b=b_new, loc=mu, scale=user_std)
        a_old, b_old = (-1+dt - theta[j]) / user_std, (1-dt - theta[j]) / user_std
        # the densities must be evaluated at a point (x as first argument)
        q_eval_new = truncnorm.pdf(theta[j], a=a_new, b=b_new, loc=mu, scale=user_std)
        q_eval_old = truncnorm.pdf(mu, a=a_old, b=b_old, loc=theta[j], scale=user_std)
    elif (j >= (K*K*2+K)) and (j < (K*K*2+K*2)):
        # bounded between (0, +inf); .rvs() draws the value itself
        a_new = (0+dt - mu) / user_std
        theta[j] = truncnorm.rvs(a=a_new, b=np.inf, loc=mu, scale=user_std)
        a_old = (0+dt - theta[j]) / user_std
        q_eval_new = truncnorm.pdf(theta[j], a=a_new, b=np.inf, loc=mu, scale=user_std)
        q_eval_old = truncnorm.pdf(mu, a=a_old, b=np.inf, loc=theta[j], scale=user_std)
    else:
        raise IndexError("index j out of bounds")

    # theta layout, for reference:
    #   theta[:K*K]              -> vecA,  theta[K*K:K*K*2]       -> vecU
    #   theta[K*K*2:K*K*2+K]     -> eigenvalues of A
    #   theta[K*K*2+K:K*K*2+K*2] -> eigenvalues of U
    #   A = vecA @ diag(valA) @ inv(vecA), U = vecU @ diag(valU) @ inv(vecU)

    return theta, q_eval_new, q_eval_old
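Because the truncated proposals above are asymmetric, a consumer of jump_dst needs the full Hastings correction; a minimal sketch of that step (log_target is an assumed placeholder for the log posterior, not from the source):

import numpy as np

def mh_step(theta_old, j, user_std, K, log_target):
    theta_new, q_new, q_old = jump_dst(theta_old, j, user_std, K)
    # Hastings ratio: p(new) q(old|new) / (p(old) q(new|old))
    ratio = np.exp(log_target(theta_new) - log_target(theta_old)) * q_old / q_new
    if np.random.uniform() < min(1.0, ratio):
        return theta_new
    return theta_old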
Example #9
 def run(self):
     ti = self.ti
     tj = self.tj
     vmax = self.vmax
     vmean = self.vmean
     while True:
         if not self.tq.empty():
             edge = self.tq.get()
             if edge is None:
                 break
             else:
                 t, edgelist = edge
                 sigma = sqrt(((t - ti) * (tj - t)) / (tj - ti))
                 mu = vmean * (t - ti)
                 timep = {}
                 Lx = max(1 - vmax * (tj - t), -vmax * (t - ti))
                 Ux = min(vmax * (t - ti), vmax * (tj - t) + 1)
                 for k, d1, d2 in edgelist:
                     x = self.edgevars[k].x
                     if Lx <= x <= Ux:
                         y = self.edgevars[k].y
                         Uy = min(sqrt(Ux**2.0 - x**2.0),
                                  sqrt((1 - Lx)**2.0 - (1 - x)**2.0))
                         if y <= Uy:
                             Ly = -Uy
                             Px = truncnorm.pdf(x, (Lx - mu) / sigma,
                                                (Ux - mu) / sigma,
                                                loc=mu,
                                                scale=sigma)
                             Py = truncnorm.pdf(y,
                                                Ly / sigma,
                                                Uy / sigma,
                                                scale=sigma)
                             P = Px * Py
                             timep[k] = P
                 totalp = sum(timep.values())
                 resultedges = {}
                 if totalp > 0:
                     normfactor = 1.0 / totalp
                     for k in timep:
                         normp = timep[k] * normfactor
                         try:
                             P = log(1 - normp) * 30
                         except ValueError:
                             # log of a non-positive number when normp >= 1
                             P = log(.000001) * 30
                         resultedges[k] = (P, normp)
                 self.rq.put(resultedges)
         else:
             sleep(.1)
Example #11
 def pdf(self, x):
     pdfs = truncnorm.pdf(x,
                          self.a,
                          self.b,
                          loc=self.means,
                          scale=self.sigmas)
     return np.sum(np.dot(pdfs, self.coeff))
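A standalone rendering of the same mixture evaluation (the component parameters and coefficients are assumed values):

import numpy as np
from scipy.stats import truncnorm

means = np.array([0.0, 1.0])
sigmas = np.array([0.5, 0.8])
coeff = np.array([0.3, 0.7])
a, b = -2.0, 2.0                # standardized bounds, assumed shared
pdfs = truncnorm.pdf(0.4, a, b, loc=means, scale=sigmas)
mixture_pdf = float(np.dot(pdfs, coeff))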
Example #12
 def computeReward(self, nearestNeighbourDistance):
     reward = 0.0
     for i, r in enumerate(nearestNeighbourDistance):
         # Lennard-Jones potential
         if self.potential == "Lennard-Jones":
             x = self.sigmaPotential / r
             reward -= 4 * self.epsilon * (x**12 - x**6)
         # Harmonic potential
         elif self.potential == "Harmonic":
             reward += self.epsilon - 4 * self.epsilon / self.sigmaPotential**2 * (
                 156 / 2**(7 / 3) - 42 / 2**
                 (4 / 3)) * (r - 2**(1 / 6) * self.sigmaPotential)**2
         # Observations (https://www.sciencedirect.com/science/article/pii/0304380094900132)
         elif self.potential == "Observed":
             if i > 2:
                 assert 0, ("The 'Observed' reward only supports up to "
                            "3 nearest neighbours")
             # rTest = np.linspace(-10,10,1001)
             # plt.plot(rTest, truncnorm.pdf(rTest, a=observedA[i], b=observedB[i], loc=observedMean[i], scale=observedSigma[i]))
             reward += truncnorm.pdf(r,
                                     a=observedA[i],
                                     b=observedB[i],
                                     loc=observedMean[i],
                                     scale=observedSigma[i])
         else:
             assert 0, "Please choose a pair-potential that is implemented"
     # plt.show()
     # print(nearestNeighbourDistance, reward)
     return reward
Example #13
 def yonas_3(self):
     a, b = 0, 3
     x_sim = np.linspace(truncnorm.ppf(0.01, a, b),
                         truncnorm.ppf(0.99, a, b), self.n_intervals)
     y = np.exp(truncnorm.pdf(x_sim, a, b) * 1.35)
     return y
Example #14
    def loss_dual(y_true, y_pred):
        log_pi = np.zeros(no_trajectories)

        for i_col in range(no_trajectories):
            print(y_pred[i_col, 0:no_pv])
            log_pi[i_col] = truncnorm.pdf(tr_qg,
                                          qg_min,
                                          qg_max,
                                          loc=y_pred[i_col, 0:no_pv],
                                          scale=y_pred[i_col, no_pv:])
        return 0 * ke.mean(y_true - y_pred) + np.sum(
            obj_loss * truncnorm.pdf(tr_qg,
                                     qg_min,
                                     qg_max,
                                     loc=y_pred[i_col, 0:no_pv],
                                     scale=y_pred[:, no_pv:]))
Example #15
 def PDF_func(self, t):
     return truncnorm.pdf(
         t,
         self.__low_bound,
         self.__up_bound,
         loc=self.mu,
         scale=self.sigma)
Example #16
def ln_prior_z(ln_z_b, ln_t_b, z_min=c.min_z, z_max=c.max_z, normed=False):
    """
    Return the prior probability on the log of the metallicity.

    """

    Z = np.exp(ln_z_b)
    t = np.exp(ln_t_b)

    if Z < z_min or Z > z_max: return -np.inf

    # Get redshift corresponding to age
    z_ref = utilities.get_z_from_t(t)

    # Get metallicity of the universe at that time
    Z_ref = calc_Z(z_ref)

    # Set the scale around the metallicity - in logspace
    log_Z_scale = 0.5

    # The truncnorm function is slower, but produces a normalized distribution.
    if normed:
        a, b = (np.log10(z_min) - np.log10(Z_ref)) / 0.5, (
            np.log10(z_max) - np.log10(Z_ref)) / 0.5
        return np.log(
            truncnorm.pdf(np.log10(Z), a, b, loc=np.log10(Z_ref), scale=0.5))

    else:
        ln_prior = -(np.log10(Z) - np.log10(Z_ref))**2 / (2 * log_Z_scale**2)
        ln_prior[Z < z_min] = -np.inf
        ln_prior[Z > z_max] = -np.inf
        return ln_prior
Example #17
def lookup_value_add(find_obj_index, alt_obj_index, sample_pt, alt_obj_pose,
                     alt_obj_pose_conf):
    #We take inputs as the index of the object that we would like to find, index of the object we are comparing with,
    #Sample point which we are checking the value of, pose of the alternate object, and confidence of detection of the alternate object.

    #The mean radius and standard deviation are defined pairwise, so we look up this data.
    mean_rad_obj = spatial_rel_mean[find_obj_index][alt_obj_index]
    dev_rad_obj = spatial_rel_dev[find_obj_index][alt_obj_index]

    lower_bound_calc = (lower_bound - mean_rad_obj) / dev_rad_obj
    upper_bound_calc = (upper_bound - mean_rad_obj) / dev_rad_obj

    prob_dist_func = truncnorm.pdf(rad_dist, lower_bound_calc,
                                   upper_bound_calc, mean_rad_obj, dev_rad_obj)

    ##Calculate radius as norm of sample point minus the alt_obj_pose
    #radius_val =

    #Must calculate the radius bucket that this particular location falls into.
    #(bucket stays unset if radius_val falls outside the rad_dist range)
    for i in range(0, discrete_size - 1):
        if (radius_val > rad_dist[i]) and (radius_val < rad_dist[i + 1]):
            bucket = i

    # prob_dist_func is an array of densities, so index it rather than call it
    value_add = alt_obj_pose_conf * prob_dist_func[bucket]
    return value_add
Example #18
 def pdf(self, x, alpha, beta):
     bounds_rescaled = self.bounds_rescaled(alpha, beta)
     return truncnorm.pdf(a=bounds_rescaled[:, 0].detach().numpy(),
                          b=bounds_rescaled[:, 1].detach().numpy(),
                          loc=alpha.detach().numpy(),
                          scale=beta.exp().detach().numpy(),
                          x=x.numpy())
Example #19
def pdf(x):
    """
    Probability distribution function for Random Variable X
    from which we want to sample points. Here we assume
    we have truncated standard normal distribution in the domain of -3 to 3
    """
    return truncnorm.pdf(x, xdomain[0], xdomain[1])
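The docstring says points are to be drawn from this density; a minimal rejection-sampling sketch built on it (the bounds and the envelope constant are my assumptions):

import numpy as np
from scipy.stats import truncnorm

xdomain = (-3, 3)  # assumed module-level truncation bounds

def sample(n, rng=None):
    rng = np.random.default_rng() if rng is None else rng
    peak = truncnorm.pdf(0, xdomain[0], xdomain[1])  # density peaks at 0
    out = []
    while len(out) < n:
        x = rng.uniform(xdomain[0], xdomain[1])
        if rng.uniform(0, peak) < truncnorm.pdf(x, xdomain[0], xdomain[1]):
            out.append(x)
    return np.array(out)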
Example #20
def _create_gaussian_longitudinal_profile(z_c, s_z, n_part, sigma_trunc_lon,
                                          min_len_scale_noise):
    """ Creates a Gaussian longitudinal profile """
    # Make sure number of particles is an integer
    n_part = int(n_part)
    if min_len_scale_noise is None:
        if sigma_trunc_lon is not None:
            z = truncnorm.rvs(-sigma_trunc_lon,
                              sigma_trunc_lon,
                              loc=z_c,
                              scale=s_z,
                              size=n_part)
        else:
            z = np.random.normal(z_c, s_z, n_part)
    else:
        tot_len = 2 * sigma_trunc_lon * s_z
        n_slices = int(np.round(tot_len / (min_len_scale_noise)))
        part_per_slice = 2 * sigma_trunc_lon * n_part / n_slices * truncnorm.pdf(
            np.linspace(-sigma_trunc_lon, sigma_trunc_lon, n_slices),
            -sigma_trunc_lon, sigma_trunc_lon)
        part_per_slice = part_per_slice.astype(int)
        slice_edges = np.linspace(z_c - sigma_trunc_lon * s_z,
                                  z_c + sigma_trunc_lon * s_z, n_slices + 1)
        z = _create_smooth_z_array(part_per_slice, slice_edges)
    return z
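A standalone sanity check of the per-slice weight formula above: the counts should sum to roughly n_part, since the Riemann sum of the pdf over the truncated support is about 1 (all values assumed):

import numpy as np
from scipy.stats import truncnorm

n_part, s_trunc, n_slices = 100000, 3.0, 200
grid = np.linspace(-s_trunc, s_trunc, n_slices)
counts = 2 * s_trunc * n_part / n_slices * truncnorm.pdf(grid, -s_trunc, s_trunc)
print(int(counts.sum()))  # close to n_part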
Example #21
    def pdf(self, x) -> float:
        """
        Calculate the Normal probability distribution value at position `x`.
        :param x: value where the probability distribution function is evaluated.
        :return: value of the probability distribution function.
        """

        if self.hard_clip_min is not None and (x < self.hard_clip_min):
            return 0.

        if self.hard_clip_max is not None and (x > self.hard_clip_max):
            return 0.

        if self.hard_clip_min is not None or self.hard_clip_max is not None:
            a = -np.inf
            b = np.inf

            if self.hard_clip_min is not None:
                a = (self.hard_clip_min - self.mean) / self.std

            if self.hard_clip_max is not None:
                b = (self.hard_clip_max - self.mean) / self.std

            return truncnorm.pdf(x, a=a, b=b, loc=self.mean, scale=self.std)

        return norm.pdf(x, loc=self.mean, scale=self.std)
Example #22
 def late(self):
     a, b = 2, 3
     y = np.linspace(truncnorm.ppf(0.01, a, b), truncnorm.ppf(0.99, a, b),
                     self.n_intervals)
     y = np.exp(truncnorm.pdf(y, a, b))
     y = y[::-1] / 13 + 1
     y -= y[0] - 1
     return y
Example #23
 def prob(self, sample):
     x, y, yaw = sample
     dx = x - self.x
     dy = y - self.y
     dyaw = circular_difference(yaw, self.yaw)
     # note: truncnorm's a and b are in standardized units, so the yaw term
     # is truncated at +/- pi * ori_std rather than at +/- pi radians
     return norm.pdf(dx, scale=self.pos_std) * \
            norm.pdf(dy, scale=self.pos_std) * \
            truncnorm.pdf(dyaw, a=-np.pi, b=np.pi, scale=self.ori_std)
Example #24
def impsample1():
    x = truncnorm.rvs(a1, b1, loc=0, size=N)
    p = truncnorm.pdf(x, a1, b1, loc=0)
    z = np.empty(N)
    for i in range(N):
        z[i] = integrand1(x[i])
        z[i] /= p[i]
    return np.mean(z)
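A self-contained, vectorized version of the importance-sampling pattern above (the integrand, bounds, and sample count are assumed, not from the source):

import numpy as np
from scipy.stats import truncnorm

a1, b1, N = -1.0, 1.0, 10000
integrand1 = lambda t: np.exp(-t ** 2)  # assumed integrand
x = truncnorm.rvs(a1, b1, loc=0, size=N)
estimate = np.mean(integrand1(x) / truncnorm.pdf(x, a1, b1, loc=0))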
Example #25
 def late(self):
     a, b = 2, 100
     x_sim = np.linspace(truncnorm.ppf(0.01, a, b),
                         truncnorm.ppf(0.99, a, b), self.n_intervals)
     y = np.exp(truncnorm.pdf(x_sim, a, b))
     y = y[::-1] / 15 + 1
     y -= y[0] - 1
     return y
Example #26
def trunc_visualization(parameters):
    [mu, sig, vmin, vmax] = parameters
    a, b = (vmin - mu) / sig, (vmax - mu) / sig
    x_range = np.linspace(0, 1, 1000)
    fig, ax = plt.subplots()
    sns.lineplot(x=x_range, y=truncnorm.pdf(x_range, a, b, loc=mu, scale=sig), label='pdf')
    sns.lineplot(x=x_range, y=truncnorm.cdf(x_range, a, b, loc=mu, scale=sig), label='cdf')
    ax.legend()
    return ax
Example #27
    def pdf(self, x: float):
        """Find the PDF for a certain x value.

        Args:
            x (float): The value for which the PDF is needed.
        """
        a, b = (self.lower_bound - self.mean) / self.std, (
            self.upper_bound - self.mean) / self.std
        return truncnorm.pdf(x, a, b, self.mean, self.std)
Example #28
def generate_rewards(sd=15, nb_samples=int(1e5), verbose=0):

    assert sd in [10, 15, 20, 25], 'standard deviation must be in [10, 15, 20, 25]'

    myclip_a = 1
    myclip_b = 99
    a1, b1 = (myclip_a - mean1) / sd, (myclip_b - mean1) / sd
    a2, b2 = (myclip_a - mean2) / sd, (myclip_b - mean2) / sd
    bound = (sd == 10) * 0.08 + (sd == 20) * 0.25 + (sd == 25) * 0.3 + (
        sd == 15) * 0.17

    while True:
        sampleslow = np.array(truncnorm.rvs(a1,
                                            b1,
                                            loc=mean1,
                                            scale=sd,
                                            size=nb_samples),
                              dtype=int)
        sampleshigh = np.array(truncnorm.rvs(a2,
                                             b2,
                                             loc=mean2,
                                             scale=sd,
                                             size=nb_samples),
                               dtype=int)
        if np.abs(
            (sampleslow > sampleshigh).mean() - bound) < epsilon and np.abs(
                np.std(sampleshigh) -
                sd) < 1 and np.abs(np.std(sampleslow) - sd) < 1 and np.abs(
                    sampleslow.mean() -
                    mean1) < 2 and np.abs(sampleshigh.mean() - mean2) < 2:
            break

    if verbose:
        from matplotlib import pyplot as plt
        print((sampleslow > sampleshigh).mean())
        plt.figure()
        x_range = np.arange(0, 100, 0.1)
        plt.plot(x_range, truncnorm.pdf(x_range, a1, b1, loc=mean1, scale=sd))
        plt.plot(x_range, truncnorm.pdf(x_range, a2, b2, loc=mean2, scale=sd))
        plt.plot(
            x_range, .5 * truncnorm.pdf(x_range, a2, b2, loc=mean2, scale=sd) +
            .5 * truncnorm.pdf(x_range, a1, b1, loc=mean1, scale=sd))

    return sampleslow, sampleshigh
Example #29
 def get_truncnorm_grid_old(self, pos, a, loc, scale):
     key = np.array([pos[0], pos[1], a, loc, scale])
     if key.tobytes() not in self.truncnorm_grid_cache:
         self.truncnorm_grid_cache[key.tobytes()] = truncnorm.pdf(
             self.grid.distance_grid(pos),
             a,
             b=np.inf,
             loc=loc,
             scale=scale)
     return self.truncnorm_grid_cache[key.tobytes()]
Example #30
def p(x1, x2, sig):
    """
    Defines the partition kernel: the probability density of a cell with
    internal coordinate x2 dividing into two cells in state x1.
    """
    mu = x2 / 2
    clipa = 0
    clipb = x2
    a, b = (clipa - mu) / sig, (clipb - mu) / sig
    return truncnorm.pdf(x1, a, b, loc=mu, scale=sig)
Example #31
def truncnorm_pdf(x, mean, var, lo, hi):
    a, b = _truncnorm_params_transform(mean, var, lo, hi)
    val = truncnorm.pdf(
        x,
        a,
        b,
        loc=mean,
        scale=sqrt(var),
    )
    return val
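A plausible implementation of the transform helper the wrapper assumes (my sketch, not the source's): convert clip bounds into scipy's standardized a and b.

from math import sqrt

def _truncnorm_params_transform(mean, var, lo, hi):
    sd = sqrt(var)
    return (lo - mean) / sd, (hi - mean) / sd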
Example #32
def test_trunc_norm():
    '''
    Should return values from a truncated normal distribution.

    '''
    # sample values from a distribution
    mu, sigma, trunc_min, trunc_max = 2, 1, 0, 5
    x = [_trunc_norm(mu, sigma, trunc_min, trunc_max) for _ in range(100000)]
    x = np.asarray(x)

    # simple check: values must be within truncated bounds
    assert (x >= trunc_min).all() and (x <= trunc_max).all()

    # trickier check: values must approximate distribution's PDF
    hist, bins = np.histogram(x, bins=np.arange(0, 10.1, 0.2), density=True)
    xticks = bins[:-1] + 0.1
    a, b = (trunc_min - mu) / float(sigma), (trunc_max - mu) / float(sigma)
    trunc_closed = truncnorm.pdf(xticks, a, b, mu, sigma)
    assert np.allclose(hist, trunc_closed, atol=0.015)
Example #34
import random, math, pylab
from scipy import integrate
from scipy.stats import truncnorm

uniform = False
alpha = 0.5

def one_dim_f(x, alpha):
    return math.exp(-0.5 * x ** 2 - alpha * x ** 4)

def gauss_cut():
    while True:
        x = random.gauss(0.0, 1.0)
        if abs(x) <= 1.0:
            return x

area = integrate.quad(one_dim_f, -1, 1, args=(alpha, ))[0]
print(area)

minVal = truncnorm.pdf(-1, -1., 1.)

nsteps = 100000
samples_x = []
samples_y = []
x, y = 0.0, 0.0
exp_old = - 0.5 * (x ** 2 + y ** 2) - alpha * (x ** 4 + y ** 4)
for step in range(nsteps):
    if uniform:
        xnew = random.uniform(-1.0, 1.0)
        ynew = random.uniform(-1.0, 1.0)
    else:
        xnew, ynew = gauss_cut(), gauss_cut()
    exp_new = - 0.5 * (xnew ** 2 + ynew ** 2) - alpha * (xnew ** 4 + ynew ** 4)
    if not uniform:
Example #35
import numpy as npy
import matplotlib.pyplot as plt
from scipy.stats import truncnorm

#radius_threshold and discrete_size are defined elsewhere in the source file
lower_bound = 0
upper_bound = radius_threshold

#Defining mean and standard deviation values.
#In the real system, both of these come from the learnt data.
mean = 5
sigma = 100

#Standardizing the bounds around the mean, as scipy's truncnorm expects.
lower_bound = (lower_bound - mean) / sigma
upper_bound = (upper_bound - mean) / sigma

rad_dist = npy.linspace(0,radius_threshold,discrete_size)

prob_dist_func = truncnorm.pdf(rad_dist,lower_bound,upper_bound,mean,sigma)

plt.plot(rad_dist,prob_dist_func)
plt.show()

rad_dist_2 = npy.linspace(0,radius_threshold,discrete_size)
prob_dist_func_2 = truncnorm.pdf(rad_dist_2,lower_bound,upper_bound,mean,sigma)

#Define function that computes the value to be added to the particular point, for a particular object to find, and an alternate object.

#spatial_rel_mean is the array of radius values stored from the learn_spatial_relationships cpp file. 
#spatial_rel_cov is the array of standard deviation values stored from the learn spatial rel cpp file. 

def lookup_value_add(find_obj_index, alt_obj_index, sample_pt, alt_obj_pose, alt_obj_pose_conf):
	#We take inputs as the index of the object that we would like to find, index of the object we are comparing with, 
	#Sample point which we are checking the value of, pose of the alternate object, and confidence of detection of the alternate object. 
Example #36
import random, math, pylab
from scipy.stats import truncnorm

def gauss_cut():
    while True:
        x = random.gauss(0.0, 1.0)
        if abs(x) <= 1.0:
            return x

uniform = False

alpha = 0.5
nsamples = 100000
samples_x = []
samples_y = []
normalization = truncnorm.pdf(0, -1.0, 1.0) ** 2.
for sample in range(nsamples):
    while True:
        if uniform:
            x = random.uniform(-1.0, 1.0)
            y = random.uniform(-1.0, 1.0)
        else:
            x = gauss_cut()
            y = gauss_cut()

        p = math.exp(-0.5 * (x ** 2 + y ** 2) - alpha * (x ** 4 + y ** 4))
        if not uniform:
            # divide by the truncated-Gaussian proposal density (normalized by its peak)
            p = p / (truncnorm.pdf(x, -1.0, 1.0) * truncnorm.pdf(y, -1.0, 1.0) / normalization)
        if random.uniform(0.0, 1.0) < p:
            break
    samples_x.append(x)
    samples_y.append(y)
Example #37
samples_x = []
samples_y = []
x, y = 0.0, 0.0

uniform = False

def one_dim_f(x, alpha):
    return math.exp(-0.5 * x ** 2 - alpha * x ** 4 )

def gauss_cut():
    while True:
        x = random.gauss(0.0, 1.0)
        if abs(x) <= 1.0:
            return x

normalization = truncnorm.pdf(1., -1.0, 1.0)
for step in range(nsteps):
    if step % 2 == 0:
        while True:
            if uniform:
                x = random.uniform(-1.0, 1.0)
            else:
                x = gauss_cut()
            p = one_dim_f(x, alpha)
            if not uniform:
                p = p / (truncnorm.pdf(x, -1.0, 1.0) / normalization)
            if random.uniform(0.0, 1.0) < p:
                break
    else:
        while True:
            if uniform: