def vmf_pdf(x, mu, kappa, log = False): """ Pdf of the von Mises-Fisher distribution for one input Parameters: mu: list, location parameter kappa: float/int, scale parameter. Large values of kappa corresponds to lower variance Returns: list, pdf function """ if (type(x) == int): x = [x] if (type(x) == float): x = [x] vm = np.zeros(len(x)) if (log): if (kappa == 0): vm = np.log(np.repreat(1/(2*pi), len(x))) elif (kappa < 100000): vm = -(np.log(2*math.pi)+np.log(scp.ive(0, kappa)) + kappa) + kappa*(np.cos(np.subtract(x - mu))) else: if (((x-mu)%(2*math.pi))==0): vm = math.inf else: vm = -math.inf else: if (kappa == 0): vm = np.repeat(1/(2*np.pi), len(x)) elif (kappa < 100000): vm = 1/(2 * np.pi * scp.ive(0, kappa)) * (np.exp(np.subtract(np.cos(np.subtract(x, mu)), 1)))**kappa else: if (np.mod(np.subtract(x, mu),(2*np.pi))==0): vm = math.inf else: vm = 0 return(vm)
def lnprob(theta):
    # hyp_loc_min, hyp_loc_max, self.pax and llik_star come from the enclosing scope
    # ------- Support ----------------------------------------------------------
    if theta[0] < hyp_loc_min or theta[0] > hyp_loc_max:
        return -np.inf
    # ------- Prior ------------------------------------------------------------
    # scipy's uniform is parameterised by loc and scale = width of the interval
    lp_theta_0 = st.uniform.logpdf(theta[0], loc=hyp_loc_min,
                                   scale=hyp_loc_max - hyp_loc_min)
    log_prior = lp_theta_0
    # ------- Likelihood -------------------------------------------------------
    # pair theta with every row of the data and sum the per-row log-likelihoods
    thetas = np.tile(theta, (self.pax.shape[0], 1))
    theta_pax = np.column_stack((thetas, self.pax))
    log_lik = np.sum(list(map(llik_star, theta_pax)))
    return log_prior + log_lik
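# lnprob returns log_prior + log_lik and -inf outside the support, which is the
# log-posterior form expected by ensemble samplers such as emcee. The driver below
# is an assumed usage sketch, not taken from the original code; it relies on
# hyp_loc_min/hyp_loc_max being available from the same scope lnprob closes over.
def run_sampler(nwalkers=32, nsteps=1000):
    import emcee
    ndim = 1
    # start the walkers uniformly inside the supported interval
    p0 = np.random.uniform(hyp_loc_min, hyp_loc_max, size=(nwalkers, ndim))
    sampler = emcee.EnsembleSampler(nwalkers, ndim, lnprob)
    sampler.run_mcmc(p0, nsteps)
    # discard a short burn-in and flatten the chain into posterior draws of theta[0]
    return sampler.get_chain(discard=nsteps // 5, flat=True)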