Code Example #1
File: GUI.py Project: srnthsrdhrn/VehicleTrackingGUI
    def graph(self):

        self.Posterior_in = MCMC.sampler(self.data_2min_in, samples=15000, mu_init=1.5)

        self.Posterior_out = MCMC.sampler(self.data_2min_out, samples=15000, mu_init=1.5)

        self.graph_call()
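For context, MCMC.sampler above is the project's own helper rather than a library routine. Purely as an illustration of what a Metropolis sampler with this kind of signature (data, samples, mu_init) typically does, here is a minimal self-contained sketch that samples the posterior of a normal mean with known spread; the prior, proposal width, and every other detail below are assumptions for illustration, not code taken from the project.

import numpy as np
from scipy import stats

def sampler(data, samples=15000, mu_init=0.5, proposal_width=0.5):
    # Metropolis sampler for the mean of normally distributed data (sd assumed known, equal to 1).
    mu_current = mu_init
    posterior = [mu_current]
    for _ in range(samples):
        # Propose a new mu from a symmetric normal proposal.
        mu_proposal = np.random.normal(mu_current, proposal_width)
        # Unnormalized posterior = likelihood * prior (log-space is more robust for large data sets).
        likelihood_current = stats.norm(mu_current, 1).pdf(data).prod()
        likelihood_proposal = stats.norm(mu_proposal, 1).pdf(data).prod()
        prior_current = stats.norm(0, 1).pdf(mu_current)
        prior_proposal = stats.norm(0, 1).pdf(mu_proposal)
        # Metropolis acceptance probability.
        p_accept = (likelihood_proposal * prior_proposal) / (likelihood_current * prior_current)
        if np.random.rand() < p_accept:
            mu_current = mu_proposal
        posterior.append(mu_current)
    return np.array(posterior)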
Code Example #2
File: test.py Project: AdkPete/ASTP720
def T0():
	data = 2
	T = MCMC.MCMC( P0 , data)


	X, MP , ML = T.M_H(Q0 , 10000 , np.array(  [ 10 , 3 ] ) )
	mkp0(X)
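P0 (the target log-probability) and Q0 (the proposal function) passed to M_H are defined elsewhere in test.py and are not shown here. Only to illustrate the shapes such callables usually take, a sketch with hypothetical definitions (these are assumptions, not the project's actual P0/Q0):

import numpy as np

def P0(theta, data):
    # Hypothetical log-posterior: Gaussian likelihood of the data around theta[0] with width theta[1].
    mu, sigma = theta
    if sigma <= 0:
        return -np.inf
    return float(np.sum(-0.5 * ((data - mu) / sigma) ** 2) - np.log(sigma))

def Q0(theta):
    # Hypothetical symmetric random-walk proposal.
    return theta + 0.1 * np.random.standard_normal(len(theta))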
Code Example #3
File: MIDAmodule.py Project: fangqx/MIDA
 def initData(self):
     # initialize parameter class
     try:
         param = DataType.paramType()
         param.readParams(self.paramFile)
         if len(self.paramCovFile) > 0:
             param.readCov(self.paramCovFile)
     except Exception:
         print('WARNING: Error occurred in initializing parameter class.')
         raise
     # initialize mcmc class
     try:
         mcmc_case = MCMC.MCMC_alg()
         mcmc_case.init_MCMC(self, param)
     except Exception:
         print('WARNING: Error occurred in initializing MCMC class.')
         raise
     # initialize data class
     try:
         nObs = len(self.obsDirList)
         dataList = np.array([DataType.dataType() for i in range(nObs)])
         if len(self.obsVarDirList) > 0:  # obsVar file is provided
             for i in range(nObs):
                 dataList[i].readObsData(self.obsDirList[i], self.obsVarDirList[i],
                                         self.simuDirList[i],
                                         self.workPath + '/' + 'config_' + str(i + 1) + '.txt')
         else:  # no obsVar provided, MIDA will calculate the variance of obs
             for i in range(nObs):
                 dataList[i].readObsData(self.obsDirList[i], '', self.simuDirList[i],
                                         self.workPath + '/' + 'config_' + str(i + 1) + '.txt')
     except Exception:
         print('WARNING: Error occurred in initializing data class.')
         raise
     return [param, mcmc_case, dataList]
Code Example #4
File: RV.py Project: AdkPete/ASTP720
def measure():

    ##Measures and returns our R and M estimate

    tr = 2454955.788373

    phi = tr * 2 * np.pi / (3.55)

    t, rvel = read_rv()

    DI = 0.0043
    Rstar = 1.79

    Rp = np.sqrt(Rstar**2 * DI)

    print(Rp)

    x0 = [3.55, 4.34505429e+06, 2.50998989e+02]

    opt = MCMC.MCMC(lsq, [t, rvel])
    opt.M_H(Q, 5000, x0)
    print(opt.MAP, opt.MAPL)
    plot_fit(t, rvel, opt.MAP)

    vp = opt.MAP[2] * u.m / u.s

    Mstar = 1.35 * u.M_sun

    pstar = Mstar * vp
    p_planet = pstar

    T = opt.MAP[0] * u.day
    m_planet = T**2 * (p_planet**6) / (4 * np.pi**2 * (const.G * Mstar)**2)
    m_planet = m_planet**(1.0 / 6)
    print(m_planet.to(u.M_sun))
Code Example #5
File: Run.py Project: DmitriiGudin/MCMC
def PART_1():
    # Generate clean data.
    data_x_1 = np.random.uniform(x_min_1, x_max_1, N_points_1)
    data_y_1 = A_1*data_x_1 + B_1
    # Add noise.
    data_x_1 += lib_distrib.gen_distrib (lib_distrib.Gauss, (0, dx_1), -dx_factor_1*dx_1, dx_factor_1*dx_1, 0, lib_distrib.Gauss(0, (0, dx_1)), N_points_1)
    data_y_1 += lib_distrib.gen_distrib (lib_distrib.Gauss, (0, dy_1), -dy_factor_1*dy_1, dy_factor_1*dy_1, 0, lib_distrib.Gauss(0, (0, dy_1)), N_points_1)
    # Plot the generated data.
    plot_initial_data_1 (data_x_1, data_y_1)
    # Perform the MCMC procedure. 
    data_1 = MCMC.MCMC (linear, (0,0), data_x_1, data_y_1, dp_1, dp_decay_1, N_coarse_1, N_fine_1, N_final_1, N_logging)
    # Retrieve the relevant data (the last N_final_1 batches):
    data_final_1 = data_1[N_coarse_1+N_fine_1+1:N_coarse_1+N_fine_1+N_final_1+1]
    # Calculate and output the optimal A, B values:
    A_1_opt, B_1_opt = np.median([d[1][0] for d in data_final_1]), np.median([d[1][1] for d in data_final_1])
    print "Optimal linear parameter values: A = ", A_1_opt, ", B = ", B_1_opt
    # Calculate the acceptance rates for all 3 modes. Print them out.
    acc_rates = calc_acceptance_rates ([d[2] for d in data_1[1:N_coarse_1+1]], [d[2] for d in data_1[N_coarse_1+1:N_coarse_1+N_fine_1+1]], [d[2] for d in data_final_1])
    print "Acceptance rate for the Coarse mode: ", round(acc_rates[0]*100, 2), "%."
    print "Acceptance rate for the Fine mode: ", round(acc_rates[1]*100, 2), "%."
    print "Acceptance rate for the Final mode: ", round(acc_rates[2]*100, 2), "%."
    # Plot the posterior distribution for A, B and both. Only for the Final mode.
    plot_distr ([[d[1][0] for d in data_final_1], [d[1][1] for d in data_final_1]], ['A','B'], 1)
    # Plot the cumulative sum graph and calculate 68% and 95% confidence intervals.
    plot_cumul ([[d[1][0] for d in data_final_1],[d[1][1] for d in data_final_1]], ['A','B'], 1, [0.68, 0.95])
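The helper calc_acceptance_rates is not part of this excerpt. Judging only from how it is called above (three lists of per-step accept flags, one per mode), a sketch consistent with that usage, offered as an assumption about the original helper, is:

import numpy as np

def calc_acceptance_rates(coarse_flags, fine_flags, final_flags):
    # Fraction of accepted proposals in each of the Coarse, Fine and Final modes.
    return [float(np.mean(flags)) if len(flags) > 0 else 0.0
            for flags in (coarse_flags, fine_flags, final_flags)]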
Code Example #6
File: test.py Project: AdkPete/ASTP720
def T1():
	
	data = np.random.poisson(3 , 1000)
	T = MCMC.MCMC(P1 , data)
	
	X , MP , ML = T.M_H(Q1 , 10000 , np.array( [ 5 ] ) )
	mkp1(X)
	print (MP , ML)
Code Example #7
def generateMCMC(filename):
    a = mcmc.MCMC()
    a.setSteps(100000)
    a.setSigmas(sigma)
    a.current_point = mean
    a.setLogLikelihoodFunction(logp)
    a.loop()
    f = open(filename, 'wb')
    pickle.dump(a, f)
    f.close()
    return a
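Because the sampler object is pickled to disk, it can be read back later for analysis; a small usage sketch (the filename here is hypothetical):

import pickle

with open('chain.pkl', 'rb') as f:  # hypothetical filename
    a = pickle.load(f)
print(a.current_point)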
Code Example #8
File: Cal_MIRT_main.py Project: tweister/MIRT-NUCT
def main():
    while True:
        print("""
        指令列表如下:
        1.查看不同题目作答组合的L曲线
        2.对题目作答进行DSY算法估计
        3.对题目作答进行MCMC算法估计
        4.导入xls类型的作答结果并对其进行分析和处理
        0.退出
        """)
        command = input('请输入需要执行的指令')
        a, b = Init_MIRT.Init_a_b()
        if command == '0':
            print('Thank you for using this program')
            break
        elif command == '1':
            time_star=time.time()
            Show_L.m_show(a, b)
            time_end=time.time()
            print("程序运行时间:%.8s s" %(time_end-time_star))
        elif command == '2':
            time_star = time.time()
            MIRT_DSY.DSY(a, b)
            time_end = time.time()
            print("程序运行时间:%.8s s" % (time_end-time_star))
        elif command == '3':
            time_star = time.time()
            MCMC.MCMC(a,b)
            time_end = time.time()
            print("程序运行时间:%.8s s" % (time_end-time_star))
        elif command == '4':
            print("""
            请输入需要处理的文件位置         
            """)
            while True:
                URL=input()
                if URL=='':
                    print("请输入有效地址")
                else:
                    print('测试完成,本功能还在开发中')
                    break
        print('finish')
        KEY = input('Press 1 to continue or 0 to stop')
        if (KEY!='1'):
            break
Code Example #9
def get_birdcounts_sample(datafile,
                          loglikelihood,
                          poly_deg,
                          dof=4,
                          pc=4,
                          maxruns=50000):
    bc = pd.read_csv(datafile, index_col=0)
    #bc = bc.drop("Total",axis=1)
    N = len(bc)
    lbd = 10. / numpy.sqrt((poly_deg + 1) * (N))
    sigma = numpy.eye((poly_deg + 1) * N)
    time = numpy.array([int(x) for x in bc.columns], dtype=float)
    time = Seasonal_ODE.time_transform(time)
    bc_mat = bc.as_matrix()
    dof = 4
    pc = 4
    init_guess = bc_init_guess(datafile, poly_deg)
    p_vals = numpy.zeros((N, len(time)))
    amean = numpy.zeros(N)
    bmean = numpy.zeros(N)
    coeff_mean = numpy.zeros((N, poly_deg + 1))
    coeff = []
    for i in range(N):
        loglikargs = (bc_mat[i], time)
        theta_bc, k_bc = MCMC.run_MCMC_convergence(init_guess[i, :],
                                                   loglikelihood,
                                                   loglikargs,
                                                   maxruns=maxruns,
                                                   pc=4,
                                                   method="Nelder-Mead")
        n = len(bc_mat[i])
        results = theta_bc[0][::10]
        #coeff = numpy.zeros((len(results),poly_deg+1))
        coeff.append(numpy.zeros((len(results), poly_deg + 1)))
        for j in range(poly_deg + 1):
            #coeff[:,j,numpy.newaxis] = results[:,j::poly_deg+1]
            coeff[-1][:, j, numpy.newaxis] = results[:, j::poly_deg + 1]
        #coeff_mean[i,:] = numpy.mean(coeff,axis=0)
        coeff_mean[i, :] = numpy.mean(coeff[-1], axis=0)
    return (coeff_mean, coeff)
Code Example #10
File: Run.py Project: DmitriiGudin/MCMC
def PART_2():
    # Retrieve the data set.
    data_x_2 = Data_part_2.x
    data_y_2 = Data_part_2.y
    # Plot the retrieved data.
    plot_initial_data_2 (data_x_2, data_y_2)
    # Perform the MCMC procedure.
    data_2 = MCMC.MCMC (sinusoidal, (0,0,0), data_x_2, data_y_2, dp_2, dp_decay_2, N_coarse_2, N_fine_2, N_final_2, N_logging)
    # Retrieve the relevant data (the last N_final_2 batches):
    data_final_2 = data_2[N_coarse_2+N_fine_2+1:N_coarse_2+N_fine_2+N_final_2+1]
    # Calculate and output the optimal A, B, C values:
    A_2_opt, B_2_opt, C_2_opt = np.median([d[1][0] for d in data_final_2]), np.median([d[1][1] for d in data_final_2]), np.median([d[1][2] for d in data_final_2])
    print "Optimal sinusoidal-linear parameter values: A = ", A_2_opt, ", B = ", B_2_opt, ", C = ", C_2_opt
    # Calculate the acceptance rates for all 3 modes. Print them out.
    acc_rates = calc_acceptance_rates ([d[2] for d in data_2[1:N_coarse_2+1]], [d[2] for d in data_2[N_coarse_2+1:N_coarse_2+N_fine_2+1]], [d[2] for d in data_final_2])
    print "Acceptance rate for the Coarse mode: ", round(acc_rates[0]*100, 2), "%."
    print "Acceptance rate for the Fine mode: ", round(acc_rates[1]*100, 2), "%."
    print "Acceptance rate for the Final mode: ", round(acc_rates[2]*100, 2), "%."
    # Plot the posterior distribution for A, B and both. Only for the Final mode.
    plot_distr ([[d[1][0] for d in data_final_2], [d[1][1] for d in data_final_2], [d[1][2] for d in data_final_2]], ['A','B','C'], 2)
    # Plot the cumulative sum graph and calculate 68% and 95% confidence intervals.
    plot_cumul ([[d[1][0] for d in data_final_2], [d[1][1] for d in data_final_2], [d[1][2] for d in data_final_2]], ['A','B','C'], 2, [0.68, 0.95])
    # Plot the predicted function over the initial dataset.
    plot_part_2_result (data_x_2, data_y_2, sinusoidal, (A_2_opt, B_2_opt, C_2_opt))
Code Example #11
File: bloodmeal.py Project: mullert613/EEE_Code
def get_bloodmeal_sample(datafile,
                         loglikelihood,
                         poly_deg,
                         dof=4,
                         pc=4,
                         maxruns=50000):
    bm = pd.read_csv(datafile, index_col=0)
    #bm=bm.drop("Total",axis=1)
    N = len(bm)
    time = numpy.array([int(x) for x in bm.columns], dtype=float)
    time = Seasonal_ODE.time_transform(time)
    init_guess = numpy.zeros((poly_deg + 1) * (N - 1))
    loglikargs = (bm.as_matrix(), time)
    #the_0 = MCMC.sample_dist(init_guess,pc,loglikelihood,loglikargs)
    theta_bm, k_bm = MCMC.run_MCMC_convergence(init_guess,
                                               loglikelihood,
                                               loglikargs=(bm.as_matrix(),
                                                           time),
                                               maxruns=maxruns,
                                               pc=4)

    n = len(bm.as_matrix())
    results = theta_bm[0][::10]
    coeff_mean = numpy.zeros((N - 1, poly_deg + 1))
    for j in range(poly_deg + 1):
        coeff_mean[:, j] = numpy.mean(results[:, j::poly_deg + 1], axis=0)
    # a = results[:,0::2]
    # b = results[:,1::2]
    # a0= numpy.percentile(a,5,axis=0)
    # a1= numpy.percentile(a,95,axis=0)
    # amean = numpy.mean(a,axis=0)
    # b0= numpy.percentile(b,5,axis=0)
    # b1= numpy.percentile(b,95,axis=0)
    # bmean = numpy.mean(b,axis=0)

    return (coeff_mean, results)
Code Example #12
T = 3  # The length of MCMC is T
t = 0
Para = Function.GeneratePara(t, Para0=None, D=None, P=None)  # Initialize Para
D = None
P = None  # Get D1 and P1
df = 1
# Begin to find the best Para and its relevant parameters D, chi2 Cov etc-----------------------------------------------
t += 1
ParaSet = array(Para)  # The set of the accepted Para
chi2 = Function.chi2(Para, Data, EoS)
chi2Set = array([chi2
                 ])  # The set of the chi2 corresponding to the accepted Para
Accept = array([1])  # The set of the Accept corresponding to the accepted Para
while t <= T:
    [Para, chi2, D, P, ParaSet, chi2Set, Accept, mu, Cov] = \
        MCMC.MarkovChain(Data, EoS, Para, chi2, D, P, ParaSet, chi2Set, Accept, t)
    t += 1
CI = Function.CI(df, chi2)
[index, ProbabilityDensity, Probability] = Function.CIindex(CI, Accept)
Deviation = abs(Para - ParaSet[index])
# The NS's maximum mass and its upper limits and lower limits ----------------------------------------------------------
TOV_output = Function.TOV(Para, EoS)
M_NS_max = max(TOV_output[2])
[Para_Uplims, Para_Lolims, M_NS_max_Uplims,
 M_NS_max_Lolims] = Para_Uplims_Lolims(Para, Deviation, EoS, M_NS_max)
End = time.clock()
TimeUsed = End - Start

# Output Result --------------------------------------------------------------------------------------------------------
ResultDir = os.getcwd() + '/Result/'
Code Example #13
File: LotkaVolterra.py Project: juanra31a/Tarea-8
    return odeint(LotkaVolterra,state0,t, args=(alpha,beta,gamma, delta))

def chi_2 (tiempos, X_obs, params):
    x_obs =X_obs[0]
    y_obs =X_obs[1]
    chi2_1 = sum((x_obs - my_model(tiempos,params)[:,0])**2)
    chi2_2 = sum((y_obs - my_model(tiempos,params)[:,1])**2)
    return chi2_1+chi2_2

guess = [30,5,50,2]
step_size = [0.01,0.01,0.01,0.01]

n_params = 4
n_points = 1000

best, walk, chi2 = MCMC.hammer(tiempos,[x_obs , y_obs] , guess, chi_2, step_size ,n_params, n_points)

print "El valor de alpha es", best[0]
print "El valor de beta es", best[1]
print "El valor de gamma es", best[2]
print "El valor de delta es", best[3]

f1=open('valores.dat', 'w+')

for i in range(n_points):
    f1.write('%f %f %f %f %f\n' %(walk[0,i], walk[1,i], walk[2,i], walk[3,i],chi2[i]))


#UNCERTAINTIES: select a few values that satisfy the requested conditions:

Code Example #14
# alpha_doublepl = alpha*(c**(tau-sigma))/tau
# t = (sigma*size/alpha_doublepl)**(1/sigma) # is size the final or the original one?
# u = TruncPois.tpoissrnd(t*w0)
t = (sigma * size / alpha)**(1 / sigma)
u = TruncPois.tpoissrnd(t * w)
n = Updates.update_n(w, G, p_ij)  # on the old or the new w?

# output = MCMC("w_gibbs", "exptiltBFRY", iter, sigma=sigma_true, tau=tau_true, alpha=alpha_true, u=u_true,
#               p_ij=p_ij, n=n_true)
output = MCMC("w_gibbs",
              "GGP",
              iter,
              sigma_tau=0.08,
              tau=tau,
              sigma=sigma,
              alpha=alpha,
              u=u,
              n=n,
              p_ij=p_ij,
              c=c,
              w_init=w)
# output = MCMC("w_HMC", "exptiltBFRY", iter, epsilon=epsilon, R=R,
#               tau=tau_true, sigma=sigma_true, alpha=alpha_true, u=u_true, n=n_true, p_ij=p_ij, w_init=w)
output = MCMC("w_HMC",
              "GGP",
              iter,
              epsilon=epsilon,
              R=R,
              tau=tau,
              sigma=sigma,
              alpha=alpha,
Code Example #15
def chi_2(tiempos, X_obs, params):
    x_obs = X_obs[0]
    y_obs = X_obs[1]
    chi2_1 = sum((x_obs - my_model(tiempos, params)[:, 0])**2)
    chi2_2 = sum((y_obs - my_model(tiempos, params)[:, 1])**2)
    return chi2_1 + chi2_2


guess = [30, 5, 50, 2]
step_size = [0.01, 0.01, 0.01, 0.01]

n_params = 4
n_points = 1000

best, walk, chi2 = MCMC.hammer(tiempos, [x_obs, y_obs], guess, chi_2,
                               step_size, n_params, n_points)

print "El valor de alpha es", best[0]
print "El valor de beta es", best[1]
print "El valor de gamma es", best[2]
print "El valor de delta es", best[3]

f1 = open('valores.dat', 'w+')

for i in range(n_points):
    f1.write('%f %f %f %f %f\n' %
             (walk[0, i], walk[1, i], walk[2, i], walk[3, i], chi2[i]))

#UNCERTAINTIES: select a few values that satisfy the requested conditions:

plt.scatter(tiempos, y_obs, c='g')
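Both this example and Code Example #13 call my_model, whose body (shown only partially in #13) integrates a LotkaVolterra right-hand side with odeint. As a sketch of a standard Lotka-Volterra right-hand side compatible with that odeint call (the parameter ordering here is an assumption):

from scipy.integrate import odeint

def LotkaVolterra(state, t, alpha, beta, gamma, delta):
    # Standard predator-prey equations: x is the prey population, y the predator population.
    x, y = state
    dxdt = alpha * x - beta * x * y
    dydt = delta * x * y - gamma * y
    return [dxdt, dydt]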
Code Example #16
###### Calculate the total number of datapoints
datapoints = 0
for phase in spectra_observed.keys():
    for spectrum in spectra_observed[phase].keys():
        datapoints += len(spectra_observed[phase][spectrum])

parameters['OTHER']['total_datapoints'] = datapoints
"""
==================
Main MCMC sampling
==================
"""
###### Set the initial positions of the walkers ################################
parameters, parameters_init = \
            MCMC.mcmc_initial_positions(parameters,
                                  prev_chain=parameters['OTHER']['previous_chain'],
                                  dir_previous_chain=parameters['OTHER']['dir_previous_chain'])

###### Check if the priors are in the accepted range ###########################
for walker in range(parameters['OTHER']['n_walkers']):

    parameters_add = MCMC.calc_additional_par(parameters,
                                              parameters_init[walker, :])

    while MCMC.ln_prior(parameters_init[walker, :], parameters,
                        parameters_add) == -np.inf:

        pars, parameters_init[walker, :] = MCMC.mcmc_initial_positions(
            parameters, single_walker=True)
        parameters_add = MCMC.calc_additional_par(parameters,
                                                  parameters_init[walker, :])
Code Example #17
def main(argv):
    t0 = time.time()
    filename = 'test.pkl'
    nStep = 10000
    nThreads = 1
    dirname = './'
    matrix = ''
    alpha = 0

    try:
        opts, args = getopt.getopt(argv, "f:n:t:a:d:m:")
    except getopt.GetoptError:
        print('usage: test.py -f <file> -n <nStep> -t <nThreads> -a <alpha> -d <dirname> -m <matrix>')
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-f':
            filename = arg
        elif opt == '-n':
            nStep = int(arg)
        elif opt == '-t':
            nThreads = int(arg)
        elif opt == '-a':
            alpha = float(arg)
        elif opt == '-d':
            dirname = arg

    model = load_model("datasets/R_resolution.csv",
                       "datasets/B_resolution.csv")

    #observed = np.loadtxt("datasets/observed_mock_equal.txt")

    fluxD = 100 + np.array([2 * i for i in range(model.nBinsBT)])
    fluxP = 100 + np.array([2 * i for i in range(model.nBinsBT)])
    # fluxP = 100 + np.zeros(model.nBinsBT)
    flux = np.concatenate([fluxP, fluxD])

    observed = make_mock_observation(fluxP, fluxD)

    def logp(value):
        value = np.array(value)
        if (value < 0).any(): return -np.inf

        # value must be and array twice the size of binning
        expected = model(*value.reshape((2, model.nBinsBT)))
        log = (observed * np.log(expected) - expected).sum()
        # Didn't figure that out yet
        #firstDerivative = np.diff(np.log(value))
        #secondDerivative = np.fabs(np.diff(firstDerivative))
        #smoothness = -(alpha * secondDerivative).sum()
        return log  #+ smoothness

    sigma = 5

    def proposal_function(previous_point):
        point = np.zeros(len(previous_point))
        #return previous_point+self.sigma*np.random.standard_normal(self.nVar)
        for i in range(len(previous_point)):
            while True:
                val = previous_point[i] + 0.01 * np.random.standard_normal()
                if val > 0:
                    point[i] = val
                    break
        return point

    filename = 'alpha{}_'.format(alpha) + filename

    threads = []

    for i in range(nThreads):
        theFileName = dirname + '/thread{}_'.format(i) + filename
        a = MCMC.MCMC(theFileName,
                      initialCondition=flux[:],
                      realValues=flux[:])
        a.setProposalFunction(proposal_function)
        a.setLogLikelihoodFunction(logp)
        a.setSteps(nStep)
        threads.append(a)

    for t in threads:
        print('launching thread')
        t.start()
        time.sleep(1)

    for t in threads:
        t.join()

    print('done')
    print('time : {}'.format(time.time() - t0))
Code Example #18
def runShatellite(numOfSlices,
                  Acloud,
                  rateDiss,
                  speedCloud,
                  w,
                  ndata,
                  fastFoward,
                  Days,
                  nwalkers,
                  nsamples,
                  nsteps,
                  timespan,
                  phispan,
                  burning,
                  plot=True,
                  mcmc=True):

    #Maximum number of slices is hPerDayHours
    hPerDay = int((w / (2 * np.pi))**(-1))
    if (numOfSlices > hPerDay):
        print("Cannot have more than 24 number of slices for now")
        return 0, 0
    #Generate the initial condition of the planet
    surf = M_init.initialPlanet(numOfSlices, False)
    clouds = M_init.cloudCoverage(numOfSlices)
    finalTime = []
    apparentTime = []
    l, d = dataAlbedoDynamic(numOfSlices,
                             Days,
                             w,
                             Acloud,
                             surf,
                             clouds,
                             rateDiss,
                             speedCloud,
                             Animation=False)

    print("Got sum fake data. YOHO, SCIENCE BITCH!")

    for i in range(1, Days + 1):
        #Separates the effective albedo and longitude per day.
        effective = d[(i - 1) * numOfSlices:(i) * (numOfSlices)]
        lon = l[(i - 1) * numOfSlices:(i) * (numOfSlices)]
        #Calculates the apparent albedo with the forward model.
        time, apparent = M_init.apparentAlbedo(effective,
                                               time_days=timespan,
                                               long_frac=phispan,
                                               n=5000,
                                               plot=False,
                                               alb=True)
        finalTime.append(time + (hPerDay * (i - 1)))
        apparentTime.append(apparent)

    finalTime = np.asarray(finalTime).flatten()
    apparentTime = np.asarray(apparentTime).flatten()
    t, a = extractN(finalTime, apparentTime, ndata * Days)
    print("Done extracting {}".format(numOfSlices))
    #Plotting
    if plot:
        fig, ax = plt.subplots(1,
                               1,
                               gridspec_kw={'height_ratios': [1]},
                               figsize=(10, 8))
        for i in range(Days + 1):
            ax.axvline((i) * hPerDay, color='orange', alpha=1, zorder=10)
        ax.plot(finalTime,
                apparentTime,
                '-',
                color='black',
                linewidth=5,
                label="Simulated curve")
        ax.errorbar(t,
                    a,
                    fmt='.',
                    color='green',
                    yerr=np.asarray(a) * 0.02,
                    markersize=8,
                    solid_capstyle='projecting',
                    capsize=4,
                    label="selected {} data".format(ndata))
        ax.set_xlabel("Time (h)", fontsize=22)
        ax.set_ylabel("Apparent Albedo ($A^*$)", fontsize=22)
        ax.tick_params(labelsize=22)
        ax.legend(fontsize=15)
    chainArray = []
    alb = []
    if (mcmc):
        #Implement the MCMC running stuff in a seperate function
        for i in range(1, Days + 1):
            time = t[(i - 1) * ndata:i * ndata]
            app = a[(i - 1) * ndata:i * ndata]
            #Maybe this is wrong, check this, fix this stuff
            lon = np.asarray(l[(i - 1) * numOfSlices:(i) * (numOfSlices)])
            lon = np.asarray([val % 360 for val in lon])  # wrap to [0, 360)
            lon[lon == 0] = 360
            m.MCMC(nwalkers, nsteps, numOfSlices, time, app, lon, timespan,
                   phispan, burning, hPerDay, chainArray, i, ax, plot)
            print("done MCMC for day {}".format(i))
    for chain in chainArray:
        alb.append(m.mcmc_results(chain, burning))
    return alb
Code Example #19
def runSatellowan(numOfSlices,
                  Acloud,
                  npara,
                  rateDiss,
                  speedCloud,
                  w,
                  ndata,
                  fastFoward,
                  Days,
                  nwalkers,
                  nsamples,
                  nsteps,
                  timespan,
                  phispan,
                  burning,
                  plot=True,
                  mcmc=True,
                  repeat=False,
                  walkers=False,
                  forming=True,
                  Epic=None):
    #Need to make this a bit more efficient
    ndim = numOfSlices
    hPerDay = int((w / (2 * np.pi))**(-1))
    if plot:
        fig, ax = plt.subplots(1,
                               1,
                               gridspec_kw={'height_ratios': [1]},
                               figsize=(10, 4))

    if type(Epic) == type(None):
        #if (numOfSlices>24):
        #    print ("Cannot have more than 24 number of slices for now")
        #    return 0,0
        surfDict = M_init.initialPlanet(numOfSlices, plot=True)
        surf = np.fromiter(surfDict.values(), dtype=float)
        surf = [0.458, 0.327, 0.332, 0.263]
        print("The planet's surface albedo is theoritically", surf)
        clouds = M_init.cloudCoverage(numOfSlices)
        clouds = [0, 0, 0, 0]
        print(
            "The effective albedo is ",
            M_init.effectiveAlbedo(numOfSlices,
                                   Acloud,
                                   plot=False,
                                   calClouds=clouds,
                                   calsurf=surf))
        finalTime = []
        apparentTime = []
        print(clouds, "Cloud coverage is ")
        d = dataAlbedoDynamic(numOfSlices,
                              Days,
                              w,
                              Acloud,
                              surf,
                              clouds,
                              rateDiss,
                              speedCloud,
                              Animation=False,
                              hourlyChange=False,
                              repeat=repeat,
                              forming=forming)
        for i in range(1, Days + 1):
            effective = d[(i - 1) * numOfSlices:(i) * (numOfSlices)]
            print("The albedo map for day {} is ".format(i - 1), effective)
            #start = tyme.time()
            time, apparent = M_init.apparentAlbedo(effective,
                                                   time_days=timespan,
                                                   long_frac=phispan,
                                                   phi_obs_0=0,
                                                   n=10000,
                                                   plot=False,
                                                   alb=True)
            #tim_comp, app_comp = ck_lib.lightcurve(effective,time_days=timespan,
            #        long_frac=phispan,phi_obs_0=0,n=10000,plot=False,alb=True)
            #end = tyme.time()
            #print (end-start)
            finalTime.append(time + (hPerDay * (i - 1)))
            apparentTime.append(apparent)

        finalTime = np.asarray(finalTime).flatten()
        apparentTime = np.asarray(apparentTime).flatten()

        t, a = extractN(finalTime, apparentTime, ndata, Days)
        noise = [
            np.random.normal(loc=0, scale=0.02 * a[i]) for i in range(len(a))
        ]
        a = np.array(a) + np.array(noise)
        if plot:
            fig, ax = plt.subplots(1,
                                   1,
                                   gridspec_kw={'height_ratios': [1]},
                                   figsize=(10, 3))
            tim, albedo = drawAlbedo(d, Days, 5000)
            ax.errorbar(t,
                        a,
                        fmt='.',
                        yerr=0.02 * np.array(a),
                        color='black',
                        markersize=8,
                        label="Simulated lightcurve",
                        solid_capstyle='projecting',
                        capsize=4)
            #ax.plot(tim_comp,app_comp,'.',color='orange',linewidth=4,label="Early Version of F.M.")
            #ax.plot(tim,albedo,color='purple',linewidth=6,alpha=0.6,label=r'$A_{T.O.P}(\phi)$')
            #ax.plot(t,a,'.',color='purple',label='Albedo Generated for {} slices'.format(numOfSlices),alpha=0.3)
            ax.set_xlabel("Time (h)", fontsize=22)
            ax.set_ylabel("Apparent Albedo ($A^*$)", fontsize=22)

            #ax.legend(fontsize=17)
            ax.tick_params(labelsize=22)
    else:
        t = Epic[0]
        a = Epic[1]
        t = (t - t[0]) * 24
        ax.errorbar(t,
                    a,
                    fmt='.',
                    yerr=Epic[3],
                    color='black',
                    markersize=8,
                    label="EPIC data",
                    solid_capstyle='projecting',
                    capsize=4)

    print("Done extracting {}".format(numOfSlices))
    chainArray = []
    alb = []
    if plot:
        #ax.errorbar(t,a,fmt='.',color='blue',yerr = np.asarray(a)*0.02,markersize=10,solid_capstyle='projecting', capsize=4,
        #            label= "Raw Data from EPIC")
        ax.set_xlabel("Time (h)", fontsize=22)
        ax.set_ylabel("Apparent Albedo ($A^*$)", fontsize=22)
        if type(Epic) != type(None):
            title = r"EPIC data [$d$ = {}] ".format(util.date_after(Epic[2]))
            #title = r"Forward model for {} slice albedo map".format(numOfSlices)
            ax.set_title(title, fontsize=22)
        ax.legend(fontsize=20)
        ax.tick_params(labelsize=25)
    if (mcmc):
        #Implement the MCMC running stuff in a seperate function
        for i in range(1, Days + 1):
            time = t[(i - 1) * ndata:i * ndata]
            app = a[(i - 1) * ndata:i * ndata]
            chain = m.make_chain(nwalkers,
                                 nsteps,
                                 ndim,
                                 time,
                                 app,
                                 timespan,
                                 phispan,
                                 alb=True)
            chainArray.append(chain)
            print(
                "Well call me a slave, because I just made some chains for day {}..."
                .format(i))
            mean_mcmc_params = m.mcmc_results(chain, burning)
            mean_mcmc_time, mean_mcmc_ref = M_init.apparentAlbedo(
                mean_mcmc_params,
                time_days=timespan,
                long_frac=phispan,
                n=10000,
                plot=False,
                alb=True)
            print("Got the mean MCMC results for day {}. Ya YEET!".format(i))

            flat = m.flatten_chain(chain, burning)
            sample_params = flat[np.random.randint(len(flat), size=nsamples)]
            for s in sample_params:
                sample_time, sample_ref = M_init.apparentAlbedo(
                    s,
                    time_days=timespan,
                    long_frac=phispan,
                    n=10000,
                    plot=False,
                    alb=True)
                sample_time = np.asarray(sample_time)
                mean_mcmc_params = np.asarray(mean_mcmc_params)
                plotting_x = np.asarray(sample_time) + (i - 1) * hPerDay
                if (plot):
                    ax.plot(plotting_x, sample_ref, color='k', alpha=0.1)
            if (plot):
                ax.plot(plotting_x,
                        sample_ref,
                        color='k',
                        alpha=0.1,
                        label='50 samples from MCMC')
                ax.plot(mean_mcmc_time + (i - 1) * hPerDay,
                        mean_mcmc_ref,
                        color='red',
                        label="Mean MCMC")
                ax.legend(fontsize=20)

            #aic = Aic(app,mean_mcmc_ref,npara)
            #bic = Bic(app,mean_mcmc_ref,npara,ndata)
            print("done MCMC for day {}".format(i))
    if plot:
        plt.show()

    counter = 0
    effmean = []
    #hour = dt.datetime.now().time()
    #fileName = hour.strftime("%H:%M:%S").replace(":","_",2)
    #f= open("MCMC_{}__{}.csv".format(fileName,numOfSlices),"w+")
    #f.write(''.join(str(effectiveAlbedo(numOfSlices,Acloud,plot=False,calClouds=clouds,calsurf=surf)).replace("[","",1).replace("]","",1))+"\n")
    for chainy in chainArray:
        results = m.mcmc_results(chainy, burning)
        if (walkers):
            m.plot_walkers_all(chainy, expAlb=None)
            m.cornerplot(chainy, burning)
            plt.show()
        alb.append(results)
        print("The result for day {} is".format(counter), results)
        #f.write(''.join(str(results).replace("[","",1).replace("]","",1))+"\n")
        effmean.append(np.mean(results))
        counter += 1
    #f.close()
    if (plot):
        x = np.arange(counter)
        mean = np.mean(effmean)
        error = np.std(effmean) / np.sqrt(len(effmean))
        fig, ax = plt.subplots(1,
                               1,
                               gridspec_kw={'height_ratios': [1]},
                               figsize=(10, 8))
        ax.plot(x, effmean, '.', markersize=8, color='purple')
        ax.axhline(np.mean(
            M_init.effectiveAlbedo(numOfSlices,
                                   Acloud,
                                   plot=False,
                                   calClouds=clouds,
                                   calsurf=surf)),
                   linewidth=8,
                   color='orange',
                   label='Expected Mean')
        ax.axhline(mean + error, linewidth=8, color='purple', alpha=0.3)
        ax.axhline(mean - error, linewidth=8, color='purple', alpha=0.3)
        ax.axhline(mean, linewidth=8, color='purple', label='Found Mean')
        ax.set_xlabel("Day", fontsize=22)
        ax.set_ylabel("Average albedo from MCMC", fontsize=22)
        ax.legend(fontsize=15)
        plt.show()
    minAlb = minimumAlb(alb)
    return alb, minAlb, surf
Code Example #20
File: EmmisionLines.py Project: juanra31a/Tarea-8
def chi_2 (x_obs, y_obs, params):
    chi2 = sum((y_obs - my_model(x_obs,params))**2)
    return chi2


datos = np.loadtxt("energy_counts.dat")
x_obs = datos[:,0]
y_obs = datos[:,1]
guess = [5*(10**16), 1000, 1400, 10**2, -2]
step_size = [5*10**14, 10, 0.1, 0.1, 0.01]

n_params = 5
n_points = 1000000

best, walk, chi2 = MCMC.hammer(x_obs, y_obs, guess, chi_2, step_size ,n_params, n_points)

print "El valor de A es", best[0]
print "El valor de B es", best[1]
print "El valor de E0 es", best[2]
print "El valor de sigma es", best[3]
print "El valor de alpha es", best[4]

plt.plot(walk[1,:],chi2)
plt.xlabel('$\\alpha$')
plt.ylabel('$\chi^2$')
plt.title('$\chi^2$ vs. $\\alpha$')
plt.savefig("x2vsalpha.pdf")
plt.close()
plt.plot(walk[0,:],walk[1,:])
plt.xlabel('$A$')
Code Example #21
File: EmmisionLines.py Project: JuanRAlvarez/Tarea-8
def chi_2(x_obs, y_obs, params):
    chi2 = sum((y_obs - my_model(x_obs, params))**2)
    return chi2


datos = np.loadtxt("energy_counts.dat")
x_obs = datos[:, 0]
y_obs = datos[:, 1]
guess = [5 * (10**16), 1000, 1400, 10**2, -2]
step_size = [5 * 10**14, 10, 0.1, 0.1, 0.01]

n_params = 5
n_points = 1000000

best, walk, chi2 = MCMC.hammer(x_obs, y_obs, guess, chi_2, step_size, n_params,
                               n_points)

print "El valor de A es", best[0]
print "El valor de B es", best[1]
print "El valor de E0 es", best[2]
print "El valor de sigma es", best[3]
print "El valor de alpha es", best[4]

plt.plot(walk[1, :], chi2)
plt.xlabel('$\\alpha$')
plt.ylabel('$\chi^2$')
plt.title('$\chi^2$ vs. $\\alpha$')
plt.savefig("x2vsalpha.pdf")
plt.close()
plt.plot(walk[0, :], walk[1, :])
plt.xlabel('$A$')
Code Example #22
import MCMC
import numpy as np

print("Hello!")

J = 3
s = MCMC.State(np.array(range(J)) * 1.0, np.array(range(J)), np.zeros(3), 0.0)
print(s)
print(type(s))
print(s.alpha)
for a in s.alpha:
    print(a)


def fieldnames(x):
    return list(filter(lambda xi: xi[0:2] != '__', x.__dir__()))


fieldnames(s)

MCMC.cmodel(np.random.randn(100), 10, s, {'N': 100}, nmcmc=1000, nburn=1000)
Code Example #23
@deterministic(plot=False)
def rate(s=switchPoint, e=early_mean, l=late_mean):
    ''' Concatenate Poisson means '''
    out = np.empty(len(disasters_array))
    out[:s] = e
    out[s:] = l
    return out

disasters = Poisson('disasters', mu=rate, value=disasters_array, observed=True)

from pymc import MCMC
M = MCMC(disasters)
M.sample(iter=10000, burn=1000, thin=10)
M.trace('switchPoint')[:]
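The session above assumes that switchPoint, early_mean, late_mean and disasters_array were defined earlier. A minimal sketch of those definitions in the style of the classic PyMC 2 coal-mining-disasters example (the prior choices and the shortened data array are assumptions, not taken from the original session):

import numpy as np
from pymc import DiscreteUniform, Exponential

# Yearly disaster counts (shortened here; the classic example uses 111 years of data).
disasters_array = np.array([4, 5, 4, 0, 1, 4, 3, 4, 0, 6, 3, 3, 4, 0, 2, 6, 3, 3, 5, 4])

# Priors: a discrete switch year plus exponential rates before and after it.
switchPoint = DiscreteUniform('switchPoint', lower=0, upper=len(disasters_array) - 1)
early_mean = Exponential('early_mean', beta=1.0)
late_mean = Exponential('late_mean', beta=1.0)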
Code Example #24
    tstart = Seasonal_ODE.time_transform(90)  # Setting Start Time to April 1st
    tend = Seasonal_ODE.time_transform(270)
    flag = 2
    write_flag = 0  # If set to 0 will write the results, if set to 1 will not
    if flag == 0:  # This section runs the MCMC for the various areas of interest, and stores the results
        mos_coeff, mos_results = bloodmeal.vector_coeff(
            msq_file, MCMC.poly_poiss_log_lik, poly_deg, maxruns=maxruns)
        bm_coeff_mat, bm_results = bloodmeal.get_bloodmeal_sample(
            bm_file,
            MCMC.bm_polynomial_loglikelihood,
            poly_deg,
            maxruns=maxruns)
        bc_coeff_mat, bc_results = BirdCount.get_birdcounts_sample(
            bc_file, MCMC.poly_poiss_log_lik, poly_deg, maxruns=maxruns)
        #BirdCount.BirdcountTest(bc_file,bc_coeff_mat,poly_deg)
        bc_DIC = MCMC.get_DIC(bc_results, MCMC.poly_poiss_log_lik, bc_file)
        bm_DIC = MCMC.DIC(bm_results, MCMC.bm_polynomial_loglikelihood,
                          bm_data, Seasonal_ODE.time_transform(bm_time))
        mos_DIC = MCMC.get_DIC(mos_results, MCMC.poly_poiss_log_lik, msq_file)

        if write_flag == 0:
            with open('Mos_coeff_poly_deg(%d).pkl' % poly_deg, 'wb') as output:
                pickle.dump(mos_coeff, output)

            with open('bloodmeal_coeff_poly_deg(%d).pkl' % poly_deg,
                      'wb') as output:
                pickle.dump(bm_coeff_mat, output)

            with open('host_coeff_poly_deg(%d).pkl' % poly_deg,
                      'wb') as output:
                pickle.dump(bc_coeff_mat, output)
Code Example #25
File: main.py Project: AdkPete/ASTP720
            continue

        time.append(float(i.split()[0]))
        flux.append(float(i.split()[1]))

    return time, flux


t, f = read_lightcurve("lightcurve_data.txt")

x0 = [3.5, 2.2, .1, 0.3]  ##A reasonable initial guess

r = [3.546, 2.25, .007,
     .15]  ##This is my current estimate for our best solution

opt = MCMC.MCMC(P, [t, f])

X, M, ML = opt.M_H(
    Q2, 5000, r
)  ##Lots of iterations, so somewhat slow. Good chance to find the right answer.

print(opt.MAP, opt.MAPL)
plot_fit(opt.MAP, [t, f])

p_list = []
di_list = []
for i in X[500::]:

    p_list.append(i[0])
    di_list.append(i[2])
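A common follow-up (not shown in this excerpt) is to turn the collected chain samples into an estimate with an uncertainty; one conventional sketch, using percentiles of the samples gathered above:

import numpy as np

# 16th/50th/84th percentiles give a median estimate with a roughly 1-sigma credible interval.
p_lo, p_med, p_hi = np.percentile(p_list, [16, 50, 84])
print("parameter estimate: {:.4f} (+{:.4f} / -{:.4f})".format(p_med, p_hi - p_med, p_med - p_lo))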
Code Example #26
import numpy as np  # needed for the np.array calls below
import plotting as p
import experiments as e
import MCMC as m
import matplotlib.pyplot as plt

# pass the path where Submission folder is saved.
path = "C:/Users/Shruti Jadon/Desktop/590_HW3/"
J = np.array([0, 1, 1, 0, 0, 0, 1, 0])
B = np.array([1, 1, 0, 1, 1, 0, 0, 0])

#(F.) output
J = np.array([0, 0, 1, 1, 1])  # pass the J, B and alpha Values
B = np.array([1, 1, 0, 0, 1])
alpha = 0.6
output = m.MetropolisJsampling(
    J, B, alpha
)  # call the Metropolis J-sampling function; new J states are proposed by flipping one random bit
print("output of (f.) question, i.e. Jar Values")
print(output)  # print values in console
p.BarGraph(output, path, "output of (f.) question")  # plot a bar graph of the sampled output

#(F.) running Experiment of J_random to check if we get same output or not.
output_experiment = e.MC_Jar_Experiment(
    J, B, alpha)  # calling experiment metropolis Sampling question
print "output_experiment values"
print output_experiment
p.BarGraph(
    output_experiment, path,
    "Experimental output of (f.) question")  # plotting graph of Experiment

#(H.) output