def fit_rabi_flop(data, settings):
    if settings is None:
        return data

    p = settings.get('p')
    p_fix = settings.get('p_fix', [])
    func = settings.get('func', 'sine')
    state = settings.get('state', 'frac')
    times = data['sequencer']['*Trabi']
    xlim = settings.get('xlim', [min(times), max(times)])
    func = globals()[func]  # resolve the model function (e.g. 'sine') by name
    in_range = np.where(np.logical_and(times >= xlim[0], times <= xlim[1]))
    x = times[in_range]
    y = data['pmt'][state][in_range]
    p_fit, p_err = fit(func, p, x, y, p_fix=p_fix)

    data['fit_rabi_flop'] = {
        'xlim': xlim,
        'func': func,
        'state': state,
        'fit': p_fit,
        'err': p_err,
    }

    return data
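This snippet relies on a project-specific fit(func, p, x, y, p_fix=...) helper and on model functions such as sine that are looked up by name via globals(); neither is shown here. Purely as a rough, hypothetical sketch (the project's actual helper may differ), such a wrapper could be built on scipy.optimize.curve_fit:

import numpy as np
from scipy.optimize import curve_fit

def fit(func, p, x, y, p_fix=()):
    # Hypothetical wrapper: least-squares fit of func(x, *params) to (x, y),
    # holding the parameter indices listed in p_fix at their initial values.
    p = np.asarray(p, dtype=float)
    free = [i for i in range(len(p)) if i not in p_fix]

    def wrapped(x, *free_vals):
        full = p.copy()
        full[free] = free_vals
        return func(x, *full)

    popt, pcov = curve_fit(wrapped, x, y, p0=p[free])
    p_fit, p_err = p.copy(), np.zeros_like(p)
    p_fit[free] = popt
    p_err[free] = np.sqrt(np.diag(pcov))
    return p_fit, p_err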
Example #2
def Part2():
    if load_data is True:
        data = pickle.load(open("data.pickle", 'rb'))
    else:
        data = fetch_data()
        if save_data is True:
            pickle.dump(data, open("data.pickle", 'wb'), pickle.HIGHEST_PROTOCOL)
    
    times, data = data
    data = data[1:]-data[0:-1]
    
    mean = [numpy.mean(data[:,k]) for k in range(len(tickers))]
    std = [numpy.std(data[:,k]) for k in range(len(tickers))]
    
    import scipy.stats
    kstest = scipy.stats.kstest
    titles = []
    
    def fit(data, distribution, title=''):
        fname = title
        params = distribution.fit(data)
        D,p_value = kstest(data, cdf=distribution.cdf, args=params)
        title += ", p-value: %05.4f"%p_value
        if p_value > 0.05:
            title += ", Pass"
        else:
            title += ", Fail"
        
        titles.append(title)
        plt.clf()
        sample_range = numpy.linspace(min(data), max(data), 1000)
        plt.hist(data, bins=100, density=True, color='0.75')
        plt.plot(sample_range, [distribution.pdf(x, *params) for x in sample_range], color='0.0', lw=3)
        plt.title(title)
        plt.ylabel("Probability density")
        plt.xlabel("Daily change in stock price")
        plt.savefig("out/" + fname + ".png")
    
    print "Fitting distribution:"
    for j in range(len(tickers)):
        fit(data[:,j], scipy.stats.laplace, tickers[j]+ ", Laplace distribution")
    
    for j in range(len(tickers)):
        fit(data[:,j], scipy.stats.cauchy, tickers[j]+ ", Cauchy distribution")
        
    for j in range(len(tickers)):
        fit(data[:,j], scipy.stats.hypsecant, tickers[j]+ ", Hyperbolic sec distribution")
    
    for j in range(len(tickers)):
        fit(data[:,j], scipy.stats.norm, tickers[j]+ ", Normal distribution")

    for title in titles:
        print(title)
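The distribution-fitting and Kolmogorov-Smirnov testing pattern above can be exercised on its own. The following is a minimal, self-contained sketch on synthetic data (the sample size, seed, and distribution list here are illustrative, not taken from the original script):

import numpy as np
import scipy.stats

rng = np.random.default_rng(0)
returns = rng.laplace(loc=0.0, scale=1.0, size=2000)  # synthetic daily price changes

for dist in (scipy.stats.laplace, scipy.stats.cauchy,
             scipy.stats.hypsecant, scipy.stats.norm):
    params = dist.fit(returns)                         # maximum-likelihood parameter estimates
    D, p_value = scipy.stats.kstest(returns, dist.cdf, args=params)
    verdict = "Pass" if p_value > 0.05 else "Fail"
    print(f"{dist.name}: D={D:.4f}, p-value={p_value:.4f}, {verdict}")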
Example #3
def runfit(self, func, param, data, x=None):
    return fit(func, param, data, x=x, var=self.var)
Example #4
def process(world, country, model):

    config = dict()
    length = 130
    config['x_limits'] = [0, length]
    config['nx'] = length+1
    config['dx'] = (config['x_limits'][1] - config['x_limits'][0]) / (config['nx'] - 1)

    years = mdates.YearLocator()  # every year
    months = mdates.MonthLocator()  # every month
    days = mdates.DayLocator()
    months_fmt = mdates.DateFormatter('%m/%d')

    os.makedirs('Results', exist_ok=True)
    os.makedirs('Figures', exist_ok=True)

    if model == 'SEIR':
        daily = False
    else:
        daily = True
    #NY = gauss_model(config, 34.38, 47588, 39.03)
    #print(NY)

    #US = gauss_model(config, 36, 97000, 64)
    #print(US)

    #df = pd.read_csv("../csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv")
    #df_c, df_d, df_r, column_names, keyword, startIndex, offset = loadData(world)
    data_c, data_d, data_r, column_names, countryList = loadData(world, country, daily)

    dates = np.array([column_names[0] + np.timedelta64(i, 'D')
                      for i in range(length+1)])


    print(column_names, column_names.size)
    print(dates)
    #df.columns=df.columns.str.replace('/','_')

    #df = df[(df['Country/Region'] == 'US') & (df['Province/State'] == 'New York')]
    #df = df[(df['Country/Region'] == 'US') & (df['Province/State'] == 'Texas')]

    if world:
        new_column_names = ['Country']
    else:
        new_column_names = ['State/Province']
    for date in dates:
        new_column_names.append(date.strftime('%m-%d'))
    new_column_names.append('Total')
    out = csv.writer(open("Results/prediction.csv", "w", newline=''), delimiter=',')
    #out.write('Country')
    out.writerow(new_column_names)
    totalData_c = np.zeros(column_names.size)
    totalPred_c = np.zeros(dates.size)
    totalData_d = np.zeros(column_names.size)
    totalPred_d = np.zeros(dates.size)


    predicted_label = 'Predicted Daily New Cases'
    data_label = 'Confirmed Daily New Cases'

    testing_dates, Incident_rate, Testing_rate, Confirmed, Tested = loadDailyReports()
    #C = np.insert(Confirmed['New York'], 0, 0)


    SC = sum(Confirmed.values())
    SC = np.diff(SC)
    ST = sum(Tested.values())
    ST = np.diff(ST)

    for c in countryList:
        print("=============== ", c, " =============")

        if c != 'US ACCUM':
            #print(data_c[c])
            if data_c[c].max() < 10:
                print('----- skip ---- ', c)
                continue

            #print(len(data))

            print("Daily New Case:", data_c[c], data_c[c].size)
            print("Daily Deaths:", data_d[c], data_d[c].size)
            #print(data_r)
            if data_r is not None:
                print("Daily Recovered:", data_r[c], data_r[c].size)
            #detrend(data)
            #ar(data)
            #arima(data)
            #ma(data)
            ma = convolve_sma(data_c[c], 5)

            data2 = np.insert(data_c[c], 0, 0)
            data2 = np.diff(data2)
            print(data2)
            datemin = np.datetime64(column_names[0])
            datemax = datemin + np.timedelta64(length, 'D')
            #print(datemin, datemax)

            #print(torch.from_numpy(data).size())
            if model == "SEIR":
                pred_s, pred_c, pred_r, pred_d = fit_SEIR(torch.from_numpy(data_c[c]).double(), torch.from_numpy(data_d[c]).double(),
                                                  torch.from_numpy(data_r[c]).double(), 329466283, config)
                plot_seir(dates, c, data_c, data_d, pred_c, pred_r, pred_d)
                continue
            curve_c, position_c, amp_c, span_c, curve_d, position_d, amp_d, span_d = fit(
                torch.from_numpy(data_c[c]).double(), torch.from_numpy(data_d[c]).double(), config, dist='gaussian')
            #futures.append([curve,position,amp,span])
            if (math.isnan(position_c) or math.isnan(amp_c) or math.isnan(span_c) or \
                math.isnan(position_d) or math.isnan(amp_d) or math.isnan(span_d)):
                continue

            totalData_c += data_c[c]
            totalPred_c += curve_c
            totalData_d += data_d[c]
            totalPred_d += curve_d

            if (position_c > curve_c.size):
                print(c, " position is out of bound: ", position_c)
                position_c = curve_c.size-1

            if (position_d > curve_d.size):
                print(c, " position is out of bound: ", position_d)
                position_d = curve_d.size - 1

            print(position_c, amp_c, span_c, position_d, amp_d, span_d)

            max_val = max(curve_c.max(), data_c[c].max())
        else:
            max_val = max(totalPred_c.max(), totalData_c.max())
            ma = convolve_sma(totalData_c, 5)

        if not world or c == 'US':
            if c == 'US ACCUM' or c == 'US':
                plot_US(dates, datemin, datemax, column_names, c, testing_dates, (SC / ST).astype(float), totalPred_c,
                        totalData_c,
                        totalPred_d, totalData_d, np.argmax(totalPred_c),
                        np.argmax(totalPred_d), max_val, predicted_label, data_label)
            else:
                try:
                    C = np.diff(Confirmed[c])
                    T = np.diff(Tested[c])
                    positive_rate = (C / T).astype(float)
                    print(positive_rate)
                except Exception:
                    continue
                for i in range(positive_rate.size):
                    if np.isinf(positive_rate[i]) or np.isnan(positive_rate[i]) or positive_rate[i]>1.0:
                        positive_rate[i] = 0.0

                plot_US(dates, datemin, datemax, column_names, c, testing_dates, positive_rate, curve_c,
                        data_c[c], curve_d, data_d[c], position_c,
                        position_d, max_val, predicted_label, data_label)
        else:
            plot_country(dates, datemin, datemax, column_names, c, curve_c, data_c[c], curve_d, data_d[c], position_c,
                    position_d, max_val, predicted_label, data_label)

        if c != 'US ACCUM':
            result_list = curve_c.astype(int).tolist()
            result_list.append(int(curve_c.sum()))
            result_list.insert(0, c)
        else:
            result_list = totalData_c.astype(int).tolist()
            result_list.append(int(totalData_c.sum()))
            result_list.insert(0, 'Total Data')
            out.writerow(result_list)
            result_list = totalPred_c.astype(int).tolist()
            result_list.append(int(totalPred_c.sum()))
            result_list.insert(0, 'Total Pred')
            out.writerow(result_list)
            result_list = totalData_d.astype(int).tolist()
            result_list.append(int(totalData_d.sum()))
            result_list.insert(0, 'Total Deaths')
            out.writerow(result_list)
            result_list = totalPred_d.astype(int).tolist()
            result_list.append(int(totalPred_d.sum()))
            result_list.insert(0, 'Total Pred Deaths')
        out.writerow(result_list)
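The fit(..., dist='gaussian') call above is a project-specific, torch-based routine that returns the fitted curve together with its peak position, amplitude, and span for both cases and deaths; its implementation is not shown here. Purely as an illustration of the underlying idea (not the project's code), a Gaussian bump can be fitted to a daily-count series with SciPy:

import numpy as np
from scipy.optimize import curve_fit

def gaussian(t, amp, position, span):
    # amp: peak height, position: day of the peak, span: width of the bump
    return amp * np.exp(-((t - position) / span) ** 2)

# Illustrative synthetic daily counts peaking around day 40.
days = np.arange(130)
counts = gaussian(days, 500.0, 40.0, 12.0) + np.random.default_rng(1).normal(0.0, 20.0, days.size)

p0 = [counts.max(), float(days[counts.argmax()]), 10.0]   # rough initial guess
(amp, position, span), _ = curve_fit(gaussian, days, counts, p0=p0)
curve = gaussian(days, amp, position, span)
print(f"peak near day {position:.1f}, amplitude {amp:.0f}, span {span:.1f}")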
Example #5
# Now I'll run the fit
h1 = TH1F("integrate_fit","",100000,0,100000)
h2 = TH1F("integrate_pixels","",15000,0,15000)
h3 = TH1F("peakh_fit","",200,0,200)
h4 = TH1F("peakh_pixels","",200,0,200)
h5 = TH1F("peaka_fit","",15000,0,15000)
h6 = TH1F("peaka_pixels","",15000,0,15000)
h7 = TH1F("raw_pixel_integral","",100000,0,100000)
h8 = TH1F("raw_pixel_area","",15000,0,15000)
#h9 = TH1F("fixed_integral","",100000,0,100000)
percent = special.erf(sqrt(2))
params = fitgaussian(out,0.5*threshold[1])
for p in params:
	fit = gaussian(*p)
	results = np.array([[fit(j,i) for i in X] for j in Y],dtype=float)
	coords = np.argwhere((results-p[0])/p[1]>two_sigma_val/(p[5]*p[6]))
	integral = 0
	smooth_integral = 0
	num_pix = 0
	num_smooth_pix = len(coords)
	for coord in coords:
		smooth_integral += out[coord[0]][coord[1]]
		if data[coord[0]][coord[1]]>0.5*threshold[1]:
			integral += data[coord[0]][coord[1]]
			num_pix += 1
	h1.Fill(abs(2*pi*p[1]*p[5]*p[6]*percent*percent))
	h2.Fill(smooth_integral)
	h3.Fill(p[1])
	h4.Fill(out[int(p[3])][int(p[4])])
	h5.Fill(abs(pi*p[5]*p[6]))
Example #6
import numpy as np
import sys; sys.path.append("/home/boruoshihao/fit/python")  # path to the fit source code
from fit import *

def fun(x,a,b):
    return a+b*x**2
p0=[-9.213378324437171, -634.2556002942869]

x,y,dy=np.loadtxt("data", dtype=np.float64,skiprows=0,unpack=True)
popt,perr=fit(fun, x, y, dy, p0)
fit_plot(fun, x, y, dy, popt, xmin="none", xmax="none", save=0)
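The fit and fit_plot helpers used here come from the author's local fit module on that sys.path entry. If that module is unavailable, an equivalent weighted fit of the same quadratic model can be sketched with SciPy and matplotlib (an assumed substitute, not that module's API):

import numpy as np
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt

def fun(x, a, b):
    return a + b*x**2

p0 = [-9.213378324437171, -634.2556002942869]
x, y, dy = np.loadtxt("data", dtype=np.float64, skiprows=0, unpack=True)

# Weight each point by its uncertainty dy; perr are one-sigma parameter errors.
popt, pcov = curve_fit(fun, x, y, p0=p0, sigma=dy, absolute_sigma=True)
perr = np.sqrt(np.diag(pcov))

plt.errorbar(x, y, yerr=dy, fmt='o', label='data')
xs = np.linspace(x.min(), x.max(), 200)
plt.plot(xs, fun(xs, *popt), label='fit')
plt.legend()
plt.show()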