Example #1
def FastScatLimit(exp, x_in, Lim_in, Del_in, Del, geff, optype="V"):
    ##### Assuming the splitting is irrelevant --> upscattering is easily obtained using the beam energy
    gu, gd, ge, gs = Fillg(geff)

    if Del > 0.5:
        print(
            "Warning: recasting of scattering limits only implemented for small or zero splitting"
        )
    M2tildeToM1 = (1 + 1 / (1 + Del)) / (2 + Del_in)
    xProd_DPtmp, NProd_DP = br.NProd_DP(exp)
    xProd_DP = xProd_DPtmp / 1.0  # Switching to zero splitting to avoid problems at the resonance
    mymin = np.min(xProd_DP) / M2tildeToM1
    # Sending M1 to M2tilde
    mymax = np.max(xProd_DP) / M2tildeToM1
    xi = uf.log_sample(mymin, mymax, 200)
    Lam1TeV = np.full(np.size(xi), 1000)
    xProd_new, Prod_new = br.NProd(Del, exp, geff, optype)
    Nnew = np.interp(xi, xProd_new * (1 + Del), Prod_new)

    xProd, NProd = br.NProd(Del, exp, geff, optype)

    xi, ratio, LimDP = uf.GetRatio(NProd, xProd, NProd_DP, xProd_DP, Lim_in,
                                   x_in)

    gscat = (np.abs(gu) + np.abs(gd))
    EffLim = 0.013 * np.sqrt(xi) / np.sqrt(LimDP) * np.power(
        ratio, 1 / 8.) * 1000 * np.sqrt(gscat)

    return xi, EffLim
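
The 1/8 power used above reflects the expected EFT event-rate scaling: production and scattering each contribute a factor of 1/Lambda^4, so the signal yield falls as 1/Lambda^8 and a limit on Lambda shifts by the eighth root of a yield ratio. A minimal, generic sketch of that rescaling step (illustrative only, not part of the module):

import numpy as np

def rescale_eft_limit(lam_ref, yield_new, yield_ref):
    """Rescale a limit on the EFT scale Lambda when the signal yield changes.

    Assumes the yield scales as 1/Lambda^8 (production x scattering), which is
    what the np.power(ratio, 1/8.) factor in FastScatLimit relies on.
    """
    return np.asarray(lam_ref) * np.power(np.asarray(yield_new) / np.asarray(yield_ref), 1. / 8.)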
Example #2
File: kNN.py Project: shanky3011/CS4641
def mainCurves():
    UsefulFunctions.warning()

    first_X, first_Y = UsefulFunctions.loadVehicleData()
    first_graph_data = analyzePerNeighbor(first_X, first_Y)

    second_X, second_Y = UsefulFunctions.loadWineData()
    second_graph_data = analyzePerNeighbor(second_X, second_Y)

    graphDataCurves(first_graph_data, second_graph_data)
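
analyzePerNeighbor is defined elsewhere in kNN.py; as a rough, hypothetical sketch of what a per-neighbor analysis of this kind typically returns (cross-validated accuracy versus the number of neighbors, ready for graphDataCurves-style plotting):

import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import cross_val_score

def analyze_per_neighbor_sketch(X, Y, max_k=30, cv=10):
    # Mean cross-validated accuracy for k = 1..max_k neighbors.
    ks = np.arange(1, max_k + 1)
    scores = [cross_val_score(KNeighborsClassifier(n_neighbors=int(k)), X, Y, cv=cv).mean()
              for k in ks]
    return ks, np.array(scores)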
Example #3
def read_file(path):
    ged = open(path)
    ged_lines = ged.readlines()
    person_date_tags = ["BIRT", "DEAT"]
    fam_date_tags = ["MARR", "DIV"]
    fam_flag = False
    date_type = ''
    individuals = {}
    ind_id = ""
    families = {}
    #Makes individual and family dicts
    for ged_line in ged_lines:
        status, tag, args = check(strip(ged_line))
        if (status == True):
            if (tag == "FAMS"):
                individuals[ind_id]["FAMS"] = args
            if (tag == "FAMC"):
                individuals[ind_id]["FAMC"] = args
            if (tag == "INDI"):
                ind_id = args
                individuals[ind_id] = {}
                individuals[ind_id]["ID"] = args
                fam_flag = False
            #Gets name and sex
            if (tag == "NAME" or tag == "SEX"):
                individuals[ind_id][tag] = args
            #gets date
            if (tag in person_date_tags):
                date_type = tag
            if (tag == "DATE"):
                if (fam_flag == False):
                    individuals[ind_id][
                        date_type] = UsefulFunctions.handle_date(args)
                else:
                    families[ind_id][date_type] = UsefulFunctions.handle_date(
                        args)
            #Identifies family
            if (tag == "FAM"):
                ind_id = args
                fam_flag = True
                families[ind_id] = {}
                families[ind_id]["ID"] = args
            #gets husband, wife and child IDs
            if (tag in fam_date_tags):
                date_type = tag
            if (tag == "HUSB" or tag == "WIFE"):
                families[ind_id][tag] = args
            if (tag == "CHIL"):
                if (tag not in families[ind_id].keys()):
                    families[ind_id][tag] = [args]
                else:
                    families[ind_id][tag].append(args)

    ged.close()
    return (individuals, families)
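
A short usage sketch (the './test.ged' path is just a placeholder): read_file returns two dictionaries keyed by GEDCOM record IDs, with tags such as NAME, SEX, BIRT and DEAT stored per individual and HUSB, WIFE, CHIL per family. It assumes check(), strip() and UsefulFunctions.handle_date() from the same module.

individuals, families = read_file('./test.ged')
for ind_id, record in individuals.items():
    print(ind_id, record.get("NAME"), record.get("BIRT"))
for fam_id, record in families.items():
    print(fam_id, record.get("HUSB"), record.get("WIFE"), record.get("CHIL", []))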
Example #4
def main():
    UsefulFunctions.warning()

    # Building Phase
    first_X, first_Y = UsefulFunctions.loadVehicleData()
    clf_first, first_training_score, first_training_data, first_testing_data, first_graph_data = analyze(
        first_X, first_Y)
    print(
        "Decision Tree Training Score (first) After Cross Validation: {0:.2f}%"
        .format(first_training_score * 100))
    UsefulFunctions.calc_accuracy(first_training_data[1],
                                  clf_first.predict(first_training_data[0]),
                                  first_testing_data[1],
                                  clf_first.predict(first_testing_data[0]))

    second_X, second_Y = UsefulFunctions.loadWineData()
    clf_second, second_training_score, second_training_data, second_testing_data, second_graph_data = analyze(
        second_X, second_Y)
    print(
        "Decision Tree Training Score (second) After Cross Validation: {0:.2f}%"
        .format(second_training_score * 100))
    UsefulFunctions.calc_accuracy(second_training_data[1],
                                  clf_second.predict(second_training_data[0]),
                                  second_testing_data[1],
                                  clf_second.predict(second_testing_data[0]))
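
UsefulFunctions.calc_accuracy is not shown in these snippets; judging from the call above it receives true and predicted labels for both splits, so a plausible stand-in (an assumption, not the course's actual helper) is:

from sklearn.metrics import accuracy_score

def calc_accuracy_sketch(y_train_true, y_train_pred, y_test_true, y_test_pred):
    # Report training and testing accuracy from true/predicted label pairs.
    print("Training Accuracy: {0:.2f}%".format(accuracy_score(y_train_true, y_train_pred) * 100))
    print("Testing Accuracy: {0:.2f}%".format(accuracy_score(y_test_true, y_test_pred) * 100))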
Example #5
def MakeEffLimits(exp, xLim_DP, Lim_DP, geff, optype, DelIni=0.1):
    xeff, ProdEff = br.NProd(DelIni, exp, geff, optype)
    xProd_DP, NProd_DP = br.NProd_DP(exp)

    #     if exp=="faser": print("faser Lim eps original:",ProdEff,xeff,NProd_DP,xProd_DP,Lim_DP,xLim_DP)

    xi, ratio, Lim = uf.GetRatio(ProdEff, xeff, NProd_DP, xProd_DP, Lim_DP,
                                 xLim_DP)
    if optype == "AV":  # We need to rescale the decay rate also
        Lam1TeV = np.full(np.size(xi), 1000)
        GamV = am.GamHDSee(xi, DelIni, Lam1TeV, {
            "gu11": 2 / 3.,
            "gd11": -1 / 3.,
            "gd22": -1 / 3.,
            "gl11": -1.
        }, "V")
        GamAV = am.GamHDSee(xi, DelIni, Lam1TeV, geff, "AV")
        Gamratio = GamAV / GamV
    else:
        Lam1TeV = np.full(np.size(xi), 1000)
        GamVem = am.GamHDSee(xi, DelIni, Lam1TeV, {
            "gu11": 2 / 3.,
            "gd11": -1 / 3.,
            "gd22": -1 / 3.,
            "gl11": -1.
        }, "V")
        GamV = am.GamHDSee(xi, DelIni, Lam1TeV, geff, "V")
        Gamratio = GamV / GamVem
    EffLimIni = 0.013 * np.sqrt(xi) / np.sqrt(Lim) * np.power(
        ratio * Gamratio, 1 / 8.) * 1000
    #     if exp=="faser": print("faser ratio original:",ratio[xi>0.01]*Gamratio[xi>0.01])

    #     if exp=="faser": print("faser EffLimIni original:",EffLimIni[xi>0.01])
    #     print(xi, EffLimIni)
    return xi, EffLimIni
Example #6
    def __init__(self, name, exp, channel, Delini, interptype="log"):
        self.exp = exp
        self.channel = channel
        self.delini = Delini
        self.descr = "No description for this limit"
        self.ref = "No reference for this limit"
        self.combthr = 1.1  # Standard value used when relevant to combine the upper and lower limit
        try:
            self.name = name
            self.mx_ini, self.lim_ini = uf.LoadData('LimData/' + name + '.dat',
                                                    interptype)


#             if verbose: print("Loading: ",  self.name,self.lim_ini   )
        except:
            self.name = "NotDefined"
            self.mx_ini = np.logspace(-3., 1., 30)
            self.lim_ini = np.zeros(np.shape(self.mx_ini))

        try:
            self.model = model
        except:
            self.model = "No model for this limit"

        self.lim_inifull = {
            "V": (self.mx_ini, self.lim_ini),
            "AV": (self.mx_ini, self.lim_ini)
        }
Example #7
def main():
    abaloneX, abaloneY = UsefulFunctions.loadVehicleData()  # note: despite the "abalone" variable names, this loads the vehicle dataset
    clf_abalone, abalone_training_score, abalone_testing_data, abalone_graph_data, abalone_elapsed_time = analyze(
        abaloneX, abaloneY)
    print(
        "Neural Network Training Score (Abalone) After Cross Validation: {0}%".
        format(abalone_training_score * 100))
    print("Neural Network Took (Abalone) {0}s to Train".format(
        abalone_elapsed_time))
    start = time.time()
    results = clf_abalone.predict(abalone_testing_data[0])
    end = time.time() - start
    print("Neural Network (Abalone) Took {0}s to Test".format(end))
    print(confusion_matrix(abalone_testing_data[1], results))
    print("Neural Testing Score for Abalone {0}%".format(
        cal_accuracy(abalone_testing_data[1], results) * 100))

    wine_X, wine_Y = UsefulFunctions.loadWineData()
    clf_wine, wine_training_score, wine_testing_data, wine_graph_data, wine_elapsed_time = analyze(
        wine_X, wine_Y)
    print(
        "Neural Network Training Score (Wine) After Cross Validation: {0}%"
        .format(wine_training_score * 100))
    print("Neural Network Took (Wine) {0}s to Train".format(wine_elapsed_time))
    start = time.time()
    results = clf_wine.predict(wine_testing_data[0])
    end = time.time() - start
    print("Neural Network (Wine) Took {0}s to Test".format(end))
    print(confusion_matrix(wine_testing_data[1], results))
    print("Neural Testing Score for Wine {0}%".format(
        cal_accuracy(wine_testing_data[1], results) * 100))

    fig = plt.figure(200)
    ax1 = plt.subplot(211)
    ax1.plot(abalone_graph_data[0], abalone_graph_data[1])
    ax1.set_xlabel("Number of Epochs")
    ax1.set_ylabel("Cross Validated Accuracy Score")
    ax1.set_title("Score vs Number of Epochs for Vehicle Data")

    ax2 = plt.subplot(212)
    ax2.plot(wine_graph_data[0], wine_graph_data[1])
    ax2.set_xlabel("Number of Epochs")
    ax2.set_ylabel("Cross Validated Accuracy Score")
    ax2.set_title("Score vs Number of Epochs for Wine Data")
    fig.tight_layout()
    plt.show()
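
analyze() is defined elsewhere; the graph data it returns pairs an epoch count with a cross-validated score. A hedged sketch of how such a curve could be generated with scikit-learn (an illustration, not the assignment's actual helper):

import numpy as np
from sklearn.neural_network import MLPClassifier
from sklearn.model_selection import cross_val_score

def epoch_curve_sketch(X, Y, max_epochs=50, cv=3):
    # Cross-validated accuracy of an MLP capped at an increasing number of training epochs.
    epochs = np.arange(1, max_epochs + 1)
    scores = [cross_val_score(MLPClassifier(hidden_layer_sizes=(32,), max_iter=int(e)),
                              X, Y, cv=cv).mean() for e in epochs]
    return epochs, np.array(scores)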
Example #8
 def GET(self):
     web.header('Content-Type', 'application/json')
     user_data = web.input()
     query = user_data.query
     #Start with an empty list.
     inputques = []
     #Add the input query to our list.
     inputques.append(query)
     inputques = UsefulFunctions.tokenization_spellcheck(inputques)
     im = UsefulFunctions.createTfidfVectorizer_Instance(inputques)
     #Loading the tfidf matrix from disk
     qm = np.load(Training.save_matrix_path)
     coslist = cosine_similarity(qm, im).flatten()
     maxsim = np.argmax(coslist)
     response = Training.response[maxsim]
     #returning response
     return response
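
For context, a self-contained sketch of the retrieval idea behind this handler (this is not the UsefulFunctions/Training API, whose internals are not shown): vectorize the stored questions with TF-IDF, transform the incoming query with the same vectorizer, and return the response of the most cosine-similar question.

import numpy as np
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.metrics.pairwise import cosine_similarity

questions = ["how do I reset my password", "what are your opening hours"]
responses = ["Use the 'Forgot password' link.", "We are open 9am to 5pm."]

vectorizer = TfidfVectorizer()
qmatrix = vectorizer.fit_transform(questions)       # matrix built at training time
im = vectorizer.transform(["what are your opening hours tomorrow"])  # incoming user query
best = int(np.argmax(cosine_similarity(qmatrix, im)))
print(responses[best])                              # -> "We are open 9am to 5pm."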
Example #9
def main():
    title = "Learning Curves Vehicle (SVM)"
    abalone_X, abalone_Y = UsefulFunctions.loadVehicleData()
    abaloneX_train, abaloneX_test, abaloneY_train, abaloneY_test = train_test_split(
        abalone_X, abalone_Y, test_size=0.30, random_state=100)
    cv = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=42)
    # change the kernel here
    estimator = SVC(gamma=.001, C=1000.0, kernel='poly')
    plt, abalone_elapsed_time = plot_learning_curve(estimator,
                                                    title,
                                                    abaloneX_train,
                                                    abaloneY_train, (0.1, 0.5),
                                                    cv=cv,
                                                    n_jobs=4)
    print("It took SVM (Abalone) {0}s to train".format(abalone_elapsed_time))
    estimator.fit(abaloneX_train, abaloneY_train)
    t0 = time()
    y_pred = estimator.predict(abaloneX_test)
    print("SVM (Abalone) Took {0}s to test".format(time() - t0))
    print("SVM Accuracy Score (Abalone) was {0}%".format(
        accuracy_score(abaloneY_test, y_pred) * 100))
    plt.show()

    title = "Learning Curves Wine (SVM)"
    wine_X, wine_Y = UsefulFunctions.loadWineData()
    wineX_train, wineX_test, wineY_train, wineY_test = train_test_split(
        wine_X, wine_Y, test_size=0.30, random_state=100)
    cv = StratifiedShuffleSplit(n_splits=10, test_size=0.1, random_state=42)
    # change the kernel here
    estimator = SVC(gamma=.001, C=1000.0, kernel='rbf')
    plt, wine_elapsed_time = plot_learning_curve(estimator,
                                                 title,
                                                 wineX_train,
                                                 wineY_train, (0.1, 1.01),
                                                 cv=cv,
                                                 n_jobs=4)
    print("It took SVM (Wine) {0}s to train".format(wine_elapsed_time))
    estimator.fit(wineX_train, wineY_train)
    t0 = time()
    y_pred = estimator.predict(wineX_test)
    print("It took SVM (Wine) {0}s to test".format((time() - t0)))
    print("SVM Accuracy Score (Wine) was {0}%".format(
        accuracy_score(wineY_test, y_pred) * 100))
    plt.show()
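
plot_learning_curve here is the course's own helper (it also returns the elapsed training time); a minimal sketch of the same kind of plot built directly on scikit-learn's learning_curve, for reference:

import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import learning_curve

def quick_learning_curve(estimator, X, y, cv, n_jobs=4):
    sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=np.linspace(0.1, 1.0, 5))
    plt.plot(sizes, train_scores.mean(axis=1), 'o-', label='training score')
    plt.plot(sizes, test_scores.mean(axis=1), 'o-', label='cross-validation score')
    plt.xlabel('Training examples')
    plt.ylabel('Score')
    plt.legend(loc='best')
    return plt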
Example #10
def FastMonoJet(exp, g_in, Lim_Up_in, Delini, Del, geff, optype="V"):
    gu, gd, ge, gs = Fillg(geff)
    xi_basic = uf.log_sample(0.005, 5, 200)
    gef = np.sqrt(2 * gu**2 + gd**2)

    if gef < np.min(g_in):
        Lim_u_out = 0
    else:
        Lim_u_out = np.interp(gef, g_in, Lim_Up_in)
    Lim_full = np.full(200, Lim_u_out)
    return xi_basic, Lim_full
Example #11
def FastInvMesDecay(channel, delrec, geff, optype):

    xi_full = uf.GetxiRange(channel, delrec)

    #     print("Range for ", channel, " ",xi_full)
    Lim_full = np.array([
        br.getLimLambdaFromInvBR(mx / (1 + delrec), mx, channel, geff, optype)
        for mx in xi_full
    ])
    #     Lim_full = np.array([br.LambdaInvBR(mx, mx) for mx in xi_full])
    return xi_full, Lim_full
Example #12
def main():
    wine_X, wine_Y = UsefulFunctions.loadWineData()
    clf_wine, wine_training_score, wine_testing_data, wine_graph_data, wine_elapsed_time = analyze(
        wine_X, wine_Y)
    print(
        "Neural Network Training Score (Wine) After Cross Validation: {0}%"
        .format(wine_training_score * 100))
    print("Neural Network Took (Wine) {0}s to Train".format(wine_elapsed_time))
    start = time.time()
    results = clf_wine.predict(wine_testing_data[0])
    end = time.time() - start
    print("Neural Network (Wine) Took {0}s to Test".format(end))
    # print(confusion_matrix(wine_testing_data[1], results))
    print(wine_graph_data[1])
    print("Neural Testing Score for Wine {0}%".format(
        cal_accuracy(wine_testing_data[1], results) * 100))
Example #13
def FastDecayLimit(exp,
                   x_in,
                   Lim_in,
                   Del_in,
                   Del,
                   geff,
                   optype="V",
                   CombMerge=1.1,
                   useHeavyMeson=False):

    xeff, Limeff = MakeEffLimits(exp, x_in, Lim_in, geff, optype, Del_in)
    xi_cast, Lim_cast, Lim_cast_low = RecastDecayLimit(xeff, Limeff, Del_in,
                                                       Del, exp, geff, optype,
                                                       useHeavyMeson)
    #     print("Full limits: " , exp, xi_cast,Lim_cast,Lim_cast_low)
    xi_cast_full, Lim_cast_full = uf.CombineUpDown(xi_cast, Lim_cast_low,
                                                   Lim_cast, CombMerge)
    return xi_cast_full, Lim_cast_full
Example #14
def Stretchfeff(xi, feffin, DelProd, Del):
    thrup = br.MPi / 3

    xlow = np.min(xi[feffin > 0])
    fefftoStretch = feffin[np.logical_and(xi < thrup, xi > 1.2 * xlow)]
    xefftoStretch = xi[np.logical_and(xi < thrup, xi > 1.2 * xlow)]

    xilow = xi[xi < thrup]
    xiInterp = uf.log_sample(2 * me / Del * (2 + Del), thrup,
                             len(xefftoStretch))
    #     print(xiInterp)
    fEffinM2Fit = np.interp(xilow, xiInterp, fefftoStretch)
    res = feffin
    #     print(res[np.logical_and(xi>2*me/Del*(2+Del),xi<thrup)])
    res[np.logical_and(xi > 2 * me / Del * (2 + Del),
                       xi < thrup)] = fEffinM2Fit[np.logical_and(
                           xilow > 2 * me / Del * (2 + Del), xilow < thrup)]
    return res
Example #15
def DetEff(xNProd, NProd, xlim, Lamlim, Del, DelIni, exp, geff, optype="V"):

    # First we get the production ratio
    # We need to shift the masses, making sure that the invariant mass is equal: M1 + M2 = M1tilde + M2tilde
    M2tildeToM1 = (1 + 1 / (1 + Del)) / (2 + DelIni)
    #     print("Production function: ", xNProd,NProd)
    M1ToX = (2 + DelIni)
    xmin = np.min(xNProd) * M1ToX
    # Sending M1 to X=M1+M2
    xmax = np.max(xNProd) * M1ToX
    xi = uf.log_sample(xmin, xmax, 200)
    Lam1TeV = np.full(np.size(xi), 1000)
    #     print("Limlim: ", Lamlim[xlim*M1ToX>0.01])
    LamliminX = np.interp(xi, xlim * M1ToX, Lamlim)
    #     print("Laimlim: ", LamliminX[xi>0.01])
    NprodinX = np.interp(xi, xNProd * M1ToX, NProd)
    GammaDecayinX = am.GamHDSee(xi / M1ToX, DelIni, Lam1TeV, geff, optype)

    Res = np.power(LamliminX, 8) / NprodinX / GammaDecayinX
    #     if exp=="faser" :print("Res: ", np.nan_to_num(Res)*(GammaDecayinX>0))
    return xi, np.nan_to_num(Res) * (GammaDecayinX > 0
                                     )  # We output as a function of M1+M2
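
The conversion factor at the top of DetEff follows from the invariant-mass matching stated in the comment: with m2 = (1+Del)*m1 for the recast spectrum and m2tilde = (1+DelIni)*m1tilde for the original one, requiring m1 + m2 = m1tilde + m2tilde gives m1*(2+Del) = m1tilde*(2+DelIni), hence m1tilde/m2 = (2+Del)/((1+Del)*(2+DelIni)) = (1 + 1/(1+Del))/(2+DelIni), which is the M2tildeToM1 combination used above (the naming of which state is tilded differs, but the algebra is the same). A quick numerical cross-check of the identity (illustrative only):

Del, DelIni = 0.4, 0.1
lhs = (1 + 1 / (1 + Del)) / (2 + DelIni)
rhs = (2 + Del) / ((1 + Del) * (2 + DelIni))
assert abs(lhs - rhs) < 1e-12  # two equivalent forms of the same mass-ratio factor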
Example #16
def GetNaiveDecayLimits(Del, exp, Nexp, geff, optype="V", HeavyMeson=False):

    Dexp, Lexp, beamtype = GeomForExp(exp)
    xNProd, NProd = br.NProd(Del, exp, geff, optype, HeavyMeson)

    MinvFac = (1 + Del)
    mymin = np.min(xNProd * MinvFac)
    mymax = np.max(xNProd * MinvFac)
    xi = uf.log_sample(mymin, mymax, 500)
    Lam1TeV = np.full(np.size(xi), 1000.)

    ctaugamma = 1 / am.GamHDSll(xi / (1 + Del), Del,
                                Lam1TeV, geff, optype) * BoostFact(
                                    xi, Del, beamtype) * (3e8 * 6.5875e-25)
    Limnew = 1000 * np.power(
        np.interp(xi, xNProd * MinvFac, NProd) * Lexp / ctaugamma / Nexp,
        1 / 8.)

    if (beamtype == "LHC"):
        Limnew = ReduceLimLHC(Limnew)

    return xi / (
        1 + Del
    ), Limnew  # We need to export as M1 to match with the other imported limits
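
The constant (3e8 * 6.5875e-25) multiplying 1/Gamma is the unit conversion from a width in GeV to a lab-frame decay length in metres: tau = hbar/Gamma with hbar ~ 6.58e-25 GeV*s, so lambda_lab = betagamma * c * tau, with BoostFact presumably supplying the boost factor. A small illustration of the conversion (assumed constants, not taken from the module):

HBAR_GEV_S = 6.58e-25   # reduced Planck constant in GeV*s
C_M_PER_S = 3e8         # speed of light in m/s

def lab_decay_length(width_gev, boost):
    # betagamma * c * tau, with tau = hbar / Gamma, result in metres
    return boost * C_M_PER_S * HBAR_GEV_S / width_gev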
Example #17
def FastSN1987Limit(limlist, Del, geff, optype="V", upperlimit=True):
    xi_basic = uf.log_sample(0.005, 0.3, 400)
    ##### Currently just test the different operator and apply a naive proton scattering scaling, except from the AV case where the upper limit derives from the pi0 branching ratio
    gu, gd, ge, gs = Fillg(geff)
    M2tildeToM1 = (1 + 1 /
                   (1 + Del)) / (2)  ### Change for scaling: Del=0 initially
    x_in, Lim_in = limlist[optype]
    if upperlimit:
        if optype == "V":
            Lim_out = Lim_in * np.sqrt((np.abs(gu) + np.abs(gd) + np.abs(ge)) /
                                       2)  # Scaling based on e+e- annihilation
            return xi_basic, np.interp(
                xi_basic, x_in / M2tildeToM1, Lim_out
            )  # we include the possibility of production from electrons just incase -- very rough
        else:
            #             Gam1=br.GamAV_MestoXX(x_inAV,x_inAV*(1),br.MPi,1,1000.)
            #             Gam2=br.GamAV_MestoXX(x_inAV,x_inAV*(1+Del),br.MPi,1,1000.)
            xf = x_in / M2tildeToM1
            Gam1 = am.GamAV_MestoXX(x_in, x_in * (1 + 0), br.MPi, 1, 1000.)
            Gam2 = am.GamAV_MestoXX(xf / (1 + Del), xf, br.MPi, 1, 1000.)

            Lim_out = Lim_in * np.power(gu - gd, 1 / 2.) * np.power(
                Gam2 / Gam1, 1 / 8.)  # Limits from invisible pi0 decay
            #             print("x for SN, ",M2tildeToM1, x_inAV , Lim_inAV, Lim_out)
            return xi_basic, np.interp(xi_basic, x_in / M2tildeToM1, Lim_out)
    else:
        if optype == "V":
            Lim_out = Lim_in * np.sqrt(
                (np.abs(gu) + np.abs(gd)
                 ))  # we include the possibility of scattering from nuclei
            return xi_basic, np.interp(xi_basic, x_in / M2tildeToM1, Lim_out)
        else:
            Lim_out = Lim_in * np.sqrt((np.abs(gu) + np.abs(gd)))
            return xi_basic, np.interp(
                xi_basic, x_in / M2tildeToM1, Lim_out
            )  # we include the possibility of scattering from nuclei
Example #18
import pandas as pd
import numpy as np
import configparser
import UsefulFunctions

configParser = configparser.RawConfigParser()
configFilePath = r'C:\Users\Admin\Desktop\code-09-02\configfile.txt'
configParser.read(configFilePath)
data_path = configParser.get('file-config', 'data-path')
save_matrix_path = configParser.get('file-config', 'sparse-matrix-path')


#Reading Training data
df = pd.read_csv(data_path)
question, response = UsefulFunctions.columnstoList(df)

#Data cleaning and spell check
question = UsefulFunctions.tokenization_spellcheck(question)

#Saving Tfidf matrix to disk
qmatrix = UsefulFunctions.createTfidfVectorizer(question)
np.save(save_matrix_path, qmatrix)
Example #19
#
#sdif = abs(sm1-s1)

err = np.zeros((len(x),len(t)))
errSumX = np.zeros((len(x),1))
#D=2
#x = x*2
#
#t = t*((d**2)/D)
##std = std*((d**2)/D)
#sparktimes = sparktimes*((d**2)/D)
#std = np.diff(sparktimes)
#conc = 0.14*conc
plt.figure()
for i in np.arange(0,len(t),1):
    err[:,i] = abs(conc[:,i]-uf.cBar(x,t[i])).T
    plt.plot(x,conc[:,i],'b',linewidth=3)
    plt.plot(x[::20],uf.cBar(x[::20],t[i]),'ro',mew=3,ms=3)
###    plt.ylim((0,cmax))
#    
    plt.plot(np.ones(40),np.arange(0,4,0.1),'--')
    plt.plot(-1*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(2*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-2*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(3*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-3*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(4*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-4*np.ones(40),np.arange(0,4,0.1),'--')
#    
    plt.plot(x,np.ones(len(x)),'--')
    plt.plot(-x,np.ones(len(x)),'--')
Example #20
                ind_id = args
                fam_flag = True
                families[ind_id] = {}
                families[ind_id]["ID"] = args
            #gets husband, wife and child IDs
            if (tag in fam_date_tags):
                date_type = tag
            if (tag == "HUSB" or tag == "WIFE"):
                families[ind_id][tag] = args
            if (tag == "CHIL"):
                if (tag not in families[ind_id].keys()):
                    families[ind_id][tag] = [args]
                else:
                    families[ind_id][tag].append(args)

    ged.close()
    return (individuals, families)


#reads information from file
individuals, families = read_file('./test.ged')
#calculates and stores useful data, mostly ages
individuals = UsefulFunctions.age_bank(families, individuals)

#printing in table format
ind_table = pd.DataFrame(individuals).transpose()
print(tabulate(ind_table, headers='keys', tablefmt='psql'))

fam_table = pd.DataFrame(families).transpose()
print(tabulate(fam_table, headers='keys', tablefmt='psql'))
Example #21
import os
import numpy as np  # np.int64 / np.round used below
import scipy
import scipy.signal as sig
from scipy import io
from scipy import signal
from PyQt5 import QtGui, QtWidgets
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from numpy import linalg as lin
import pyqtgraph as pg
import UsefulFunctions as uf  # uf.ImportAxopatchData used below

filetoload = '/Volumes/backup/2016/Michael/Axopatch/21112016/17B_10mMCis100mMtransKCl_80mer.dat'
PartToConsider = np.array([21.542, 21.566])
out = uf.ImportAxopatchData(filetoload)

partinsamples = np.int64(np.round(out['samplerate'] * PartToConsider))

i1part = out['i1'][partinsamples[0]:partinsamples[1]]
i2part = out['i2'][partinsamples[0]:partinsamples[1]]
t = np.arange(partinsamples[0], partinsamples[1]) / out['samplerate']  # time axis in seconds for the considered part

plt.figure(1)
plt.subplot(2, 1, 1)
plt.plot(t, i1part, 'b')
plt.title('i1 vs. i2')
plt.ylabel('Ionic Current [A]')
ax = plt.gca()
ax.set_xticklabels([])
Example #22
import os
import h5py
import numpy as np
import UsefulFunctions as uf
from matplotlib.backends.backend_pdf import PdfPages

common = 1  # plot events found in both channels (flag assumed; not defined in the snippet)
withFit = 0

expname = 'FakeData'
buffer = 250

file = '/Users/migraf/Desktop/04B_FemtoIV_10mMKCl_Noise_.dat'
datafile = '/Users/migraf/Desktop/04B_FemtoIV_10mMKCl_Noise__OriginalDB.hdf5'

directory = (str(os.path.split(datafile)[0]) + os.sep + expname +
             '_SavedImages')

if not os.path.exists(directory):
    os.makedirs(directory)

out = uf.ImportAxopatchData(file)

f = h5py.File(datafile, 'r')

i1data = f['LowPassSegmentation/i1/']
i2data = f['LowPassSegmentation/i2/']

if common:
    #Plot All Common Events
    pp = PdfPages(directory + os.sep + 'SavedEventsCommon.pdf')
    ind1 = np.uint64(i1data['CommonIndex'][:])
    ind2 = np.uint64(i2data['CommonIndex'][:])

    t = np.arange(0, len(out['i1']))
    t = t / out['samplerate'] * 1e3
Example #23
import os
import numpy as np
import UsefulFunctions as uf
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
from tkinter import Tk
from tkinter.filedialog import askopenfilenames
from matplotlib.font_manager import FontProperties
fontP = FontProperties()
fontP.set_size('small')
Tk().withdraw()
os.system(
    '''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "python" to true' '''
)
expname = 'Gradient'

filename = '/Users/migraf/Desktop/Roche meetings/30B_1MKCl_AxoIV_FemtoOff_1.dat'

output = uf.OpenFile(filename)
directory = (str(os.path.split(filename)[0]) + os.sep + expname +
             '_SavedImages')
AllData = uf.MakeIVData(output, delay=0.642)

# Plot all the Fits
time = np.arange(len(output['i1'])) / output['samplerate']
fig1, ax = plt.subplots(1)
ax.plot(time, output['i1'])
ax2 = ax.twinx()
ax2.plot(time, output['v1'], 'y')
ch = 'i1'
#Loop through the parts
for idx, val in enumerate(AllData[ch]['StartPoint']):
    timepart = np.arange(AllData[ch]['EndPoint'][idx] -
                         val) / output['samplerate']
Example #24
    def recast(self, delrec, geff, optype, useHeavyMeson=False):
        # we define the recast based on the expected signal

        if self.channel == "decay":
            if self.model == "EFT":  # For naive NoE based limits
                xi, EffLimtmp = de.GetNaiveDecayLimits(delrec, self.exp, 10,
                                                       geff, optype)
                xi, EffLim, EffLim_low = de.RecastDecayLimit(
                    xi, EffLimtmp, delrec, delrec, self.exp, geff, optype)
                xi_full, Lim_full = uf.CombineUpDown(xi, EffLim_low, EffLim,
                                                     self.combthr)
            else:  # Standard recasting case
                xi_full, Lim_full = de.FastDecayLimit(
                    self.exp, self.mx_ini, self.lim_ini, self.delini, delrec,
                    geff, optype, self.combthr, useHeavyMeson)
        elif self.channel == "heavymesondecay":
            if self.model == "EFT":
                xi, EffLimtmp = de.GetNaiveDecayLimits(delrec, self.exp, 10,
                                                       geff, optype, True)
                xi_heavy, EffLim_heavy, EffLim_heavy_low = de.RecastDecayLimit(
                    xi, EffLimtmp, delrec, delrec, self.exp, geff, optype,
                    True)
                xi_full, Lim_full = uf.CombineUpDown(xi_heavy,
                                                     EffLim_heavy_low,
                                                     EffLim_heavy,
                                                     self.combthr)
            else:
                xi_full, Lim_full = de.FastDecayLimit(
                    self.exp, self.mx_ini, self.lim_ini, self.delini, delrec,
                    geff, optype, self.combthr, useHeavyMeson)
        elif self.channel == "scattering":
            xi_full, Lim_full = de.FastScatLimit(self.exp, self.mx_ini,
                                                 self.lim_ini, self.delini,
                                                 delrec, geff, optype)
        elif self.channel == "monogam":
            xi_full, Lim_full = de.FastMonoPhoton(self.exp, self.mx_ini,
                                                  self.lim_ini, self.delini,
                                                  delrec, geff, optype)
        elif self.channel == "invisibledecayBmtoKm":
            if self.exp == "babar":
                xi_full, Lim_full = de.FastInvMesDecay("babar_BmtoKmnunu",
                                                       delrec, geff, optype)
            elif self.exp == "belle2":
                xi_full, Lim_full = de.FastInvMesDecay("belle2_BmtoKmnunu",
                                                       delrec, geff, optype)
        elif self.channel == "invisibledecayBmtoPim":
            xi_full, Lim_full = de.FastInvMesDecay("babar_BmtoPimnunu", delrec,
                                                   geff, optype)
        elif self.channel == "invisibledecayB0toPi0":
            xi_full, Lim_full = de.FastInvMesDecay("belle_B0toPi0nunu", delrec,
                                                   geff, optype)
        elif self.channel == "invisibledecayB0toK0":
            xi_full, Lim_full = de.FastInvMesDecay("belle_B0toK0nunu", delrec,
                                                   geff, optype)
        elif self.channel == "monojet_down":
            xi_full, Lim_full = de.FastMonoJet(self.exp, self.mx_ini,
                                               self.lim_ini, self.delini,
                                               delrec, geff, optype)
        elif self.channel == "monojet_up":
            xi_full, Lim_full = de.FastMonoJet(self.exp, self.mx_ini,
                                               self.lim_ini, self.delini,
                                               delrec, geff, optype)
        elif self.channel == "invisibledecayKL0toPi0":
            if self.exp == "e391a":
                xi_full, Lim_full = de.FastInvMesDecay("e391a_KL0toPi0nunu",
                                                       delrec, geff, optype)
            if self.exp == "na62":
                xi_full, Lim_full = de.FastInvMesDecay("na62_KL0toPi0nunu",
                                                       delrec, geff, optype)
        elif self.channel == "invisibledecayPi0":
            xi_full, Lim_full = de.FastInvMesDecay("na62_pi0toinvisible",
                                                   delrec, geff, optype)
        elif self.channel == "invisibledecayJPsi":
            xi_full, Lim_full = de.FastInvMesDecay("bes_JPsitoinvisible",
                                                   delrec, geff, optype)
        elif self.channel == "invisibledecayUpsilon":
            xi_full, Lim_full = de.FastInvMesDecay("babar_Upsilontoinvisible",
                                                   delrec, geff, optype)
        elif self.channel == "invisibledecayKptoPip":
            if self.exp == "na62":
                xi_full, Lim_full = de.FastInvMesDecay("na62_KptoPipa", delrec,
                                                       geff, optype)
            elif self.exp == "e949":
                xi_full, Lim_full = de.FastInvMesDecay("e949_KptoPipa", delrec,
                                                       geff, optype)
        elif self.channel == "cosmicrays":
            xi_full, Lim_low_full, Lim_high_full = de.FastCRLimit(
                "t2k", delrec, geff, optype)
            xi_full, Lim_full = uf.CombineUpDown(xi_full, Lim_low_full,
                                                 Lim_high_full)
        elif self.channel == "low_cooling":
            xi_full, Lim_full = de.FastSN1987Limit(self.lim_inifull, delrec,
                                                   geff, optype, False)
        elif self.channel == "high_cooling":
            xi_full, Lim_full = de.FastSN1987Limit(self.lim_inifull, delrec,
                                                   geff, optype, True)
        else:
            print(
                "Channel selected: ", self.channel,
                " is not currently implemented. Possible choices: \n",
                "'decay' : faser, mathusla, ship, charm, seaquest, seaquest_Phase2, lsnd \n",
                "'heavymesondecay' : ship \n",
                "'scattering' :  nova, miniboone, sbnd \n",
                "'missingE' : babar, belleII, atlas, lep \n",
                "'cosmicrays' : t2k (decay from cosmic ray showers into t2k) \n",
                "'cooling' : sn1987_low (pessimistic limit from SN1987 cooling), sn1987_high (optimistic limit from SN1987 cooling) \n",
                "Invisible light meson decays: na62 (pi0decay and invisibledecayKptoPip), e949 (pi0decay and invisibledecayKptoPip), e391a (invisibledecayKL0toPi0) \n",
                "Invisible heavy meson decays: belle (invisibledecayB0toPi0, invisibledecayB0toK0) and belleII (invisibledecayBmtoKm)"
            )
            xi_full = np.logspace(-3., 1., 30)
            Lim_full = np.zeros(np.shape(xi_full))
        return xi_full, Lim_full
Example #25
        b[i - 1] = conc[i, k]

    c = np.linalg.solve(A, b)

    for i in np.arange(0, len(x) - 2):
        conc[i + 1, k + 1] = c[i]

    conc[0, k + 1] = (4 / 3) * conc[1, k + 1] - (1 / 3) * conc[2, k + 1]
    conc[-1, k + 1] = (4 / 3) * conc[-2, k + 1] - (1 / 3) * conc[-3, k + 1]  # one-sided extrapolation mirrored at the right boundary

err = np.zeros((len(x), len(t)))
errSumX = np.zeros((len(x), 1))

plt.figure()
for i in np.arange(0, len(t), 1):
    err[:, i] = abs(conc[:, i] - uf.cBar(x, t[i])).T
    plt.plot(x, conc[:, i], 'b', linewidth=3)
    plt.plot(x[::5], uf.cBar(x[::5], t[i]), 'rx', mew=5, ms=5)
    ##    plt.ylim((0,cmax))
    #    plt.plot(np.ones(40),np.arange(0,4,0.1),'--')
    #    plt.plot(-1*np.ones(40),np.arange(0,4,0.1),'--')
    #    plt.plot(x,np.ones(len(x)),'--')
    #    plt.plot(-x,np.ones(len(x)),'--')
    #    plt.xlim((x_start,3))
    plt.pause(0.00000001)
    plt.clf()
#

errSumX = np.sum(err[:, 1:], axis=1)
errSumT = np.sum(err[:, 1:], axis=0)
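
The (4/3, -1/3) combination used at the boundaries above is the second-order one-sided stencil for a zero-gradient edge: setting (-3*c0 + 4*c1 - c2)/(2*dx) = 0 gives c0 = (4/3)*c1 - (1/3)*c2. A quick check that this extrapolation is exact for a profile with zero slope at the edge:

A, B, h = 1.7, 0.3, 0.05          # c(x) = A + B*(x - x0)**2 has zero slope at x0
c1 = A + B * h**2
c2 = A + B * (2 * h)**2
assert abs((4 / 3) * c1 - (1 / 3) * c2 - A) < 1e-12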
Example #26
    c = np.linalg.solve(A, b)

    for i in np.arange(0, len(x) - 2):
        conc[i + 1, k + 1] = c[i]

    conc[0, k + 1] = (4 / 3) * conc[1, k + 1] - (1 / 3) * conc[2, k + 1]

realsol = np.zeros((len(x), len(t)))

err = np.zeros((len(x), len(t)))
errSumX = np.zeros((len(x), 1))
""" Plot solutions for all time """
plt.figure()
for i in np.arange(0, len(t), 1):
    err[:, i] = abs(conc[:, i] - uf.cBar(x, t[i])).T
    plt.plot(x, conc[:, i], 'b', linewidth=3)
    plt.plot(x, uf.cBar(x, t[i]))
    #    plt.ylim((0,cmax))
    plt.plot(np.ones(40), np.arange(0, 4, 0.1), '--')
    plt.plot(-1 * np.ones(40), np.arange(0, 4, 0.1), '--')
    plt.plot(x, np.ones(len(x)), '--')
    plt.plot(-x, np.ones(len(x)), '--')
    plt.xlim((x_start, 3))
    plt.pause(0.00000001)
    plt.clf()

errSumX = np.sum(err[:, 1:], axis=1)
errSumT = np.sum(err[:, 1:], axis=0)

plt.figure()
Example #27
import matplotlib
matplotlib.use('GTKAgg')  # select the backend before pyplot is imported
import matplotlib.pyplot as plt
from matplotlib.backends.backend_pdf import PdfPages
import numpy as np
import scipy
import pyqtgraph as pg
from timeit import default_timer as timer
import UsefulFunctions as uf

file = '/Users/migraf/Desktop/Temp/17B_10mMCis100mMtransKCl_80mer_5.dat'
out = uf.ImportAxopatchData(file)

coefficients = {
    'a': 0.999,
    'E': 0,
    'S': 5,
    'eventlengthLimit': 10e-3 * out['samplerate']
}

start1 = timer()
RoughEventLocations1 = uf.RecursiveLowPass(out['i1'], coefficients)
RoughEventLocations2 = uf.RecursiveLowPass(out['i2'], coefficients)
end1 = timer()
print('Conventional Filter took :{} s'.format(str(end1 - start1)))

start2 = timer()
RoughEventLocations1 = uf.RecursiveLowPassFastUp(out['i1'], coefficients)
RoughEventLocations2 = uf.RecursiveLowPassFastUp(out['i2'], coefficients)
end2 = timer()
print('New Filter took :{} s'.format(str(end2 - start2)))
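
RecursiveLowPass and RecursiveLowPassFastUp live in UsefulFunctions and are not shown here; as a rough sketch of the general idea behind this kind of detector (an illustration under assumed semantics for the 'a' and 'S' coefficients, not the module's actual code): a single-pole IIR filter tracks the local baseline and variance, and samples deviating from the baseline by more than S standard deviations are flagged as event candidates.

import numpy as np

def recursive_lowpass_sketch(signal, a=0.999, S=5):
    baseline = np.zeros_like(signal, dtype=float)   # running local mean
    variance = np.zeros_like(signal, dtype=float)   # running local variance
    baseline[0] = signal[0]
    for i in range(1, len(signal)):
        baseline[i] = a * baseline[i - 1] + (1 - a) * signal[i]
        variance[i] = a * variance[i - 1] + (1 - a) * (signal[i] - baseline[i]) ** 2
    hits = np.abs(signal - baseline) > S * np.sqrt(variance)
    return np.flatnonzero(hits)                     # indices of candidate event samples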
Example #28
    
    conc[0,k+1] = c[1]
    conc[-1,k+1] = c[-2]

#std = np.diff(sparktimes)
#sm1 = conc[400,:]
#s1 = conc[600,:]
#
#sdif = abs(sm1-s1)

err = np.zeros((len(x),len(t)))
errSumX = np.zeros((len(x),1))

plt.figure()
for i in np.arange(0,len(t),1):
    err[:,i] = abs(conc[:,i]-uf.cBar(x,t[i])).T
    plt.plot(x,conc[:,i],'b',linewidth=3)
#    plt.plot(x[::10],uf.cBar(x[::10],t[i]),'ro',mew=3,ms=3)
###    plt.ylim((0,cmax))
#    
#    plt.plot(np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-1*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(2*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-2*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(3*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-3*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(4*np.ones(40),np.arange(0,4,0.1),'--')
#    plt.plot(-4*np.ones(40),np.arange(0,4,0.1),'--')
#    
#    plt.plot(x,np.ones(len(x)),'--')
#    plt.plot(-x,np.ones(len(x)),'--')
Tk().withdraw()
os.system(
    '''/usr/bin/osascript -e 'tell app "Finder" to set frontmost of process "python" to true' '''
)

#filenames = ['/Volumes/backup/2017/Michael/Axopatch/20170512/10mMKClInFlowCellORingPore1mm.dat']
#filenames = ['/Volumes/backup/2017/Michael/Axopatch/20170512/bmimpf6lInFlowCellORingPore1mm.dat']
expname = 'Gradient'

filenames = askopenfilenames(
)  # show an "Open" dialog box and return the path to the selected file
for filename in filenames:
    print(filename)
    #Make Dir to save images
    output = uf.OpenFile(filename)
    directory = (str(os.path.split(filename)[0]) + os.sep + expname +
                 '_SavedImages')
    if not os.path.exists(directory):
        os.makedirs(directory)

    AllData = uf.MakeIVData(output, delay=2)
    if AllData == 0:
        print('!!!! No Sweep in: ' + filename)
        continue

    #Plot Considered Part
    #figExtracteParts = plt.figure(1)
    #ax1 = figExtracteParts.add_subplot(211)
    #ax2 = figExtracteParts.add_subplot(212, sharex=ax1)
    #(ax1, ax2) = uf.PlotExtractedPart(output, AllData, current = 'i1', unit=1e9, axis = ax1, axis2=ax2)
Example #30
atlas_monoXlow.ref = "inspirehep.net/record/1635274"
atlas_monoXlow.UpdateLimIni(xi_ATLAS, Lim_ATLAS_Down)
UpdateLimitList(atlas_monoXlow)

atlas_monoXhigh = Limit("", "atlas", "monojet_up", 0.0)
atlas_monoXhigh.descr = """
Limits from the ATLAS collaboration, 1711.03301, based on the recast upper limit from 1807.03817
and our own recast.
"""
atlas_monoXhigh.ref = "inspirehep.net/record/1635274"
atlas_monoXhigh.UpdateLimIni(xi_ATLAS, Lim_ATLAS_Up)
UpdateLimitList(atlas_monoXhigh)

#########################  MonoPhoton searches at DELPHI from 1103.0240  #########################

xi_LimLEP_V, LimLEP_V = uf.LoadData('LimData/lep_1103.0240_V.dat', "lin")
xi_LimLEP_AV, LimLEP_AV = uf.LoadData('LimData/lep_1103.0240_AV.dat', "lin")

lep_monoX = Limit("lep_1103.0240", "lep", "monogam", 0.0)
lep_monoX.descr = """
Limits for LEP collaboration, based on the recasting from 1103.0240 for the upper limit. 
The lower limit is given by the breakdown of the EFT at the LEP CoM energy.
"""
lep_monoX.ref = "inspirehep.net/record/890992"
lep_monoX.UpdateLimIni(xi_LimLEP_V, LimLEP_V)
lep_monoX.UpdateLimIni(xi_LimLEP_AV, LimLEP_AV, "AV")
UpdateLimitList(lep_monoX)

######################### SN1987 cooling rate constraints #####
if verbose: print("SN1987 cooling ...")