예제 #1
0
def testCode5():
    """Launch an analysis for a single hard-coded sample id."""
    launcher = Launcher()
    target = Sample()
    target.setID("0358ab4e8595db846b709cf85d7b397d92230bef")
    # Alternate id kept around for manual testing:
    # target.setID("223e8761fbb93458140a3592096109501927ff64")
    target.setStorageVersion({})
    launcher.launchAnalysisByID(target)
예제 #2
0
def testCode():
    """Time one PEProcessor.process() run over a fixed test binary.

    Reads the test file, feeds it to a Sample/PEProcessor pair and
    measures elapsed wall-clock time.
    """
    # Fix: the original bound the handle to `file` (shadowing the builtin)
    # and never closed it; a context manager handles both problems.
    path = "Test_files/error_de_codificacion_en_nombre_de_libreria"
    with open(path, "rb") as fh:
        data = fh.read()

    start_time = time.time()
    dic = {}
    sample = Sample()
    sample.setBinary(data)
    pe = PEProcessor(sample, dic)
    res = pe.process()
    # print(res)
    # print(res["particular_header"]["sections"])
    elapsed = time.time() - start_time
예제 #3
0
    def add_new(self):
        """Pick the first name from valid_names not yet used and create a Sample for it."""
        chosen = None
        for candidate in self.valid_names:
            chosen = candidate
            if candidate not in self.samples:
                break

        entry = Sample(chosen, None, self.sd_var.get(), self)
        self.samples[chosen] = entry
        print("add new " + chosen)
        entry.load_file()
        entry.create_waveform_file()
예제 #4
0
 def generateCEG(self, octave=4, typ='sin'):
     '''
     Produce a single Sample containing a C-major triad (C, E, G) built
     from three WaveFormFunctions of the given waveform type.

     octave: octave of the triad (default 4)
     typ: waveform type forwarded to Sample.addFunc (e.g. 'sin')
     '''
     # All pitches derive from A4 = 440 Hz in equal temperament:
     # f = 440 * 2**(semitones_from_A4 / 12).
     # Relative to A, a C-major triad sits at +3 (C), +7 (E), +10 (G)
     # semitones.  Fix: the old offset of +6 for the middle note gives
     # D#/Eb (a C-minor triad), contradicting the function name and the
     # addFunc(E, ...) call below.
     # NOTE(review): with octave=4 these offsets land an octave above A4
     # (C5/E5/G5 in scientific pitch naming), while the original comment
     # called them C4/E4/G4 -- confirm the intended octave convention.
     C = 440 * math.pow(2, (((octave - 4) * 12 + 3.0) / 12.0))
     E = 440 * math.pow(2, (((octave - 4) * 12 + 7.0) / 12.0))
     G = 440 * math.pow(2, (((octave - 4) * 12 + 10.0) / 12.0))
     # Hand the three frequencies to one Sample with decreasing amplitudes.
     SAM = Sample()
     SAM.noteDuration = 4
     SAM.addFunc(C, typ, 0.7)
     SAM.addFunc(E, typ, 0.65)
     SAM.addFunc(G, typ, 0.6)
     self.Samples.append(SAM)
     print("done")
예제 #5
0
def testCode6():
    """Re-launch the analysis for every file stored in GridFS, printing progress."""
    inicio = 0
    client = MongoClient(env["files"]["host"], env["files"]["port"])
    db = client[env["db_files_name"]]
    fs = gridfs.GridFS(db)
    cursor = fs.find(timeout=False).skip(inicio)
    lc = Launcher()
    count = inicio
    reset = 0
    start = time.time()
    first = True
    for grid_file in cursor:
        sample = Sample()
        sample.setID(grid_file.filename)
        sample.setStorageVersion({})
        lc.launchAnalysisByID(sample)
        reset += 1
        count += 1
        # Emit a timestamped progress line every 1000 files.
        if reset >= 1000:
            stamp = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))
            print(stamp + " procesados:" + str(count / 1000) + "K")
            reset = 0
    print(str(count) + " procesados")
예제 #6
0
def getSampleObjectAt(sound, index):
    """Return the Sample of `sound` at the user-visible (offset) `index`.

    Validates that `sound` is a Sound and that `index` lies within
    [Sound._SoundIndexOffset, getLength(sound) - 1 + Sound._SoundIndexOffset];
    on failure a diagnostic is printed and a bare ValueError is raised.
    """
    if not isinstance(sound, Sound):
        print "getSampleObjectAt(sound,index): First input is not a sound"
        raise ValueError
    # return sound.getSampleObjectAt(index-Sound._SoundIndexOffset)
    # Lower-bound check against the configured index offset.
    if index < Sound._SoundIndexOffset:
        print "You asked for the sample at index: " + str(index) + ".  This number is less than " + str(Sound._SoundIndexOffset) + ".  Please try" + " again using an index in the range [" + str(Sound._SoundIndexOffset) + "," + str(getLength(sound) - 1 + Sound._SoundIndexOffset) + "]."
        raise ValueError
    # Upper-bound check: last valid user index is length - 1 + offset.
    if index > getLength(sound) - 1 + Sound._SoundIndexOffset:
        print "You are trying to access the sample at index: " + str(index) + ", but the last valid index is at " + str(getLength(sound) - 1 + Sound._SoundIndexOffset)
        raise ValueError
    # Internally Sample uses a 0-based index, hence the offset subtraction.
    return Sample(sound, index - Sound._SoundIndexOffset)
예제 #7
0
def testCode5():
    """Run the launcher pipeline against one known sample id."""
    launcher = Launcher()
    analysis_id = "0358ab4e8595db846b709cf85d7b397d92230bef"
    # analysis_id = "223e8761fbb93458140a3592096109501927ff64"
    sample = Sample()
    sample.setID(analysis_id)
    sample.setStorageVersion({})
    launcher.launchAnalysisByID(sample)
예제 #8
0
def testCode6():
    """Iterate every GridFS file and re-launch its analysis, logging progress."""
    inicio = 0
    client = MongoClient(env["files"]["host"], env["files"]["port"])
    db = client[env["db_files_name"]]
    fs = gridfs.GridFS(db)
    cursor = fs.find(timeout=False).skip(inicio)
    lc = Launcher()
    count = inicio
    reset = 0
    start = time.time()
    first = True
    for entry in cursor:
        sample = Sample()
        sample.setID(entry.filename)
        sample.setStorageVersion({})
        lc.launchAnalysisByID(sample)
        reset += 1
        count += 1
        # Progress line once per 1000 processed files.
        if reset >= 1000:
            timestamp = time.strftime('%Y-%m-%d %H:%M:%S',
                                      time.localtime(time.time()))
            print(timestamp + " processed:" + str(count / 1000) + "K")
            reset = 0
    print(str(count) + " processed")
예제 #9
0
def process_file(file_hash):
    """Fetch the stored binary for `file_hash`, wrap it in a Sample and launch analysis.

    Returns None when the package store has no such file, 0 on success.
    """
    # print "process_file("+str(file_hash)+")"
    pc = PackageController()
    res = pc.getFile(file_hash)
    # Idiom fixes: `is None` instead of `== None`, one statement per line.
    if res is None:
        return None
    sample = Sample()
    sample.setID(file_hash)
    sample.setBinary(res)
    # Empty storage version: every processor runs from scratch.
    sample.setStorageVersion({})
    lc = Launcher()
    lc.launchAnalysisByID(sample)

    return 0
예제 #10
0
def testSample():
    """Feed the shared random stream into samples sized 2**4..2**10 and
    check each 50th percentile against Config's closeness test."""
    for exponent in range(4, 11):
        samples.append(Sample.Sample(math.pow(2, exponent)))

    print("Size:\t50th Percentile")

    for collector in samples:
        for value in streamOfRandomNumbers:
            collector.increment(value)
        collector.sort()
        print(f'{collector.maximumSize:0000.0f} :\t {collector.getPercentile(50):.2f}')
        assert Config.Config.Close(collector.getPercentile(50), 0.5, 0.33) == True
예제 #11
0
def load_to_mongo2(folder_path):
    """Bulk-load every file under folder_path into the package store and launch analysis.

    Files are read into memory in RAM-bounded batches, hashed with SHA-1,
    stored via PackageController when the hash is unknown, and queued for
    analysis through Launcher.  Returns a human-readable summary string.
    """
    pc=PackageController()
    ram = Ram()
    files=recursive_read(folder_path)
    count=0           # files actually stored and launched
    reset=0
    already_loaded=0  # files whose hash already existed in the store
    time_start = datetime.datetime.now()
    uploaded=0        # files fully handled (stored or skipped)
    in_mem=0          # index of the next file to read into RAM
    loaded_ram_counter=0
    lc=Launcher()
    if(files is None):
        return "No files where found."
    while (uploaded < len(files)):
        loaded_ram_counter=0
        data_vector=[]
        print "loading files to memory"
        # Fill data_vector until the file list is exhausted; the free-RAM
        # check only kicks in after 100 files of the current batch.
        while (in_mem < len(files)):
            f=files[in_mem]
            # NOTE(review): handle is never closed and the file is opened in
            # text mode; binary samples presumably need "rb" -- confirm.
            file_cursor=open(f,"r")
            data_vector.append(file_cursor.read())
            in_mem=in_mem+1
            loaded_ram_counter=loaded_ram_counter+1
            if(loaded_ram_counter > 100):
                if(ram.free_percent() < 0.3):
                    print "Ram full"
                    break
        for data in data_vector:
            # Content hash doubles as the file id in the store.
            file_id=hashlib.sha1(data).hexdigest()
            print "loading to db: "+str(file_id)
            res=pc.searchFile(file_id)
            if(res==None):
                # Unknown hash: store the binary and launch its analysis.
                pc.append(file_id,data)
                sample=Sample()
                sample.setID(file_id)
                sample.setBinary(data)
                sample.setStorageVersion({}) 
                count+=1
                lc.launchAnalysisByID(sample)
            else:
                already_loaded+=1
            uploaded=uploaded+1

    result=str(already_loaded)+" were already loaded to mongo.\n"
    result+=thetime(time_start,datetime.datetime.now(),count)
    print result
    return result
def __main__():
    """Build, export and plot one CaseTest per configured file entry."""
    for idx, fname in enumerate(files):
        out_name = "../Resources/" + d + fname + ".csv"
        case = caseTest.CaseTest(out_name, dimension, n[idx], (mMu[idx], seed),
                                 classes[idx])
        generated = case.initTest()
        exporter.exportCaseTest(generated, out_name)
        class0, class1 = Sample.divideForClass(generated)
        plotter.graphSample(class0, class1)
        plt.show()

    summary = "--> {} test de dimension {} fueron creados en la carpeta {}.".format(
        n[0], dimension, d)
    print(summary)
예제 #13
0
def main():
    files = [f for f in listdir(path) if isfile(join(path, f))]
    with open('samples.csv', 'w') as outfile:
        for file in files:
            sample = Sample.Sample("data/{}".format(file))
            print "{}: {}".format(sample.type, sample.twitName)
            if not sample.data:
                continue
            features = []
            #features.append(twitName)
            features.extend(encode_type(sample.type))
            features.extend(sample.data)
            outwriter = csv.writer(outfile, lineterminator='\n')
            outwriter.writerow(features)
예제 #14
0
    def __init__(self,
                 dataBase=None,
                 elistBase=None,
                 lumi=1550.,
                 hPU=None,
                 data=False):
        """Define the HT-binned TTJets dataset (plus 2015D data when data=True).

        lumi feeds the MC base weights; hPU, when given, enables pile-up
        reweighting in the "nVert" variable.
        """
        DatasetBase.__init__(self,dataBase=dataBase,elistBase=elistBase, \
                                 sampleDir="tuples_from_Artur/MiniAODv2/")

        # Reweight MC in nVert only when a pile-up histogram was supplied.
        self.mcReweight = ("nVert", hPU) if hPU != None else None
        self.lumi = lumi
        self.data = data

        self.samples = []
        # Leading-order TTJets background, split over four HT slices.
        self.add(Sample("TTJets_LO_HT",self.sampleBase,type="B",color=ROOT.kRed,fill=True, \
                            kfactor=1.,filter=GenLepFilter(0,0), \
                            namelist=["TTJets_LO_HT600to800","TTJets_LO_HT800to1200", \
                                          "TTJets_LO_HT1200to2500","TTJets_LO_HT2500toInf"], \
                            baseweights=lumi,mcReweight=self.mcReweight ))
        if self.data:
            # Single-lepton Run2015D data, attached only on request.
            self.add(Sample("Data_Run2015D_1p2fb",dataBase+"tuples_from_Artur/JECv6recalibrateMET_eleCBID_1550pb/", \
                                type="D",color=1,fill=False,
                            namelist=[ "SingleMuon_Run2015D_v4", "SingleMuon_Run2015D_05Oct", \
                                           "SingleElectron_Run2015D_v4", "SingleElectron_Run2015D_05Oct" ] ))
예제 #15
0
    def sortDataset(self, path, test_percentage, do_shuffle=False):
        """Split the image dataset under `path` into test and training sets.

        Each subdirectory of `path` is one category; `test_percentage`
        (0..1) of each category's .jpg/.bmp images goes to the test set,
        the rest to the training set.  Optionally shuffles each category
        before splitting.

        Returns (trainingSet, testSet): lists of Sample.Sample objects
        with .label set to the category name and .path to the image path.
        """
        testSet = []
        trainingSet = []
        categories = os.listdir(path)
        for c in range(len(categories)):
            imgName = []
            # Collect every .jpg/.bmp image inside this category folder.
            for s in os.listdir(os.path.join(path, categories[c])):
                # Fix: the extension is now computed once; the old code
                # computed splitext three times and bound one result to
                # `str`, shadowing the builtin without ever using it.
                ext = os.path.splitext(os.path.basename(s))[1]
                if ext == '.jpg' or ext == '.bmp':
                    imgName.append(os.path.join(path, categories[c], s))

            if do_shuffle is True:
                random.shuffle(imgName)

            # Number of this category's images reserved for the test set.
            n = int(len(imgName) * test_percentage)
            for l in range(0, n):
                sample = Sample.Sample()
                sample.label = categories[c]
                sample.path = imgName[l]
                testSet.append(sample)

            for l in range(n, len(imgName)):
                sample = Sample.Sample()
                sample.label = categories[c]
                sample.path = imgName[l]
                trainingSet.append(sample)

        return trainingSet, testSet
예제 #16
0
파일: Sven.py 프로젝트: xlwuHIT/Sven
def feature(args):
    """Build the per-sample feature file from the parsed CLI arguments.

    Creates the sample output directory when missing, resolves the
    feature-file path (default: <sample>/feature.txt) and hands all
    inputs to Sample, which performs the construction as a side effect.
    """
    mytime('feature construction start')
    sample = args.sample
    bam = args.bam
    snv = args.snv
    sv = args.sv
    if not os.path.exists(sample):
        os.mkdir(sample)
    # Fix: compare to None with `is`; the conditional expression is also
    # expanded for readability.
    if args.feature_file is None:
        feature = sample + '/feature.txt'
    else:
        feature = sample + '/' + args.feature_file
    gtype = args.genome_type
    gfile = args.genome_file
    pop = args.population

    Sample(sample, bam, snv, sv, feature, gtype, gfile, pop)

    mytime('feature construction end')
예제 #17
0
def process_file(file_hash):
    """Load the stored binary for `file_hash` and launch its analysis.

    Returns None when the file is unknown to the package store, 0 otherwise.
    """
    # print "process_file("+str(file_hash)+")"
    pc = PackageController()
    res = pc.getFile(file_hash)
    # Idiom fixes: `is None` rather than `== None`, no compound one-liner,
    # PEP 8 spacing around assignments.
    if res is None:
        return None
    sample = Sample()
    sample.setID(file_hash)
    sample.setBinary(res)
    # Empty storage version: every processor runs from scratch.
    sample.setStorageVersion({})
    lc = Launcher()
    lc.launchAnalysisByID(sample)

    return 0
예제 #18
0
def process_file(file_hash, force=False):
    if file_hash is None:
        return None
    print "process_file(" + str(file_hash) + ")"
    pc = PackageController()
    res = pc.getFile(file_hash)
    if res == None:
        return None
    sam_id = file_hash
    sample = Sample()
    sample.setID(sam_id)
    sample.setBinary(res)
    if force:
        sample.setStorageVersion({})
    lc = Launcher()
    lc.launchAnalysisByID(sample)
    log_event("process", str(file_hash))
    return 0
예제 #19
0
def getEdgeSample(edges, perc):
    """Return a dict holding roughly `perc` percent of `edges`.

    Sample.getSample picks which positions (in iteration order) of
    `edges` survive; surviving edges keep their original values.
    """
    nrEdges = len(edges)
    sampleSize = min(nrEdges - 1, int((nrEdges * perc) / 100))
    pickedPositions = set(Sample.getSample(0, nrEdges - 1, sampleSize))

    selectedEdges = {}
    for position, edge in enumerate(edges):
        if position in pickedPositions:
            selectedEdges[edge] = edges[edge]
    return selectedEdges
예제 #20
0
    def __init__(self,
                 dataBase=None,
                 elistBase=None,
                 lumi=1550.,
                 hPU=None,
                 data=False):
        """Define the dileptonic TTJets dataset.

        lumi feeds the MC base weights; hPU, when given, enables pile-up
        reweighting in the "nVert" variable.  `data` is stored but no
        data sample is added by this constructor.
        """
        DatasetBase.__init__(self,dataBase=dataBase,elistBase=elistBase, \
                                 sampleDir="tuples_from_Artur/MiniAODv2/")

        # Reweight MC in nVert only when a pile-up histogram was supplied.
        self.mcReweight = ("nVert", hPU) if hPU != None else None
        self.lumi = lumi
        self.data = data

        self.samples = []
        # Dileptonic LO TTJets sample with its k-factor applied.
        self.add(Sample("TTJets_LO_DiLepton",self.sampleBase,type="B",color=ROOT.kBlue-3,fill=True, \
                            kfactor=1.059, \
                            namelist=["TTJets_DiLepton_full"], \
                            baseweights=lumi,mcReweight=self.mcReweight ))
예제 #21
0
def main():
    """For every distribution/size pair, gather n sets of sample
    characteristics and print the aggregated results."""
    distributions, sizes, n = init()
    for dist in distributions:
        for size in sizes:
            num_chars = Char.Characteristics.number_of_characteristics()
            collectors = [Sample.Sample() for _ in range(num_chars)]
            for _ in range(n):
                observed = Char.Characteristics(dist.create_sample(size))
                for idx, value in enumerate(observed.get_characteristics()):
                    collectors[idx].add_elem_to_sample(value)
            print_results(dist, collectors, size)
    return 0
예제 #22
0
def process_file(file_hash, force=False):
    """Validate `file_hash`, pull its binary from the store and launch analysis.

    Returns None when the store has no such file, 0 on success.
    Raises ValueError for anything that is not a sha1 hex digest.
    """
    if not is_sha1(file_hash):
        raise ValueError("process_file only accepts sha1")
    logging.debug("process_file(" + str(file_hash) + ")")
    controller = PackageController()
    binary = controller.getFile(file_hash)
    if binary is None:
        logging.warning("Error: process_file(" + str(file_hash) +
                        "): pc.getFile returned None")
        return None
    sample = Sample()
    sample.setID(file_hash)
    sample.setBinary(binary)
    if force:
        # Empty storage version forces a full re-analysis.
        sample.setStorageVersion({})
    launcher = Launcher()
    launcher.launchAnalysisByID(sample)
    log_event("process", str(file_hash))
    return 0
예제 #23
0
    def readNextCache(self):
        """Read up to cache_size sample groups from the open file into self.samples.

        A line starting with "MentionID" begins a new SampleGroup; the
        previous group (when it holds more than one sample) is finalized
        and appended first.  An empty line or '=== doc ===' marks end of
        input.  Afterwards the collected groups are either shuffled or
        sorted by ascending group size, depending on self.shuffle.
        """
        #read file
        i = 0
        while (i < self.cache_size and self.FileEnd == 0):
            line = self.file.readline().decode('utf8').strip()
            if (line[0:9] == u"MentionID"):
                if (self.group is not None):
                    if (len(self.group.samples) > 1):
                        # NOTE(review): samples[0] appears to act as a group
                        # header that inherits identifying fields from the
                        # first real sample (samples[1]) -- confirm.
                        self.group.samples[0].mentionID = self.group.samples[
                            1].mentionID
                        self.group.samples[0].mentionType = self.group.samples[
                            1].mentionType
                        self.group.samples[0].headStr = self.group.samples[
                            1].headStr
                        self.group.samples[0].docID = self.group.samples[
                            1].docID
                        self.samples.append(self.group)
                        #                         self.group.printInfoStr()
                        i = i + 1
                        if (i % 1000 == 0):
                            print 'DataProvider.readNextCache.read ', i, ' samples.'

                # Start the next group; the name follows the "MentionID " prefix.
                self.group = SampleGroup.SampleGroup()
                self.group.groupName = line[11:]
                continue

            # Blank line or document separator ends the cache read.
            if (not line or line == u'=== doc ==='):
                self.FileEnd = 1
                self.file.close()
                break
            if (self.group is None):
                break
            s = Sample(line)
            self.group.samples.append(s)

        #shuffle
        if (self.shuffle == 1):
            print 'DataProvider.readNextCache shuffle'
            random.shuffle(self.samples)
        else:  #sort by group size
            print 'DataProvider.readNextCache sort'
            self.samples.sort(key=lambda SampleGroup: len(SampleGroup.samples),
                              reverse=False)
예제 #24
0
def main():
    path = "data"
    files = [f for f in listdir(path) if isfile(join(path, f))]
    eval = []
    ml = evaluate.loadML()
    for file in files:
        sample = Sample.Sample("data/{}".format(file))
        print "{}: {}".format(sample.type, sample.twitName)
        if not sample.data:
            continue
        #features.append(twitName)
        real_type = sample_analysis.encode_type(sample.type)
        predicted_type = ml.predict(sample.data)
        result = {
            "type": evaluate.decodeType(real_type),
            "prediction": evaluate.decodeType(predicted_type)
        }
        eval.append(result)

    with open("accuracy_results.json", "w") as f:
        json.dump(eval, f, indent=2)
예제 #25
0
    def SetUpSamples(self):
        for mc in self.MCSamplesDefinitions:
            newsample = Sample.Sample()
            newsample.ProcessName = mc[0]
            newsample.SplitMode = mc[2]
            newsample.UseFlavorSplitting = mc[3]
            newsample.RateSystematics = mc[4]
            newsample.ShapeSystematics = mc[5]
            for ss in mc[1]:
                inpath = self.MCinputDirectory + "/" + ss
                #print ss, inpath
                #indir=check_output(["dir","-1",inpath]).splitlines()
                #for rf in indir:
                #if ".root" in rf and "nominal" in rf:
                #newsample.ListOfRawInputTrees.append(inpath+"/"+rf)
                newsample.ListOfRawInputTrees += glob.glob(inpath)
            print newsample.ListOfRawInputTrees
            #raw_input()
            self.MCSamples.append(newsample)

        print "sat up intrees"
예제 #26
0
def process_file(file_hash, force=False):
    """Launch an analysis for the stored binary identified by `file_hash`.

    Returns None when the binary is missing from the store, 0 once the
    launch has been issued.  Raises ValueError unless `file_hash` is a
    sha1 hex digest.
    """
    if not is_sha1(file_hash):
        raise ValueError("process_file only accepts sha1")
    logging.debug("process_file(" + str(file_hash) + ")")
    package_controller = PackageController()
    stored = package_controller.getFile(file_hash)
    if stored is None:
        logging.warning(
            "Error: process_file(" + str(file_hash) + "): pc.getFile returned None")
        return None
    sample = Sample()
    sample.setID(file_hash)
    sample.setBinary(stored)
    if force:
        sample.setStorageVersion({})
    Launcher().launchAnalysisByID(sample)
    log_event("process", str(file_hash))
    return 0
예제 #27
0
        def __init__(self, fileList, refList,
                     fmt, sampleName, d=0, ns=0, n2=1,
                     exp=0, start=0, stop=0, flip=False, plot=True,
                     window='', windowPara=[],
                     fitFlag=False, multipleFit=False, model='',
                     complexFunction=False,
                     init=[], para=[], boundaries=[0, 1000], guess=False,
                     fitQty='Conductivity', thin=True):
                """
                Parameters
                ----------
                fileList: list of str
                    names of the files to be analysed.
                    They can be repetition of the same experiment or
                    the result of different experiment.
                refList: list of str
                    names of the file to be used as reference.
                    Can be identical to fileList if that's what the data
                    format requires (see fmt)
                fmt: str
                    format of the stored data, three values accepted:
                    'Ox', 'TW' and 'abcd'. more info in the readme
                sampleName: str
                    name of the sample measured, if the sample name is not
                    recognized the values for GaAs will be assumed.
                    This will lead to errors in the conductivity and mobility
                    absolute values. Although the frequency response will be
                    unaffected. See the readme for howto write a
                    new sample model.
                d: float
                    thickness of the sample, default 0. If 0 an arbitrary
                    default value for the indicated sample will be assumed.
                    If no recognized sample was given, the value will be
                    the default of GaAs indicated in the readme.
                ns: float
                    substrate refractive index, default 0. If 0 an arbitrary
                    default value for the indicated sample will be assumed.
                    If no recognized sample was given, the value will be
                    the default of GaAs indicated in the readme.
                n2: float
                    refractive index of the encapsulation layer or medium
                    in which the THz was travelling prior of hitting
                    the sample. Default 1 (air)
                exp: int
                    zero padding factor if greater than zero increases the
                    time window of exp * current time window
                start: int
                    index of the first TDS time point to consider, default 0
                    (from first point)
                stop: int
                    index of the last TDS time point to consider,
                    measured from the last index, default 0.
                flip: bool
                    if True flips the time axis, default False
                plot: bool
                    if True generates some automatic plots, default True
                window: str
                    the type of window to use to window the time resolved data,
                    default '', none used. Only accepted windows are
                    Gaussian and Lorentz shapes
                windowpara: list of float
                    parameters of the desired window, if none given
                    unit values will be assumed
                fitFlag: bool
                    if True performs a fit on the averaged spectra,
                    default False
                multipleFit: bool
                    if True considers the different files as coming from
                    different experiments and will fit them separately.
                    If False will fit the average of the files provided.
                    Default False.
                model: str
                    model used to fit the spectra, more details in the readme.
                complexFunction: bool
                    If True will attempt to fit the complex valued spectra.
                    Default False
                init: dictionary or list of dictionary
                    dictionary containing the initial guesses for the fit
                    parameters, if multipleFit is True provide a list of
                    dictionaries, one for each experiment
                    default empty (default values will be used)
                para: list of float
                    list of possible external parameters that might be
                    needed for the fit, ex. magnetic field for cyclotron
                    default empty list []
                boundaries: list of int or list of lists of int
                    list of the frequency values between which the fit has
                    to be performed. Unfortunately only in data point index.
                    if multipleFit is True provide a list of lists,
                    one for each experiment
                    default All spectrum considered
                guess: bool
                    if plot is True, controls the plotting of the initial
                    guess of the fit parameters, useful for complicated fits
                    Default False
                fitQty: str
                    Quantity to be fitted, allowed values are 'Conductivity'
                    and 'Transmission'
                    Default 'Conductivity'
                thin: bool
                    if True assumes a thin film samples.
                    Not properly implemented.
                    More information in the readme.
                    default True

                """
                super(THzAnalyser, self).__init__()
                self.fileList = fileList
                self.window = window
                self.para = para
                self.windowPara = windowPara
                self.sample = sam.Sample(sampleName, d=d, ns=ns, n2=n2)
                self.params = 0
                self.multiParams = []

                if fmt == 'Ox':
                        sigCol = 2
                        refCol = 1
                elif fmt == 'TW':
                        sigCol = 1
                        refCol = 7
                elif fmt == 'abcd':
                        sigCol = 1
                        refCol = 2
                else:
                        sigCol = 1
                        refCol = 1

                shapeData = self.Data_Reader(fileList[0], fmt, 0, shape=0)
                shape = np.shape(shapeData)
                lenFiles = shape[1]
                numFiles = len(fileList)
                lenfft = np.int(np.round((exp + 1) *
                                         (lenFiles -
                                          stop - start) / 2 + .6))
                listShape = ((numFiles,
                              (exp + 1) * (lenFiles -
                                           stop - start)))
                lenT = listShape[1]
                listShapeFft = ((numFiles, lenfft))

                # Time domain arrays
                self.xList = np.zeros(listShape)  # time delay in mm
                self.xRefList = np.zeros(listShape)

                self.tList = np.zeros(listShape)  # time delay in ps
                self.tRefList = np.zeros(listShape)

                self.EtList = np.zeros(listShape)  # Time domain dields
                self.EtRefList = np.zeros(listShape)

                # Averaged time domain arrays
                self.t = np.zeros(lenT)  # Same quantities but averaged
                self.Et = np.zeros(lenT)
                self.EtRef = np.zeros(lenT)

                # Frequency domain arrays
                self.fList = np.zeros(listShapeFft,
                                      dtype=np.complex_)  # Frequency
                self.EList = np.zeros(listShapeFft,
                                      dtype=np.complex_)  # Complex FFT field
                self.ERefList = np.zeros(listShapeFft,       # Complex FFT
                                         dtype=np.complex_)  # Reference field
                self.transList = np.zeros(listShapeFft,
                                          dtype=np.complex_)  # Transmission
                self.sigmaList = np.zeros(listShapeFft,
                                          dtype=np.complex_)  # Conductivity
                self.epsilonList = np.zeros(listShapeFft,       # Dielectric
                                            dtype=np.complex_)  # function
                self.lossList = np.zeros(listShapeFft,       # Loss
                                         dtype=np.complex_)  # Function
                self.nList = np.zeros(listShapeFft,       # Complex refractive
                                      dtype=np.complex_)  # index

                # Averaged frequency domain arrays
                self.f = np.zeros(lenfft,             # Same quantities
                                  dtype=np.complex_)  # but averaged
                self.E = np.zeros(lenfft,
                                  dtype=np.complex_)
                self.ERef = np.zeros(lenfft,
                                     dtype=np.complex_)
                self.sigma = np.zeros(lenfft,
                                      dtype=np.complex_)
                self.trans = np.zeros(lenfft,
                                      dtype=np.complex_)
                self.epsilon = np.zeros(lenfft,
                                        dtype=np.complex_)
                self.loss = np.zeros(lenfft,
                                     dtype=np.complex_)
                self.n = np.zeros(lenfft,
                                  dtype=np.complex_)

                # Uncertainties of frequency domain quantities
                self.sigmaUncReal = np.zeros(lenfft)
                self.sigmaUncImag = np.zeros(lenfft)
                self.transUnc = np.zeros(lenfft)
                self.epsilonUncReal = np.zeros(lenfft)
                self.epsilonUncImag = np.zeros(lenfft)
                self.lossUnc = np.zeros(lenfft)
                self.nUnc = np.zeros(lenfft)

                # self.DrudeCoeff = 0
                stop = lenFiles - stop
                if stop == 0:
                        stop = int(1e6)

                tmp_xList, tmp_EtList = self.Data_Reader(fileList, fmt,
                                                         sigCol, shape)
                tmp_xRefList, tmp_EtRefList = self.Data_Reader(refList, fmt,
                                                               refCol, shape)

                # If # ref < # files puts the first ref as the missing files
                if np.abs(len(refList) - len(fileList)) != 0:
                        for i in range(len(fileList)):
                                tmp_xRefList[i] = tmp_xRefList[0]
                                tmp_EtRefList[i] = tmp_EtRefList[0]

                for i, file in enumerate(fileList):
                        (self.tList[i],
                         self.EtList[i],
                         self.tRefList[i],
                         self.EtRefList[i],
                         self.fList[i],
                         self.EList[i],
                         self.ERefList[i],
                         self.transList[i],
                         self.sigmaList[i],
                         self.epsilonList[i],
                         self.lossList[i],
                         self.nList[i]
                         ) = self.Data_Computation(
                             tmp_EtList[i, start:stop],
                             tmp_EtRefList[i, start:stop],
                             tmp_xList[i, start:stop],
                             tmp_xRefList[i, start:stop],
                             self.sample,
                             fmt,
                             flip=flip,
                             exp=exp,
                             window=window,
                             para=windowPara,
                             thin=thin)
                for i in range(lenT):
                        self.t[i] = np.average(self.tList[:, i])
                        self.Et[i] = np.average(self.EtList[:, i])
                        self.EtRef[i] = np.average(self.EtRefList[:, i])
                for i in range(lenfft):
                        self.f[i] = np.average(self.fList[:, i])
                        self.E[i] = np.average(self.EList[:, i])
                        self.ERef[i] = np.average(self.ERefList[:, i])
                        self.trans[i] = np.average(self.transList[:, i])
                        self.sigma[i] = np.average(self.sigmaList[:, i])
                        self.epsilon[i] = np.average(self.epsilonList[:, i])
                        self.loss[i] = np.average(self.lossList[:, i])
                        self.n[i] = np.average(self.nList[:, i])

                        self.sigmaUncReal[i] = np.std(
                            np.real(self.sigmaList[:, i]))
                        self.sigmaUncImag[i] = np.std(
                            np.imag(self.sigmaList[:, i]))
                        self.transUnc[i] = np.std(np.abs(self.transList[:, i]))
                        self.epsilonUncReal[i] = np.std(
                            np.real(self.epsilonList[:, i]))
                        self.epsilonUncImag[i] = np.std(
                            np.imag(self.epsilonList[:, i]))
                        self.lossUnc[i] = np.std(np.abs(self.lossList[:, i]))
                        self.nUnc[i] = np.std(np.abs(self.nList[:, i]))
                # for i in range(3, lenfft):
                #         tmp = np.sqrt(np.real(self.sigma[i])**2) \
                #             / np.sqrt(np.real(self.sigma[i])**2 +
                #                       np.imag(self.sigma[i])**2)
                #         self.DrudeCoeff += tmp
                #         if np.abs(self.f[i] - 2.5) < 0.03:
                #                 self.DrudeCoeff /= (i - 3)
                #                 break
                # self.ratio = 1e15 * np.imag(self.sigma) / (np.abs(self.f) *
                #                                            2e12 * np.pi *
                #                                            np.real(self.sigma))
                if fitFlag:
                        y_Map = {'Conductivity': self.sigma,
                                 'Transmission': self.trans}
                        if multipleFit:
                            y_Map = {'Conductivity': self.sigmaList,
                                     'Transmission': self.transList}
                        y = y_Map[fitQty]
                        err_Map = {'Conductivity':
                                   [self.sigmaUncReal,
                                    self.sigmaUncImag],
                                   'Transmission':
                                   self.transUnc}
                        err = err_Map[fitQty]
                        if model == '':
                                model = self.sample.f
                                wn.warn('Warning:: model undefined, sample' +
                                        '\'s default chosen: ' +
                                        model, RuntimeWarning)
                        if multipleFit:
                            for i, o in enumerate(y):
                                self.multiParams.append(
                                    self.Fit(x=self.fList[i, boundaries[i][0]:
                                                          boundaries[i][1]],
                                             y=o[boundaries[i][0]:
                                                 boundaries[i][1]],
                                             model=model,
                                             err=0,
                                             init=init[i], para=para[i],
                                             c=complexFunction, guess=guess,
                                             plot=plot,
                                             fitQty=fitQty))
                        elif not multipleFit:
                            self.params = self.Fit(x=self.f[boundaries[0]:
                                                            boundaries[1]],
                                                   y=y[boundaries[0]:
                                                       boundaries[1]],
                                                   model=model,
                                                   err=err[boundaries[0]:
                                                           boundaries[1]],
                                                   init=init, para=para,
                                                   c=complexFunction,
                                                   guess=guess,
                                                   plot=plot,
                                                   fitQty=fitQty)
                if plot:
                        (self.multifig,
                         self.multitimefig,
                         self.finalfig) = self.Data_Plotter(fmt)

                        self.valuesfig = 0
예제 #28
0
# Minimal smoke test of the Sample module's greeting helper.
import Sample

# say_hello_to presumably prints a greeting for the given name -- confirm
# against the Sample module; this excerpt only shows the call site.
Sample.say_hello_to('Somebody')
예제 #29
0
# Locate the chosen book's text file under the books/ directory.
directory = 'books'
book = 'John'
#book = '3_John'
#book = 'Philemon'
filename = book + '.txt'
path = os.path.join(directory, filename)
reader = Reader.File(path)
lines = reader.lines()
parser = Parser.Simple(lines)
# Parse the raw lines into verse objects, wrapped to max_chars columns.
verses = parser.parse(max_width=max_chars)

# Main quiz loop: show each verse (with section and reference headers)
# and process keyboard input until it has been fully guessed.
for verse in verses:
    text = verse.text()
    tokens = Tokens.Classic(text)
    tokenized = tokens.tokenize()
    # Sample.Classic appears to drive the guessing game over the tokenized
    # verse text -- confirm against the Sample module.
    sample = Sample.Classic(tokenized)
    lines = wrapper.wrap(sample.text())
    section = verse.section()
    lines.insert(0, section)
    reference = verse.reference()
    lines.insert(1, reference)
    redraw(lines, screen)

    # Poll pygame events until the sample reports nothing left to guess.
    while sample.guessable():
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()

            if event.type == pygame.KEYDOWN:
                pressed_keys = pygame.key.get_pressed()
                # NOTE(review): this excerpt is truncated here -- the body of
                # the key-press handler is missing from the source fragment.
                if pressed_keys[sample.key()]:
예제 #30
0
# Remaining mfsup periods (A-D are presumably defined earlier in the file).
# All embedding samples share the same settings; only the period letter and
# the mfs shift ("UP"/"DN") vary, so build them from one template.
_EMB_D3PD_FMT = ("data12_8TeV.period{0}.physics_Muons.PhysCont."
                 "NTUP_EMBLH{1}.grp14_v02_r4697_p1462/")

def _embedding_sample(period, shift):
    # One embedding Sample2 for the given data period and mfs shift.
    return Sample2(lumi = -1.0, site = "IN2P3-CC_PHYS-HIGGS",
                   d3pds = [_EMB_D3PD_FMT.format(period, shift)])

for _p in ("E", "G", "H", "I", "J", "L"):
    setattr(embeddingUP, "period" + _p, _embedding_sample(_p, "UP"))

# mfsdn
for _p in ("A", "B", "C", "D", "E", "G", "H", "I", "J", "L"):
    setattr(embeddingDN, "period" + _p, _embedding_sample(_p, "DN"))

# OS-SS
embedding.OSminusSS_periodAtoL = Sample2(d3pds = ["data12_8TeV.periodAtoL.embedding.OSminusSS"])

for _samp in embedding.__dict__.values():
    Sample.set_type(_samp, "embedding12")
for _samp in embeddingUP.__dict__.values():
    Sample.set_type(_samp, "embedding12-UP")
for _samp in embeddingDN.__dict__.values():
    Sample.set_type(_samp, "embedding12-DN")

# EOF
예제 #31
0
data.JetTauEtmiss.periodAtoL = Sample2(label = "Data 2012", lumi = lumiAtoL, d3pds = ["data12_8TeV.periodAtoL.JetTauEtmiss.merge"])

# -----------------------------------------------------------------------------
# Dummy samples: placeholders carrying only a display label and one d3pd.
# -----------------------------------------------------------------------------

for _attr, _label, _d3pd in [
        ("multijet", "multi-j", "data12_8TeV.multijet.merge"),
        ("samesign", "Data 2012 (SS)", "data12_8TeV.samesign.merge"),
        ("faketaus", "jet->#tau#lower[0.4]{#scale[0.6]{h}}", "data12_8TeV.faketaus.merge"),
        ("OSminusSS_faketaus", "jet->#tau#lower[0.4]{#scale[0.6]{h}}", "data12_8TeV.faketaus.OSminusSS"),
        ("fakeleps", "jet->l", "data12_8TeV.fakeleps.merge"),
        ("OSminusSS_fakeleps", "jet->l", "data12_8TeV.fakeleps.OSminusSS"),
]:
    setattr(data, _attr, Sample2(label = _label, d3pds = [_d3pd]))

# -----------------------------------------------------------------------------
# Embedding samples -- None for p1443.
# -----------------------------------------------------------------------------

# Tag every registered sample with its stream/campaign type.
for _holder, _stype in [(data.Muons, "data12-Muons"),
                        (data.Egamma, "data12-Egamma"),
                        (data.JetTauEtmiss, "data12-JetTauEtmiss"),
                        (mc, "mc12"),
                        (embedding, "embedding")]:
    for _samp in _holder.__dict__.values():
        Sample.set_type(_samp, _stype)

# EOF
예제 #32
0
def createSample():
    """Assemble the multiplex graph Sample, write the muxViz input files
    (layout, edge list, coloured external edges) and return the sample."""
    base = "Data\\muxViz-masterData\\dataData\\graph1Data\\"
    node_ids = list(range(1, nrNodes + 1))
    graph = Sample(nrLayers, node_ids, Edges, Label, True)
    graph.addAliasEdges()
    createLayoutFile(base + "layoutFile.txt", graph.getNrNodes(), False)
    graph.createEdgesFile(base + "EdgeFile.txt")
    graph.createColoredEdges(base + "ExternalEdgeFile.txt")
    # Quick sanity report of the generated sample's dimensions.
    print(graph.getNrNodes(), graph.getNrEdges(), graph.getNrLayers())

    return graph
예제 #33
0
                          Chain._lE[l])

        dR = inputVec.DeltaR(lVec)
        if dR < .4: hasOverlap = True

    return hasOverlap


#Load in samples
import Sample
# Alternative inputs kept for reference (earlier ntuple productions):
#sample = Sample.Sample("DYJetsToLL_M-50", '/pnfs/iihe/cms/store/user/lwezenbe/heavyNeutrino/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/crab_Moriond2017_ext1-v2_ewkino2016MCList-v28/190318_133550/0000/trilep_1.root', 'DYJets', 1, "6024")
#sample = Sample.Sample("DYJetsToLL_M-50", '/pnfs/iihe/cms/store/user/wverbeke/heavyNeutrino/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/crab_Moriond2017_ext1-v2_ewkino2016MCList-v27/180925_111611/0000/trilep_1.root', 'DYJets', 1, "6024")
#sample = Sample.Sample("DYJetsToLL_M-50", '/pnfs/iihe/cms/store/user/lwezenbe/heavyNeutrino/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/crab_Moriond2017_ext1-v2_tau_MC_trilepwOneTau_v2/190221_084926/0000/trilepWithSingletau_1.root', 'DYJets', 1, "6024")
#sample = Sample.Sample("DYJetsToLL_M-50", '/pnfs/iihe/cms/store/user/lwezenbe/heavyNeutrino/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/crab_Moriond2017_ext1-v2_tau_MC_trilepwOneTau_v2/190221_084926/0000', 'DYJets', 1, "6024")
# Drell-Yan sample used for this run of the tau-efficiency study.
sample = Sample.Sample(
    "DYJetsToLL_M-50",
    '/pnfs/iihe/cms/store/user/lwezenbe/heavyNeutrino/DYJetsToLL_M-50_TuneCUETP8M1_13TeV-madgraphMLM-pythia8/crab_Moriond2017_ext1-v2_tauEfficiency2016_v2/190206_150950/0000/noskim_1.root',
    'DYJets', 1, "6024")

# NOTE(review): Python 2 script (print statement here, xrange below).
print sample.name
# Integrated luminosity; presumably 36000 /pb = 36 fb^-1 -- confirm.
lumi = 36000
#Initialize chain
Chain = sample.initTree()

import objectSelection
from helpers_old import progress

# Numerator/denominator counters, presumably for an efficiency; the
# accumulation code is missing from this truncated excerpt.
num = 0.
denom = 0.
# Event loop (excerpt ends right after the progress-bar update).
for entry in xrange(Chain.GetEntries()):

    progress(entry, Chain.GetEntries())
예제 #34
0
File: Data.py  Project: reimersa/LEAF
from Sample import *
from Storage import *

# data = {
# Registry of data samples as (name, Sample) pairs; a list preserves
# insertion order (the commented "data = {" above suggests a dict was the
# earlier design).
datalist = [
    ('DATA_Tau_B',
     Sample(type='DATA',
            name='DATA_Tau_B',
            # Analysis group this dataset belongs to, keyed by year.
            group=YearDependentContainer({'2017': 'DATA_Tau'}),
            # NANOAOD input location on DAS.
            nanopaths=YearDependentContainer({
                '2017':
                Storage_DAS('/Tau/Run2017B-UL2017_02Dec2019-v1/NANOAOD')
            }),
            # Produced tuples on the T3 PSI storage element.
            tuplepaths=YearDependentContainer({
                '2017':
                Storage_T3PSI(
                    '/store/user/areimers/UL17/Tuples_v02_NANOAOD/DATA_Tau_B')
            }),
            # No cross sections for real data.
            xsecs=YearDependentContainer(),
            xmlfiles=YearDependentContainer(
                {'2017': 'datasets/UL17/Recolevel/DATA_Tau_B.xml'}),
            nevents=YearDependentContainer({'2017': 38158216}))),
    ('DATA_Tau_C',
     Sample(type='DATA',
            name='DATA_Tau_C',
            group=YearDependentContainer({'2017': 'DATA_Tau'}),
            nanopaths=YearDependentContainer({
                '2017':
                Storage_DAS('/Tau/Run2017C-UL2017_02Dec2019-v1/NANOAOD')
            }),
            # NOTE(review): this excerpt is truncated inside this entry.
            tuplepaths=YearDependentContainer({
예제 #35
0
args = argParser.parse_args()
# NOTE(review): Python 2 script (print statement).
print 'Loading in samples'

# Integrated luminosity per data-taking year; presumably in /pb -- confirm.
if args.year == '2016':
    lumi = 35545.499064
elif args.year == '2017':
    lumi = 41859.4
else:
    lumi = 59970
#Load in samples

import Sample
# Closure-test input list by default; --isCheck switches to the base
# fake-rate input list.
if not args.isCheck:
    sampleList = Sample.createSampleList(
        '/user/lwezenbe/private/PhD/Code/TauStudy/FakeRate/ClosureTest/Data/inputFiles_'
        + args.year + '.conf')
else:
    sampleList = Sample.createSampleList(
        '/user/lwezenbe/private/PhD/Code/TauStudy/FakeRate/Data/inputFiles_' +
        args.year + '.conf')

#Get specific sample for this subjob
sample = Sample.getSampleFromList(sampleList, args.sampleName)

#Set output directory and filename
if args.inData:
    inData_Str = 'DATA'
else:
    inData_Str = 'MC'
# NOTE(review): excerpt is truncated mid-statement on the next line.
output_dir = output_dir + '/' + args.year + '/' + inData_Str + '/' + sample.output + '/' + sample.name + '_subJob_' + str(
예제 #36
0
    for branch in branches:
        Chain.SetBranchStatus(branch, 1)


# Command-line options for this subjob.
argParser = argparse.ArgumentParser(description="Argument parser")
for _name, _default in (('--sampleName', 'DYJetsToLL_M-10to50'),
                        ('--subJob', None),
                        ('--isTest', False)):
    argParser.add_argument(_name, action='store', default=_default)

args = argParser.parse_args()

# Resolve the requested sample from the shared input-file configuration.
sampleList = Sample.createSampleList(
    '/user/lwezenbe/private/PhD/Code/TauStudy/ReproduceAN2017_094/Data/inputFiles.conf'
)
sample = Sample.getSampleFromList(sampleList, args.sampleName)

print("\n Looping over all events in: " + sample.path)

# One histogram per configured variable, with sum-of-weights tracking.
listOfHist = [makeHist(cst.varInfo[var]) for var in xrange(cst.NumberOfVar)]
for _hist in listOfHist:
    _hist.Sumw2()

Chain = sample.initTree()

lIndex = zeros(cst.NumberOfLeptons, dtype=int)
예제 #37
0
# Here are some examples of what we can do with the tool

# Connect to the database
connection = pymysql.connect(
    host='student.computing.dcu.ie',
    user='******',
    password='******',
    db='la_dev',
    charset='utf8mb4',
    cursorclass=pymysql.cursors.DictCursor,
)

try:
    # make the connection and read every 'Exercise' record into a Sample
    with connection.cursor() as cur:
        data_sample = Sample()
        data_sample.sampleReadDBSample(cur, 'Exercise')

finally:
    # close the connection no matter what happened above
    connection.close()

# create and save a pie of the average score of the class
data_sample.samplePie(piename="AverageClassScore.png", title="Average Score of the Class")

# create and save a plot of the progression of the score
data_sample.sampleProgressionPlot(title="progression of the class")

# get the sample with student's name 0
# sample object
data_sample_name = Sample()
예제 #38
0
        Zp = self.sp.particles[:]['z']

        I_theta = np.zeros(nbStep)

        for i in range(nbStep):
            print(i)
            distPartDetec = np.sqrt((x_d[i] - Xp)**2 + (y_d[i] - Yp)**2 +
                                    (z_d - Zp)**2)
            I_theta[i] = np.abs(
                np.sum(
                    np.exp(1j * (kx * Xp + ky * Yp + self.k * distPartDetec))))

        #Avec un crystal, les longueurs d'ondes de la lumière sont bien trop grande pour avoir des angles de bragg.

        #angBragg = np.arcsin(self.laserWl / (2 * self.sp.d_Bragg)) * 180/np.pi
        print("angle Incidence %f", angIncidence)
        #print ("angle Bragg %f", angBragg)

        plt.plot(thetas * 180 / np.pi, I_theta)
        plt.show()


if __name__ == "__main__":
    #sp = Sample.Sample(nbParticle=10000, dimXYZ_micron=(1000,1000,1000), particleIniConfig="crystal")
    # Build a 1 mm^3 sample of 30000 randomly placed particles.
    sp = Sample.Sample(nbParticle=30000,
                       dimXYZ_micron=(1000, 1000, 1000),
                       particleIniConfig="random")
    #sp.testBrownianMotionNew(10000)
    scatTest = scatteringTests(sp)
    #scatTest.getIntensityMap()
    # Scan scattered intensity versus detector angle at normal incidence.
    scatTest.testThetaDependency(angIncidence=0)