Example 1
    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        from preprocessing import MeanNormZeroPad
        import numpy
        from stopwatch import stopwatch
        import c_meanNormZeroPad
        c_meanNormZeroPad.zeroPad()
        
        sw=stopwatch()
        swall=stopwatch()
        
        import ROOT
        
        fileTimeOut(filename,120) #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples=tree.GetEntries()
        
        print('took ', sw.getAndReset(), ' seconds for getting tree entries')
        
        
        # split for convolutional network
        
        x_global = MeanNormZeroPad(filename,TupleMeanStd,
                                   [self.branches[0]],
                                   [self.branchcutoffs[0]],self.nsamples)
        
        print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
        
        nparray = self.readTreeFromRootToTuple(filename)        
        if self.remove:
            notremoves=weighter.createNotRemoveIndices(nparray)
            undef=nparray['isUndefined']
            notremoves-=undef
            print('took ', sw.getAndReset(), ' to create remove indices')
        
        if self.weight:
            weights=weighter.getJetWeights(nparray)
        elif self.remove:
            weights=notremoves
        else:
            print('neither remove nor weight')
            weights=numpy.ones(self.nsamples)
        
        truthtuple =  nparray[self.truthclasses]
        alltruth=self.reduceTruth(truthtuple)

        if self.remove:
            print('remove')
            weights=weights[notremoves > 0]
            x_global=x_global[notremoves > 0]
            alltruth=alltruth[notremoves > 0]
                        
        newnsamp=x_global.shape[0]
        print('reduced content to ', int(float(newnsamp)/float(self.nsamples)*100),'%')
        self.nsamples = newnsamp
        
        self.w=[weights]
        self.x=[x_global]
        self.y=[alltruth]
Example 2
 def getFlavourClassificationData(self,filename,TupleMeanStd, weighter):
     from preprocessing import MeanNormZeroPad
     import numpy
     from stopwatch import stopwatch
     
     sw=stopwatch()
     swall=stopwatch()
     
     import ROOT
     
     fileTimeOut(filename,120) #give eos a minute to recover
     rfile = ROOT.TFile(filename)
     tree = rfile.Get(self.treename)
     self.nsamples=tree.GetEntries()
     
     #print('took ', sw.getAndReset(), ' seconds for getting tree entries')
 
     
     Tuple = self.readTreeFromRootToTuple(filename)
     
     
     x_all = MeanNormZeroPad(filename,TupleMeanStd,self.branches,self.branchcutoffs,self.nsamples)
     
     #print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
     
     notremoves=numpy.array([])
     weights=numpy.array([])
     if self.remove:
         notremoves=weighter.createNotRemoveIndices(Tuple)
         weights=notremoves
         #print('took ', sw.getAndReset(), ' to create remove indices')
     elif self.weight:
         #print('creating weights')
         weights= weighter.getJetWeights(Tuple)
     else:
         print('neither remove nor weight')
         weights=numpy.empty(self.nsamples)
         weights.fill(1.)
     
     
     
     truthtuple =  Tuple[self.truthclasses]
     #print(self.truthclasses)
     alltruth=self.reduceTruth(truthtuple)
     
     #print(alltruth.shape)
     if self.remove:
         #print('remove')
         weights=weights[notremoves > 0]
         x_all=x_all[notremoves > 0]
         alltruth=alltruth[notremoves > 0]
    
     newnsamp=x_all.shape[0]
     #print('reduced content to ', int(float(newnsamp)/float(self.nsamples)*100),'%')
     self.nsamples = newnsamp
     
     #print('took in total ', swall.getAndReset(),' seconds for conversion')
     
     return weights,x_all,alltruth, notremoves
Example 3
    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch
        
        sw=stopwatch()
        swall=stopwatch()
        
        import ROOT
        
        fileTimeOut(filename,120) #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples=tree.GetEntries()
        
        print('took ', sw.getAndReset(), ' seconds for getting tree entries')
        
        
        # split for convolutional network
        
        x_global = MeanNormZeroPad(
            filename,None,
            [self.branches[0]],
            [self.branchcutoffs[0]],self.nsamples
        )
        
        x_cpf = MeanNormZeroPadParticles(
            filename,None,
            self.branches[1],
            self.branchcutoffs[1],self.nsamples
        )
                
        x_sv = MeanNormZeroPadParticles(
            filename,None,
            self.branches[2],
            self.branchcutoffs[2],self.nsamples
        )
        
        print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
        
        npy_array = self.readTreeFromRootToTuple(filename)
        
        reg_truth=npy_array['gen_pt_WithNu'].view(numpy.ndarray)
        reco_pt=npy_array['jet_corr_pt'].view(numpy.ndarray)
        
        # per-jet regression target; a vectorised ratio is equivalent to the
        # original elementwise loop
        correctionfactor = reg_truth / reco_pt

        truthtuple =  npy_array[self.truthclasses]
        alltruth=self.reduceTruth(truthtuple)
        
        self.x=[x_global, x_cpf, x_sv, reco_pt]
        self.y=[alltruth,correctionfactor]
        self._normalize_input_(weighter, npy_array)
Example 4
import math
import time
import stopwatch

# `base` and `chars` are assumed to be module-level globals: the size of the
# candidate alphabet and the alphabet string itself.
def cracker(pasw):
    ti = time.time()
    guess = ''
    tests = 1
    c = 0
    m = 0

    while True:
        y = tests
        while True:
            c = y % base
            m = math.floor((y - c) / base)
            y = m
            guess = chars[(c - 1)] + guess
            if m == 0:
                break
        print(guess)
        if guess == pasw:
            tf = time.time()
            print('Got "{}" after {} tests {}'.format(
                guess, str(tests), stopwatch.stopwatch(ti, tf)))
            break
        else:
            tests += 1
            guess = ''
Example 5
 def __writeData(self,sample,means, weighter,outputDir,dataclass):
     import os
     import copy
     from stopwatch import stopwatch
     sw=stopwatch()
     td=copy.deepcopy(dataclass)
     
     fileTimeOut(sample,120) #once available copy to ram
     ramdisksample= '/dev/shm/'+str(os.getpid())+os.path.basename(sample)
     
     def removefile():
         os.system('rm -f '+ramdisksample)
     
     import atexit
     atexit.register(removefile)
     
     os.system('cp '+sample+' '+ramdisksample)
     try:
         td.readFromRootFile(ramdisksample,means, weighter) 
         newname=os.path.basename(sample).rsplit('.', 1)[0]
         newpath=os.path.abspath(outputDir+newname+'.z')
         td.writeOut(newpath)
         print('converted and written '+newname+'.z in ',sw.getAndReset(),' sec')
         self.samples.append(newname+'.z')
         self.nsamples+=td.nsamples
         self.sampleentries.append(td.nsamples)
         td.clear()
         self.writeToFile(outputDir+'/snapshot.dc')
     except Exception as e:
         removefile()
         raise e
     removefile()
Example 6
 def readFromRootFile(self,filename,TupleMeanStd, weighter):
     from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles,createDensityMap
     import numpy
     from stopwatch import stopwatch
     
     sw=stopwatch()
     swall=stopwatch()
     
     import ROOT
     
     fileTimeOut(filename,120) #give eos a minute to recover
     rfile = ROOT.TFile(filename)
     tree = rfile.Get("deepntuplizer/tree")
     self.nsamples=tree.GetEntries()
     
     
     x_ch = createDensityMap(filename,TupleMeanStd,
                                'Cpfcan_erel',
                                self.nsamples,
                                ['Cpfcan_eta','jet_eta',7,0.5],
                                ['Cpfcan_phi','jet_phi',7,0.5],
                                'nCpfcand',-1)
     x_neu = createDensityMap(filename,TupleMeanStd,
                                'Npfcan_erel',
                                self.nsamples,
                                ['Npfcan_eta','jet_eta',7,0.5],
                                ['Npfcan_phi','jet_phi',7,0.5],
                                'nNpfcand',-1)
     x_sv = createDensityMap(filename,TupleMeanStd,
                                'LooseIVF_sv_enratio',
                                self.nsamples,
                                ['LooseIVF_sv_eta','jet_eta',5,0.3],
                                ['LooseIVF_sv_phi','jet_phi',5,0.3],
                                'LooseIVF_nsv')
     
     for i in range(20):
         print(x_sv[i])
     
     self.w=[numpy.zeros(10)]
     self.x=[x_ch,x_neu]
     self.y=[numpy.zeros(10)]
     
     
     
     
Example 7
        def writeData_async(index, woq, wrlck):

            logger.info('async started')

            sw = stopwatch()
            td = self.dataclass()
            sample = self.sourceList[index]

            if self.batch_mode or self.no_copy_on_convert:
                tmpinput = sample

                def removefile():
                    pass
            else:
                tmpinput = tempstoragepath + '/' + str(
                    os.getpid()) + os.path.basename(sample)

                def removefile():
                    os.system('rm -f ' + tmpinput)

                import atexit
                atexit.register(removefile)

                logger.info('start cp')
                os_ret = os.system('cp ' + sample + ' ' + tmpinput)
                if os_ret:
                    raise Exception("copy to ramdisk not successful for " +
                                    sample)

            success = False
            out_samplename = ''
            out_sampleentries = 0
            sbasename = os.path.basename(sample)
            newname = sbasename[:sbasename.rfind('.')] + '.djctd'
            newpath = os.path.abspath(outputDir + newname)

            try:
                logger.info('convertFromSourceFile')
                td.writeFromSourceFile(tmpinput,
                                       self.weighterobjects,
                                       istraining=True,
                                       outname=newpath)
                print('converted and written ' + newname + ' in ',
                      sw.getAndReset(), ' sec -', index)

                out_samplename = newname
                out_sampleentries = 1
                success = True
                td.clear()
                removefile()
                woq.put((index, [success, out_samplename, out_sampleentries]))

            except:
                print('problem in ' + newname)
                removefile()
                woq.put((index, [False, out_samplename, out_sampleentries]))
                raise
Example 8
        def writeData_async(index, woq, wrlck):

            import copy
            from stopwatch import stopwatch
            sw = stopwatch()
            td = copy.deepcopy(self.dataclass)
            sample = self.originRoots[index]
            ramdisksample = tempstoragepath + '/' + str(
                os.getpid()) + os.path.basename(sample)

            def removefile():
                os.system('rm -f ' + ramdisksample)

            import atexit
            atexit.register(removefile)
            success = False
            out_samplename = ''
            out_sampleentries = 0
            newname = os.path.basename(sample).rsplit('.', 1)[0]
            newname += str(index)

            if usenewformat:
                newname += '.meta'
            else:
                newname += '.z'
            newpath = os.path.abspath(outputDir + newname)

            try:
                fileTimeOut(sample, 120)  #once available copy to ram
                os_ret = os.system('cp ' + sample + ' ' + ramdisksample)
                if os_ret:
                    raise Exception("copy to ramdisk not successful for " +
                                    sample)
                td.readFromRootFile(ramdisksample, self.means, self.weighter)
                #wrlck.acquire()
                td.writeOut(newpath)
                #wrlck.release()
                print('converted and written ' + newname + ' in ',
                      sw.getAndReset(), ' sec -', index)

                out_samplename = newname
                out_sampleentries = td.nsamples
                success = True
                td.clear()
                removefile()
                woq.put((index, [success, out_samplename, out_sampleentries]))

            except:
                print('problem in ' + newname)
                removefile()
                woq.put((index, [False, out_samplename, out_sampleentries]))
                raise
Example 9
    def __writeData(self, sample, outputDir):
        sw = stopwatch()
        td = copy.deepcopy(self.dataclass)

        fileTimeOut(sample, 120)  #once available copy to ram

        if self.batch_mode:
            tmpinput = sample

            def removefile():
                pass
        else:
            tmpinput = '/dev/shm/' + str(
                os.getpid()) + os.path.basename(sample)

            def removefile():
                os.system('rm -f ' + tmpinput)

            import atexit
            atexit.register(removefile)

            os_ret = os.system('cp ' + sample + ' ' + tmpinput)
            if os_ret:
                raise Exception("copy to ramdisk not successful for " + sample)

        try:
            td.readFromRootFile(tmpinput, self.means, self.weighter)
            sbasename = os.path.basename(sample)
            newname = sbasename[:sbasename.rfind('.')]

            if usenewformat:
                newname += '.meta'
            else:
                newname += '.z'
            newpath = os.path.abspath(outputDir + newname)
            td.writeOut(newpath)
            print('converted and written ' + newname + ' in ',
                  sw.getAndReset(), ' sec')
            self.samples.append(newname)
            self.nsamples += td.nsamples
            self.sampleentries.append(td.nsamples)
            td.clear()

            if not self.batch_mode:
                self.writeToFile(outputDir + '/snapshot.dc')

        finally:
            removefile()
Example 10
def test_algo(algo, data, record):
    # timing class and times data structure
    clock = stopwatch()
    clock.start()
    times = list()
    
    # test algorithm for data
    for a, b in data:
        clock.start()
        algo(a, b)
        times.append(clock.time())
    
    # find best of TAKE, find average, correct for nanoseconds, add to record
    times = sorted(times)[:TAKE // len(data)]
    average = sum(times) / len(times) / 1e9
    record[algo.__name__] = average
Example 11
    def __writeData(self, sample, outputDir):
        sw = stopwatch()
        td = self.dataclass()

        fileTimeOut(sample, 120)  #once available copy to ram

        sbasename = os.path.basename(sample)
        newname = sbasename[:sbasename.rfind('.')] + '.djctd'
        newpath = os.path.abspath(outputDir + newname)

        td.writeFromSourceFile(sample,
                               self.weighterobjects,
                               istraining=True,
                               outname=newpath)

        print('converted and written ' + newname + ' in ', sw.getAndReset(),
              ' sec')
        self.samples.append(newname)
        td.clear()

        if not self.batch_mode:
            self.writeToFile(outputDir + '/snapshot.djcdc')
Example 12
def prepAll(force=False):
    '''
    preprocesses all files stored properly on the drive, either mesa or shhs.
    The time taken and any errors are logged for testing purposes. Optionally,
    already completed files can be re-processed.
    '''
    log, clock = get_log('Preprocessing', echo=True), stopwatch()
    filenames = fs.getAllSubjectFilenames(preprocessed=False)

    # determines already completed files
    oldFiles = fs.getAllSubjectFilenames(preprocessed=True)
    if not force:
        filenames = [fn for fn in filenames if fn not in oldFiles]
        log.print('Files already completed:   {0}'.format(len(oldFiles)))
        log.print('Files remaining:           {0}'.format(len(filenames)))
        if (len(oldFiles) > 0):
            log.printHL()
            for fn in oldFiles:
                log.print('{0} already completed'.format(fn))
    else:
        log.print('Files re-preprocessing:    {0}'.format(len(oldFiles)))
        log.print('Files remaining:           {0}'.format(len(filenames)))
    log.printHL()

    # processes each file in a try/except loop in case of errors in single files.
    clock.round()
    for i, filename in enumerate(filenames):
        try:
            subject = fs.Subject(filename=filename)
            X, y = preprocess(subject)
            fs.write_csv(filename, X, y)
            log.print('{0} preprocessed in {1}s'.format(
                filename, clock.round()))
        except Exception as e:
            log.print('{0} Exception: {1}'.format(filename, str(e)))
            clock.round()
    clock.stop()
Example 13
import fibonacci.fibonacci_cpp as cpp
import fibonacci.fibonacci_python as python
import fibonacci.fibonacci_strings as strings

from tqdm import trange
from stopwatch import stopwatch
from scipy.optimize import curve_fit
import pandas as pd
import matplotlib.pyplot as plt

CAP = 1e9
TRIALS = 1
MAX_I = 10000

df = pd.DataFrame()
clock = stopwatch()
universe = cpp.algorithms + python.algorithms + strings.algorithms
keys = [algo.__name__ + '_cpp' for algo in cpp.algorithms] + \
       [algo.__name__ + '_python' for algo in python.algorithms] + \
       [algo.__name__ + '_string' for algo in strings.algorithms]
skip_algo = [False] * len(universe)
for i in trange(MAX_I):
    record = pd.Series(name=i)
    f = [python.FibMatrix(i)]
    f.append(str(f[0]))
    for num, (algo, skip, key) in enumerate(zip(universe, skip_algo, keys)):
        if skip: continue
        try:
            clock.start()
            for _ in range(TRIALS):
                ans = algo(i)
Example 14
import time
import stopwatch


def _main():

    w = stopwatch.stopwatch()
    time.sleep(3.123)
    print w
Example 15
    def readFromRootFile(self, filename, TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch

        sw = stopwatch()
        swall = stopwatch()

        import ROOT

        fileTimeOut(filename, 120)  #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples = tree.GetEntries()

        print('took ', sw.getAndReset(), ' seconds for getting tree entries')

        # split for convolutional network

        x_global = MeanNormZeroPad(filename, TupleMeanStd, [self.branches[0]],
                                   [self.branchcutoffs[0]], self.nsamples)

        x_cpf = MeanNormZeroPadParticles(filename, TupleMeanStd,
                                         self.branches[1],
                                         self.branchcutoffs[1], self.nsamples)

        x_npf = MeanNormZeroPadParticles(filename, TupleMeanStd,
                                         self.branches[2],
                                         self.branchcutoffs[2], self.nsamples)

        x_sv = MeanNormZeroPadParticles(filename, TupleMeanStd,
                                        self.branches[3],
                                        self.branchcutoffs[3], self.nsamples)

        x_reg = MeanNormZeroPad(filename, TupleMeanStd, [self.branches[4]],
                                [self.branchcutoffs[4]], self.nsamples)

        print('took ', sw.getAndReset(),
              ' seconds for mean norm and zero padding (C module)')

        Tuple = self.readTreeFromRootToTuple(filename)

        reg_truth = Tuple['gen_pt_WithNu'].view(numpy.ndarray)
        reco_pt = Tuple['jet_corr_pt'].view(numpy.ndarray)

        # per-jet regression target; a vectorised ratio is equivalent to the
        # original elementwise loop
        correctionfactor = reg_truth / reco_pt

        if self.remove:
            notremoves = weighter.createNotRemoveIndices(Tuple)
            undef = Tuple['isUndefined']
            notremoves -= undef
            print('took ', sw.getAndReset(), ' to create remove indices')

        if self.weight:
            weights = weighter.getJetWeights(Tuple)
        elif self.remove:
            weights = notremoves
        else:
            print('neither remove nor weight')
            weights = numpy.empty(self.nsamples)
            weights.fill(1.)

        truthtuple = Tuple[self.truthclasses]
        #print(self.truthclasses)
        alltruth = self.reduceTruth(truthtuple)

        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights = weights[notremoves > 0]
            x_global = x_global[notremoves > 0]
            x_cpf = x_cpf[notremoves > 0]
            x_npf = x_npf[notremoves > 0]
            x_sv = x_sv[notremoves > 0]
            alltruth = alltruth[notremoves > 0]

            x_reg = x_reg[notremoves > 0]
            correctionfactor = correctionfactor[notremoves > 0]

        newnsamp = x_global.shape[0]
        print('reduced content to ',
              int(float(newnsamp) / float(self.nsamples) * 100), '%')
        self.nsamples = newnsamp

        print(x_global.shape, self.nsamples)

        self.w = [weights, weights]
        self.x = [x_global, x_cpf, x_npf, x_sv, x_reg]
        self.y = [alltruth, correctionfactor]
Example 16
        def writeData_async(index, woq, wrlck):

            logger.info('async started')

            sw = stopwatch()
            td = copy.deepcopy(self.dataclass)
            sample = self.originRoots[index]

            if self.batch_mode:
                tmpinput = sample

                def removefile():
                    pass
            else:
                tmpinput = tempstoragepath + '/' + str(
                    os.getpid()) + os.path.basename(sample)

                def removefile():
                    os.system('rm -f ' + tmpinput)

                import atexit
                atexit.register(removefile)

                logger.info('start cp')
                os_ret = os.system('cp ' + sample + ' ' + tmpinput)
                if os_ret:
                    raise Exception("copy to ramdisk not successful for " +
                                    sample)

            success = False
            out_samplename = ''
            out_sampleentries = 0
            sbasename = os.path.basename(sample)
            newname = sbasename[:sbasename.rfind('.')]
            if usenewformat:
                newname += '.meta'
            else:
                newname += '.z'
            newpath = os.path.abspath(outputDir + newname)

            try:
                logger.info('readFromRootFile')
                td.readFromRootFile(tmpinput, self.means, self.weighter)
                logger.info('writeOut')
                #wrlck.acquire()
                td.writeOut(newpath)
                #wrlck.release()
                print('converted and written ' + newname + ' in ',
                      sw.getAndReset(), ' sec -', index)

                out_samplename = newname
                out_sampleentries = td.nsamples
                success = True
                td.clear()
                removefile()
                woq.put((index, [success, out_samplename, out_sampleentries]))

            except:
                print('problem in ' + newname)
                removefile()
                woq.put((index, [False, out_samplename, out_sampleentries]))
                raise
Example 17
 def readFromRootFile(self,filename,TupleMeanStd, weighter):
     
     from preprocessing import MeanNormZeroPad, MeanNormZeroPadParticles
     import numpy
     from stopwatch import stopwatch
     
     sw=stopwatch()
     swall=stopwatch()
     
     import ROOT
     
     fileTimeOut(filename,120) #give eos two minutes to recover
     rfile = ROOT.TFile(filename)
     tree = rfile.Get(self.treename)
     self.nsamples=tree.GetEntries()
     Tuple = self.readTreeFromRootToTuple(filename)
     
     ###########################################################################################
     ############ this is where you define how to read in the branches and what to do with them
     ###########################################################################################
     
     
     
     
     ############ MeanNormZeroPad means that all branches are just put into a serial list
     ############ such as: jet1_pt, jet1_eta, jet2_pt, jet2_eta, ...
     ############ if there are not sufficient jets, the rest of the list is filled
     ############ with zeros (zero padding)
     ############ In addition, the variables are transformed such that they are centred around
     ############ zero and the width of the distribution is about 1.
     ############ This is only a technical trick that makes it easier for the DNN to converge
     reco_global = MeanNormZeroPad(filename,TupleMeanStd,
                                self.branches,
                                self.branchcutoffs,self.nsamples)
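     
     ############ A minimal sketch of the resulting layout (an added illustration,
     ############ assuming two input branches jet_pt and jet_eta and a cut-off of
     ############ two jets): an event containing a single jet is serialised as
     ############ [x(jet1_pt), x(jet1_eta), 0., 0.], where x(v) = (v - mean)/std
     ############ and the trailing zeros are the padding.
     #example_event = [x(jet1_pt), x(jet1_eta), 0., 0.]   # hypothetical names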
     
     ############ Another choice for the preprocessing that will be important for you is
     ############ MeanNormZeroPadParticles. It does the same rescaling as MeanNormZeroPad, 
     ############ but organises the array as a 2D array per event. Such that e.g. each 
     ############ jet has its own list. This can be important when e.g. using more
     ############ evolved neural networks than just dense layers. We will come to this later,
     ############ however, I put an example already here (but commented)
     #reco_jetslist = MeanNormZeroPadParticles(filename,TupleMeanStd,
     #                           self.branches[3],      # the jet branches (see function above)
     #                           self.branchcutoffs[3], # the jet branch cut-offs (maximum six) as defined above
     #                           self.nsamples)
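     
     ############ A minimal sketch of the 2D layout (same assumptions as above,
     ############ cut-off of three jets): MeanNormZeroPadParticles keeps one
     ############ rescaled list per jet and pads missing jets with all-zero rows.
     #example_event = [[x(jet1_pt), x(jet1_eta)],
     #                 [x(jet2_pt), x(jet2_eta)],
     #                 [0.,         0.        ]]   # third jet absent -> zero row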
     
     
     ############ Here we read the branch that contains the truth information
     truth = Tuple['gen_mttbar']
     
     
     
     oldlength=self.nsamples
     if self.remove:
         notremoves=weighter.createNotRemoveIndices(Tuple)
         # this has to be done for each array produced before
         # don't forget!
         # it selects only the entries from the array that should not be removed
         # (where the notremoves array has an entry above 0)
         reco_global=reco_global[notremoves > 0]
         truth=truth[notremoves > 0]
         
         
         print("kept "+str(int(float(self.nsamples)/float(oldlength))*100)+"%" )
         
     # we don't use weights for now, so we fill the weight array with 1;
     # nsamples is updated first so the weight array matches the filtered length
     self.nsamples=truth.shape[0]
     weights=numpy.empty(self.nsamples)
     weights.fill(1.)
     
     
     # any array that should be used by the DNN needs to be added here
     # w: these are the weights (you don't have to change this)
     # x: this is the reconstructed information to fill
     # y: the true information
     self.w=[weights]
     self.x=[reco_global]
     self.y=[truth]
Example 18
#!/usr/bin/python
# -*- coding: utf-8 -*-

# Copyright (c) 2011-2012 Martin Beran ([email protected])
# license MIT/X11 (read more in the file LICENSE)

# Homepage: http://www.pywlibs.net/
# Source code: https://github.com/berycz/pywlibs
# Documentation: http://www.pywlibs.net/docs/xhtml


import stopwatch

sw = stopwatch.stopwatch(dplaces=6)


FAST_XHTML = False
DOCTYPE = 'XHTML 1.0 Transitional'  # 'HTML 4.01 Transitional'
MINIMIZE = False

# Make instance of class xhtml.Xhtml
import xhtml
if FAST_XHTML:
  X = xhtml.FastXhtml(DOCTYPE, MINIMIZE)
else:
  X = xhtml.Xhtml(DOCTYPE, MINIMIZE)

print '----- Main elements (tags), comments, etc -----'
print X.doctype()
print X.comment()
print X.comment('html comment')
Example 19
else:
    zeta_sp = [0.5, 0]
    zeta_t = [0, 0]
    zeta_a_lims = [-1, 1]
    n = 100
    figname = 'appendix_4_posteriors_skewed'


def D_func(x):
    return D_max - (D_max - D_min) * np.abs(x - 0.5) * 2.0


# %% Calculate posteriors
zax_mesh = np.linspace(zeta_a_lims[0], zeta_a_lims[1], num=steps)
p_zax = {}  # = np.zeros(steps) * np.nan
with stopwatch():
    for lamb in lamb_list:
        p_zax[lamb] = np.zeros(steps) * np.nan
        for i, zax in enumerate(tqdm(zax_mesh)):
            p_zax[lamb][i] = calculate_one_1D_posterior_in_2D(
                zeta_a=zax, zeta_t=zeta_t, zeta_sp=zeta_sp, n=n, V=V, V_pi=V_pi, loc_error=loc_error, lamb=lamb, axis=axis)

# Calculate the prior
lamb = 'prior'
p_zax[lamb] = np.zeros(steps) * np.nan
for i, zax in enumerate(tqdm(zax_mesh)):
    p_zax[lamb][i] = calculate_one_1D_prior_in_2D(
        zeta_a=zax, V_pi=V_pi, loc_error=loc_error)


# lg_BM = calculate_bayes_factors(zeta_ts=np.array([zeta_t]), zeta_sps=np.array([zeta_sp]), ns=[n],
Example 20
    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch
        
        sw=stopwatch()
        swall=stopwatch()
        
        import ROOT
        
        fileTimeOut(filename,120) #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples=tree.GetEntries()
        
        print('took ', sw.getAndReset(), ' seconds for getting tree entries')
        
        
        # split for convolutional network
        
        x_global = MeanNormZeroPad(filename,TupleMeanStd,
                                   [self.branches[0]],
                                   [self.branchcutoffs[0]],self.nsamples)
        
        
        x_a = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[1],
                                   self.branchcutoffs[1],self.nsamples)
        
        x_b = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[2],
                                   self.branchcutoffs[2],self.nsamples)
        
        
        
        
        
        print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
        
        Tuple = self.readTreeFromRootToTuple(filename)
        
        if self.remove:
            notremoves=weighter.createNotRemoveIndices(Tuple)
        
            print('took ', sw.getAndReset(), ' to create remove indices')
        
        if self.weight:
            weights=weighter.getJetWeights(Tuple)
        elif self.remove:
            weights=notremoves
        else:
            print('neither remove nor weight')
            weights=numpy.empty(self.nsamples)
            weights.fill(1.)
        
        
        truthtuple =  Tuple[self.truthclasses]
        #print(self.truthclasses)
        alltruth=self.reduceTruth(truthtuple)
        
        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights=weights[notremoves > 0]
            x_global=x_global[notremoves > 0]
            x_a=x_a[notremoves > 0]
            x_b=x_b[notremoves > 0]
            alltruth=alltruth[notremoves > 0]
       
        newnsamp=x_global.shape[0]
        print('reduced content to ', int(float(newnsamp)/float(self.nsamples)*100),'%')
        self.nsamples = newnsamp
        
        print(x_global.shape,self.nsamples)

        self.w=[weights]
        self.x=[x_global,x_a,x_b]
        self.y=[alltruth]
Example 21
from stopwatch import stopwatch
import random

a_dict = {1: "þú", 2: "mamma", 3: "pabbi", 4: "Tommi", 5: "TAMAMAMAMMA"}

a = stopwatch()

if "mamma" in a_dict:
    print("mamma")

# for elements in a_list:

#     secret_globes = 0
#     while secret_globes != 9:
#         secret_globes += 1

a.stop()

print(a)
        print("Tessellation completed in % .1f s" % (time.time() - t0))

        # Save the new mesh into the .rwa file
        rwa_data = Analyses(trajectory)
        rwa_data.add(mesh, label=label)
        save_rwa(rwa_fullpath, rwa_data, force=True)
    else:
        print("Loaded a previously calculated tessellation mesh")

    equal_tree = rwa_data[label]
    equal_tree_distr = distributed(equal_tree.data)
    cells = rwa_data[label].data
    cells_len = cells.location_count.size

    # Calculate the Bayes factors
    with stopwatch('Bayes factor calculations'):
        calculate(rwa_fullpath, results_folder=None, bl_produce_maps=False,
                  snr_label='snr', sigma=sigma)

    # Get the calculated Bayes factors
    rwa_data = load_rwa(rwa_fullpath)
    log10_Bs = rwa_data[label]['snr']['bayes_factor'].data['lg_B']['lg_B'].copy()

    # Mark surface cells, where the results may not be accurate
    surface_cells = get_exterior_cells(cells)
    cells.tessellation.cell_label = np.ones(cells_len, dtype=bool)
    cells.tessellation.cell_label[surface_cells] = False
    log10_Bs[surface_cells] = np.nan

    # Save to structures
    log10_Bs_combined.append(log10_Bs)
Example 23
    def readFromRootFile(self, filename, TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, createCountMap, createDensity, MeanNormZeroPad, createDensityMap, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch

        sw = stopwatch()
        swall = stopwatch()

        import ROOT

        fileTimeOut(filename, 120)  #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples = tree.GetEntries()

        print('took ', sw.getAndReset(), ' seconds for getting tree entries')

        # split for convolutional network

        x_global = MeanNormZeroPad(filename, TupleMeanStd, [self.branches[0]],
                                   [self.branchcutoffs[0]], self.nsamples)

        x_cpf = MeanNormZeroPadParticles(filename, TupleMeanStd,
                                         self.branches[1],
                                         self.branchcutoffs[1], self.nsamples)

        x_npf = MeanNormZeroPadParticles(filename, TupleMeanStd,
                                         self.branches[2],
                                         self.branchcutoffs[2], self.nsamples)

        x_sv = MeanNormZeroPadParticles(filename, TupleMeanStd,
                                        self.branches[3],
                                        self.branchcutoffs[3], self.nsamples)

        #here the difference starts
        nbins = 8

        x_chmap = createDensity(
            filename,
            inbranches=['Cpfcan_ptrel', 'Cpfcan_etarel', 'Cpfcan_phirel'],
            modes=['sum', 'average', 'average'],
            nevents=self.nsamples,
            dimension1=['Cpfcan_eta', 'jet_eta', nbins, 0.45],
            dimension2=['Cpfcan_phi', 'jet_phi', nbins, 0.45],
            counterbranch='nCpfcand',
            offsets=[-1, -0.5, -0.5])

        x_neumap = createDensity(
            filename,
            inbranches=['Npfcan_ptrel', 'Npfcan_etarel', 'Npfcan_phirel'],
            modes=['sum', 'average', 'average'],
            nevents=self.nsamples,
            dimension1=['Npfcan_eta', 'jet_eta', nbins, 0.45],
            dimension2=['Npfcan_phi', 'jet_phi', nbins, 0.45],
            counterbranch='nNpfcand',  # count the neutral candidates
            offsets=[-1, -0.5, -0.5])

        x_chcount = createCountMap(filename, TupleMeanStd, self.nsamples,
                                   ['Cpfcan_eta', 'jet_eta', nbins, 0.45],
                                   ['Cpfcan_phi', 'jet_phi', nbins, 0.45],
                                   'nCpfcand')

        x_neucount = createCountMap(filename, TupleMeanStd, self.nsamples,
                                    ['Npfcan_eta', 'jet_eta', nbins, 0.45],
                                    ['Npfcan_phi', 'jet_phi', nbins, 0.45],
                                    'nNpfcand')

        print('took ', sw.getAndReset(),
              ' seconds for mean norm and zero padding (C module)')

        Tuple = self.readTreeFromRootToTuple(filename)

        if self.remove:
            notremoves = weighter.createNotRemoveIndices(Tuple)
            undef = Tuple['isUndefined']
            notremoves -= undef
            print('took ', sw.getAndReset(), ' to create remove indices')

        if self.weight:
            weights = weighter.getJetWeights(Tuple)
        elif self.remove:
            weights = notremoves
        else:
            print('neither remove nor weight')
            weights = numpy.empty(self.nsamples)
            weights.fill(1.)

        truthtuple = Tuple[self.truthclasses]
        #print(self.truthclasses)
        alltruth = self.reduceTruth(truthtuple)

        regtruth = Tuple['gen_pt_WithNu']
        regreco = Tuple['jet_corr_pt']

        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights = weights[notremoves > 0]
            x_global = x_global[notremoves > 0]
            x_cpf = x_cpf[notremoves > 0]
            x_npf = x_npf[notremoves > 0]
            x_sv = x_sv[notremoves > 0]

            x_chmap = x_chmap[notremoves > 0]
            x_neumap = x_neumap[notremoves > 0]

            x_chcount = x_chcount[notremoves > 0]
            x_neucount = x_neucount[notremoves > 0]

            alltruth = alltruth[notremoves > 0]

            regreco = regreco[notremoves > 0]
            regtruth = regtruth[notremoves > 0]

        newnsamp = x_global.shape[0]
        print('reduced content to ',
              int(float(newnsamp) / float(self.nsamples) * 100), '%')
        self.nsamples = newnsamp

        x_map = numpy.concatenate((x_chmap, x_neumap, x_chcount, x_neucount),
                                  axis=3)

        self.w = [weights, weights]
        self.x = [x_global, x_cpf, x_npf, x_sv, x_map, regreco]
        self.y = [alltruth, regtruth]
Example 24
def simulate_a_free_hookean_dumbbell(parameters,
                                     plot=False,
                                     recalculate=False,
                                     save_figure=False,
                                     file=r'.\trajectory.dat',
                                     seed=None,
                                     verbose=False,
                                     show=True):
    """
    Simulate the trajectories of two particles connected by 1 spring and only along the x axis.
    Units of measurements:
    D1, D2 --- um^2/s,
    gamma --- kg/s,
    k12 --- kg/s^2,
    T --- s,
    angle --- angle of the bond between the particles in the lab system, measured counterclockwise (in radians).

    Input:
    parameters --- a dictionary that must contain D1, D2, n1, n2, n12, M, dt and L0
    """

    # Load parameters
    # hash_sequence =
    D1, D2, n1, n2, n12, M, dt, L0 = [
        parameters[key] for key in 'D1 D2 n1 n2 n12 M dt L0'.split()
    ]
    N = M

    # % Constants
    # kB = 1.38e-11  # kg*um^2/s^2/K
    atol = 1e-16
    rtol = 1e-6
    alpha = 1  # the degree of implicitness
    bl_loaded = False
    min_dt_factor = 100  # Make sure dt used in simulation is smaller than system time scale by
    # at least this factor
    # max_terms = 100
    # min_N = int(1e4)

    if seed is not None:
        np.random.seed(seed)
    else:
        np.random.seed()

    phi0 = np.random.uniform(0, 2 * np.pi)
    R0 = np.array([
        -L0 * np.cos(phi0), -L0 * np.sin(phi0), L0 * np.cos(phi0),
        L0 * np.sin(phi0)
    ]) / 2

    # Calculate on a smaller mesh if necessary
    max_eta = 1e-2
    min_l0 = 20
    N_intermediate_points = 1

    l0 = L0 / np.sqrt(4 * np.max([D1, D2]) * dt)
    if l0 < min_l0:
        N_intermediate_points = int(np.ceil(min_l0**2 / l0**2))

    eta = n12 * dt  # defined unconditionally; also used by the verbose printout below
    if n12 > 0:
        if eta > max_eta:
            N_intermediate_points = np.max(
                [N_intermediate_points,
                 int(np.ceil(eta / max_eta))])
    # print(l0, eta)

    # max_dt = np.min(time_scales) / min_dt_factor
    # print('Time scales / dt: ', time_scales / dt)
    # print('dt: {0:.2g}'.format(dt))
    true_dt = dt
    if N_intermediate_points > 1:
        # N_intermediate_points = int(np.ceil(dt / max_dt))
        dt = dt / N_intermediate_points
        # bl_rescaled = True
        # N *= N_intermediate_points
        if verbose:
            print(
                f'For the accuracy of the simulations, time step reduced by the factor of '
                f'{N_intermediate_points} from {true_dt:.2g} to {dt:.2g}')
    else:
        N_intermediate_points = 1

    if verbose:
        print(f'l0 = {l0:g}, eta = {eta:g}')

    # n1, n2, n12 = np.array([k1, k2, k12]) / gamma

    # # Hash the parameters to be able to reload
    hash = None
    # hash, _ = hash_from_dictionary(dim=2, true_parameters=true_parameters)
    #
    # # Reload if requested
    # if not recalculate:
    #     dict_data, loaded = load_data(hash)
    #     # print('sim', dict_data, loaded)
    #     # return
    #     if loaded:
    #         t, R, dR = [dict_data[key] for key in 't R dR'.split()]
    #
    #         # Plot
    #         if plot:
    #             plot_trajectories(R, save=save_figure)
    #
    #         # print(f'Trajectories reloaded. Hash: {hash}')
    #         return t, R, dR, hash

    # A = np.array([[-n1 - n12, 0, n12, 0],
    #               [0, -n1, 0, 0],
    #               [n12, 0, -n2 - n12, 0],
    #               [0, 0, 0, -n2]], dtype=np.float64)
    # a = L0 * np.array([[-n12, 0, n2 + n12, 0]]).transpose()
    # b = np.diag(np.sqrt([2 * D1, 2 * D1, 2 * D2, 2 * D2]))
    # print(b)
    b = np.transpose(np.sqrt(2 * np.array([[D1, D1, D2, D2]])))

    # lambdas, U = np.linalg.eig(A)
    # Um1 = np.linalg.inv(U)
    # diag = np.diag(lambdas)
    # # Am1 = np.linalg.inv(A)
    # # print('lambs = ', lambdas)
    # # print('Um1 @ A @ U = ', Um1 @ A @ U)

    # Choose dt and N (number of jumps)
    # dt = 1e-2
    # N = np.ceil(T / dt).astype(int)

    # T = dt * N
    t = np.arange(N + 1) * true_dt

    # R0 = np.transpose([np.hstack([r10, r20])])
    R = np.zeros((4, N + 1)) * np.nan
    R[:, 0] = R0

    # print(f'R array size is {sys.getsizeof(R) / 2 ** 20} MB')

    # Q0 = R0 + Am1 @ a
    # print('R0', R0)
    # print('R init', R)

    def a(_R):
        out = np.empty((4, 1))
        R_diff = _R[2:] - _R[:2]  # R2 - R1
        out[:2, :] = R_diff
        out[2:, :] = -R_diff
        L = np.linalg.norm(R_diff)
        out = out * n12 * (L - L0) / L
        return out

    # Generate noise

    def equation(R_next, dW, R_current):
        R_next = np.reshape(R_next, (-1, 1))
        eqn = R_next - (R_current + (alpha * a(R_next) +
                                     (1 - alpha) * a(R_current)) * dt + b * dW)
        return np.reshape(eqn, -1)

    # Iterate over steps
    R_next = R0.reshape((4, 1))
    with stopwatch('Simulation'):
        for i in trange(N, desc='Simulating'):
            for j in range(N_intermediate_points):
                # Solve the equation
                # print('a', R[:, i, np.newaxis])
                # print('g', np.shape(R[:, i, np.newaxis]))
                dW = np.random.normal(0, np.sqrt(dt), size=(4, 1))
                sol = root(equation, R_next, args=(dW, R_next))
                # print(sol)
                R_next = np.reshape(sol.x, (4, 1))

            R[:, i + 1] = R_next[:, 0]

    # # print('R not rotated', R)
    # # Rotate the result if necessary. Q is the rotation matrix
    # # phi = angle / 180 * np.pi
    # phi = angle
    # # print('phi', phi)
    # # raise RuntimeError('stop')
    # Q = np.array([[cos(phi), - sin(phi), 0, 0],
    #               [sin(phi), cos(phi), 0, 0],
    #               [0, 0, cos(phi), -sin(phi)],
    #               [0, 0, sin(phi), cos(phi)]
    #               ])
    # # print(Q)
    # # print(R[:, 0])
    #
    # R_rotated = R.copy()
    # if angle:
    #     for i in range(N + 1):
    #         # print(Q @ R[:, i, None])
    #         R_rotated[:, i, None] = Q @ R[:, i, None]
    #
    # R = R_rotated
    # # print(R[:, 0])

    # # Reorder components to [x1, y1, x2, y2]
    # new_R = R[(0, 2, 1, 3), :]
    # print('new_R', new_R)

    # # Resample to the original time step
    # R = R[:, ::N_intermediate_points]
    # t = t[::N_intermediate_points]
    print('Calculated number of points: ', np.shape(R)[1])

    #  Displacements
    dR = R[:, 1:] - R[:, :-1]
    # print(f'R array size is {sys.getsizeof(R) / 2 ** 20} MB')

    # Save the trajectories and simulation parameters
    # print(t, R, dR)
    dict_data = {'t': t, 'R': R, 'dR': dR, **parameters}
    # print('full dict', dict_data)
    # save_data(dict_data=dict_data, hash=hash)

    # print('R: ', R)

    if plot:
        plot_trajectories(t, R, dR, parameters, save=save_figure, show=show)
    #
    # return (t, X, dX, Y, dY)
    return t, R, dR, hash
Example 25
    def readFromRootFile(self, filename, TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, createDensityMap, createCountMap, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch

        sw = stopwatch()
        swall = stopwatch()

        import ROOT

        fileTimeOut(filename, 120)  #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples = tree.GetEntries()

        print('took ', sw.getAndReset(), ' seconds for getting tree entries')

        # split for convolutional network

        x_global = MeanNormZeroPad(filename, TupleMeanStd, [self.branches[0]],
                                   [self.branchcutoffs[0]], self.nsamples)

        #here the difference starts
        x_chmap = createDensityMap(filename,
                                   TupleMeanStd,
                                   'Cpfcan_ptrel',
                                   self.nsamples,
                                   ['Cpfcan_eta', 'jet_eta', 20, 0.5],
                                   ['Cpfcan_phi', 'jet_phi', 20, 0.5],
                                   'nCpfcand',
                                   -1,
                                   weightbranch='Cpfcan_puppiw')

        x_chcount = createCountMap(filename, TupleMeanStd, self.nsamples,
                                   ['Cpfcan_eta', 'jet_eta', 20, 0.5],
                                   ['Cpfcan_phi', 'jet_phi', 20, 0.5],
                                   'nCpfcand')

        x_neumap = createDensityMap(filename,
                                    TupleMeanStd,
                                    'Npfcan_ptrel',
                                    self.nsamples,
                                    ['Npfcan_eta', 'jet_eta', 20, 0.5],
                                    ['Npfcan_phi', 'jet_phi', 20, 0.5],
                                    'nNpfcand',
                                    -1,
                                    weightbranch='Npfcan_puppiw')

        x_neucount = createCountMap(filename, TupleMeanStd, self.nsamples,
                                    ['Npfcan_eta', 'jet_eta', 20, 0.5],
                                    ['Npfcan_phi', 'jet_phi', 20, 0.5],
                                    'nNpfcand')

        print('took ', sw.getAndReset(),
              ' seconds for mean norm and zero padding (C module)')

        Tuple = self.readTreeFromRootToTuple(filename)

        if self.remove:
            notremoves = weighter.createNotRemoveIndices(Tuple)
            undef = Tuple['isUndefined']
            notremoves -= undef
            print('took ', sw.getAndReset(), ' to create remove indices')

        if self.weight:
            weights = weighter.getJetWeights(Tuple)
        elif self.remove:
            weights = notremoves
        else:
            print('neither remove nor weight')
            weights = numpy.ones(self.nsamples)

        pttruth = Tuple[self.regtruth]
        ptreco = Tuple[self.regreco]

        truthtuple = Tuple[self.truthclasses]
        #print(self.truthclasses)
        alltruth = self.reduceTruth(truthtuple)

        x_map = numpy.concatenate((x_chmap, x_chcount, x_neumap, x_neucount),
                                  axis=3)

        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights = weights[notremoves > 0]
            x_global = x_global[notremoves > 0]
            x_map = x_map[notremoves > 0]
            alltruth = alltruth[notremoves > 0]
            pttruth = pttruth[notremoves > 0]
            ptreco = ptreco[notremoves > 0]

        newnsamp = x_global.shape[0]
        print('reduced content to ',
              int(float(newnsamp) / float(self.nsamples) * 100), '%')
        self.nsamples = newnsamp
        print(x_global.shape, self.nsamples)

        self.w = [weights]
        self.x = [x_global, x_map, ptreco]
        self.y = [alltruth, pttruth]
Example 26
cy = ball1_pos[1]
cy2 = ball2_pos[1]
vfa = 0
vfr = 0
ang1 = 0
choque = [0, 0]
choque2 = [0, 0]
liner_pos = [0, ball2_pos[1]]
lineb_pos = [0, ball1_pos[1]]
ang1m = 0.0
ang2m = 0.0
masa_a = 5.0 # kg
masa_r = 5.0 # kg
e = 1.0
teta2 = 0
reloj = stopwatch()
reloj.crear_timer()
bounds = False
reloj_roja = stopwatch()
reloj_roja.crear_timer()
tiempo_colision = ""
font_face = 'Arial'
color1 = "#2672F4"
color2 = "#FB3E3E"
color3 = "#54340F"

def draw(c):
    # method responsible for drawing the simulation in the window
    
    global choque, choque2, tiempo_colision
    tipo = ""
Example 27
import signal
from stopwatch import stopwatch

def handler(signum, frame):
	print "Forever is over! hehe"
	raise Exception("end of time")

def loop_forever():
	a = 0
	import time
	while 1:
		a = a + 1
		print (a)
		time.sleep(1)


if __name__ == '__main__':
	signal.signal(signal.SIGALRM, handler)
	signal.alarm(10)
	try:
		stopwatch(15)
	except Exception, exc:
		print exc
Example 28
    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch
        
        sw=stopwatch()
        swall=stopwatch()
        
        import ROOT
        
        fileTimeOut(filename,120) #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples=tree.GetEntries()
        
        print('took ', sw.getAndReset(), ' seconds for getting tree entries')
        
        
        # split for convolutional network
        
        x_global = MeanNormZeroPad(filename,TupleMeanStd,
                                   [self.branches[0]],
                                   [self.branchcutoffs[0]],self.nsamples)
        
        x_cpf = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[1],
                                   self.branchcutoffs[1],self.nsamples)
        
        x_npf = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[2],
                                   self.branchcutoffs[2],self.nsamples)
        
        x_sv = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[3],
                                   self.branchcutoffs[3],self.nsamples)
        
        
        
        print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
        
        Tuple = self.readTreeFromRootToTuple(filename)
        
        if self.remove:
            notremoves=weighter.createNotRemoveIndices(Tuple)
            undef=Tuple['isUndefined']
            notremoves-=undef
            print('took ', sw.getAndReset(), ' to create remove indices')
        
        if self.weight:
            weights=weighter.getJetWeights(Tuple)
        elif self.remove:
            weights=notremoves
        else:
            print('neither remove nor weight')
            weights=numpy.empty(self.nsamples)
            weights.fill(1.)
        
        
        truthtuple =  Tuple[self.truthclasses]
        #print(self.truthclasses)
        alltruth=self.reduceTruth(truthtuple)

        mask = Tuple[['nCpfcand','nNpfcand','nsv']]
        maskListNpf = []
        maskListCpf = []
        maskListSv = []

        for i in range(0,Tuple.shape[0]):
            # mask built from the charged-candidate count: 8 rows of length 25
            nMax = int(mask[i][0])
            if(nMax>25): nMax=25
            list0 = [[1.]*nMax+[0.]*(25-nMax)]*8
            maskListNpf.append(list0)

            # mask built from the neutral-candidate count: 4 rows of length 25
            nMax = int(mask[i][1])
            if(nMax>25): nMax=25
            list1 = [[1.]*nMax+[0.]*(25-nMax)]*4
            maskListCpf.append(list1)

            # mask built from the secondary-vertex count: 8 rows of length 4
            nMax = int(mask[i][2])
            if(nMax>4): nMax=4
            list2 = [[1.]*nMax+[0.]*(4-nMax)]*8
            maskListSv.append(list2)
            
        
        maskListNpf = numpy.asarray(maskListNpf,dtype=float)
        maskListCpf = numpy.asarray(maskListCpf,dtype=float)
        maskListSv = numpy.asarray(maskListSv,dtype=float)
        print ('zero shapes ', maskListNpf.shape, ' ' ,maskListCpf.shape  , ' ' ,maskListSv.shape )
        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights=weights[notremoves > 0]
            x_global=x_global[notremoves > 0]
            x_cpf=x_cpf[notremoves > 0]
            x_npf=x_npf[notremoves > 0]
            x_sv=x_sv[notremoves > 0]
            maskListNpf = maskListNpf[notremoves > 0]
            maskListCpf = maskListCpf[notremoves > 0]
            maskListSv = maskListSv[notremoves > 0]
            alltruth=alltruth[notremoves > 0]
       
        newnsamp=x_global.shape[0]
        print('reduced content to ', int(float(newnsamp)/float(self.nsamples)*100),'%')
        self.nsamples = newnsamp
        
        print(x_global.shape,self.nsamples)

        self.w=[weights]
        print (' types ',  type (x_cpf) , type (maskListNpf), ' ' ,type(maskListCpf) , ' ' , type(maskListSv) )
        self.x=[x_global,x_cpf,x_npf,x_sv,maskListNpf,maskListCpf,maskListSv]
        self.y=[alltruth]
Example 29
from stopwatch import stopwatch
sw=stopwatch()
count=1
sw.start()
while count<=1000000:
        count+=1
sw.stop()
print(sw.getElapsedTime())
Example 30
import cv2
import numpy as np
from stopwatch import stopwatch


def morse_parser(decoder):
    # get capture from default camera
    cap = cv2.VideoCapture(0)

    # create timers for timing pauses and lights
    light_timer = stopwatch()
    pause_timer = stopwatch()

    light_array = []
    b_x, b_y, b_w, b_h = 230, 360, 220 + 230, 360 - 180 # bounding box coords
    light_found = False
    newline = False

    def run_decoder(arr=[]):
        morse = decoder.to_morse_string(arr)
        print(decoder.to_alpha(morse), end="", flush=True)

    while 1:
        ret, frame = cap.read() #get frame from camera
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV) # convert frame capture to HSV values

        # filtering to remove grains
        kernel = np.ones((5, 5), np.float32) / 25
        dst = cv2.filter2D(hsv, -1, kernel) # stage 2 of filtering (this is what you modify after frame)

        # lower and upper hsv values -- gets white values
        lower = np.array([0, 0, 250])
        upper = np.array([180, 10, 255])


        mask = cv2.inRange(dst, lower, upper)  # generate mask based on values on the dst
        res = cv2.bitwise_and(frame, frame, mask=mask)  # overlay mask on top of video
        blur = cv2.medianBlur(res, 5) # apply blur to soften mask

        # retrieve edges of detection
        edges = cv2.Canny(blur, 100, 200)

        cv2.rectangle(frame, (b_x, b_y), (b_w, b_h), (0, 255, 0), 2) # draw bounding box rectangle

        # get contours from edges -- just let opencv do its thing
        im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)

        if len(contours) > 0:
            # print("LIGHT DETECTED")
            c = max(contours, key=cv2.contourArea)  # get the largest area contour

            (x, y), radius = cv2.minEnclosingCircle(c)  # get an enclosing circle for the contour

            center = (int(x), int(y)) # get center of circle (for coords)
            radius = int(radius)

            # check whether the circle's center lies inside the bounding box (image y grows downward, so b_h < y < b_y)
            if b_w > x > b_x:
                if b_h < y < b_y:
                    light_found = True # if it is flip the light found toggle
                    # cv2.drawContours(frame, c, -1, (255, 0, 0), 3)
                    cv2.circle(frame, center, radius, (255, 0, 0), 4)  # draw the circle
            else:
                light_found = False # toggle

        else: # no light found -- toggle
            light_found = False

        if light_found:
            newline = True
            if pause_timer.is_running(): #handle timer
                pause_timer.stop()

            if not light_timer.is_running():
                light_timer.start()
        else:
            # morse code decoding beyond this point!
            if light_timer.is_running():
                light_timer.stop()
                light_array.append(round(light_timer.get_elapsed(), 2))
            else:
                if not pause_timer.is_running():
                    pause_timer.start()
                if decoder.get_pause_range()[0] <= pause_timer.get_elapsed() < decoder.get_pause_range()[1]:
                    run_decoder(light_array)
                    light_array.clear()

                if pause_timer.get_elapsed() >= decoder.get_space():
                    if newline:
                        print("\n")
                        newline = False
        # display the edge map and the annotated original frame
        cv2.imshow("edges", edges)
        cv2.imshow('frame', frame)

        k = cv2.waitKey(5) & 0xFF
        if k == 27:  # Esc key exits
            break

    print(light_array)
    cv2.destroyAllWindows()
    cap.release()
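The decoder object is never defined in this snippet; morse_parser assumes it exposes to_morse_string, to_alpha, get_pause_range, get_space, and (for the calibrate routine further below) calibrate. A minimal illustrative stub satisfying that interface; the thresholds and the truncated lookup table are invented for the sketch:

class StubDecoder:
    def __init__(self):
        self.dot_max = 0.3             # pulses shorter than this count as dots (made up)
        self.pause_range = (0.6, 2.0)  # pause lengths that end a letter (made up)
        self.space = 2.0               # pause length that ends a word (made up)

    def to_morse_string(self, durations):
        return ''.join('.' if d < self.dot_max else '-' for d in durations)

    def to_alpha(self, morse):
        table = {'.-': 'A', '-...': 'B', '-.-.': 'C', '...': 'S', '---': 'O'}  # truncated
        return table.get(morse, '?')

    def get_pause_range(self):
        return self.pause_range

    def get_space(self):
        return self.space

    def calibrate(self, samples):
        if samples:
            self.dot_max = sum(samples) / len(samples)  # naive threshold from calibration pulses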
Example n. 31
    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles, MeanNormZeroPadBinned
        import numpy
        from stopwatch import stopwatch
        
        sw=stopwatch()
        swall=stopwatch()
        
        import ROOT
        
        fileTimeOut(filename,120) #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples=tree.GetEntries()
        #self.nsamples = 10 #TESTING

        print('took ', sw.getAndReset(), ' seconds for getting tree entries')
        
        # split for convolutional network
        x_global = MeanNormZeroPad(filename,TupleMeanStd,
                                   [self.branches[0]],
                                   [self.branchcutoffs[0]],self.nsamples)
        
        # MeanNormZeroPadBinned needs, per binning axis:
        #   (branch, center branch, number of bins, half width)
        # plus the means/stds with the branches to stack (capped at a maximum
        # number of particles to stack/zero-pad) and the variables to sum
        x_cpf, sum_cpf = MeanNormZeroPadBinned(
            filename, 'nCpfcand', self.nsamples,
            ('Cpfcan_eta', 'jet_eta', self.nbins, self.jet_radius), #X axis
            ('Cpfcan_phi', 'jet_phi', self.nbins, self.jet_radius), #Y axis
            (TupleMeanStd, self.branches[1], self.branchcutoffs[1]), # means/stds, branches to use, per-bin number of particles to keep
            (self.sums_scaling['charged'], self.binned_sums['charged']), #variables to be summed (no zero padding yet)
        )

        x_npf, sum_npf = MeanNormZeroPadBinned(
            filename, 'nNpfcand', self.nsamples,
            ('Npfcan_eta', 'jet_eta', self.nbins, self.jet_radius), 
            ('Npfcan_phi', 'jet_phi', self.nbins, self.jet_radius), 
            (TupleMeanStd, self.branches[2], self.branchcutoffs[2]),
            (self.sums_scaling['neutral'], self.binned_sums['neutral']),
        )
        
        x_sv, sum_sv = MeanNormZeroPadBinned(
            filename, 'nsv', self.nsamples, 
            ('sv_eta', 'jet_eta', self.nbins, self.jet_radius), 
            ('sv_phi', 'jet_phi', self.nbins, self.jet_radius), 
            (TupleMeanStd, self.branches[3], self.branchcutoffs[3]),
            (self.sums_scaling['svs'], self.binned_sums['svs']),
        )

        # merge the per-bin sums of all candidate types along the last (feature) axis
        x_sum = numpy.concatenate((sum_cpf, sum_npf, sum_sv), axis=3)

        print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
        
        Tuple = self.readTreeFromRootToTuple(filename)

        if self.remove:
            notremoves=weighter.createNotRemoveIndices(Tuple)
            undef=Tuple['isUndefined']
            notremoves-=undef
            print('took ', sw.getAndReset(), ' to create remove indices')
        
        if self.weight:
            weights=weighter.getJetWeights(Tuple)
        elif self.remove:
            weights=notremoves
        else:
            print('neither remove nor weight')
            weights=numpy.empty(self.nsamples)
            weights.fill(1.)
        
        
        truthtuple =  Tuple[self.truthclasses]
        #print(self.truthclasses)
        alltruth=self.reduceTruth(truthtuple)
        pt_truth = Tuple[self.regtruth]
        
        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights = weights[notremoves > 0]
            x_global = x_global[notremoves > 0]
            x_cpf = x_cpf[notremoves > 0]
            x_npf = x_npf[notremoves > 0]
            x_sv  = x_sv[notremoves > 0]
            x_sum = x_sum[notremoves > 0]
            alltruth = alltruth[notremoves > 0]
            pt_truth = pt_truth[notremoves > 0]
       
        newnsamp=x_global.shape[0]
        print('reduced content to ', int(float(newnsamp)/float(self.nsamples)*100),'%')
        self.nsamples = newnsamp
        
        self.w = [weights]
        self.x = [x_global, x_cpf, x_npf, x_sv, x_sum]
        self.y = [alltruth, pt_truth]
Example n. 32
    def readFromRootFile(self,filename,TupleMeanStd, weighter):
        from preprocessing import MeanNormApply, MeanNormZeroPad, MeanNormZeroPadParticles
        import numpy
        from stopwatch import stopwatch
        import c_meanNormZeroPad
        c_meanNormZeroPad.zeroPad()
        
        sw=stopwatch()
        swall=stopwatch()
        
        import ROOT
        
        fileTimeOut(filename,120) #give eos a minute to recover
        rfile = ROOT.TFile(filename)
        tree = rfile.Get("deepntuplizer/tree")
        self.nsamples=tree.GetEntries()
        
        print('took ', sw.getAndReset(), ' seconds for getting tree entries')
        
        
        # split for convolutional network
        
        x_global = MeanNormZeroPad(filename,TupleMeanStd,
                                   [self.branches[0]],
                                   [self.branchcutoffs[0]],self.nsamples)
        
        x_cpf = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[1],
                                   self.branchcutoffs[1],self.nsamples)
        
        x_npf = MeanNormZeroPadParticles(filename,TupleMeanStd,
                                   self.branches[2],
                                   self.branchcutoffs[2],self.nsamples)
        
     
        
        print('took ', sw.getAndReset(), ' seconds for mean norm and zero padding (C module)')
        
        nparray = self.readTreeFromRootToTuple(filename)        
        if self.remove:
            notremoves=weighter.createNotRemoveIndices(nparray)
            undef=nparray['isUndefined']
            notremoves -= undef
            print('took ', sw.getAndReset(), ' to create remove indices')
        
        if self.weight:
            weights=weighter.getJetWeights(nparray)
        elif self.remove:
            weights=notremoves
        else:
            print('neither remove nor weight')
            weights=numpy.ones(self.nsamples)
        
        pttruth = nparray[self.regtruth]
        ptreco  = nparray[self.regreco]        
        truthtuple =  nparray[self.truthclasses]
        #print(self.truthclasses)
        alltruth=self.reduceTruth(truthtuple)

        #
        # sort candidate vectors along the particle axis (by pt ratio at the moment)
        #
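        # per jet, argsort the first feature (the pt ratio) to get a particle
        # ordering, then expand those indices so fancy indexing reorders the whole
        # (jets, particles, features) array; note argsort is ascending. The same
        # reordering should be obtainable in one call with
        # numpy.take_along_axis(x_cpf, x_cpf[:,:,0].argsort(axis=1)[:,:,None], axis=1)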
        idxs = x_cpf[:,:,0].argsort() #0 is pt ratio
        xshape = x_cpf.shape
        static_idxs = numpy.indices(xshape)
        idxs = idxs.reshape((xshape[0], xshape[1], 1))
        idxs = numpy.repeat(idxs, xshape[2], axis=2)
        x_cpf = x_cpf[static_idxs[0], idxs, static_idxs[2]]

        idxs = x_npf[:,:,0].argsort() #0 is pt ratio
        xshape = x_npf.shape
        static_idxs = numpy.indices(xshape)
        idxs = idxs.reshape((xshape[0], xshape[1], 1))
        idxs = numpy.repeat(idxs, xshape[2], axis=2)
        x_npf = x_npf[static_idxs[0], idxs, static_idxs[2]]

        #print(alltruth.shape)
        if self.remove:
            print('remove')
            weights=weights[notremoves > 0]
            x_global=x_global[notremoves > 0]
            x_cpf = x_cpf[notremoves > 0]
            x_npf = x_npf[notremoves > 0]
            alltruth=alltruth[notremoves > 0]
            pttruth=pttruth[notremoves > 0]
            ptreco=ptreco[notremoves > 0]
                        
        newnsamp=x_global.shape[0]
        print('reduced content to ', int(float(newnsamp)/float(self.nsamples)*100),'%')
        self.nsamples = newnsamp
        
        self.w=[weights]
        self.x=[x_global,x_cpf,x_npf,ptreco]
        self.y=[alltruth,pttruth]
def calibrate(decoder):
    cap = cv2.VideoCapture(0)

    light_timer = stopwatch()
    pause_timer = stopwatch()

    calib_light_array = []

    b_x, b_y, b_w, b_h = 230, 360, 220 + 230, 360 - 180
    light_found = False
    while True:
        ret, frame = cap.read()
        hsv = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)

        # smooth with a 5x5 averaging kernel to suppress noise
        kernel = np.ones((5, 5), np.float32) / 25
        dst = cv2.filter2D(hsv, -1, kernel)

        # lower and upper hsv values
        lower = np.array([0, 0, 250])
        upper = np.array([180, 10, 255])

        mask = cv2.inRange(dst, lower, upper)  # generate mask based on values
        res = cv2.bitwise_and(frame, frame,
                              mask=mask)  # overlay mask on top of video
        blur = cv2.medianBlur(res, 5)

        # retrieve edges of detection
        edges = cv2.Canny(blur, 100, 200)
        # draw bounding box
        cv2.rectangle(frame, (b_x, b_y), (b_w, b_h), (0, 255, 0), 2)

        # get contours from the edge map (OpenCV 3.x API; OpenCV 4.x returns only contours, hierarchy)
        im2, contours, hierarchy = cv2.findContours(edges, cv2.RETR_TREE,
                                                    cv2.CHAIN_APPROX_SIMPLE)
        # print(lightFound)
        if len(contours) > 0:
            # print("LIGHT DETECTED")
            c = max(contours, key=cv2.contourArea)

            (x, y), radius = cv2.minEnclosingCircle(c)
            center = (int(x), int(y))
            radius = int(radius)

            if b_w > x > b_x:
                if b_h < y < b_y:
                    light_found = True
                    cv2.circle(frame, center, radius, (255, 0, 0), 5)
            else:
                light_found = False

            # cv2.drawContours(frame, c, -1, (255, 0, 0), 3)
        else:
            light_found = False

        if light_found:
            if pause_timer.is_running():
                pause_timer.stop()
                calib_light_array.append(round(pause_timer.get_elapsed(), 2))

            if not light_timer.is_running():
                light_timer.start()
        else:
            if light_timer.is_running():
                light_timer.stop()
                calib_light_array.append(round(light_timer.get_elapsed(), 2))
            else:
                if not pause_timer.is_running():
                    pause_timer.start()
                else:
                    if pause_timer.get_elapsed() >= 4 and len(
                            calib_light_array) > 0:
                        print("Calibration Complete")
                        # print(".(pause).(pause).(pause).(pause)+(space) -(pause)-(pause)-")
                        del calib_light_array[0]  # drop the initial wait before the first pulse
                        print(calib_light_array)
                        decoder.calibrate(calib_light_array)
                        break

        # display the edge map and the annotated calibration frame
        cv2.imshow("edges", edges)
        cv2.imshow('Calibrator', frame)

        k = cv2.waitKey(5) & 0xFF
        if k == 27:  # Esc key exits
            break
            break

    cv2.destroyAllWindows()
    cap.release()
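A plausible entry point wiring calibration and decoding together, using the hypothetical StubDecoder sketched earlier (illustrative only, not part of the original code):

if __name__ == "__main__":
    decoder = StubDecoder()  # substitute the real decoder implementation
    calibrate(decoder)       # learn timing thresholds from a known light pattern
    morse_parser(decoder)    # then decode live camera input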