def cuttree( oldname, newname, treename, branch, check ) :
    oldfile = r.TFile( oldname )
    oldtree = oldfile.Get( treename )  # parameter renamed so the tree object no longer shadows its own name
    nentries = oldtree.GetEntries()
    nTotVars = oldtree.GetLeaf( branch ).GetLen()
    chi2data = array('d',[0]*nTotVars)
    oldtree.SetBranchAddress( branch, chi2data )
    
    newfile = r.TFile( newname, "recreate" )
    newfile.cd()
    newtree = oldtree.CloneTree(0)

    prog = ProgressBar(0, nentries, 77, mode='fixed')
    oldprog = str(prog)

    for i in range(0,nentries) :
        prog.increment_amount()
        if oldprog != str(prog):
            print prog, "\r",
            stdout.flush()
            oldprog=str(prog)
        oldtree.GetEntry(i)
        if check( chi2data[0] ) :
            newtree.Fill()

    newtree.AutoSave()
     
    oldfile.Close()
    newfile.Close()
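
Note: the ProgressBar class itself is never shown on this page. A minimal stand-in exposing the interface every example below relies on (increment_amount() and str(prog), with mode/char/autoprint accepted at construction) might look like this; it is a sketch, not the original implementation:

import sys

class ProgressBar(object):
    def __init__(self, minValue, maxValue, width, mode='fixed', char='#', autoprint=False):
        self.min, self.max = minValue, maxValue
        self.width, self.char = width, char
        self.autoprint = autoprint          # some examples pass autoprint=True
        self.amount = minValue              # `mode` is accepted but ignored in this sketch

    def increment_amount(self, add=1):
        self.amount = min(self.amount + add, self.max)
        if self.autoprint:
            sys.stdout.write(str(self) + '\r')
            sys.stdout.flush()

    def __str__(self):
        span = max(self.max - self.min, 1)
        done = int(round((self.amount - self.min) * (self.width - 2) / float(span)))
        return '[' + self.char * done + ' ' * (self.width - 2 - done) + ']'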
def calculate_entry_histograms( plots, chain ) :
    ##assert canvas is not None, "Canvas must be specified in calculate_histograms"
    # setup our 2d histos
    histos = []
    chi2histos = []
    for p in plots :
        entryhisto, chi2histo = initialize_histo( p )
        histos.append(entryhisto)
        chi2histos.append(chi2histo)

    nentries = chain.GetEntries()
    prog = ProgressBar(0, nentries, 77, mode='fixed', char='#')
    for entry in range(0,nentries) : # entries run 0..nentries-1; the original nentries+1 read one entry past the end
        prog.increment_amount()
        print prog,'\r',
        stdout.flush()
        chain.GetEntry(entry)
        for h, c, plot in zip( histos, chi2histos, plots ) :
            indices = plot.get_indices()
            vals = [ chain.treeVars["predictions"][ index ] for index in indices ]
            nbins = plot.bins
            ibin = h.FindBin(*vals)
            max_bin = h.FindBin(*plot.max_vals)
            if ibin != 0 and ibin < max_bin :
                chi2 = chain.treeVars["predictions"][0]
                if chi2 < c.GetBinContent(ibin) :
                    c.SetBinContent(ibin, chi2)
                    h.SetBinContent(ibin, entry)

    print
    return histos
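
In ROOT, FindBin maps coordinates to a single global bin number: global bin 0 is the underflow corner and large coordinates land in overflow bins, which is what the `ibin != 0 and ibin < max_bin` guard above screens out. A small illustration (a sketch, assuming PyROOT is importable as `r` as in these examples):

import ROOT as r

h2 = r.TH2D("h2", "demo", 10, 0.0, 1.0, 10, 0.0, 1.0)
print(h2.FindBin(0.55, 0.25))  # global bin number of an in-range (x, y)
print(h2.FindBin(-1.0, -1.0))  # 0: the underflow corner, rejected by `ibin != 0`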
Example #3
 def solve_5(self, period, N):
     result = np.identity(N, dtype=np.complex)
     I = np.identity(N, dtype=np.complex)
     He = self.matrix_electric
     Hs = self.matrix_static
     prog = ProgressBar(0, self.fine_step / 6, 50, mode='fixed', char='#')
     #python version
     for i in xrange(0, self.fine_step, 6):
         prog.increment_amount()
         print prog, '\r',
         sys.stdout.flush()
         k1 = (He * self.E_arr[period][i] + Hs)
         tmp = He * self.E_arr[period][i + 1] + Hs
         k2 = np.dot(tmp, I + k1 * 0.25 * self.dt)
         tmp = He * self.E_arr[period][i + 2] + Hs
         k3 = np.dot(tmp, I + (k1 * 3.0 / 32.0 + k2 * 9.0 / 32.0) * self.dt)
         tmp = He * self.E_arr[period][i + 3] + Hs
         k4 = np.dot(
             tmp, I +
             (k1 * 1932.0 - k2 * 7200.0 + k3 * 7296.0) * self.dt / 2197.0)
         tmp = He * self.E_arr[period][i + 4] + Hs
         k5 = np.dot(
             tmp, I + (k1 * 439.0 / 216.0 - k2 * 8.0 + k3 * 3680.0 / 513.0 -
                       k4 * 845.0 / 4104.0) * self.dt)
         tmp = He * self.E_arr[period][i + 5] + Hs
         k6 = np.dot(
             tmp,
             I + (-1.0 * k1 * 8.0 / 27.0 - k2 * 2.0 - k3 * 3544.0 / 2565.0 +
                  k4 * 1859.0 / 4104.0 - k5 * 11.0 / 40.0) * self.dt)
         result = np.dot(
             I + (k1 * 16.0 / 135.0 + k3 * 6656.0 / 12825.0 + k4 * 28561.0 /
                  56430.0 - k5 * 9.0 / 50.0 + k6 * 2.0 / 55.0) * self.dt,
             result)
     return result
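
The six stages above are the classic Runge-Kutta-Fehlberg 4(5) tableau, with the fifth-order weights forming the final update of `result`. As a quick consistency check, those weights (the zero belongs to the k2 stage) must sum to exactly 1; a sketch:

# Fifth-order RKF45 weights as used in the final np.dot above.
w = [16.0/135.0, 0.0, 6656.0/12825.0, 28561.0/56430.0, -9.0/50.0, 2.0/55.0]
assert abs(sum(w) - 1.0) < 1e-12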
Example #4
	def delete_files(self, file_list ):
		'''delete files. The filenames are supplied as a list'''
		
		
		count = 0
		total = len(file_list)
		if self.verbose: print "\nTrying to delete " + str(total) + " files"
		prog = ProgressBar(count, total, 77, mode='fixed', char='#')
		
		before = time.time()
		for file_name in file_list:
			ret = self.delete_file( file_name )
			
			if self.verbose:
				count += 1
				prog.increment_amount()
				print prog, '\r',
				sys.stdout.flush()

			
			if ret == False:
				print "Failed to delete file " + str( file_name )
				return False
		print

		after = time.time()
		self.profiling_printer("Deleting " + str(len(file_list)) + " files", after - before)
		return True
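
profiling_printer is defined elsewhere in this class; a minimal method consistent with how it is called here might be the following sketch (the self.profiling flag is the same one used in create_files below):

def profiling_printer(self, description, seconds):
    # hypothetical helper: one-line timing summary when profiling is enabled
    if self.profiling:
        print('%s took %.3f seconds' % (description, seconds))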
Example #5
    def compute_initial_allocation(self,disp_progress = True):

        self.to_assign = np.random.permutation(range(self.N)).tolist()

        if disp_progress:
            prog = ProgressBar(0, len(self.to_assign), 77, mode='fixed')
            oldprog = str(prog)

        while True:
            if disp_progress:
                #<--display progress
                prog.increment_amount()
                if oldprog != str(prog):
                    print prog, "\r",
                    sys.stdout.flush()
                    oldprog = str(prog)
                #-->

            i = self.to_assign.pop()
            dists = sdis.cdist(np.atleast_2d(self.X[i]),self.centroids,self.metric).ravel()
            sorted_centroids = np.argsort(dists)
            for c in sorted_centroids:
                if self.weight_per_cluster[c]+self.weights[i]<weight_limit:
                    self.clusters[c].append(i)
                    self.weight_per_cluster[c]+=self.weights[i]
                    break

            if not(self.to_assign):
                break
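
The `sdis` alias is presumably scipy.spatial.distance: cdist returns the distances from one point to every centroid, ravel() flattens the 1xK result, and argsort then yields centroids ordered nearest-first. For example (a sketch):

import numpy as np
import scipy.spatial.distance as sdis  # assumed alias from the original module

X_i = np.atleast_2d([0.0, 0.0])                # one point, shaped (1, 2)
centroids = np.array([[0.0, 1.0], [3.0, 4.0]])
dists = sdis.cdist(X_i, centroids, 'euclidean').ravel()
print(dists)              # [ 1.  5.]
print(np.argsort(dists))  # [0 1]: nearest centroid first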
Example #6
def do_preprocess_simulation(heap, pre_event_list, event_list):
    prog = ProgressBar(0,
                       len(pre_event_list) + len(event_list),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    frees_and_reallocs = {}
    preprocessed_memory = 0

    for event in event_list:
        if event.event_type == Event.eRealloc:
            frees_and_reallocs[event.pDataPrev] = 0
        elif event.event_type == Event.eFree:
            frees_and_reallocs[event.pData] = 0
        prog.increment_amount()

    for event in pre_event_list:
        if event.event_type == Event.eAlloc:
            if frees_and_reallocs.has_key(event.pData):
                heap.track(event)
                preprocessed_memory += event.allocSize
        elif event.event_type == Event.eRealloc:
            if frees_and_reallocs.has_key(event.pData):
                heap.track(event)
                preprocessed_memory += event.allocSize
        prog.increment_amount()
    print "\n\npreprocessed_memory = %s" % locale.format(
        '%d', preprocessed_memory, True)
def fill_and_save_data_hists( mcf, plots,entry_hists, modes, contribs,predicts ) :
    axes = [ "X", "Y", "Z" ]
    chain = MCAnalysisChain( mcf )
    nentries = chain.GetEntries()
    
    KOhack=KOhack_class(mcf)
    for p , h in zip(plots,entry_hists) :
#############################################
        if check_entry_KO_hack(p,KOhack):
            KOhack.init_hack(p)
#############################################
            
        histo_cont = {}
        contrib_cont = {}
        predict_cont = {}

        print p.short_names
        firstbin, lastbin = get_histogram_bin_range(h,space=p)
        for mode in modes :
            # here need to add in check on contrib and make one for each contribution
            hname = histo_name( p.short_names, entry_histo_prefix )+ "_" + mode
            histo_cont[mode] = initialize_histo( p, hname, data=True )
            base_val = 1e9
            if mode == "pval" :
                base_val = 0.0
            for bin in range( firstbin, lastbin + 1 ) :
                histo_cont[mode].SetBinContent( bin, base_val )

        for c in contribs : # contribs is a list of Contribution objects
            hname = histo_name( p.short_names, entry_histo_prefix )+ "_dX_" + c.short_name
            contrib_cont[c.short_name] = initialize_histo( p,hname,data =True )
            for bin in range( firstbin, lastbin + 1 ) :
                contrib_cont[c.short_name].SetBinContent( bin, 0.0 )

        for pred in predicts : # predicts is a list of Contribution objects
            hname = histo_name( p.short_names, entry_histo_prefix )+ "_pred_" + pred.short_name
            predict_cont[pred.short_name] = initialize_histo( p,hname,data =True )
            for bin in range( firstbin, lastbin + 1 ) :
                predict_cont[pred.short_name].SetBinContent( bin, 0.0 )

        prog = ProgressBar(0, (lastbin-firstbin)+1, 77, mode='fixed', char='#')
        for i in range( firstbin, lastbin+1 ) :
            prog.increment_amount()
            print prog,'\r',
            stdout.flush()
            entry = int( h.GetBinContent(i) )
            if entry > 0 :
                chain.GetEntry(entry)
#############################################
                if check_entry_KO_hack(p,KOhack):
                    KOhack.set_ssi_bin_centre(h,i)
#############################################
                fill_bins( histo_cont, contrib_cont, predict_cont, contribs, predicts, i, chain, mcf, KOhack )
        perform_zero_offset( histo_cont["dchi"],space=p )
        print
        save_hdict_to_root_file( histo_cont,  mcf.FileName, mcf.DataDirectory)
        save_hdict_to_root_file( contrib_cont, mcf.FileName, mcf.DataDirectory)
        save_hdict_to_root_file( predict_cont, mcf.FileName, mcf.DataDirectory)
Example #8
    def sweep_multiprocessing(self, sweep_n, start, end, points, filename='./test.txt'):
        """
        nu[sweep_n] is sweeped.
        Sweep the frequency and output the result to filename.
        """
        ###############################
        ##multiprocessing preparation
        ##############################
        core = 10
        points = points//core*core # round points down to a multiple of core so each worker gets an equal share
        self.result = [[0.0 for i in range(self.n+1)] for j in range(points)] # result matrix; written to file at the end
        job = self.allocate_job(start,end,points,core)

        
        ################################
        ##This are codes for progress bar
        ###############################
        prog = ProgressBar(0, points, 50, mode='fixed', char='#')
        ##the linear algebra start here
        a = np.zeros(self.N)
        a[self.N-1] = 1 #1 because rho_11+rho_22 ... =1
        a = np.matrix(a)
        a = a.T

        done_queue = multiprocessing.Queue()
        process_list = []
        for x in range(core):
            process_list.append(multiprocessing.Process(target = sweep_mp,args = (job[x],self.system,self.nu2,a,self.add_freq,self.index,sweep_n,self.n,done_queue)))

        tStart = time.time()
        print 'start'
        for p in process_list:
            p.start()

        stop_num = 0
        while stop_num != core:
            msg = done_queue.get()  # renamed from `a`, which shadowed the matrix built above
            if msg == 'STOP':
                stop_num += 1
            else:
                self.result[msg[0]] = msg[1]
                prog.increment_amount()
                print prog, '\r',
                sys.stdout.flush()

        print '\n'
        for p in process_list:
            p.join()
            print "%s.exitcode = %s" %(p.name, p.exitcode)

        tStop = time.time()
        print"spend",(tStop - tStart),"second"
            
        self.sweep_save_file(filename,points)
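
The collection loop above is the standard queue-with-sentinel pattern: each worker posts (index, row) results followed by a single 'STOP', and the parent drains the queue until it has seen one sentinel per worker. A self-contained sketch of the same pattern:

import multiprocessing

def worker(jobs, out_q):
    for idx in jobs:
        out_q.put((idx, idx * idx))  # post (index, result) pairs
    out_q.put('STOP')                # then exactly one sentinel

if __name__ == '__main__':
    q = multiprocessing.Queue()
    procs = [multiprocessing.Process(target=worker, args=(range(i, 10, 2), q))
             for i in range(2)]
    for p in procs:
        p.start()
    results, stops = {}, 0
    while stops < len(procs):        # one sentinel per worker ends the loop
        msg = q.get()
        if msg == 'STOP':
            stops += 1
        else:
            results[msg[0]] = msg[1]
    for p in procs:
        p.join()
    print(sorted(results.items()))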
Example #9
def generate_hierarchy(event_list, stack_dict):
    prog = ProgressBar(0,
                       len(event_list),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    #create a hierarchal track of the allocation event
    root = CallStatistics(0)

    #create a heap to track live allocated blocks and current memory status overall
    heap = Heap()
    for event in event_list:
        #    print event.text()

        # if the event is a free then we need to find out the size of the block we are freeing
        # @todo this could be part of the event stream when using debug CRT but probably not for release builds?
        if event.event_type == Event.eFree:
            size_free = heap.block_size(event.pData)
        elif event.event_type == Event.eRealloc:
            size_free = heap.block_size(event.pDataPrev)
        else:
            size_free = 0

        if event.event_type == Event.eAlloc or event.event_type == Event.eRealloc:
            size_alloc = event.allocSize
        else:
            size_alloc = 0

        # only now that we have our size information on potential free blocks do we
        # let heap track event (since the block in question might get freed)
        heap.track(event)

        # start at the tree root and walk each stack call in this event, tallying the event with this call and its callees
        stat_obj = root
        # start at the first call and walk down until we hit the desired leaf
        stack = stack_dict[event.stack_id]
        for index in range(len(stack.calls) - 1, stack.leaf_index - 2, -1):
            call = stack.calls[index]
            child_obj = stat_obj.add_child_call(call)
            # add stats for the child to the parent
            stat_obj.sample_child_event(size_alloc, size_free)
            if index == stack.leaf_index - 1:
                # add leaf stats on the child leaf node
                child_obj.sample_self_event(size_alloc, size_free)
            stat_obj = child_obj  # step down the tree for next call
        prog.increment_amount()
    print "\n\n"
    return root
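
CallStatistics is external to this listing; a minimal node class with the three methods used above could look like this sketch (the field names are illustrative, not the original ones):

class CallStatistics(object):
    def __init__(self, call):
        self.call = call
        self.children = {}  # call -> CallStatistics
        self.child_alloc = self.child_free = 0
        self.self_alloc = self.self_free = 0

    def add_child_call(self, call):
        # return the existing child node for this call, creating it on demand
        if call not in self.children:
            self.children[call] = CallStatistics(call)
        return self.children[call]

    def sample_child_event(self, size_alloc, size_free):
        self.child_alloc += size_alloc
        self.child_free += size_free

    def sample_self_event(self, size_alloc, size_free):
        self.self_alloc += size_alloc
        self.self_free += size_free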
Example #10
def build_callstack_detail_html(stack_dict):
    html = ""
    prog = ProgressBar(0,
                       len(stack_dict),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    stacks_sorted = sorted(stack_dict.values(),
                           key=operator.attrgetter('display_id'))
    for i, stack in enumerate(stacks_sorted):
        if MemTrack.call_stack_write_limit > 0 and i > MemTrack.call_stack_write_limit:
            break
        html += stack.html_table()
        html += "\n\n"
        prog.increment_amount()
    return html
Example #11
def summarize(cmds, key, type, stack_dict):
    #create a summary list based off of accumulating stats based on the given key
    prog = ProgressBar(0,
                       len(cmds),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    all_total = 0
    summary_dict = {}
    null_summary_key = CallStatistics(0)
    for cmd in cmds:
        if type == Event.eAny or cmd.event_type == type:
            size = cmd.allocSize
            all_total += size

            stack = stack_dict.get(
                cmd.stack_id)  # lookup the stack for this event
            # check stack for key first
            if stack.__dict__.has_key(key):
                summary_key = stack.__dict__[key]
            elif SymbolDB.symbol_cache.has_key(stack.leaf()):
                addr_info = SymbolDB.symbol_cache[stack.leaf()]
                summary_key = addr_info.__dict__[key]
            else:
                summary_key = null_summary_key
            summary = summary_dict.setdefault(summary_key, Summary())
            summary.sample(cmd, stack)
            summary.key = summary_key
        prog.increment_amount()
    print "\n\n"

    summaries_sorted = sorted(summary_dict.items(),
                              key=lambda (k, v): v.accumulator.value,
                              reverse=True)

    # after all the summaries are collected go ahead and generate some of the
    # final stats in each summary
    summaries = [summary[1] for summary in summaries_sorted]

    for summary in summaries:
        summary.prepare(all_total)
    return summaries
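
One portability note: the `key=lambda (k, v): ...` sort above relies on Python 2 tuple-parameter unpacking, which Python 3 removed. The Python 3 equivalent would be:

summaries_sorted = sorted(summary_dict.items(),
                          key=lambda kv: kv[1].accumulator.value,
                          reverse=True)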
def calculate_entry_histograms( plots, chain ) :
    ##assert canvas is not None, "Canvas must be specified in calculate_histograms"
    # setup our 2d histos
    vars = v.mc_variables()
    # KOhack class gets initiated, because it has to be checked "if KOhack is applied"
    KOhack=KOhack_class(plots[0].mcf)
    histos = []
    chi2histos = []
    for p in plots :
        hname = histo_name( p.short_names, entry_histo_prefix )
        cname = histo_name( p.short_names, chi2_histo_prefix )

        entryhisto = initialize_histo( p,hname,entry=True )
        chi2histo  = initialize_histo( p,cname,chi2 =True )

        histos.append(entryhisto)
        chi2histos.append(chi2histo)

        if check_entry_KO_hack(p,KOhack):
            KOhack.init_hack(p)

    nentries = chain.GetEntries()
    prog = ProgressBar(0, nentries, 77, mode='fixed', char='#')
    for entry in range(0,nentries) : # entries run 0..nentries-1; the original nentries+1 read one entry past the end
        prog.increment_amount()
        print prog,'\r',
        stdout.flush()
        chain.GetEntry(entry)
        for h, c, plot in zip( histos, chi2histos, plots ) :
            vals_list = get_values_list_from_chain_and_histo(chain,plot,vars,s,KOhack,h)  # NB: `s` is not defined in this excerpt; it presumably comes from the original module
            for vals in vals_list:
                nbins = plot.bins
                ibin = h.FindBin(*vals)
                max_bin = h.FindBin(*plot.max_vals)
                if ibin != 0 and ibin < max_bin :
                    chi2 = get_modified_entry_chi2(vals,chain,KOhack)
                    if chi2 < c.GetBinContent(ibin) :
                        c.SetBinContent(ibin, chi2)
                        h.SetBinContent(ibin, entry)

    print
    return histos
Example #13
	def create_files(self, file_list ):
		'''create files. The filenames are supplied as a list'''
		count = 0
		total = len(file_list)
		
		if self.verbose: print "\nTrying to create " + str(total) + " files"
		
		prog = ProgressBar(count, total, 77, mode='fixed', char='#')
	
		counter = 0
		last_time =time.time()

		before = time.time()
		for file_name in file_list:
			ret = self.create_empty_file( file_name )
		    	
			if self.verbose:
				count += 1
				prog.increment_amount()
				print prog, '\r',
				sys.stdout.flush()

			if ret == False:
				print "Failed to create file " + str( file_name )
				return False
			else:
				counter+=1

			if counter % 1000 == 0 and self.profiling:
				diff = time.time() - last_time
				last_time = time.time()	
				f = open("/tmp/basicStat.csv","a")
				f.write(str(counter) + "," + str(diff) + "\n")
				f.flush()
				f.close()
		print

		after = time.time()
		files_per_second = str( len(file_list) / (after-before) )
		self.profiling_printer("Creating " + str(len(file_list)) + " files (" + files_per_second +  " per second)", after - before)

		return True
Example #15
    def greedy_for_bound(self, best_in_next=100, direction='west', disp_progress=True, width=40, wgpenalty=0, start=-180, init=True):
        if init:
            print 'initialization...'
            self.to_assign = np.random.permutation(range(self.N)).tolist()
            '''
            if not(np.all([self.centroids[i][1]<=self.centroids[i+1][1] for i in range(len(self.centroids)-1)])):
                print 'warning: centroids were not sorted by longitude. I reorder them'
                self.centroids = np.array(sorted(self.centroids, key=lambda x: x[1]))
                
            self.init_clusters_with_centroids()
            '''
            self.centroids = np.zeros((0,2))
            self.K = 0
        
        shiftedlong = np.where(self.X[self.to_assign][:,1]<start,self.X[self.to_assign][:,1]+360,self.X[self.to_assign][:,1])
        if direction=='west':
            print 'sorting gifts per longitude...'
            self.to_assign = np.array(self.to_assign)[np.argsort(shiftedlong)]
        elif direction=='east':
            print 'sorting gifts per longitude...'
            self.to_assign = np.array(self.to_assign)[np.argsort(shiftedlong)[::-1]]
        elif direction=='south':
            print 'sorting gifts per latitude...'
            self.to_assign = np.array(self.to_assign)[np.argsort(self.X[self.to_assign][:,0])[::-1]]
        elif direction=='north':
            print 'sorting gifts per latitude...'
            self.to_assign = np.array(self.to_assign)[np.argsort(self.X[self.to_assign][:,0])]
        else:
            raise ValueError('direction not implemented')
        self.to_assign = self.to_assign.tolist()
        print 'done.'
        
        if disp_progress:
            prog = ProgressBar(0, len(self.to_assign), 77, mode='fixed')
            oldprog = str(prog)

        while True:
            if disp_progress:
                #<--display progress
                prog.increment_amount()
                if oldprog != str(prog):
                    print prog, "\r",
                    sys.stdout.flush()
                    oldprog = str(prog)
                #-->
        
            
            if self.K == 0:
                self.create_new_cluster(self.to_assign[0])
                del self.to_assign[0]
                continue
            
            candidates = self.to_assign[:best_in_next]
                
            bounds_inc = []
            for i in candidates:
                km = np.searchsorted(self.centroids[:,1], self.X[i][1]-width)
                kp = np.searchsorted(self.centroids[:,1], self.X[i][1]+width)
                bounds_inc.extend([(self.bound_increase_for_adding_gift_in_cluster(i,k),i,k)
                                   for k in range(km,kp) if self.weight_per_cluster[k]+self.weights[i]<weight_limit-wgpenalty])
                
            if not bounds_inc:
                self.create_new_cluster(self.to_assign[0])
                del self.to_assign[0]
                continue
                
            sorted_bounds_inc = sorted(bounds_inc)
            assigned = False
            for inc,i,c in sorted_bounds_inc:
                if inc> 2*self.distances_to_pole[i]*sleigh_weight:
                    #import pdb;pdb.set_trace()
                    self.create_new_cluster(self.to_assign[0])
                    assigned = True
                    del self.to_assign[0]
                    #print 'one more clust '+str(self.K)
                    #if self.K>1500:
                    #    import pdb;pdb.set_trace()#TMP
                    break
                if self.weight_per_cluster[c]+self.weights[i]<weight_limit-wgpenalty:
                    self.add_in_tour(i,c)
                    assigned = True
                    self.to_assign.remove(i)
                    break

            if not assigned:
                raise Exception('not able to assign a trip in this window of longitudes.')

            if not(self.to_assign):
                break
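
The two searchsorted calls find the slice of centroids whose longitude lies within `width` degrees of gift i, which works because self.centroids stays sorted by longitude. An illustration with hypothetical longitudes (a sketch):

import numpy as np

lons = np.array([-170.0, -90.0, 0.0, 45.0, 160.0])  # centroid longitudes, sorted
width = 40.0
km = np.searchsorted(lons, 45.0 - width)  # 3: first centroid with longitude >= 5
kp = np.searchsorted(lons, 45.0 + width)  # 4: first centroid with longitude >= 85
print(list(range(km, kp)))                # [3]: only the centroid at 45.0 is in the window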
linecount = 0
revblock = ""
isrevisionblock = False
ispageblock = False
isknownline = False
pagetitle = ""
pageid = ""
pagecount = 0
pageRedirect = False

prog = ProgressBar(linecount, numlinesinfile, 100, mode='fixed', char='#')

for txtline in open(xmlfile):
    try:
        isknownline = False
        prog.increment_amount()
        linecount += 1
        if isrevisionblock:
            revblock = revblock + txtline
            #start: this is what we do when we've finished a revision
            if txtline.find("</revision>") > 0:
                isrevisionblock = False
                isknownline = True
                try:
                    processRevision(cleanString(revblock))
                except:
                    exceptionType, exceptionValue, exceptionTraceback = sys.exc_info()
                    log.write("Error Processing Revision with stacktrace: " + str(traceback.print_tb(exceptionTraceback)))
                    log.write("\n161 Unexpected error:" + str(sys.exc_info()[0]) + "Line " + str(linecount) + ": Error Processing Revision:\n" + revblock)
                    errorxmlfile.write(revblock)
                #print sys.exc_info()
Example #17
def main():
    # define command line arguments/parser
    parser = optparse.OptionParser('%%prog %s' % __version__)
    parser.add_option("-i",
                      "--input",
                      help="Input log file path",
                      metavar="FILE")
    parser.add_option("-d",
                      "--dict",
                      help="Input dictionary file path",
                      metavar="FILE")
    parser.add_option("-a",
                      "--address_db",
                      help="Input address symbols sql db file path",
                      metavar="FILE")
    parser.add_option("-x",
                      "--histogram",
                      help="Report histogram",
                      metavar="ENABLE",
                      action="store_true",
                      default=True)
    parser.add_option("-X",
                      "--nohistogram",
                      help="DO NOT report histogram",
                      metavar="ENABLE",
                      action="store_false",
                      dest="histogram")
    parser.add_option("-c",
                      "--callhier",
                      help="Report call hierarchy",
                      metavar="ENABLE",
                      action="store_true",
                      default=True)
    parser.add_option("-C",
                      "--nocallhier",
                      help="Do NOT Report call hierarchy",
                      metavar="ENABLE",
                      action="store_false",
                      dest="callhier")
    parser.add_option("-e",
                      "--errors",
                      help="Report errors",
                      metavar="ENABLE",
                      action="store_true",
                      default=True)
    parser.add_option("-E",
                      "--noerrors",
                      help="Do NOT Report errors",
                      metavar="ENABLE",
                      action="store_false",
                      dest="errors")
    parser.add_option("-y",
                      "--callstack_detail",
                      help="Write callstack detail to report",
                      metavar="ENABLE",
                      action="store_true",
                      default=True)
    parser.add_option("-Y",
                      "--no_callstack_detail",
                      help="Do NOT Report callstack detail",
                      metavar="ENABLE",
                      action="store_false",
                      dest="callstack_detail")

    parser.add_option("-p",
                      "--prune_callstacks",
                      help="Prune callstacks above memory manager functions",
                      metavar="ENABLE",
                      action="store_true",
                      default=True)
    parser.add_option("-P",
                      "--no_prune_callstacks",
                      help="Do NOT prune callstacks",
                      metavar="ENABLE",
                      action="store_false",
                      dest="prune_callstacks")

    parser.add_option("-l",
                      "--limit",
                      help="size limit for reporting information",
                      type="int",
                      default=0x10000)
    parser.add_option("",
                      "--limit_event_reads",
                      help="allocation limit for reporting branch",
                      type="int",
                      default=0x7FFFFFF)
    parser.add_option(
        "",
        "--call_stack_write_limit",
        help=
        "limit number of callstacks written to report (development feature)",
        type="int",
        default=-1)

    parser.add_option("",
                      "--begin_marker",
                      help="Marker ID to start simulation",
                      type="int",
                      default=-1)
    parser.add_option("",
                      "--end_marker",
                      help="Marker ID to end simulation",
                      type="int",
                      default=-1)

    parser.add_option("-S",
                      "--simple_processing",
                      help="Do simple total allocated/freed analysis",
                      metavar="ENABLE",
                      action="store_true",
                      default=False)

    parser.add_option("-v",
                      "--verbose",
                      help="Do verbose logging",
                      metavar="ENABLE",
                      action="store_true")
    opts, args = parser.parse_args()

    in_name = 'memtrack_log.dat'
    in_dict = 'memtrack_dict.dat'
    in_address = 'address.db'

    ##########################
    # apply command line arguments
    #  pprint.pprint( opts )
    verbose = opts.verbose
    if opts.input:
        in_name = opts.input
    if opts.dict:
        in_dict = opts.dict
    if opts.address_db:
        in_address = opts.address_db

    if not os.path.exists(in_name):
        print >> sys.stderr, 'Could not find input file: %s' % in_name
        sys.exit(128)

    if not os.path.exists(in_dict):
        print >> sys.stderr, 'Could not find dictionary: %s' % in_dict
        sys.exit(128)

    if not os.path.exists(in_address):
        print >> sys.stderr, 'Could not find symbol db: %s' % in_address
        sys.exit(128)

    MemTrack.event_log_read_limit = opts.limit_event_reads
    MemTrack.reporting_size_limit = opts.limit
    MemTrack.prune_callstacks = opts.prune_callstacks
    MemTrack.call_stack_write_limit = opts.call_stack_write_limit

    ##########################
    print "Loading...\n\n"

    ### cache the address symbol db in a dict for ease of use
    print "Caching symbols...\n"
    SymbolDB.symbol_cache = build_address_symbol_cache(in_address)
    print "\n\n"

    ### Read in the call stack dictionary
    print "Reading callstack dictionary...\n"
    stack_dict = read_callstack_dictionary(in_dict)
    print "\n\n"

    ### Read the event log
    print "Reading event log..."
    event_list = read_event_log(in_name)
    print "\n\n"

    ##########################
    pre_event_list = []

    if opts.begin_marker >= 0 or opts.end_marker >= 0:
        print "Pruning list...\n\n"
        begin_index = 0
        if opts.begin_marker >= 0:
            begin_index = find_marker(event_list, opts.begin_marker)
            if begin_index == -1:
                begin_index = 0
            else:
                print "  Found begin marker: %d at index %d" % (
                    opts.begin_marker, begin_index)

        end_index = len(event_list)
        if opts.end_marker >= 0:
            end_index = find_marker(event_list, opts.end_marker)
            if end_index == -1:
                end_index = len(event_list)
            else:
                print "  Found end marker  : %d at index %d" % (
                    opts.end_marker, end_index)

        if begin_index > 0:
            pre_event_list = event_list[:begin_index]

        event_list = event_list[begin_index:end_index]

        print "\n  Pruned events that will be tracked: %d\n" % len(event_list)
        gc.collect()

    if len(event_list) == 0:
        return

    if opts.simple_processing:
        do_simple_processing(pre_event_list, event_list)
        return

    ##########################
    print "Processing...\n\n"

    lpath, ltail = os.path.split(in_name)
    lname, lext = os.path.splitext(ltail)
    outName = lname.split(' ')[0]
    outName = outName.split('_')[0]

    ### histogram
    if opts.histogram:
        print "<histogram feature not available w/o html and graph generation.>\n\n"
        print "\n\n"

    ### heap simulation
    #create a heap to track live allocated blocks and current memory status overall
    heap = Heap()
    if len(pre_event_list) > 0:
        print "Preprocessing events...\n\n"
        do_preprocess_simulation(heap, pre_event_list, event_list)
        print "\n\n"

    print "Simulating events...\n\n"
    prog = ProgressBar(0,
                       len(event_list),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    for event in event_list:
        heap.track(event)
        prog.increment_amount()
    print "\n"
    if len(heap.errors()):
        print "simulation errors: %d of %d" % (len(
            heap.errors()), len(event_list))
    else:
        print "simulation errors: NONE"
    print "\n\n"

    ### call hierarchy
    if opts.callhier:
        print "Generating call hierarchy allocation tree...\n\n"
        hierarchy_root = generate_hierarchy(event_list, stack_dict)
        print "entry points: %d" % len(hierarchy_root.children)


##########################
    print "complete."
Example #18
def do_simple_processing(pre_event_list, event_list):
    allocated = 0
    reallocated = 0
    reallocated_freed = 0
    freed = 0
    num_events = 0
    blocks = {}

    print "Preprocessing (looking for frees and reallocs)..."
    prog = ProgressBar(0,
                       len(event_list),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    # find all free's and realloc's.
    # the logic is that there will be a lot less free's and realloc's than alloc's
    # and so we only track the addresses which will be free'd or realloc'd instead
    # of all of the addresses which are alloc'd
    # NOTE: we only care about free's and realloc's within our begin/end markers
    for event in event_list:
        if event.event_type == Event.eRealloc:
            blocks[event.pDataPrev] = 0
        elif event.event_type == Event.eFree:
            blocks[event.pData] = 0
        prog.increment_amount()

    print "\n\nSimulating events..."
    prog = ProgressBar(0,
                       len(pre_event_list) + len(event_list),
                       77,
                       mode='fixed',
                       char='#',
                       autoprint=True)
    for event in pre_event_list:
        if event.event_type == Event.eAlloc:
            if blocks.has_key(event.pData):
                blocks[event.pData] = event.allocSize
        elif event.event_type == Event.eRealloc:
            if blocks.has_key(event.pData):
                blocks[event.pData] = event.allocSize
        prog.increment_amount()

    for event in event_list:
        num_events += 1
        if event.event_type == Event.eAlloc:
            allocated += event.allocSize
            if blocks.has_key(event.pData):
                blocks[event.pData] = event.allocSize
        elif event.event_type == Event.eRealloc:
            reallocated += event.allocSize
            reallocated_freed += blocks[event.pDataPrev]
            blocks[event.pDataPrev] = 0  # zero the old block once counted, mirroring the eFree branch (the original zeroed pData, which defeated the has_key test below)
            if blocks.has_key(event.pData):
                blocks[event.pData] = event.allocSize
        elif event.event_type == Event.eFree:
            freed += blocks[event.pData]
            blocks[event.pData] = 0
        prog.increment_amount()

    total = allocated + reallocated - reallocated_freed - freed

    print "\n\n"
    print "allocated =         %15s" % locale.format('%d', allocated, True)
    print "reallocated =       %15s" % locale.format('%d', reallocated, True)
    print "reallocated_freed = %15s" % locale.format('%d', reallocated_freed,
                                                     True)
    print "freed =             %15s" % locale.format('%d', freed, True)
    print "-----------------------------------"
    print "total =             %15s" % locale.format('%d', total, True)
    print "\n"
    print "total events simulated  = %s" % locale.format(
        '%d', num_events, True)
def fill_and_save_data_hists( mcf, modes, hlist, contribs,predicts ) :
    axes = [ "X", "Y", "Z" ]
    chain = MCAnalysisChain( mcf )
    nentries = chain.GetEntries()

    for h in hlist :
        histo_cont = {}
        contrib_cont = {}
        predict_cont = {}

        h_dim = get_histogram_dimension(h)
        dim_range = range(h_dim)

        axis_nbins = []
        axis_mins = []
        axis_maxs = []
        axis_bins = []
        axis_titles = []

        th_arg_list  = []

        user_notify_format = ""
        user_notify = []

        title_format = "%s"
        title_items = [ h.GetTitle() ]
        for axis in dim_range :
            axis_nbins.append( getattr( h, "GetNbins%s" % axes[axis] )() )
            axis_mins.append( getattr( h, "Get%saxis" % axes[axis] )().GetXmin() )
            axis_maxs.append( getattr( h, "Get%saxis" % axes[axis] )().GetXmax() )
            axis_bins.append( getattr( h, "Get%saxis" % axes[axis] )().GetXbins().GetArray() )
            axis_titles.append( getattr( h, "Get%saxis" % axes[axis] )().GetTitle() )

            th_arg_list.append( axis_nbins[-1] )
            th_arg_list.append( axis_mins[-1] )
            th_arg_list.append( axis_maxs[-1] )

            user_notify_format += ": [ %.2e, %.2e ] :"
            user_notify.append( axis_mins[-1] )
            user_notify.append( axis_maxs[-1] )

            title_format += ";%s"
            title_items.append( axis_titles[-1] )

        print user_notify_format % tuple(user_notify)

        title = title_format % tuple(title_items)

        firstbin, lastbin = get_histogram_bin_range(h)
        for mode in modes :
            # here need to add in check on contrib and make one for each contribution
            histo_cont[mode] = getattr( r, "TH%dD" % h_dim )( h.GetName() + "_" + mode, title, *th_arg_list )
            base_val = 1e9
            if mode == "pval" :
                base_val = 0.0
            for bin in range( firstbin, lastbin + 1 ) :
                histo_cont[mode].SetBinContent( bin, base_val )
        for c in contribs : # contribs is a list of Contribution objects
            contrib_cont[c.short_name] = getattr( r, "TH%dD" % h_dim )( h.GetName() + "_dX_" + c.short_name, title, *th_arg_list )
            for bin in range( firstbin, lastbin + 1 ) :
                contrib_cont[c.short_name].SetBinContent( bin, 0.0 )
        for p in predicts : # predicts is a list of Contribution objects
            predict_cont[p.short_name] = getattr( r, "TH%dD" % h_dim )( h.GetName() + "_pred_" + p.short_name, title, *th_arg_list )
            for bin in range( firstbin, lastbin + 1 ) :
                predict_cont[p.short_name].SetBinContent( bin, 0.0 )
            print "yes", p

        prog = ProgressBar(0, (lastbin-firstbin)+1, 77, mode='fixed', char='#')
        for i in range( firstbin, lastbin+1 ) :
            prog.increment_amount()
            print prog,'\r',
            stdout.flush()
            entry = int( h.GetBinContent(i) )
            if entry > 0 :
                chain.GetEntry(entry)
                fill_bins( histo_cont, contrib_cont,predict_cont, contribs,predicts  , i, chain, mcf )
        perform_zero_offset( histo_cont["dchi"] )
        print
        save_hdict_to_root_file( histo_cont,  mcf.FileName, mcf.DataDirectory)
        save_hdict_to_root_file( contrib_cont, mcf.FileName, mcf.DataDirectory)
        save_hdict_to_root_file( predict_cont, mcf.FileName, mcf.DataDirectory)
def recalc_to_file( collection, output_file = "" ) :
    model  = models.get_model_from_file(collection)
    lhoods = models.get_lhood_from_file(collection)
    outfile = collection.FileName if output_file == "" else output_file
    print "Output file is %s" % outfile

    # initialise the MC-variables
    MCVdict=v.mc_variables()

    chain = MCRecalcChain( collection )
    nentries = chain.GetEntries()

    begin = getattr( collection, "StartEntry", 0)
    end   = getattr( collection, "EndEntry", nentries)  # entries run 0..nentries-1; a default of nentries+1 would read past the end

    total_delta = 0

    # create trees in scope of outfile
    out = r.TFile(outfile,"recreate")
    chi2tree = chain.chains["predictions"].CloneTree(0)

    # might need to take the address of contribvars
    nTotVars = chain.nTotVars["predictions"]
    contribvars = array('d',[0.0]*nTotVars)
    contribtree = r.TTree( 'contribtree', 'chi2 contributions')
    varsOutName = "vars[%d]/D" % ( nTotVars )
    contribtree.SetMaxTreeSize(10*chi2tree.GetMaxTreeSize())
    contribtree.Branch("vars",contribvars,varsOutName)

    # same with lhood
    nLHoods = len(lhoods.keys())
    lhoodvars = array('d',[0.0]*nLHoods)
    lhoodtree = r.TTree( 'lhoodtree', 'lhood contributions')
    varsOutName = "vars[%d]/D" % ( nLHoods )
    lhoodtree.SetMaxTreeSize(10*chi2tree.GetMaxTreeSize())
    lhoodtree.Branch("vars",lhoodvars,varsOutName)

    # want to save best fit point entry number: create new tree and branch
    bfname = getattr( collection, "BestFitEntryName", "BestFitEntry"  )
    bft=r.TTree(bfname, "Entry")
    bfn=array('i',[0])
    bft.Branch('EntryNo',bfn,'EntryNo/I')

    # and the minChi minEntry
    minChi=1e9
    minEntry=-1
    count=-1 # because the first entry has number 0

    prog = ProgressBar(begin, end, 77, mode='fixed', char='#')
    for entry in range(begin,end) :

        prog.increment_amount()
        print prog,'\r',
        stdout.flush()

        chain.GetEntry(entry)
        if good_point( chain.treeVars["predictions"], collection ) :
            delta = 0.
            chi2 = 0

            for constraint in model :
                MCV=MCVdict[constraint.short_name]
                v_index = MCV.get_index(collection)

                chi2_t = constraint.get_chi2( chain.treeVars["predictions"][v_index] )
                contribvars[v_index] = chi2_t
                chi2 += chi2_t
            for i,lh in enumerate(lhoods.values()) :
                chi2_t = lh.get_chi2( chain.treeVars["predictions"] )
                lhoodvars[i] = chi2_t
                chi2 += chi2_t

            chi2 += spectrum_constraints( chain.treeVars["predictions"], collection )

            if chi2 > getattr(collection, "MinChi2", 0 ) and \
               chi2 < getattr(collection, "MaxChi2", 1e9 ) :
                # This was inserted to check on if there was a significant
                # calculation error ( average deltachi2 per entry: 1e-15 )
                if __DEBUG :
                    delta_chi2_val = chi2_t - chain.treeVars["contributions"][key]  # NB: `key` is not defined in this excerpt; it presumably comes from the original module
                    delta = delta + delta_chi2_val
                    total_delta = total_delta + abs(delta)
                chain.treeVars["predictions"][0] = chi2
                contribvars[0] = chi2
                chi2tree.Fill()
                contribtree.Fill()
                lhoodtree.Fill()
                count+=1
                #dealing with minChi
                if chi2 < minChi:
                    minChi=chi2
                    minEntry=count

    #Saving best fit Entry number
    bft.GetEntry(0)
    bfn[0]=minEntry
    bft.Fill()

    bft.AutoSave()
    chi2tree.AutoSave()
    contribtree.AutoSave()
    lhoodtree.AutoSave()

    out.Close()

    if __DEBUG :
        print "\n--------------------------\n"
        print "   TOTAL    (    MEAN    )"
        print "%10e(%10e)" % ( total_delta, (total_delta/(end-begin)) )
        print "\n--------------------------\n"