Example #1
def MCexp(archVistas, archCons, exp):
    vistas = cargarCQ(archVistas)
#     print "\nVistas: "
#     pprint.pprint(vistas)
    consultas = cargarCQ(archCons)
    #import psyco      # used for improved performance
    #psyco.full()      # used for improved performance
    for q in consultas:
#         print "Q:"
#         pprint.pprint(q)
        tiempoi = resource.getrusage(resource.RUSAGE_SELF)[0]
        bs = createMCDs(vistas, q)
        tiempof1 = resource.getrusage(resource.RUSAGE_SELF)[0]

        if exp == 'RW':
            rs = combinarMCDs(q, bs)
            for r in rs:
                print r
            tiempof2 = resource.getrusage(resource.RUSAGE_SELF)[0]
        elif exp == 'MCD':
            rs = []
            for b in bs:
                print b
            tiempof2 = tiempof1
            
        print str(len(bs)) + '\t' + str(len(rs)) + '\t' + str(tiempof1-tiempoi) + '\t' + str(tiempof2-tiempof1)
Example #2
def VariableElimination(Factors, Ordering, verbose=True):
    """ Variable Elimination algorithm """
    start_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    tw = 0 # elimination width
    wtw = 0 # weighted width
    delta_mem = 0
    max_memory = 0
    for var in Ordering:
        if verbose:
            print "-%6s\t" % var.label,
            sys.stdout.flush()
        B = []
        for f in Factors:
            if var in f.scope:
                B.append(f)
        for f in B:
            Factors.remove(f)
        f = ParetoSetFactorSumBProduct(B, [var], False)
        Factors.append(f)
        delta_mem = (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) - start_mem
        max_memory = max(delta_mem, max_memory)
        if verbose:
            dim = "%dx%d" % (f.num_tables, f.dimension)
            print "[width: %3d,\tdim: %10s,\tsize:%10d,\tmem: %d MB]" % (len(f.scope), dim, f.num_tables*f.dimension, delta_mem / 1000000.0)
            tw = max(tw, len(f.scope))
            wtw = max(wtw, f.dimension)
            sys.stdout.flush()
    f = Factors.pop()
    while len(Factors) > 0:
        fp = Factors.pop()
        f = SetFactorProduct(f, fp)
    return f, tw, wtw, max_memory
Example #3
 def _run_analyzers_on_event(self):
     '''Run all analysers on the current event, self.event. 
     Returns a tuple (success?, last_analyzer_name).
     '''
     for i,analyzer in enumerate(self._analyzers):
         if not analyzer.beginLoopCalled:
             analyzer.beginLoop(self.setup)
         start = timeit.default_timer()
         # NOTE: iEv (the current event index) is not defined in this method's
         # scope; compare Example #11, where the same logic lives in process(self, iEv).
         if self.memReportFirstEvent >= 0 and iEv >= self.memReportFirstEvent:
             memNow=resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
             if memNow > self.memLast :
                print  "Mem Jump detected before analyzer %s at event %s. RSS(before,after,difference) %s %s %s "%( analyzer.name, iEv, self.memLast, memNow, memNow-self.memLast)
             self.memLast=memNow
         ret = analyzer.process( self.event )
         if self.memReportFirstEvent >=0 and iEv >= self.memReportFirstEvent:           
             memNow=resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
             if memNow > self.memLast :
                print "Mem Jump detected in analyzer %s at event %s. RSS(before,after,difference) %s %s %s "%( analyzer.name, iEv, self.memLast, memNow, memNow-self.memLast)
             self.memLast=memNow
         if self.timeReport:
             self.timeReport[i]['events'] += 1
             if self.timeReport[i]['events'] > 0:
                 self.timeReport[i]['time'] += timeit.default_timer() - start
         if ret == False:
             return (False, analyzer.name)
     return (True, analyzer.name)
Example #4
def memory_usage():

    """
    Return the peak memory usage of this process, in gigabytes.
    :return: peak memory usage in GB
    """

    # If we are on linux
    if platform == "linux" or platform == "linux2":

        kilobytes = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # peak memory usage (bytes on OS X, kilobytes on Linux)
        gigabytes = kilobytes * 1e-6

        return gigabytes

    # If we are on Mac OS X
    elif platform == "darwin":

        kilobytes = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss # peak memory usage (bytes on OS X, kilobytes on Linux)
        gigabytes = kilobytes * 1e-9

        return gigabytes

    # We don't support Windows
    elif platform == "win32": raise EnvironmentError("The Windows operating system is not supported")

    # Unrecognized platform
    else: raise EnvironmentError("Unrecognized platform")
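The unit discrepancy noted in the comments above is the main trap with ru_maxrss. A minimal self-contained sketch of the same idea (the helper name peak_memory_gb is ours, not from the example):

import resource
import sys

def peak_memory_gb():
    """Peak RSS of the current process in gigabytes (sketch; Unix only)."""
    peak = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    if sys.platform == "darwin":
        return peak / 1e9  # ru_maxrss is reported in bytes on macOS
    return peak / 1e6      # and in kilobytes on Linux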
Example #5
def conv_insts(fam, fam_io_manager, sp, sp_io_manager, 
               ninst=1, update_freq=100, verbose=False):
    n = 0
    for point in sp.points():
        param_uuid = uuid.uuid4()
        sp.record_point(point, param_uuid, sp_io_manager)
        for i in range(ninst):
            inst_uuid = uuid.uuid4()
            inst = sp.gen_inst(point, inst_uuid, sp_io_manager)
            fam.record_inst(inst, inst_uuid, param_uuid, sp.name, 
                            fam_io_manager)
            if n % update_freq == 0:
                if verbose:
                    # print('Total writes: {0}'.format(
                    #         sum([tbl.n_writes for tbl in fam_tables.values() + sp_tables.values()])))
                    print('Memusg before collect: {0}'.format(
                            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
                gc.collect()
                if verbose:
                    print('Memusg after collect: {0}'.format(
                            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
                    print('{0} instances have been converted'.format(n))
            n += 1
    
    if verbose:
        print('{0} instances have been converted'.format(n))
Example #6
    def wrapper(*args, **kwargs):
        """
        Run the wrapped function, then report non-destroyed QObjects and any
        growth in memory usage and live Python object counts.
        """
        # grab the pre-run memory info:
        num_objects_before = len(gc.get_objects())
        bytes_before = 0
        if sys.platform == "darwin":  # sys.platform reports lowercase "darwin" on macOS
            import resource
            bytes_before = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0

        # run the function:
        res = func(*args, **kwargs)

        # report any non-destroyed QObjects:
        # Note, this will usually run before the main objects have been destroyed by the
        # event loop so it's important to cross-check the output with subsequent lines.
        report_non_destroyed_qobjects()

        # cleanup and grab the post-run memory info:
        gc.collect()
        bytes_after = 0
        if sys.platform == "darwin":
            bytes_after = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0 / 1024.0
        num_objects_after = len(gc.get_objects())

        # and report any difference in memory usage:
        bytes_diff = bytes_after - bytes_before
        obj_diff = num_objects_after - num_objects_before
        msg = ("Memory before: %0.2fMb, current: %0.2fMb, leaked: %0.2fMb (%d new Python objects)"
               % (bytes_before, bytes_after, bytes_diff, obj_diff))
        app = sgtk.platform.current_bundle()
        app.log_debug(msg)

        # return the result:
        return res
Example #7
File: ci.py Project: efcs/zorg
    def execute(self, verbose=False):
        if verbose:
            note('executing: %s' % ' '.join("'%s'" % arg
                                            for arg in self.command))

        start_rusage = resource.getrusage(resource.RUSAGE_CHILDREN)
        start_time = time.time()

        p = subprocess.Popen(self.command,
                             stdout=open(self.stdout_path, 'w'),
                             stderr=open(self.stderr_path, 'w'),
                             env=self.env)
        self.result = p.wait() == 0

        end_time = time.time()
        end_rusage = resource.getrusage(resource.RUSAGE_CHILDREN)
        self.metrics["user_time"] = end_rusage.ru_utime - start_rusage.ru_utime
        self.metrics["sys_time"] = end_rusage.ru_stime - start_rusage.ru_stime
        self.metrics["wall_time"] = end_time - start_time

        if verbose:
            note("command executed in -- "
                 "user: %.4fs, wall: %.4fs, sys: %.4fs" % (
                    self.metrics["user_time"], self.metrics["wall_time"],
                    self.metrics["sys_time"]))
Example #8
def report(count):
    global last, last_u, last_s, start
    headers = ['RSS', 'MajFlt', 'user', 'sys', 'ms']
    ru = resource.getrusage(resource.RUSAGE_SELF)
    now = time.time()
    rss = int(ru.ru_maxrss/1024)
    if not rss:
        rss = linux_memstat().get('VmRSS', '??')
    fields = [rss,
              ru.ru_majflt,
              int((ru.ru_utime - last_u) * 1000),
              int((ru.ru_stime - last_s) * 1000),
              int((now - last) * 1000)]
    fmt = '%9s  ' + ('%10s ' * len(fields))
    if count >= 0:
        print(fmt % tuple([count] + fields))
    else:
        start = now
        print(fmt % tuple([''] + headers))
    sys.stdout.flush()

    # don't include time to run report() in usage counts
    ru = resource.getrusage(resource.RUSAGE_SELF)
    last_u = ru.ru_utime
    last_s = ru.ru_stime
    last = time.time()
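The linux_memstat() fallback used when ru_maxrss reports zero is not shown in this example; one plausible minimal implementation parses /proc/self/status (a sketch; string values suffice for the .get('VmRSS', '??') call above):

def linux_memstat():
    """Parse /proc/self/status into a {field: value-string} dict (Linux only)."""
    stats = {}
    try:
        with open('/proc/self/status') as f:
            for line in f:
                key, _, value = line.partition(':')
                stats[key] = value.strip()
    except IOError:
        pass
    return stats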
Example #9
def resources(bot, user, chan, realtarget, *args):
	if chan is not None: replyto = chan
	else: replyto = user

	uptime = time.time() - bot.parent.starttime
	m, s = divmod(uptime, 60)
	h, m = divmod(m, 60)
	d, h = divmod(h, 24)
	try:
		res = resource.getrusage(resource.RUSAGE_BOTH)
	except:
		res = resource.getrusage(resource.RUSAGE_SELF)

	bot.slowmsg(replyto, "Resource usage:")
	for i, v in (
		('uptime (s)', "%d (%d days %02d:%02d:%02d)" % (uptime, d, h, m, s)),
		('utime (s)', res.ru_utime),
		('stime (s)', res.ru_stime),
		('memory (MiB)', (res.ru_maxrss/1024.0)),
		('I/O (blocks)', res.ru_inblock+res.ru_oublock),
		('page faults', res.ru_majflt),
		('signals', res.ru_nsignals),
		('context switches (voluntary)', res.ru_nvcsw),
		('context switches (involuntary)', res.ru_nivcsw),
	):
		bot.slowmsg(replyto, "- %s: %s" % (i, v))
	bot.slowmsg(replyto, "EOL.")
Example #10
def captureImage ():
	global framenum
	global webcam
	global lastFrame

	print "captureImage"
	# Debug test to check for memory leak when changing camera resolution
	print "memory before capture = " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
	if (IMAGE_WIDTH != VIDEO_WIDTH) or (IMAGE_HEIGHT != VIDEO_HEIGHT):
		webcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, IMAGE_WIDTH)
		webcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, IMAGE_HEIGHT)
	
		ret, lastFrame = webcam.read()
		webcam.set(cv2.cv.CV_CAP_PROP_FRAME_WIDTH, VIDEO_WIDTH)
		webcam.set(cv2.cv.CV_CAP_PROP_FRAME_HEIGHT, VIDEO_HEIGHT)
	else:
		ret, lastFrame = webcam.read()
	print "memory after capture = " + str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
	framenum = framenum + 1 
	stFileName = getFileName(framenum)
	cv2.imwrite(stFileName,lastFrame)
	print "Saved " + stFileName
	# scale the lastFrame image to match the VIDEO_WIDTH and VIDEO_HEIGHT
	lastFrame = scaleImage(lastFrame, VIDEO_WIDTH, VIDEO_HEIGHT)
	modifiedMovie()	
Example #11
    def process(self, iEv ):
        """Run event processing for all analyzers in the sequence.

        This function is called by self.loop,
        but can also be called directly from
        the python interpreter, to jump to a given event.
        """
        self.event = Event(iEv, self.events[iEv], self.setup)
        self.iEvent = iEv
        for i,analyzer in enumerate(self.analyzers):
            if not analyzer.beginLoopCalled:
                analyzer.beginLoop(self.setup)
            start = timeit.default_timer()
            if self.memReportFirstEvent >=0 and iEv >= self.memReportFirstEvent:           
                memNow=resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                if memNow > self.memLast :
                   print  "Mem Jump detected before analyzer %s at event %s. RSS(before,after,difference) %s %s %s "%( analyzer.name, iEv, self.memLast, memNow, memNow-self.memLast)
                self.memLast=memNow
            ret = analyzer.process( self.event )
            if self.memReportFirstEvent >=0 and iEv >= self.memReportFirstEvent:           
                memNow=resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
                if memNow > self.memLast :
                   print "Mem Jump detected in analyzer %s at event %s. RSS(before,after,difference) %s %s %s "%( analyzer.name, iEv, self.memLast, memNow, memNow-self.memLast)
                self.memLast=memNow
            if self.timeReport:
                self.timeReport[i]['events'] += 1
                if self.timeReport[i]['events'] > 0:
                    self.timeReport[i]['time'] += timeit.default_timer() - start
            if ret == False:
                return (False, analyzer.name)
        if iEv<self.nPrint:
            self.logger.info( self.event.__str__() )
        return (True, analyzer.name)
Example #12
@contextmanager
def profiling(logger, key=None, scale=1, to_profile=True, **kwargs):
    """ contextmanager to log function call time """
    if not to_profile:
        yield
        return

    log_kwargs = kwargs

    if key:
        log_kwargs['key'] = key
        logger.debug('profiling_start', **log_kwargs)
    before_time = time.time()
    # on mac, ru_maxrss seems to be in bytes, but on other unixes it seems to be in KB
    before_max_mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * KB_TO_BYTE
    try:
        yield
    finally:
        after_time = time.time()
        after_max_mem_usage = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * KB_TO_BYTE

        num_secs = float(after_time - before_time) / scale if scale else 0
        log_kwargs['num_secs'] = num_secs
        log_kwargs['pre_max_mem_used_bytes'] = before_max_mem_usage
        log_kwargs['post_max_mem_used_bytes'] = after_max_mem_usage
        log_kwargs['max_mem_used_bytes'] = after_max_mem_usage - before_max_mem_usage
        log_kwargs['max_mem_used'] = humanize.naturalsize(after_max_mem_usage - before_max_mem_usage)
        level = logging.INFO if num_secs > 0.1 else logging.DEBUG

        logger.log(level, "profiling_end", **log_kwargs)
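A trimmed, dependency-free variant of the same contextmanager pattern (names are ours; the macOS-bytes versus Linux-kilobytes caveat from the comment above applies here too):

import contextlib
import resource
import sys
import time

@contextlib.contextmanager
def profiled(label):
    """Print wall time and peak-RSS growth for a block (sketch)."""
    to_bytes = 1 if sys.platform == "darwin" else 1024
    t0 = time.time()
    rss0 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * to_bytes
    try:
        yield
    finally:
        rss1 = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * to_bytes
        print("%s: %.3fs, peak RSS grew by %d bytes"
              % (label, time.time() - t0, rss1 - rss0))

with profiled("build list"):
    data = list(range(10**6))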
Example #13
def run(input_filename, output_filename):
    articles = defaultdict(set)

    without_identifiers = set()

    reader = csv.reader(open(input_filename, 'r'))

    try:
        biggest = 0

        for i, article in enumerate(reader):
            article = Article(*article)
            identifiers = [(k,v) for k,v in article._asdict().items() if k in IDENTIFIERS and v]
            data = None # dict(identifiers)
            if not identifiers:
                without_identifiers.add(article.id)
                continue
            articles[identifiers[0]].add(article.id)
            for identifier in identifiers[1:]:
                if articles[identifiers[0]] is not articles[identifier]:
                    articles[identifiers[0]] |= articles[identifier]
                    articles[identifier] = articles[identifiers[0]]
                    if len(articles[identifier]) > biggest:
                        biggest = len(articles[identifier])

            if i % 10000 == 0:
                print "%7d" % i, resource.getrusage(resource.RUSAGE_SELF)[2], biggest
                if resource.getrusage(resource.RUSAGE_SELF)[2] > 1e7:
                    print "Using too much memory"
                    raise Exception
    except Exception, e:
        print e
Example #14
def test_memory():
    arches = [ 'VexArchX86', 'VexArchPPC32', 'VexArchAMD64', 'VexArchARM' ]
    # we're not including VexArchMIPS32 cause it segfaults sometimes

    for i in xrange(10000):
        try:
            s = hex(random.randint(2**100,2**100*16))[2:]
            a = random.choice(arches)
            p = pyvex.IRSB(bytes=s, arch=a)
        except pyvex.PyVEXError:
            pass

    kb_start = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    for i in xrange(20000):
        try:
            s = hex(random.randint(2**100,2**100*16))[2:]
            a = random.choice(arches)
            p = pyvex.IRSB(bytes=s, arch=a)
        except pyvex.PyVEXError:
            pass
    del p
    gc.collect()

    kb_end = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    # allow a 2mb leeway
    nose.tools.assert_less(kb_end - kb_start, 2000)
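The warm-up pass before the baseline matters: ru_maxrss is a high-water mark, so caches and allocator pools must be filled before the first snapshot. The same shape as a reusable helper (a sketch; the threshold is in kilobytes, matching ru_maxrss on Linux):

import gc
import resource

def assert_no_leak(workload, warmup=1000, measured=2000, leeway_kb=2000):
    """Fail if peak RSS grows by more than leeway_kb over the measured runs (sketch)."""
    for _ in range(warmup):   # exercise the allocation pattern once
        workload()
    gc.collect()
    kb_start = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    for _ in range(measured):
        workload()
    gc.collect()
    kb_end = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    assert kb_end - kb_start < leeway_kb, \
        "peak RSS grew by %d KB" % (kb_end - kb_start)

assert_no_leak(lambda: bytes(1024))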
Example #15
def LogVariableElimination(Factors, Ordering, verbose=True):
    """ Variable Elimination algorithm """
    start_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    tw = 0 # elimination width
    wtw = 0 # weighted width
    delta_mem = 0
    max_memory = 0
    for var in Ordering:
        if verbose:
            print "-%s" % var.label,
            sys.stdout.flush()
        B = []
        for f in Factors:
            if var in f.scope:
                B.append(f)
        for f in B:
            Factors.remove(f)
        f = LogFactorSumBProduct(B, [var])
        Factors.append(f)
        delta_mem = (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) - start_mem
        max_memory = max(delta_mem, max_memory)
        if verbose:
            print "[tw: %d,\tdim: %d,\tmem: %d MB]" % (len(f.scope), f.dimension, delta_mem / 1000000.0)
            tw = max(tw, len(f.scope))
            wtw = max(wtw, f.dimension)
            sys.stdout.flush()
    f = Factors.pop()
    while len(Factors) > 0:
        fp = Factors.pop()
        f = LogFactorProduct(f, fp)
    if verbose:
        print
    return f, tw, wtw, max_memory
Example #16
def print_memory_consumption():
    print memory_usage()
    import resource
    print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

    p = psutil.Process(os.getpid())
    print "Process status: "+str(p.memory_info())
Example #17
    def __mmc_store_log(self, final=False):
        try:
            from mmc.models import MMCLog

            memory = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            resources = resource.getrusage(resource.RUSAGE_SELF)

            utime = resources.ru_utime - self._mmc_resources.ru_utime
            stime = resources.ru_stime - self._mmc_resources.ru_stime
            div = 1024.0 if 'linux' in sys.platform else 1048576.0
            self._mmc_elapsed_time = time.time() - self._mmc_start_time

            MMCLog.logging(
                instance=self._mmc_log_instance,
                start=self._mmc_start_date,
                hostname=self._mmc_hostname,
                script=self._mmc_script,
                elapsed="%0.2f" % self._mmc_elapsed_time,
                success=self._mmc_success,
                error_message=self._mmc_error_message,
                traceback=self._mmc_traceback,
                sys_argv=self.__mmc_get_sys_argv(),
                memory="%0.2f" % (memory / div),
                cpu_time="%0.2f" % (utime + stime),
                stdout_messages=self.__mmc_get_stdout(),
                pid=os.getpid(),
                queries=self.__mmc_get_queries(),
                is_fixed=False if self._mmc_success is False else None,
                end=now() if final else self._mmc_log_instance.end
            )
        except Exception as err:
            stderr("[MMC] Logging broken with message: {0}".format(err))
Example #18
    def process_request(self, request):

        def usertime_after():
            return resource.getrusage(resource.RUSAGE_SELF).ru_utime

        def kerneltime_after():
            return resource.getrusage(resource.RUSAGE_SELF).ru_stime

        def maxrss_after():
            return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

        request.honey_builder = libhoney.Builder({
            "method": request.method,
            "scheme": request.scheme,
            "path": request.path,
            "query": request.GET,
            "isSecure": request.is_secure(),
            "isAjax": request.is_ajax(),
            "isUserAuthenticated": request.user.is_authenticated(),
            "username": request.user.username,
            "host": request.get_host(),
            "ip": request.META['REMOTE_ADDR'],

            "usertime_before": resource.getrusage(resource.RUSAGE_SELF).ru_utime,
            "kerneltime_before": resource.getrusage(resource.RUSAGE_SELF).ru_stime,
            "maxrss_before": resource.getrusage(resource.RUSAGE_SELF).ru_maxrss,
        }, [
            usertime_after,
            kerneltime_after,
            maxrss_after,
        ])

        return None
Example #19
 def one_step(self, fast = True):
     start = resource.getrusage(resource.RUSAGE_SELF).ru_utime
     self.flockstep.step(self.flock, fast)
     end = resource.getrusage(resource.RUSAGE_SELF).ru_utime
     self.time_elapsed += end - start
     self.step += 1
     self.sample()
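Because ru_utime only advances while the process executes user-mode code, deltas like the one above exclude time spent sleeping or blocked on I/O; a quick self-contained demonstration (sketch):

import resource
import time

def user_time():
    return resource.getrusage(resource.RUSAGE_SELF).ru_utime

t0 = user_time()
time.sleep(0.5)                    # contributes almost nothing to user time
sum(i * i for i in range(10**6))   # this does
print("user CPU seconds: %.3f" % (user_time() - t0))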
Example #20
def getTotalCpuTime():
    """Gives the total cpu time, including the children. 
    """
    me = resource.getrusage(resource.RUSAGE_SELF)
    childs = resource.getrusage(resource.RUSAGE_CHILDREN)
    totalCpuTime = me.ru_utime+me.ru_stime+childs.ru_utime+childs.ru_stime
    return totalCpuTime
Example #21
def resource_runtime_plot(f, start, end, stride, repeats, verbose=False):
    """
    Measures running time of a function using the resource module.
    The function is run with all the integer values in the given range
    (start and end); this is repeated n (repeats) times.
    Returns the tuple (arguments, results, average); a result is the average
    runtime for that argument, and average is the total average time for all
    arguments.
    """
    arguments = range(start, end, stride)
    results = [0 for i in arguments]
    mult = 1.0 / repeats
    average = 0

    if verbose:
        print "\nmeasuring function", f.__name__
    for j in xrange(repeats):
        if verbose:
            print "\nstarting repeat {0} of {1}".format(j, repeats)
        for i in xrange(len(arguments)):
            if verbose:
                print "measuring function for argument", arguments[i]
            # local names chosen so the start/end parameters are not shadowed
            ru_start = resource.getrusage(resource.RUSAGE_SELF)
            f(arguments[i])
            ru_end = resource.getrusage(resource.RUSAGE_SELF)
            time = (ru_end.ru_utime - ru_start.ru_utime) * mult
            results[i] += time
            average += time

    return arguments, results, average
Example #22
    def dfs(self):
        visitedStates= Set()
        stack = []
        maxSize = 1
        current = self
        stack.append(self)
        nodeExpand = 1
        foundGoal = False

        if current.isGoalState():
            mem  = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
            return True, current, maxSize, nodeExpand, mem

        while len(stack) != 0:
            current = stack.pop()
            nodeExpand += 1
            for direct in DIRECTION:
                childList = current.simulateMove(direct)
                if childList != None:
                    if tuple(childList) not in visitedStates:
                        current.child.append(State(self.n, childList, current, direct))
                        stack.append(current.child[-1])
                        visitedStates.add(tuple(current.child[-1].list))
                        maxSize = max(len(stack),maxSize)
                        if current.child[-1].isGoalState():
                            current = current.child[-1]
                            foundGoal = True
                            break
            # compare for max size of the queue
            if foundGoal:
                break
        mem  = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
        return foundGoal, current, maxSize, nodeExpand, mem
Example #23
def get_kmer_counts(input, output, k, ns, nprocs, verbose):
    """Analyse kmers. Multiprocessing enabled"""
    #define base2digit dict for 4-char seq
    base2digit = {"A": "0", "C": "1", "G": "2", "T": "3"}    
    if ns:
        #change to 5-char seq if Ns in seq
        base2digit = {"A": "0", "C": "1", "G": "2", "N": "3", "T": "4"}
    #init mer counts
    #255 for uint8 #65,535 for uint16 or #4,294,967,295 for uint32 
    merCounts = np.zeros(len(base2digit)**k//2, dtype='uint16')
    #start pool #maxtasksperchild=1000)
    p = Pool(nprocs, initializer=init_args, initargs=(k, ns, base2digit)) 
    #process reads
    for i, ids in enumerate(p.imap_unordered(seq2mers, SeqIO.parse(input, 'fastq'), \
                                             chunksize=100), 1):
        if not i%1e4:
            sys.stderr.write(" %s [%s Mb]\r"%(i, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
        for mid in ids:
            merCounts[mid] += 1
    sys.stderr.write(" %s [%s Mb]\n"%(i, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1024))
    #get mer freq
    maxCount    = merCounts.max()
    if maxCount < 100:
        maxCount = 100
    occurencies = [0]*maxCount
    for c in merCounts:
        occurencies[c-1] += 1
    #write to file
    output.write("\n".join("%s\t%s"%xy for xy in enumerate(occurencies,1))+"\n")
    return occurencies
Example #24
    def test_dumps_usage(self):
        '''
        repeatedly serialize, check that usage doesn't go up
        '''
        if cdumps is None:
            logger.warn('no C dumps(), skipping test_dumps_usage')
            return
        start_usage = resource.getrusage(resource.RUSAGE_SELF)
        usage_history = [start_usage]
        for o in _range(_TEST_OUTER):
            for i in _range(_TEST_COUNT):
                ob = _randob()
                blob = cdumps(ob)
                # and silently drop the result. I hope the garbage collector works!
            t_usage = resource.getrusage(resource.RUSAGE_SELF)
            usage_history.append(t_usage)
        end_usage = usage_history[-1]
        dmaxrss = end_usage.ru_maxrss - start_usage.ru_maxrss
        didrss = end_usage.ru_idrss - start_usage.ru_idrss
        dmaxrsspct = ((end_usage.ru_maxrss != 0) and (dmaxrss / end_usage.ru_maxrss)) or 0
        didrsspct = ((end_usage.ru_idrss != 0) and (didrss / end_usage.ru_idrss)) or 0

        sys.stderr.write('maxrss: {} - {}, d={} ({:.2f}%)\n'.format(start_usage.ru_maxrss, end_usage.ru_maxrss, dmaxrss, dmaxrsspct * 100.0))
        sys.stderr.write('idrss: {} - {}, d={} ({:.2f}%)\n'.format(start_usage.ru_idrss, end_usage.ru_idrss, didrss, didrsspct * 100.0))

        assert (dmaxrsspct) < 0.05, [x.ru_maxrss for x in usage_history]
        assert (didrsspct) < 0.05, [x.ru_idrss for x in usage_history]
Example #25
 def update(self):
     import time
     self.current += 1
     self.percentage = int(round(100*float(self.current)/self.total))
     if self.percentage % self.printint == 0 and self.percentage != self.lpercentage:
         self.stf=int(round((self.total-self.current)/((self.current-self.lcurrent)/(time.time()-self.ltime))))
         if self.type == 'full' and self.logfile: self.logfile.write(
             '#Progress => '+str(self.percentage)+'%, '+
             str( round((self.current-self.lcurrent)/(time.time()-self.ltime),2) )+' '+self.unit+'/second, '+
             time.strftime("%A, %d %b %Y %H:%M:%S",time.localtime())+
             ', left: '+str(self.stf/60/60)+'h '+str(self.stf/60%60)+'min '+str(self.stf%60)+'s')
         if self.mem:
             import resource
             total_memory_used = (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss + resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss)
             this_process_memory_used = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
             if total_memory_used/1024/1024 > 1024:
                 self.logfile.write(', using '+str(round(float(total_memory_used)/1024/1024/1024,2))+' ('+str(round(float(this_process_memory_used)/1024/1024/1024,2))+') GB.\n')
             elif total_memory_used/1024 > 1024:
                 self.logfile.write(', using '+str(round(float(total_memory_used)/1024/1024,2))+' ('+str(round(float(this_process_memory_used)/1024/1024,2))+') MB.\n')
             else:
                 self.logfile.write(', using '+str(round(float(total_memory_used)/1024,2))+' ('+str(round(float(this_process_memory_used)/1024,2))+') KB.\n')
         else:    self.logfile.write('\n')
         if self.type == 'minimal': self.logfile.write('..')
         self.ltime = time.time()
         self.lcurrent = self.current
         self.lpercentage = self.percentage
Example #26
def generic_batch_processor_v2(harvester, bman_list):

    error_map = {}
    next_bman_list = []
    failed_list = []
    max_step_size = 50
    step_size = max_step_size #full throttle!
    fail_ratio = 0.15
    step_factor = 1.66
    lap_start = time.time()
    bman_total = 1
    error_sum = 0

    while bman_list:
        usage = resource.getrusage(resource.RUSAGE_SELF)
        logger.info(u"New batch. Size:%d for %s Mem:%s MB" % (len(bman_list), harvester,unicode(getattr(usage, "ru_maxrss")/(1024.0))))

        if (E_UNEX in error_map and error_map[E_UNEX] / float(bman_total) > fail_ratio) or error_sum > 4:
            step_size = int(step_size / step_factor) if int(step_size / step_factor) > 1 else 1
            del error_map[E_UNEX]
        else:
            step_size = step_size * 2 if step_size * 2 < max_step_size else max_step_size

        split_bman = [bman_list[i:i+step_size] for i  in range(0, len(bman_list), step_size)]
        bman_total = len(split_bman)

        for (counter, bman_chunk) in enumerate(split_bman,1):
            
            if not(E_UNEX in error_map and error_map[E_UNEX] / float(bman_total) > fail_ratio) or not (E_USER_QUOTA in error_map):
                actual_fail_ratio = error_map[E_UNEX] / float(bman_total) if E_UNEX in error_map else 0
                usage = resource.getrusage(resource.RUSAGE_SELF)
                logger.info(u"bman_chunk (%d/%d) chunk_total:%s InQueue:%d fail_ratio:%s > %s Mem:%s KB" % (counter, bman_total, len(bman_chunk), len(next_bman_list), actual_fail_ratio, fail_ratio, getattr(usage, "ru_maxrss")/(1024.0)))

                if E_QUOTA in error_map:
                    logger.info("Quota error, waiting for 10 minutes")
                    del error_map[E_QUOTA]
                    time.sleep(10*60)
                
                if (time.time() - lap_start) < 1:
                    logger.info(u"Speed too fast. will wait 1 sec")
                    time.sleep(1)

                lap_start = time.time()
                error = gbp_core(harvester, bman_chunk, error_map, next_bman_list, failed_list)
                error_sum = error_sum + 1 if error else 0
                logger.info(u"gbp_core: len(next_bman_list): %s" % len(next_bman_list))
            elif E_USER_QUOTA in error_map:
                logger.error("bman(%d/%d) User quota reached. Aborting the harvest!" % (counter, bman_total))
                failed_list += bman_chunk
            else:
                logger.info("bman(%d/%d) Failed ratio too high. Retrying with smaller batch" % (counter, bman_total))
                next_bman_list += bman_chunk

        bman_list = next_bman_list
        next_bman_list = []

    usage = resource.getrusage(resource.RUSAGE_SELF)
    readable_failed_list = [failed_list[j]["request"]["relative_url"] for j in range(0, len(failed_list))]
    logger.debug(u"END harvesting. Mem:%s MB" % (getattr(usage, "ru_maxrss")/(1024.0)))
    logger.debug(u"Failed list: %s" % (readable_failed_list))
Example #27
    def run(self):
        self.comm.start()       # Start the "Comm" thread
        while not self.running:
            self.handle_message(True)

        rudata_start = resource.getrusage(resource.RUSAGE_SELF)
        count = 0
        while(self.running and count < self.iterations):
            # Non CS:
            self.label()        # labels mark the points where we can "break"
            self.work()
            self.label()
            self.enter_critical_section()
            print("Process %d(clock:%d) has entered critical section."%
                  (self.peerid,self.clock))
            self.label()
            self.work()
            self.label()
            print("Process %d is leaving critical section."%self.peerid)
            self.leave_critical_section()
            count += 1

        rudata_end = resource.getrusage(resource.RUSAGE_SELF)
        utime = rudata_end.ru_utime - rudata_start.ru_utime
        stime = rudata_end.ru_stime - rudata_start.ru_stime
        send(self.net, LOG, (utime, stime, rudata_end.ru_maxrss))
        while self.running:
            self.handle_message(True)
Example #28
    def moveFilesToFinalLocation(self):
        success = True
        # free some memory for file copy command
        if self.debug:
            print('DEBUG: max mem used A:', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
        self.deleteSampleTree()
        if self.debug:
            print('DEBUG: max mem used B:', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

        for tmpFileName in self.tmpFiles:
            outputFileName = self.outputFolder + '/' + self.tmpFolder.join(tmpFileName.split(self.tmpFolder)[1:])
            print ('copy ', tmpFileName, ' to ', outputFileName)
            if self.fileLocator.fileExists(outputFileName):
                self.deleteFile(outputFileName)
            #command = 'xrdcp -d 1 ' + self.fileLocator.getXrootdFileName(tmpFileName) + ' ' + self.fileLocator.getXrootdFileName(outputFileName)
            #print('the command is', command)
            #sys.stdout.flush()
            #returnCode = subprocess.call([command], shell=True)
            copySuccessful = self.fileLocator.cp(tmpFileName, outputFileName)
            if not copySuccessful:
                success = False
                print('\x1b[31mERROR: copy failed for {tmpfile}->{outputfile} !\x1b[0m'.format(tmpfile=tmpFileName,
                                                                                                outputfile=outputFileName))
            else:
                # delete temporary file if copy was successful
                self.deleteFile(tmpFileName)
        return success
Example #29
	def run_protocol(self):

		if self.sockets == None:
			""" Need to set up sockets """
			self.setup_sockets()
		elif len(self.sockets) == 1:
			""" This is a non-leader node """
			self.leader_socket = self.sockets[0]
			self.sockets = None

		try:
			self.run_phase2()
			self.run_phase3()
			self.run_phase4()
			self.run_phase5()
		except:
			self.cleanup_sockets()
			raise
		self.cleanup_sockets()

		self.info("Finished in %g seconds" % (time() - self.start_time))
		self.critical("SUCCESSROUND:SHUFFLE, RID:%d, NNOD:%d, WALLTIME:%g, USR:%g, SYS:%g\n\t%s" % \
				(self.round_id,
				 self.n_nodes, 
				 time() - self.start_time, 
				 resource.getrusage(resource.RUSAGE_SELF).ru_utime - self.rusage_start[0],
				 resource.getrusage(resource.RUSAGE_SELF).ru_stime - self.rusage_start[1],
				 self.size_string()))
Example #30
    def astar(self):
        visitedStates = Set()
        maxSize = 1
        heap = []
        nodeExpand = 0
        current = self
        foundGoal = False

        if current.isGoalState():
            mem  = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
            return True, current, maxSize, nodeExpand, mem

        f = self.hFun()+self.gFun()
        heapq.heappush(heap, (f, self))
        while len(heap) > 0:
            current = heapq.heappop(heap)[1]
            nodeExpand += 1
            for direct in DIRECTION:
                childList = current.simulateMove(direct)
                if childList != None:
                    if tuple(childList) not in visitedStates:
                        current.child.append(State(self.n, childList, current, direct))
                        heapq.heappush(heap, (current.child[-1].hFun() + current.child[-1].gFun(), current.child[-1]))
                        # record the newly generated child's state as visited
                        visitedStates.add(tuple(current.child[-1].list))
                        maxSize = max(len(heap),maxSize)
                        if current.child[-1].isGoalState():
                            current = current.child[-1]
                            foundGoal = True
                            break
            # compare for max size of the queue
            if foundGoal:
                break

        mem  = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000
        return foundGoal, current, maxSize, nodeExpand, mem
Example #31
def run():
    memory1 = resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss  #get initial memory peak

    #load in the json parameter file here
    jsonfile = sys.argv[1]

    if jsonfile:
        with open(jsonfile, 'rb') as ff:
            params = json.load(ff)

    else:
        params = dict()

    print 'my params:', params

    #extract the parameters
    N = params.get('N', 20)
    # dx = params.get('dx', 1)
    total_steps = params.get('steps', 2)
    sumatra_label = params.get('sumatra_label', '')

    c, rho_s, c_alpha, c_beta = sympy.symbols("c_var rho_s c_alpha c_beta")
    f_0 = rho_s * (c - c_alpha)**2 * (c_beta - c)**2

    mesh = fp.PeriodicGrid2D(nx=N, ny=N, dx=.5, dy=.5)

    c_alpha = 0.3
    c_beta = 0.7
    kappa = 2.0
    M = 5.0
    c_0 = 0.5
    epsilon = 0.01
    rho_s = 5.0
    filepath = os.path.join('Data', sumatra_label)

    c_var = fp.CellVariable(mesh=mesh, name=r"$c$", hasOld=True)

    # array of sample c-values: used in f versus c plot
    vals = np.linspace(-.1, 1.1, 1000)

    c_var = fp.CellVariable(mesh=mesh, name=r"$c$", hasOld=True)

    x, y = np.array(mesh.x), np.array(mesh.y)

    out = sympy.diff(f_0, c, 2)

    exec "f_0_var = " + repr(out)

    #f_0_var = -A + 3*B*(c_var - c_m)**2 + 3*c_alpha*(c_var - c_alpha)**2 + 3*c_beta*(c_var - c_beta)**2

    def f_0(c):
        return rho_s * ((c - c_alpha)**2) * ((c_beta - c)**2)

    def f_0_var(c_var):
        return 2 * rho_s * ((c_alpha - c_var)**2 + 4 * (c_alpha - c_var) *
                            (c_beta - c_var) + (c_beta - c_var)**2)

    # free energy
    def f(c):
        return (f_0(c) + .5 * kappa * (c.grad.mag)**2)

    f_data = []
    time_data = []

    def save_data(f, time):
        f_data.append(f.value)
        time_data.append(time)
        np.savetxt(os.path.join(filepath, '1a.txt'), zip(time_data, f_data))

    eqn = fp.TransientTerm(
        coeff=1.) == fp.DiffusionTerm(M * f_0_var(c_var)) - fp.DiffusionTerm(
            (M, kappa))

    elapsed = 0.0
    steps = 0
    dt = 0.01
    total_sweeps = 2
    tolerance = 1e-1
    # duration = 1000.0

    c_var[:] = c_0 + epsilon * (np.cos(0.105 * x) * np.cos(0.11 * y) + \
                                (np.cos(0.13 * x) * np.cos(0.087 * y))**2 + \
                                np.cos(0.025 * x - 0.15 * y) * np.cos(0.07 * x - 0.02 * y))
    c_var.updateOld()
    solver = Solver()

    while steps < total_steps:
        res0 = eqn.sweep(c_var, dt=dt, solver=solver)

        for sweeps in range(total_sweeps):
            res = eqn.sweep(c_var, dt=dt, solver=solver)

        if (res < (res0 * tolerance)):
            steps += 1
            #       elapsed += dt
            dt *= 1.1
            c_var.updateOld()

            if (steps % (total_steps / 10.0) == 0):
                # record the volume integral of the free energy
                save_data(
                    f_0_var(c_var).cellVolumeAverage * mesh.numberOfCells,
                    elapsed)
                # pickle the data on c as a function of space at this particular time
                fp.dump.write({
                    'time': steps,
                    'var': c_var
                }, os.path.join(filepath, '1a{0}.pkl'.format(steps)))

        else:
            dt *= 0.8
            c_var[:] = c_var.old

    print ' '

    #memory stuff saves
    filepath = os.path.join('Data', sumatra_label)
    #Keep track os how much memory was used and dump into a txt file
    memory2 = resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss  #final memory peak
    memory_diff = (memory2 - memory1, )
    filename2 = 'memory_usage.txt'
    np.savetxt(os.path.join(filepath, filename2), memory_diff)
Example #32
 def clocku():
     """clocku() -> floating point number
     Return the *USER* CPU time in seconds since the start of the process.
     This is done via a call to resource.getrusage, so it avoids the
     wraparound problems in time.clock()."""
     return resource.getrusage(resource.RUSAGE_SELF)[0]
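The rusage tuple indexes user time at 0 and system time at 1, so the natural companions to clocku() are one-liners (a sketch modeled on the snippet above; the names clocks and clock are assumptions):

import resource

def clocks():
    """System CPU seconds since the start of the process (sketch)."""
    return resource.getrusage(resource.RUSAGE_SELF)[1]

def clock():
    """User + system CPU seconds since the start of the process (sketch)."""
    u, s = resource.getrusage(resource.RUSAGE_SELF)[:2]
    return u + s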
Example #33
 def tearDown(self):
     unitS = "MB" if platform.system() == "Darwin" else "GB"
     rusageMax = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
     logger.info("Maximum resident memory size %.4f %s", rusageMax / 10 ** 6, unitS)
     endTime = time.time()
     logger.info("Completed %s at %s (%.4f seconds)", self.id(), time.strftime("%Y %m %d %H:%M:%S", time.localtime()), endTime - self.__startTime)
Example #34
with open(out_csv_path, 'w', newline="") as f:
    writer = csv.writer(f)
    writer.writerows(csv_cl)

### HANDLING READS LEFT OUT & OUTPUT READS ###
leftOut = cu.get_leftOut(grps, n_reads)
n_left_out = len(leftOut)

#create new fasta + utgs
logger.info('Writing fasta...')
new_fasta = ''
new_fasta += cu.add_grps(grpd_utgs)

# add left out
if skip_left_out == False:
    logger.info('Adding external reads')
    reads_fq = open(reads_path, 'r').read().split('\n')
    div = cu.get_div(reads_path)
    logger.info(div)
    new_fasta += cu.add_reads(leftOut, reads_fq, div, n_reads)

# write
out_file = open(out_reads_path, "w")
out_file.write(new_fasta)
out_file.close()

max_mem = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss

logger.info('Time elapsed: ' + str(time.time() - start_time))
logger.info('Max memory used (in MB): ' + str(round(max_mem / 1000.0, 3)))
logger.info('DONE')
Example #35
    def fit(self, X, Y):
        import sklearn.svm

        # Calculate the size of the kernel cache (in MB) for sklearn's LibSVM. The cache size is
        # calculated as 2/3 of the available memory (which is calculated as the memory limit minus
        # the used memory)
        try:
            # Retrieve memory limits imposed on the process
            soft, hard = resource.getrlimit(resource.RLIMIT_AS)

            if soft > 0:
                # Convert limit to units of megabytes
                soft /= 1024 * 1024

                # Retrieve memory used by this process
                maxrss = resource.getrusage(resource.RUSAGE_SELF)[2] / 1024

                # On macOS, the MaxRSS output of resource.getrusage is in bytes; on other
                # platforms, it's in kilobytes
                if sys.platform == 'darwin':
                    maxrss = maxrss / 1024

                cache_size = (soft - maxrss) / 1.5

                if cache_size < 0:
                    cache_size = 200
            else:
                cache_size = 200
        except Exception:
            cache_size = 200

        self.C = float(self.C)
        self.epsilon = float(self.epsilon)
        self.tol = float(self.tol)
        self.shrinking = check_for_bool(self.shrinking)
        self.degree = int(self.degree)
        self.gamma = float(self.gamma)
        if check_none(self.coef0):
            self.coef0 = 0.0
        else:
            self.coef0 = float(self.coef0)
        self.verbose = int(self.verbose)
        self.max_iter = int(self.max_iter)

        self.estimator = sklearn.svm.SVR(
            kernel=self.kernel,
            C=self.C,
            epsilon=self.epsilon,
            tol=self.tol,
            shrinking=self.shrinking,
            degree=self.degree,
            gamma=self.gamma,
            coef0=self.coef0,
            cache_size=cache_size,
            verbose=self.verbose,
            max_iter=self.max_iter
        )
        self.scaler = sklearn.preprocessing.StandardScaler(copy=True)

        self.scaler.fit(Y.reshape((-1, 1)))
        Y_scaled = self.scaler.transform(Y.reshape((-1, 1))).ravel()
        self.estimator.fit(X, Y_scaled)
        return self
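The getrlimit half of that cache-size calculation, isolated into a helper (a sketch; returns None when the soft address-space limit is unlimited, the case the example handles via its soft > 0 check):

import resource

def address_space_limit_mb():
    """Soft RLIMIT_AS in megabytes, or None if unlimited (sketch)."""
    soft, _hard = resource.getrlimit(resource.RLIMIT_AS)
    if soft == resource.RLIM_INFINITY:
        return None
    return soft / (1024 * 1024)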
Example #36
def estimate_expression_param(expr_data):
    '''
    This function estimates the parameters for the expression GLM
    '''
    EmissionParameters, Sequences, Background, Paths, sample_size, bg_type = expr_data
    print('Start estimation of expression parameters')
    # 1) Get the library size
    bg_type = EmissionParameters['BckType']
    lib_size = EmissionParameters['LibrarySize']
    bck_lib_size = EmissionParameters['BckLibrarySize']
    start_params = EmissionParameters['ExpressionParameters'][0]
    disp = EmissionParameters['ExpressionParameters'][1]

    # 2) Estimate dispersion
    print('Constructing GLM matrix')
    t = time.time()
    # 3) Compute sufficient statistics
    print 'Estimating expression parameters: before GLMMatrix'
    print 'Memory usage: %s (kb)' % resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss

    try:
        Sequences.close()
    except:
        pass
    try:
        Background.close()
    except:
        pass

    Sequences = h5py.File(EmissionParameters['DataOutFile_seq'], 'r')
    Background = h5py.File(EmissionParameters['DataOutFile_bck'], 'r')

    A, w, Y, rep = construct_glm_matrix(EmissionParameters, Sequences,
                                        Background, Paths)

    print 'Estimating expression parameters: after GLMMatrix'
    print 'Memory usage: %s (kb)' % resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss
    #pdb.set_trace()
    print 'Done: Elapsed time: ' + str(time.time() - t)

    #make sure that matrix A is in the right format
    if not sp.sparse.isspmatrix_csc(A):
        A = csc_matrix(A)

    print 'Estimating expression parameters: before GLMMatrix'
    print 'Memory usage: %s (kb)' % resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss

    #create the offset for the library size
    offset = np.zeros_like(rep)
    for i in range(EmissionParameters['NrOfReplicates']):
        offset[rep == (i + 1)] = lib_size[str(i)]
    if bg_type != 'None':
        for i in range(EmissionParameters['NrOfBckReplicates']):
            offset[rep == -(i + 1)] = bck_lib_size[str(i)]

    # 4) Fit GLM
    print('Fitting GLM')
    t = time.time()

    print 'Estimating expression parameters: before fitting'
    print 'Memory usage: %s (kb)' % resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss

    start_params, disp = fit_glm(A,
                                 w,
                                 Y,
                                 offset,
                                 sample_size,
                                 disp,
                                 start_params,
                                 norm_class=EmissionParameters['norm_class'])

    print 'Estimating expression parameters: after fitting'
    print 'Memory usage: %s (kb)' % resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss

    del A, w, Y, offset

    print 'Estimating expression parameters: after cleanup'
    print 'Memory usage: %s (kb)' % resource.getrusage(
        resource.RUSAGE_SELF).ru_maxrss
    print 'Done: Elapsed time: ' + str(time.time() - t)

    # 5) Process the output
    EmissionParameters['ExpressionParameters'] = [start_params, disp]
    print('Finished expression parameter estimation')

    return EmissionParameters
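The memory-usage print repeated throughout this function could be folded into a small helper; a sketch (Python 3 syntax, and the name log_mem is ours):

import resource

def log_mem(stage):
    """Print a labeled peak-RSS line (kilobytes on Linux)."""
    print('Estimating expression parameters: %s' % stage)
    print('Memory usage: %s (kb)'
          % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)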
             "w").write(txt)

    if OUTPUT_RES64:
        print("fslview %s %s -t .5 &" %
              (outfilename.replace("_tiv", "_affcrop"),
               outfilename.replace("_tiv", "_affcrop_outseg_mask")))

    print(" Elapsed time for subject %4.2fs " % (time.time() - Ti))
    print(" To display using fslview, try:")
    print("  fslview %s %s -t .5 %s -t .5 &" %
          (fname, outfilename.replace(
              "_tiv", "_mask_L"), outfilename.replace("_tiv", "_mask_R")))

    allsubjects_scalar_report.append(
        (fname, scalar_output_report[0], scalar_output_report[1][0],
         scalar_output_report[1][1]))

if 1:  #OUTPUT_DEBUG:
    print("Peak memory used (Gb) " +
          str(resource.getrusage(resource.RUSAGE_SELF)[2] / (1024. * 1024)))

print("Done")

if len(sys.argv[1:]) > 1:
    outfilename = (os.path.dirname(fname)
                   or ".") + "/all_subjects_hippo_report.csv"
    txt_entries = ["%s,%4f,%4f,%4f\n" % s for s in allsubjects_scalar_report]
    open(outfilename,
         "w").writelines(["filename,eTIV,hippoL,hippoR\n"] + txt_entries)
    print("Volumes of every subjects saved as " + outfilename)
Example #38
 def on_epoch_end(self, epoch, log={}):
     print('RAM Usage {:.2f} GB'.format(
         resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6))
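Assuming this method belongs to a Keras callback, a self-contained version with its hookup might look like this (a sketch; tf.keras is an assumption, and the model/data names are placeholders):

import resource
from tensorflow import keras  # assumption: TensorFlow's bundled Keras

class MemoryLogger(keras.callbacks.Callback):
    def on_epoch_end(self, epoch, logs=None):
        # ru_maxrss is kilobytes on Linux, so dividing by 1e6 yields gigabytes there
        print('RAM Usage {:.2f} GB'.format(
            resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1e6))

# model.fit(x_train, y_train, epochs=5, callbacks=[MemoryLogger()])  # placeholders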
Example #39
 def process_request(self, request):
     self._start_time = time.time()
     if self.has_content:
         self._start_rusage = resource.getrusage(resource.RUSAGE_SELF)
Example #40
 def __call__(self, progress, data):
     return 'RAM: {0:10.1f} MB'.format(
         resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024)
Example #41
    def __init__(self, sbyconfig, taskname, workdir, early_logs):
        self.options = dict()
        self.used_options = set()
        self.engines = list()
        self.script = list()
        self.files = dict()
        self.verbatim_files = dict()
        self.models = dict()
        self.workdir = workdir
        self.status = "UNKNOWN"

        self.exe_paths = {
            "yosys": "yosys",
            "abc": "yosys-abc",
            "smtbmc": "yosys-smtbmc",
            "suprove": "suprove",
            "aigbmc": "aigbmc",
            "avy": "avy",
        }

        self.tasks_running = []
        self.tasks_all = []

        self.start_clock_time = time()

        ru = resource.getrusage(resource.RUSAGE_CHILDREN)
        self.start_process_time = ru.ru_utime + ru.ru_stime

        self.summary = list()

        self.logfile = open("%s/logfile.txt" % workdir, "w")

        for line in early_logs:
            print(line, file=self.logfile)
        self.logfile.flush()

        mode = None
        key = None

        with open("%s/config.sby" % workdir, "w") as f:
            for line in sbyconfig:
                print(line, file=f)

        with open("%s/config.sby" % workdir, "r") as f:
            for line in f:
                raw_line = line
                if mode in ["options", "engines", "files"]:
                    line = re.sub(r"\s*(\s#.*)?$", "", line)
                    if line == "" or line[0] == "#":
                        continue
                else:
                    line = line.rstrip()
                # print(line)
                if mode is None and (len(line) == 0 or line[0] == "#"):
                    continue
                match = re.match(r"^\s*\[(.*)\]\s*$", line)
                if match:
                    entries = match.group(1).split()
                    assert len(entries) > 0

                    if entries[0] == "options":
                        mode = "options"
                        assert len(self.options) == 0
                        assert len(entries) == 1
                        continue

                    if entries[0] == "engines":
                        mode = "engines"
                        assert len(self.engines) == 0
                        assert len(entries) == 1
                        continue

                    if entries[0] == "script":
                        mode = "script"
                        assert len(self.script) == 0
                        assert len(entries) == 1
                        continue

                    if entries[0] == "file":
                        mode = "file"
                        assert len(entries) == 2
                        current_verbatim_file = entries[1]
                        assert current_verbatim_file not in self.verbatim_files
                        self.verbatim_files[current_verbatim_file] = list()
                        continue

                    if entries[0] == "files":
                        mode = "files"
                        assert len(entries) == 1
                        continue

                    assert False

                if mode == "options":
                    entries = line.split()
                    assert len(entries) == 2
                    self.options[entries[0]] = entries[1]
                    continue

                if mode == "engines":
                    entries = line.split()
                    self.engines.append(entries)
                    continue

                if mode == "script":
                    self.script.append(line)
                    continue

                if mode == "files":
                    entries = line.split()
                    if len(entries) == 1:
                        self.files[os.path.basename(entries[0])] = entries[0]
                    elif len(entries) == 2:
                        self.files[entries[0]] = entries[1]
                    else:
                        assert False
                    continue

                if mode == "file":
                    self.verbatim_files[current_verbatim_file].append(raw_line)
                    continue

                assert False
Example #42
def display_performance(delta):
    print("RAM max usage: {} MB".format(
        resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024))
    print("Total run time: {} ms".format(delta))
Example #43
def knightRuizAlg(A, tol=1e-6, f1 = False):

    ##FUNCTION DESCRIPTION
    # knightRuizAlg is an implementation of the matrix balancing algorithm
    #  developed by Knight and Ruiz. The goal is to take a matrix A and
    #  find a vector x such that diag(x)*A*diag(x) returns a doubly
    #  stochastic matrix

    ##PARAMETERS
    #A is a given numpy array
    #tol is error tolerance
    #f1 boolean indicating if the intermediate convergence statistics
    # should also be outputted
    n = A.shape[0]
    e = np.ones((n,1), dtype = np.float64)
    res = []


    Delta = 3
    delta = 0.1
    x0 = np.copy(e)
    g = 0.9

    etamax = eta = 0.1
    stop_tol = tol*0.5
    x = np.copy(x0)

    rt = tol**2.0
    v = x * (A.dot(x))
    rk = 1.0 - v
#    rho_km1 = np.dot(rk.T, rk)[0, 0]
    rho_km1 = ((rk.transpose()).dot(rk))[0,0]
    rho_km2 = rho_km1
    rout = rold = rho_km1
    
    MVP = 0 #we'll count matrix vector products
    i = 0 #outer iteration count

    if f1:
        print("it in. it res\n"),

    while rout > rt: #outer iteration
        i += 1

        if i > 30:
            break

        k = 0
        y = np.copy(e)
        innertol = max(eta ** 2.0 * rout, rt)
        
        while rho_km1 > innertol: #inner iteration by CG
            k += 1
            if k == 1:
                Z = rk / v
                p = np.copy(Z)
                #rho_km1 = np.dot(rk.T, Z)
                rho_km1 = (rk.transpose()).dot(Z)
            else:
                beta = rho_km1 / rho_km2
                p = Z + beta * p

            if k > 10:
                break

            #update search direction efficiently
            w = x * A.dot(x * p) + v * p
            # alpha = rho_km1 / np.dot(p.T, w)[0,0]
            alpha = rho_km1 / (((p.transpose()).dot(w))[0,0])
            ap = alpha * p
            #test distance to boundary of cone
            ynew = y + ap
            
            if np.amin(ynew) <= delta:
                
                if delta == 0:
                    break

                ind = np.where(ap < 0.0)[0]
                gamma = np.amin((delta - y[ind]) / ap[ind])
                y += gamma * ap
                break

            if np.amax(ynew) >= Delta:
                ind = np.where(ynew > Delta)[0]
                gamma = np.amin((Delta - y[ind]) / ap[ind])
                y += gamma * ap
                break

            y = np.copy(ynew)
            rk -= alpha * w
            rho_km2 = rho_km1
            Z = rk / v
            #rho_km1 = np.dot(rk.T, Z)[0,0]
            rho_km1 = ((rk.transpose()).dot(Z))[0,0]
        x *= y
        v = x * (A.dot(x))
        rk = 1.0 - v
        #rho_km1 = np.dot(rk.T, rk)[0,0]
        rho_km1 = ((rk.transpose()).dot(rk))[0,0]
        rout = rho_km1
        MVP += k + 1
        
        #update inner iteration stopping criterion
        rat = rout/rold
        rold = rout
        res_norm = rout ** 0.5
        eta_o = eta
        eta = g * rat
        if g * eta_o ** 2.0 > 0.1:
            eta = max(eta, g * eta_o ** 2.0)
        eta = max(min(eta, etamax), stop_tol / res_norm)
        if f1:
            print "%03i %06i %03.3f %e %e \n" % \
                (i, k, res_norm, rt, rout),
            res.append(res_norm)
    if f1:
        print "Matrix - vector products = %06i\n" % \
            (MVP),
    
    #X = np.diag(x[:,0])   
    #x = X.dot(A.dot(X))
    return [x,(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss/1000)]
Example #44
    def run(self):
        self.handle_str_option("mode", None)
        assert self.opt_mode in ["bmc", "prove", "cover", "live"]

        self.expect = ["PASS"]
        if "expect" in self.options:
            self.expect = self.options["expect"].upper().split(",")
            self.used_options.add("expect")

        for s in self.expect:
            assert s in ["PASS", "FAIL", "UNKNOWN", "ERROR", "TIMEOUT"]

        self.handle_bool_option("multiclock", False)
        self.handle_bool_option("wait", False)
        self.handle_int_option("timeout", None)

        self.handle_str_option("smtc", None)
        self.handle_str_option("tbtop", None)

        if self.opt_smtc is not None:
            for engine in self.engines:
                assert engine[0] == "smtbmc"

        self.copy_src()

        if self.opt_mode == "bmc":
            import sby_mode_bmc
            sby_mode_bmc.run(self)

        elif self.opt_mode == "prove":
            import sby_mode_prove
            sby_mode_prove.run(self)

        elif self.opt_mode == "live":
            import sby_mode_live
            sby_mode_live.run(self)

        elif self.opt_mode == "cover":
            import sby_mode_cover
            sby_mode_cover.run(self)

        else:
            assert False

        for opt in self.options.keys():
            assert opt in self.used_options

        self.taskloop()

        total_clock_time = int(time() - self.start_clock_time)

        ru = resource.getrusage(resource.RUSAGE_CHILDREN)
        total_process_time = int((ru.ru_utime + ru.ru_stime) -
                                 self.start_process_time)

        self.summary = [
            "Elapsed clock time [H:MM:SS (secs)]: %d:%02d:%02d (%d)" %
            (total_clock_time // (60 * 60), (total_clock_time // 60) % 60,
             total_clock_time % 60, total_clock_time),
            "Elapsed process time [H:MM:SS (secs)]: %d:%02d:%02d (%d)" %
            (total_process_time // (60 * 60), (total_process_time // 60) % 60,
             total_process_time % 60, total_process_time),
        ] + self.summary

        for line in self.summary:
            self.log("summary: %s" % line)

        assert self.status in ["PASS", "FAIL", "UNKNOWN", "ERROR", "TIMEOUT"]

        if self.status in self.expect:
            self.retcode = 0
        else:
            if self.status == "PASS": self.retcode = 1
            if self.status == "FAIL": self.retcode = 2
            if self.status == "ERROR": self.retcode = 3
            if self.status == "UNKNOWN": self.retcode = 4
            if self.status == "TIMEOUT": self.retcode = 5

        with open("%s/%s" % (self.workdir, self.status), "w") as f:
            for line in self.summary:
                print(line, file=f)
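The summary above measures child CPU time via RUSAGE_CHILDREN, which only accumulates usage from child processes that have already been waited on. A standalone sketch of the same pattern, assuming a POSIX system with python3 on the PATH (the child workload is just a placeholder):

import resource
import subprocess
from time import time

start_clock_time = time()
ru = resource.getrusage(resource.RUSAGE_CHILDREN)
start_process_time = ru.ru_utime + ru.ru_stime

# Placeholder workload: burn some CPU in a subprocess.
subprocess.check_call(["python3", "-c", "sum(i * i for i in range(10**7))"])

ru = resource.getrusage(resource.RUSAGE_CHILDREN)
total_clock_time = int(time() - start_clock_time)
total_process_time = int((ru.ru_utime + ru.ru_stime) - start_process_time)
print("Elapsed clock time (secs): %d" % total_clock_time)
print("Elapsed child process time (secs): %d" % total_process_time)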
Example #45
0
t1 = time.time()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=True)) as session:
    result = session.run(sum_operation_all)
t2 = time.time()

time1 = (t2 - t1) * 1000.

# It can be hard to see the results on the terminal with lots of output -- add some newlines to improve readability.
print("\n" * 5)
print("Shape:", shape, "Device:", device_name)
print("Time taken sum_operation_all:", (time1))
print("\n" * 2)
print("Memory: {} Kb".format(
    resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
#print(result)
print("\n" * 2)
print("\n" * 5)

t1 = time.time()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=True)) as session:
    result = session.run(sum_operation_0)
t2 = time.time()

time2 = (t2 - t1) * 1000.
# It can be hard to see the results on the terminal with lots of output -- add some newlines to improve readability.
print("\n" * 5)
print("Shape:", shape, "Device:", device_name)
print("Time taken sum_operation_0:", (time2))
Example #46
0
def help():
    print 'Documentation for the profile module can be found '
    print "in the Python Library Reference, section 'The Python Profiler'."


if hasattr(os, 'times'):

    def _get_time_times(timer=os.times):
        t = timer()
        return t[0] + t[1]


_has_res = 0
try:
    import resource
    resgetrusage = lambda: resource.getrusage(resource.RUSAGE_SELF)

    def _get_time_resource(timer=resgetrusage):
        t = timer()
        return t[0] + t[1]

    _has_res = 1
except ImportError:
    pass


class Profile():
    bias = 0

    def __init__(self, timer=None, bias=None):
        self.timings = {}
Example #47
0
def send_anonymous_stats(start_time):
    """
    Send anonymous usage statistics

    Example use:
        current_stat = return_stat_file_dict(csv_file)
        add_update_csv(csv_file, 'stat', current_stat['stat'] + 5)
    """
    try:
        client = InfluxDBClient(STATS_HOST, STATS_PORT, STATS_USER, STATS_PASSWORD, STATS_DATABASE)
        # Prepare stats before sending
        uptime = (time.time() - start_time) / 86400.0  # Days
        add_update_csv(STATS_CSV, 'uptime', uptime)

        version_num = db_retrieve_table_daemon(
            AlembicVersion, entry='first')
        version_send = version_num.version_num if version_num else 'None'
        add_update_csv(STATS_CSV, 'alembic_version', version_send)

        outputs = db_retrieve_table_daemon(Output)
        add_update_csv(STATS_CSV, 'num_relays', get_count(outputs))

        inputs = db_retrieve_table_daemon(Input)
        add_update_csv(STATS_CSV, 'num_sensors', get_count(inputs))
        add_update_csv(STATS_CSV, 'num_sensors_active',
                       get_count(
                           inputs.filter(Input.is_activated == True)))

        conditionals = db_retrieve_table_daemon(Conditional)
        add_update_csv(STATS_CSV, 'num_conditionals', get_count(conditionals))
        add_update_csv(STATS_CSV, 'num_conditionals_active',
                       get_count(
                           conditionals.filter(Conditional.is_activated == True)))

        pids = db_retrieve_table_daemon(PID)
        add_update_csv(STATS_CSV, 'num_pids', get_count(pids))
        add_update_csv(STATS_CSV, 'num_pids_active',
                       get_count(pids.filter(PID.is_activated == True)))

        lcds = db_retrieve_table_daemon(LCD)
        add_update_csv(STATS_CSV, 'num_lcds', get_count(lcds))
        add_update_csv(STATS_CSV, 'num_lcds_active',
                       get_count(lcds.filter(LCD.is_activated == True)))

        math = db_retrieve_table_daemon(Math)
        add_update_csv(STATS_CSV, 'num_maths', get_count(math))
        add_update_csv(STATS_CSV, 'num_maths_active',
                       get_count(math.filter(Math.is_activated == True)))

        methods = db_retrieve_table_daemon(Method)
        add_update_csv(STATS_CSV, 'num_methods',
                       get_count(methods))
        add_update_csv(STATS_CSV, 'num_methods_in_pid',
                       get_count(pids.filter(PID.method_id != '')))

        timers = db_retrieve_table_daemon(Timer)
        add_update_csv(STATS_CSV, 'num_timers', get_count(timers))
        add_update_csv(STATS_CSV, 'num_timers_active',
                       get_count(timers.filter(
                           Timer.is_activated == True)))

        country = geocoder.ip('me').country
        if not country:
            country = 'None'
        add_update_csv(STATS_CSV, 'country', country)
        add_update_csv(STATS_CSV, 'ram_use_mb',
                       resource.getrusage(
                           resource.RUSAGE_SELF).ru_maxrss / float(1000))

        add_update_csv(STATS_CSV, 'Mycodo_revision', MYCODO_VERSION)

        # Combine stats into list of dictionaries
        new_stats_dict = return_stat_file_dict(STATS_CSV)
        formatted_stat_dict = []
        for each_key, each_value in new_stats_dict.items():
            if each_key != 'stat':  # Do not send header row
                formatted_stat_dict = add_stat_dict(formatted_stat_dict,
                                                    new_stats_dict['id'],
                                                    each_key,
                                                    each_value)

        # Send stats to secure, remote influxdb server (only write permission)
        client.write_points(formatted_stat_dict)
        logger.debug("Sent anonymous usage statistics")
        return 0
    except requests.ConnectionError:
        logger.debug("Could not send anonymous usage statistics: Connection "
                     "timed out (expected if there's no internet or the "
                     "server is down)")
    except Exception as except_msg:
        logger.exception(
            "Could not send anonymous usage statistics: {err}".format(
                err=except_msg))
    return 1
Example #48
0
def get_mem():
    # Memory profiling
    # From https://goo.gl/HkfNpu
    if sys.platform == "darwin":
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024.0**2)
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0
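Worth noting: ru_maxrss is a high-water mark, so the value get_mem() returns never decreases, even after memory is freed. A small self-contained sketch (re-declaring get_mem from above) illustrating that:

import resource
import sys

def get_mem():
    if sys.platform == "darwin":
        return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / (1024.0**2)
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1024.0

before = get_mem()
blob = [0] * (10**7)  # roughly 80 MB of pointers on a 64-bit build
peak = get_mem()
del blob
after = get_mem()     # still equals peak: ru_maxrss never goes down
print(before, peak, after)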
Example #49
0
def main():
    modules = {
        "demography": test_demography,
        "highlevel": test_highlevel,
        "lowlevel": test_lowlevel,
        "dict_encoding": test_dict_encoding,
    }
    parser = argparse.ArgumentParser(
        description="Run tests in a loop to stress low-level interface")
    parser.add_argument("-m",
                        "--module",
                        help="Run tests only on this module",
                        choices=list(modules.keys()))
    args = parser.parse_args()
    test_modules = list(modules.values())
    if args.module is not None:
        test_modules = [modules[args.module]]

    # Need to do this to silence the errors from the file_format tests.
    logging.basicConfig(level=logging.ERROR)

    print("iter\ttests\terr\tfail\tskip\tRSS\tmin\tmax\tmax@iter")
    max_rss = 0
    max_rss_iter = 0
    min_rss = 1e100
    iteration = 0
    last_print = time.time()
    devnull = open(os.devnull, 'w')
    while True:
        # We don't want any random variation in the amount of memory
        # used from test-to-test.
        random.seed(1)
        testloader = unittest.TestLoader()
        suite = testloader.loadTestsFromModule(test_modules[0])
        for mod in test_modules[1:]:
            suite.addTests(testloader.loadTestsFromModule(mod))
        runner = unittest.TextTestRunner(verbosity=0, stream=devnull)
        result = runner.run(suite)
        rusage = resource.getrusage(resource.RUSAGE_SELF)
        if max_rss < rusage.ru_maxrss:
            max_rss = rusage.ru_maxrss
            max_rss_iter = iteration
        if min_rss > rusage.ru_maxrss:
            min_rss = rusage.ru_maxrss

        # We don't want to flood stdout, so we rate-limit to 1 per second.
        if time.time() - last_print > 1:
            print(iteration,
                  result.testsRun,
                  len(result.errors),
                  len(result.failures),
                  len(result.skipped),
                  rusage.ru_maxrss,
                  min_rss,
                  max_rss,
                  max_rss_iter,
                  sep="\t",
                  end="\r")
            last_print = time.time()
            sys.stdout.flush()

        iteration += 1
    sys.exit(0)
Example #50
0
import itk
import resource
import sys

import numpy as np

ImageType = itk.Image[itk.F, 3]
converter = itk.PyBuffer[ImageType]

# adding +1 to numpy created once
inputNumpyVolume = np.ones([100, 100, 100], dtype=np.float32)
n = 10
M = []
X = range(n)
for i in range(n):
    inputNumpyVolume += 1
    inputVolume = converter.GetImageViewFromArray(inputNumpyVolume)
    M.append(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)

if M[5] - M[4] > 1000:
    print('Memory leak!')
    sys.exit(1)

# creating new numpy volume each time
M = []
X = [x + n for x in range(n)]
for i in range(n):
    inputNumpyVolume = np.ones([100, 100, 100], dtype=np.float32)
    inputVolume = converter.GetImageViewFromArray(inputNumpyVolume)
    M.append(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
if M[5] - M[4] > 1000:
    print('Memory leak!')
    sys.exit(1)
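The M[5] - M[4] check above samples a single pair of iterations. A slightly more general helper, sketched here (not part of the original test), compares peak RSS after a warm-up phase instead:

import resource

def rss_grows(make_garbage, warmup=3, iters=10, slack=1000):
    """Return True if peak RSS keeps climbing after `warmup` calls.

    `make_garbage` is any zero-argument callable to exercise; `slack`
    absorbs allocator noise (ru_maxrss is KB on Linux, bytes on macOS).
    """
    samples = []
    for _ in range(warmup + iters):
        make_garbage()
        samples.append(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
    return samples[-1] - samples[warmup] > slack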
Example #51
0
def memory():
    res = resource.getrusage(resource.RUSAGE_SELF)
    return res[4]  # index 4 of the rusage struct is ru_idrss (unshared data size)
Example #52
0
def get_memory_usage_in_bytes():
    return resource.getrusage(resource.RUSAGE_SELF).ru_maxrss * (2**10)
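The * (2**10) above assumes Linux, where ru_maxrss is reported in kilobytes; macOS reports bytes directly. A platform-aware sketch:

import resource
import sys

def get_memory_usage_in_bytes_portable():
    rss = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
    if sys.platform == "darwin":
        return rss          # macOS: already bytes
    return rss * (2**10)    # Linux: kilobytes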
"""
hmm?

ModuleNotFoundError ...

ma-, iiya

"""
import resource
import time

RESOURCES = {
    ('ru_utime', 'User time'),
    ('ru_stime', 'System time'),
    ('ru_maxrss', 'Max. Resident Set Size'),
    ('ru_ixrss', 'Shared Memory Size'),
    # (,),
}

usage = resource.getrusage(resource.RUSAGE_SELF)

for name, desc in RESOURCES:
    print('{:<25} ({:<10}) = {}'.format(desc, name, getattr(usage, name)))
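The commented-out placeholder in RESOURCES leaves room for more entries; a few other fields documented in the resource module that would slot into the same format:

MORE_RESOURCES = {
    ('ru_minflt', 'Page faults not requiring I/O'),
    ('ru_majflt', 'Page faults requiring I/O'),
    ('ru_nvcsw', 'Voluntary context switches'),
    ('ru_nivcsw', 'Involuntary context switches'),
}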
Example #54
0
def logstats():
    """This is useful 'atexit'.
    """
    LOG('maxrss:%9d' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))
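A minimal sketch of wiring that up, with plain print standing in for the module's LOG helper (which is not shown here):

import atexit
import resource

LOG = print  # stand-in for the module's own LOG helper


def logstats():
    LOG('maxrss:%9d' % (resource.getrusage(resource.RUSAGE_SELF).ru_maxrss))


atexit.register(logstats)  # fires once, at interpreter exit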
Example #55
0
def get_memory_usage():
    # getrusage returns KB on Linux, bytes on macOS
    units_per_mb = 1024
    if platform.system() == "Darwin":
        units_per_mb = 1024 * 1024
    return ('Memory usage: %.1f MB' %
            (int(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss) / units_per_mb))
Example #56
0
import resource
import timeit

filename = "data.txt"  # placeholder path; the original input file was not shown


def foo1():
    # Drive the file's line iterator by hand with next().
    with open(filename, "r") as filehandle:
        iterator = iter(filehandle)
        done_looping = False
        while not done_looping:
            try:
                line = next(iterator)
            except StopIteration:
                done_looping = True
            else:
                #print(line)
                pass


def foo2():
    with open(filename, "r") as filehandle:
        for line in filehandle:
            #print(line)
            pass


if __name__ == '__main__':

    start_time = timeit.default_timer()
    foo2()
    print("foo2 Executed in ", timeit.default_timer() - start_time, "seconds")

    start_time = timeit.default_timer()
    foo1()
    print("foo2 Executed in ", timeit.default_timer() - start_time, "seconds")

    kilobytes = resource.getrusage(
        resource.RUSAGE_SELF
    ).ru_maxrss  # peak memory usage (bytes on OS X, kilobytes on Linux)
    megabytes = kilobytes / 1024

    print("Max memory usage : " + str(megabytes) + "MB")
Example #57
0
            else:
                binary_features.append(0)
        del grams_string
        yield [f_id] + binary_features


if __name__ == '__main__':
    start_time = timeit.default_timer()
    dict_all = join_ngrams()
    features_all = []
    for i in range(1, 10):
        p, n = num_instances('trainLabels.csv', i)
        features_all += Heap_gain(p, n, i, dict_all)  # 750 * 9
    train_data = gen_df(features_all, train=True, verbose=False)
    with open('train_data_750.csv', 'wb') as outfile:
        wr = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_ALL)
        for row in train_data:
            wr.writerow(row)
    test_data = gen_df(features_all, train=False, verbose=False)
    with open('test_data_750.csv', 'wb') as outfile:
        wr = csv.writer(outfile, delimiter=',', quoting=csv.QUOTE_ALL)
        for row in test_data:
            wr.writerow(row)
    elapsed = str(int(timeit.default_timer() - start_time))
    ram = str(resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000)
    op = str('\n' + '***************** For Join Grams ****************** ' +
             '\n' + 'Time ' + elapsed + ' RAM ' + ram)
    with open('Log.txt', 'a') as file:
        file.write(op)
        file.write("\n")
    print "DONE!"
Example #58
0
def meminfo(msg):
    print(msg, resource.getrusage(resource.RUSAGE_SELF).ru_maxrss / 1000)
Example #59
0
def Using(point):
    # NOTE: scaling by the page size assumes ru_maxrss counts pages, which
    # is not true on modern Linux (kilobytes) or macOS (bytes).
    usage = resource.getrusage(resource.RUSAGE_SELF)
    return '''%s: usertime=%s systime=%s mem=%s mb
       ''' % (point, usage[0], usage[1],
              (usage[2] * resource.getpagesize()) / 1000000.0)
Example #60
0
def ast(initial_state):

    # start time (time.clock was removed in Python 3.8; use perf_counter)
    st_time = time.perf_counter()

    # for A star frontier is a priority queue with ability
    # to modify task priority in place
    frontier = PriorityQueue()
    manhat_dis = initial_state.manhattan_dis()
    # node will have (board, depth/path_cost - i.e. g(n))
    frontier.add_task(initial_state, manhat_dis)

    # to have an efficient way to check for existence of a state in frontier
    explored_states = set()
    nodes_expanded = 0
    max_fringe_size = -1
    max_search_depth = -1

    while frontier.is_not_empty():

        node = frontier.pop_task()
        explored_states.add(node)

        if node.is_goal_state():
            # return (path to goal, cost_of_path, nodes expanded, fringe_size)
            path_to_goal = []
            cur_state = node
            while cur_state.parent:
                path_to_goal.append(cur_state.move_frm_parent)
                cur_state = cur_state.parent
            path_to_goal = path_to_goal[::-1]
            cost_of_path = len(path_to_goal)
            fringe_size = frontier.size()
            search_depth = node.path_cost
            runtime = time.perf_counter() - st_time
            if os.name != 'nt':
                import resource
                max_ram_usage = resource.getrusage(
                    resource.RUSAGE_SELF).ru_maxrss  # get ru_maxrss
            else:
                max_ram_usage = None

            write_output(path_to_goal, cost_of_path, nodes_expanded,
                         fringe_size, max_fringe_size, search_depth,
                         max_search_depth, runtime, max_ram_usage)
            return

        # the node out of queue is not a goal and we need to expand it
        nodes_expanded += 1
        #print(nodes_expanded)

        for neighbor in node.neighbors():

            if not (neighbor in explored_states
                    or frontier.has_task(neighbor)):
                frontier.add_task(
                    neighbor, neighbor.path_cost + neighbor.manhattan_dis())
                max_fringe_size = max(max_fringe_size, frontier.size())
                max_search_depth = max(max_search_depth, neighbor.path_cost)
            elif frontier.has_task(neighbor):
                frontier.remove_task(neighbor)
                frontier.add_task(
                    neighbor, neighbor.path_cost + neighbor.manhattan_dis())

    print("ERROR: ast should never reach this point")