def _evaluate_handle(self, vd, handle):
        if self.args.debug_memory_leaks:
            print(mem_top())

        additional_fetches = []

        for hook in vd.hooks:
            hook_fetches = hook.before_run(self, vd)
            if hook_fetches is not None:
                additional_fetches += hook_fetches

        estimations, additional = self._predict_handle(handle,
                                                       additional_fetches)

        for uid, (est_time, est_freq) in estimations.items():
            aa = vd.dataset.get_annotated_audio_by_uid(uid)

            for hook in vd.hooks:
                hook.every_aa(self, vd, aa, est_time, est_freq)

        for hook in vd.hooks:
            hook.after_run(self, vd, additional)

        if self.args.debug_memory_leaks:
            print(mem_top())
Example #2
    def _evaluate_handle(self, vd, handle):
        if self.args.debug_memory_leaks:
            print(mem_top())

        additional_fetches = []

        for hook in vd.hooks:
            hook_fetches = hook.before_predict(self, vd)
            if hook_fetches is not None:
                additional_fetches += hook_fetches

        fetched_values = self._predict_handle(handle, additional_fetches)

        timer = time.time()
        estimations = self._process_estimations(fetched_values)
        print("_process_estimations() {:.2f}s".format(time.time() - timer))

        for hook in vd.hooks:
            hook.after_predict(self, vd, estimations, fetched_values)

        for uid, est in estimations.items():
            aa = vd.dataset.get_annotated_audio_by_uid(uid)

            for hook in vd.hooks:
                hook.every_aa(self, vd, aa, est)

        for hook in vd.hooks:
            hook.after_run(self, vd, fetched_values)

        if self.args.debug_memory_leaks:
            print(mem_top())
Example #3
def debug_mem_usage():
    from os import getpid
    import psutil
    from mem_top import mem_top
    pid = getpid()
    logger.debug(
        'MEM USAGE for PID {}, MEM_INFO: {}\n{}'.format(
            pid, psutil.Process().memory_info(), mem_top()))
Example #4
    def next(self, custom_pipeline={}):

        # Interpolate zoom and drag
        self.pipeline["mmv_rotation"] = self.window_handlers.rotation
        self.pipeline["mmv_zoom"] = self.window_handlers.zoom
        self.pipeline["mmv_drag"] = self.window_handlers.drag

        # Set the pipeline attributes if shader is not frozen
        if not self.freezed_pipeline:
            self.pipeline["mmv_frame"] += 1
            self.pipeline["mmv_time"] = self.pipeline["mmv_frame"] / self.fps

        if MMVShaderMGL.DEVELOPER_RAM_PROFILE:
            from mem_top import mem_top
            if self.pipeline["mmv_frame"] % 300 == 0:
                logging.debug(mem_top())

        # The resolution needs to be scaled with SSAA
        self.pipeline["mmv_resolution"] = (self.width * self.ssaa,
                                           self.height * self.ssaa)

        # Assign user custom pipelines.
        # NOTE: Don't forget to write (uniform (type) name;) on the GLSL file
        # and also be sure that the name is unique; we don't check this here due to Python performance
        for key, value in custom_pipeline.items():
            self.pipeline[key] = value

        # print(self.pipeline)
        self._render(pipeline=self.pipeline)
Example #5
    def start(self):
        i = 0
        thread_stac = set()
        random.shuffle(self.thread_list)
        for thread in self.thread_list:
            if not thread:
                continue
            thread_stac.add(thread)
            thread.start()
            i += 1
            thread_to_kill = Process(name='empty')
            while i >= self.max_count:
                for thread_ in thread_stac:
                    if not thread_.is_alive():
                        thread_to_kill = thread_
                        i -= 1
                        break

            if 'empty' in thread_to_kill.name:
                continue
            gc.collect()
            print(thread_to_kill.name, len(thread_stac))

            thread_stac.discard(thread_to_kill)
            gc.collect()

        logger.debug(mem_top())
Example #6
 def req_debug_handler(self):
     try:
         from mem_top import mem_top
         dat = mem_top(limit=50, width=150, sep='\n')
         self.send_response("text/plain", dat)
     except Exception:
         self.send_response("text/html", "no mem_top")
Example #7
 def mem_top(self, environ, start_resp):
     # memory-leak debug
     try:
         from mem_top import mem_top
     except ImportError as exc:
         return self.resp(start_resp, codes.NOT_FOUND, repr(exc))
     else:
         return self.resp(start_resp, codes.OK, str(mem_top()))
Example #8
 def __onMemtop(self):
     """mem_top report"""
     QApplication.setOverrideCursor(QCursor(Qt.WaitCursor))
     try:
         print(mem_top(limit=100, width=400))
     except Exception as exc:
         logging.error(str(exc))
     QApplication.restoreOverrideCursor()
Example #9
def getsystemstatus():
    global startupdone
    if startupdone:
        msg = ""
        msg += "Processing Queue: %d\n" % tracking_controls[0].camera_queue.qsize()
        cpu_usage_string = os.popen("cat /proc/loadavg").readline()
        msg += "CPU Usage:        %s" % cpu_usage_string        
        if len(tracking_controls[0].tracking_results)>0:
            msg += "\n\nDiagnostic Message from last tracking computation\n"
            msg += "<pre>"+tracking_controls[0].tracking_results[-1]['msg']+"</pre>"
        msg += "\n\n<pre>" + mem_top() + "</pre>"
        return msg
    else:
        return "0"
Example #10
def memory_details(task, method=None):
    if method == 'pympler':
        from pympler import muppy, summary
        all_objs = muppy.get_objects()
        summary.print_(summary.summarize(all_objs))
    elif method == 'mem_top':
        from mem_top import mem_top
        task.log(mem_top())
    else:
        import subprocess
        result = subprocess.check_output(
            'ps --no-headers -eo pmem,vsize,rss,pid,cmd | sort -k 1 -nr',
            shell=True)
        task.log('\n' + result.decode('utf8'))
Example #11
 def p():
     LOG.debug(mem_top())
Example #12
            #    A[:, target_node] = -1
            #print('result: {}'.format(res))
        except BaseException as e:
            print(e)
            A[convexNodes[parent_node - 1], :] = -1
        #with open('prediction-parent-all.txt','a') as writer:
        #    writer.write(json.dumps({'parent_node':  parent_node,'target_node': convexNodesArr[target_node]+1,'res':res,'alpha': Aone})+'\n')

        #time.sleep(5)

        if num_nodes > 0:
            Atwo = {}
            print(convexNodes.keys())
            for x in range(len(A[convexNodes[parent_node - 1], :])):
                Atwo[convexNodesArr[x] + 1] = A[convexNodes[parent_node -
                                                            1], :][x]

            with open('prediction-parent-childs.txt', 'a') as writer:
                writer.write(
                    json.dumps({
                        'parent_node': parent_node,
                        'alpha': Atwo
                    }) + '\n')
            #with open('results.txt','a') as writer:
            #    writer.write(res)

        # perform gc collect after iteration save up memory
        #gc.collect()
        print(mem_top())
        #time.sleep(1)
Example #13
def get_all_pagename_sentences(dumpfile, vdt_map):
    print("Getting vdt & sentences map from the dump file...")
    dump = mwxml.Dump.from_file(open(dumpfile))
    total_sentence_count = 0
    page_count = 0
    ignored_sentence_count = 0
    valid_sentence_count = 0
    iteration = 0
    link_regex = r'(\[\[([a-zA-Z\u0080-\uFFFF ()]+)\]\]|\[\[([a-zA-Z\u0080-\uFFFF ()]+)\|([a-zA-Z\u0080-\uFFFF ]+)\]\])'

    for page in dump:

        # Ignore disambiguation pages.
        if DISAMBIGUATION_REFERENCE in page.title:
            continue

        percentage = (page_count * 100.0) / TOTAL_PAGE_COUNT
        page_links_hashes = {}
        page_count += 1
        if iteration > 10000:
            print(iteration)
            print(
                "==================== Before Garbage Collection ===================="
            )
            print(mem_top())

            gc.collect()
            print(
                "==================== After Garbage Collection ===================="
            )
            print(mem_top())

            iteration = 0

        for revision in page:

            iteration += 1
            # print(iteration)

            if isinstance(revision.text, str):
                # Get the matched strings.
                wiki_syntaxed_text = prepare_text(revision.text)
                matches = re.finditer(link_regex, wiki_syntaxed_text)
                if matches:
                    for m in matches:
                        # Get the hash of a matched link.
                        hash_of_link = hashlib.sha256(
                            m.group(1).encode('utf-8')).hexdigest()

                        seen_text = m.group(4)
                        if seen_text is None:
                            seen_text = m.group(2)

                        page_name = m.group(2)
                        if page_name is None:
                            page_name = m.group(3)

                        page_links_hashes[hash_of_link] = {
                            'wiki_text': m.group(1),
                            'page_name': page_name,
                            'seen_text': seen_text
                        }
                # print_dict(page_links_hashes)

                # Change the wiki_text in the text with the hash
                hash_replaced_text = wiki_syntaxed_text
                for hash_value, text_map in page_links_hashes.items():
                    hash_replaced_text = hash_replaced_text.replace(
                        text_map['wiki_text'], hash_value)

                # Get rid of tables and other wiki syntax objects.
                # hash_replaced_text = get_salt_text(hash_replaced_text)

                # Separate sentences with nltk
                sentences_with_hash = sent_tokenize(hash_replaced_text)

                # Find the sentences with the hashes and replace the hash with the seen_text. Save starting and
                # ending positions.

                for hash_value_, text_map_ in page_links_hashes.items():
                    for sentence in sentences_with_hash:
                        if hash_value_ in sentence:
                            # Check for unwanted text parts.

                            normal_sentence = replace_hash_values_with_seen_text(
                                sentence, page_links_hashes)
                            normal_sentence = get_salt_text(normal_sentence)
                            if not is_valid_sentence(sentence):
                                continue

                            total_sentence_count += 1
                            try:
                                vdt_start_index = normal_sentence.index(
                                    text_map_['seen_text'])
                                vdt_end_index = vdt_start_index + len(
                                    text_map_['seen_text'])

                                # Increase total sentence count.
                                valid_sentence_count += 1
                                write_one_row(percentage, vdt_map,
                                              text_map_['page_name'],
                                              normal_sentence, vdt_start_index,
                                              vdt_end_index)
                            except Exception as e:
                                print(e)
                        # write_ignored_sentence(page.title,normal_sentence)
        #print("% [", percentage, "] of pages processed. From page: [", page.title, "] Found: [", len(page_links_hashes),"] pagelinks.")

    print("Finished getting all sentences. (@_@)")
    print("Total Sentence Count: ", total_sentence_count)
    print("Ignored Sentence Count: ", ignored_sentence_count)
    print("Valid Sentence Count: ", valid_sentence_count)
    print("Valid/Total Ratio: ",
          (valid_sentence_count * 100.0) / total_sentence_count)
Example #14
import lichen.lichen as lch
import matplotlib.pyplot as plt

from datetime import datetime,timedelta,date
import matplotlib.dates as mdates
import matplotlib.ticker as ticker
from cogent_utilities import sec2days

import sys
import numpy as np
import seaborn as sn

#from pympler.tracker import SummaryTracker
#tracker = SummaryTracker()
from mem_top import mem_top

mem_top()


vals = np.loadtxt(sys.argv[1])

seconds = vals[:,0]
org_days = sec2days(seconds)
org_energies = vals[:,1]
org_risetimes = vals[:,2]

elo = 0.5
ehi = 3.3
enbins = 70
ewidth = (ehi-elo)/enbins

dlo = 0.0
Example #15
 def dumpmem(self):
     self.logger.debug(mem_top.mem_top())
Example #16
def printit(logger):
    logger.debug(mem_top(verbose_types=[dict, list], width=200))
Example #17
 def do_mem_top(self, line):
     print(mem_top.mem_top())
Example #18
def _harvest_register_worker_fn(worker_index,
                                reg_uri,
                                instances,
                                serial_chunk_size=INSTANCES_PER_FILE,
                                **kwargs):
    endpoint_func = kwargs['endpoint_func']
    endpoint_rule = kwargs['endpoint_rule']
    replace_s = kwargs['replace_s']
    replace_r = kwargs['replace_r']
    n_instances = len(instances)
    if n_instances < 1:
        return True
    est_secs = -1
    avg_inst_secs = -1
    serial_groups = grouper(instances, serial_chunk_size, mutable=True)
    first_group = True
    total_instances_done = 0
    iig = -1
    n_serial_groups = len(serial_groups)
    while True:
        try:
            instance_s_group = serial_groups.pop()
        except IndexError:
            break
        iig += 1
        start_serial_group_time = time.perf_counter()
        info_message_pref = "P[{}] Wkr: {}  Set: {}/{},".format(
            str(os.getpid()), worker_index + 1, iig + 1, n_serial_groups)
        total_in_group = len(instance_s_group)
        with open(
                "{}/{}_p{}_s{}.nt".format(OUTPUT_DIRECTORY,
                                          reg_uri_to_filename(reg_uri),
                                          str(worker_index + 1), str(iig + 1)),
                'ab+') as inst_file:
            iiig = -1
            while True:
                try:
                    inst = instance_s_group.pop()
                except IndexError:
                    break
                iiig += 1
                start_instance_time = 0 if first_group else time.perf_counter()
                local_instance_url = inst.replace(replace_s, replace_r)
                del inst
                info_message = "{} Inst: {}/{}, ".format(
                    info_message_pref, iiig + 1, total_in_group)
                est_sfx = " First group - No est remaining." if first_group else " Wkr est {}".format(
                    seconds_to_human_string(est_secs))
                info(info_message + local_instance_url + est_sfx)
                total_instances_done += 1
                m = endpoint_rule.match("|" + local_instance_url)
                dummy_request_uri = "http://localhost:5000" + local_instance_url + \
                                    "?_view={:s}&_format=application/n-triples".format(HARVESTABLE_INSTANCE_VIEW)
                del local_instance_url
                test_context = app.test_request_context(dummy_request_uri)
                try:
                    if len(m) < 1:
                        with test_context:
                            resp = endpoint_func()
                    else:
                        with test_context:
                            resp = endpoint_func(**m)
                except NotFoundError:
                    with open(
                            "{}_not_found.txt".format(
                                reg_uri_to_filename(reg_uri)), 'a+') as nf:
                        nf.write("{}\n".format(dummy_request_uri))
                    continue
                except Exception as e:
                    import traceback
                    with open(
                            "{}_error.txt".format(
                                reg_uri_to_filename(reg_uri)), 'a+') as nf:
                        nf.write("{}\n".format(dummy_request_uri))
                        nf.write("{}\n".format(repr(e)))
                        traceback.print_tb(e.__traceback__, file=nf)
                        nf.write('\n')
                    traceback.print_tb(e.__traceback__)  #to stderr
                    continue
                if isinstance(resp, pyldapi.Renderer):
                    resp.format = "application/n-triples"
                    resp = resp.render()
                if hasattr(resp, 'status_code') and hasattr(resp, 'data'):
                    assert resp.status_code == 200
                    if isinstance(resp.data, bytes):
                        data = resp.data
                    elif isinstance(resp.data, str):
                        data = resp.data.encode(encoding='utf-8')
                    else:
                        raise RuntimeError(
                            "response.data in the wrong format.")
                    inst_file.write(data)
                elif isinstance(resp, rdflib.Graph):
                    g = resp
                    g.serialize(destination=inst_file, format="nt")
                if first_group:
                    continue
                end_instance_time = time.perf_counter()
                instance_time = end_instance_time - start_instance_time
                if instance_time > 0:
                    avg_inst_secs = (avg_inst_secs + instance_time) / 2.0
                    instances_left = n_instances - total_instances_done
                    est_secs = avg_inst_secs * instances_left
            # End of per-instance processing
        end_serial_group_time = time.perf_counter()
        del instance_s_group
        if DEBUG_MODE:
            debug(mem_top())
        serial_group_time = end_serial_group_time - start_serial_group_time
        if first_group:
            avg_inst_secs = serial_group_time / total_in_group
            first_group = False
        else:
            this_avg = serial_group_time / total_in_group
            if this_avg > 0:
                avg_inst_secs = (avg_inst_secs + this_avg) / 2.0
        instances_left = n_instances - total_instances_done
        est_secs = avg_inst_secs * instances_left
    return True
Example #19
def main(graph: str, strategy: str, variant: str, datasrc: str, nagts: int,
         duration: int, soc_name: str, exec_id: int, dirpath_execs: str,
         dirpath_logs: str, depth: int, trace_agents: bool,
         trace_agents_estimates: bool, interaction: bool):

    # If the agents' estimates are traced, then their individual idlenesses
    # are also traced
    trace_agents = True if trace_agents_estimates else trace_agents

    variant_sfx = '_' + variant if variant != '' else ''

    print('{}: # {}, {}{}, {} agents, #{}\n' \
          .format(misc.timestamp(), graph, strategy, variant_sfx, nagts,
                  exec_id))

    if interaction:
        print("interaction mode: depth: {}".format(depth))

    exec_path = pathformatter.build_exec_path(graph=graph,
                                              strt=strategy,
                                              exec_id=exec_id,
                                              nagts=nagts,
                                              soc_name=soc_name,
                                              execs_rep=dirpath_execs)

    log_path = pathformatter.build_log_path(graph=graph,
                                            strt=strategy,
                                            duration=duration,
                                            exec_id=exec_id,
                                            soc_name=soc_name,
                                            datasrc=datasrc,
                                            nagts=nagts,
                                            logs_rep=dirpath_logs,
                                            variant=variant)

    # Archivist's connection
    ar_cnt = SimulatedConnection()

    # Archivist
    archivist = Archivist(ar_cnt, log_path, duration, trace_agents,
                          trace_agents_estimates)

    # archivist.start()

    start = time.time()

    # Ananke's connection
    an_cnt = SimulatedConnection()

    ananke = Ananke(an_cnt, exec_path, archivist, duration, depth, graph,
                    nagts, variant, trace_agents, trace_agents_estimates,
                    interaction)
    ananke.start()
    ananke.join()

    end = time.time()
    print(misc.timestamp(), ": Time: ", (end - start), '\n')

    print(misc.timestamp(), ": `main.py`: memory usage: ", misc.get_memusage(),
          '% \n')

    if memtop:
        print(
            misc.timestamp(), ": `main.py`: Showing of top  "
            "suspects for memory leaks in your "
            "Python program with `mem_top`:")
        print(
            "{}:{}".format(
                misc.timestamp(),
                mem_top(limit=10,
                        width=100,
                        sep='\n',
                        refs_format='{num}\t{type} {obj}',
                        bytes_format='{num}\t {obj}',
                        types_format='{num}\t {obj}',
                        verbose_types=None,
                        verbose_file_name="logs/mem_top.txt")), '\n')

    print("{}: ----------------------------------\n" \
          .format(misc.timestamp(), misc.get_memusage()))
Example #20
import logging
import mem_top

if __name__ == '__main__':
    logging.basicConfig(filename='logtest.log', level=logging.DEBUG)
    numbers = []
    for i in range(10):
        numbers.append(i)
        logging.debug(mem_top.mem_top())
Example #21
from mem_top import mem_top

print(mem_top())
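
Taken together, the examples above follow one pattern: call mem_top() periodically and send the resulting string report to a logger, a debug print, or an HTTP response. The sketch below combines the keyword arguments that actually appear in the examples (limit, width, sep in Example #6; verbose_types in Example #16; verbose_file_name in Example #19). The logger setup, the function name log_memory_snapshot, the file names, and the every-300-iterations interval are illustrative assumptions, not code from any of the projects above.

import logging
from mem_top import mem_top

logging.basicConfig(filename='memory_report.log', level=logging.DEBUG)
logger = logging.getLogger(__name__)

def log_memory_snapshot(iteration, every=300):
    # Emit a mem_top report every `every` iterations, mirroring the
    # periodic-check pattern of Examples #4, #13 and #20.
    if iteration % every == 0:
        logger.debug(
            mem_top(limit=10,      # number of top suspects per category
                    width=100,     # maximum width of each reported repr
                    sep='\n',
                    verbose_types=[dict, list],
                    verbose_file_name='mem_top_verbose.txt'))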