Example #1
    def run(self, key, value=None):
        # Excerpt of a console command; relies on imports not shown here:
        #   import os
        #   from gc import get_objects, get_referrers
        #   from pprint import pprint
        #   from sys import getrefcount
        # print_formatted_text likely comes from prompt_toolkit; Console, Module
        # and BorderlessTable are application-specific classes.
        if value is not None:
            # Take the first live object whose str() matches the given value;
            # raises IndexError if no such object exists.
            obj = list(filter(lambda o: str(o) == value, get_objects()))[0]
        if key == "graph":
            from objgraph import show_refs
            if value is None:
                p = self.console.parent
                show_refs(self.console if p is None else p,
                          refcounts=True,
                          max_depth=3)
            else:
                show_refs(obj, refcounts=True, max_depth=3)
        elif key == "growth":
            from objgraph import get_leaking_objects, show_most_common_types
            show_most_common_types(objects=get_leaking_objects())
        elif key == "info":
            from psutil import Process
            p = Process(os.getpid())
            print_formatted_text(p.memory_info())
        elif key == "leaking":
            from objgraph import get_leaking_objects
            print_formatted_text(get_leaking_objects())
        elif key == "objects":
            data = [["Object", "#References"]]
            for o in get_objects():
                if isinstance(o, (Console, Module)):
                    data.append([str(o), str(getrefcount(o))])
            t = BorderlessTable(data, "Consoles/Modules")
            print_formatted_text(t.table)
        elif key == "refs":
            if value is not None:
                print_formatted_text(getrefcount(obj), ":")
                pprint(get_referrers(obj))
Example #2
@contextmanager
def memory_tracing(key_type: str = "lineno", limit: int = 15):
    """
    Traces memory consumption and prints memory-usage statistics when leaving the context.

    Requires: import linecache, os, tracemalloc; from contextlib import contextmanager

    :param key_type: grouping key for the statistics ("lineno", "filename" or "traceback")
    :param limit: number of top allocation lines to print
    """
    tracemalloc.start()
    print("--- Tracing memory... ---")
    try:
        # Do computation ...
        yield None
    finally:
        snapshot = tracemalloc.take_snapshot()
        # snapshot = snapshot.filter_traces((
        #     tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        #     tracemalloc.Filter(False, "<unknown>"),
        # ))
        top_stats = snapshot.statistics(key_type)
        print("--- Memory usage statistics: ---")
        print("Top %s lines:" % limit)
        for index, stat in enumerate(top_stats[:limit], 1):
            frame = stat.traceback[0]
            # replace "/path/to/module/file.py" with "module/file.py"
            filename = os.sep.join(frame.filename.split(os.sep)[-2:])
            print("#%s: %s:%s: %.1f KiB"
                  % (index, filename, frame.lineno, stat.size / 1024))
            line = linecache.getline(frame.filename, frame.lineno).strip()
            if line:
                print('    %s' % line)

        other = top_stats[limit:]
        if other:
            size = sum(stat.size for stat in other)
            print("%s other: %.1f KiB" % (len(other), size / 1024))
        total = sum(stat.size for stat in top_stats)
        print("\nTotal allocated size: %.1f KiB" % (total / 1024))

        # May also be useful:
        import objgraph
        print("\nTypes of most common instances:")
        objgraph.show_most_common_types(limit=limit)
        print("\nObjects that do not have any referents:")
        print(len(objgraph.get_leaking_objects()))
        print("\nIncrease in peak object counts since last call:")
        objgraph.show_growth(limit=limit)
        print("\ntuple objects tracked by the garbage collector:")
        print(len(objgraph.by_type('tuple')))
        print("\ndict objects tracked by the garbage collector:")
        print(len(objgraph.by_type('dict')))
        print("--- End of memory tracing ---")
Example #3
    def objgraph(self, request):
        # Link, Content, Table and info_response are framework-specific helpers (not shown).
        import objgraph
        limit = int(request.args.get('limit', 10))
        types = objgraph.most_common_types(limit=limit, shortnames=False)
        leaking = objgraph.most_common_types(
            limit=limit,
            objects=objgraph.get_leaking_objects(),
            shortnames=False,
        )

        # HTML-only links
        limits = [
            'Number of items: ',
            Link('{}', request.path + '?limit={}', 10).html(),
            Link('{}', request.path + '?limit={}', 20).html(),
            Link('{}', request.path + '?limit={}', 50).html(),
        ]
        return info_response(
            request,
            'Python Objects',
            Content(
                'Python Objects for Worker pid {}'.format(os.getpid()),
                'h1',
            ),
            Content(' '.join(limits), 'p', text=False, escape=False),
            Content('Most Common Objects', 'h2'),
            Table(types),
            Content('Leaking Objects (no referrer)', 'h2'),
            Table(leaking),
        )
Example #4
    def objgraph(self, request):
        import objgraph
        limit = int(request.args.get('limit', 10))
        types = objgraph.most_common_types(limit=limit, shortnames=False)
        leaking = objgraph.most_common_types(
            limit=limit,
            objects=objgraph.get_leaking_objects(),
            shortnames=False,
        )

        # HTML-only links
        limits = [
            'Number of items: ',
            Link('{}', request.path + '?limit={}', 10).html(),
            Link('{}', request.path + '?limit={}', 20).html(),
            Link('{}', request.path + '?limit={}', 50).html(),
        ]
        return info_response(
            request.environ,
            'Python Objects',
            Content(
                'Python Objects for Worker pid {}'.format(os.getpid()),
                'h1',
            ),
            Content(' '.join(limits), 'p', text=False, escape=False),
            Content('Most Common Objects', 'h2'),
            Table(types, id='types'),
            Content('Leaking Objects (no referrer)', 'h2'),
            Table(leaking, id='leaking'),
        )
Example #5
def show_leaked_objects():
    """ not very useful """
    # Requires: import gc, objgraph; func_that_will_leak is a placeholder.
    func_that_will_leak()
    gc.collect()
    roots = objgraph.get_leaking_objects()
    print('roots len = %s' % len(roots))
    objgraph.show_most_common_types(objects=roots)
    objgraph.show_refs(roots[:3], refcounts=True, filename='roots.png')
Example #6
def print_memory(i):
    global object_counts

    print("\n\n--------------------- MEMORY -------------------------\n")

    print("TOTAL OBJECTS\n")
    o = len(gc.get_objects())
    print(o)
    object_counts.append(o)
    del o
    print("\n")

    print("GROWTH\n")
    objgraph.show_growth()
    print("\n")

    print("COMMON TYPES\n")
    objgraph.show_most_common_types()
    print("\n")

    print("LEAKING OBJECTS\n")
    roots = objgraph.get_leaking_objects()
    print("\n")

    log.info("ROOTS pre-collect : {}\n".format(len(roots)))

    print("COMMON TYPES IN ROOTS\n")
    objgraph.show_most_common_types(objects=roots)
    print("\n")

    objgraph.show_refs(roots[:3], refcounts=True, filename=TEST_PATH + '/roots_' + str(i) + '.png')
    print("\n")

    log.info("Garbage pre collect:  " + str(len(gc.garbage)))
    gc.collect()
    log.info("Garbage post collect: " + str(len(gc.garbage)))
    print("\n")

    roots = objgraph.get_leaking_objects()
    log.info("ROOTS post-collect : {}".format(len(roots)))

    print("\n\n---------------------------------------------------\n")
Example #7
    def all_stocks(self):
        category = []
        price_range = []
        i = 0
        for stock in self.stock_list:
            try:
                pred = self.predict_one(stock, 5)
                category.append(pred[0])
                price_range.append(
                    [pred[1][pred[0]] * 100, pred[1][pred[0] + 1] * 100])
                print(self.device + ": " + str(i))
                if i % 50 == 0:
                    # periodic leak checkpoint: print the count so the call is not a no-op
                    print(len(objgraph.get_leaking_objects()))
            except (TypeError, ValueError) as e:
                category.append(0)
                price_range.append([0.0, 0.0])
                print("Type Error for {}".format(stock))
                print(e)
            i += 1

        return category, price_range
Example #8
def _dump_memory_impl(limit: int) -> Mapping[str, Any]:
    # Requires: import gc, objgraph; from typing import Any, Mapping
    nb_collected = [gc.collect(generation) for generation in range(3)]
    return {
        'nb_collected': nb_collected,
        'most_common_types': objgraph.most_common_types(limit=limit, shortnames=False),
        'leaking_objects': objgraph.most_common_types(
            limit=limit,
            shortnames=False,
            objects=objgraph.get_leaking_objects(),
        ),
    }
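
A minimal usage sketch for the helper above; the JSON dump is our choice, not part of the source:

import json

report = _dump_memory_impl(limit=10)
print(json.dumps(report, indent=2, default=str))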
Example #9
    def __call__(self, environ, start_response):
        request = webob.Request(environ)
        if request.path_info.startswith(self.signature):
            query = request.GET.get('query')
            obj_type = request.GET.get('object_type')
            obj_address = request.GET.get('object_address')
            if obj_address:
                # Create a graph for the object
                leaking = [objgraph.at(int(obj_address))]
                filename = tempfile.mktemp(suffix='.png')
                objgraph.show_refs(leaking, filename=filename)
                output = open(filename, 'rb').read()  # PNG data must be read in binary mode
                os.unlink(filename)
                start_response(
                    '200 Ok',
                    [('Content-Type', 'image/png')])
                return [output]  # WSGI body: an iterable of byte strings

            output = StringIO()
            leaking = objgraph.get_leaking_objects()
            formats.htmlHeader(output, self.signature, request.host_url, query)
            output.write('<h3>Memory usage</h3>')
            output.write('<table>' + ''.join(
                    map(lambda info: MEMORY_LINE.format(*info),
                        objgraph.most_common_types(10, leaking))) +
                         '</table>')
            if obj_type:
                # Display detail about leaking objects of a given type.
                output.write('<h4>Memory detail for %s</h4>' % obj_type)
                output.write('<ul>')
                for obj in leaking:
                    if type(obj).__name__ == obj_type:
                        output.write(OBJECT_LINE.format(
                                id(obj), cgi.escape(str(obj))))
                output.write('</ul>')
            output.write('<h3>Timing</h3>')
            formats.htmlFormat(output, query=query)
            start_response(
                '200 Ok',
                [('Content-Type', 'text/html; charset=UTF8')])
            return output.getvalue()
        scale = self.find(request.path_info)
        scale.requests.mark()
        with scale.latency.time():
            response = request.get_response(self.app)
        result = scale.statuses(response.status_int)
        if result is not None:
            result.mark()
        start_response(
            response.status,
            [a for a in response.headers.iteritems()])
        return response.app_iter
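
A minimal sketch of the by-address graphing used in the middleware above, assuming only objgraph (Graphviz is needed to render the PNG):

import objgraph

x = {"leak": "me"}
obj = objgraph.at(id(x))  # look an object up by its address, as the middleware does
objgraph.show_refs([obj], refcounts=True, filename="obj.png")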
Example #10
    def on_selection_change(self, *args):
        if self.selection:
            #from listscreen import ListScreenItem
            objgraph.show_growth()
            print('...')
            roots = objgraph.get_leaking_objects()
            objgraph.show_most_common_types(objects=roots)
            print('...')
            objgraph.show_refs(roots[:3], refcounts=True, filename='sad.png')
            #objgraph.show_chain(objgraph.find_backref_chain(self.selection[0].__self__, objgraph.is_proper_module), filename='chain.png')
            #objgraph.show_backrefs(self.selection[0].__self__, filename='sample-backref-graph.png')
            print('...')
Example #11
    def _status_leak(self):
        if not self.leak:
            return
        pid = os.getpid()
        print("%d active threads" % threading.active_count())
        if guppy:
            print(self.heap.heap())
        if psutil:
            process = psutil.Process(pid)
            if hasattr(process, "open_files"): print(process.open_files())
            if hasattr(process, "connections"): print(process.connections())
            if hasattr(process, "num_fds"): print(process.num_fds())
            if hasattr(process, "memory_info_ex"): print(process.memory_info_ex())
            if hasattr(process, "memory_maps"): print(process.memory_maps())
        if objgraph:
            print("%d objects leaking" % len(objgraph.get_leaking_objects()))
Example #12
def __dbg_mem(strn):
    if __DBGMEM:
        import gc, objgraph
        print()
        print('#' * 80)
        print('#' * 80)
        print('##', strn)
        print('Collect', gc.collect())
        print('Collect', gc.collect())

        roots = objgraph.get_leaking_objects()
        if roots:
            print(len(roots))
            objgraph.show_most_common_types(objects=roots)
            objgraph.show_refs(roots[:3], refcounts=True, filename='tmp/%s.png' % strn.lower())
        else:
            print('Nothing')
        print('#' * 80)
        print('#' * 80)
        print()
Example #13
    def prepareDocuments(docs):
        print("preparing %d documents" % len(docs))
        chunksize = 15000
        if functionCollection is not None:
            for i in range(0, len(docs), chunksize):
                chunk = docs[i:i + chunksize]
                functionCollection.moveToMemory(chunk, neededDocumentFunctions)
                #print("forget unnecessary document functions...")
                gc.collect()
                if config.debug_memory:
                    print("garbage: ", len(gc.garbage))
                    print("15 most common types:")
                    objgraph.show_most_common_types(limit=15)
                    c_syntax_tree.showCMemoryStatistics()
                    showMemoryStatistics()
                    functionCollection.showMemoryStatistics()
                    functionCollection.getFunction(
                        features.stanfordTreeDocumentFunction
                    ).cachedValues.showMemoryStatistics()
                    print("leaking: ", len(objgraph.get_leaking_objects()))
Example #14
            grbindx=pygrib.index(grib_file,
                                 'indicatorOfParameter',
                                 'indicatorOfTypeOfLevel',
                                 'level')
            
            #plottypes.windplot(grbindx, source, map_types, outdir = outdir)

            plottypes.temp_leak_plot(grbindx, source, map_types, outdir = outdir)
            print('SELF ', resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)
            print('CHILDREN', resource.getrusage(resource.RUSAGE_CHILDREN).ru_maxrss)

            #plottypes.cloud_plot(grbindx, source, map_types, outdir = outdir)

            #plottypes.prec_plot(grbindx, grbindx_prev, source, map_types, outdir = outdir)
            #sys.exit(1) # one file done

            if grbindx_prev is not None: grbindx_prev.close()    # close previous(?)
            grbindx_prev = grbindx  # def new previous
            #grbindx.close()

             
            #for k, v in globals().items(): print('globals:', k, '=', v)

        #sys.exit(1) # all files done

    #sys.exit(1) # all sources done
    gc.collect()

    import objgraph
    print(len(objgraph.get_leaking_objects()))  # report the count instead of discarding it
Example #15
def discover(A, system_params, initial_state_set, budget=None):
    final_state_set = set()
    Q = queue.Queue(maxsize=0)
    examined_state_set = set()

    # initialize the Q with initial states

    # ##!!##logger.debug('Adding initial states to Q')

    for init_state in initial_state_set:
        Q.put(init_state)


    if settings.MEMLEAK_TEST:
        objgraph.show_growth(shortnames=False)
        U.pause()

    while not Q.empty():
        #objgraph.show_growth(shortnames=False)
        abs_state = Q.get(False)

        # ##!!##logger.debug('{} = Q.get()'.format(abs_state))

        if not (A.is_terminal(abs_state)
                or abs_state in examined_state_set
                or not A.in_ROI(abs_state)):

            # ##!!##logger.debug('decided to process abs_state')

            # Mark it as examined

            examined_state_set.add(abs_state)

            # Find all reachable abstract states using simulations

            abs2rch_abs_state_dict = get_reachable_abs_states(A, system_params, [abs_state])

            # add the new reached states only if they have not been
            # processed before

            # ##!!##logger.debug('abs2rch_abs_state_dict.values()\n{}'.format(abs2rch_abs_state_dict.values()))

            rchd_abs_state_set = abs2rch_abs_state_dict[abs_state]

            # TODO: abstract away the graph maybe??
            # Hide the call behind add_relation(A1, A2)

            for rchd_abs_state in rchd_abs_state_set:

                # ##!!##logger.debug('reached abstract state {}'.format(rchd_abs_state))

                # query if the reached state is a final state or not?
                # If yes, tag it so

                if system_params.is_final(A, rchd_abs_state):
                    final_state_set.add(rchd_abs_state)
                else:

                    # print('found a final state')
                    # exit()

                    Q.put(rchd_abs_state, False)

                # moving below to the abstraction itself
                # A.add_relation(abs_state, rchd_abs_state)

                # A.G.add_edge(abs_state, rchd_abs_state)
#                    n = self.get_n_for(abs_state) + 1
#                    self.set_n_for(rchd_abs_state, n)

    # end while loop

    # ##!!##logger.debug('Abstraction discovery done')
    # ##!!##logger.debug('Printing Abstraction\n {}'.format(str(A)))

    if settings.MEMLEAK_TEST:
        objgraph.show_growth(shortnames=False)
        from core.PACell import PlantAbstractState_ as PS
#         print('PlantAbstractState.instance_ctr = ', PS.instance_ctr)
#         print('PlantAbstractState.list_size = ', len(PS.instance_store))
#         print('PlantAbstractState.set_size = ', len(PS.instance_set))

#         objgraph.show_backrefs(
#                 PS.instance_store[0], max_depth=10, too_many=5,
#                 highlight=None, filename='objgraph1.png',
#                 extra_info=None, refcounts=False, shortnames=False)
        from core.abstraction import AbstractState_ as AS
        print('num instances of plant states:', PS.instance_ctr)
        print('num instances of abstract states:', AS.instance_ctr)
        print('memory leaks below...')
        print(objgraph.get_leaking_objects())  # show the result rather than discarding it
        print('='*20)
        U.pause()

    return final_state_set
Example #16
    def func_wrapper():
        pp.pprint(objgraph.get_leaking_objects())
        abort()
Example #17
    def objgraph_leaking_objects(self, args):
        """
        This is supposed to count reference counting bugs in C-level,
        see https://mg.pov.lt/objgraph/#reference-counting-bugs

        Example:

            $ fuglu_control objgraph_leaking_objects '{"nresults": 5}'

            ----------------
            Leaking objects:
            ----------------

            params:
            * nresults: 5
            * lowercase: True
            * dont_startwith:
            * must_startwith:
            * dont_contain:
            * must_contain:

            builtins.dict : 797
            builtins.list : 132
            builtins.tuple : 28
            builtins.method : 13
            builtins.weakref : 12
        """
        res = u"----------------\n" \
            + u"Leaking objects:\n" \
            + u"----------------\n\n"
        if OBJGRAPH_EXTENSION_ENABLED:
            defaults = {
                "nresults": 20,
                "lowercase": True,
                "dont_startwith": ["builtins", "_"],
                "dont_contain": [],
                "must_startwith": [],
                "must_contain": []
            }

            if not args:
                args = {}

            # fill filter lists and other vars from dict
            res, inputdict = ControlSession.prepare_objectgraph_list_from_dict(
                args, res, defaults)
            roots = None
            types_list = None
            finalfilter = None
            try:
                roots = objgraph.get_leaking_objects()

                # build filter
                finalfilter = ControlSession.buildfilter(
                    dont_contain=inputdict["dont_contain"],
                    dont_startwith=inputdict["dont_startwith"],
                    must_contain=inputdict["must_contain"],
                    must_startwith=inputdict["must_startwith"],
                    lowercase=inputdict["lowercase"])

                types_list = objgraph.most_common_types(
                    objects=roots,
                    limit=inputdict["nresults"],
                    shortnames=False,
                    filter=finalfilter)
                for otype in types_list:
                    res += u"%s : %u\n" % otype
            except Exception as e:
                res += force_uString(e)
                self.logger.exception(e)
            finally:
                if roots:
                    del roots
                if types_list:
                    del types_list
                if finalfilter:
                    del finalfilter
        else:
            res = u"please install module 'objgraph'"
        return res
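
A minimal sketch of the pattern this command wraps, assuming only objgraph; the filter mirrors the "dont_startwith" defaults above:

import gc
import objgraph

def _keep(obj):
    # drop builtins and private type names, like the default filter lists above
    name = "%s.%s" % (type(obj).__module__, type(obj).__name__)
    return not name.lower().startswith(("builtins", "_"))

gc.collect()
roots = objgraph.get_leaking_objects()
for name, count in objgraph.most_common_types(objects=roots, limit=5,
                                              shortnames=False, filter=_keep):
    print("%s : %u" % (name, count))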
Example #18
def _dump_memory_impl(
    limit: int,
    analyze_type: Optional[str],
    python_internals_map: bool = False,
) -> Mapping[str, Any]:
    nb_collected = [gc.collect(generation) for generation in range(3)]
    result = {
        "nb_collected": nb_collected,
        "most_common_types": objgraph.most_common_types(limit=limit, shortnames=False),
        "leaking_objects": objgraph.most_common_types(
            limit=limit,
            shortnames=False,
            objects=objgraph.get_leaking_objects()),
    }

    if python_internals_map and not analyze_type:
        analyze_type = "builtins.dict"

    if analyze_type:
        # Timeout after one minute; must be a bit less than the timeout of the broadcast in _views.py.
        timeout = time.monotonic() + 60

        mod_counts: Dict[str, int] = {}
        biggest_objects: List[Tuple[float, Any]] = []
        result[analyze_type] = {}
        for obj in objgraph.by_type(analyze_type):
            if analyze_type == "builtins.function":
                short = (obj.__module__.split(".")[0]
                         if obj.__module__ is not None else "")
                mod_counts[short] = mod_counts.get(short, 0) + 1
            else:
                if analyze_type == "builtins.dict":
                    python_internal = False
                    if not (FILES_FIELDS - set(obj.keys())):
                        python_internal = True
                    if (not ({"scope", "module", "locals", "globals"} -
                             set(obj.keys()))
                            and isinstance(obj["globals"], dict) and
                            not (FILES_FIELDS - set(obj["globals"].keys()))):
                        python_internal = True
                    if (python_internal and not python_internals_map
                            or not python_internal and python_internals_map):
                        continue
                size = get_size(obj) / 1024
                if len(biggest_objects) < limit or size > biggest_objects[0][0]:
                    biggest_objects.append((size, obj))
                    biggest_objects.sort(key=lambda x: x[0])
                    if len(biggest_objects) > limit:
                        biggest_objects = biggest_objects[-limit:]
            if time.monotonic() > timeout:
                result[analyze_type]["timeout"] = True
                break
        if analyze_type == "builtins.function":
            result[analyze_type]["modules"] = [
                dict(module=i[0],
                     nb_func=i[1]) for i in sorted(mod_counts.items(),
                                                   key=lambda x: -x[1])[:limit]
            ]
        elif analyze_type == "linecache":
            import linecache

            cache = linecache.cache
            result[analyze_type]["biggest_objects"] = sorted(
                (dict(filename=k, size_kb=get_size(v))
                 for k, v in cache.items()),
                key=lambda i: -(cast(int, i["size_kb"])),
            )
        else:
            biggest_objects.reverse()
            result[analyze_type]["biggest_objects"] = [
                dict(size_kb=i[0], repr=repr(i[1])) for i in biggest_objects
            ]
    return result
Example #19
                for device in all_devices:
                    #do pumping if there are items in coming list
                    if device.coming:
                        is_need_to_pump = True
                        device.pump()

            assert test_dict['packet'] == packet.__dict__
            assert test_dict['recipient'] == machines[j]

    l1 = Letter(
        title='First email',
        sender='*****@*****.**',
        recipient='*****@*****.**',
        text='c374hfvc39pbvnc 7hv475v4bv vh549g7h3pabv 3fg37b30v 3v3p4bgf3gv 3vb54804v 459!',
        date=datetime.now().strftime("%d-%m-%Y/%H:%M"))
    l1bytes = bytes(str(l1), 'utf-8')

    l2 = Letter(
        title='First email',
        sender='*****@*****.**',
        recipient='*****@*****.**',
        text='c374hfvc39pbvnc 7hv475v4bv vh549g7h3pabv 3fg37b30v 3v3p4bgf3gv 3vb54804v 459!',
        date=datetime.now().strftime("%d-%m-%Y/%H:%M"))
    l2bytes = bytes(str(l2), 'utf-8')

    roots = objgraph.get_leaking_objects()
    print(len(roots))
    objgraph.show_most_common_types(objects=roots)
Example #20
def get_leaking_objects(file=None, limit=5):
    roots = objgraph.get_leaking_objects()
    objgraph.show_refs(roots[:limit], refcounts=True, output=file)
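
A short usage sketch for the wrapper above; if we read objgraph's `output` parameter correctly, show_refs then writes the graph source to the given file object instead of rendering an image:

import sys

get_leaking_objects(file=sys.stdout, limit=3)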
Example #21
    def show_leak_obj(self):
        root = objgraph.get_leaking_objects()
        logging.error("leak object: {}".format(len(root)))
        import pdb
        pdb.set_trace()
Example #22
def add_single_experiment(directory, df_filepath, datasets):
    csv_filenames = [
        f for f in os.listdir(directory)
        if f.startswith("resul") and f.endswith(".csv")
    ]
    print(list(os.walk(directory)))
    weights_folder = next(os.walk(directory))[1][0]  # first subdirectory of `directory`
    for filename in csv_filenames:
        experiment = directory_to_experiment_info(directory)
        with open("./" + directory + "/" + filename, "r") as csvfile:
            logging.debug("./" + directory + "/" + filename)
            plots = csv.reader(csvfile, delimiter=",")
            headers = next(plots, None)
            for header in headers:
                experiment[header] = []
            for row in plots:
                for i, header in enumerate(headers):
                    experiment[header].append(float(row[i]))

        experiment["minimum_val_loss"] = min(experiment["val_loss"])
        experiment["minimum_loss"] = min(experiment["loss"])
        number = int(filename.split("-")[-1].split(".")[0])
        experiment["number"] = number
        experiment["repeat"] = math.floor(number / 4)
        if (number % 4) / 2 < 1:
            experiment["elastic_distortions"] = True
        else:
            experiment["elastic_distortions"] = False
        if (number % 4) % 2 != 0:
            experiment["separate_channel_ops"] = True
        else:
            experiment["separate_channel_ops"] = False
        print(experiment.keys())

        if not experiment_in_dataframe(df_filepath, experiment):
            weights_file = (directory + "/" + weights_folder + "/" +
                            "mask_rcnn_fk2018_best.h5")
            config = FK2018.FKConfig()
            print("got config")

            class InferenceConfig(config.__class__):
                # Run detection on one image at a time
                GPU_COUNT = 1
                IMAGES_PER_GPU = 1

            config = InferenceConfig()
            config.display()
            # Device to load the neural network on.
            # Useful if you're training a model on the same
            # machine, in which case use CPU and leave the
            # GPU for training.
            # DEVICE = "/gpu:0"  # /cpu:0 or /gpu:0

            # Inspect the model in training or inference modes
            # values: 'inference' or 'training'
            # TODO: code for 'training' test mode not ready yet
            TEST_MODE = "inference"

            # Must call before using the dataset

            # with tf.device(DEVICE):
            tf_config = tf.ConfigProto()
            tf_config.gpu_options.allow_growth = True
            with tf.Session(config=tf_config).as_default():
                model = modellib.MaskRCNN(mode="inference",
                                          model_dir="./log",
                                          config=config)

                # Load weights
                logging.debug("Loading weights " + str(weights_file))
                model.load_weights(weights_file, by_name=True)
                image_ids = datasets[0].image_ids

                tracker = SummaryTracker()
                logging.debug("getting stats")
                logging.debug("tunasand stats")

                experiment["AP_list"], \
                experiment["classless_AP_list"], \
                experiment["precision_list"], \
                experiment["classless_precision_list"], \
                experiment["recall_list"], \
                experiment["classless_recall_list"], \
                experiment["predicted_class_list"], \
                experiment["gt_class_list"], \
                experiment["predicted_size_list"], \
                experiment["gt_size_list"], \
                experiment["overlaps"], \
                experiment["classless_overlaps_list"], \
                experiment["total_predicted_pixels"], \
                experiment["total_groundtruth_pixels"], \
                experiment["total_overlapping_pixels"] = compute_both_batch_aps(image_ids, datasets[0], model, config) #get_stats(weights_file, datasets[0])

                objgraph.show_most_common_types()
                roots = objgraph.get_leaking_objects()
                print(len(roots))
                tracker.print_diff()

                for i, dataset in enumerate(
                    ["AE_area1", "AE_area2", "AE_area3"]):
                    image_ids = datasets[i + 1].image_ids
                    logging.debug(f"aestats, {dataset}")
                    experiment[f"AP_list_{dataset}"], \
                    experiment[f"classless_AP_list_{dataset}"], \
                    experiment[f"precision_list_{dataset}"], \
                    experiment[f"classless_precision_list_{dataset}"], \
                    experiment[f"recall_list_{dataset}"], \
                    experiment[f"classless_recall_list_{dataset}"], \
                    experiment[f"predicted_class_list_{dataset}"], \
                    experiment[f"gt_class_list_{dataset}"], \
                    experiment[f"predicted_size_list_{dataset}"], \
                    experiment[f"gt_size_list_{dataset}"], \
                    experiment[f"overlaps_{dataset}"], \
                    experiment[f"classless_overlaps_list_{dataset}"], \
                    experiment[f"total_predicted_pixels_{dataset}"], \
                    experiment[f"total_groundtruth_pixels_{dataset}"], \
                    experiment[f"total_overlapping_pixels_{dataset}"]  = compute_both_batch_aps(image_ids, datasets[i+1], model, config) #get_stats(weights_file, datasets[i+1])
                    objgraph.show_growth()
                    roots = objgraph.get_leaking_objects()
                    print(len(roots))
                    tracker.print_diff()

            update_dataframe(df_filepath, experiment)
        else:
            print("already in dataframe, skipping " + filename)
Example #23
    def func_wrapper():
        pp.pprint(objgraph.get_leaking_objects())
        abort()
Example #24
def main(args):

    memory_used = []
    process = psutil.Process(os.getpid())

    # TODO: add to argparser
    IS_EAST_IMAGE_TEST = True

    NUM_ARRAYS_PER_FILE = 10000

    # TODO: the decode function needs this value as part of the dataset map function, hence the hardcoded value.
    # If needed, change it manually in `numpy_array_decode` in dummy_dataset.py as well.
    NUM_FEATURES = 250

    NUM_IMAGES_PER_FILE = 8

    BATCH_SIZE = 4
    TRAIN_DATA = os.getcwd() + "/data/train_data_img"
    VAL_DATA = os.getcwd() + "/data/val_data_img"
    MODEL_DIR = os.getcwd() + "/data/" + "east_net"
    EXPORT_DIR = MODEL_DIR + "/" + "export"
    NUM_EPOCHS = 3
    NUM_SAMPLES_PER_FILE = NUM_IMAGES_PER_FILE

    if args["dataset"] == "numpy":
        IS_EAST_IMAGE_TEST = False
        BATCH_SIZE = 128
        TRAIN_DATA = os.getcwd() + "/data/train_data"
        VAL_DATA = os.getcwd() + "/data/val_data"
        MODEL_DIR = os.getcwd() + "/" + "data/fwd_nnet"
        EXPORT_DIR = MODEL_DIR + "/" + "export"
        NUM_EPOCHS = 3
        NUM_SAMPLES_PER_FILE = NUM_ARRAYS_PER_FILE
    elif args["dataset"] == "east":
        pass
    else:
        print_error("Invalid dataset")

    TOTAL_STEPS_PER_FILE = NUM_SAMPLES_PER_FILE / BATCH_SIZE

    if args["delete"] == True:
        print_info("Deleting old data files")
        shutil.rmtree(TRAIN_DATA)
        shutil.rmtree(VAL_DATA)

    gen_data(IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST,
             TRAIN_DATA=TRAIN_DATA,
             VAL_DATA=VAL_DATA,
             NUM_SAMPLES_PER_FILE=NUM_SAMPLES_PER_FILE,
             NUM_FEATURES=NUM_FEATURES,
             number_files=int(args["num_tfrecord_files"]))

    if args["mode"] == "test_iterator":
        print('objgraph growth list start')
        objgraph.show_growth(limit=50)
        print('objgraph growth list end')

        test_dataset(data_path=TRAIN_DATA,
                     BATCH_SIZE=BATCH_SIZE,
                     IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST)
        test_dataset(data_path=TRAIN_DATA,
                     BATCH_SIZE=BATCH_SIZE,
                     IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST)
        test_dataset(data_path=VAL_DATA,
                     BATCH_SIZE=BATCH_SIZE,
                     IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST)
        print('objgraph growth list start')
        objgraph.show_growth(limit=50)
        print('objgraph growth list end')

        return

    # print(dataset_to_iterator(data_path=TRAIN_DATA))

    if IS_EAST_IMAGE_TEST:
        model = EASTTFModel(model_root_directory="store")
    else:
        model = NNet()

    estimator = tf.estimator.Estimator(
        model_fn=model,
        config=_init_tf_config(TOTAL_STEPS_PER_FILE=TOTAL_STEPS_PER_FILE,
                               MODEL_DIR=MODEL_DIR),
        params=None)
    memory_usage_psutil()
    print('objgraph growth list start')
    objgraph.show_growth(limit=50)
    print('objgraph growth list end')

    # print(objgraph.get_leaking_objects())

    # for epoch in tqdm(range(NUM_EPOCHS)):

    print("\n\n\n\n\n\n")
    print_error(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> New Epoch")
    memory_usage_psutil()
    # memory_used.append(process.memory_info()[0] / float(2 ** 20))
    print_error(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Training")
    # train(estimator=estimator,
    #       TRAIN_DATA=TRAIN_DATA,
    #       BATCH_SIZE=BATCH_SIZE,
    #       IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST)
    # print_error(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> Evaluating")
    # evaluate(estimator=estimator,
    #          VAL_DATA=VAL_DATA,
    #          BATCH_SIZE=BATCH_SIZE,
    #          IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST)

    train_n_evaluate(estimator=estimator,
                     TRAIN_DATA=TRAIN_DATA,
                     VAL_DATA=VAL_DATA,
                     BATCH_SIZE=BATCH_SIZE,
                     IS_EAST_IMAGE_TEST=IS_EAST_IMAGE_TEST,
                     max_steps=None,
                     NUM_EPOCHS=NUM_EPOCHS)

    print('objgraph growth list start')
    objgraph.show_growth(limit=50)
    print('objgraph growth list end')
    memory_usage_psutil()

    # plt.plot(memory_used)
    # plt.title('Evolution of memory')
    # plt.xlabel('iteration')
    # plt.ylabel('memory used (MB)')
    # plt.savefig("logs/" + args["dataset"] + "_dataset_memory_usage.png")
    # plt.show()

    print_error(">>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>> New Epoch")
    export_model(estimator=estimator,
                 model_export_path=EXPORT_DIR,
                 IS_EAST_MODEL=IS_EAST_IMAGE_TEST)

    print(objgraph.get_leaking_objects())  # print rather than discard the result
Example #25
    def do_update(self):
        leaked = objgraph.get_leaking_objects()  # baseline call (result intentionally unused)
        self.g.update()
        leaked = objgraph.get_leaking_objects()
        ts = objgraph.typestats(leaked)
        self.assertFalse(ts)
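
A minimal sketch of objgraph.typestats() as used in the test above; called without arguments it counts every object tracked by the garbage collector:

import objgraph

stats = objgraph.typestats()  # {type name: count}
for name, count in sorted(stats.items(), key=lambda kv: -kv[1])[:5]:
    print(name, count)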