Example #1
    def test_compare_memory(self):
        """
        Test memory usage of two collector runs with IPFIX and NetFlow v9 packets respectively.
        Then compare the two memory snapshots to make sure allocations from one parser do not show up in the other run.
        TODO: more properties could be tested, e.g. flag an unexpectedly large difference if one parser is far better optimized
        :return:
        """
        pkts, t1, t2 = send_recv_packets(
            generate_packets(NUM_PACKETS_PERFORMANCE, 10))
        self.assertEqual(len(pkts), NUM_PACKETS_PERFORMANCE)
        snapshot_ipfix = tracemalloc.take_snapshot()
        del pkts
        tracemalloc.clear_traces()

        pkts, t1, t2 = send_recv_packets(
            generate_packets(NUM_PACKETS_PERFORMANCE, 9))
        self.assertEqual(len(pkts), NUM_PACKETS_PERFORMANCE)
        snapshot_v9 = tracemalloc.take_snapshot()
        del pkts

        stats = snapshot_v9.compare_to(snapshot_ipfix, "lineno")
        for stat in stats:
            if stat.traceback[0].filename.endswith("netflow/ipfix.py"):
                self.assertEqual(stat.count, 0)
                self.assertEqual(stat.size, 0)

        stats = snapshot_ipfix.compare_to(snapshot_v9, "lineno")
        for stat in stats:
            if stat.traceback[0].filename.endswith("netflow/v9.py"):
                self.assertEqual(stat.count, 0)
                self.assertEqual(stat.size, 0)
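This test depends on the project's send_recv_packets and generate_packets helpers. A minimal, self-contained sketch of the same snapshot / clear_traces / compare_to sequence, with illustrative allocations standing in for the two collector runs:

# Minimal, self-contained sketch of the snapshot-diff pattern above (names and
# allocations are illustrative; no netflow test helpers involved).
import tracemalloc

tracemalloc.start()

first = [bytes(1000) for _ in range(100)]       # stands in for the IPFIX run
snapshot_a = tracemalloc.take_snapshot()
del first
tracemalloc.clear_traces()                      # forget the first run's traces

second = [bytearray(1000) for _ in range(100)]  # stands in for the v9 run
snapshot_b = tracemalloc.take_snapshot()

# Statistics grouped by source line; after clear_traces() the second snapshot
# should only attribute memory to the second batch of allocations.
for stat in snapshot_b.compare_to(snapshot_a, "lineno")[:5]:
    print(stat)

tracemalloc.stop()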
Example #2
File: ptmcmc.py Project: mardom/bajes
    def run(self, track_memory_usage=False):

        # run the chains
        logger.info("Running {}x{} walkers ...".format(self._ntemps,
                                                       self.nwalkers))
        while not self.stop:

            # expand history chains
            self._expand_history()

            # make steps
            self.sample(iterations=self.ncheckpoint)

            # update sampler status
            self.update_sampler()

            # compute stopping condition
            self.stop_sampler()

            # trace memory usage
            if tracemalloc.is_tracing():
                display_memory_usage(tracemalloc.take_snapshot())
                tracemalloc.clear_traces()

        # finally, store the inference results
        self.store_inference()
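display_memory_usage here is a helper from the bajes project, not part of tracemalloc. A hedged sketch of what such a helper could look like (illustrative only, not the actual bajes implementation):

# Hedged sketch of a display_memory_usage-style helper: print the total traced
# size and the top allocating lines of a snapshot. Illustrative only.
import tracemalloc


def display_memory_usage(snapshot, limit=5):
    stats = snapshot.statistics("lineno")
    total = sum(stat.size for stat in stats)
    print(f"traced memory: {total / 1024:.1f} KiB across {len(stats)} locations")
    for index, stat in enumerate(stats[:limit], 1):
        print(f"  #{index}: {stat}")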
Example #3
    def test_clear_traces(self):
        obj, obj_traceback = allocate_bytes(123)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertIsNotNone(traceback)
        tracemalloc.clear_traces()
        traceback2 = tracemalloc.get_object_traceback(obj)
        self.assertIsNone(traceback2)
Example #4
def memory_tracer():
    tracemalloc.start()
    tracemalloc.clear_traces()

    filters = (
        tracemalloc.Filter(True, aiormq.__file__),
        tracemalloc.Filter(True, pamqp.__file__),
        tracemalloc.Filter(True, aio_pika.__file__),
    )

    snapshot_before = tracemalloc.take_snapshot().filter_traces(filters)

    try:
        yield

        gc.collect()

        snapshot_after = tracemalloc.take_snapshot().filter_traces(filters)

        top_stats = snapshot_after.compare_to(snapshot_before,
                                              'lineno',
                                              cumulative=True)

        assert not top_stats
    finally:
        tracemalloc.stop()
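memory_tracer above is written as a generator, so in its project it is presumably registered as a pytest fixture or wrapped with contextlib.contextmanager. A hedged, self-contained sketch of the same idea, with the standard json module standing in for aiormq/pamqp/aio_pika:

# Hedged sketch: wrapping a memory_tracer-style generator as a context manager.
# The json module is a purely illustrative stand-in for aiormq/pamqp/aio_pika.
import contextlib
import gc
import json
import tracemalloc


@contextlib.contextmanager
def traced_module(module):
    tracemalloc.start()
    tracemalloc.clear_traces()
    filters = (tracemalloc.Filter(True, module.__file__),)
    snapshot_before = tracemalloc.take_snapshot().filter_traces(filters)
    try:
        yield
        gc.collect()
        snapshot_after = tracemalloc.take_snapshot().filter_traces(filters)
        top_stats = snapshot_after.compare_to(snapshot_before, "lineno",
                                              cumulative=True)
        # The original fixture asserts this list is empty; here we just report it.
        for stat in top_stats[:3]:
            print(stat)
    finally:
        tracemalloc.stop()


with traced_module(json):
    json.loads(json.dumps({"numbers": list(range(10))}))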
Example #5
    def test_get_traced_memory(self):
        # Python allocates some internals objects, so the test must tolerate
        # a small difference between the expected size and the real usage
        max_error = 2048

        # allocate one object
        obj_size = 1024 * 1024
        tracemalloc.clear_traces()
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)
        self.assertGreaterEqual(peak_size, size)

        self.assertLessEqual(size - obj_size, max_error)
        self.assertLessEqual(peak_size - size, max_error)

        # destroy the object
        obj = None
        size2, peak_size2 = tracemalloc.get_traced_memory()
        self.assertLess(size2, size)
        self.assertGreaterEqual(size - size2, obj_size - max_error)
        self.assertGreaterEqual(peak_size2, peak_size)

        # clear_traces() must reset traced memory counters
        tracemalloc.clear_traces()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))

        # allocate another object
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)

        # stop() also resets traced memory counters
        tracemalloc.stop()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
Example #7
File: emcee.py Project: mardom/bajes
    def run_mcmc(self):

        while not self.stop:

            # make steps
            for results in self.sample(self._previous_state,
                                       iterations=self.ncheckpoint,
                                       tune=True):
                pass

            # update previous state
            self._previous_state = results

            # update sampler status
            self.update_sampler()

            # compute stopping condition
            self.stop_sampler()

            if tracemalloc.is_tracing():
                display_memory_usage(tracemalloc.take_snapshot())
                tracemalloc.clear_traces()

        # finally, store the inference results
        self.store_inference()
Example #8
def run():
    metadata = {}
    if "COMMIT_TIMESTAMP" in os.environ:
        metadata["timestamp"] = os.environ.get("COMMIT_TIMESTAMP")
        metadata["revision"] = os.environ.get("COMMIT_SHA")
        metadata["commit_message"] = os.environ.get("COMMIT_MESSAGE").split(
            "\n")[0]
    runner = pyperf.Runner(metadata=metadata)
    pattern = os.environ.get("BENCH_PATTERN")

    args = runner.parse_args()
    if args.tracemalloc:
        bench_type = "tracemalloc"
    elif args.track_memory:
        bench_type = "trackmem"
    else:
        bench_type = "time"
    for func in discover_benchmarks():
        name = "%s.%s.%s" % (str(func.__module__), func.__name__, bench_type)
        if not pattern or fnmatch.fnmatch(name, pattern):
            client = None
            if hasattr(func, "client_defaults"):
                # create the client outside of the benchmarked function
                client = elasticapm.Client(**func.client_defaults)
                func = functools.partial(func, client=client)
                if args.tracemalloc:
                    tracemalloc.clear_traces()
            runner.bench_func(name, func)
            if client:
                client.close()
Example #9
    def test_get_tracemalloc_memory(self):
        data = [allocate_bytes(123) for count in range(1000)]
        size = tracemalloc.get_tracemalloc_memory()
        self.assertGreaterEqual(size, 0)
        tracemalloc.clear_traces()
        size2 = tracemalloc.get_tracemalloc_memory()
        self.assertGreaterEqual(size2, 0)
        self.assertLessEqual(size2, size)
Example #10
def stop():
    """ Stops application memory profiling """
    logging.debug("Stopping memory profiling")
    with _lock:
        if is_running():
            snapshot(_make_snapshot_name)
            tracemalloc.clear_traces()
            tracemalloc.stop()
Example #11
def peak() -> int:
    """
    Report the current peak traced memory usage, roughly an allocation-counting
    analogue of time.perf_counter().
    """
    _, peak = tracemalloc.get_traced_memory()
    # reset the peak (clear_traces() also discards all existing traces) for the next recording
    tracemalloc.clear_traces()
    return MallocPeak(peak)
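MallocPeak is defined elsewhere in that project. A hedged usage sketch of the same clear-then-measure pattern, returning a plain int and assuming tracemalloc has already been started:

# Hedged sketch of the peak()-style pattern: measure the peak of each phase,
# clearing traces in between. Plain ints stand in for the project's MallocPeak.
import tracemalloc


def peak_bytes() -> int:
    _, peak = tracemalloc.get_traced_memory()
    tracemalloc.clear_traces()  # the next call only sees the next phase
    return peak


tracemalloc.start()

data = [bytes(4096) for _ in range(256)]  # phase 1: ~1 MiB
print("phase 1 peak:", peak_bytes())

text = "x" * (1 << 20)                    # phase 2: ~1 MiB string
print("phase 2 peak:", peak_bytes())

tracemalloc.stop()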
Example #13
    def test_clear_traces(self):
        obj, obj_traceback = allocate_bytes(123)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertIsNotNone(traceback)

        tracemalloc.clear_traces()
        traceback2 = tracemalloc.get_object_traceback(obj)
        self.assertIsNone(traceback2)
Example #14
def _momory_logger():
    current = tracemalloc.take_snapshot()
    logger.info("================== Top Current:")
    for i, stat in enumerate(current.statistics('filename')[:10], 1):
        logger.info('================== top_current: ' + str(i) + ' ' +
                    str(stat))
    logger.info("================== Top Current:")
    tracemalloc.clear_traces()
    spawn_later(60, _momory_logger)
Example #15
    def test_get_tracemalloc_memory(self):
        data = [allocate_bytes(123) for count in range(1000)]
        size = tracemalloc.get_tracemalloc_memory()
        self.assertGreaterEqual(size, 0)

        tracemalloc.clear_traces()
        size2 = tracemalloc.get_tracemalloc_memory()
        self.assertGreaterEqual(size2, 0)
        self.assertLessEqual(size2, size)
Example #16
    def __call__(self, environ, start_response):
        # We are only interested in request traces.
        # Each request is handled in a new process.
        tracemalloc.clear_traces()
        try:
            return self.app(environ, start_response)
        finally:
            snapshot = tracemalloc.take_snapshot()
            display_tracemalloc(snapshot, limit=self.api.env.lite_tracemalloc)
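This __call__ belongs to a tracemalloc middleware (the TracemallocMiddleware wired up in Example #31 below). A hedged, self-contained sketch of the same per-request clear-and-snapshot idea around a trivial WSGI app, printing snapshot.statistics() instead of calling the project's display_tracemalloc helper:

# Hedged, self-contained sketch of per-request tracing: clear the traces when
# a request starts, print the top allocations when it ends. hello_app and
# TraceMiddleware are illustrative names, not part of the original project.
import tracemalloc


def hello_app(environ, start_response):
    start_response("200 OK", [("Content-Type", "text/plain")])
    return [b"hello\n"]


class TraceMiddleware:
    def __init__(self, app, limit=5):
        self.app = app
        self.limit = limit

    def __call__(self, environ, start_response):
        tracemalloc.clear_traces()
        try:
            return self.app(environ, start_response)
        finally:
            stats = tracemalloc.take_snapshot().statistics("lineno")
            for stat in stats[:self.limit]:
                print(stat)


tracemalloc.start()
app = TraceMiddleware(hello_app)
app({}, lambda status, headers: None)  # simulate a single request
tracemalloc.stop()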
Example #17
    def measure_memory_usage(self):
        tracemalloc.start()
        self.func(**self.kwargs)
        _, peak_memory_usage = tracemalloc.get_traced_memory()
        peak_memory_usage = peak_memory_usage / (1024**2)
        tracemalloc.clear_traces()

        logging.info(f"{peak_memory_usage:.6f} MiB - peak memory usage "
                     f"for '{self.func.__name__}'.")
        return peak_memory_usage
Example #18
    def test_get_traces(self):
        tracemalloc.clear_traces()
        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)
        traces = tracemalloc._get_traces()
        trace = self.find_trace(traces, obj_traceback)
        self.assertIsInstance(trace, tuple)
        domain, size, traceback = trace
        self.assertEqual(size, obj_size)
        self.assertEqual(traceback, obj_traceback._frames)
        tracemalloc.stop()
        self.assertEqual(tracemalloc._get_traces(), [])
Example #19
    def get_mem_usage(self, func, kwargs):
        tracemalloc.clear_traces()
        tracemalloc.start()
        func(**kwargs)
        _, peak_usage_B = tracemalloc.get_traced_memory()
        tracemalloc.stop()

        kwargs_size = self.get_kwargs_size(kwargs)
        peak_usage_MB = round(peak_usage_B / 1048576, 4)
        total_usage = round(kwargs_size + peak_usage_MB, 4)

        return kwargs_size, peak_usage_MB, total_usage
Example #20
    def analyse(self, lst):
        tm.clear_traces()
        tm.start()
        random.shuffle(lst)
        try:
            output = self.__sorting_module.mySort(lst)
        except Exception as e:
            raise Exception("User code exception: " + str(e))

        _, peak = tm.get_traced_memory()
        if not self._is_sorted(output, lst):
            # raise SortedError(self.__test_name, lst)
            print("Not sorted")
        tm.stop()
        return peak
Example #21
    def test_get_traces(self):
        tracemalloc.clear_traces()
        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)

        traces = tracemalloc._get_traces()
        trace = self.find_trace(traces, obj_traceback)

        self.assertIsInstance(trace, tuple)
        domain, size, traceback = trace
        self.assertEqual(size, obj_size)
        self.assertEqual(traceback, obj_traceback._frames)

        tracemalloc.stop()
        self.assertEqual(tracemalloc._get_traces(), [])
Example #22
def memory_tracer():
    tracemalloc.start()
    tracemalloc.clear_traces()

    filters = (
        tracemalloc.Filter(True, aiormq.__file__),
        tracemalloc.Filter(True, pamqp.__file__),
        tracemalloc.Filter(True, asyncio.__file__),
    )

    snapshot_before = tracemalloc.take_snapshot().filter_traces(filters)

    def format_stat(stats):
        items = [
            "TOP STATS:",
            "%-90s %6s %6s %6s" % ("Traceback", "line", "size", "count")
        ]

        for stat in stats:
            fname = stat.traceback[0].filename
            lineno = stat.traceback[0].lineno
            items.append(
                "%-90s %6s %6s %6s" % (
                    fname,
                    lineno,
                    stat.size_diff,
                    stat.count_diff
                )
            )

        return "\n".join(items)

    try:
        yield

        gc.collect()

        snapshot_after = tracemalloc.take_snapshot().filter_traces(filters)

        top_stats = snapshot_after.compare_to(
            snapshot_before, 'lineno', cumulative=True
        )

        if top_stats:
            logging.error(format_stat(top_stats))
            raise AssertionError("Possible memory leak")
    finally:
        tracemalloc.stop()
Example #23
    def _end_trace_malloc(self):
        logger.debug(msg="=== START SNAPSHOT ===")
        snapshot = tracemalloc.take_snapshot()
        snapshot = snapshot.filter_traces(filters=self._get_trace_malloc_filters())
        for stat in snapshot.statistics(key_type="lineno", cumulative=True):
            logger.debug(msg=f"{stat}")
        if self.show_memory:
            size, peak = tracemalloc.get_traced_memory()
            snapshot_size = tracemalloc.get_tracemalloc_memory()
            logger.debug(
                msg=f"❕size={self._bytes_to_megabytes(size=size)}, "
                f"❗peak={self._bytes_to_megabytes(size=peak)}, "
                f"💾snapshot_size={self._bytes_to_megabytes(size=snapshot_size)}"
            )
        if self.clear_traces:
            tracemalloc.clear_traces()
        logger.debug(msg="=== END SNAPSHOT ===")
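_get_trace_malloc_filters() is project-specific. A hedged sketch of what such a helper might return, using exclusion filters similar to those in the tracemalloc documentation's display_top() example:

# Hedged sketch of what a _get_trace_malloc_filters()-style helper might
# return: exclusion filters that drop importlib, unknown frames and
# tracemalloc's own allocations from a snapshot.
import tracemalloc


def example_trace_malloc_filters():
    return (
        tracemalloc.Filter(False, "<frozen importlib._bootstrap>"),
        tracemalloc.Filter(False, "<unknown>"),
        tracemalloc.Filter(False, tracemalloc.__file__),
    )


tracemalloc.start()
data = [dict(i=i) for i in range(1000)]
snapshot = tracemalloc.take_snapshot().filter_traces(example_trace_malloc_filters())
for stat in snapshot.statistics(key_type="lineno", cumulative=True)[:5]:
    print(stat)
tracemalloc.stop()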
Example #24
    def fit(self, X, y):
        """Run fit with all sets of parameters.

        Parameters
        ----------
        X : array-like of shape (n_samples, n_features)

        y : array-like of shape (n_samples,) 

        """

        X, y = self._dataframe(X, y)

        # Training and evaluation input functions.
        train_input_fn = self._make_input_fn(X, y)

        # feature selection
        num_columns = self.feature
        feature_columns = []
        n_classes = len(np.unique(y))

        for feature_name in num_columns:
            feature_columns.append(
                tf.feature_column.numeric_column(feature_name,
                                                 dtype=tf.float32))
        self.est = tf.estimator.BoostedTreesClassifier(
            feature_columns,
            n_batches_per_layer=self.n_batches_per_layer,
            n_classes=n_classes,
            n_trees=self.n_trees,
            max_depth=self.max_depth,
            learning_rate=self.learning_rate,
            label_vocabulary=self.label_vocabulary,
            model_dir=self.model_dir,
            l1_regularization=1,
            l2_regularization=1)
        tracemalloc.start()
        t0 = process_time()
        self.est.train(train_input_fn,
                       max_steps=self.max_steps,
                       steps=self.steps)
        self.time_ = process_time() - t0
        self.memory = tracemalloc.get_traced_memory()[0]
        tracemalloc.clear_traces()
        return self
Example #25
    def test_get_traced_memory(self):
        # Call various things that we'll be calling first, in case
        # initializing them will cause some memory to be allocated.
        allocate_bytes(1024)
        self.assertGreaterEqual(4, 3)
        self.assertLessEqual(3, 4)
        self.assertLess(3, 4)
        self.assertEqual(4, 4)

        # Python allocates some internals objects, so the test must tolerate
        # a small difference between the expected size and the real usage
        max_error = 2048

        # Allocate and destroy one object with no unnecessary code in between.
        obj_size = 1024 * 1024
        tracemalloc.clear_traces()
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        obj = None
        size2, peak_size2 = tracemalloc.get_traced_memory()

        self.assertGreaterEqual(size, obj_size)
        self.assertGreaterEqual(peak_size, size)

        self.assertLessEqual(size - obj_size, max_error)
        self.assertLessEqual(peak_size - size, max_error)

        self.assertLess(size2, size)
        self.assertGreaterEqual(size - size2, obj_size - max_error)
        self.assertGreaterEqual(peak_size2, peak_size)

        # clear_traces() must reset traced memory counters
        ae = self.assertEqual
        tracemalloc.clear_traces()
        ae(tracemalloc.get_traced_memory(), (0, 0))

        # allocate another object
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)

        # stop() also resets traced memory counters
        tracemalloc.stop()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
Example #26
    def test_new_reference(self):
        tracemalloc.clear_traces()
        # gc.collect() indirectly calls PyList_ClearFreeList()
        support.gc_collect()

        # Create a list and "destroy it": put it in the PyListObject free list
        obj = []
        obj = None

        # Create a list which should reuse the previously created empty list
        obj = []

        nframe = tracemalloc.get_traceback_limit()
        frames = get_frames(nframe, -3)
        obj_traceback = tracemalloc.Traceback(frames, min(len(frames), nframe))

        traceback = tracemalloc.get_object_traceback(obj)
        self.assertIsNotNone(traceback)
        self.assertEqual(traceback, obj_traceback)
Example #27
    def test_get_traced_memory(self):
        max_error = 2048
        obj_size = 1024 * 1024
        tracemalloc.clear_traces()
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)
        self.assertGreaterEqual(peak_size, size)
        self.assertLessEqual(size - obj_size, max_error)
        self.assertLessEqual(peak_size - size, max_error)
        obj = None
        size2, peak_size2 = tracemalloc.get_traced_memory()
        self.assertLess(size2, size)
        self.assertGreaterEqual(size - size2, obj_size - max_error)
        self.assertGreaterEqual(peak_size2, peak_size)
        tracemalloc.clear_traces()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
        obj, obj_traceback = allocate_bytes(obj_size)
        size, peak_size = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(size, obj_size)
        tracemalloc.stop()
        self.assertEqual(tracemalloc.get_traced_memory(), (0, 0))
Example #28
    def test_reset_peak(self):
        # Python allocates some internals objects, so the test must tolerate
        # a small difference between the expected size and the real usage
        tracemalloc.clear_traces()

        # Example: allocate a large piece of memory, temporarily
        large_sum = sum(list(range(100000)))
        size1, peak1 = tracemalloc.get_traced_memory()

        # reset_peak() resets peak to traced memory: peak2 < peak1
        tracemalloc.reset_peak()
        size2, peak2 = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(peak2, size2)
        self.assertLess(peak2, peak1)

        # check that the peak continues to be updated if new memory is
        # allocated: peak3 > peak2
        obj_size = 1024 * 1024
        obj, obj_traceback = allocate_bytes(obj_size)
        size3, peak3 = tracemalloc.get_traced_memory()
        self.assertGreaterEqual(peak3, size3)
        self.assertGreater(peak3, peak2)
        self.assertGreaterEqual(peak3 - peak2, obj_size)
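reset_peak() (available since Python 3.9) only lowers the peak counter and keeps the recorded traces, whereas clear_traces() discards everything. A hedged sketch of per-phase peak measurement built on that difference:

# Hedged sketch: per-phase peak measurement with reset_peak() (Python 3.9+),
# which lowers only the peak counter while keeping the current traces, unlike
# clear_traces(), which also discards every recorded trace.
import tracemalloc

tracemalloc.start()

phase1 = sum(list(range(200_000)))            # temporary allocation spike
_, peak1 = tracemalloc.get_traced_memory()

tracemalloc.reset_peak()                      # peak now equals current usage

phase2 = [bytes(1024) for _ in range(1000)]   # second spike
_, peak2 = tracemalloc.get_traced_memory()

print(f"phase 1 peak: {peak1} B, phase 2 peak: {peak2} B")
tracemalloc.stop()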
Example #29
    def test_get_object_traceback(self):
        tracemalloc.clear_traces()
        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertEqual(traceback, obj_traceback)
Example #30
File: profile.py Project: Krukov/levin
    def start_trace(self):
        sys.setprofile(self._trace)
        if self._trace_mem:
            tracemalloc.clear_traces()
            tracemalloc.start()
Example #31
def main():
    # workaround, start tracing IPA imports and API init ASAP
    if any('--enable-tracemalloc' in arg for arg in sys.argv):
        tracemalloc.start()

    try:
        ccname = get_ccname()
    except ValueError as e:
        print("ERROR:", e, file=sys.stderr)
        print(
            "\nliteserver requires a KRB5CCNAME env var and "
            "a valid Kerberos TGT:\n",
            file=sys.stderr)
        print("    export KRB5CCNAME=~/.ipa/ccache", file=sys.stderr)
        print("    kinit\n", file=sys.stderr)
        sys.exit(1)

    api = init_api(ccname)

    if api.env.lite_tracemalloc:
        # print memory snapshot of import + init
        snapshot = tracemalloc.take_snapshot()
        display_tracemalloc(snapshot, limit=api.env.lite_tracemalloc)
        del snapshot
        # From here on, only trace requests.
        tracemalloc.clear_traces()

    if os.path.isfile(api.env.lite_pem):
        ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)
        ctx.load_cert_chain(api.env.lite_pem)
    else:
        ctx = None

    app = NotFound()
    app = DispatcherMiddleware(
        app, {
            '/ipa': KRBCheater(api.Backend.wsgi_dispatch, ccname),
        })

    # only profile api calls
    if api.env.lite_profiler == '-':
        print('Profiler enable, stats are written to stderr.')
        app = ProfilerMiddleware(app, stream=sys.stderr, restrictions=(30, ))
    elif api.env.lite_profiler:
        profile_dir = os.path.abspath(api.env.lite_profiler)
        print("Profiler enable, profiles are stored in '{}'.".format(
            profile_dir))
        app = ProfilerMiddleware(app, profile_dir=profile_dir)

    if api.env.lite_tracemalloc:
        app = TracemallocMiddleware(app, api)

    app = StaticFilesMiddleware(app, STATIC_FILES)
    app = redirect_ui(app)

    run_simple(
        hostname=api.env.lite_host,
        port=api.env.lite_port,
        application=app,
        processes=5,
        ssl_context=ctx,
        use_reloader=True,
        # debugger doesn't work because framework catches all exceptions
        # use_debugger=not api.env.webui_prod,
        # use_evalex=not api.env.webui_prod,
    )

Example #32
def pascal_list(size):
    l = [[binomial_coefficent(n, k) for k in range(n + 1)]
         for n in range(size + 1)]
    for row in l:
        for item in row:
            pass
    stats = tracemalloc.take_snapshot().statistics("lineno")
    return stats[0].size, "bytes"


def pascal_gen(size):
    g = ((binomial_coefficent(n, k) for k in range(n + 1))
         for n in range(size + 1))
    for row in g:
        for item in row:
            pass
    stats = tracemalloc.take_snapshot().statistics("lineno")
    return stats[0].size, "bytes"


tracemalloc.start()
result = pascal_list(300)  # returns 1090728 bytes

tracemalloc.stop()
tracemalloc.clear_traces()
tracemalloc.start()

result = pascal_gen(300)  # returns 1136 bytes
print(result)
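The same list-versus-generator comparison can also be phrased with the peak counters, resetting between runs. A hedged sketch with binom() as a stand-in for the example's binomial_coefficent():

# Hedged sketch: the same list-versus-generator comparison expressed with the
# peak counters; binom() is a stand-in for the example's binomial_coefficent().
import math
import tracemalloc


def binom(n, k):
    return math.comb(n, k)


def measure(build):
    tracemalloc.start()
    for row in build(300):
        for _ in row:
            pass
    _, peak = tracemalloc.get_traced_memory()
    tracemalloc.stop()  # stop() also clears the traces and counters
    return peak


list_peak = measure(lambda size: [[binom(n, k) for k in range(n + 1)]
                                  for n in range(size + 1)])
gen_peak = measure(lambda size: ((binom(n, k) for k in range(n + 1))
                                 for n in range(size + 1)))
print(list_peak, gen_peak)  # the list variant peaks far higher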
Example #33
File: nodes.py Project: xxoolm/Ryven
    def update_event(self, inp=-1):
        self.set_output_val(0, tracemalloc.clear_traces())
Example #34
    def analyse(title, func, *args):
        print('-' * 25, title, '-' * 25)
        tracemalloc.clear_traces()
        print(func(*args))
        mem_current, mem_peak = tracemalloc.get_traced_memory()
        print(f'{title}> current: {mem_current}, peak {mem_peak}')
Example #35
    def test_get_object_traceback(self):
        tracemalloc.clear_traces()
        obj_size = 12345
        obj, obj_traceback = allocate_bytes(obj_size)
        traceback = tracemalloc.get_object_traceback(obj)
        self.assertEqual(traceback, obj_traceback)
Example #36
    def run(self,
            leak_threshold=1024,
            running_leak_threshold=4096,
            save_to_json=False,
            assert_during_execution=False):
        """
        Run memory test by opening the test state machine, initializing tracemalloc, starting state machine and taking
        snapshots at each one, then filtering snapshots, checking for uncollectable objects in memory and saving
        total memory usage of the iteration.
        If there are uncollectable objects then the test fails.
        or if the memory usage increases above the defined threshold, then the memory test fails.

        Parameters:
        leak threshold (int): Maximum amount of memory usage fluctuation allowed before saying there is a memory leak

        Returns:
        dict:Returning test results

       """
        import tracemalloc
        # Choose State Machine to be Tested and Open It
        from rafcon.core import start as core_start
        sm = core_start.open_state_machine(self.paths_to_be_tested[self.key])

        # Test
        # Initialize Tracemalloc
        tracemalloc.start()

        def memory_assertion(stop, pre_run_snapshot_size, threshold):
            while not stop[0]:
                assert max(
                    self._get_total_size(
                        self._filter_snapshot(tracemalloc.take_snapshot())) -
                    pre_run_snapshot_size, 0) < threshold
                time.sleep(0.1)

        # Run Test
        for i in range(self.number_iterations):
            print("\n\n\n\n\n\n\n\n\n\n------------------------------\n")
            print("Iteration number: ", i, "\n")
            print("------------------------------\n")
            if assert_during_execution:
                memory_assertion_thread_stop = [False]
                memory_assertion_thread = threading.Thread(
                    target=memory_assertion,
                    args=(memory_assertion_thread_stop,
                          self._get_total_size(
                              self._filter_snapshot(
                                  tracemalloc.take_snapshot())),
                          running_leak_threshold))
                memory_assertion_thread.start()
            self._run_iteration(sm)
            if assert_during_execution:
                memory_assertion_thread_stop[0] = True
                memory_assertion_thread.join()
            current_snapshot = self._filter_snapshot(
                tracemalloc.take_snapshot())
            self.total_memory.append(self._get_total_size(current_snapshot))
            print(self._display_top(current_snapshot), "\n")

            # Get gc stats and update uncollectable object count
            gc_stats = gc.get_stats()
            for j in range(len(gc_stats)):
                if gc_stats[j]['uncollectable'] >= 0:
                    self.uncollected_object_count = self.uncollected_object_count + gc_stats[
                        j]['uncollectable']

            # Print stats of python's gc and objects in memory
            print("\nStats of gc: ", gc_stats, "\n")

            # Clear tracemalloc traces
            tracemalloc.clear_traces()

        print("\nTotal Memory: ", self.total_memory)
        print("Total Uncollected Objects: ", self.uncollected_object_count,
              "\n")

        # Stop Tracemalloc
        tracemalloc.stop()
        if save_to_json:
            self._save_to_json()

        total_leak = self.total_memory[-1] - self.total_memory[0]
        if total_leak < 0:
            total_leak = 0

        test_results = {
            "uncollectable": self.uncollected_object_count,
            "leak": total_leak
        }

        print("Total memory leak: ", total_leak)
        print("Number of uncollectable objects: ",
              self.uncollected_object_count)

        print(
            "\n\n\n=================== Asserting Results ===================\n\n\n"
        )

        assert (test_results["uncollectable"] == 0)
        assert (test_results["leak"] < leak_threshold)

        return test_results
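A hedged, stripped-down sketch of the iterate / snapshot / threshold idea described in the docstring above, with run_workload() as a hypothetical stand-in for executing the state machine:

# Hedged, stripped-down sketch of the iterate / snapshot / threshold idea.
# run_workload() is a hypothetical stand-in for executing the state machine.
import gc
import tracemalloc


def run_workload():
    return [bytes(256) for _ in range(100)]


def memory_leak_check(iterations=5, leak_threshold=1024):
    tracemalloc.start()
    totals = []
    for _ in range(iterations):
        run_workload()
        gc.collect()
        snapshot = tracemalloc.take_snapshot()
        totals.append(sum(stat.size for stat in snapshot.statistics("lineno")))
        tracemalloc.clear_traces()  # start every iteration from a clean slate
    tracemalloc.stop()
    leak = max(totals[-1] - totals[0], 0)
    assert leak < leak_threshold, f"possible leak of {leak} bytes"
    return leak


memory_leak_check()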