Example no. 1
 def wrapper():
     t1 = timeit.default_timer()
     rez = funct()
     time = timeit.default_timer() - t1
     memory_profiler.profile(funct).__call__()
     print(f'Time: {time}')
     return rez
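The fragment above closes over funct from an enclosing scope. A minimal sketch of the decorator it likely belongs to, assuming a factory named benchmark (the name is hypothetical) and a no-argument target function:

import timeit
import memory_profiler

def benchmark(funct):
    def wrapper():
        t1 = timeit.default_timer()
        rez = funct()
        elapsed = timeit.default_timer() - t1
        # run a memory-profiled copy once so the line-by-line report is printed
        memory_profiler.profile(funct)()
        print(f'Time: {elapsed}')
        return rez
    return wrapper

As in the original fragment, the target function runs twice: once for timing and once under the memory profiler.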
Example no. 2
def memit(f):
    s = StringIO.StringIO()
    profile(f, s, precision=2)()
    s.seek(0)
    out = s.read()
    print out
    nums = mem_re.findall(out)
    print nums
    return float(nums[-1]) + float(nums[-2])
Example no. 3
def memit(f):
    s = StringIO.StringIO()
    profile(f, s, precision=2)()
    s.seek(0)
    out = s.read()
    print out
    nums = mem_re.findall(out)
    print nums
    return float(nums[-1]) + float(nums[-2])
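The memit helper above is Python 2 and relies on a module-level regex mem_re that is not shown. A rough Python 3 sketch with the assumed pieces spelled out (the regex is a guess at the NN.NN MiB columns of memory_profiler's report):

import re
from io import StringIO
from memory_profiler import profile

# assumed pattern for the "Mem usage" / "Increment" columns of the report
mem_re = re.compile(r"(\d+\.\d+) MiB")

def memit(f):
    s = StringIO()
    profile(f, s, precision=2)()
    s.seek(0)
    out = s.read()
    nums = mem_re.findall(out)
    # as in the original, sum the last two figures captured from the report
    return float(nums[-1]) + float(nums[-2])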
Example no. 4
def TrainingTCCustom(path="Dataset_lab/CROP"):
    # sizes = [64, 100, 125, 320, 640]
    sizes = [400]
    # sizes = [12]
    training_numbers = [4, 5, 6, 7, 8, 9, 10]
    # training_numbers = [8]
    for size in sizes:
        print("size", size)
        for num in training_numbers:
            print("num: ", num)
            t_operasi1 = time()
            mainCustom(path=path, trainingNumber=num, size=size)
            t_operasi2 = time()
            t_waktu_computasi = t_operasi2 - t_operasi1
            profile("DURASI:", t_waktu_computasi)
Example no. 5
def NONtrainingTCCustom(path="Dataset_lab/CROP"):
    sizes = [12, 25, 64, 100, 125, 320]
    # sizes = [12]
    training_numbers = [1, 2, 3, 4, 5, 6, 7, 8]
    # training_numbers = [10]
    # test_number = 6
    for size in sizes:
        print("size", size)
        for num in training_numbers:
            print("num: ", num)
            t_operasi1 = time()
            mainCustom(path=path, trainingNumber=num, size=size)
            t_operasi2 = time()
            t_waktu_computasi = t_operasi2 - t_operasi1
            profile("DURASI:", t_waktu_computasi)
Example no. 6
def memProfileWrapper(towrap, *funNames):
    """
        Create a wrapper for the functions you want to use, wrapping up the
        class, and putting profile wrappers on the functions in funNames.

        :param class towrap: Class to wrap
        :param str funNames: Any number of function names to wrap
        :rtype: class
        :return: memory profiled wrapped class

        For example::

            foo_mem = memProfileWrapper(foo, 'my_func')
            fooi = foo_mem()
            for i in range(5):
                fooi.my_func()

        Then run it from the command line::

            python -m memory_profiler exampleMemWrapper.py
    """
    from memory_profiler import profile

    attrs = {}
    for f in funNames:
        if hasattr(towrap, f):
            attrs[f] = profile(getattr(towrap, f))
        else:
            print("{0!s} not found in {1!s} Class".format(f, towrap.__name__))

    return type(towrap.__name__ + "MemProfileWrap", (towrap, ), attrs)
Example no. 7
def make_processor(session: sa_orm.Session,
                   processor_config: dict,
                   use_memory_profiler: bool = False):
    """
    Factory method to create a processor using partial functions

    :param session: SQLAlchemy session
    :param processor_config: processor configuration dictionary
    :param use_memory_profiler: if True, wrap the processor with memory_profiler.profile
    :return: processor function
    """
    extractor_config = processor_config['extractor']
    extractor_func = getattr(extractors, extractor_config['name'])
    extractor = functools.partial(extractor_func, session,
                                  **extractor_config.get('kwargs', {}))

    loader_config = processor_config['loader']
    loader_func = getattr(loaders, loader_config['name'])
    loader = functools.partial(loader_func, session,
                               **loader_config.get('kwargs', {}))

    transformer_config = processor_config['transformer']
    transformer = functools.partial(transformers.transform_submissions,
                                    session, **transformer_config)

    processor_func = processor.process
    if use_memory_profiler:
        from memory_profiler import profile
        processor_func = profile(processor_func)

    return functools.partial(processor_func, extractor, transformer, loader)
Example no. 8
def conditional_memory_profile(func):
    """Enables memory profiling if set in the command line options."""
    if config.OPTS.profile_mem:
        from memory_profiler import profile
        return profile(func)
    else:
        return func
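A hypothetical usage of the decorator above; config.OPTS.profile_mem is assumed to be filled in by the project's command-line parser:

@conditional_memory_profile
def load_rows(path):
    # profiled line by line only when memory profiling was requested on the command line
    with open(path) as fh:
        return [line.rstrip('\n').split(',') for line in fh]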
Example no. 9
    def wrapper(self, f, *args, **kwargs):
        # memory_profiler
        with StringIO() as s:
            rtn = profile(f, stream=s, precision=2)(*args, **kwargs)
            memory_value = self._memory_profiler_parse(s.getvalue())

        # line_profiler
        prof = LineProfiler()
        prof.add_function(f)

        rtn = prof.runcall(f, *args, **kwargs)
        with StringIO() as s:
            prof.print_stats(stream=s)
            mix, line_tmp = self._line_profiler_parse(s.getvalue())

        # memory line mix output
        template = self.L_M_TEMPLATE
        for l, m in zip(line_tmp, memory_value):
            l_m_mix = l[:5] + m
            mix.append(template.format(*l_m_mix))
        mix[self.L_M_HEADER_INDEX] = template.format(*self.L_M_HEADER)
        mix[self.L_M_SEPARATOR_INDEX] += "=" * 27
        self.logger.debug("line, memory profiler result\n" + "\n".join(mix))

        return rtn
Example no. 10
def pydio_profile(func=None, stream=None, precision=6):
    """
    Pydio wrapper for profile function from memory_profiler module

    :param func: function
    :param stream: i/o stream
    :param precision: integer
    """
    if sys.argv.__contains__('-mp=True') or sys.argv.__contains__('--memory_profile=True'):
        return profile(func, stream, precision)
    elif sys.argv.__contains__('-mp') or sys.argv.__contains__('--memory_profile'):
        index = sys.argv.index('-mp') if sys.argv.__contains__('-mp') else sys.argv.index('--memory_profile')
        return profile(func, stream, precision) if (str(sys.argv[index+1]).lower() == 'true') else func
    else:
        return func
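Used as a plain decorator, profiling kicks in only when the process is launched with -mp True or --memory_profile True; the decorated function below is a hypothetical placeholder:

@pydio_profile
def sync_changes(changes):
    # placeholder body; the real Pydio workers do the actual synchronisation
    return [c for c in changes if c]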
Example no. 11
def memProfileWrapper(towrap, *funNames):
    """
        Create a wrapper for the functions you want to use, wrapping up the
        class, and putting profile wrappers on the functions in funNames.

        :param class towrap: Class to wrap
        :param str funNames: Any number of function names to wrap
        :rtype: class
        :return: memory profiled wrapped class

        For example::

            foo_mem = memProfileWrapper(foo, 'my_func')
            fooi = foo_mem()
            for i in range(5):
                fooi.my_func()

        Then run it from the command line::

            python -m memory_profiler exampleMemWrapper.py
    """
    from memory_profiler import profile
    attrs = {}
    for f in funNames:
        if hasattr(towrap,f):
            attrs[f] = profile(getattr(towrap,f))
        else:
            print '%s not found in %s Class' % (f, towrap.__name__)

    return type(towrap.__name__ + 'MemProfileWrap', (towrap,), attrs)
Example no. 12
def profile_memory_usage(func, *args, **kwargs):
    """
    Profile the amount of memory used in a python function
    """
    from memory_profiler import profile

    return profile(func(*args, **kwargs))
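Note that func(*args, **kwargs) is evaluated before profile is applied, so profile receives the function's return value rather than the function itself. A variant (an assumption, not the original project's code, named profile_call_memory here) that wraps the function first and then calls it, so the per-line report covers the call:

from memory_profiler import profile

def profile_call_memory(func, *args, **kwargs):
    """Profile the memory used while running a Python function."""
    return profile(func)(*args, **kwargs)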
Example no. 13
    def wrapper(self, f, *args, **kwargs):
        # output memory_profiler
        with StringIO() as s:
            rtn = profile(f, stream=s, precision=2)(*args, **kwargs)
            msg = "memory_profiler result\n{}".format(s.getvalue())
        self.logger.debug(msg)

        return rtn
Example no. 14
def profile_memory():
    """Profile the memory usage of the Python program with memory_profiler"""
    from memory_profiler import profile
    from memory_profiler import memory_usage
    set_logger_debug()

    rpa_eb = RPA_EB(MODEL_PATH, SHAPE, BUF_SIZE)

    # Add decorators for potentially memory eating methods
    rpa_eb._run_test = profile(rpa_eb._run_test)
    rpa_eb._proc_frame = profile(rpa_eb._proc_frame)
    rpa_eb._merge_bboxes = profile(rpa_eb._merge_bboxes)

    def test_single_proc():
        rpa_eb.run(test=True, multi_process=False, frame_num=1)

    test_single_proc()
    print("Total memory usage: {} MiB".format(memory_usage()[0]))
Example no. 15
 def wrapper(*args, **kwargs):
     m = StringIO()
     temp_func = memory_profiler.profile(func=function,
                                         stream=m,
                                         precision=4)
     output = temp_func(*args, **kwargs)
     print(m.getvalue())
     m.close()
     return output
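The wrapper above closes over function; a sketch of the enclosing decorator it presumably lives in (the outer name memory_report is an assumption):

import io
import memory_profiler

def memory_report(function):
    def wrapper(*args, **kwargs):
        m = io.StringIO()
        temp_func = memory_profiler.profile(func=function,
                                            stream=m,
                                            precision=4)
        output = temp_func(*args, **kwargs)
        # the captured report is printed once the call has finished
        print(m.getvalue())
        m.close()
        return output
    return wrapper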
Example no. 16
def memprof(func):
    """ requires memory_profiler
    pip install memory_profiler

    References:
        https://pypi.python.org/pypi/memory_profiler

    """
    import memory_profiler
    return memory_profiler.profile(func)
Example no. 17
def memprof(func):
    """ requires memory_profiler
    pip install memory_profiler

    References:
        https://pypi.python.org/pypi/memory_profiler

    """
    import memory_profiler
    return memory_profiler.profile(func)
Example no. 18
def mongodb(request):
    stream = StringIO()
    docs = memory_profiler.profile(_get_documents, stream=stream)(request)
    extra = '%s\nEXTRA:\n%s' % (stream.getvalue(), '')
    resp = render(request, "mongodb.html", {"documents": docs, "extra": extra})
    del docs
    print('objgraph.show_growth(limit=100):')
    objgraph.show_growth(limit=100)
    # print('objgraph.show_backrefs(get_objs()):')
    # objgraph.show_backrefs(get_objs())
    return resp
Example no. 19
def pydio_profile(func=None, stream=None, precision=6):
    """
    Pydio wrapper for profile function from memory_profiler module

    :param func: function
    :param stream: i/o stream
    :param precision: integer
    """
    if sys.argv.__contains__('-mp=True') or sys.argv.__contains__(
            '--memory_profile=True'):
        return profile(func, stream, precision)
    elif sys.argv.__contains__('-mp') or sys.argv.__contains__(
            '--memory_profile'):
        index = sys.argv.index('-mp') if sys.argv.__contains__(
            '-mp') else sys.argv.index('--memory_profile')
        return profile(func, stream, precision) if (str(
            sys.argv[index + 1]).lower() == 'true') else func
    else:
        return func
Example no. 20
    def wrp(func):
        def wrapper(*args, **kwargs):
            start = time.time()
            for n in range(cnt):
                value = func(*args, **kwargs)
            end = time.time()
            runtime = (10**9) * (end - start) / cnt
            print(f"\nTime of work {func.__name__}({args}): {runtime}\n")
            return value

        # the profile decorator prints a line-by-line memory usage report for the wrapped code
        return profile(precision=10)(wrapper)
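Here cnt comes from an enclosing scope, so this is the inner half of a decorator factory. A sketch of the complete factory, assuming the outer function (named repeat_benchmark here, an assumption) takes the repetition count:

import time
from memory_profiler import profile

def repeat_benchmark(cnt):
    def wrp(func):
        def wrapper(*args, **kwargs):
            start = time.time()
            for n in range(cnt):
                value = func(*args, **kwargs)
            end = time.time()
            runtime = (10**9) * (end - start) / cnt
            print(f"\nTime of work {func.__name__}({args}): {runtime}\n")
            return value
        # the profile decorator prints a memory report for the wrapped call
        return profile(precision=10)(wrapper)
    return wrp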
Example no. 21
def mprofile(model_type, exp_method):
    mlog_name = "mlog/" + model_type + "_" + exp_method + "_" + device + ".mlog"
    with open(mlog_name, "w") as log_file:
        mprofile_predict = profile(func=predict, stream=log_file)
        mprofile_predict(model_type, exp_method)
        if device == "gpu":
            log_file.write("======== VRAM INFO ========\n")
            vram = torch.cuda.max_memory_reserved(
                device=torch.device("cuda:0"))
            log_file.write("cuda:0 reserved " + str(vram) + " bytes")
        else:
            log_file.write(
                "======== CPU COMPUTATION, NO VRAM USAGE ========\n")
Example no. 22
 def wrapper(*args, **kwargs):
     m = StringIO()
     pr = cProfile.Profile()
     pr.enable()
     temp_func = memory_profiler.profile(func=function,
                                         stream=m,
                                         precision=4)
     output = temp_func(*args, **kwargs)
     print(m.getvalue())
     pr.disable()
     ps = pstats.Stats(pr)
     ps.sort_stats('cumulative').print_stats(
         '(?!.*memory_profiler.*)(^.*$)', 20)
     m.close()
     return output
Example no. 23
 def decowrapper(*args, **kwargs):
     try:
         self.start(filename, funcname)
         ret = profile(func)(*args, **kwargs)
         self.finish()
         return ret
     except Exception as e:
         for i in range(1, self.stacklevel + 1, 1):
             stack = self.stack[i].copy()
             del stack['start']
             self.critical(stack)
         self.critical(type(e))
         self.critical(e)
         self.critical(traceback.format_exc())
         sys.exit()
Example no. 24
            """Wrapper for the reference lvdmaaten/bhtsne implementation."""
            # PCA preprocessing is done elsewhere in the benchmark script
            n_iter = -1  # TODO find a way to report the number of iterations
            return run_bh_tsne(X, use_pca=False, perplexity=args.perplexity,
                               verbose=args.verbose > 0), n_iter
        methods.append(("lvdmaaten/bhtsne", bhtsne))

    if args.profile:

        try:
            from memory_profiler import profile
        except ImportError:
            raise ImportError("To run the benchmark with `--profile`, you "
                              "need to install `memory_profiler`. Please "
                              "run `pip install memory_profiler`.")
        methods = [(n, profile(m)) for n, m in methods]

    data_size = [100, 500, 1000, 5000, 10000]
    if args.all:
        data_size.append(70000)

    results = []
    basename, _ = os.path.splitext(__file__)
    log_filename = os.path.join(LOG_DIR, basename + '.json')
    for n in data_size:
        X_train = X[:n]
        y_train = y[:n]
        n = X_train.shape[0]
        for name, method in methods:
            print("Fitting {} on {} samples...".format(name, n))
            t0 = time()
Example no. 25
 def predict_mem_profile(self, img):
     wrapper = profile(self.do_predict)
     return wrapper(img)
Example no. 26
    dataset = luigi.Parameter(description="Dataset to resample")
    datasets = luigi.ListParameter(
        description="Names of the datasets to use",
        visibility=luigi.parameter.ParameterVisibility.HIDDEN)

    def requires(self):
        return FindResamplingAxis(datasets=self.datasets,
                                  pool_size=self.pool_size)
    
    def output(self):
        return self._as_target("{0}.npy".format(self.dataset))
    
    def run(self):
        with self.output().temporary_path() as tmp_path:
            subprocess.run([
                "/usr/local/bin/python", "-m", "bin.resampling",
                os.path.join(self.INPUT_DIR, self.dataset),  # dataset path
                self.input().path,  # mz axis path
                tmp_path  # destination path
            ], check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)


if __name__ == '__main__':
    from memory_profiler import profile
    ResampleDataset.run = profile(ResampleDataset.run)
    if os.path.exists('/data/01-resampled/my-dataset1.npy'):
        os.remove('/data/01-resampled/my-dataset1.npy')
    luigi.build([
        ResampleDataset(dataset='my-dataset1', datasets=['my-dataset1', 'my-dataset2'])
    ], local_scheduler=True)
Example no. 27
            """Wrapper for the reference lvdmaaten/bhtsne implementation."""
            # PCA preprocessing is done elsewhere in the benchmark script
            n_iter = -1  # TODO find a way to report the number of iterations
            return run_bh_tsne(X, use_pca=False, perplexity=args.perplexity,
                               verbose=args.verbose > 0), n_iter
        methods.append(("lvdmaaten/bhtsne", bhtsne))

    if args.profile:

        try:
            from memory_profiler import profile
        except ImportError as e:
            raise ImportError("To run the benchmark with `--profile`, you "
                              "need to install `memory_profiler`. Please "
                              "run `pip install memory_profiler`.") from e
        methods = [(n, profile(m)) for n, m in methods]

    data_size = [100, 500, 1000, 5000, 10000]
    if args.all:
        data_size.append(70000)

    results = []
    basename = os.path.basename(os.path.splitext(__file__)[0])
    log_filename = os.path.join(LOG_DIR, basename + '.json')
    for n in data_size:
        X_train = X[:n]
        y_train = y[:n]
        n = X_train.shape[0]
        for name, method in methods:
            print("Fitting {} on {} samples...".format(name, n))
            t0 = time()
Example no. 28
def main(
    size_gb, path, clear, check_memory_measurement, with_line_profiler, compress_packs
):
    """Testing performance and size on disk when storing a single big file containing only zeros."""
    start_mem = get_memory()

    if (
        check_memory_measurement
    ):  # To test that the measurement of the memory is reliable
        # Test of memory allocation
        size_mb = 400
        size = size_mb * 1024 * 1024
        temp_array = b"\x00" * size  #  noqa: F841

        print("*" * 74)
        print(f"AFTER CREATING AN ARRAY of {size_mb} MBs:")
        end_mem = get_memory()
        for key, end_value in end_mem.items():
            start_value = start_mem[key]
            print(
                "{}: {} -> {} (DELTA = {} = {:.2f} MB)".format(
                    key,
                    start_value,
                    end_value,
                    end_value - start_value,
                    (end_value - start_value) / 1024.0 / 1024.0,
                )
            )
        del temp_array

        print("*" * 74)
        print("AFTER DELETING THE ARRAY:")
        end_mem = get_memory()
        for key, end_value in end_mem.items():
            start_value = start_mem[key]
            print(
                "{}: {} -> {} (DELTA = {} = {:.2f} MB)".format(
                    key,
                    start_value,
                    end_value,
                    end_value - start_value,
                    (end_value - start_value) / 1024.0 / 1024.0,
                )
            )
        print("*" * 74)

    container = Container(path)
    if clear:
        print("Clearing the container...")
        container.init_container(clear=clear)
    if not container.is_initialised:
        print("Initialising the container...")
        container.init_container()

    function = profile(main_run) if with_line_profiler else main_run
    if check_memory_measurement:
        memory_check_interval = 0.01  # seconds
        # memory_report will be a list of memory every 'interval'
        memory_report = memory_usage(
            (
                function,
                tuple(),
                {
                    "container": container,
                    "size_gb": size_gb,
                    "compress_packs": compress_packs,
                },
            ),
            interval=memory_check_interval,
        )
        # Check that it's not an empty list
        assert memory_report, (
            ">> Process too fast for checking memory usage "
            "with interval {} s!!!".format(memory_check_interval)
        )
        print(
            ">> Max memory usage (check interval {} s, {} checks performed): {:.3f} MB".format(
                memory_check_interval, len(memory_report), max(memory_report)
            )
        )
    else:
        function(container=container, size_gb=size_gb, compress_packs=compress_packs)

    end_mem = get_memory()
    for key, end_value in end_mem.items():
        start_value = start_mem[key]
        print(
            "{}: {} -> {} (DELTA = {} = {:.2f} MB)".format(
                key,
                start_value,
                end_value,
                end_value - start_value,
                (end_value - start_value) / 1024.0 / 1024.0,
            )
        )
Example no. 29
def magic_mprun(self, parameter_s=''):
    """ Execute a statement under the line-by-line memory profiler from the
    memory_profiler module.

    Usage:
      %mprun -f func1 -f func2 <statement>

    The given statement (which doesn't require quote marks) is run via the
    LineProfiler. Profiling is enabled for the functions specified by the -f
    options. The statistics will be shown side-by-side with the code through
    the pager once the statement has completed.

    Options:

    -f <function>: LineProfiler only profiles functions and methods it is told
    to profile.  This option tells the profiler about these functions. Multiple
    -f options may be used. The argument may be any expression that gives
    a Python function or method object. However, one must be careful to avoid
    spaces that may confuse the option parser. Additionally, functions defined
    in the interpreter at the In[] prompt or via %run currently cannot be
    displayed.  Write these functions out to a separate file and import them.

    One or more -f options are required to get any useful results.

    -T <filename>: dump the text-formatted statistics with the code
    side-by-side out to a text file.

    -r: return the LineProfiler object after it has completed profiling.

    -c: If present, add the memory usage of any child processes to the report.
    """
    try:
        from StringIO import StringIO
    except ImportError:  # Python 3.x
        from io import StringIO

    # Local imports to avoid hard dependency.
    from distutils.version import LooseVersion
    import IPython
    ipython_version = LooseVersion(IPython.__version__)
    if ipython_version < '0.11':
        from IPython.genutils import page
        from IPython.ipstruct import Struct
        from IPython.ipapi import UsageError
    else:
        from IPython.core.page import page
        from IPython.utils.ipstruct import Struct
        from IPython.core.error import UsageError

    # Escape quote markers.
    opts_def = Struct(T=[''], f=[])
    parameter_s = parameter_s.replace('"', r'\"').replace("'", r"\'")
    opts, arg_str = self.parse_options(parameter_s, 'rf:T:c', list_all=True)
    opts.merge(opts_def)
    global_ns = self.shell.user_global_ns
    local_ns = self.shell.user_ns

    # Get the requested functions.
    funcs = []
    for name in opts.f:
        try:
            funcs.append(eval(name, global_ns, local_ns))
        except Exception as e:
            raise UsageError('Could not find function %r.\n%s: %s' % (name,
                             e.__class__.__name__, e))

    include_children = 'c' in opts
    profile = LineProfiler(include_children=include_children)
    for func in funcs:
        profile(func)

    # Add the profiler to the builtins for @profile.
    try:
        import builtins
    except ImportError:  # Python 2.x
        import __builtin__ as builtins

    if 'profile' in builtins.__dict__:
        had_profile = True
        old_profile = builtins.__dict__['profile']
    else:
        had_profile = False
        old_profile = None
    builtins.__dict__['profile'] = profile

    try:
        try:
            profile.runctx(arg_str, global_ns, local_ns)
            message = ''
        except SystemExit:
            message = "*** SystemExit exception caught in code being profiled."
        except KeyboardInterrupt:
            message = ("*** KeyboardInterrupt exception caught in code being "
                       "profiled.")
    finally:
        if had_profile:
            builtins.__dict__['profile'] = old_profile

    # Trap text output.
    stdout_trap = StringIO()
    show_results(profile, stdout_trap)
    output = stdout_trap.getvalue()
    output = output.rstrip()

    if ipython_version < '0.11':
        page(output, screen_lines=self.shell.rc.screen_length)
    else:
        page(output)
    print(message,)

    text_file = opts.T[0]
    if text_file:
        with open(text_file, 'w') as pfile:
            pfile.write(output)
        print('\n*** Profile printout saved to text file %s. %s' % (text_file,
                                                                    message))

    return_value = None
    if 'r' in opts:
        return_value = profile

    return return_value
Example no. 30
from pathlib import Path
import logging
from datetime import datetime
from memory_profiler import profile
import yass
from yass import templates
import settings

if __name__ == '__main__':
    settings.run()
    start = datetime.now()
    logger = logging.getLogger(__name__)

    CONFIG = yass.read_config()

    logger.info('Templates started at second: %.2f',
                (datetime.now() - start).total_seconds())

    DIRECTORY = Path(CONFIG.data.root_folder, 'profiling')

    spike_train_cluster = str(DIRECTORY / 'spike_train_cluster.npy')

    profile(templates.run.__wrapped__)(spike_train_cluster,
                                       output_directory='profiling',
                                       if_file_exists='overwrite',
                                       save_results=True)

    logger.info('Templates finished at second: %.2f',
                (datetime.now() - start).total_seconds())
Example no. 31
    namespace = {}
    t1 = time()
    with io.open(script, encoding="utf8") as f:
        exec(f.read(), namespace)
    t2 = time()
    print("That took %.2f seconds to run" % (t2 - t1))
    plt.close('all')
    del namespace


# Execute each python script in the directory:
for script in validated_examples:
    figure_basename = op.join('fig', op.splitext(script)[0])
    if use_memprof:
        print("memory profiling ", script)
        memory_profiler.profile(run_script)()

    else:
        print('*************************************************************')
        print(script)
        print('*************************************************************')
        run_script()

if use_xvfb:
    display.stop()

# clean up stray images, pickles, npy files, etc
for globber in ('*.nii.gz', '*.dpy', '*.npy', '*.pkl', '*.mat', '*.img',
                '*.hdr'):
    for fname in glob(globber):
        os.unlink(fname)
Example no. 32
    top = TopN(args.n)

    for number in reader:
        top.push(number)

    top_n = top.get_top_n()

    print("\n".join(map(str, top_n)))


if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument("path", type=str)
    parser.add_argument(
        "-n",
        help="the top n integers to count",
        required=True
    )
    parser.add_argument(
        "--mem-profile",
        action='store_true',
        help="profile the script to examine its memory performance"
    )

    args = parser.parse_args()

    if args.mem_profile:
        profile(run)(args)
    else:
        run(args)
Example no. 33
 def inner_wrapper(f):
     return memory_profiler.profile(f)
Example no. 34
def main(size_gb, path, clear, check_memory_measurement, with_line_profiler,
         compress_packs):
    """Testing performance and size on disk when storing a single big file containing only zeros."""
    start_mem = get_memory()

    if check_memory_measurement:  # To test that the measurement of the memory is reliable
        # Test of memory allocation
        size_mb = 400
        size = size_mb * 1024 * 1024
        temp_array = b'\x00' * size  #  noqa: F841

        print('*' * 74)
        print('AFTER CREATING AN ARRAY of {} MBs:'.format(size_mb))
        end_mem = get_memory()
        for key in end_mem:
            print('{}: {} -> {} (DELTA = {} = {:.2f} MB)'.format(
                key, start_mem[key], end_mem[key],
                end_mem[key] - start_mem[key],
                (end_mem[key] - start_mem[key]) / 1024. / 1024.))
        del temp_array

        print('*' * 74)
        print('AFTER DELETING THE ARRAY:')
        end_mem = get_memory()
        for key in end_mem:
            print('{}: {} -> {} (DELTA = {} = {:.2f} MB)'.format(
                key, start_mem[key], end_mem[key],
                end_mem[key] - start_mem[key],
                (end_mem[key] - start_mem[key]) / 1024. / 1024.))
        print('*' * 74)

    container = Container(path)
    if clear:
        print('Clearing the container...')
        container.init_container(clear=clear)
    if not container.is_initialised:
        print('Initialising the container...')
        container.init_container()

    function = profile(main_run) if with_line_profiler else main_run
    if check_memory_measurement:
        memory_check_interval = 0.01  # seconds
        # memory_report will be a list of memory every 'interval'
        memory_report = memory_usage((function, tuple(), {
            'container': container,
            'size_gb': size_gb,
            'compress_packs': compress_packs
        }),
                                     interval=memory_check_interval)
        # Check that it's not an empty list
        assert memory_report, (
            '>> Process too fast for checking memory usage '
            'with interval {} s!!!'.format(memory_check_interval))
        print(
            '>> Max memory usage (check interval {} s, {} checks performed): {:.3f} MB'
            .format(memory_check_interval, len(memory_report),
                    max(memory_report)))
    else:
        function(container=container,
                 size_gb=size_gb,
                 compress_packs=compress_packs)

    end_mem = get_memory()
    for key in end_mem:
        print('{}: {} -> {} (DELTA = {} = {:.2f} MB)'.format(
            key, start_mem[key], end_mem[key], end_mem[key] - start_mem[key],
            (end_mem[key] - start_mem[key]) / 1024. / 1024.))
Example no. 35
    def __call__( self, *args ):
        ''' calling an instance of this class (i.e. the decorated function) triggers this '''
        outf = self.getFilestream(*args)
        self.func = profile(self.func, stream=outf)

        self.func( self )
Example no. 36
def activate(func):
    if MEMORY_PROFILING:
        return profile(func)
    else:
        return func
Example no. 37
 def inner_wrapper(f):
     return memory_profiler.profile(f)
Example no. 38
from pathlib import Path
import logging
from datetime import datetime
from memory_profiler import profile
import yass
from yass import deconvolute
import settings

if __name__ == '__main__':
    settings.run()
    start = datetime.now()
    logger = logging.getLogger(__name__)

    CONFIG = yass.read_config()

    logger.info('Deconvolution started at second: %.2f',
                (datetime.now() - start).total_seconds())

    DIRECTORY = Path(CONFIG.data.root_folder, 'profiling')

    spike_index_all = str(DIRECTORY / 'spike_index_all.npy')
    templates = str(DIRECTORY / 'templates.npy')

    profile(deconvolute.run)(spike_index_all,
                             templates,
                             output_directory='profiling')

    logger.info('Deconvolution finished at second: %.2f',
                (datetime.now() - start).total_seconds())
Example no. 39
from sums.sum_iter import sum_iter
from sums.sum_data import DATA

if __name__ == '__main__':
    import timeit, cProfile
    print(
        'Duration:',
        timeit.timeit("sum_iter(DATA)",
                      number=1,
                      setup="""
from __main__ import sum_iter
from __main__ import DATA
                        """))

    cProfile.run('sum_iter(DATA)')

    print('Memory profiling')

    from memory_profiler import profile

    profile(sum_iter)(DATA)
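The sums package is specific to that project; a minimal stand-in for sum_iter and DATA (both assumptions) that makes the snippet above runnable:

# sums/sum_iter.py -- hypothetical stand-in
def sum_iter(data):
    total = 0
    for value in data:
        total += value
    return total

# sums/sum_data.py -- hypothetical stand-in
DATA = list(range(1_000_000))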