Example #1
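    # PlexPy APIv2 dispatcher: validates the request, optionally profiles the
    # requested command with profilehooks, and normalises the result to a
    # dict/list before serialising it.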
    def _api_run(self, *args, **kwargs):
        """ handles the stuff from the handler """

        result = {}
        logger.debug(u'PlexPy APIv2 :: Original kwargs was %s' % kwargs)

        self._api_validate(**kwargs)

        if self._api_cmd and self._api_authenticated:
            call = getattr(self, self._api_cmd)

            # Profile is written to console.
            if self._api_profileme:
                from profilehooks import profile
                call = profile(call, immediate=True)

            # We allow this to fail so we get a
            # traceback in the browser
            if self._api_debug:
                result = call(**self._api_kwargs)
            else:
                try:
                    result = call(**self._api_kwargs)
                except Exception as e:
                    logger.error(u'PlexPy APIv2 :: Failed to run %s %s %s' %
                                 (self._api_cmd, self._api_kwargs, e))

        ret = None
        # The api decorated function can return different result types.
        # convert it to a list/dict before we change it to the user's
        # wanted output
        try:
            if isinstance(result, (dict, list)):
                ret = result
            else:
                raise
        except:
            try:
                ret = json.loads(result)
            except (ValueError, TypeError):
                try:
                    ret = xmltodict.parse(result, attr_prefix='')
                except:
                    pass

        # Fallback if we can't "parse the response"
        if ret is None:
            ret = result

        if ret or self._api_result_type == 'success':
            # To allow override for restart etc
            # if the call returns some data we are gonna assume it's a success
            self._api_result_type = 'success'
        else:
            self._api_result_type = 'error'

        return self._api_out_as(
            self._api_responds(result_type=self._api_result_type,
                               msg=self._api_msg,
                               data=ret))
Example #2
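    # Decorator that prints start/finish timing, optionally samples with
    # statprof, and (when 'profile' is set) wraps the whole inner function
    # with profilehooks.profile(immediate=True).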
    def deco(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            if deco_kwargs.get('traceback'):
                traceback.print_stack()
            print('starting %s' % fn.__name__)
            start = time.time()
            stat_profile = deco_kwargs.get('stat_profile')
            if stat_profile:
                import statprof
                statprof.reset(frequency=1000)
                statprof.start()
            try:
                return fn(*args, **kwargs)
            finally:
                fn_name = fn.__name__
                print('finished %s in %.3f s' % (fn_name, time.time() - start))
                if stat_profile:
                    statprof.stop()
                    statprof.display()

        if deco_kwargs.get('profile'):
            import profilehooks
            inner = profilehooks.profile(immediate=True)(inner)
        return inner
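
The fragment above is only the inner half of a decorator factory: deco_kwargs is a closure variable from an enclosing function that the listing cuts off. A minimal self-contained sketch of how such a factory could look (the outer name timed is hypothetical, not from the source):

from functools import wraps
import time

def timed(**deco_kwargs):  # hypothetical outer factory supplying deco_kwargs
    def deco(fn):
        @wraps(fn)
        def inner(*args, **kwargs):
            print('starting %s' % fn.__name__)
            start = time.time()
            try:
                return fn(*args, **kwargs)
            finally:
                print('finished %s in %.3f s' % (fn.__name__, time.time() - start))
        if deco_kwargs.get('profile'):
            import profilehooks
            # immediate=True prints the profile as soon as the call returns
            inner = profilehooks.profile(immediate=True)(inner)
        return inner
    return deco

@timed(profile=True)
def busy():
    return sum(i * i for i in range(10 ** 6))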
Example #3
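# CLI for computing Zernike moments on a VTK surface: --profile wraps
# zernike_moments with profilehooks.profile (stdout or a stats file) and
# --timecall wraps it with profilehooks.timecall.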
def main():
    zernike_fn = zernike_moments

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', nargs='?', default=None, const='debug', choices=['debug', 'info', 'warning', 'error', 'critical']) 
    parser.add_argument('vtk_file', nargs='?', default=None)
    parser.add_argument('-o', '--order', type=int, default=3)
    parser.add_argument('-p', '--profile', nargs='?', default=None, const='stdout')
    parser.add_argument('-t', '--timecall', default=False, action='store_true')
    parser.add_argument('-v', '--validate', default=False, action='store_true')
    ns = parser.parse_args()

    if ns.debug is not None:
        logging.basicConfig(level=getattr(logging, ns.debug.upper()))

    if ns.profile is not None:
        filename = ns.profile
        if ns.profile == 'stdout':
            filename = None
        zernike_fn = profilehooks.profile(zernike_fn, immediate=False, filename=filename)

    if ns.timecall:
        zernike_fn = profilehooks.timecall(zernike_fn)

    if ns.vtk_file is not None:
        points, indices, lines, faces, depths, scalar_names, npoints, \
            input_vtk = read_vtk(ns.vtk_file)
        print(len(faces), len(points))
        X = zernike_fn(points, faces, order=ns.order, scale_input=True)
        if ns.validate:
            Y = zernike_fn(points, faces, order=ns.order, scale_input=True, pl_cls=MultiprocPipeline)
            assert np.allclose(X, Y)
    else:
        example1()
Example #4
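# CloudFusion mount entry point (Python 2): if 'profile' is among the extra
# arguments, every public method of TransparentConfigurablePyFuseBox is
# wrapped with profilehooks.profile before the FUSE filesystem is mounted.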
def main():
    signal.signal(signal.SIGUSR1, handle_ipdb)
    set_umask()
    check_arguments(sys.argv)
    if sys.argv[1] == 'uuid':
        print "You can use the following unique name as your bucket name for amazon S3 or google storage:"
        print "cloudfusion_" + get_uuid()
        sys.exit()
    parser = MyParser()
    parser.add_argument('mountpoint')
    parser.add_argument('--config', help='Configuration file.')
    parser.add_argument(
        'args', nargs=argparse.REMAINDER
    )  #collect all arguments positioned after positional and optional parameters
    args = parser.parse_args()
    foreground = 'foreground' in args.args
    profiling_enabled = 'profile' in args.args
    mountpoint = args.mountpoint
    if "stop" in args.args:
        start_stopping_thread(mountpoint)
        exit(0)
    if not "log" in args.args:
        logging.getLogger().addHandler(NullHandler())
    else:
        if not os.path.exists(".cloudfusion/logs"):
            os.makedirs(".cloudfusion/logs")
        logging.config.fileConfig(
            os.path.dirname(cloudfusion.__file__) + '/config/logging.conf')
        db_logging_thread.start()
    if args.config:  # skipped when no --config was given
        if not os.path.exists(args.config):
            exit(1)
        start_configuration_thread(mountpoint, args.config)
    if not os.path.exists(mountpoint):
        os.makedirs(mountpoint)
    if profiling_enabled:
        import inspect
        from profilehooks import profile
        import types
        for name, fn in inspect.getmembers(TransparentConfigurablePyFuseBox):
            if isinstance(fn, types.UnboundMethodType):
                if not name.startswith('_'):
                    setattr(TransparentConfigurablePyFuseBox, name,
                            profile(fn, filename='/tmp/cloudfusion_profile'))
    fuse_operations = TransparentConfigurablePyFuseBox(mountpoint)
    try:
        #first try to mount file system with big_writes option (more performant)
        FUSE(fuse_operations,
             mountpoint,
             foreground=foreground,
             nothreads=True,
             big_writes=True,
             max_read=131072,
             max_write=131072)
    except RuntimeError, e:
        FUSE(fuse_operations,
             mountpoint,
             foreground=foreground,
             nothreads=True)
Example #5
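        # Inner wrapper of a profiling decorator: prints the call arguments,
        # then profiles this single invocation. func and immediate come from
        # an enclosing scope not shown here.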
        def wrapper(*args, **kwargs):

            print(
                "Function {!r} got arguments {!r} and keyword arguments {!r}".
                format(func.__name__, args, kwargs))
            profiled_func = profilehooks.profile(func, immediate=immediate)
            result = profiled_func(*args, **kwargs)
            return result
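
Only the inner wrapper survives in this fragment. A self-contained sketch under the assumption that func and immediate come from an enclosing decorator factory (the outer names are hypothetical):

import profilehooks

def traced_profile(immediate=False):  # hypothetical outer factory
    def decorator(func):
        def wrapper(*args, **kwargs):
            print(
                "Function {!r} got arguments {!r} and keyword arguments {!r}".
                format(func.__name__, args, kwargs))
            # Wrapping inside the call creates a fresh profiler per invocation
            profiled_func = profilehooks.profile(func, immediate=immediate)
            return profiled_func(*args, **kwargs)
        return wrapper
    return decorator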
Example #6
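    # A near-identical variant of the dispatcher in Example #1.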
    def _api_run(self, *args, **kwargs):
        """ handles the stuff from the handler """

        result = {}
        logger.debug(u"PlexPy APIv2 :: API called with kwargs: %s" % kwargs)

        self._api_validate(**kwargs)

        if self._api_cmd and self._api_authenticated:
            call = getattr(self, self._api_cmd)

            # Profile is written to console.
            if self._api_profileme:
                from profilehooks import profile

                call = profile(call, immediate=True)

            # We allow this to fail so we get a
            # traceback in the browser
            if self._api_debug:
                result = call(**self._api_kwargs)
            else:
                try:
                    result = call(**self._api_kwargs)
                except Exception as e:
                    logger.error(u"PlexPy APIv2 :: Failed to run %s %s %s" % (self._api_cmd, self._api_kwargs, e))

        ret = None
        # The api decorated function can return different result types.
        # convert it to a list/dict before we change it to the user's
        # wanted output
        try:
            if isinstance(result, (dict, list)):
                ret = result
            else:
                raise
        except:
            try:
                ret = json.loads(result)
            except (ValueError, TypeError):
                try:
                    ret = xmltodict.parse(result, attr_prefix="")
                except:
                    pass

        # Fallback if we can't "parse the response"
        if ret is None:
            ret = result

        if ret or self._api_result_type == "success":
            # To allow override for restart etc
            # if the call returns some data we are gonna assume it's a success
            self._api_result_type = "success"
        else:
            self._api_result_type = "error"

        return self._api_out_as(self._api_responds(result_type=self._api_result_type, msg=self._api_msg, data=ret))
Example #7
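 # Re-wraps 'old' on every call, forcing the pure-Python 'profile' profiler;
 # the remaining options are closure variables from an enclosing scope.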
 def new(*args, **kwargs):
     return profilehooks.profile(old,
                                 skip=skip,
                                 filename=filename,
                                 immediate=immediate,
                                 dirs=dirs,
                                 sort=sort,
                                 entries=entries,
                                 profiler=('profile',))(*args, **kwargs)
Example #8
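 # Redirects stdout to a StringIO while profiling, then logs the captured
 # profile only if the call took at least 'threshold' seconds.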
 def wrapped(self, *args, **kwargs):
     old_stdout = sys.stdout
     my_stdout = sys.stdout = StringIO()
     start = time.time()
     res = profile(f, immediate=True, sort=['cumulative'],
         entries=80)(self, *args, **kwargs)
     end = time.time()
     sys.stdout = old_stdout
     if end - start >= threshold:
         for line in my_stdout.getvalue().split('\n'):
             logger.info(line)
     return res
Example #9
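 # Profiles f with immediate output inside a Tee context, which appears to
 # mirror the output into a file configured via Flask's current_app.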
 def g(*args, **kargs):
     with closing(Tee(
         os.path.join(
             current_app.config['AIP_TEMP_PATH'],
             current_app.config['AIP_PROFILE_NAME'],
         ),
         current_app.config['AIP_PROFILE_MODE'],
     )):
         return profile(
             immediate=True,
             sort=current_app.config['AIP_PROFILE_SORT'],
         )(f)(*args, **kargs)
Example #10
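# Another copy of the CloudFusion entry point from Example #4.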
def main():
    signal.signal(signal.SIGUSR1, handle_ipdb)
    set_umask()
    check_arguments(sys.argv)
    if sys.argv[1] == 'uuid':
        print "You can use the following unique name as your bucket name for amazon S3 or google storage:"
        print "cloudfusion_"+get_uuid()
        sys.exit()
    parser = MyParser()
    parser.add_argument('mountpoint')
    parser.add_argument('--config', help='Configuration file.')
    parser.add_argument('args', nargs=argparse.REMAINDER) #collect all arguments positioned after positional and optional parameters 
    args = parser.parse_args()
    foreground = 'foreground' in args.args
    profiling_enabled = 'profile' in args.args
    mountpoint = args.mountpoint
    if "stop" in args.args:
        start_stopping_thread(mountpoint)
        exit(0)
    if not "log" in args.args:
        logging.getLogger().addHandler(NullHandler())
    else:
        if not os.path.exists(".cloudfusion/logs"):
            os.makedirs(".cloudfusion/logs")
        logging.config.fileConfig(os.path.dirname(cloudfusion.__file__)+'/config/logging.conf')
        db_logging_thread.start()    
    if args.config:  # skipped when no --config was given
        if not os.path.exists(args.config):
            exit(1)
        start_configuration_thread(mountpoint, args.config)
    if not os.path.exists(mountpoint):
        os.makedirs(mountpoint)
    if profiling_enabled:
        import inspect
        from profilehooks import profile
        import types
        for name, fn in inspect.getmembers(TransparentConfigurablePyFuseBox):
            if isinstance(fn, types.UnboundMethodType):
                if not name.startswith('_'):
                    setattr(TransparentConfigurablePyFuseBox, name, profile(fn, filename='/tmp/cloudfusion_profile'))
    fuse_operations = TransparentConfigurablePyFuseBox(mountpoint)
    try:
        #first try to mount file system with big_writes option (more performant)
        FUSE(fuse_operations, mountpoint, foreground=foreground, nothreads=True, big_writes=True, max_read=131072, max_write=131072) 
    except RuntimeError, e:
        FUSE(fuse_operations, mountpoint, foreground=foreground, nothreads=True)
Example #11
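 # Variant of the decorator in Example #2 that discards the wrapped
 # function's result and returns the elapsed time instead.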
 def deco(fn):
     @wraps(fn)
     def inner(*args, **kwargs):
         if deco_kwargs.get('traceback'):
             traceback.print_stack()
         print('starting %s' % fn.__name__)
         start = time.time()
         stat_profile = deco_kwargs.get('stat_profile')
         if stat_profile:
             import statprof
             statprof.reset(frequency=10000)
             statprof.start()
         fn(*args, **kwargs)
         fn_time = time.time() - start
         print('finished %s in %s s' % (fn.__name__, fn_time))
         if stat_profile:
             statprof.stop()
             statprof.display()
         return fn_time
     if deco_kwargs.get('profile'):
         import profilehooks
         inner = profilehooks.profile(immediate=True)(inner)
     return inner
Example #12
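# Benchmark runner: the 'p' flag swaps the test's run function for a profiled
# one; the loop doubles n until at least two seconds have been measured.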
def bench_test(test):
    prepared = None
    if 'prepare_once' in test:
        prepared = test['prepare_once']()
        if 'h' in flags:
            print('-' * 62)

    if 'p' in flags:
        test['run'] = profile(test['run'])

    total = 0
    n = 1
    while total < 2:
        gc.disable()
        durations = [bench_once(test, prepared) for i in range(n)]
        gc.enable()

        if '1' in flags:
            break

        total = sum(d for _, d in durations)
        n *= 2

    return min(d for d, _ in durations)
Example #13
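# Another copy of the benchmark runner from Example #12.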
def bench_test(test):
    prepared = None
    if 'prepare_once' in test:
        prepared = test['prepare_once']()
        if 'h' in flags:
            print('-' * 62)

    if 'p' in flags:
        test['run'] = profile(test['run'])

    total = 0
    n = 1
    while total < 2:
        gc.disable()
        durations = [bench_once(test, prepared) for i in range(n)]
        gc.enable()

        if '1' in flags:
            break

        total = sum(d for _, d in durations)
        n *= 2

    return min(d for d, _ in durations)
Example #14
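    # Dispatcher revision: the command always runs under try/except, and in
    # debug mode the exception is re-raised so the traceback reaches the
    # browser via cherrypy.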
    def _api_run(self, *args, **kwargs):
        """ handles the stuff from the handler """

        result = {}
        logger.debug(u'PlexPy APIv2 :: API called with kwargs: %s' % kwargs)

        self._api_validate(**kwargs)

        if self._api_cmd and self._api_authenticated:
            call = getattr(self, self._api_cmd)

            # Profile is written to console.
            if self._api_profileme:
                from profilehooks import profile
                call = profile(call, immediate=True)

            # We allow this to fail so we get a
            # traceback in the browser
            try:

                result = call(**self._api_kwargs)
            except Exception as e:
                logger.error(u'PlexPy APIv2 :: Failed to run %s with %s: %s' % (self._api_cmd, self._api_kwargs, e))
                if self._api_debug:
                    cherrypy.request.show_tracebacks = True
                    # Reraise the exception so the traceback hits the browser
                    raise
                self._api_msg = 'Check the logs'

        ret = None
        # The api decorated function can return different result types.
        # convert it to a list/dict before we change it to the user's
        # wanted output
        try:
            if isinstance(result, (dict, list)):
                ret = result
            else:
                raise
        except:
            try:
                ret = json.loads(result)
            except (ValueError, TypeError):
                try:
                    ret = xmltodict.parse(result, attr_prefix='')
                except:
                    pass

        # Fallback if we can't "parse the response"
        if ret is None:
            ret = result

        if ret or self._api_result_type == 'success':
            # To allow override for restart etc
            # if the call returns some data we are gonna assume it's a success
            self._api_result_type = 'success'
        else:
            self._api_result_type = 'error'

        # Since some of the methods use an API-like response for the ui
        # {result: error, message: 'Some shit happened'}
        if isinstance(ret, dict):
            if ret.get('message'):
                self._api_msg = ret.get('message', {})
                ret = {}

            if ret.get('result'):
                self._api_result_type = ret.get('result')

        return self._api_out_as(self._api_responds(result_type=self._api_result_type, msg=self._api_msg, data=ret))
Example #15
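# Profiling scratch script: profiles a pyFAI-based binner call that reduces
# the image with a numba-compiled np.max; the trailing comments are recorded
# timings.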
from tifffile import imread
from xpdtools.tools import map_to_binner, generate_map_bin
from profilehooks import profile
import pyFAI
from numba import jit
import numpy as np

geo = pyFAI.load("test.poni")
img = imread("test.tiff")

bo = map_to_binner

binner = bo(*generate_map_bin(geo, img.shape))
f = profile(binner.__call__)
a = binner.xy_argsort


@jit(nopython=True, cache=True)
def b(data):
    return np.max(data)


f(img.flatten(), statistic=np.max)

# median
# standard .255
# numba .2
Example #16
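 # Returns fn profiled into args.profile, with console output suppressed.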
 def benchmark(fn):
     return profile(fn, immediate=True, filename=args.profile, stdout=None)
Example #17
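 # Same helper as Example #16, merely wrapped across several lines.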
 def benchmark(fn):
     return profile(fn,
                    immediate=True,
                    filename=args.profile,
                    stdout=None)
Example #18
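# Profiles dragonboard's read() while loading a single data file.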
from profilehooks import profile
from dragonboard import read

read = profile(read)
l = read('./Ped444706_1.dat')
Example #19
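# Profiling scratch script for binned_outlier; skip=1 leaves the first
# (warm-up) call out of the statistics, and the loop runs it twice.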
from tifffile import imread
from xpdtools.tools import binned_outlier, generate_binner, mask_img
from profilehooks import profile
import pyFAI

geo = pyFAI.load('test.poni')
img = imread('test.tiff')

bo = profile(binned_outlier, skip=1)
# bo = binned_outlier

binner = generate_binner(geo, img.shape)
a = binner.argsort_index
b = binner.flatcount

for i in range(2):
    bo(
        img,
        binner,
        # bs_width=None,
        mask_method='mean',
    )

# Median
# binned outlier
# median by itself .714
# median multithread .494
# numba median .270
# multithread numba .178

# mask_img
Example #20
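 # Profiles func only when the 'debug' preference is enabled; the wrapped
 # call's return value is discarded either way.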
 def profile_it(*args, **kwargs):
     if prefs['debug']:
         profile(func)(*args, **kwargs)
     else:
         func(*args, **kwargs)
Example #21
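# The 'vsm' command-line interface of topicexplorer (Python 2): -p/--profile
# wraps the selected subcommand's main() with profilehooks.profile and opens
# the stats in snakeviz; -t/--time wraps it with profilehooks.timecall.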
def main():
    parser = ThrowingArgumentParser()
    benchmark_group = parser.add_mutually_exclusive_group()
    benchmark_group.add_argument('-t',
                                 '--time',
                                 help="Print execution time",
                                 action='store_true')
    benchmark_group.add_argument('-p',
                                 '--profile',
                                 help="""Profile the command.
    Optional filename saves results for use with snakeviz, pstats, or
    cprofilev. Automatically launches snakeviz, if installed.""",
                                 nargs='?',
                                 metavar='STATS_FILE')

    # Using add_subparsers(metavar) until argparse.SUPPRESS support is fixed.
    # See issue http://bugs.python.org/issue22848
    parsers = parser.add_subparsers(
        help="select a command",
        parser_class=ArgumentParser,
        metavar='{version,demo,update,init,prep,train,launch,notebook}')
    version_parser = parsers.add_parser('version',
                                        help="Print the version and exit")
    version_parser.set_defaults(func='version')

    # Init Parser
    parser_init = parsers.add_parser('init',
                                     help="Initialize the topic explorer")
    init.populate_parser(parser_init)
    parser_init.set_defaults(func="init")

    # Prep Parser
    parser_prep = parsers.add_parser(
        'prep',
        help="Prep the corpus",
        formatter_class=argparse.RawDescriptionHelpFormatter)
    prep.populate_parser(parser_prep)
    parser_prep.set_defaults(func="prep")

    # Train Parser
    parser_train = parsers.add_parser('train', help="Train the LDA models")
    train.populate_parser(parser_train)
    parser_train.set_defaults(func="train")

    # Launch Parser
    parser_launch = parsers.add_parser('launch',
                                       help="Serve the trained LDA models")
    launch.populate_parser(parser_launch)
    parser_launch.set_defaults(func="launch")

    # Serve Parser
    parser_serve = parsers.add_parser(
        'serve',
        help="Serve a single LDA model, helper for `vsm launch`," +
        "rarely called directly")
    server.populate_parser(parser_serve)
    parser_serve.set_defaults(func="serve")

    # Notebook Parser
    parser_nb = parsers.add_parser('notebook',
                                   help="Create a set of IPython Notebooks")
    notebook.populate_parser(parser_nb)
    parser_nb.set_defaults(func="notebook")

    # Demo Parser
    parser_demo = parsers.add_parser('demo',
                                     help="Download and run the AP demo")
    parser_demo.set_defaults(func="demo")

    # Update Parser
    parser_update = parsers.add_parser('update',
                                       help="Update the Topic Explorer")
    parser_update.set_defaults(func="update")

    # Lang Space Parser
    parser_langspace = parsers.add_parser(
        'langspace', help="Add spaces before unicode chars")
    langspace.populate_parser(parser_langspace)
    parser_langspace.set_defaults(func="langspace")

    # fancy arg validation for manually injecting tempfile to profile arg
    try:
        try:
            args = parser.parse_args()
        except ArgumentParserError as e:
            import sys
            new_args = sys.argv[1:]
            try:
                # If the error was thrown by the '-p' argument not having a
                # valid file, fix by manually injecting a nargs break
                profile = new_args.index('-p')

                if (len(new_args) > (profile + 1)
                        and new_args[profile + 1] in parsers.choices.keys()):
                    new_args.insert(profile + 1, '-')
                    args = parser.parse_args(new_args)
                else:
                    raise e
            except ValueError:
                raise e
    except ArgumentParserError as e:
        import sys
        # Check to see if error occurs with a subparser and cause the exception
        # to arise from the subparser instead
        for p in parsers.choices.keys():
            if p in sys.argv[1:]:
                subargs_idx = sys.argv.index(p) + 1
                subargs = sys.argv[subargs_idx:]
                subparser = locals()['parser_' + p]
                # this might cause an error in the subparser, in which case
                # we actually want to show that error first
                args = subparser.parse_args(subargs)

        # Use the default error mechanism for the master parser.
        # If the code gets here, it means the error was not in a subparser
        ArgumentParser.error(parser, e.message)

    if args.profile:
        if args.profile == '-':
            import tempfile
            temphandle, args.profile = tempfile.mkstemp(suffix='.prof',
                                                        prefix='vsm.')
            print "Saving benchmark data to", args.profile

        from profilehooks import profile
        benchmark = lambda fn: profile(
            fn, immediate=True, filename=args.profile, stdout=None)

    elif args.time:
        from profilehooks import timecall
        benchmark = lambda fn: timecall(fn, immediate=False)
    else:
        benchmark = lambda fn: fn

    if args.func == 'version':
        from topicexplorer.version import __pretty_version__
        print __pretty_version__,

    elif args.func == 'init':
        args.config_file = benchmark(init.main)(args)

        print "\nTIP: Only initalizing corpus object and config file."
        print "     Next prepare the corpus using:"
        print "         vsm prep", args.config_file
        print "     Or skip directly to training LDA models using:"
        print "         vsm train", args.config_file

    elif args.func == 'prep':
        benchmark(prep.main)(args)

        print "\nTIP: Train the LDA models with:"
        print "         vsm train", args.config_file

    elif args.func == 'train':
        benchmark(train.main)(args)

        if not args.dry_run:
            print "\nTIP: launch the topic explorer with:"
            print "         vsm launch", args.config_file
            print "     or the notebook server with:"
            print "         vsm notebook", args.config_file

    elif args.func == 'launch':
        benchmark(launch.main)(args)

    elif args.func == 'serve':
        benchmark(server.main)(args)

    elif args.func == 'notebook':
        benchmark(notebook.main)(args)

    elif args.func == 'demo':
        benchmark(demo.main)(args)

    elif args.func == 'update':
        benchmark(update.main)(args)

    elif args.func == 'langspace':
        benchmark(langspace.main)(args)

    if args.profile:
        try:
            import snakeviz.cli
            print "\n\n"
            snakeviz.cli.main([args.profile])
        except ImportError:
            print """\nSnakeviz is not installed. Install with `pip install snakeviz`, 
            then run `snakeviz {}`.""".format(args.profile)
Example #22
    def _api_run(self, *args, **kwargs):
        """ handles the stuff from the handler """

        result = {}
        logger.debug(u'PlexPy APIv2 :: API called with kwargs: %s' % kwargs)

        self._api_validate(**kwargs)

        if self._api_cmd and self._api_authenticated:
            call = getattr(self, self._api_cmd)

            # Profile is written to console.
            if self._api_profileme:
                from profilehooks import profile
                call = profile(call, immediate=True)

            # We allow this to fail so we get a
            # traceback in the browser
            try:

                result = call(**self._api_kwargs)
            except Exception as e:
                logger.error(u'PlexPy APIv2 :: Failed to run %s with %s: %s' %
                             (self._api_cmd, self._api_kwargs, e))
                if self._api_debug:
                    cherrypy.request.show_tracebacks = True
                    # Reraise the exception so the traceback hits the browser
                    raise
                self._api_msg = 'Check the logs'

        ret = None
        # The api decorated function can return different result types.
        # convert it to a list/dict before we change it to the user's
        # wanted output
        try:
            if isinstance(result, (dict, list)):
                ret = result
            else:
                raise
        except:
            try:
                ret = json.loads(result)
            except (ValueError, TypeError):
                try:
                    ret = xmltodict.parse(result, attr_prefix='')
                except:
                    pass

        # Fallback if we can't "parse the response"
        if ret is None:
            ret = result

        if ret or self._api_result_type == 'success':
            # To allow override for restart etc
            # if the call returns some data we are gonna assume it's a success
            self._api_result_type = 'success'
        else:
            self._api_result_type = 'error'

        # Since some of the methods use an API-like response for the ui
        # {result: error, message: 'Some shit happened'}
        if isinstance(ret, dict):
            if ret.get('message'):
                self._api_msg = ret.get('message', {})
                ret = {}

            if ret.get('result'):
                self._api_result_type = ret.get('result')

        return self._api_out_as(
            self._api_responds(result_type=self._api_result_type,
                               msg=self._api_msg,
                               data=ret))
Example #23
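# Object-store benchmark: when profile_file is given, the bulk-read helper is
# wrapped with profile(sort='cumtime', filename=profile_file, stdout=False)
# so the results can be inspected later with snakeviz.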
def main(num_files, min_size, max_size, directly_to_pack, path, clear,
         num_bulk_calls, compress_packs, profile_file):
    """Testing some basic functionality of the object-store, with timing."""
    # pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches
    import random
    import time

    container = Container(path)
    if clear:
        print('Clearing the container...')
        container.init_container(clear=clear)
    if not container.is_initialised:
        print('Initialising the container...')
        container.init_container()

    files = {}

    start_counts = container.count_objects()
    print('Currently known objects: {} packed, {} loose'.format(
        start_counts['packed'], start_counts['loose']))
    print('Pack objects on disk:', start_counts['pack_files'])

    print('Generating {} files in memory...'.format(num_files))
    for _ in range(num_files):
        filename = 'filename-{}'.format(str(uuid.uuid4()))
        size = random.randint(min_size, max_size)
        content = os.urandom(size)
        files[filename] = content
    total_size = sum(len(content) for content in files.values())
    print('Done. Total size: {} bytes (~{:.3f} MB).'.format(
        total_size, (total_size // 1024) / 1024))

    if directly_to_pack:
        # Store objects (directly to pack)
        start = time.time()
        filenames = list(files.keys())
        files_content = [files[key] for key in filenames]
        hashkeys = container.add_objects_to_pack(files_content,
                                                 compress=compress_packs)
        hashkey_mapping = dict(zip(filenames, hashkeys))
        tot_time = time.time() - start
        print('Time to store {} objects DIRECTLY TO THE PACKS: {:.4} s'.format(
            num_files, tot_time))

        # Check that no loose files were created
        counts = container.count_objects()
        assert counts['loose'] == start_counts[
            'loose'], 'Mismatch (loose in packed case): {} != {}'.format(
                start_counts['loose'], counts['loose'])
        ## Cannot do this with the hash key implementation - I might have stored the same object twice
        #assert counts['packed'
        #             ] == start_counts['packed'] + num_files, 'Mismatch (packed in packed case): {} + {} != {}'.format(
        #                 start_counts['packed'], num_files, counts['packed']
        #             )
    else:
        # Store objects (loose)
        start = time.time()
        hashkey_mapping = {}
        for filename, content in files.items():
            obj_hashkey = container.add_object(content)
            hashkey_mapping[filename] = obj_hashkey
        tot_time = time.time() - start
        print('Time to store {} loose objects: {:.4} s'.format(
            num_files, tot_time))

        # Retrieve objects (loose)
        retrieved = {}
        random_keys = list(files.keys())
        random.shuffle(random_keys)
        start = time.time()
        for filename in random_keys:
            obj_hashkey = hashkey_mapping[filename]
            retrieved_content = container.get_object_content(obj_hashkey)
            retrieved[filename] = retrieved_content
        tot_time = time.time() - start
        print('Time to retrieve {} loose objects: {:.4} s'.format(
            num_files, tot_time))

        # Check that the content is correct
        for filename in retrieved:
            assert retrieved[filename] == files[
                filename], 'Mismatch (content) for {}, {} vs {}'.format(
                    filename, retrieved[filename], files[filename])

        # Check that num_files new loose files are present now
        counts = container.count_objects()
        ## I cannot do this because I could have overlap if the object is identical and has the same hash key
        #assert counts['loose'
        #             ] == start_counts['loose'] + num_files, 'Mismatch (loose in unpacked case): {} + {} != {}'.format(
        #                 start_counts['loose'], num_files, counts['loose']
        #             )

        # Print container size info (before packing)
        size_info = container.get_total_size()
        print('Object store size info:')
        for key in sorted(size_info.keys()):
            print('- {:30s}: {}'.format(key, size_info[key]))

        # Pack all loose objects
        start = time.time()
        container.pack_all_loose(compress=compress_packs)
        tot_time = time.time() - start
        print('Time to pack all loose objects: {:.4} s'.format(tot_time))
        start = time.time()
        container.clean_storage()
        tot_time = time.time() - start
        print('Time to clean storage: {:.4} s'.format(tot_time))

        # Check that all loose files are gone
        counts = container.count_objects()
        assert not counts['loose'], 'loose objects left: {}'.format(
            os.listdir(container._get_loose_folder()))  # pylint: disable=protected-access
        ## I cannot do this because I could have overlap if the object is identical and has the same hash key
        #assert counts['packed'] == start_counts['packed'] + start_counts[
        #    'loose'] + num_files, 'Mismatch (post-pack): {} + {} + {} != {}'.format(
        #        start_counts['packed'], start_counts['loose'], num_files, counts['packed']
        #    )

    # print container size info
    size_info = container.get_total_size()
    print('Object store size info:')
    for key in sorted(size_info.keys()):
        print('- {:30s}: {}'.format(key, size_info[key]))

    # In all cases, retrieve all objects (in shuffled order)
    retrieved = {}
    random_keys = list(files.keys())
    random.shuffle(random_keys)

    # Will be needed later
    reverse_hashkey_mapping = {v: k for k, v in hashkey_mapping.items()}

    ## If you want to flush to disk and drop all disk caches, uncomment this part
    ## (note that this works on Linux only, and this requires that `sudo` has already
    ## been run earlier, so it does not ask for a password):
    # import subprocess
    # subprocess.check_output(["sync"])
    # subprocess.check_output(["sudo", "bash", "-c", "echo 3 > /proc/sys/vm/drop_caches"])

    ########################################
    # FIRST: single bulk read
    def bulk_read_data(container, hashkey_list):
        """A function to read the data in bulk.

        It's defined as a function so it can be profiled."""
        return container.get_objects_content(hashkey_list,
                                             skip_if_missing=False)

    all_hashkeys = [hashkey_mapping[filename] for filename in random_keys]
    start = time.time()

    if profile_file is not None:
        func = profile(sort='cumtime', filename=profile_file,
                       stdout=False)(bulk_read_data)
    else:
        func = bulk_read_data
    raw_retrieved = func(container=container, hashkey_list=all_hashkeys)
    if profile_file is not None:
        print(
            "You can check the profiling results running 'snakeviz {}'".format(
                profile_file))

    tot_time = time.time() - start
    print(
        'Time to retrieve {} packed objects in random order WITH ONE BULK CALL: {} s'
        .format(num_files, tot_time))
    retrieved = {
        reverse_hashkey_mapping[key]: val
        for key, val in raw_retrieved.items()
    }
    for filename in retrieved:
        assert retrieved[filename] == files[
            filename], 'Mismatch for {}'.format(filename)

    ########################################
    # SECOND: num_bulk_calls bulk reads
    random.shuffle(random_keys)
    all_hashkeys = [hashkey_mapping[filename] for filename in random_keys]
    start = time.time()
    raw_retrieved = {}

    # Split the list into num_bulk_call even chunks
    chunk_len = len(all_hashkeys) // num_bulk_calls
    if len(all_hashkeys) % num_bulk_calls != 0:
        chunk_len += 1
    split_iterator = (all_hashkeys[start:start + chunk_len]
                      for start in range(0, len(all_hashkeys), chunk_len))

    # Retrieve in num_bulk_call chunks
    for chunk_of_hashkeys in split_iterator:
        raw_retrieved.update(
            container.get_objects_content(chunk_of_hashkeys,
                                          skip_if_missing=False))

    tot_time = time.time() - start
    print(
        'Time to retrieve {} packed objects in random order WITH {} BULK CALLS: {} s'
        .format(num_files, num_bulk_calls, tot_time))
    retrieved = {
        reverse_hashkey_mapping[key]: val
        for key, val in raw_retrieved.items()
    }
    for filename in retrieved:
        assert retrieved[filename] == files[
            filename], 'Mismatch for {}'.format(filename)

    ########################################
    # THIRD: a lot of independent reads, one per object
    random.shuffle(random_keys)
    retrieved = {}
    start = time.time()
    for filename in random_keys:
        obj_hashkey = hashkey_mapping[filename]
        retrieved_content = container.get_object_content(obj_hashkey)
        retrieved[filename] = retrieved_content
    tot_time = time.time() - start
    print('Time to retrieve {} packed objects in random order: {} s'.format(
        num_files, tot_time))

    for filename in retrieved:
        assert retrieved[filename] == files[
            filename], 'Mismatch (content) for {}, {} vs {}'.format(
                filename, retrieved[filename], files[filename])

    print('All tests passed')
Example #24
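# A uuid-based variant of the object-store benchmark in Example #23.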
def main(num_files, min_size, max_size, directly_to_pack, path, clear,
         num_bulk_calls, compress_packs, profile_file):
    """Testing some basic functionality of the object-store, with timing."""
    # pylint: disable=too-many-arguments,too-many-locals,too-many-statements,too-many-branches
    import random
    import time

    container = Container(path)
    if clear:
        print('Clearing the container...')
        container.init_container(clear=clear)
    if not container.is_initialised:
        print('Initialising the container...')
        container.init_container()

    files = {}

    start_counts = container.count_objects()
    print('Currently known objects: {} packed, {} loose'.format(
        start_counts['packed'], start_counts['loose']))
    print('Pack objects on disk:', start_counts['pack_files'])

    print('Generating {} files in memory...'.format(num_files))
    for _ in range(num_files):
        filename = 'filename-{}'.format(str(uuid.uuid4()))
        size = random.randint(min_size, max_size)
        content = bytearray(random.getrandbits(8) for _ in range(size))
        files[filename] = content
    total_size = sum(len(content) for content in files.values())
    print('Done. Total size: {} bytes (~{:.3f} MB).'.format(
        total_size, (total_size // 1024) / 1024))

    if directly_to_pack:
        # Store objects (directly to pack)
        start = time.time()
        filenames = list(files.keys())
        files_content = [files[key] for key in filenames]
        uuids = container.add_objects_to_pack(files_content,
                                              compress=compress_packs)
        uuid_mapping = dict(zip(filenames, uuids))
        tot_time = time.time() - start
        print('Time to store {} objects DIRECTLY TO THE PACKS: {:.4} s'.format(
            num_files, tot_time))

        # Check that no loose files were created
        counts = container.count_objects()
        assert counts['loose'] == start_counts[
            'loose'], 'Mismatch (loose in packed case): {} != {}'.format(
                start_counts['loose'], counts['loose'])
        assert counts['packed'] == start_counts[
            'packed'] + num_files, 'Mismatch (packed in packed case): {} + {} != {}'.format(
                start_counts['packed'], num_files, counts['packed'])
    else:
        # Store objects (loose)
        start = time.time()
        uuid_mapping = {}
        for filename, content in files.items():
            obj_uuid = container.add_object(content)
            uuid_mapping[filename] = obj_uuid
        tot_time = time.time() - start
        print('Time to store {} loose objects: {:.4} s'.format(
            num_files, tot_time))

        # Retrieve objects (loose)
        retrieved = {}
        random_keys = list(files.keys())
        random.shuffle(random_keys)
        start = time.time()
        for filename in random_keys:
            obj_uuid = uuid_mapping[filename]
            retrieved_content = container.get_object_content(obj_uuid)
            retrieved[filename] = retrieved_content
        tot_time = time.time() - start
        print('Time to retrieve {} loose objects: {:.4} s'.format(
            num_files, tot_time))

        # Check that the content is correct
        for filename in retrieved:
            assert retrieved[filename] == files[
                filename], 'Mismatch (content) for {}'.format(filename)

        # Check that num_files new loose files are present now
        counts = container.count_objects()
        assert counts['loose'] == start_counts[
            'loose'] + num_files, 'Mismatch (loose in unpacked case): {} + {} != {}'.format(
                start_counts['loose'], num_files, counts['loose'])

        # Print container size info (before packing)
        size_info = container.get_total_size()
        print('Object store size info:')
        for key in sorted(size_info.keys()):
            print('- {:30s}: {}'.format(key, size_info[key]))

        # Pack all loose objects
        start = time.time()
        container.pack_all_loose(compress=compress_packs)
        tot_time = time.time() - start
        print('Time to pack all loose objects: {:.4} s'.format(tot_time))

        # Check that all loose files are gone
        counts = container.count_objects()
        assert not counts['loose'], 'loose objects left: {}'.format(
            os.listdir(container._get_loose_folder()))  # pylint: disable=protected-access
        assert counts['packed'] == start_counts['packed'] + start_counts[
            'loose'] + num_files, 'Mismatch (post-pack): {} + {} + {} != {}'.format(
                start_counts['packed'], start_counts['loose'], num_files,
                counts['packed'])

    # print container size info
    size_info = container.get_total_size()
    print('Object store size info:')
    for key in sorted(size_info.keys()):
        print('- {:30s}: {}'.format(key, size_info[key]))

    # In all cases, retrieve all objects (in shuffled order)
    retrieved = {}
    random_keys = list(files.keys())
    random.shuffle(random_keys)

    # Will be needed later
    reverse_uuid_mapping = {v: k for k, v in uuid_mapping.items()}

    ########################################
    # FIRST: single bulk read
    def bulk_read_data(container, uuid_list):
        """A function to read the data in bulk.

        It's defined as a function so it can be profiled."""
        return container.get_object_contents(uuid_list, skip_if_missing=False)

    all_uuids = [uuid_mapping[filename] for filename in random_keys]
    start = time.time()

    if profile_file is not None:
        func = profile(sort='cumtime', filename=profile_file,
                       stdout=False)(bulk_read_data)
    else:
        func = bulk_read_data
    raw_retrieved = func(container=container, uuid_list=all_uuids)
    if profile_file is not None:
        print(
            "You can check the profiling results running 'snakeviz {}'".format(
                profile_file))

    tot_time = time.time() - start
    print(
        'Time to retrieve {} packed objects in random order WITH ONE BULK CALL: {} s'
        .format(num_files, tot_time))
    retrieved = {
        reverse_uuid_mapping[key]: val
        for key, val in raw_retrieved.items()
    }
    for filename in retrieved:
        assert retrieved[filename] == files[
            filename], 'Mismatch for {}'.format(filename)

    ########################################
    # SECOND: num_bulk_calls bulk reads
    random.shuffle(random_keys)
    all_uuids = [uuid_mapping[filename] for filename in random_keys]
    start = time.time()
    raw_retrieved = {}

    # Split the list into num_bulk_call even chunks
    chunk_len = len(all_uuids) // num_bulk_calls
    if len(all_uuids) % num_bulk_calls != 0:
        chunk_len += 1
    split_iterator = (all_uuids[start:start + chunk_len]
                      for start in range(0, len(all_uuids), chunk_len))

    # Retrieve in num_bulk_call chunks
    for chunk_of_uuids in split_iterator:
        raw_retrieved.update(
            container.get_object_contents(chunk_of_uuids,
                                          skip_if_missing=False))

    tot_time = time.time() - start
    print(
        'Time to retrieve {} packed objects in random order WITH {} BULK CALLS: {} s'
        .format(num_files, num_bulk_calls, tot_time))
    retrieved = {
        reverse_uuid_mapping[key]: val
        for key, val in raw_retrieved.items()
    }
    for filename in retrieved:
        assert retrieved[filename] == files[
            filename], 'Mismatch for {}'.format(filename)

    ########################################
    # THIRD: a lot of independent reads, one per object
    random.shuffle(random_keys)
    start = time.time()
    for filename in random_keys:
        obj_uuid = uuid_mapping[filename]
        retrieved_content = container.get_object_content(obj_uuid)
        retrieved[filename] = retrieved_content
    tot_time = time.time() - start
    print('Time to retrieve {} packed objects in random order: {} s'.format(
        num_files, tot_time))

    for filename in retrieved:
        assert retrieved[filename] == files[
            filename], 'Mismatch for {}'.format(filename)

    print('All tests passed')
Example #25
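        # Fragment starting mid-function; the module-level block at the end
        # rebinds usages_with_additional_modules through profilehooks.profile
        # (dirs=True, immediate=True) when PROFILING is enabled.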
        if not definition_names:
            # Without a definition for a name we cannot find references.
            return []

        definition_names = usages.resolve_potential_imports(
            self._evaluator, definition_names)

        modules = set([d.get_root_context() for d in definition_names])
        modules.add(self._get_module())
        for additional_module_context in additional_module_contexts:
            modules.add(additional_module_context)
        definitions = alt_api_usages.usages(self._evaluator, definition_names,
                                            modules)
    finally:
        settings.dynamic_flow_information = temp

    return helpers.sorted_definitions(set(definitions))


if PROFILING:
    try:
        from profilehooks import profile
    except ImportError:
        logging.getLogger(__name__).error(
            'Failed to start with profiler; please install `profilehooks`.')

    usages_with_additional_modules = profile(usages_with_additional_modules,
                                             dirs=True,
                                             immediate=True)
Example #26
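    # Message-log playback CLI fragment: with --profile, playBound runs under
    # profilehooks.profile and the stats are written to a per-file
    # playLog-*.profile.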
    p.add_argument('file', metavar='FILE', type=str)
    p.add_argument('-s', '--start', dest='start_t', default="0",
        help='start time: offset seconds or absolute %s' % Strptime_Fmt.replace('%','%%'))
    p.add_argument('-r', '--rate', dest='rate', type=str, default='x1',
        help='messages per second (x1 for real-time, 2 for 2 messages per second)')
    p.add_argument('-p', '--profile', dest='profile', default=False, action='store_true')
    p.add_argument('-f', '--filter', dest='filter', default=[], action='append',
        help='if set, only the message names specified will be played back')
    opts, unknown_args = p.parse_known_args()
    
    tstart = opts.start_t
    rt_rate = None
    fixed_rate = None
    if opts.rate.startswith('x'):
        rt_rate = float(opts.rate[1:])
    else:
        fixed_rate = float(opts.rate)
    
    node = cauv.node.Node('py-play',unknown_args)
    try:
        def playBound():
            play(opts.file, node, tstart, rt_rate, fixed_rate, opts.filter)
        if opts.profile:
            import profilehooks
            f = opts.file.replace('/', '-').replace('.','')
            profilehooks.profile(playBound, filename='playLog-%s.profile' % f)()
        else:
            playBound()
    finally:
        node.stop()
Example #27
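    # Tautulli (formerly PlexPy) dispatcher revision: blacklists device IDs
    # from the logs and maps the API result type onto an HTTP response code.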
    def _api_run(self, *args, **kwargs):
        """ handles the stuff from the handler """

        # Make sure the device ID is not shown in the logs
        if self._api_cmd == 'register_device':
            if kwargs.get('device_id'):
                logger._BLACKLIST_WORDS.add(kwargs['device_id'])
            if kwargs.get('onesignal_id'):
                logger._BLACKLIST_WORDS.add(kwargs['onesignal_id'])

        result = {}
        logger.api_debug('Tautulli APIv2 :: API called with kwargs: %s' %
                         kwargs)

        self._api_validate(**kwargs)

        if self._api_cmd and self._api_authenticated:
            call = getattr(self, self._api_cmd)

            # Profile is written to console.
            if self._api_profileme:
                from profilehooks import profile
                call = profile(call, immediate=True)

            # We allow this to fail so we get a
            # traceback in the browser
            try:

                result = call(**self._api_kwargs)
            except Exception as e:
                logger.api_error(
                    'Tautulli APIv2 :: Failed to run %s with %s: %s' %
                    (self._api_cmd, self._api_kwargs, e))
                self._api_response_code = 500
                if self._api_debug:
                    cherrypy.request.show_tracebacks = True
                    # Reraise the exception so the traceback hits the browser
                    raise
                self._api_msg = 'Check the logs for errors'

        ret = None
        # The api decorated function can return different result types.
        # convert it to a list/dict before we change it to the user's
        # wanted output
        try:
            if isinstance(result, (dict, list)):
                ret = result
            else:
                raise Exception
        except Exception:
            try:
                ret = json.loads(result)
            except (ValueError, TypeError):
                try:
                    ret = xmltodict.parse(result, attr_prefix='')
                except:
                    pass

        # Fallback if we can't "parse the response"
        if ret is None:
            ret = result

        if (ret is not None or self._api_result_type
                == 'success') and self._api_authenticated:
            # To allow override for restart etc
            # if the call returns some data we are gonna assume it's a success
            self._api_result_type = 'success'
            self._api_response_code = 200

        # Since some of the methods use an API-like response for the ui
        # {result: error, message: 'Some shit happened'}
        if isinstance(ret, dict):
            if ret.get('message'):
                self._api_msg = ret.pop('message', None)

            if ret.get('result'):
                self._api_result_type = ret.pop('result', None)

        if self._api_result_type == 'success' and not self._api_response_code:
            self._api_response_code = 200
        elif self._api_result_type == 'error' and not self._api_response_code:
            self._api_response_code = 400

        if not self._api_response_code:
            self._api_response_code = 500

        cherrypy.response.status = self._api_response_code
        return self._api_out_as(
            self._api_responds(result_type=self._api_result_type,
                               msg=self._api_msg,
                               data=ret))
Example #28
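 # Profiles the playback run loop, writing the stats to a .profile file.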
 def profilePlayback(self):
     import profilehooks
     profilehooks.profile(self.playbackRunloop, filename='messageLogger-playback.profile')()
Example #29
 def profiled_fn(*args, **kwargs):
     fname = os.path.join(directory, produce_new_name())
     wrapped = profile(f, filename=fname)(*args, **kwargs)
     return wrapped
Example #30
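# Times two consecutive generate_binner calls; the trailing comments record
# the observed cumulative timings.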
from tifffile import imread
from xpdtools.tools import generate_binner
from profilehooks import profile
import pyFAI
geo = pyFAI.load('test.poni')
img = imread('test.tiff')

bo = profile(generate_binner)

binner = bo(geo, img.shape)
binner = bo(geo, img.shape)
# 1 call 1.930
# 2 call 2.675