Example #1
    def __init__(self):
        settings_manager = SettingsManager() # Set up the settings_manager

        max_workers = settings_manager.getint('application', 'max-workers') # Get the max workers from settings manager
        profiler_on = settings_manager.getint('debugging', 'profiler-on') # Get whether profiling is enabled
        absolute = settings_manager.getint('save', 'absolute') # Get whether the save path is absolute
        save_path = settings_manager.get('save', 'path') # Get the save path
        if not absolute:
            save_path = os.path.join(PROJECT_PATH, save_path)

        executor = ThreadPoolExecutor(max_workers=max_workers, profiler_on=profiler_on) # Set up the thread executor
        dis = Disassembler(settings_manager) # Build the disassembler
        server = PyDAServer('0.0.0.0', 9000) # Set up the PyDA server
        save_manager = SaveManager(save_path)

        if profiler_on:
            profile = Profile()
            profile.enable()

        app.build_and_run(settings_manager, dis, executor, server, save_manager) # Run the interface

        if profiler_on:
            profile.disable()
            stats = executor.getProfileStats()
            if stats is None:
                stats = Stats(profile)
            else:
                stats.add(profile)
            with open('profile.stats', 'wb') as statsfile:
                stats.stream = statsfile
                stats.sort_stats('cumulative').print_stats()
Example #2
 def _run_unit(self, unit, set_title=False):
     try:
         if set_title:
             setproctitle('coordinate worker {0!r} {1!r}'
                          .format(unit.work_spec_name, unit.key))
         profiler = None
         if ((self.profile_destination and
              unit.work_spec_name in self.profile_work_specs)):
             now = datetime.now()
             unit_info = {
                 'work_spec_name': unit.work_spec_name,
                 'work_unit_key': unit.key,
                 'ymd': now.strftime('%Y%m%d'),
                 'hms': now.strftime('%H%M%S'),
             }
             destination = self.profile_destination % unit_info
             profiler = Profile()
             profiler.enable()
         unit.run()
         if profiler:
             profiler.disable()
             profiler.dump_stats(destination)
         unit.finish()
     except LostLease:
         # We don't own the unit any more so don't try to report on it
         logger.warning('Lost Lease on %r %r', unit.work_spec_name, unit.key)
     except Exception as exc:  # pylint: disable=broad-except
         unit.fail(exc)
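Since profile_destination is expanded with %-style named substitution against unit_info, it is presumably a template over those keys. A sketch of a plausible configuration value (the exact value is configuration-specific; this one is an illustrative assumption):

# Hypothetical configuration value; any of the unit_info keys can appear.
profile_destination = 'profiles/%(work_spec_name)s/%(work_unit_key)s-%(ymd)s-%(hms)s.prof'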
Example #3
from contextlib import contextmanager  # assumed: the yield below makes this a context manager

@contextmanager
def profile(to=None, sort_by='cumtime'):
	'''Profiles a chunk of code, use with the ``with`` statement::
	
	    from halonctl.debug import profile
	    
	    with profile('~/Desktop/stats'):
	    	pass # Do something performance-critical here...
	
	Results for individual runs are collected into ``to``. The specifics of how
	reports are done varies depending on what type ``to`` is.
	
	* **File-like objects**: Stats are dumped, according to ``sort_by``, into the stream, separated by newlines - watch out, the file/buffer may grow very big when used in loops.
	* **List-like objects**: A number of pstats.Stats objects are appended.
	* **str and unicode**: Treated as a path and opened for appending. Tildes (~) will be expanded, and intermediary directories created if possible.
	* **None or omitted**: Results are printed to sys.stderr.
	'''
	
	if isinstance(to, six.string_types):
		to = open_fuzzy(to, 'a')
	
	to_is_stream = hasattr(to, 'write')
	to_is_list = hasattr(to, 'append')
	
	p = Profile()
	p.enable()
	yield
	p.disable()
	
	ps = Stats(p, stream=to if to_is_stream else sys.stderr)
	ps.sort_stats(sort_by)
	
	if to_is_stream or to is None:
		ps.print_stats()
	elif to_is_list:
		to.append(ps)
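A minimal usage sketch for the list-collector case described in the docstring (assuming the @contextmanager fix above; the workload is a placeholder):

stats_list = []
with profile(to=stats_list):
    sum(range(10 ** 6))  # something performance-critical
print(stats_list[0].total_tt)  # each run appends one pstats.Stats object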
Example #4
def main():
    config = ConfigParser()
    try:
        config.read(os.path.expanduser("~/.plug/web.conf"))
    except Exception as err:
        print "Failed to parse config:", str(err)
        return 1

    port = 8008

    if "baseconfig" in config.sections() and "port" in config.options("baseconfig"):
        port = int(config.get("baseconfig", "port"))

    if "TREEBUG" in os.environ:
        last_int = 0
        prof = Profile()
        while True:
            prof.enable()
            try:
                serve(port=port)  # use the configured port rather than hardcoding 8008
            except KeyboardInterrupt:
                if time.time() - last_int < 1:
                    break
                prof.disable()
                output = StringIO()
                ps = pstats.Stats(prof, stream=output).sort_stats("cumulative")
                ps.print_stats()
                print "\n".join([line for line in output.getvalue().splitlines()
                                 if not ("/usr/lib" in line or "{" in line)])
                output.close()
                prof = Profile()
                last_int = time.time()
    else:
        serve(port=port)
Example #5
def profile(func, max_epochs=15, use_GPU=False, cprofile=True):
    """Run a profiler on the code."""

    if cprofile:
        p = Profile()
        p.enable()
    else:
        import pycuda.driver  # 'import pycuda' alone does not load the driver submodule
        pycuda.driver.start_profiler()

    if func == "mnist":
        mnist({'use_GPU': use_GPU, 'rng': np.random.RandomState(0)},
              {'max_epochs': max_epochs, 'plotting': False, 'batch_size': 7500,
               'CG_iter': 10})
    elif func == "integrator":
        integrator({'shape': [1, 100, 1], 'layers': hf.nl.Logistic(),
                    'use_GPU': use_GPU, 'debug': False,
                    'rng': np.random.RandomState(0)},
                   {'max_epochs': max_epochs, 'CG_iter': 10},
                   n_inputs=500, sig_len=200, plots=False)
    else:
        raise ValueError("Unknown profile function")

    if cprofile:
        p.disable()

        ps = pstats.Stats(p)
        ps.strip_dirs().sort_stats('time').print_stats(20)
    else:
        pycuda.driver.stop_profiler()
Example #6
def build_problem(main_img, patterns_imgs, Semiring=SemiringArgminPlusElement):

    image = MatrixPointer(list(main_img.getdata()), (main_img.size[0], main_img.size[1]))
    patterns = dict()
    for p in patterns_imgs:
        patterns[p] = MatrixPointer(list(patterns_imgs[p].getdata()), (patterns_imgs[p].size[0], patterns_imgs[p].size[1]))

    start = Vertex('start')
    end = Vertex('end')
    fake_edge = Edge(start, end, Semiring.get_zero())

    min_width = min(patterns[p].get_size()[0] for p in patterns)
    if min_width > image.get_size()[0]:
        return Graph([start, end], fake_edge)

    profile = Profile()
    profile.enable()
    vertices, edges = process_img(image, patterns, start)
    profile.disable()
    Stats(profile).sort_stats('time').print_stats()
    vertices['start'] = [start]
    vertices['end'] = [end]
    if image.get_size()[0] in vertices:
        edges = edges.union(Edge(v, end, Semiring.get_unity())
                            for v in vertices[image.get_size()[0]])

    problem = DynamicProgramming(sum(vertices.values(), []), edges)
    problem.set_start(start)
    problem.set_finish(end)

    return problem
Example #7
from contextlib import contextmanager  # assumed: the yield below makes this a context manager

@contextmanager
def profile(filename, log=None):
    """!Context manager for profiling with cProfile

    @param filename     filename to which to write profile (profiling disabled if None or empty)
    @param log          log object for logging the profile operations

    If profiling is enabled, the context manager returns the cProfile.Profile object (otherwise
    it returns None), which allows additional control over profiling.  You can obtain this using
    the "as" clause, e.g.:

        with profile(filename) as prof:
            runYourCodeHere()

    The output cumulative profile can be printed with a command-line like:

        python -c 'import pstats; pstats.Stats("<filename>").sort_stats("cumtime").print_stats(30)'
    """
    if not filename:
        # Nothing to do
        yield
        return
    from cProfile import Profile

    prof = Profile()
    if log is not None:
        log.info("Enabling cProfile profiling")
    prof.enable()
    yield prof
    prof.disable()
    prof.dump_stats(filename)
    if log is not None:
        log.info("cProfile stats written to %s" % filename)
Example #8
class BenchTestResult(unittest.runner.TextTestResult):
    """
    A textual test result formatter that can display additional information
    such as profile output and benchmarks.
    """

    def __init__(self, arguments, stream, descriptions, verbosity):
        super(BenchTestResult, self).__init__(stream, descriptions, verbosity)
        settings = arguments.get_settings("test_result")
        self._sort = settings.get("profile_sort")
        self._limit = settings.get("profile_limit")
        self._benchmark = verbosity > 2
        self._profiler = None

    def startTest(self, test):
        super(BenchTestResult, self).startTest(test)
        if self._benchmark:
            self._profiler = Profile()
            self._profiler.enable()

    def stopTest(self, test):
        super(BenchTestResult, self).stopTest(test)
        if self._benchmark:
            self._profiler.disable()
            stats = Stats(self._profiler)
            stats.sort_stats(self._sort)
            stats.print_stats(self._limit)
Example #9
def profile(func, file_path):
    pr = Profile()
    pr.enable()
    func()
    pr.disable()
    with open(file_path, "w") as s:  # close the stats file when done
        ps = Stats(pr, stream=s).sort_stats("cumulative")
        ps.print_stats()
Example #10
 def profiled(*args, **kargs):
     profile = Profile()
     profile.enable()
     func(*args, **kargs)
     profile.disable()
     ident = current_thread().ident
     profile.dump_stats("/tmp/%s.%s.%i.pstat" % (
         hs.hostname, func.__name__, ident
     ))
Example #11
 def profiled_func(*args, **kwargs):
     p = Profile()
     try:
         # profile the input function
         p.enable()
         r = f(*args, **kwargs)
         p.disable()
         return r
     finally:
         p.print_stats()
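profiled_func closes over f, so this is evidently the inner function of a decorator. A sketch of a plausible enclosing definition (the outer name is an assumption):

from cProfile import Profile
from functools import wraps

def profiled(f):  # hypothetical outer decorator
    @wraps(f)  # preserve f's name and docstring
    def profiled_func(*args, **kwargs):
        ...  # body as in the example above
    return profiled_func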
Example #12
def test_profile_rdg_generation():
    profiler = Profile()
    profiler.enable()

    for _ in range(5):
        level = Level(generation_type=LevelGen.Dungeon)
        generate_tiles_to(level)

    profiler.disable()
    profile_util.write_results_log(profiler)
Example #13
 def wrapper(*args, **kwargs):
     profile = Profile()
     profile.enable()
     try:
         func(*args, **kwargs)
     finally:
         profile.disable()
     try:
         thread = current_thread()
         profile.dump_stats('profile_%s.%s.%s.log' % (getpid(), thread.name, thread.ident))
     except Exception:  # avoid a bare except; the failure is logged below
         logger.exception('Failed to dump stats')
Example #14
from contextlib import contextmanager  # assumed: the yield below makes this a context manager

@contextmanager
def profiler(enable, outfile):
    try:
        if enable:
            profiler = Profile()
            profiler.enable()

        yield
    finally:
        if enable:
            profiler.disable()
            stats = Stats(profiler)
            stats.sort_stats('tottime')
            stats.dump_stats(outfile)
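With the @contextmanager fix above, a minimal usage sketch (the output path is a placeholder):

with profiler(enable=True, outfile='run.stats'):
    sum(range(10 ** 6))  # code under test
# the dump can be inspected later with pstats.Stats('run.stats')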
Example #15
def test(algo, size=10000):
	mylist = random_list(size)
	profile = Profile()
	result = profile.runcall(algo, mylist)  # runcall enables/disables the profiler itself
	stringStream = StringIO.StringIO()
	sortby = 'cumulative'
	ps = pstats.Stats(profile, stream=stringStream).sort_stats(sortby)
	ps.print_stats()
	print 
	print "\t\t\t\t{0}  sorting  {1} ints".format(algo.__name__, size)
	print stringStream.getvalue()
Example #16
        def _wrap(*kl, **kw):
            prof = Profile()
            prof.enable()

            try:
                return func(*kl, **kw)

            finally:
                prof.disable()
                result = StringIO()

                ps = Stats(prof, stream=result).sort_stats('cumulative')
                ps.print_stats()

                print result.getvalue()
Example #17
@pytest.hookimpl(hookwrapper=True)  # assumed: yield-style pytest hooks must be declared as hookwrappers
def pytest_runtest_call(item):
    if SHOULD_PROFILE:
        p = Profile()
        p.enable()
        yield
        p.disable()
        stats = Stats(p)
        if SHOULD_PRINT:
            stats.sort_stats('cumulative').print_stats(50)
        if SHOULD_STORE:
            if not os.path.exists(BASEDIR):
                os.mkdir(BASEDIR)
            p.dump_stats(os.path.join(BASEDIR, '%s.pkl' % item.name))
    else:
        yield
Example #18
        def do(impl, count):
            # warm-up
            _res = [impl() for _ in _range(count)]

            profile = Profile()
            profile.enable()

            res = [impl() for _ in _range(count)]

            profile.disable()
            out = StringIO()
            stats = Stats(profile, stream=out)
            stats.strip_dirs()
            stats.sort_stats('calls').print_stats(10)
            print(out.getvalue().lstrip())
            out.close()
            return _res, res
Example #19
class BenchmarkThread(Thread):
    def __init__(self, thread_num, session, query, values, num_queries, profile):
        Thread.__init__(self)
        self.thread_num = thread_num
        self.session = session
        self.query = query
        self.values = values
        self.num_queries = num_queries
        self.profiler = Profile() if profile else None

    def start_profile(self):
        if self.profiler:
            self.profiler.enable()

    def finish_profile(self):
        if self.profiler:
            self.profiler.disable()
            self.profiler.dump_stats("profile-%d" % self.thread_num)
Example #20
def compare_phi():
    """
    Problem 38
    >>> a, b = compare_phi()
    >>> a > b
    True
    """
    from cProfile import Profile
    from pstats import Stats
    pr_normal = Profile()
    pr_normal.enable()
    phi(10090)
    pr_normal.disable()
    time_normal = Stats(pr_normal).total_tt
    pr_improved = Profile()
    pr_improved.enable()
    phi_improved(10090)
    pr_improved.disable()
    time_improved = Stats(pr_improved).total_tt
    return time_normal, time_improved
Example #21
def profile_rnn_calc_G(cprofile=True):
    """Run a profiler on the recurrent curvature calculation.

    :param bool cprofile: use True if profiling on the CPU, False if using the
        CUDA profiler
    """

    inputs = np.random.randn(1024, 128, 1).astype(np.float32)
    targets = np.random.randn(1024, 128, 1).astype(np.float32)
    N = 128

    rnn = hf.RNNet([1, N, 1], use_GPU=True)
    rnn.optimizer = hf.opt.HessianFree()  # for struc_damping check
    rnn.cache_minibatch(inputs, targets)

    v = np.random.randn(rnn.W.size).astype(np.float32)

    for _ in range(2):
        # run it a few times to get rid of any startup overhead
        rnn.GPU_calc_G(v)

    if cprofile:
        start = time.time()

        p = Profile()
        p.enable()
    else:
        pycuda.driver.start_profiler()

    for _ in range(100):
        _ = rnn.GPU_calc_G(v)

    if cprofile:
        p.disable()

        print("time", time.time() - start)

        ps = pstats.Stats(p)
        ps.strip_dirs().sort_stats('time').print_stats(20)
    else:
        pycuda.driver.stop_profiler()
Example #22
    def test_ensemble(self, test_file):
        assert self.authenticated, 'Not authenticated!'
        
        # download a local copy of the ensemble
        self.logger.info('Creating local ensemble')
        local_ensemble = Ensemble(self.ensemble_res,api=self.api)
        
        # make the Fields object
        source = self.api.get_source(self.source_res)
        fields = Fields(source['object']['fields'])
        
        self.logger.info('Reading test data and generating predictions')
        true_labels = []
        predict_labels = []
        pr = Profile()
        pr.enable()
        with open(test_file) as fid:
            test_reader = csv.reader(fid)
            # skip the header line
            test_reader.next()
            for row in test_reader:
                row_list = list(row)
                true_labels.append(row_list.pop())
                instance = fields.pair(row_list)
                predict_labels.append(local_ensemble.predict(instance,
                                                         by_name=False,
                                                         method=1))

        pr.disable()
        ps = Stats(pr)
        self.predict_time = ps.total_tt
#        eval_args = {'combiner':1}
#        evaluation = self.api.create_evaluation(self.ensemble_res,test_data,eval_args)
#        check_resource(evaluation['resource'],self.api.get_evaluation)   
#        evaluation = self.api.get_evaluation(evaluation['resource'])
#        matrix = evaluation['object']['result']['model']['confusion_matrix']
#        self.predict_time = evaluation['object']['status']['elapsed']/1000
        if self.regression:
            self.results = (predict_labels,true_labels)
        else:
            self.results = make_confusion_matrix(true_labels,predict_labels)
Example #23
def profile_dot(cprofile=True):
    """Run a profiler on the matrix multiplication kernel.

    :param bool cprofile: use True if profiling on the CPU, False if using the
        CUDA profiler
    """
    N = 1024
    a = np.random.randn(N, N).astype(np.float32)
    b = np.random.randn(N, N).astype(np.float32)
    a_gpu = gpuarray.to_gpu(a)
    b_gpu = gpuarray.to_gpu(b)
    c_gpu = gpuarray.zeros((N, N), np.float32)

    for _ in range(2):
        # run it a few times to get rid of any startup overhead
        hf.gpu.dot(a_gpu, b_gpu, out=c_gpu)

    if cprofile:
        start = time.time()

        p = Profile()
        p.enable()
    else:
        pycuda.autoinit.context.synchronize()
        pycuda.driver.start_profiler()

    for _ in range(100):
        hf.gpu.dot(a_gpu, b_gpu, out=c_gpu, transpose_a=True,
                   transpose_b=True)
    c_gpu.get()

    if cprofile:
        p.disable()

        print("time", time.time() - start)

        ps = pstats.Stats(p)
        ps.strip_dirs().sort_stats('time').print_stats(20)
    else:
        pycuda.driver.stop_profiler()
Example #24
def profile_calc_G(cprofile=True):
    """Run a profiler on the feedforward curvature calculation.

    :param bool cprofile: use True if profiling on the CPU, False if using the
        CUDA profiler
    """

    inputs = np.random.randn(1024, 1).astype(np.float32)
    targets = np.random.randn(1024, 1).astype(np.float32)
    N = 1024

    ff = hf.FFNet([1, N, N, 1], use_GPU=True)
    ff.cache_minibatch(inputs, targets)

    v = np.random.randn(ff.W.size).astype(np.float32)

    for _ in range(5):
        # run it a few times to get rid of any startup overhead
        ff.GPU_calc_G(v)

    if cprofile:
        start = time.time()

        p = Profile()
        p.enable()
    else:
        pycuda.driver.start_profiler()

    for _ in range(500):
        _ = ff.GPU_calc_G(v)

    if cprofile:
        p.disable()

        print("time", time.time() - start)

        ps = pstats.Stats(p)
        ps.strip_dirs().sort_stats('time').print_stats(20)
    else:
        pycuda.driver.stop_profiler()
Example #25
class BenchmarkThread(Thread):
    def __init__(self, thread_num, session, query, values, num_queries, protocol_version, profile):
        Thread.__init__(self)
        self.thread_num = thread_num
        self.session = session
        self.query = query
        self.values = values
        self.num_queries = num_queries
        self.protocol_version = protocol_version
        self.profiler = Profile() if profile else None

    def start_profile(self):
        if self.profiler:
            self.profiler.enable()

    def run_query(self, key, **kwargs):
        return self.session.execute_async(self.query.format(key=key), **kwargs)

    def finish_profile(self):
        if self.profiler:
            self.profiler.disable()
            self.profiler.dump_stats("profile-%d" % self.thread_num)
Example #26
def profile(func, max_epochs=1, use_GPU=False, cprofile=True):
    """Run a profiler on the code.

    :param str func: the demo function to be profiled (can be 'mnist' or
        'integrator')
    :param int max_epochs: maximum number of iterations to run
    :param bool use_GPU: run optimization on GPU
    :param bool cprofile: if True then run the profiling on the CPU, otherwise
        use CUDA profiler
    """

    if cprofile:
        p = Profile()
        p.enable()
    else:
        import pycuda.driver  # 'import pycuda' alone does not load the driver submodule
        pycuda.driver.start_profiler()

    if func == "mnist":
        mnist({'use_GPU': use_GPU, 'rng': np.random.RandomState(0)},
              {'max_epochs': max_epochs, 'plotting': False,
               'minibatch_size': 7500, 'CG_iter': 10})
    elif func == "integrator":
        integrator({'shape': [1, 100, 1], 'layers': hf.nl.Logistic(),
                    'use_GPU': use_GPU, 'debug': False,
                    'rng': np.random.RandomState(0)},
                   {'max_epochs': max_epochs, 'CG_iter': 10},
                   n_inputs=500, sig_len=200, plots=False)
    else:
        raise ValueError("Unknown profile function")

    if cprofile:
        p.disable()

        ps = pstats.Stats(p)
        ps.strip_dirs().sort_stats('time').print_stats(20)
    else:
        pycuda.driver.stop_profiler()
Example #27
class Monitor:
    def __init__(self,
                 monitoring_dir: str,
                 with_runtime: bool = False,
                 with_profiler: bool = False):
        self._monitoring_dir = monitoring_dir
        self._duration_sampler = _new_duration_sampler(monitoring_dir)
        self._runtime_bucket = None
        self._profiler = None
        self._lock = Lock()

        if with_runtime:
            self._runtime_bucket = _start_runtime_sampler(monitoring_dir)

        if with_profiler:
            self._profiler = Profile()
            self._profiler.enable()

    def monitoring_dir(self) -> str:
        return self._monitoring_dir

    def start_duration_sample(self) -> str:
        return self._duration_sampler.sample()

    def stop_duration_sample(self, label: str, sample_id: str):
        self._duration_sampler.collect(label, sample_id)

    def stop(self):
        if self._profiler:
            with self._lock:
                self._profiler.disable()
                outfile = _profiler_file_pathname(self._monitoring_dir)
                self._profiler.dump_stats(outfile)

        self._duration_sampler.bucket().empty()
        if self._runtime_bucket:
            self._runtime_bucket.empty()
Example #28
    def run(self) -> StatusCode:
        """Run process manager.

        :return: Status code of run execution.
        :rtype: StatusCode
        """
        # Initialize
        status = self.initialize()

        # Execute
        profiler = None
        if status == StatusCode.Success:
            settings = self.service(ConfigObject)
            profile_code = settings.get('doCodeProfiling', False)
            if profile_code:
                from cProfile import Profile
                profiler = Profile()
                profiler.enable()

            status = self.execute()

            if profiler:
                profiler.disable()

        # Finalize
        if status == StatusCode.Success:
            status = self.finalize()
            # Profiling output
            if profiler:
                import io
                import pstats
                profile_output = io.StringIO()
                profile_stats = pstats.Stats(profiler, stream=profile_output).sort_stats('time')
                profile_stats.print_stats()
                self.logger.info('Profiling Statistics (sorted by time):\n{stats}', stats=profile_output.getvalue())

        return status
Example #29
    def __run(self, *args, **kwargs):

        __start = time.time()

        # notify if we don't process quickly
        if __start - self.__time_submitted > 0.05:
            self.log.warning(
                f'Starting of {self.name} took too long: {__start - self.__time_submitted:.2f}s. '
                f'Maybe there are not enough threads?')

        # start profiler
        pr = Profile()
        pr.enable()

        # Execute the function
        try:
            self._func(*args, **kwargs)
        except Exception as e:
            self.__format_traceback(e, *args, **kwargs)

        # disable profiler
        pr.disable()

        # log warning if execution takes too long
        __dur = time.time() - __start
        if self.__warn_too_long and __dur > 0.8:
            self.log.warning(
                f'Execution of {self.name} took too long: {__dur:.2f}s')

            s = io.StringIO()
            ps = Stats(pr, stream=s).sort_stats(STAT_SORT_KEY)
            ps.print_stats(0.1)  # limit output to 10% of the lines

            # skip the call-count summary and the "Ordered by:" header lines
            for line in s.getvalue().splitlines()[4:]:
                if line:
                    self.log.warning(line)
Example #30
def _test_overhead_timing(dim=15):
    # prints timing for simple Gaussian vanilla mcmc
    import pstats
    from cProfile import Profile
    from io import StringIO
    # noinspection PyUnresolvedReferences
    from cobaya.samplers.mcmc import proposal  # one-time numba compile out of profiling

    LikeTest = _make_gaussian_like(dim)
    info = {
        'likelihood': {
            'like': LikeTest
        },
        'debug': False,
        'sampler': {
            'mcmc': {
                'max_samples': 1000,
                'burn_in': 0,
                "learn_proposal": False,
                "Rminus1_stop": 0.0001
            }
        }
    }
    prof = Profile()
    prof.enable()
    run(info)
    prof.disable()
    # prof.dump_stats("out.prof")  # to visualize with e.g. snakeviz
    s = StringIO()
    ps = pstats.Stats(prof, stream=s)
    print_n_calls = 10
    ps.strip_dirs()
    ps.sort_stats('time')
    ps.print_stats(print_n_calls)
    ps.sort_stats('cumtime')
    ps.print_stats(print_n_calls)
    print(s.getvalue())
Example #31
def main():
    config = ConfigParser()
    try:
        config.read(os.path.expanduser("~/.plug/web.conf"))
    except Exception as err:
        print "Failed to parse config:", str(err)
        return 1

    port = 8008

    if "baseconfig" in config.sections() and "port" in config.options(
            "baseconfig"):
        port = int(config.get("baseconfig", "port"))

    if "TREEBUG" in os.environ:
        last_int = 0
        prof = Profile()
        while True:
            prof.enable()
            try:
                serve(port=port)  # use the configured port rather than hardcoding 8008
            except KeyboardInterrupt:
                if time.time() - last_int < 1:
                    break
                prof.disable()
                output = StringIO()
                ps = pstats.Stats(prof, stream=output).sort_stats("cumulative")
                ps.print_stats()
                print "\n".join([
                    line for line in output.getvalue().splitlines()
                    if not ("/usr/lib" in line or "{" in line)
                ])
                output.close()
                prof = Profile()
                last_int = time.time()
    else:
        serve(port=port)
Example #32
    def perform(self):
        """Run sequentially all child tasks, and close ressources.

        """
        result = True
        self.thread_id = threading.current_thread().ident

        self.prepare()

        pr = Profile() if self.should_profile else None

        try:
            if pr:
                pr.enable()
            for child in self.children:
                child.perform_()
        except Exception:
            log = logging.getLogger(__name__)
            msg = 'The following unhandled exception occurred:\n'
            log.exception(msg)
            self.should_stop.set()
            result = False
            self.errors['unhandled'] = msg + format_exc()
        finally:
            if pr:
                pr.disable()
                meas_name = self.get_from_database('meas_name')
                meas_id = self.get_from_database('meas_id')
                path = os.path.join(self.default_path,
                                    meas_name + '_' + meas_id + '.prof')
                pr.dump_stats(path)
            self.release_resources()

        if self.should_stop.is_set():
            result = False

        return result
Example #33
    def perform(self):
        """Run sequentially all child tasks, and close ressources.

        """
        result = True
        self.thread_id = threading.current_thread().ident

        self.prepare()

        pr = Profile() if self.should_profile else None

        try:
            if pr:
                pr.enable()
            for child in self.children:
                child.perform_()
        except Exception:
            log = logging.getLogger(__name__)
            msg = 'The following unhandled exception occurred:\n'
            log.exception(msg)
            self.should_stop.set()
            result = False
            self.errors['unhandled'] = msg + format_exc()
        finally:
            if pr:
                pr.disable()
                meas_name = self.get_from_database('meas_name')
                meas_id = self.get_from_database('meas_id')
                path = os.path.join(self.default_path,
                                    meas_name + '_' + meas_id + '.prof')
                pr.dump_stats(path)
            self.release_resources()

        if self.should_stop.is_set():
            result = False

        return result
Example #34
def with_profiling(f, stats_out=None, text_out=None):
	try: from cProfile import Profile
	except ImportError: from profile import Profile
	from pstats import Stats
	prof = Profile()

	prof.enable()
	retval = f()
	prof.disable()

	if stats_out:
		try:
			prof.dump_stats(stats_out)
		except (IOError,OSError) as e: # not worth losing our return value over
			warn('could not write pstats (%s)', e)
	if text_out:
		try:
			MAX_LINES = 20
			stats = Stats(prof, stream=text_out)
			stats.sort_stats('cumtime')
			stats.print_stats(MAX_LINES)
		except (IOError,OSError) as e:
			warn('could not write profiling summary (%s)', e)
	return retval
Example #35
def profile_stop(
    profiler: Profile,
    sort_field: str = "cumulative",
    dump_filename: str = None,
    dump_line_count: int = 100,
) -> None:
    """Stop a given profiler and print or dump stats.

    Args:
        profiler:
            Active profiling instance.
        sort_field:
            pstats.Stats sort field for ordering profiling results.
        dump_filename:
            File path to dump profiling stats into.
        dump_line_count:
            Maximum lines to print out.
    """
    profiler.disable()
    stats = pstats.Stats(profiler, stream=sys.stderr).sort_stats(sort_field)
    if dump_filename is None:
        stats.print_stats(dump_line_count)
    else:
        stats.dump_stats(dump_filename)
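profile_stop only stops and reports on an already-running profiler; a sketch of the calling side (the workload name is hypothetical):

from cProfile import Profile

profiler = Profile()
profiler.enable()  # what a matching profile_start helper would do
do_expensive_work()  # hypothetical workload
profile_stop(profiler, sort_field='tottime', dump_line_count=25)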
Example #36
    def _call_wrapped(self, function, idict, odict):
        """Does the same as _call, but with profiling enabled.
        """
        args = idict['args']
        kwargs = idict['kwargs']

        try:
            profile = Profile()
            profile.enable()

            data = function(*args, **kwargs)

            profile.disable()
            stream = StringIO()
            stats = pstats.Stats(profile, stream=stream)
            stats.strip_dirs()
            stats.sort_stats('time')  # numeric 1 is the legacy alias for sorting by internal time
            stats.print_stats(20)

        except Exception:
            odict['error'] = traceback.format_exc()
        else:
            odict['data'] = data
            odict['profile'] = stream.getvalue()
Example #37
def main():
    """
    >>> p = PrimeStore()
    >>> p.is_prime(3)
    True
    >>> p.is_prime(99)
    False
    >>> p.is_prime(9)
    False
    """
    from cProfile import Profile
    from pstats import Stats
    from random import randint
    p = PrimeStore()

    pr = Profile()
    pr.enable()

    for i in [randint(0, 10000) for _ in range(1000)]:
        p.is_prime(i)

    pr.disable()
    stats = Stats(pr)
    assert stats.total_tt < 0.100
Example #38
from contextlib import contextmanager  # assumed: the yield below makes this a context manager

@contextmanager
def profile(filename, log=None):
    """Context manager for profiling with cProfile.


    Parameters
    ----------
    filename : `str`
        Filename to which to write profile (profiling disabled if `None` or empty).
    log : `lsst.log.Log`, optional
        Log object for logging the profile operations.

    If profiling is enabled, the context manager returns the cProfile.Profile object (otherwise
    it returns None), which allows additional control over profiling.  You can obtain this using
    the "as" clause, e.g.:

        with profile(filename) as prof:
            runYourCodeHere()

    The output cumulative profile can be printed with a command-line like::

        python -c 'import pstats; pstats.Stats("<filename>").sort_stats("cumtime").print_stats(30)'
    """
    if not filename:
        # Nothing to do
        yield
        return
    from cProfile import Profile
    profile = Profile()
    if log is not None:
        log.info("Enabling cProfile profiling")
    profile.enable()
    yield profile
    profile.disable()
    profile.dump_stats(filename)
    if log is not None:
        log.info("cProfile stats written to %s" % filename)
Example #39
    def run(self):
        watch = StopWatch()
        watch.tag('inference', self.verbose)
        # load the MLN
        if isinstance(self.mln, MLN):
            mln = self.mln
        else:
            raise Exception('No MLN specified')

        if self.use_emln and self.emln is not None:
            mlnstrio = io.StringIO()
            mln.write(mlnstrio)
            mlnstr = mlnstrio.getvalue()
            mlnstrio.close()
            emln = self.emln
            mln = parse_mln(mlnstr + emln,
                            grammar=self.grammar,
                            logic=self.logic)

        # load the database
        if isinstance(self.db, Database):
            db = self.db
        elif isinstance(self.db, list) and len(self.db) == 1:
            db = self.db[0]
        elif isinstance(self.db, list) and len(self.db) == 0:
            db = Database(mln)
        elif isinstance(self.db, list):
            raise Exception(
                'Got {} dbs. Can only handle one for inference.'.format(
                    len(self.db)))
        else:
            raise Exception('DB of invalid format {}'.format(type(self.db)))

        # expand the parameters
        params = dict(self._config)
        if 'params' in params:
            params.update(eval("dict(%s)" % params['params']))
            del params['params']
        params['verbose'] = self.verbose
        if self.verbose:
            print((tabulate(sorted(list(params.items()),
                                   key=lambda k_v: str(k_v[0])),
                            headers=('Parameter:', 'Value:'))))
        if type(db) is list and len(db) > 1:
            raise Exception('Inference can only handle one database at a time')
        elif type(db) is list:
            db = db[0]
        params['cw_preds'] = [x for x in self.cw_preds if bool(x)]
        # extract and remove all non-algorithm
        for s in GUI_SETTINGS:
            if s in params: del params[s]

        if self.profile:
            prof = Profile()
            print('starting profiler...')
            prof.enable()
        # set the debug level
        olddebug = logger.level
        logger.level = (eval('logs.%s' %
                             params.get('debug', 'WARNING').upper()))
        result = None
        try:
            mln_ = mln.materialize(db)
            mrf = mln_.ground(db)
            inference = self.method(mrf, self.queries, **params)
            if self.verbose:
                print()
                print((headline('EVIDENCE VARIABLES')))
                print()
                mrf.print_evidence_vars()

            result = inference.run()
            if self.verbose:
                print()
                print((headline('INFERENCE RESULTS')))
                print()
                inference.write()
            if self.verbose:
                print()
                inference.write_elapsed_time()
        except SystemExit:
            traceback.print_exc()
            print('Cancelled...')
        finally:
            if self.profile:
                prof.disable()
                print((headline('PROFILER STATISTICS')))
                ps = pstats.Stats(prof,
                                  stream=sys.stdout).sort_stats('cumulative')
                ps.print_stats()
            # reset the debug level
            logger.level = olddebug
        if self.verbose:
            print()
            watch.finish()
            watch.printSteps()
        return result
Example #40
    a, b, c = Vertex(3)
    ase = AbstractStateExplorer(Constraint(), [
        Action(
            'sort_ball', Graph([
                Edge('Ball', (a, )),
                Edge('Hand', (b, )),
            ]),
            Graph([
                Edge('Ball', (a, )),
                Edge('Sorted', (a, )),
                Edge('Hand', (b, )),
            ])),
        Action('buy_orange', Graph([]), Graph([
            Edge('Orange', (a, )),
        ]))
    ])

    from cProfile import Profile
    from pstats import Stats
    p = Profile()
    p.enable()
    for i in range(10):
        ase.compile(i)
        print(len(ase.CompoundActionsList))
    p.disable()
    #sol, end_state = ase.find_solution(g) #right now it just returns the first action it applies just because anything meets the constraints
    #print('solution', sol.CompoundActionTracker)
    #print(end_state)
    #Stats(p).sort_stats('cumtime').print_stats()
Example #41
        # Build state information
        #  default is <none of these> for all variables
        state_variables = [num_var_values[i] - 1 for i in range(num_vars)]
        for state_item in xml_root:
            assert state_item.tag == 'atom',\
                "Cannot handle non-atom state items yet (got %s)" % \
                (state_item.tag)
            current_atom = state_item.find('predicate').text + "(" + \
                ", ".join([t.text for t in state_item.findall('term')]) + ")"
            assert current_atom in atom_to_var_value,\
                "SAS+ variable for %s unknown" % current_atom
            var_index, var_value = atom_to_var_value[current_atom]
            state_variables[var_index] = var_value

        if profiling:
            build_state_profiler.disable()

        if verbosity > 3:
            stdout.write("Current state variables:\n")
            for (i, sv) in enumerate(state_variables):
                stdout.write("\tvar%d: %d\n" % (i, sv))

        if profiling:
            profiling_call_accumulator = 0
            get_actions_profiler.enable()

        action = None
        actions_to_consider = policy.get_actions(state_variables)
        actions_to_avoid = de_policy.get_actions(state_variables)
        actions_valid = [
            a for a in actions_to_consider
Example #42
class Measure:
    time_factor = 1000

    def __init__(self,
                 measure_session=None,
                 measure_point_name=None,
                 save_queue=None,
                 data=None):
        if data is None:
            self.measure_session = measure_session
            self.point_name = measure_point_name
            self._save_queue = save_queue
            self._profile = Profile()
        else:
            self.measure_session = data[0]
            self.point_name = data[1]
            self.load(data[2])

    def load(self, data):
        self.timings = data

    def start(self):
        self._profile.enable()
        return self

    def stop(self):
        self._profile.disable()
        self.stats = self._profile.getstats()
        self.timings = list()
        self.timestamp = datetime.utcnow()

        for stat in self.stats:
            if stat.calls is not None:
                calls = list()
                for call in stat.calls:
                    calls.append(
                        TimingStat(self.timestamp, str(self.measure_session),
                                   str(self.point_name), str(call.code),
                                   call.callcount, call.reccallcount,
                                   call.totaltime * self.time_factor,
                                   call.inlinetime * self.time_factor, None))
            else:
                calls = None

            self.timings.append(
                TimingStat(self.timestamp, str(self.measure_session),
                           str(self.point_name), str(stat.code),
                           stat.callcount, stat.reccallcount,
                           stat.totaltime * self.time_factor,
                           stat.inlinetime * self.time_factor, calls))

        return self

    def save(self):
        self._save_queue.put_nowait(
            (self.measure_session, self.point_name, self.timings))
        return self

    def __str__(self):
        buf = "<{0}: measure_session='{1}' point_name='{1}'".format(
            self.__class__, self.measure_session, self.point_name)
        if hasattr(self, "stat"):
            buf += ", stat='{}'".format(self.stats)
        if hasattr(self, "timing"):
            buf += ", timing='{}'".format(self.timings)
        buf += ">"
        return buf
Example #43
class run_info_dict(dict):
    """
    Simple dictionary class for collecting runtime information

    The typical use is as follows:

    >> my_run_info = run_info_dict()
    >> my_run_info(my_function)(my_parameters)

    With this, all runtime information is automatically collected in my_run_info.
    We can enable time-and-usage and memory profiling simply by calling
    enable_profile_time_and_usage(...) or  enable_profile_memory(...), respectively,
    before we run our function.

    We can also use the data structure directly and control the population ourselves,
    however, memory profiling is not supported by default in this case but we need to
    set and run the memory profiler ourselves, since memory_profiler expects that it
    can wrap the function

    """
    DEFAULT_TIME_FORMAT = '%Y-%m-%d %H:%M:%S.%f'

    def __init__(self, *args, **kwargs):
        super(run_info_dict, self).__init__(*args, **kwargs)
        self.__profile_time_and_usage = False
        self.__profile_memory = False
        self.__time_and_use_profiler = None
        self.__memory_profiler = None
        self.mpi_comm = mpi_helper.get_comm_world()
        self.mpi_root = 0
        self.gather_data = True

    def __call__(self, func):
        """

        :param func: The function to be wrapped for execution
        :return: A wrapped function for which we track the runtime information in self
        """
        from functools import wraps

        @wraps(func)
        def wrapper(*args, **kwargs):
            # Pre-execute recording
            self.clear()  # Clear all runtime information data and profilers
            self.record_preexecute()  # Record system provenance and pre-execution data
            start_time = time.time()  # Start the execution timer
            # Execute the function
            if not self.get_profile_memory():
                # Execute the function without memory profiling
                result = func(*args, **kwargs)
            else:
                # Execute the function with memory profiling
                self.__memory_profiler = memory_profiler.LineProfiler()
                result = self.__memory_profiler(func)(*args, **kwargs)
            # Post-execute recording
            execution_time = time.time() - start_time  # Compute the execution time
            self.record_postexecute(execution_time=execution_time)  # Record post-execution data
            self.clean_up()  # Clean up empty data
            if self.gather_data:
                self.gather()  # Gather the data from all MPI ranks
            # Return the result
            return result

        # Return our wrapped function
        return wrapper

    def clear(self):
        """
        Clear the dictionary and other internal parameters

        Side Effects

            * Remove all key/value pairs from the dict
            * Set self.__time_and_use_profiler to None
            * Set self.__memory_profiler to None
            * Set self.__profile_memory to False if invalid (i.e, if set to True but memory profiling is unavailable)
            * Set self.__profile_time_and_usage to False if invalid (i.e., if set to True but profiling is unavailable)
        """
        # Make sure profiling settings are valid
        if self.get_profile_memory() and not PROFILE_MEMORY_AVAILABLE:
            self.enable_profile_memory(False)
        if self.get_profile_time_and_usage() and not PROFILE_AVAILABLE:
            self.enable_profile_time_and_usage(False)
        # Remove old profilers
        self.__time_and_use_profiler = None
        self.__memory_profiler = None
        # Clear all data from the dictionary
        return super(run_info_dict, self).clear()

    def enable_profile_memory(self, enable=True):
        """
        Enable/disable profiling of memory usage

        :param enable: boolean to enable (True) or disable (False) memory profiling

        """
        if PROFILE_MEMORY_AVAILABLE:
            if not enable and self.__profile_memory:
                log_helper.debug(__name__,
                                 "Disabled memory profiling. ",
                                 root=self.mpi_root,
                                 comm=self.mpi_comm)
            if enable and not self.__profile_memory:
                log_helper.debug(__name__,
                                 "Enabled memory profiling. ",
                                 root=self.mpi_root,
                                 comm=self.mpi_comm)
            self.__profile_memory = enable
        else:
            self.__profile_memory = False
            if enable:
                log_helper.warning(
                    __name__, 'Profiling of memory usage not available.' +
                    ' Missing memory_profiler or StringIO package')

    def enable_profile_time_and_usage(self, enable=True):
        """
        Enable/disable time and usage profiling

        :param enable: boolean to enable (True) or disable (False) time and usage profiling

        """
        if PROFILE_AVAILABLE:
            if not enable and self.__profile_time_and_usage:
                log_helper.debug(__name__,
                                 "Disabled time and usage profiling. ",
                                 root=self.mpi_root,
                                 comm=self.mpi_comm)
            if enable and not self.__profile_time_and_usage:
                log_helper.debug(__name__,
                                 "Enabled time and usage profiling. ",
                                 root=self.mpi_root,
                                 comm=self.mpi_comm)
            self.__profile_time_and_usage = enable
        else:
            self.__profile_time_and_usage = False
            if enable:
                log_helper.warning(
                    __name__, 'Profiling of time and usage not available.' +
                    ' Missing profile and/or pstats package')

    def get_profile_time_and_usage(self):
        """
        Check whether time and usage profiling is enabled

        :return: Boolean indicating whether time and usage profiling is enabled
        """
        return self.__profile_time_and_usage

    def get_profile_memory(self):
        """
        Check whether profiling of memory usage is enabled

        :return: Boolean indicating whether memory profiling is enabled
        """
        return self.__profile_memory

    def record_preexecute(self):
        """
        Record basic runtime information in this dict before the execution is started.


        Function used to record runtime information prior to executing the process we want to track, e.g.,
        the `execute_analysis(...)` of a standard analysis.

        The function may be overwritten in child classes to add recording of
        additional runtime information. All runtime data should be recorded in the
        main dict (i.e, self). This ensures in the case of standard analysis that
        the data is stored in the HDF5 file. Other data should be stored in separate
        variables that we may add to the object.

        When overwriting the function we should typically call super(..., self).record_preexecute()
        last in the custom version to ensure that the start_time is properly recorded right before
        the execution of the analysis.

        """
        log_helper.debug(__name__,
                         'Recording pre-execution runtime data',
                         root=self.mpi_root,
                         comm=self.mpi_comm)
        # Record basic runtime environment information using the platform module
        try:
            self['architecture'] = unicode(platform.architecture())
            self['java_ver'] = unicode(platform.java_ver())
            self['libc_ver'] = unicode(platform.libc_ver())
            self['linux_distribution'] = unicode(platform.linux_distribution())
            self['mac_ver'] = unicode(platform.mac_ver())
            self['machine'] = unicode(platform.machine())
            self['node'] = unicode(platform.node())
            self['platform'] = unicode(platform.platform())
            self['processor'] = unicode(platform.processor())
            self['python_branch'] = unicode(platform.python_branch())
            self['python_build'] = unicode(platform.python_build())
            self['python_compiler'] = unicode(platform.python_compiler())
            self['python_implementation'] = unicode(
                platform.python_implementation())
            self['python_revision'] = unicode(platform.python_revision())
            self['python_version'] = unicode(platform.python_version())
            self['release'] = unicode(platform.release())
            self['system'] = unicode(platform.system())
            self['uname'] = unicode(platform.uname())
            self['version'] = unicode(platform.version())
            self['win32_ver'] = unicode(platform.win32_ver())
        except Exception:
            warnings.warn(
                "WARNING: Recording of platform provenance failed: " +
                str(sys.exc_info()))

        # Attempt to record the svn version information
        try:
            import subprocess
            self['svn_ver'] = subprocess.check_output('svnversion').rstrip(
                '\n')
        except ImportError:
            log_helper.warning(
                __name__,
                'Recording of svn version not possible. subprocess not installed',
                root=self.mpi_root,
                comm=self.mpi_comm)
        except Exception:
            warnings.warn("Recording of svn version information failed: " +
                          str(sys.exc_info()))

        # Attempt to record software library version
        try:
            import numpy as np
            self['numpy_version_full_version'] = unicode(
                np.version.full_version)
            self['numpy_version_release'] = unicode(np.version.release)
            self['numpy_version_git_revision'] = unicode(
                np.version.git_revision)
        except ImportError:
            log_helper.warning(__name__,
                               'Recording of numpy version not possible.',
                               root=self.mpi_root,
                               comm=self.mpi_comm)

        # Attempt to record psutil data
        try:
            import psutil
            self['logical_cpu_count'] = unicode(psutil.cpu_count())
            self['cpu_count'] = unicode(psutil.cpu_count(logical=False))
            process = psutil.Process()
            self['open_files'] = unicode(process.open_files())
            self['memory_info_before'] = unicode(process.memory_info())
        except ImportError:
            log_helper.warning(
                __name__,
                'psutil not installed. Recording of part of runtime information not possible',
                root=self.mpi_root,
                comm=self.mpi_comm)
        except Exception:
            warnings.warn(
                "Recording of psutil-based runtime information failed: " +
                str(sys.exc_info()))

        # Record the start time for the analysis
        self['start_time'] = unicode(datetime.datetime.now())

        # Enable time and usage profiling if requested
        if self.__profile_time_and_usage:
            self.__time_and_use_profiler = Profile()
            self.__time_and_use_profiler.enable()

    def record_postexecute(self, execution_time=None):
        """
        Function used to record runtime information after the task we want to track is completed, e.g.,
        the `execute_analysis(...)` function of a standard analysis.

        The function may be overwritten in child classes to add recording of
        additional runtime information.

        When overwriting the function we should call super(..., self).record_postexecute(execution_time)
        in the custom version to ensure that the execution and end_time are properly
        recorded.

        :param execution_time: The total time it took to execute the analysis. May be None, in which
            case the function will attempt to compute the execution time based on the start_time
            (if available) and the current time.

        :param comm: Used for logging only. The MPI communicator to be used. Default value is None,
            in which case MPI.COMM_WORLD is used.

        """
        log_helper.debug(__name__,
                         'Recording post-execution runtime data',
                         root=self.mpi_root,
                         comm=self.mpi_comm)
        # Finalize recording of post execution provenance
        self['end_time'] = unicode(datetime.datetime.now())
        if execution_time is not None:
            self['execution_time'] = unicode(execution_time)
        elif 'start_time' in self:
            start_time = run_info_dict.string_to_time(self['start_time'])
            stop_time = run_info_dict.string_to_time(self['end_time'])
            self['execution_time'] = unicode(
                stop_time - start_time
            )  # TODO: This only gives execution time in full seconds right now
        else:
            self['execution_time'] = None
        # Attempt to record psutil data
        try:
            import psutil
            process = psutil.Process()
            self['memory_info_after'] = unicode(process.memory_info())
        except ImportError:
            log_helper.warning(
                __name__,
                'psutil not installed. Recording of part of runtime information not possible',
                root=self.mpi_root,
                comm=self.mpi_comm)
        except Exception:
            warnings.warn(
                "Recording of psutil-based runtime information failed: " +
                str(sys.exc_info()))

        # Record the time and use profiling data if possible
        if self.__time_and_use_profiler is not None:
            self.__time_and_use_profiler.disable()
            self.__time_and_use_profiler.create_stats()
            self['profile'] = unicode(self.__time_and_use_profiler.stats)
            # Save the summary statistics for the profiling data
            stats_io = StringIO.StringIO()
            profiler_stats = pstats.Stats(
                self.__time_and_use_profiler,
                stream=stats_io).sort_stats('cumulative')
            profiler_stats.print_stats()
            self['profile_stats'] = stats_io.getvalue()

        # Record the memory profiling data if possible
        if self.__memory_profiler is not None and self.get_profile_memory():
            log_helper.debug(__name__,
                             'Recording memory profiling data',
                             root=self.mpi_root,
                             comm=self.mpi_comm)
            mem_stats_io = StringIO.StringIO()
            memory_profiler.show_results(self.__memory_profiler,
                                         stream=mem_stats_io)
            self['profile_mem'] = unicode(self.__memory_profiler.code_map)
            self['profile_mem_stats'] = mem_stats_io.getvalue()

    def clean_up(self):
        """
        Clean up the runinfo object. In particular remove empty keys that
        either recorded None or recorded just an empty string.

        This function may be overwritten to also do clean-up needed
        due to additional custom runtime instrumentation.

        When overwriting this function we should call super(..., self).clean_up()
        at the end of the function to ensure that the runinfo dictionary
        is clean, i.e., does not contain any empty entries.

        """
        log_helper.debug(__name__,
                         'Clean up runtime data',
                         root=self.mpi_root,
                         comm=self.mpi_comm)
        # Remove empty items from the run_info dict
        for ri_key, ri_value in self.items():
            try:
                if ri_value is None or len(ri_value) == 0:
                    self.pop(ri_key)
            except:
                pass

    def gather(self):
        """
        Simple helper function to gather the runtime information---that has been collected on
        multiple processes when running using MPI---on a single root process

        :return: If we have more than one process then this function returns a
            dictionary with the same keys as usual for the run_info but the
            values are now lists with one entry per MPI process. If we only have
            a single process, then the run_info object will be returned without
            changes. NOTE: Similar to MPI gather, the function only collects
            information on the root. All other processes will return just their
            own private runtime information.

        """
        if mpi_helper.MPI_AVAILABLE:
            if self.mpi_comm.Get_size() > 1:
                log_helper.debug(__name__,
                                 'Gather runtime data from parallel tasks',
                                 root=self.mpi_root,
                                 comm=self.mpi_comm)
                self['mpi_rank'] = self.mpi_comm.Get_rank()
                run_data = self.mpi_comm.gather(self, self.mpi_root)
                if self.mpi_comm.Get_rank() == self.mpi_root:
                    merged_run_data = {}
                    for run_dict in run_data:
                        for key in run_dict:
                            try:
                                merged_run_data[key].append(run_dict[key])
                            except KeyError:
                                merged_run_data[key] = [run_dict[key]]
                    return merged_run_data
        return self

    def get_profile_stats_object(self, consolidate=True, stream=None):
        """
        Based on the execution profile of the execute_analysis(..) function, get a
        ``pstats.Stats`` object to help with the interpretation of the data.

        :param consolidate: Boolean flag indicating whether multiple stats (e.g., from multiple cores)
            should be consolidated into a single stats object. Default is True.
        :param stream: The optional stream parameter to be used for the pstats.Stats object.

        :return: A single pstats.Stats object if consolidate is True. Otherwise the function
            returns a list of pstats.Stats objects, one per recorded statistic. None is returned
            if the stats objects cannot be created or no profiling data is available.
        """
        from ast import literal_eval
        if stream is None:
            import sys
            stream = sys.stdout

        if 'profile' in self:
            # Parse the profile data (which is stored as a string); in the case of MPI we may
            # have a list of strings, one from each MPI process
            if isinstance(self['profile'], list):
                # Convert the profile from each MPI process independently
                profile_data = [
                    literal_eval(profile) for profile in self['profile']
                ]
            else:
                # If we only have a single stat, then convert our data to a list, so that we can
                # handle the single and multiple statistics case in the same way in the remainder of this function
                profile_data = [
                    literal_eval(self['profile']),
                ]

            # Create a list of profile objects that the pstats.Stats class understands
            profile_dummies = []
            for profile_i in profile_data:
                # Here we create, for each statistic, a dummy class on the fly that holds our
                # profile_data in its stats attribute and has an empty create_stats function.
                # This trick allows us to create a pstats.Stats object without having to write our
                # stats data to file or having to create a cProfile.Profile object first. Writing
                # the data to file involves overhead and is ugly, and creating a profiler and
                # overwriting its stats is potentially problematic.
                profile_dummies.append(
                    type('Profile', (object, ), {
                        'stats': profile_i,
                        'create_stats': lambda x: None
                    })())

            # Create the statistics object and return it
            if consolidate:
                profile_stats = pstats.Stats(*profile_dummies, stream=stream)
                return profile_stats
            else:
                profile_stats = [
                    pstats.Stats(profile_i, stream=stream)
                    for profile_i in profile_dummies
                ]
                return profile_stats
        else:
            return None

    @staticmethod
    def string_to_structime(time_string, time_format=None):
        """
        Convert a time string to a time.struct_time using time.strptime

        :param time_string: String with the time, e.g., the start time of a program.
        :param time_format: The time format to be used or None in which case run_info_dict.DEFAULT_TIME_FORMAT
            will be used.

        """
        return time.strptime(
            time_string, time_format
            if time_format is not None else run_info_dict.DEFAULT_TIME_FORMAT)

    @staticmethod
    def string_to_time(time_string, time_format=None):
        """
        Convert a time string to seconds since the epoch (local time) using time.mktime.

        :param time_string: String with the time, e.g., the start time of a program.
        :param time_format: The time format to be used or None in which case run_info_dict.DEFAULT_TIME_FORMAT
            will be used.

        """
        return time.mktime(
            time.strptime(
                time_string, time_format if time_format is not None else
                run_info_dict.DEFAULT_TIME_FORMAT))
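
# A minimal standalone sketch (not part of the class above) of the dummy-stats
# trick used in get_profile_stats_object: pstats.Stats only needs an object with
# a ``stats`` attribute and a ``create_stats`` method, so profile data that was
# round-tripped through a string can be loaded without touching disk. The
# profiled function ``busy_work`` is hypothetical, for illustration only.
from ast import literal_eval
import cProfile
import pstats


def busy_work():
    return sum(i * i for i in range(100000))


profiler = cProfile.Profile()
profiler.enable()
busy_work()
profiler.disable()
profiler.create_stats()

# Store the stats as a string, as run_info_dict does, then restore them
restored_stats = literal_eval(str(profiler.stats))

# Dummy object that quacks like a cProfile.Profile as far as pstats is concerned
dummy = type('Profile', (object,), {
    'stats': restored_stats,
    'create_stats': lambda self: None,
})()
pstats.Stats(dummy).sort_stats('cumulative').print_stats(5)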
Exemple #44
0
def main():
    '''
    Entry point for the command line interface.

    Additional sub-commands can be added by specifying them in an entry point in your
    package's setup.py like this::

        'owmeta_core.commands': [
            'subcommand_name = module.path.for:TheSubCommand',
            'sub.sub.subcommand_name = module.path.for:TheSubSubSubCommand',
        ],

    Where, ``subcommand_name`` will be the name of the sub-command under the top-level
    ``owm`` command and ``module.path.for.TheSubCommand`` will be the class implementing
    the command. To add to existing sub-commands, one indicates the place in the command
    hierarchy as in ``sub.sub.subcommand_name``: ``TheSubSubSubCommand`` would be
    available under the (hypothetical) existing ``owm sub sub`` command as ``owm sub sub
    subcommand_name``.

    So-called "hints" can affect the way command implementations are interepreted such as
    indicating whether a method argument should be read in as a positional argument or
    an option and what a command-line option should be named (as opposed to deriving it
    from a parameter name or member variable). There is a set of hints which are a part of
    owmeta-core (see `CLI_HINTS`), but these can be augmented by specifying entry points
    like this::

        'owmeta_core.cli_hints': 'hints = module.path.for:CLI_HINTS',

    If ``module.path.for.CLI_HINTS`` is a dictionary, it will get added to the hints,
    potentially affecting any sub-commands without hints already available. The entry
    point name (``hints`` in the example) is only used for error-reporting by this module.
    Although this is not strictly enforced, adding hints for sub-commands published by
    other modules, including owmeta-core, should be avoided to ensure consistent behavior
    across installations. See `owmeta_core.cli_hints` source for the format of hints.

    See `CLICommandWrapper` for more details on how the command line options are constructed.
    '''
    logging.basicConfig()

    top_command = _augment_subcommands_from_entry_points()
    p = top_command()
    p.log_level = 'WARN'
    p.message = print
    p.repository_provider = GitRepoProvider()
    p.non_interactive = False
    if environ.get('OWM_CLI_PROFILE'):
        from cProfile import Profile
        profiler = Profile()
        profiler.enable()

    try:
        _helper(p)
    except (CLIUserError, GenericUserError) as e:
        s = str(e)
        if not s:
            from .utils import FCN
            # In case someone forgets to add a helpful message for their user error
            s = 'Received error: ' + FCN(type(e))
        die(s)
    finally:
        # Call 'disconnect' to clean up. If our top_command doesn't have a disconnect(), we
        # don't want to error-out, so check it actually exists.
        disconnect_method = getattr(p, 'disconnect', None)
        if disconnect_method:
            disconnect_method()

    if environ.get('OWM_CLI_PROFILE'):
        profiler.disable()
        profiler.dump_stats(environ['OWM_CLI_PROFILE'])
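
# A minimal sketch, not owmeta-core's actual loader, of how sub-commands
# registered under the 'owmeta_core.commands' entry-point group described in the
# docstring above could be discovered. Assumes Python 3.10+ for the ``group=``
# keyword of importlib.metadata.entry_points; ``discover_subcommands`` is an
# illustrative helper, not part of owmeta-core.
from importlib.metadata import entry_points


def discover_subcommands(group='owmeta_core.commands'):
    commands = {}
    for ep in entry_points(group=group):
        # Dotted names like 'sub.sub.subcommand_name' encode the position in
        # the command hierarchy
        commands[ep.name] = ep.load()
    return commands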
Exemple #45
0
def average_consumption():
    df = read_frame()
    print '500000 random unique households with average consumption:'
    df = df.drop_duplicates(keep=False)
    df = df.sample(n=500000)
    df['Average'] = (df['Sub_metering_1'] + df['Sub_metering_2'] + df['Sub_metering_3'])/3
    print df[:5]

def after_18():
    df = read_frame()
    print 'Households where after 18:00 Global_active_power per minute is more than 5 kW, Sub_metering_2 ' + \
    'is more than others, chosen every second result from first part and every fourth from second part'
    df = df[(df['Time'] > '18:00:00') & (df['Global_active_power'] > 5) &
    (df['Sub_metering_2'] > df['Sub_metering_1']) & (df['Sub_metering_2'] > df['Sub_metering_3'])]
    df1 = df[:len(df.index)/2:2]
    df2 = df[len(df.index)/2::4]
    frames = [df1, df2]
    result = pd.concat(frames)
    print result[:5]

if __name__ == '__main__':
    profile.enable()
    clean_data()
    profile.disable()
    Stats(profile).sort_stats('time').print_stats()
    active_power()
    voltage()
    intensity()
    average_consumption()
    after_18()
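
# A small variation on the __main__ block above: in Python 3.8+, cProfile.Profile
# is itself a context manager, which keeps the enable()/disable() pair balanced
# even if the profiled call raises. Assumes clean_data from this snippet is in
# scope; under Python 2 (as used above) the explicit enable/disable is required.
from cProfile import Profile
from pstats import Stats

with Profile() as prof:
    clean_data()
Stats(prof).sort_stats('time').print_stats()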
Exemple #46
0
class frame(IFrame):
    framelist = set()

    def __init__(self,
                 address="http://127.0.0.1:12000/",
                 time_step=500,
                 instrument=False,
                 profiling=False,
                 wire_format="cbor",
                 compress=False):
        frame.framelist.add(self)
        self.thread = None
        self.__app = None
        self.__appname = ""
        self.__host_typemap = {}
        self.__host_wire_format = {}
        self.__typemap = {}
        self.__name2type = {}
        self.object_store = dataframe()
        self.object_store.start_recording = True
        if not address.endswith('/'):
            address += '/'
        self.__address = address
        self.__default_wire_format = wire_format
        self.__compress = compress
        self.__time_step = (float(time_step) / 1000)
        self.__new = {}
        self.__mod = {}
        self.__del = {}
        self.__observed_types = set()
        self.__observed_types_new = set()
        self.__observed_types_mod = set()
        self.__curtime = time.time()
        self.__curstep = 0
        self.__start_time = time.strftime("%Y-%m-%d_%H-%M-%S")
        self.__instrumented = instrument
        self.__profiling = profiling
        self.__sessions = {}
        self.__host_to_push_groupkey = {}
        if instrument:
            self._instruments = {}
            self._instrument_headers = []
            self._instrument_headers.append('bytes sent')
            self._instrument_headers.append('bytes received')

    def __register_app(self, app):
        self.logger = self.__setup_logger("spacetime@" + self.__appname)
        self.__host_typemap = {}
        for address, tpmap in self.__app.__declaration_map__.items():
            if address == "default":
                address = self.__address
            fulladdress = address + self.__appname
            if fulladdress not in self.__host_typemap:
                self.__host_typemap[fulladdress] = tpmap
            else:
                for declaration in tpmap:
                    self.__host_typemap[fulladdress].setdefault(
                        declaration, set()).update(set(tpmap[declaration]))

        self.__default_wire_format = (
            self.__app.__special_wire_format__["default"]
            if "default" in self.__app.__special_wire_format__ else
            self.__default_wire_format)
        for host in self.__host_typemap:
            self.__host_wire_format[host] = (
                self.__app.__special_wire_format__[host]
                if host in self.__app.__special_wire_format__ else
                self.__default_wire_format)
        all_types = set()
        for host in self.__host_typemap:
            wire_format = self.__host_wire_format[host]
            jobj = dict([(k, [tp.__realname__ for tp in v])
                         for k, v in self.__host_typemap[host].items()])
            producing, getting, gettingsetting, deleting, setting, tracking = (
                self.__host_typemap[host].setdefault(Modes.Producing, set()),
                self.__host_typemap[host].setdefault(Modes.Getter, set()),
                self.__host_typemap[host].setdefault(Modes.GetterSetter,
                                                     set()),
                self.__host_typemap[host].setdefault(Modes.Deleter, set()),
                self.__host_typemap[host].setdefault(Modes.Setter, set()),
                self.__host_typemap[host].setdefault(Modes.Tracker, set()))
            self.__typemap.setdefault(Modes.Producing, set()).update(producing)
            self.__typemap.setdefault(Modes.Getter, set()).update(getting)
            self.__typemap.setdefault(Modes.GetterSetter,
                                      set()).update(gettingsetting)
            self.__typemap.setdefault(Modes.Deleter, set()).update(deleting)
            self.__typemap.setdefault(Modes.Setter, set()).update(setting)
            self.__typemap.setdefault(Modes.Tracker, set()).update(tracking)

            all_types_host = tracking.union(producing).union(getting).union(
                gettingsetting).union(deleting).union(setting)
            all_types.update(all_types_host)
            self.__observed_types.update(all_types_host)
            self.__observed_types_new.update(
                self.__host_typemap[host][Modes.Tracker].union(
                    self.__host_typemap[host][Modes.Getter]).union(
                        self.__host_typemap[host][Modes.GetterSetter]))

            self.__observed_types_mod.update(
                self.__host_typemap[host][Modes.Getter].union(
                    self.__host_typemap[host][Modes.GetterSetter]))

            jsonobj = json.dumps({
                "sim_typemap": jobj,
                "wire_format": wire_format,
                "app_id": self.__app.app_id
            })
            try:
                self.__sessions[host] = Session()
                if platform.system() == 'Java':
                    ignoreJavaSSL()
                    self.logger.info("Using custom HTTPAdapter for Jython")
                    self.__sessions[host].mount(host, MyJavaHTTPAdapter())
                    self.__sessions[host].verify = False
                resp = requests.put(
                    host,
                    data=jsonobj,
                    headers={'content-type': 'application/json'})
            except HTTPError as exc:
                self.__handle_request_errors(resp, exc)
                return False
            except ConnectionError:
                self.logger.exception("Cannot connect to host.")
                self.__disconnected = True
                return False
        self.__name2type = dict([(tp.__realname__, tp) for tp in all_types])
        self.object_store.add_types(all_types)
        for host in self.__host_typemap:
            self.__host_to_push_groupkey[host] = set([
                self.object_store.get_group_key(tp)
                for tp in self.__host_typemap[host][Modes.GetterSetter].union(
                    self.__host_typemap[host][Modes.Setter]).union(
                        self.__host_typemap[host][Modes.Producing]).union(
                            self.__host_typemap[host][Modes.Deleter])
            ])

        return True

    @staticmethod
    def loop():
        SpacetimeConsole().cmdloop()

    def get_instrumented(self):
        """
        Returns whether the frame is running instrumentation (True/False).
        """
        return self.__instrumented

    def get_curtime(self):
        """
        Returns the timestamp of the current step.
        """
        return self.__curtime

    def get_curstep(self):
        """
        Returns the current step value of the simulation.
        """
        return self.__curstep

    def get_timestep(self):
        """
        Returns the time-step value in milliseconds.
        """
        return self.__time_step

    def get_app(self):
        """
        Returns a reference to the application.
        """
        return self.__app

    def attach_app(self, app):
        """
        Receives a reference to the application (implementing IApplication).

        Arguments:
        app : spacetime-conformant Application

        Exceptions:
        None
        """
        self.__app = app
        self.__appname = app.__class__.__name__ + "_" + self.__app.app_id

    def run_async(self):
        """
        Starts application in non-blocking mode.

        Arguments:
        None

        Exceptions:
        None
        """
        self.thread = Parallel(target=self.__run)
        self.thread.daemon = True
        self.thread.start()

    def run_main(self):
        self.__run()

    def run(self):
        """
        Starts application in blocking mode.

        Arguments:
        None

        Exceptions:
        None
        """
        self.thread = Parallel(target=self.__run)
        self.thread.daemon = True
        self.thread.start()
        self.thread.join()

    def __clear(self):
        self.__disconnected = False
        self.__app.done = False
        self.object_store.clear_all()
        self.__new = {}
        self.__mod = {}
        self.__del = {}

    def __run(self):
        self.__clear()
        if not self.__app:
            raise NotImplementedError("App has not been attached")
        success = self.__register_app(self.__app)
        if success:
            try:
                if self.__profiling:
                    try:
                        from cProfile import Profile  # @UnresolvedImport
                        if not os.path.exists('stats'):
                            os.mkdir('stats')
                        self.__profile = Profile()
                        self.__profile.enable()
                        self.logger.info("starting profiler for %s",
                                         self.__appname)
                    except:
                        self.logger.error(
                            "Could not import cProfile (not supported in Jython)."
                        )
                        self.__profile = None
                        self.__profiling = None

                self.__pull()
                self.__app.initialize()
                self.__push()
                while not self.__app.done:
                    st_time = time.time()
                    self.__pull()
                    self.__app.update()
                    self.__push()
                    end_time = time.time()
                    timespent = end_time - st_time
                    self.__curstep += 1
                    self.__curtime = time.time()
                    # time spent on execution loop
                    if timespent < self.__time_step:
                        time.sleep(float(self.__time_step - timespent))
                    else:
                        self.logger.info("loop exceeded maximum time: %s ms",
                                         timespent)

                    # Writes down total time spent in spacetime methods
                    if self.__instrumented:
                        si.record_instruments(timespent, self)
                # One last time, because _shutdown may delete objects from the store
                self.__pull()
                self._shutdown()
                self.__push()
                self.__unregister_app()
            except ConnectionError as cerr:
                self.logger.error("A connection error occurred: %s",
                                  cerr.message)
            except HTTPError as herr:
                self.logger.error(
                    "A fatal error has occurred while communicating with the server: %s",
                    herr.message)
            except:
                self.logger.exception("An unknown error occurred.")
                raise
            finally:
                if self.__profiling:
                    self.__profile.disable()
                    self.__profile.create_stats()
                    self.__profile.dump_stats(
                        os.path.join(
                            'stats', "%s_stats_%s.ps" %
                            (self.__start_time, self.__appname)))
        else:
            self.logger.info("Could not register, exiting run loop...")

    def app_done(self):
        """
        app_done

        Returns whether app has finished running or not
        """
        return self.__app.done

    def get(self, tp, oid=None):
        """
        Retrieves objects from local data storage. If oid is provided, returns
        the object identified by oid. Otherwise, returns the list of all objects
        matching type tp.

        Arguments:
        tp : PCC set type being fetched
        oid : primary key of an individual object.

        Exceptions:
        - ID does not exist in store
        - Application does not annotate that type
        """
        if tp in self.__observed_types:
            if oid:
                # Have to get this to work
                return self.object_store.get(tp, oid)
            return self.object_store.get(tp)
        else:
            raise Exception("Application %s does not annotate type %s" %
                            (self.__appname, tp))

    def add(self, obj):
        """
        Adds an object to be stored and tracked by spacetime.

        Arguments:
        obj : PCC object to be stored

        Exceptions:
        - Application is not annotated as a producer
        """
        if obj.__class__ in self.__typemap[Modes.Producing]:
            self.object_store.append(obj.__class__, obj)
        else:
            raise Exception("Application %s is not a producer of type %s" %
                            (self.__appname, obj.__class__))

    def delete(self, tp, obj):
        """
        Deletes an object currently stored and tracked by spacetime.

        Arguments:
        tp: PCC type of object to be deleted
        obj : PCC object to be deleted

        Exceptions:
        - Application is not annotated as a Deleter
        """

        if tp in self.__typemap[Modes.Deleter]:
            self.object_store.delete(tp, obj)
        else:
            raise Exception("Application %s is not registered to delete %s" %
                            (self.__appname, tp))

    def get_new(self, tp):
        """
        Retrieves new objects of type 'tp' retrieved in last pull (i.e. since
        last tick).

        Arguments:
        tp: PCC type for retrieving list of new objects

        Exceptions:
        None

        Note:
        Application should be annotated as a Getter, GetterSetter, or Tracker,
        otherwise result is always an empty list.
        """
        if tp in self.__observed_types_new:
            return self.object_store.get_new(tp)
        else:
            self.logger.warn(
                ("Checking for new objects of type %s, but not "
                 "a Getter, GetterSetter, or Tracker of type. Empty list "
                 "always returned"), tp)
            return []

    def get_mod(self, tp):
        """
        Retrieves objects of type 'tp' that were modified since last pull
        (i.e. since last tick).

        Arguments:
        tp: PCC type for retrieving list of modified objects

        Exceptions:
        None

        Note:
        Application should be annotated as a Getter or GetterSetter, otherwise
        result is always an empty list.
        """
        if tp in self.__observed_types_mod:
            return self.object_store.get_mod(tp)
        else:
            self.logger.warn(("Checking for modifications in objects of type "
                              "%s, but not a Getter or GetterSetter of type. "
                              "Empty list always returned"), tp)
            return []

    def get_deleted(self, tp):
        """
        Retrieves objects of type 'tp' that were deleted since last pull
        (i.e. since last tick).

        Arguments:
        tp: PCC type for retrieving list of deleted objects

        Exceptions:
        None

        Note:
        Application should be annotated as a Getter, GetterSetter, or Tracker,
        otherwise result is always an empty list.
        """
        if tp in self.__observed_types_new:
            return self.object_store.get_deleted(tp)
        else:
            self.logger.warn(
                ("Checking for deleted objects of type %s, but "
                 "not a Getter, GetterSetter, or Tracker of type. Empty list "
                 "always returned"), tp)
            return []

    def __handle_request_errors(self, resp, exc):
        if resp.status_code == 401:
            self.logger.error(
                "This application is not registered at the server. Stopping..."
            )
            raise
        else:
            self.logger.warn("Non-success code received from server: %s %s",
                             resp.status_code, resp.reason)

    @timethis
    def __process_pull_resp(self, resp):
        if resp and "gc" in resp:
            self.object_store.apply_changes(resp)
            self.object_store.clear_record()

    @timethis
    def __pull(self):
        if self.__disconnected:
            return
        if self.__instrumented:
            self._instruments['bytes received'] = 0
        updates = DataframeChanges_Base()
        try:
            for host in self.__host_typemap:
                type_dict = {}
                # Need to give mechanism to selectively ask for some changes. Very hard to implement in current dataframe scheme.
                resp = self.__sessions[host].get(host + "/updated", data={})
                try:
                    resp.raise_for_status()
                    if self.__instrumented:
                        self._instruments['bytes received'] = len(resp.content)
                    data = resp.content
                    #print data
                    DF_CLS, content_type = FORMATS[
                        self.__host_wire_format[host]]
                    dataframe_change = DF_CLS()
                    dataframe_change.ParseFromString(data)
                    updates.CopyFrom(dataframe_change)
                except HTTPError as exc:
                    self.__handle_request_errors(resp, exc)
            #json.dump(updates, open("pull_" + self.__appname + ".json", "a") , sort_keys = True, separators = (',', ': '), indent = 4)
            self.__process_pull_resp(updates)
        except ConnectionError:
            self.logger.exception("Disconnected from host.")
            self.__disconnected = True
            self._stop()

    @timethis
    def __push(self):
        if self.__disconnected:
            return
        if self.__instrumented:
            self._instruments['bytes sent'] = 0
        changes = self.object_store.get_record()
        #json.dump(changes, open("push_" + self.__appname + ".json", "a") , sort_keys = True, separators = (',', ': '), indent = 4)

        for host in self.__host_typemap:
            try:
                DF_CLS, content_type = FORMATS[self.__host_wire_format[host]]
                changes_for_host = DF_CLS()
                changes_for_host["gc"] = RecursiveDictionary([
                    (gck, gc) for gck, gc in changes["gc"].items()
                    if gck in self.__host_to_push_groupkey[host]
                ])
                if "types" in changes:
                    changes_for_host["types"] = changes["types"]
                dictmsg = changes_for_host.SerializeToString()
                #update_dict = {"update_dict": protomsg}
                if self.__instrumented:
                    self._instruments['bytes sent'] = sys.getsizeof(dictmsg)
                headers = {'content-type': content_type}
                if self.__compress:
                    headers['content-encoding'] = 'gzip'
                    dictmsg = zlib.compress(dictmsg)
                resp = self.__sessions[host].post(host + "/updated",
                                                  data=dictmsg,
                                                  headers=headers)
            except TypeError:
                self.logger.exception("error encoding obj. Object: %s",
                                      changes_for_host)
            except HTTPError as exc:
                self.__handle_request_errors(resp, exc)
            except ConnectionError:
                self.logger.exception("Disconnected from host.")
                self.__disconnected = True
                self._stop()

        self.object_store.clear_record()
        self.object_store.clear_buffer()

    def _shutdown(self):
        """
        _shutdown

        Called after the frame execution loop stops, in the last pull/push
        iteration
        """
        self.__app.shutdown()

    def _stop(self):
        """
        _stop

        Called by frame's command prompt on quit/exit
        """
        self.__app.done = True

    def __unregister_app(self):
        for host in self.__host_typemap:
            resp = requests.delete(host)
            self.logger.info("Successfully deregistered from %s", host)

    def __setup_logger(self, name, file_path=None):
        logger = logging.getLogger(name)
        # Set default logging handler to avoid "No handler found" warnings.
        logger.addHandler(NullHandler())
        logger.setLevel(logging.DEBUG)
        logger.debug("Starting logger for %s", name)
        return logger
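
# A minimal usage sketch for the frame class above. MyApp is a hypothetical
# spacetime application (decorated so that __declaration_map__ and
# __special_wire_format__ exist, and implementing initialize/update/shutdown
# plus a 'done' flag per IApplication); it is not part of the original code.
app = MyApp()
f = frame(address="http://127.0.0.1:12000/", time_step=500)
f.attach_app(app)
f.run()  # blocking; use f.run_async() to run the loop in a background thread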
Exemple #47
0
class BaseExperiment(object):
    memory_measure_interval = 0.1
    """Indicates how often the memory should be measured, in seconds."""
    run_descriptions = {
        'setup_authsetup': 'always',
        'register_keygen': 'always',
        'encrypt': 'always',
        'update_keys': 'always',
        'data_update': 'always',
        'policy_update': 'always',
        'decrypt': 'always'
    }
    """
    Description of which steps to run during the experiment. Values can be one of either:
    - 'always': This step is run always, in each iteration for each case
    - 'once': The step is run for each implementation once, prior to all experiments. This can be helpful if only
    the encryption and decryption are relevant.
    - 'never': This step is never executed.
    """
    attribute_authority_descriptions = [{
        'name':
        'AUTHORITY0',
        'attributes':
        list(
            map(lambda a: a + '@AUTHORITY0', [
                'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT',
                'NINE', 'TEN'
            ]))
    }, {
        'name':
        'AUTHORITY1',
        'attributes':
        list(
            map(lambda a: a + '@AUTHORITY1', [
                'ONE', 'TWO', 'THREE', 'FOUR', 'FIVE', 'SIX', 'SEVEN', 'EIGHT',
                'NINE', 'TEN'
            ]))
    }]  # type: List[Dict[str, Any]]
    """
    Description of the attribute authorities to use in this experiment.
    Is a list of dicts, where each dict contains at least a name and a list of attributes.
    """
    user_descriptions = [  # type: List[Dict[str, Any]]
        {
            'gid': 'BOB',
            'attributes': {
                'AUTHORITY0':
                attribute_authority_descriptions[0]['attributes'],
                'AUTHORITY1': attribute_authority_descriptions[1]['attributes']
            }
        },
        {
            'gid': 'DOCTOR',
            'attributes': {
                'AUTHORITY0':
                attribute_authority_descriptions[0]['attributes'],
                'AUTHORITY1': attribute_authority_descriptions[1]['attributes']
            }
        },
    ]
    """Description of the users to use in this experiment."""
    generated_file_sizes = [10 * 1024 * 1024]  # type: List[int]
    """List of sizes of files to randomly generate as input files before the experiment."""
    generated_file_amount = 2
    """Amount of random files to generate for each size."""
    encrypted_file_size = generated_file_sizes[0]
    """Size of the file to encrypt and decrypt."""
    read_policy = '(ONE@AUTHORITY0 OR SIX@AUTHORITY1)' \
                  ' AND (TWO@AUTHORITY0 OR SEVEN@AUTHORITY1)' \
                  ' AND (THREE@AUTHORITY0 OR EIGHT@AUTHORITY1)'
    """The read policy to use when encrypting."""
    write_policy = read_policy
    """The write policy to use when encrypting."""
    updated_read_policy = '(SIX@AUTHORITY0 OR ONE@AUTHORITY1)' \
                          ' AND (SEVEN@AUTHORITY0 OR TWO@AUTHORITY1)' \
                          ' AND (EIGHT@AUTHORITY0 OR THREE@AUTHORITY1)'
    """The read policy to use when updating the policy"""
    updated_write_policy = updated_read_policy
    """The write policy to use when updating the policy"""
    measurement_types = [
        MeasurementType.timings, MeasurementType.cpu, MeasurementType.memory
    ]
    """The types of measurements to perform in this experiment for each run."""
    measurement_types_once = [MeasurementType.storage_and_network]
    """The types of measurements to perform only once during this experiment."""
    implementations = implementations
    """The implementations to run this experiments on."""
    measurement_repeat = 100
    """The amount of times to repeat every measurement for each case and implementation."""
    def __init__(self, cases: List[ExperimentCase] = None) -> None:
        self.state = ExperimentState()  # type: ExperimentState
        self.output = ExperimentOutput(self.get_name(),
                                       self.state)  # type: ExperimentOutput
        """
        The current state of the experiment.
        This shows for example which implementation we currently use, and which measurements are performed.
        """

        # Experiment variables
        self.location = None  # type: str
        """Location of the encrypted data. Is set during the experiment"""
        self.memory_usages = None  # type: List[Tuple[str, List[float]]]
        self.cpu_times = None  # type: List[Tuple[str, float]]
        self.profiler = None  # type: Profile
        self.psutil_process = None  # type: Process

        # Use case actors
        self.central_authority = None  # type: CentralAuthority
        self.attribute_authorities = None  # type: List[AttributeAuthority]
        self.user_clients = None  # type: List[UserClient]
        self.insurance = None  # type: InsuranceService

        # Experiment cases
        if cases is None:
            cases = [ExperimentCase('base', None)]
        self.cases = cases  # type: List[ExperimentCase]

    def global_setup(self) -> None:
        """
        Setup all things for this experiment independent of run, implementation and case,
        like generating random input files.
        This method is only called once for each experiment, namely at the very start.
        """
        self.generate_files()

    def generate_files(self) -> None:
        """Generate all input files as specified by the generated_file_sizes property."""
        file_generator = RandomFileGenerator()
        input_path = self.get_experiment_input_path()
        for size in self.generated_file_sizes:
            file_generator.generate(size,
                                    self.generated_file_amount,
                                    input_path,
                                    skip_if_exists=True,
                                    verbose=True)

    @property
    def file_name(self) -> str:
        """File name of the file to encrypt."""
        return join(self.get_experiment_input_path(),
                    '%i-0' % self.encrypted_file_size)

    @property
    def update_file_name(self) -> str:
        """File name of the file containing the data to use when updating the data."""
        return join(self.get_experiment_input_path(),
                    '%i-1' % self.encrypted_file_size)

    def setup(self) -> None:
        """
        Setup for a single run of this experiment for a single implementation, case and measurement type.
        """
        if OUTPUT_DETAILED and not path.exists(
                self.output.experiment_case_iteration_results_directory()):
            os.makedirs(
                self.output.experiment_case_iteration_results_directory())
        self.reset_user_clients()
        self.clear_insurance_storage()

    def reset_user_clients(self):
        if self.user_clients is not None:
            for user_client in self.user_clients:
                user_client.monitor_network = self.state.measurement_type == MeasurementType.storage_and_network
                user_client.reset_connections()

    def tear_down(self) -> None:
        """
        Tear down after a single run of the experiment for a single implementation, case and measurement type.
        Note: this is run after the measurements are stopped, but before the measurements are finished.
        """
        if self.state.measurement_type == MeasurementType.storage_and_network:
            for authority in self.attribute_authorities:
                authority.save_attribute_keys()

    def reset_variables(self):
        self.location = None
        self.memory_usages = None
        self.cpu_times = None
        self.profiler = None
        self.psutil_process = None

        # Use case actors
        self.central_authority = None
        self.attribute_authorities = None
        self.user_clients = None
        self.insurance = None

    def setup_implementation_directories(self) -> None:
        """
        Setup the directories used in this experiment for a single implementation.
        Empties directories and creates them if they do not exist.
        """
        assert self.state.implementation is not None

        # Empty storage directories
        if os.path.exists(self.get_user_client_storage_path()):
            shutil.rmtree(self.get_user_client_storage_path())

        # Create directories
        if not os.path.exists(self.get_experiment_input_path()):
            os.makedirs(self.get_experiment_input_path())
        os.makedirs(self.get_user_client_storage_path())

    def create_central_authority(self):
        self.central_authority = self.state.implementation.create_central_authority(
            storage_path=self.get_central_authority_storage_path())

    def create_attribute_authorities(
            self, implementation: BaseImplementation) -> None:
        """
        Create the attribute authorities defined in the descriptions (self.attribute_authority_descriptions).
        :param implementation: The implementation to use.
        :return: None. The created authorities are stored in self.attribute_authorities.
        """
        self.attribute_authorities = list(
            map(lambda d: self.create_attribute_authority(d, implementation),
                self.attribute_authority_descriptions))

    def create_attribute_authority(
            self, authority_description: Dict[str, Any],
            implementation: BaseImplementation) -> AttributeAuthority:
        """
        Create an attribute authority defined in a description.
        :param authority_description: The description of the authority.
        :param implementation: The implementation to use.
        :return: The attribute authority.
        """
        attribute_authority = implementation.create_attribute_authority(
            authority_description['name'],
            storage_path=self.get_attribute_authority_storage_path())
        return attribute_authority

    def create_user_clients(self, implementation: BaseImplementation) -> None:
        """
        Create the user clients defined in the descriptions (self.user_descriptions).
        :param implementation: The implementation to use.
        :return: None. The created clients are stored in self.user_clients.
        """
        self.user_clients = list(
            map(lambda d: self.create_user_client(d, implementation),
                self.user_descriptions))

    def create_user_client(self, user_description: Dict[str, Any],
                           implementation: BaseImplementation) -> UserClient:
        """
        Create a user client defined in the descriptions (self.user_descriptions).
        :param user_description: The description of the user.
        :param implementation: The implementation to use.
        :return: The created user client.
        """
        user = User(user_description['gid'], implementation)
        client = UserClient(user,
                            implementation,
                            storage_path=self.get_user_client_storage_path(),
                            monitor_network=self.state.measurement_type ==
                            MeasurementType.storage_and_network)
        return client

    def _run_setup(self) -> None:
        # Create central authority
        self.central_authority.central_setup()
        self.central_authority.save_global_parameters()
        self._setup_insurance()

    def _setup_insurance(self) -> None:
        # Create insurance service
        self.insurance = InsuranceService(
            self.state.implementation.serializer,
            self.central_authority,
            self.state.implementation.public_key_scheme,
            storage_path=self.get_insurance_storage_path())

    def _run_authsetup(self, authority: AttributeAuthority) -> None:
        attributes = next(
            description['attributes']
            for description in self.attribute_authority_descriptions
            if description['name'] == authority.name)  # type: List[str]
        authority.setup(self.central_authority, attributes, 1)
        self.insurance.add_authority(authority)
        authority.save_attribute_keys()

    def _run_register(self, user_client: UserClient) -> None:
        # Create user clients
        user_client.register(self.insurance)

    def _run_keygen(self, user_client: UserClient) -> None:
        """
        Generate the user secret keys for the given user client by requesting the
        keys from the attribute authorities. The attributes to issue/generate are taken from the user
        descriptions (self.user_descriptions)
        :requires: self.user_clients is not None
        """
        attributes = next(description['attributes']
                          for description in self.user_descriptions
                          if description['gid'] == user_client.user.gid)
        user_client.request_secret_keys_multiple_authorities(attributes,
                                                             1)  # type: ignore
        user_client.save_user_secret_keys()

    def _run_encrypt(self) -> None:
        self.location = self.user_clients[0].encrypt_file(
            self.file_name, self.read_policy, self.write_policy)

    def _run_update_keys(self, authority: AttributeAuthority) -> None:
        authority.update_keys(1)

    def _run_data_update(self) -> None:
        with open(self.update_file_name, 'rb') as update_file:
            self.user_clients[1].update_file(self.location, update_file.read())

    def _run_policy_update(self) -> None:
        # Performed by the owner, as only the owner is allowed to do this
        self.user_clients[0].update_policy_file(self.location,
                                                self.updated_read_policy,
                                                self.updated_write_policy, 1)

    def _run_decrypt(self) -> None:
        self.user_clients[1].decrypt_file(self.location)

    def run(self) -> None:
        self.global_setup()

        for implementation in self.implementations:
            self.state.implementation = implementation
            self.setup_implementation_directories()

            if self.run_descriptions['setup_authsetup'] == 'once':
                self.create_central_authority()
                self.create_attribute_authorities(self.state.implementation)
                self._run_setup()
                for authority in self.attribute_authorities:
                    self._run_authsetup(authority)
            if self.run_descriptions['register_keygen'] == 'once':
                self.create_user_clients(self.state.implementation)
                for user_client in self.user_clients:
                    self._run_register(user_client)
                    self._run_keygen(user_client)
            if self.run_descriptions['encrypt'] == 'once':
                self._run_encrypt()
            if self.run_descriptions['update_keys'] == 'once':
                for authority in self.attribute_authorities:
                    self._run_update_keys(authority)
            if self.run_descriptions['decrypt'] == 'once':
                self._run_decrypt()

            for i in range(0, self.measurement_repeat):
                for case in self.cases:
                    for measurement_type in self.measurement_types:  # type: ignore
                        self.state.iteration = i
                        self.state.case = case
                        self.state.measurement_type = measurement_type

                        self.run_current_state()

            for case in self.cases:
                for measurement_type in self.measurement_types_once:  # type: ignore
                    self.state.iteration = 0
                    self.state.case = case
                    self.state.measurement_type = measurement_type

                    self.run_current_state()

            self.reset_variables()

    def run_current_state(self) -> None:
        self.log_current_state()
        # noinspection PyBroadException
        try:
            self.setup()
            self.start_measurements()

            if self.run_descriptions['setup_authsetup'] == 'always':
                self.create_central_authority()
                self.create_attribute_authorities(self.state.implementation)
                self.run_step(ABEStep.setup, self._run_setup)
                for authority in self.attribute_authorities:
                    self.run_step(ABEStep.authsetup, self._run_authsetup,
                                  [authority])
            if self.run_descriptions['register_keygen'] == 'always':
                self.create_user_clients(self.state.implementation)
                for user_client in self.user_clients:
                    self.run_step(ABEStep.register, self._run_register,
                                  [user_client])
                    self.run_step(ABEStep.keygen, self._run_keygen,
                                  [user_client])
            if self.run_descriptions['encrypt'] == 'always':
                self.run_step(ABEStep.encrypt, self._run_encrypt)
            if self.run_descriptions['update_keys'] == 'always':
                for authority in self.attribute_authorities:
                    self.run_step(ABEStep.update_keys, self._run_update_keys,
                                  [authority])
            if self.run_descriptions['data_update'] == 'always':
                self.run_step(ABEStep.data_update, self._run_data_update)
            if self.run_descriptions['policy_update'] == 'always':
                self.run_step(ABEStep.policy_update, self._run_policy_update)
            if self.run_descriptions['decrypt'] == 'always':
                self.run_step(ABEStep.decrypt, self._run_decrypt)

            self.stop_measurements()
            self.tear_down()
            self.finish_measurements()
        except KeyboardInterrupt:
            raise
        except:
            self.output.output_error()

    def run_step(self,
                 abe_step: ABEStep,
                 method: Callable[..., None],
                 args: List[Any] = list()):
        if self.state.measurement_type == MeasurementType.memory:
            u = memory_usage((method, args, {}),
                             interval=self.memory_measure_interval)
            self.memory_usages.append(
                (abe_step.name, [min(u),
                                 max(u),
                                 max(u) - min(u),
                                 len(u)]))
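            # memory_usage((method, args, {}), interval=...) runs method(*args)
            # in-process and samples the process's memory every ``interval``
            # seconds, returning the list of samples; min/max/diff/len above
            # summarize baseline, peak, growth, and sample count.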
        elif self.state.measurement_type == MeasurementType.cpu:
            times_before = self.psutil_process.cpu_times()
            method(*args)  # type: ignore
            times_after = self.psutil_process.cpu_times()
            self.cpu_times.append(
                (abe_step.name, (times_after.user - times_before.user) +
                 (times_after.system - times_before.system)))
        else:
            method(*args)  # type: ignore

    def log_current_state(self) -> None:
        """
        Log the current state of the experiment, so the progress of the sequence can be followed.
        """
        logging.info(
            "=> Running %s with implementation %s (%d/%d), iteration %d/%d, case %s, measurement %s"
            % (self.get_name(), self.state.implementation.get_name(),
               implementations.index(self.state.implementation) + 1,
               len(implementations), self.state.iteration + 1,
               self.measurement_repeat, self.state.case.name,
               str(self.state.measurement_type)))

    def start_measurements(self) -> None:
        """
        Start the measurements for a single run.
        """
        logging.debug("Experiment.start")
        if self.state.measurement_type == MeasurementType.timings:
            self.profiler = Profile()
            self.profiler.enable()
        elif self.state.measurement_type == MeasurementType.cpu:
            self.cpu_times = list()
            self.psutil_process = Process()
        elif self.state.measurement_type == MeasurementType.memory:
            self.memory_usages = list()

    def stop_measurements(self) -> None:
        """
        Stop the measurements for the current run, but do not export the results yet.
        """
        if self.state.measurement_type == MeasurementType.timings:
            self.profiler.disable()

    def finish_measurements(self) -> None:
        """
        Finish the stopped measurements by exporting the results to the output files.
        """
        logging.debug("Experiment.finish")
        if self.state.measurement_type == MeasurementType.timings:
            self.output.output_timings(self.profiler)
        elif self.state.measurement_type == MeasurementType.memory:
            self.output.output_case_results(
                'memory',
                self.memory_usages,
                variables=['min', 'max', 'diff', 'amount'])
        elif self.state.measurement_type == MeasurementType.storage_and_network:
            self.output.output_connections(self.get_connections())
            self.output.output_storage_space([{
                'path':
                self.get_insurance_storage_path(),
                'filename_mapper':
                lambda file: path.splitext(file)[1].strip('.')
            }, {
                'path':
                self.get_user_client_storage_path()
            }, {
                'path':
                self.get_attribute_authority_storage_path()
            }, {
                'path':
                self.get_central_authority_storage_path()
            }])
        elif self.state.measurement_type == MeasurementType.cpu:
            self.output.output_cpu_times(self.cpu_times)

    def get_user_client(self, gid: str) -> UserClient:
        """
        Gets the UserClient for the given global identifier, or returns None.
        :param gid: The global identifier.
        :return: The user client or None.
        """
        return next((x for x in self.user_clients if x.user.gid == gid), None)

    def get_attribute_authority(self, name: str) -> AttributeAuthority:
        """
        Gets the AttributeAuthority for the given name, or returns None.
        :param name: The authority name.
        :return: The attribute authority or None.
        """
        return next((x for x in self.attribute_authorities if x.name == name),
                    None)

    def get_connections(self) -> List[BaseConnection]:
        """
        Get all connections used in this experiment of which the usage should be outputted.
        :return: A list of connections
        """
        result = []  # type: List[BaseConnection]
        for user_client in self.user_clients:
            result += [user_client.insurance_connection]
            result += user_client.authority_connections.values()
        return result

    def get_name(self) -> str:
        """
        Gets the name of this experiment.
        :return: The name of this experiment.
        """
        return self.__class__.__name__

    def clear_insurance_storage(self) -> None:
        """
        Clear the storage as used by the insurance company for the ciphertexts.
        """
        if os.path.exists(self.get_insurance_storage_path()):
            shutil.rmtree(self.get_insurance_storage_path())
        os.makedirs(self.get_insurance_storage_path())

    def clear_attribute_authority_storage(self) -> None:
        """
        Clear the storage as used by the attribute authorities.
        """
        if os.path.exists(self.get_attribute_authority_storage_path()):
            shutil.rmtree(self.get_attribute_authority_storage_path())
        os.makedirs(self.get_attribute_authority_storage_path())

    def get_experiment_storage_base_path(self) -> str:
        """
        Gets the base path of the location to be used for storage in this experiment.
        """
        return os.path.join(os.path.dirname(os.path.realpath(__file__)),
                            '../data/experiments/%s' % self.get_name())

    def get_experiment_input_path(self) -> str:
        """
        Gets the path of the location to be used for the inputs of the experiment.
        """
        return os.path.join(self.get_experiment_storage_base_path(), 'input')

    def get_user_client_storage_path(self) -> str:
        """
        Gets the path of the location to be used for the storage of user client data.
        """
        return os.path.join(self.get_experiment_storage_base_path(),
                            self.state.implementation.get_name(), 'client')

    def get_insurance_storage_path(self) -> str:
        """
        Gets the path of the location to be used for the storage of the insurance service.
        """
        return os.path.join(self.get_experiment_storage_base_path(),
                            'insurance')

    def get_attribute_authority_storage_path(self) -> str:
        """
        Gets the path of the location to be used for the storage of the attribute authorities.
        """
        return os.path.join(self.get_experiment_storage_base_path(),
                            self.state.implementation.get_name(),
                            'authorities')

    def get_central_authority_storage_path(self) -> str:
        """
        Gets the path of the location to be used for the storage of the central authorities.
        """
        return os.path.join(self.get_experiment_storage_base_path(),
                            self.state.implementation.get_name(),
                            'central_authority')
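
# A hypothetical subclass sketch (not part of the original code) using the
# run_descriptions contract documented on BaseExperiment: the expensive
# setup/keygen phases run once per implementation, only encrypt/decrypt run in
# every measured iteration, and the update steps are skipped entirely.
class EncryptDecryptOnlyExperiment(BaseExperiment):
    run_descriptions = {
        'setup_authsetup': 'once',
        'register_keygen': 'once',
        'encrypt': 'always',
        'update_keys': 'never',
        'data_update': 'never',
        'policy_update': 'never',
        'decrypt': 'always',
    }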
Exemple #48
0
def inspect(self, method):
    profile = Profile()
    profile.enable()
    method()
    profile.disable()
    return StatsDict(profile).sort_stats('cumtime')
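
# Hypothetical usage of the inspect helper above, assuming StatsDict exposes the
# pstats.Stats interface (sort_stats returns the stats object, so calls chain):
#
#     stats = obj.inspect(expensive_function)
#     stats.print_stats(10)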
Exemple #49
0
def main():
    with open("CTRL", mode="r") as fp:
        site_t = None
        class_t = None
        plat_n = 0
        for line in fp:
            word = line.split()
            if not word:
                continue  # skip blank lines defensively
            if word[0] == "STRUC":
                for w in word:
                    worde = w.split("=")
                    if worde[0] == "ALAT":
                        alat = float(worde[1])
                plat_n = 4
            if word[0] == "DIM":
                for w in word:
                    worde = w.split("=")
                    if worde[0] == "NBAS":
                        nbas = int(worde[1])
                    if worde[0] == "NCLASS":
                        nclass = int(worde[1])
#  print(alat,nbas)
            if plat_n == 3:
                Plat = np.zeros((3, 3))
                if len(word) == 3:
                    worde = word[0].split("=")
                    Plat[0][0] = float(worde[1])
                    Plat[0][1] = float(word[1])
                    Plat[0][2] = float(word[2])
                else:
                    Plat[0][0] = float(word[1])
                    Plat[0][1] = float(word[2])
                    Plat[0][2] = float(word[3])
            elif plat_n == 2:
                Plat[1][0] = float(word[0])
                Plat[1][1] = float(word[1])
                Plat[1][2] = float(word[2])
            elif plat_n == 1:
                Plat[2][0] = float(word[0])
                Plat[2][1] = float(word[1])
                Plat[2][2] = float(word[2])
            plat_n -= 1
            if word[0] == "CLASS":
                class_t = True
                class_n = nclass
                allclass_ctrl = []
            if class_t:
                if len(word) < 5:
                    continue
                if class_n == nclass:
                    if word[2] == "Z=":
                        class_ctrl = Class_ctrl(word[1].split("=")[1], word[3],
                                                word[4].split("=")[1])
                    else:
                        class_ctrl = Class_ctrl(word[1].split("=")[1],
                                                word[2].split("=")[1],
                                                word[3].split("=")[1])
                    allclass_ctrl.append(class_ctrl)
                else:
                    if word[1] == "Z=":
                        class_ctrl = Class_ctrl(word[0].split("=")[1], word[2],
                                                word[3].split("=")[1])
                    else:
                        class_ctrl = Class_ctrl(word[0].split("=")[1],
                                                word[1].split("=")[1],
                                                word[2].split("=")[1])
                    allclass_ctrl.append(class_ctrl)
                class_n -= 1
                if class_n == 0:
                    class_t = False
            if word[0] == "SITE":
                site_t = True
                site_n = nbas
                allatom = []
            if site_t:
                if site_n == nbas:
                    atom = Atom()
                    for w in word:
                        worde = w.split("=")
                        if worde[0] == "ATOM":
                            atom.name = worde[1]
                    if len(word) == 5:
                        worde = word[2].split("=")
                        atom.pos = np.array(
                            [float(worde[1]),
                             float(word[3]),
                             float(word[4])])
                    elif len(word) == 6:
                        atom.pos = np.array(
                            [float(word[3]),
                             float(word[4]),
                             float(word[5])])
                    else:
                        print("error")
                        sys.exit()
                    allatom.append(atom)
                else:
                    atom = Atom()
                    atom.name = (word[0].split("=")[1])
                    if len(word) == 4:
                        worde = word[1].split("=")
                        atom.pos = np.array(
                            [float(worde[1]),
                             float(word[2]),
                             float(word[3])])
                    elif len(word) == 5:
                        atom.pos = np.array(
                            [float(word[2]),
                             float(word[3]),
                             float(word[4])])
                    else:
                        print("error")
                        sys.exit()
                    allatom.append(atom)
                site_n -= 1
                if site_n == 0:
                    site_t = False
            if site_t is False:
                break

    for i in allatom:
        i.ctrltuika(allclass_ctrl)
    for i in range(len(allatom)):
        print(allatom[i].name, allatom[i].ze, allatom[i].rad, allatom[i].pos)


#  print(Plat)
#  sys.exit()
#  Pos_np=np.array(Pos)
#  print(Atom)
#  print(Pos_np)
#  Plat=Plat*alat*BOHR
#  Pos_np=Pos_np*alat*BOHR
#  print(Pos_np)

    print("which is center ?(Atomname or Coordinate)")
    centeratom = input().split()
    c = 0
    origincart = np.array([0., 0., 0.])
    if len(centeratom) == 1:
        for i in range(nbas):
            if centeratom[0] == allatom[i].name:
                c = i
                break
        origincart = allatom[c].pos
    elif len(centeratom) == 3:
        for i in range(3):
            origincart[i] = float(centeratom[i])
    else:
        print("error")
        sys.exit()

    print("How is clustersize ? (please input cluster radius)")
    clradius = float(input())

    with open("cluster.in", mode="w") as clin:
        for i in range(3):
            clin.write(str("{:>11}".format("{:.8f}".format(Plat[i][0]))))
            clin.write(str("{:>11}".format("{:.8f}".format(Plat[i][1]))))
            clin.write(str("{:>11}".format("{:.8f}".format(Plat[i][2]))))
            clin.write("\n")

        clin.write(" 0 0 0\n")
        clin.write(str("{:>12}".format("{:.8f}".format(allatom[c].pos[0]))))
        clin.write(str("{:>12}".format("{:.8f}".format(allatom[c].pos[1]))))
        clin.write(str("{:>12}".format("{:.8f}".format(allatom[c].pos[2]))))
        clin.write("\n")
        clin.write(" " + str(nbas) + "\n")
        for i in range(nbas):
            clin.write(str("{:>3}".format(allatom[i].name)))
            clin.write(str("{:>12}".format("{:.8f}".format(
                allatom[i].pos[0]))))
            clin.write(str("{:>12}".format("{:.8f}".format(
                allatom[i].pos[1]))))
            clin.write(str("{:>12}".format("{:.8f}".format(
                allatom[i].pos[2]))))
            clin.write("\n")
        clin.write(str("{:.4f}".format(alat * BOHR)) + "\n")
        for i in range(3):
            clin.write(str(clradius) + "\n")

    sys.exit()
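    # (everything below this point is unreachable because of the exit above)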

    cutoff = clradius
    i0max = int(cutoff * FACTOR / np.sqrt(np.sum(Plat[0]**2)))
    i1max = int(cutoff * FACTOR / np.sqrt(np.sum(Plat[1]**2)))
    i2max = int(cutoff * FACTOR / np.sqrt(np.sum(Plat[2]**2)))

    clusteratom = []
    y = np.array([0., 0., 0.])
    n = 0
    pr = Profile()
    pr.enable()
    t1 = time.time()
    for i0 in range(-i0max, i0max + 1):
        for i1 in range(-i1max, i1max + 1):
            for i2 in range(-i2max, i2max + 1):
                for k in range(nbas):
                    y = (i0 * Plat[0] + i1 * Plat[1] + i2 * Plat[2]
                         + allatom[k].pos - origincart)
                    y *= alat * BOHR
                    if np.sqrt(sum(y**2)) < clradius:
                        atom = Atom()
                        atom.name = allatom[k].name
                        atom.ze = allatom[k].ze
                        atom.pos = copy.deepcopy(y)
                        clusteratom.append(atom)
                        n += 1
    print(n)
    t2 = time.time()
    print(t2 - t1)
    pr.disable()
    pr.print_stats()
    sys.exit()
    for i in range(len(clusteratom)):
        print(clusteratom[i].name, clusteratom[i].ze, clusteratom[i].pos)
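
The profiling in this script reduces to an enable()/disable() pair around the hot triple loop. A self-contained sketch of that pattern (placeholder workload, not the lattice computation above):

import math
from cProfile import Profile

pr = Profile()
pr.enable()
total = 0.0
for i in range(1, 200000):
    total += math.sqrt(i)  # stand-in for the distance computation
pr.disable()
pr.print_stats('cumtime')  # print_stats accepts a pstats sort key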
Example #50
0
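    # NOTE: the opening parser.add_argument(...) call that defines this
    # profiling option is truncated above this excerpt.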
                        action="store_true",
                        dest="profile",
                        default=False,
                        help="Enable this flag to the profile the script")

    args = parser.parse_args()

    # Check if the file paths from the user are valid
    for file_path in [
            args.input_file, args.occupations_output_file,
            args.states_output_file
    ]:
        if not path.isdir(path.dirname(file_path)):
            print("error: invalid file path:  " + file_path)
            exit(1)

    return args


if __name__ == "__main__":
    args = get_args()
    if args.profile:
        profile = Profile()
        profile.enable()
        main(args)
        profile.disable()
        profile_stats = Stats(profile).sort_stats(SortKey.CUMULATIVE)
        profile_stats.print_stats()
    else:
        main(args)
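
Printing to stdout suits quick checks; for longer runs the stats can also be persisted and re-examined later. A minimal variant (the output filename is illustrative):

from cProfile import Profile
from pstats import SortKey, Stats

profile = Profile()
profile.enable()
main(args)                      # main/args as defined above
profile.disable()
profile.dump_stats("run.prof")  # binary stats file, loadable with pstats
Stats("run.prof").sort_stats(SortKey.CUMULATIVE).print_stats(20)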
Example #51
0
class ProfilerResource(BaseResource):

    loggers_names = ["tamarco.profiler"]

    def __init__(self, *args, **kwargs):
        self.logger = logging.getLogger("tamarco.profiler")
        self.profiler = None
        self.profiler_file_path = None
        self.cpu_watcher_task = None

        super().__init__(*args, **kwargs)

    async def start(self):
        if await self.is_profiler_enabled():
            self.profiler_file_path = f"/tmp/{self.microservice.name}_profile"
            self._initialize_profiler()
            self.logger.info(
                f"Started Profiler resource with file: {self.profiler_file_path} and "
                f"time between snapshots: {TIME_BETWEEN_SNAPSHOTS}")
            self.cpu_watcher_task = asyncio.ensure_future(
                self.cpu_watcher(), loop=self.microservice.loop)
        else:
            self.logger.debug("Profiler resource disabled")
        await super().start()

    async def stop(self):
        if self.profiler:
            self._stop_profiler()
        if self.cpu_watcher_task:
            self.cpu_watcher_task.cancel()
        await super().stop()

    def _initialize_profiler(self):
        self.profiler = Profile()
        self.profiler.enable()

    def _stop_profiler(self):
        assert self.profiler, "Trying to stop a profiler when it isn't initialized"
        self.profiler.disable()
        self.profiler = None

    def _restart_profiler(self):
        self._stop_profiler()
        self._initialize_profiler()

    async def is_profiler_enabled(self) -> bool:
        try:
            microservices_with_profiler = await self.settings.get(
                "microservices_with_profiler")
        except SettingNotFound:
            return False
        else:
            return self.microservice.name in microservices_with_profiler

    async def cpu_watcher(self):
        while True:
            await asyncio.sleep(TIME_BETWEEN_SNAPSHOTS)
            self.save_profile_snapshot_to_file()

    def save_profile_snapshot_to_file(self):
        try:
            with open(self.profiler_file_path, "a") as profile_file:
                self.logger.debug(
                    f"Opened profile file {self.profiler_file_path}. Saving profile information"
                )
                profile_file.write(
                    f"\n\n###############\n# DATE : {datetime.datetime.now()}\n###############\n"
                )

                stats = pstats.Stats(self.profiler, stream=profile_file)
                stats.sort_stats("tottime")
                stats.print_stats(100)

                self._restart_profiler()
        except Exception:
            self.logger.warning(
                "Unexpected exception saving profile information")
Example #52
0
    def test_performance(self):
        @define
        def test_func(a, b):
            def body(a_id, a_name, b_id, b_name):
                pass

            return body, [a.id, a.name, b.id, b.name]

        struct = Construct({
            'r1':
            if_(self.a_cls.id, then_=test_func.defn(self.a_cls, self.b_cls)),
            'r2':
            if_(self.a_cls.name, then_=test_func.defn(self.a_cls, self.b_cls)),
            'r3':
            if_(self.b_cls.id, then_=test_func.defn(self.a_cls, self.b_cls)),
            'r4':
            if_(self.b_cls.name, then_=test_func.defn(self.a_cls, self.b_cls)),
        })

        row = (self.session.query(*struct._columns).join(self.b_cls.a).first())

        # warm-up
        for _ in _range(5000):
            struct._from_row(row)

        profile1 = Profile()
        profile1.enable()

        for _ in _range(5000):
            struct._from_row(row)

        profile1.disable()
        out1 = StringIO()
        stats1 = Stats(profile1, stream=out1)
        stats1.strip_dirs()
        stats1.sort_stats('calls').print_stats(10)
        print(out1.getvalue().lstrip())
        out1.close()

        row = (self.session.query(
            self.a_cls.id.label('a_id'),
            self.a_cls.name.label('a_name'),
            self.b_cls.id.label('b_id'),
            self.b_cls.name.label('b_name'),
        ).join(self.b_cls.a).first())

        def make_object(row):
            Object(
                dict(
                    r1=(test_func.func(row.a_id, row.a_name, row.b_id,
                                       row.b_name) if row.a_id else None),
                    r2=(test_func.func(row.a_id, row.a_name, row.b_id,
                                       row.b_name) if row.a_name else None),
                    r3=(test_func.func(row.a_id, row.a_name, row.b_id,
                                       row.b_name) if row.b_id else None),
                    r4=(test_func.func(row.a_id, row.a_name, row.b_id,
                                       row.b_name) if row.b_name else None),
                ))

        # warm-up
        for _ in _range(5000):
            make_object(row)

        profile2 = Profile()
        profile2.enable()

        for _ in _range(5000):
            make_object(row)

        profile2.disable()
        out2 = StringIO()
        stats2 = Stats(profile2, stream=out2)
        stats2.strip_dirs()
        stats2.sort_stats('calls').print_stats(10)
        print(out2.getvalue().lstrip())
        out2.close()

        self.assertEqual(stats1.total_calls, stats2.total_calls)
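
Comparing two code paths by Stats.total_calls, as this test does, can be reproduced in miniature (the workloads here are illustrative):

from cProfile import Profile
from pstats import Stats

def profile_calls(func, n=5000):
    # Run func() n times under cProfile and return its Stats.
    profile = Profile()
    profile.enable()
    for _ in range(n):
        func()
    profile.disable()
    return Stats(profile)

stats_a = profile_calls(lambda: sorted([3, 1, 2]))
stats_b = profile_calls(lambda: sorted((3, 1, 2)))
print(stats_a.total_calls, stats_b.total_calls)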
Example #53
0
    print("Taking off")
    vehicle.simple_takeoff(altitude)  # take off to altitude

    # Wait until vehicle reaches minimum altitude
    while vehicle.location.global_relative_frame.alt < altitude * 0.95:
        print("Altitude: ", vehicle.location.global_relative_frame.alt)
        sleep(1)

    print("Reached target altitude")


def land(vehicle):
    '''Commands vehicle to land'''
    print("Returning to launch")
    vehicle.mode = VehicleMode("RTL")

    print("Closing vehicle object")
    vehicle.close()


if __name__ == "__main__":
    PR = Profile()
    PR.enable()
    main()
    PR.disable()
    STREAM = StringIO()
    SORT_BY = "cumulative"
    PS = pstats.Stats(PR, stream=STREAM).sort_stats(SORT_BY)
    PS.print_stats()
    print(STREAM.getvalue())
Example #54
0
def profile_while_timing(statement, setup=Environment, number=5000):
    profile = Profile()
    profile.enable()
    timeit.timeit(statement, setup, number=number)
    profile.disable()
    profile.print_stats('cumtime')
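
Note that the timeit harness itself runs under the profiler here, so its frames appear in the output alongside the measured statement. A possible invocation (the setup string is illustrative; the Environment default is project-specific):

profile_while_timing('sorted(data)', setup='data = list(range(1000))',
                     number=5000)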
Example #55
0
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 31 09:10:22 2020

@author: disbr007
"""

import os
from cProfile import Profile
from pstats import Stats

prof = Profile()
# Profiling only starts at enable(); imports below are untimed (this disable() is a no-op)
prof.disable()
# Import main function to run
from dem_utils import dem_valid_data
# Set output directories for profile stats
profile_dir = r'E:\disbr007\umn\ms\scratch'
stats = os.path.join(profile_dir, '{}.stats'.format(__name__))
profile_txt = os.path.join(profile_dir, '{}_profile.txt'.format(__name__))
# Turn timing back on
prof.enable()

DEMS_PATH = r'E:\disbr007\umn\ms\scratch\banks_dems_multispec_test_5.shp'
OUT_SHP = r'E:\disbr007\umn\ms\scratch\banks_dems_multispec_test_5_vp.shp'
PRJ_DIR = r'E:\disbr007\umn\ms\scratch'
SCRATCH_DIR = r'E:\disbr007\umn\ms\scratch'
LOG_FILE = r'E:\disbr007\umn\ms\scratch\vp_profile_5.log'
PROCESSED = r'E:\disbr007\umn\ms\scratch\vp_processed_5.txt'

dem_valid_data.main(DEMS_PATH=DEMS_PATH,
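# (listing truncated here; remaining keyword arguments not shown)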
Example #56
0
    def run(self):
        '''
        Run the MLN learning with the given parameters.
        '''
        # load the MLN
        if isinstance(self.mln, MLN):
            mln = self.mln
        else:
            raise Exception('No MLN specified')

        # load the training databases
        if type(self.db) is list and all(
                [isinstance(e, Database) for e in self.db]):
            dbs = self.db
        elif isinstance(self.db, Database):
            dbs = [self.db]
        elif isinstance(self.db, str):
            db = self.db
            if db is None or not db:
                raise Exception('no training data given!')
            dbpaths = [os.path.join(self.directory, 'db', db)]
            dbs = []
            for p in dbpaths:
                dbs.extend(Database.load(mln, p, self.ignore_unknown_preds))
        else:
            raise Exception(
                'Unexpected type of training databases: %s' % type(self.db))
        if self.verbose:
            print('loaded %d database(s).' % len(dbs))

        watch = StopWatch()

        if self.verbose:
            confg = dict(self._config)
            confg.update(eval("dict(%s)" % self.params))
            if type(confg.get('db', None)) is list:
                confg['db'] = '%d Databases' % len(confg['db'])
            print(tabulate(
                sorted(confg.items(), key=lambda key_v: str(key_v[0])),
                headers=('Parameter:', 'Value:')))

        params = dict([(k, getattr(self, k)) for k in (
            'multicore', 'verbose', 'profile', 'ignore_zero_weight_formulas')])

        # for discriminative learning
        if issubclass(self.method, DiscriminativeLearner):
            if self.discr_preds == QUERY_PREDS:  # use query preds
                params['qpreds'] = self.qpreds
            elif self.discr_preds == EVIDENCE_PREDS:  # use evidence preds
                params['epreds'] = self.epreds

        # Gaussian prior settings
        if self.use_prior:
            params['prior_mean'] = self.prior_mean
            params['prior_stdev'] = self.prior_stdev
        # expand the parameters
        params.update(self.params)

        if self.profile:
            prof = Profile()
            print('starting profiler...')
            prof.enable()
        else:
            prof = None
        # set the debug level
        olddebug = logger.level
        logger.level = eval('logs.%s' % params.get('debug', 'WARNING').upper())
        mlnlearnt = None
        try:
            # run the learner
            mlnlearnt = mln.learn(dbs, self.method, **params)
            if self.verbose:
                print()
                print(headline('LEARNT MARKOV LOGIC NETWORK'))
                print()
                mlnlearnt.write()
        except SystemExit:
            print('Cancelled...')
        finally:
            if self.profile:
                prof.disable()
                print(headline('PROFILER STATISTICS'))
                ps = pstats.Stats(prof, stream=sys.stdout).sort_stats(
                    'cumulative')
                ps.print_stats()
            # reset the debug level
            logger.level = olddebug
        print()
        watch.finish()
        watch.printSteps()
        return mlnlearnt
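
The try/finally arrangement guarantees that the profiler report is printed even if learning is cancelled. Independent of the MLN machinery, the bare pattern is:

import sys
import pstats
from cProfile import Profile

prof = Profile()
prof.enable()
try:
    result = sum(i * i for i in range(100000))  # placeholder workload
finally:
    prof.disable()
    ps = pstats.Stats(prof, stream=sys.stdout).sort_stats('cumulative')
    ps.print_stats()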
Example #57
0
class GProfiler(gtk.Window):
    """An interactive graphical profiler.

    It is not visible by default; you will need to call .show() on
    the returned window.
    """
    def __init__(self):
        super(GProfiler, self).__init__()
        self.set_title(_("Layer Profiler"))
        self.set_default_size(400, 300)
        self.profile = Profile()
        self.add(gtk.VBox())

        self.ui = gtk.UIManager()
        actions = gtk.ActionGroup("ProfilerActions")
        actions.add_actions([
            ('Profiles', None, _("_Profiles")),
            ('OpenProfile', gtk.STOCK_OPEN, None, "<control>O", None, None),
            ('SaveProfile', gtk.STOCK_SAVE, None, "<control>S", None, None),
            ('ExportProfile', gtk.STOCK_CONVERT, _("_Export as Text"),
             "<control>E", None, None),
            ("Quit", gtk.STOCK_QUIT, None, None, None,
             lambda a: self.destroy()),
            ("Help", None, _("_Help")),
            ("About", gtk.STOCK_ABOUT, None, None, None, None),
        ])

        self.ui.insert_action_group(actions, -1)
        self.ui.add_ui_from_string(UI)
        self.add_accel_group(self.ui.get_accel_group())
        self.child.pack_start(self.ui.get_widget("/Menu"), expand=False)

        toolbar = gtk.Toolbar()
        self.child.pack_start(toolbar, expand=False, fill=True)
        self.model = ProfileModel()
        filtered = self.model.filter_new()
        align = gtk.Alignment(xscale=1.0, yscale=1.0)
        align.set_padding(0, 0, 6, 0)
        label = gtk.Label()
        self.entry = FilterEntry(filtered)
        label.set_mnemonic_widget(self.entry)
        label.set_text_with_mnemonic(_("_Filter:"))
        self.view = gtk.TreeView(gtk.TreeModelSort(filtered))
        self.view.connect('row-activated', self._open_file)
        self.view.set_enable_search(True)
        self.view.set_search_equal_func(self.model.matches)
        sw = gtk.ScrolledWindow()
        sw.set_policy(gtk.POLICY_NEVER, gtk.POLICY_AUTOMATIC)
        sw.set_shadow_type(gtk.SHADOW_IN)
        sw.add(self.view)
        box = gtk.HBox(spacing=3)
        box.pack_start(label, expand=False)
        box.pack_start(self.entry)
        align.add(box)
        self.child.pack_start(align, expand=False)
        self.child.pack_start(sw, expand=True)

        cell = gtk.CellRendererText()
        cell.props.ellipsize = pango.ELLIPSIZE_MIDDLE

        column = gtk.TreeViewColumn(_("Filename"), cell)
        column.add_attribute(cell, 'text', 1)
        column.set_sort_column_id(1)
        column.set_resizable(True)
        column.set_expand(True)
        self.view.append_column(column)

        column = gtk.TreeViewColumn(_("Function"), cell)
        column.add_attribute(cell, 'text', 2)
        column.set_sort_column_id(2)
        column.set_resizable(True)
        column.set_expand(True)
        self.view.append_column(column)

        cell = gtk.CellRendererText()
        column = gtk.TreeViewColumn(_("Call #"), cell)
        column.add_attribute(cell, 'text', 4)
        column.set_sort_column_id(4)
        self.view.append_column(column)
        column = gtk.TreeViewColumn(_("Total"), cell)
        column.add_attribute(cell, 'text', 6)
        column.set_sort_column_id(6)
        self.view.append_column(column)
        column = gtk.TreeViewColumn(_("Inline"), cell)
        column.add_attribute(cell, 'text', 7)
        column.set_sort_column_id(7)
        self.view.append_column(column)

        self.totalstats = gtk.Statusbar()
        self.child.pack_start(self.totalstats, expand=False)

        toggle = gtk.ToggleToolButton(gtk.STOCK_MEDIA_RECORD)
        toggle.set_active(False)
        toggle.connect('toggled', self.toggle)
        toolbar.insert(toggle, 0)
        self.entry.grab_focus()
        self.running = False
        self.child.show_all()

    def _open_file(self, view, path, column):
        code = view.get_model().model[path][0].code
        try:
            lineno = "+%d" % code.co_firstlineno
            filename = code.co_filename
        except AttributeError:
            pass
        else:
            args = ['sensible-editor', lineno, filename]
            gobject.spawn_async(args, flags=gobject.SPAWN_SEARCH_PATH)

    def toggle(self, button):
        """Turn profiling on if off and vice versa."""
        if button.get_active():
            self.start()
        else:
            self.stop()

    def snapshot(self):
        """Update the UI with a snapshot of the current profile state."""
        self.model.stats = self.profile.getstats()
        totalcalls = 0
        totaltime = 0
        for entry in self.model.stats:
            totalcalls += entry.callcount
            totaltime += entry.inlinetime
        text = _("%(calls)d calls in %(time)f CPU seconds.") % dict(
            calls=totalcalls, time=totaltime)
        self.totalstats.pop(0)
        self.totalstats.push(0, text)

    def start(self):
        """Start profiling (adding to existing stats)."""
        if not self.running:
            self.profile.enable()
            self.running = True

    def stop(self):
        """Stop profiling (but retain stats)."""
        if self.running:
            self.profile.disable()
            self.running = False
            self.snapshot()
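
Unlike the pstats-based examples, GProfiler reads raw entries via Profile.getstats(), an undocumented but long-standing CPython API whose entries expose callcount and inlinetime, which snapshot() sums. A minimal illustration:

from cProfile import Profile

profile = Profile()
profile.enable()
sorted(range(1000), reverse=True)  # placeholder workload
profile.disable()
entries = profile.getstats()
totalcalls = sum(e.callcount for e in entries)
totaltime = sum(e.inlinetime for e in entries)
print("%d calls in %f CPU seconds." % (totalcalls, totaltime))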
Example #58
0
try:
    print("clusterize")
    kmeans.similarity_method = tfidf.euclidean_similarity
    clusters = kmeans.clusterize()
    # for cluster_name, cluster in clusters.items():
    # 	print(cluster_name, "has", len(cluster.items), "docs")
    # 	for item in cluster.items:
    # 		print('\t',item.name)
    items = list(
        chain.from_iterable(cluster.items for cluster in clusters.values()))
    # with open("results.csv", "w") as csv_file:
    #     writer = csv.DictWriter(csv_file, ["name"] + [item.name for item in items])
    #     writer.writeheader()
    #     for cu_item in items:
    #         d = {item.name:cosine_similarity(item.content, cu_item.content) for item in items}
    #         d["name"] = cu_item.name
    #         writer.writerow(d)

except KeyboardInterrupt:
    print("interupted")
else:
    print('======================')
    for cluster_name, cluster in clusters.items():
        print(cluster_name, "has", len(cluster.items), "docs")
finally:
    profiler.disable()
    profiler.dump_stats("profile.txt")
    print("we have profile")

# profiler.print_stats()
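
Despite the .txt extension, dump_stats writes a binary marshal file, so it has to be read back with pstats rather than a text editor:

import pstats

stats = pstats.Stats("profile.txt")
stats.strip_dirs().sort_stats("cumulative").print_stats(20)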
Example #59
0
def main(*args, **kwargs):
    """
    Entry point for the 'bw' command line utility.

    The args and path parameters are used for integration tests.
    """
    if not args:
        args = argv[1:]

    text_args = [force_text(arg) for arg in args]

    parser_bw = build_parser_bw()
    pargs = parser_bw.parse_args(args)
    if not hasattr(pargs, 'func'):
        parser_bw.print_help()
        exit(2)
    if pargs.profile:
        profile = Profile()
        profile.enable()

    path = abspath(pargs.repo_path)
    io.debug_mode = pargs.debug
    io.activate()
    io.debug(_("invocation: {}").format(" ".join([force_text(arg) for arg in argv])))

    environ.setdefault('BW_ADD_HOST_KEYS', "1" if pargs.add_ssh_host_keys else "0")

    if len(text_args) >= 1 and (
        text_args[0] == "--version" or
        (len(text_args) >= 2 and text_args[0] == "repo" and text_args[1] == "create") or
        text_args[0] == "zen" or
        "-h" in text_args or
        "--help" in text_args
    ):
        # 'bw repo create' is a special case that only takes a path
        repo = path
    else:
        while True:
            try:
                repo = Repository(path)
                break
            except NoSuchRepository:
                if path == dirname(path):
                    io.stderr(_(
                        "{x} {path} "
                        "is not a BundleWrap repository."
                    ).format(path=quote(abspath(pargs.repo_path)), x=red("!!!")))
                    io.deactivate()
                    exit(1)
                else:
                    path = dirname(path)
            except MissingRepoDependency as exc:
                io.stderr(str(exc))
                io.deactivate()
                exit(1)
            except Exception:
                io.stderr(format_exc())
                io.deactivate()
                exit(1)

    # convert all string args into text
    text_pargs = {key: force_text(value) for key, value in vars(pargs).items()}

    try:
        pargs.func(repo, text_pargs)
    finally:
        io.deactivate()
        if pargs.profile:
            profile.disable()
            profile.dump_stats(pargs.profile)
Example #60
0
    def run(self):
        watch = StopWatch()
        watch.tag('inference', self.verbose)
        # load the MLN
        if isinstance(self.mln, MLN):
            mln = self.mln
        else:
            raise Exception('No MLN specified')

        if self.use_emln and self.emln is not None:
            mlnstrio = StringIO.StringIO()
            mln.write(mlnstrio)
            mlnstr = mlnstrio.getvalue()  # read the buffer before closing it
            mlnstrio.close()
            emln = self.emln
            mln = parse_mln(mlnstr + emln,
                            grammar=self.grammar,
                            logic=self.logic)

        # load the database
        if isinstance(self.db, Database):
            db = self.db
        elif isinstance(self.db, list) and len(self.db) == 1:
            db = self.db[0]
        elif isinstance(self.db, list):
            raise Exception(
                'Got {} dbs. Can only handle one for inference.'.format(
                    len(self.db)))
        else:
            raise Exception('DB of invalid format {}'.format(type(self.db)))

        # expand the parameters
        params = dict(self._config)
        if 'params' in params:
            params.update(eval("dict(%s)" % params['params']))
            del params['params']
        if self.verbose:
            print tabulate(sorted(list(params.viewitems()),
                                  key=lambda (k, v): str(k)),
                           headers=('Parameter:', 'Value:'))
        # create the MLN and evidence database and the parse the queries
#         mln = parse_mln(modelstr, searchPath=self.dir.get(), logic=self.config['logic'], grammar=self.config['grammar'])
#         db = parse_db(mln, db_content, ignore_unknown_preds=params.get('ignore_unknown_preds', False))
        if type(db) is list and len(db) > 1:
            raise Exception('Inference can only handle one database at a time')
        elif type(db) is list:
            db = db[0]
        # parse non-atomic params


#         if type(self.queries) is not list:
#             queries = parse_queries(mln, str(self.queries))
        params['cw_preds'] = filter(lambda x: bool(x), self.cw_preds)
        # extract and remove all non-algorithm parameters
        for s in GUI_SETTINGS:
            if s in params: del params[s]

        if self.profile:
            prof = Profile()
            print 'starting profiler...'
            prof.enable()
        # set the debug level
        olddebug = praclog.level()
        praclog.level(
            eval('logging.%s' % params.get('debug', 'WARNING').upper()))
        result = None
        try:
            mln_ = mln.materialize(db)
            mrf = mln_.ground(db)
            inference = self.method(mrf, self.queries, **params)
            if self.verbose:
                print
                print headline('EVIDENCE VARIABLES')
                print
                mrf.print_evidence_vars()

            result = inference.run()
            if self.verbose:
                print
                print headline('INFERENCE RESULTS')
                print
                inference.write()
            if self.verbose:
                print
                inference.write_elapsed_time()
        except SystemExit:
            print 'Cancelled...'
        finally:
            if self.profile:
                prof.disable()
                print headline('PROFILER STATISTICS')
                ps = pstats.Stats(prof,
                                  stream=sys.stdout).sort_stats('cumulative')
                ps.print_stats()
            # reset the debug level
            praclog.level(olddebug)
        if self.verbose:
            print
            watch.finish()
            watch.printSteps()
        return result