def run():
    """
    Parse the command line options and run the correct command.
    """
    options = command_line_options()
    log.init_logging(getattr(logging, options.level))

    if options.command == 'build':
        build.main(options)
    elif options.command == 'devel':
        devel.main(options)
    elif options.command == 'check':
        check.main(options)
    elif options.command == 'update':
        update.main(options)

    critical = log.MooseDocsFormatter.COUNTS['CRITICAL'].value
    errors = log.MooseDocsFormatter.COUNTS['ERROR'].value
    warnings = log.MooseDocsFormatter.COUNTS['WARNING'].value
    print('CRITICAL:{} ERROR:{} WARNING:{}'.format(critical, errors, warnings))

    if critical or errors:
        return 1
    return 0
def main():
    print('Welcome to checking OpenStack stats...')
    print()
    log.init_logging()

    # Check vxlan link info between cmp nodes
    check_vxlan_link.VxlanLinkCheck().execute()
def run():
    """
    Parse the command line options and run the correct command.
    """
    options = command_line_options()
    log.init_logging(getattr(logging, options.level))

    errno = 0  # Guard against an unhandled command leaving errno unbound.
    if options.command == 'build':
        errno = build.main(options)
    elif options.command == 'check':
        errno = check.main(options)
    elif options.command == 'verify':
        errno = verify.main(options)

    critical = log.MooseDocsFormatter.COUNTS['CRITICAL'].value
    errors = log.MooseDocsFormatter.COUNTS['ERROR'].value
    warnings = log.MooseDocsFormatter.COUNTS['WARNING'].value
    print('CRITICAL:{} ERROR:{} WARNING:{}'.format(critical, errors, warnings))

    if critical or errors or (errno != 0):
        return 1
    return 0
def run():
    """
    Parse the command line options and run the correct command.
    """
    options = command_line_options()
    init_large_media()
    log.init_logging(getattr(logging, options.level))

    errno = 0  # Guard against an unhandled command leaving errno unbound.
    if options.command == 'build':
        errno = build.main(options)
    elif options.command == 'check':
        errno = check.main(options)
    elif options.command == 'verify':
        errno = verify.main(options)

    critical = log.MooseDocsFormatter.COUNTS['CRITICAL'].value
    errors = log.MooseDocsFormatter.COUNTS['ERROR'].value
    warnings = log.MooseDocsFormatter.COUNTS['WARNING'].value
    print('CRITICAL:{} ERROR:{} WARNING:{}'.format(critical, errors, warnings))

    if critical or errors or (errno != 0):
        return 1
    return 0
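# The run() variants above all depend on a command_line_options() helper that
# returns an object with .command and .level attributes, but that helper is
# never shown. The following is a minimal, hypothetical sketch of what it
# could look like using argparse sub-commands; the command and option names
# mirror the later run() variants and are assumptions, not the project's
# actual parser.
import argparse

def command_line_options():
    """Hypothetical parser matching the attributes used by run() above."""
    parser = argparse.ArgumentParser(description='Documentation tool.')
    parser.add_argument('--level', default='INFO',
                        choices=['DEBUG', 'INFO', 'WARNING', 'ERROR'],
                        help='Logging level name passed to getattr(logging, ...).')
    subparsers = parser.add_subparsers(dest='command')
    for command in ('build', 'check', 'verify'):
        subparsers.add_parser(command)
    return parser.parse_args()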
import logging
from datetime import datetime

from sympy import Symbol, solve
import numpy as np

from common.gen import LinearCongruentialGenerator, IntegerGenerator
from common.log import init_logging
from common.utils import confidence_interval
from l2_branching_process.branching import BranchingProcess

init_logging(file='logs/l1-output-%s.log' % datetime.now())


def main():
    # Generating function
    s = Symbol('s', real=True)
    p0 = Symbol('p0')
    p2 = Symbol('p2')
    p3 = Symbol('p3')
    p4 = Symbol('p4')
    p5 = Symbol('p5')
    p6 = Symbol('p6')
    phi = p0 + p2 * (s**2) + p3 * (s**3) + p4 * (s**4) + p5 * (s**5) + p6 * (s**6)

    # Configuration
    initial_value = 42352531
    numbers_after_dot = 5
    modelling_steps = 10
    experiments_number = 100
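# For a branching process the mean offspring count is m = phi'(1); the process
# is subcritical, critical or supercritical for m < 1, m = 1, m > 1. A small
# self-contained sympy check of that rule (the probability values below are
# made up for the example, not taken from the snippet above):
from sympy import Symbol, Rational

s = Symbol('s', real=True)
# Example offspring distribution: P(0)=1/2, P(2)=1/4, P(3)=1/4.
phi = Rational(1, 2) + Rational(1, 4) * s**2 + Rational(1, 4) * s**3
m = phi.diff(s).subs(s, 1)  # phi'(1) = 2*(1/4) + 3*(1/4) = 5/4
print(m)  # 5/4 > 1, so this example process is supercritical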
""" jesse.testing.test_config ~~~~~~~~~~~~~ 测试日志记录功能 :copyright: (c) 2016 by ruth.ren Team. """ import logging from common.log import init_logging log = logging.getLogger(__file__) if __name__ == '__main__': init_logging() log.info('good')
parser.add_argument('-l', '--log_file', default=None, help='log file to use')
parser.add_argument('-f', '--sql_file', help='SQL file to execute')
args = parser.parse_args()

# Configure logging
LOG_NAME = args.sql_file.split('/')[-1].replace('.sql', '.log')
if args.log_file is None:
    log_path = LOG_DIR + datetime.now().strftime('%Y%m%d')
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    log_file = log_path + '/etl_' + LOG_NAME
else:
    log_file = args.log_file

init_logging({
    'console_log_level': logging.INFO,
    'file_log_level': logging.INFO,
    'log_file': log_file
})


def run_sql(sql_param, file):
    logging.info('RUN>>>>>>>>>>>>>>>>>>>>>>>>>>...')
    with io.open(file, 'r', encoding='utf8') as fr:
        for sql in fr.read().split(';'):
            try:
                if len(sql) > 10:
                    # '\n' (not '/n') so embedded newlines are actually stripped.
                    sql = (sql + ';').replace('\n', '').format(**sql_param) + '\n'
                    logging.info(
                        '-----------------------------RUNNING----------------------------\n{}'
                        .format(sql))
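# The run_sql snippet above is cut off inside the try block, before the
# statement is actually executed. Under the assumption of a standard
# DB-API 2.0 connection (the conn object below is hypothetical and not part
# of the original), the body would typically continue along these lines:
import logging

def execute_one(conn, sql):
    """Hypothetical sketch: run one statement and commit or roll back."""
    cursor = conn.cursor()
    try:
        cursor.execute(sql)  # standard DB-API 2.0 call
        conn.commit()
    except Exception:
        conn.rollback()
        logging.exception('SQL failed: %s', sql)
        raise
    finally:
        cursor.close()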
def main():
    init_logging(file='logs/l3-output-%s.log' % datetime.now())

    # Arrival intensity sequences
    k = Symbol('k', real=True)
    lambdas = [sympy.Float(0.95, 2), 0.15 * k**2 + 4, 6.25 / (k + 5)]

    # Configuration
    initial_value = 6450435
    lcg = LinearCongruentialGenerator(initial_value)
    modelling_states = 11
    max_modelling_states = 50
    experiments_number = 200
    plot_steps = 200
    plot_transitions = [
        np.linspace(0, 15, plot_steps),
        np.linspace(0, 1, plot_steps),
        np.linspace(0, 30, plot_steps)
    ]

    # State diagrams of the processes
    logging.info('Generating initial state diagrams for all models...')
    fig, axes = plt.subplots(1, 3)
    for index, lambda_ in enumerate(lambdas):
        pb = PureBirth(tg=TimeGenerator(
            intensity=lambda arg: lambda_.subs(k, arg), lcg=lcg))
        pb.model(max_steps=modelling_states)
        ax1 = axes[index]
        ax1.plot(pb.transitions, pb.states, drawstyle='steps')
        ax1.set_title('lambda=%s' % lambda_)
    plt.show()

    # Runs of each model
    for lambda_, all_transitions in zip(lambdas, plot_transitions):
        logging.info('Starting to process a model with lambda=%s.' % lambda_)
        pbs = [
            PureBirth(tg=TimeGenerator(
                intensity=lambda arg: lambda_.subs(k, arg), lcg=lcg))
            for _ in range(experiments_number)
        ]

        logging.info('Performing %s experiments...' % experiments_number)
        for pb in pbs:
            pb.model(max_transition=all_transitions[-1],
                     max_steps=max_modelling_states)

        all_states = []
        logging.info('Collecting transitions for %s periods...'
                     % len(all_transitions))
        for transition in all_transitions:
            states = []
            for pb in pbs:
                state = None
                for index, pb_transition in enumerate(pb.transitions):
                    if pb_transition > transition:
                        state = index - 1
                        break
                states.append(
                    state if state is not None else len(pb.transitions))
            all_states.append(states)

        logging.info('Calculating state probabilities...')
        # Probability of each state
        all_probs = []
        for state in range(modelling_states):
            all_probs.append([
                states.count(state) / len(states) if states else 0
                for states in all_states
            ])
        prob_plots = reduce(operator.add,
                            [[all_transitions, probs] for probs in all_probs])

        logging.info('Calculating M and D...')
        # Mean (M) and variance (D)
        all_M = []
        all_D = []
        for states in all_states:
            M = np.sum(states) / len(states) if states else 0
            M2 = np.sum(np.array(states)**2) / len(states) if states else 0
            all_M.append(M)
            all_D.append(M2 - M**2)

        logging.info('Generating plot for state probabilities, M and D...')
        fig, (ax1, ax2) = plt.subplots(1, 2)
        ax1.plot(*prob_plots)
        ax1.legend(['P(X(t)=%s)' % state for state in range(modelling_states)],
                   loc='upper center', ncol=2)
        ax2.plot(all_transitions, all_M, all_transitions, all_D)
        ax2.legend(['M(X)', 'D(X)'], loc='upper left')
        plt.show()
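# Sanity check for the constant-rate case: with lambda(k) = 0.95 the pure
# birth process is a homogeneous Poisson process, so
# P(X(t) = n) = (lt)^n * exp(-lt) / n!. A small self-contained reference
# curve to compare against the empirical probabilities (illustrative, not
# part of the original script):
import numpy as np
from math import factorial, exp

def poisson_prob(n, rate, t):
    """Theoretical P(X(t) = n) for a homogeneous pure birth process."""
    return (rate * t) ** n * exp(-rate * t) / factorial(n)

ts = np.linspace(0, 15, 200)
theoretical = [[poisson_prob(n, 0.95, t) for t in ts] for n in range(11)]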
https://gist.github.com/alexmic/7857543
"""
import warnings

import pytest

import api
from common import log
import models

warnings.simplefilter("error")  # Make all warnings errors while testing.

# Set up stream logging for py.test so log messages get output on errors too.
# If something else sets up logging first then this won't trigger.
# For example: db.py calling logging.info() or such.
log.init_logging()


@pytest.fixture(scope='session')
def appclient():
    """
    :return: test client for API app.
    :rtype: flask.testing.FlaskClient
    """
    app = api.create_api()
    client = app.test_client()
    return client


# Helpers for testing API responses
def validate_response(response, response_code=200,
                      content_type='application/vnd.api+json'):
import logging
from operator import concat
from functools import reduce

import matplotlib.pyplot as plt
import numpy as np

from common.gen import LinearCongruentialGenerator
from common.log import init_logging
from l1_markov.markov import MarkovChain

init_logging()


def show_transition_diagrams(models, corner_states):
    fig, axes = plt.subplots(2, 2)
    for index, model in enumerate(models):
        ax = axes[index // 2, index % 2]
        ax.plot(model.transitions, drawstyle='steps')
        ax.set_ylim(corner_states)
        ax.set_title('Initial state: %s' % model.initial_state)
    plt.show()


def main():
    # Transition probability matrix
    G = [
        [0.3, 0.3, 0, 0.4],
        [0.7, 0.3, 0, 0],
        [0, 0, 0.4, 0.6],
        [0, 0, 0.2, 0.8]
    ]
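# The rows of G sum to 1, so G is a row-stochastic transition matrix. Its
# stationary distribution pi (satisfying pi = pi G) can be cross-checked
# directly as the left eigenvector for eigenvalue 1. An illustrative check,
# not part of the original script; the matrix values are copied from the
# truncated snippet above:
import numpy as np

G = np.array([[0.3, 0.3, 0, 0.4],
              [0.7, 0.3, 0, 0],
              [0, 0, 0.4, 0.6],
              [0, 0, 0.2, 0.8]])

eigvals, eigvecs = np.linalg.eig(G.T)  # left eigenvectors of G
pi = np.real(eigvecs[:, np.isclose(eigvals, 1)][:, 0])
pi /= pi.sum()  # normalise (and fix sign) to get a distribution
print(pi)  # mass concentrates on the absorbing class {2, 3}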
def main():
    init_logging(file='logs/l4-output-%s.log' % datetime.now())

    # Arrival intensity sequences
    k = Symbol('k', real=True)
    lambdas = [
        sympy.Float(0.4, 1),
        1.15 * k ** 3 + 1,
        0.7 / (k + 12) ** 2
    ]
    mus = [
        sympy.Float(0.3, 1),
        0.33 * k ** 3,
        5 / k ** 2
    ]
    intensities = [_lambda + mu for (_lambda, mu) in zip(lambdas, mus)]

    # Configuration
    initial_value = 6450435
    numbers_after_dot = 5
    lcg = LinearCongruentialGenerator(initial_value)
    modelling_states = 11
    max_modelling_states = 1000
    pi_epsilon = 0.00001
    experiments_number = 200
    plot_steps = 200
    plot_transitions = [
        np.linspace(0, 15, plot_steps),
        np.linspace(0, 0.35, plot_steps),
        np.linspace(0, 100, plot_steps)
    ]

    # State diagrams of the processes
    logging.info('Generating initial state diagrams for all models...')
    fig, axes = plt.subplots(1, 3)
    for index, funcs in enumerate(zip(lambdas, mus, intensities)):
        lambda_, mu, intensity = funcs
        pb = BirthAndDeath(lcg=lcg,
                           tg=TimeGenerator(
                               intensity=lambda arg: intensity.subs(k, arg),
                               lcg=lcg),
                           lambda_=lambda arg: lambda_.subs(k, arg),
                           mu=lambda arg: mu.subs(k, arg))
        pb.model(max_iterations=modelling_states)
        ax1 = axes[index]
        ax1.plot(pb.transitions, pb.states, drawstyle='steps')
        ax1.set_title('lambda=%s\nmu=%s' % (lambda_, mu))
    plt.show()

    # Runs of each model
    for lambda_, mu, intensity, all_transitions \
            in zip(lambdas, mus, intensities, plot_transitions):
        logging.info('Starting to process a model with lambda=%s and mu=%s.'
                     % (lambda_, mu))
        pbs = [BirthAndDeath(lcg=lcg,
                             tg=TimeGenerator(
                                 intensity=lambda arg: intensity.subs(k, arg),
                                 lcg=lcg),
                             lambda_=lambda arg: lambda_.subs(k, arg),
                             mu=lambda arg: mu.subs(k, arg))
               for _ in range(experiments_number)]

        logging.info('Performing %s experiments...' % experiments_number)
        for index, pb in enumerate(pbs):
            if index % 10 == 0:
                logging.debug('%s experiments were performed...' % index)
            pb.model(max_transition=all_transitions[-1],
                     max_steps=max_modelling_states)
        logging.debug('All experiments were performed.')

        all_states = []
        logging.info('Collecting transitions for %s periods...'
                     % len(all_transitions))
        for index, transition in enumerate(all_transitions):
            if index % 10 == 0:
                logging.debug('%s transitions were collected...' % index)
            states = []
            for pb in pbs:
                state = None
                for pb_index, pb_transition in enumerate(pb.transitions):
                    if pb_transition > transition:
                        state = pb.states[pb_index]
                        break
                states.append(state if state is not None else pb.states[-1])
            all_states.append(states)
        logging.debug('All transitions were collected.')

        logging.info('Calculating state probabilities...')
        # Probability of each state
        all_probs = []
        for state in range(modelling_states):
            all_probs.append([states.count(state) / len(states) if states else 0
                              for states in all_states])
        prob_plots = reduce(operator.add,
                            [[all_transitions, probs] for probs in all_probs])

        logging.info('Calculating M and D...')
        # Mean (M) and variance (D)
        all_M = []
        all_D = []
        for states in all_states:
            M = np.sum(states) / len(states) if states else 0
            M2 = np.sum(np.array(states) ** 2) / len(states) if states else 0
            all_M.append(M)
            all_D.append(M2 - M ** 2)

        logging.info('Generating plot for state probabilities, M and D...')
        fig, (ax1, ax2, ax3) = plt.subplots(1, 3)
        ax1.plot(*prob_plots)
        ax1.legend(['P(X(t)=%s)' % state for state in range(modelling_states)],
                   loc='upper center', ncol=2)
        ax2.plot(all_transitions, all_M)
        ax2.legend(['M(X)'], loc='upper left')
        ax3.plot(all_transitions, all_D)
        ax3.legend(['D(X)'], loc='upper left')
        plt.show()

        # Final (stationary) probabilities
        pi = [1]
        for state in range(1, max_modelling_states):
            numerator = 1
            denominator = 1
            for inner_state in range(1, state):
                numerator *= lambda_.subs(k, inner_state)
                denominator *= mu.subs(k, inner_state)
            pi.append(numerator / denominator)
            if state > modelling_states and abs(pi[-1] - pi[-2]) < pi_epsilon:
                break
        pi_sum = sum(pi)
        logging.info('Listing final probabilities calculated for first %s states...'
                     % len(pi))
        for state in range(modelling_states):
            state_p = pi[state] / pi_sum
            logging.info('p%s =\t%s'
                         % (state, np.around(float(state_p), numbers_after_dot)))
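# For reference, the final probabilities of a birth-death process follow the
# detailed-balance recursion pi_n = pi_{n-1} * lambda_{n-1} / mu_n, i.e.
# pi_n = pi_0 * prod_{i=0}^{n-1} lambda_i / mu_{i+1}. A compact sketch of
# that textbook formula for a truncated state space (indexing conventions
# may differ from the model classes used above):
def stationary_probs(lambda_fn, mu_fn, n_states):
    """Normalised pi_n via the birth-death balance recursion."""
    pi = [1.0]
    for n in range(1, n_states):
        pi.append(pi[-1] * lambda_fn(n - 1) / mu_fn(n))
    total = sum(pi)
    return [p / total for p in pi]

# Example with the constant rates of the first model above:
probs = stationary_probs(lambda n: 0.4, lambda n: 0.3, 11)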
def main():
    init_logging(file='logs/l6-output-%s.log' % datetime.now(), debug=True)

    # Configuration
    initial_value = 9645730
    lcg = LinearCongruentialGenerator(initial_value)
    initial_gen = NormalGenerator(lcg, mu=1, sigma=2)
    max_step = 10000
    plot_steps = 100
    correlation_steps = 50
    experiments_number = 100
    show_white_noise_distributions = False
    show_correlated_noise_distributions = False
    hypothesis_a = 0.1
    hypothesis_k = lambda count: 1.73 * count**(1 / 3)

    # Input data
    sigma = np.sqrt(4)
    delta = 3.0
    t = 50.0

    # 1. Discrete white noise generators
    uniform_interval = np.sqrt(12 * (sigma**2)) / 2
    uniform_gen = UniformGenerator(lcg, min=-uniform_interval,
                                   max=uniform_interval)
    normal_gen = NormalGenerator(lcg, mu=0, sigma=sigma)

    if show_white_noise_distributions:
        uniform_noise = [uniform_gen() for _ in range(0, 10000)]
        normal_noise = [normal_gen() for _ in range(0, 10000)]
        uniform_noise_histogram = np.histogram(uniform_noise, bins=30,
                                               range=(-3, 3), density=True)
        normal_noise_histogram = np.histogram(normal_noise, bins=30,
                                              range=(-12, 12), density=True)

        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), uniform_noise[:plot_steps])
        right_ax.plot(uniform_noise_histogram[1][:-1], uniform_noise_histogram[0])
        right_ax.set_ylim(0, 0.4)
        plt.show()

        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), normal_noise[:plot_steps])
        right_ax.plot(normal_noise_histogram[1][:-1], normal_noise_histogram[0])
        right_ax.set_ylim(0, 0.4)
        plt.show()

    # 2. Shaping filter
    if show_correlated_noise_distributions:
        uniform_filter = ExponentiallyCorrelatedFilter(delta=delta, t=t,
                                                       x_gen=initial_gen,
                                                       v_gen=uniform_gen)
        uniform_filter.model(max_step=max_step)
        uniform_filter_histogram = np.histogram(uniform_filter.y, bins=30,
                                                range=(-5, 5), density=True)
        normal_filter = ExponentiallyCorrelatedFilter(delta=delta, t=t,
                                                      x_gen=initial_gen,
                                                      v_gen=normal_gen)
        normal_filter.model(max_step=max_step)
        normal_filter_histogram = np.histogram(normal_filter.y, bins=30,
                                               range=(-5, 5), density=True)

        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), uniform_filter.y[:plot_steps])
        right_ax.plot(uniform_filter_histogram[1][:-1], uniform_filter_histogram[0])
        plt.show()

        _, (left_ax, right_ax) = plt.subplots(1, 2)
        left_ax.bar(range(plot_steps), normal_filter.y[:plot_steps])
        right_ax.plot(normal_filter_histogram[1][:-1], normal_filter_histogram[0])
        plt.show()

    uniform_filters = [
        ExponentiallyCorrelatedFilter(delta=delta, t=t, x_gen=initial_gen,
                                      v_gen=uniform_gen)
        for _ in range(experiments_number)
    ]
    normal_filters = [
        ExponentiallyCorrelatedFilter(delta=delta, t=t, x_gen=initial_gen,
                                      v_gen=normal_gen)
        for _ in range(experiments_number)
    ]

    def auto_correlation(x, length):
        return np.array(
            [1] + [np.corrcoef(x[:-i], x[i:])[0, 1] for i in range(1, length)])

    logging.info('Performing %s experiments for uniform filter'
                 % experiments_number)
    for index, filt in enumerate(uniform_filters):
        if index and index % 10 == 0:
            logging.info('Experiments performed: %s/%s'
                         % (index, experiments_number))
        filt.model(max_step=max_step)
    logging.info('All experiments have been performed for the uniform filter')

    logging.info('Collecting autocorrelation, M and D for the uniform filter')
    uniform_correlation = reduce(
        np.add,
        [auto_correlation(filt.y, correlation_steps)
         for filt in uniform_filters]) / experiments_number
    uniform_outputs = reduce(np.append, [filt.y for filt in uniform_filters])
    uniform_M = np.mean(uniform_outputs)
    uniform_M2 = np.mean(uniform_outputs**2)
    uniform_D = uniform_M2 - uniform_M**2
    logging.info('M = %s' % uniform_M)
    logging.info('D = %s' % uniform_D)

    logging.info('Performing %s experiments for normal filter'
                 % experiments_number)
    for index, filt in enumerate(normal_filters):
        if index and index % 10 == 0:
            logging.info('Experiments performed: %s/%s'
                         % (index, experiments_number))
        filt.model(max_step=max_step)
    logging.info('All experiments have been performed for the normal filter')

    logging.info('Collecting autocorrelation, M and D for the normal filter')
    normal_correlation = reduce(
        np.add,
        [auto_correlation(filt.y, correlation_steps)
         for filt in normal_filters]) / experiments_number
    normal_outputs = reduce(np.append, [filt.y for filt in normal_filters])
    normal_M = np.mean(normal_outputs)
    normal_M2 = np.mean(normal_outputs**2)
    normal_D = normal_M2 - normal_M**2
    logging.info('M = %s' % normal_M)
    logging.info('D = %s' % normal_D)

    logging.info('Plotting autocorrelation functions')
    plt.plot(range(correlation_steps), uniform_correlation)
    plt.plot(range(correlation_steps), normal_correlation)
    plt.show()

    def spectrum(w, correlation):
        return 2 * np.sum(
            [np.cos(w * k) * correlation[k] for k in range(len(correlation))])

    spectrum_dots = np.linspace(0, 1, correlation_steps)
    uniform_spectrum = [spectrum(w, uniform_correlation) for w in spectrum_dots]
    normal_spectrum = [spectrum(w, normal_correlation) for w in spectrum_dots]

    logging.info('Plotting spectrum functions')
    plt.plot(spectrum_dots, uniform_spectrum)
    plt.plot(spectrum_dots, normal_spectrum)
    plt.show()

    logging.info('Testing distribution hypothesis')

    def normal_f(x, mu, sigma):
        return np.exp(-((x - mu) ** 2) / (2 * sigma ** 2)) \
            / np.sqrt(2 * np.pi * sigma ** 2)

    def interval_Z(interval_prob, interval_count, total_count):
        return ((interval_count - total_count * interval_prob)**2
                / (total_count * interval_prob))

    hypothesis_intervals = np.linspace(-6, 6, plot_steps)
    uniform_Z = 0
    normal_Z = 0
    for interval_start, interval_end in zip(hypothesis_intervals[:-1],
                                            hypothesis_intervals[1:]):
        interval_hypothesis_prob = integrate.quad(
            func=lambda x: normal_f(x, mu=0, sigma=sigma),
            a=interval_start, b=interval_end)[0]
        interval_uniform_count = np.logical_and(
            uniform_outputs >= interval_start,
            uniform_outputs < interval_end).sum()
        interval_normal_count = np.logical_and(
            normal_outputs >= interval_start,
            normal_outputs < interval_end).sum()
        uniform_Z += interval_Z(interval_hypothesis_prob,
                                interval_uniform_count, uniform_outputs.size)
        normal_Z += interval_Z(interval_hypothesis_prob,
                               interval_normal_count, normal_outputs.size)

    from scipy.stats.distributions import chi2

    def X2(a, k):
        return chi2.ppf(q=1 - a, df=k - 1)

    uniform_X2 = X2(hypothesis_a, hypothesis_k(uniform_outputs.size))
    normal_X2 = X2(hypothesis_a, hypothesis_k(normal_outputs.size))

    logging.info('Uniform noise Z = %s' % uniform_Z)
    logging.info('Uniform noise X2 = %s' % uniform_X2)
    logging.info('Normal noise Z = %s' % normal_Z)
    logging.info('Normal noise X2 = %s' % normal_X2)
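# The script above computes the chi-square statistic Z and the critical value
# X2 for each noise source but stops short of comparing them. The conventional
# decision rule is to accept the distribution hypothesis at significance level
# a when Z < X2. A small helper sketching that final step (an illustrative
# completion, not part of the original):
import logging

def report_chi2(name, Z, X2_crit):
    """Log whether the distribution hypothesis is accepted or rejected."""
    verdict = 'accepted' if Z < X2_crit else 'rejected'
    logging.info('%s noise: hypothesis %s (Z=%.3f, X2=%.3f)',
                 name, verdict, Z, X2_crit)

# e.g. report_chi2('uniform', uniform_Z, uniform_X2)
#      report_chi2('normal', normal_Z, normal_X2)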
#! /usr/bin/env python -i
"""
Load some useful stuff into the console when running python interactively.
"""
import os
import sys

from common import log


def _set_prompt():
    """
    Color code the Python prompt based on environment.
    """
    env = os.environ.get('ENV', 'dev')
    color = {'dev': '32',    # Green
             'stage': '33',  # Yellow
             'prod': '31'}.get(env)  # Red
    sys.ps1 = '\001\033[1;%sm\002>>> \001\033[0m\002' % color
    sys.ps2 = '\001\033[1;%sm\002... \001\033[0m\002' % color


log.init_logging(log.logging.DEBUG)
_set_prompt()
del sys
del os
del _set_prompt

# Do this last so that logging is set up first.
import models  # pylint: disable=unused-import,wrong-import-position
print('import models')
def main():
    init_logging(file='logs/l5-output-%s.log' % datetime.now(), debug=True)

    # Sequences of alpha and lambda parameters
    params = [{
        'gen': {'alpha': 1.0, 'lambda_': 7.0},
        'max_transition': 0.5
    }, {
        'gen': {'alpha': 0.1, 'lambda_': 4.0},
        'max_transition': 2.0
    }, {
        'gen': {'alpha': 2.0, 'lambda_': 10.0},
        'max_transition': 0.25
    }]

    # Configuration
    initial_value = 6450435
    numbers_after_dot = 5
    lcg = LinearCongruentialGenerator(initial_value)
    max_step = 10
    experiments_number = 1000
    plot_steps = 30

    # Preliminary runs
    logging.info('Generating initial state diagrams for all models...')
    fig, axes = plt.subplots(1, 3)
    for index, model_params in enumerate(params):
        model = Recovery(gen=WeibullGenerator(lcg, **model_params['gen']))
        model.model(max_steps=max_step)
        # Pair each state with itself so every [t_i, t_{i+1}) interval is
        # drawn as a horizontal step segment.
        state_pairs = zip(model.states[:-1], model.states[:-1])
        transition_pairs = zip(model.transitions[:-1], model.transitions[1:])
        ax = axes[index]
        for transition_states, transitions_H in zip(state_pairs,
                                                    transition_pairs):
            ax.plot(transitions_H, transition_states, drawstyle='steps',
                    color='blue')
        ax.set_title('alpha=%s\nlambda=%s'
                     % (model_params['gen']['alpha'],
                        model_params['gen']['lambda_']))
        ax.grid()
    plt.show()

    # Runs of each model
    logging.info('Performing %s experiments for all models.'
                 % experiments_number)
    for model_params in params:
        lambda_ = model_params['gen']['lambda_']
        alpha = model_params['gen']['alpha']
        logging.info(
            'Performing experiments for the model with alpha=%s and lambda=%s.'
            % (alpha, lambda_))
        models = [
            Recovery(gen=WeibullGenerator(lcg, **model_params['gen']))
            for _ in range(0, experiments_number)
        ]
        for index, model in enumerate(models):
            if index and index % 10 == 0:
                logging.debug('Experiments performed: %s/%s.'
                              % (index, experiments_number))
            model.model(max_transition=model_params['max_transition'])
        logging.debug('All experiments have finished.')

        logging.info('Collecting transitions for %s intervals.' % plot_steps)
        transitions_H = np.linspace(0, model_params['max_transition'],
                                    plot_steps)
        states = []
        for index, transition in enumerate(transitions_H):
            if index and index % 10 == 0:
                logging.debug('Transitions collected: %s/%s.'
                              % (index, plot_steps))
            transition_states = []
            for model in models:
                state = None
                for transition_index, model_transition in enumerate(
                        model.transitions):
                    if model_transition > transition:
                        state = model.states[transition_index]
                        break
                transition_states.append(
                    state if state is not None else model.states[-1])
            states.append(transition_states)
        logging.debug('All transitions have been collected.')

        logging.info('Calculating M and D for ksi.')
        all_transitions = reduce(
            operator.add, [model.generated_transitions for model in models])
        M = float(np.sum(all_transitions) / len(all_transitions))
        M2 = np.sum(np.array(all_transitions)**2) / len(all_transitions)
        D = float(M2 - M**2)
        M_theoretical = 1 / lambda_ * gamma(1 + 1 / alpha)
        D_theoretical = (1 / lambda_)**2 * gamma(1 + 2 / alpha) \
            - M_theoretical**2
        logging.info('experimental M = %s.'
                     % (np.around(M, numbers_after_dot)))
        logging.info('theoretical M = %s.'
                     % (np.around(M_theoretical, numbers_after_dot)))
        logging.info('experimental D = %s.'
                     % (np.around(D, numbers_after_dot)))
        logging.info('theoretical D = %s.'
                     % (np.around(D_theoretical, numbers_after_dot)))

        logging.info('Calculating H(t).')
        # Renewal function
        H = []
        for transition_states in states:
            transition_M = np.sum(transition_states) / len(transition_states) \
                if transition_states else 0
            H.append(transition_M)

        logging.info('Plotting H(t).')
        plt.plot(transitions_H, H)
        plt.title('H(t)')
        plt.show()

        logging.info('Calculating experimental F(t).')
        F = []
        transitions_F = np.linspace(0, model_params['max_transition'],
                                    plot_steps)
        generated_transitions = [
            model.generated_transitions for model in models
        ]
        generated_transitions = np.array(
            list(reduce(np.append, generated_transitions)))
        for transition in transitions_F:
            transition_F = (generated_transitions <= transition).sum() \
                / generated_transitions.size
            F.append(transition_F)

        logging.info('Calculating theoretical F(t).')
        F_theoretical = 1 - np.exp(-(lambda_ * transitions_F)**alpha)

        logging.info('Calculating f(t).')
        f = []
        for index, transition in enumerate(transitions_F[:-1]):
            left_condition = generated_transitions > transition
            next_transition = transitions_F[index + 1]
            right_condition = generated_transitions <= next_transition
            number_of_transitions = np.logical_and(left_condition,
                                                   right_condition).sum()
            transition_f = number_of_transitions / generated_transitions.size \
                / (next_transition - transition)
            f.append(transition_f)
        f.append(0)
        f = np.array(f)

        logging.info('Calculating theoretical f(t).')
        f_theoretical = alpha * lambda_ * (lambda_ * transitions_F) ** (alpha - 1) \
            * np.exp(-(lambda_ * transitions_F) ** alpha)

        logging.info('Calculating G(t).')
        G = 1 - np.array(F)

        logging.info('Calculating theoretical G(t).')
        G_theoretical = np.exp(-(lambda_ * transitions_F)**alpha)

        # Avoid division by zero in phi(t) = f(t) / G(t).
        G[G == 0] = G[G != 0].min()

        logging.info('Calculating phi(t).')
        phi = f / G

        logging.info('Calculating theoretical phi(t).')
        phi_theoretical = alpha * lambda_ * (lambda_ * transitions_F)**(alpha - 1)

        logging.info('Plotting F(t), f(t), G(t) and phi(t).')
        fig, axes = plt.subplots(2, 2)
        axes[0, 0].plot(transitions_F, F)
        axes[0, 0].plot(transitions_F, F_theoretical)
        axes[0, 0].set_title('F(t)')
        axes[0, 1].plot(transitions_F + (transitions_F[1] - transitions_F[0]), f)
        axes[0, 1].plot(transitions_F, f_theoretical)
        axes[0, 1].set_title('f(t)')
        axes[1, 0].plot(transitions_F, G)
        axes[1, 0].plot(transitions_F, G_theoretical)
        axes[1, 0].set_title('G(t)')
        axes[1, 1].plot(transitions_F, phi)
        axes[1, 1].plot(transitions_F, phi_theoretical)
        axes[1, 1].set_title('phi(t)')
        plt.show()
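# Cross-check of the Weibull moments used above: with F(t) = 1 - exp(-(lt)^a)
# the distribution is scipy's weibull_min with shape c = alpha and
# scale = 1/lambda, so the theoretical M and D can be verified independently.
# An illustrative check, not part of the original script:
from scipy.stats import weibull_min

alpha, lambda_ = 2.0, 10.0  # third parameter set from the params list above
M_ref = weibull_min.mean(alpha, scale=1 / lambda_)
D_ref = weibull_min.var(alpha, scale=1 / lambda_)
# M_ref == gamma(1 + 1/alpha) / lambda_ and
# D_ref == gamma(1 + 2/alpha) / lambda_**2 - M_ref**2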