        return [
            GroupSplitSpec(p=0.5, attr_set={'flu': 'I'}),
            GroupSplitSpec(p=0.5, attr_set={'flu': 'R'}),
        ]
    if group.has_attr({'flu': 'R'}):
        return [
            GroupSplitSpec(p=0.7, attr_set={'flu': 'R'}),
            GroupSplitSpec(p=0.3, attr_set={'flu': 'S'}),
        ]


probe_grp_size_flu = GroupSizeProbe.by_attr(
    'flu', 'flu', ['S', 'I', 'R'],
    msg_mode=ProbeMsgMode.DISP,
    persistance=ProbePersistanceDB(),
    memo='Mass distribution across flu status')

p = GroupSizeProbe.by_attr('flu', 'flu', ['S', 'I', 'R'], persistance=ProbePersistanceDB())

(Simulation().
    add_probe(GroupSizeProbe.by_attr('flu', 'flu', ['S', 'I', 'R'], msg_mode=ProbeMsgMode.DISP)).
    add_probe(p).
    add_rule(SIRRule()).
    add_group(Group(m=1000, attr={'flu': 'S'})).
    run(26))

series = [
    {'var': 'p0',
import os
import sys
from inspect import getsourcefile

sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', '..'))

from pram.data import GroupSizeProbe, ProbePersistanceDB
from pram.entity import GroupQry, Site


# ----------------------------------------------------------------------------------------------------------------------
fpath_out = os.path.join(os.path.dirname(__file__), '..', 'out', '02-flu.sqlite3')

if os.path.isfile(fpath_out):
    os.remove(fpath_out)  # start from a fresh probe database on every run

pp = ProbePersistanceDB(fpath_out)


# ----------------------------------------------------------------------------------------------------------------------
def probe_flu_at(school, name=None):
    """Return a probe reporting the proportion and count of susceptible/infected/recovered agents at the given school site."""
    return GroupSizeProbe(
        name=name or school.name,
        queries=[
            GroupQry(attr={'flu': 's'}, rel={'school': school}),
            GroupQry(attr={'flu': 'i'}, rel={'school': school}),
            GroupQry(attr={'flu': 'r'}, rel={'school': school})
        ],
        qry_tot=GroupQry(rel={'school': school}),
        persistance=pp,
        var_names=['ps', 'pi', 'pr', 'ns', 'ni', 'nr']
    )
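# Usage sketch (assumptions: a Site is constructed from its name, and groups carry a
# 'school' relation matching the GroupQry objects above; the site name and group size
# below are made up for illustration).
from pram.entity import Group
from pram.sim import Simulation

school_a = Site('school-a')
(Simulation().
    add_probe(probe_flu_at(school_a, name='flu-at-school-a')).
    add_group(Group(m=200, attr={'flu': 's'}, rel={'school': school_a})).
    run(5))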
def analysis_simple(tx_dict=None, population=10000, iteration=16):
    if tx_dict is None:
        tx_dict = dict(
            round_seed_a=0.3,
            round_seed_failure=0.67,
            round_a_b=0.2,
            round_a_failure=0.6,
            round_b_c=0.3,
            round_b_failure=0.4,
            round_c_success=0.5,
            round_c_failure=0.5,
            round_success_success=1,
            round_success_failure=0,
            round_failure_success=0,
            round_failure_failure=1,
            # round_a_a=0.2,
            # round_b_b=0.2,
            # round_c_c=0.2,
        )

    old_p = GroupSizeProbe.by_attr(
        'stage', 'stage', ['seed', 'a', 'b', 'c', 'success', 'failure'],
        persistance=ProbePersistanceMem(),
        msg_mode=ProbeMsgMode.CUMUL)
    p = GroupSizeProbe.by_attr(
        'stage', 'stage', ['seed', 'a', 'b', 'c', 'success', 'failure'],
        persistance=ProbePersistanceDB())

    sim = (Simulation().
        add([UserStartupGrowthRule(tx_dict), p, Group(m=population)]).
        run(iteration))

    # Pull the group counts (n0..n5) out of the probe and relabel them by stage.
    series = [
        {'var': 'n0'},
        {'var': 'n1'},
        {'var': 'n2'},
        {'var': 'n3'},
        {'var': 'n4'},
        {'var': 'n5'},
    ]
    col_names = {
        'n0': 'seed',
        'n1': 'a',
        'n2': 'b',
        'n3': 'c',
        'n4': 'success',
        'n5': 'failure'
    }

    probe_data = p.probe_data(series)
    probe_frame: pd.DataFrame
    probe_frame = pd.DataFrame.from_dict(probe_data)
    probe_frame.rename(columns=col_names, inplace=True)
    probe_frame["i"] = probe_frame["i"] + 1

    # Prepend the initial condition (the whole population in the seed stage) as iteration 0.
    initial_condition = {
        'seed': population, 'a': 0, 'b': 0, 'c': 0, 'success': 0, 'failure': 0, 'i': 0
    }
    probe_frame = pd.concat(
        [pd.DataFrame(initial_condition, index=[0]), probe_frame[:]]).reset_index(drop=True)
    probe_frame.drop(columns=['i'], inplace=True)
    print(probe_frame)

    d = probe_frame.iloc[0]
    print(d)

    # for chart
    plot_data = probe_frame.iloc[0]
    # probe_names = probe_frame
    print(plot_data.values)
    print(list(plot_data.index))
    # data_source = ColumnDataSource(probe_frame)

    return probe_frame
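# Usage sketch: analysis_simple() returns the stage-count DataFrame built above, so a caller
# can run it with the default transition probabilities and inspect, for example, the final
# distribution of startups across stages (the printout below is illustrative only).
stage_counts = analysis_simple(population=10000, iteration=16)
print(stage_counts.iloc[-1])  # mass in each stage after the last iteration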
def analysis_Complex_version2(tx_dict=None, population=100, iteration=20):
    np.set_printoptions(suppress=True)

    if tx_dict is None:
        tx_dict = dict(
            round_seed_a=0.3,
            round_seed_failure=0.67,
            round_a_b=0.2,
            round_a_failure=0.3,
            round_b_c=0.3,
            round_b_failure=0.2,
            round_c_success=0.3,
            round_c_failure=0.4,
            round_success_success=1,
            round_success_failure=0,
            round_failure_success=0,
            round_failure_failure=1,
            round_a_a=0.4,
            round_b_b=0.3,
            round_c_c=0.3,
        )

    # Column order: seed, a, b, c, success, failure
    tx_matrix = np.array([
        [0, tx_dict['round_seed_a'], 0, 0,
         1 - tx_dict['round_seed_failure'] - tx_dict['round_seed_a'],
         tx_dict['round_seed_failure']],
        [0, tx_dict['round_a_a'], tx_dict['round_a_b'], 0,
         1 - tx_dict['round_a_failure'] - tx_dict['round_a_a'] - tx_dict['round_a_b'],
         tx_dict['round_a_failure']],
        [0, 0, tx_dict['round_b_b'], tx_dict['round_b_c'],
         1 - tx_dict['round_b_failure'] - tx_dict['round_b_c'] - tx_dict['round_b_b'],
         tx_dict['round_b_failure']],
        [0, 0, 0, tx_dict['round_c_c'],
         1 - tx_dict['round_c_failure'] - tx_dict['round_c_c'],
         tx_dict['round_c_failure']],
        [0, 0, 0, 0,
         1 - tx_dict['round_success_failure'],
         tx_dict['round_success_failure']],
        [0, 0, 0, 0,
         1 - tx_dict['round_failure_failure'],
         tx_dict['round_failure_failure']],
    ])

    old_p = GroupSizeProbe.by_attr(
        'stage', 'stage', ['seed', 'a', 'b', 'c', 'success', 'failure'],
        persistance=ProbePersistanceMem(),
        msg_mode=ProbeMsgMode.CUMUL)
    p = GroupSizeProbe.by_attr(
        'stage', 'stage', ['seed', 'a', 'b', 'c', 'success', 'failure'],
        persistance=ProbePersistanceDB())

    sim = (Simulation().
        add([UserStartupGrowthRuleVersion2(tx_dict), p, Group(m=population)]).
        run(iteration))

    print(sim.probes[0].get_msg())
    print()

    # Stage proportions (p0..p5) reported by the probe, relabeled by stage.
    series = [
        {'var': 'p0'},
        {'var': 'p1'},
        {'var': 'p2'},
        {'var': 'p3'},
        {'var': 'p4'},
        {'var': 'p5'},
    ]
    col_names = {
        "p0": "p_seed",
        "p1": "p_a",
        "p2": "p_b",
        "p3": "p_c",
        "p4": "p_success",
        "p5": "p_failure"
    }
    names = ["p0", "p1", "p2", "p3", "p4", "p5"]

    probe_data: dict
    probe_data = p.probe_data(series)

    probe_data_ordered = OrderedDict()
    for name in names:
        # Copy (do not pop) so that probe_data stays intact for the DataFrame built below.
        probe_data_ordered[col_names.get(name)] = probe_data[name]
    # pp(probe_data_ordered)

    probe_data_frame = pd.DataFrame.from_dict(probe_data_ordered)
    pp(probe_data_frame)

    value_df = pd.DataFrame(columns=list(probe_data_ordered.keys()))
    # print(value_df)

    # proportion of investment
    investment_proportion = OrderedDict(seed=1, a=0, b=0, c=0, success=0, failure=0)

    # hyper-parameters
    # stage_multiplier = {"seed": 3, "a": 5, "b": 7, "c": 5, "success": 5, "failure": 0}
    # stage_multiplier_dict = OrderedDict(seed=3, a=5, b=7, c=5, success=5, failure=0)
    stage_multiplier_list = [1, 1.1, 1.25, 1.3, 1, 0]
    # stage_multiplier_list = [1, 2, 3, 4, 5, 0]

    inital_investment_dict = OrderedDict(
        seed=population * investment_proportion.get("seed"),
        a=population * investment_proportion.get("a"),
        b=population * investment_proportion.get("b"),
        c=population * investment_proportion.get("c"),
        success=population * investment_proportion.get("success"),
        failure=population * investment_proportion.get("failure"),
    )

    sim_length = len(probe_data_ordered.get('p_a'))

    # Portfolio valuation: at each iteration, weight the current valuation by the stage
    # proportions and the per-stage multipliers, then sum.
    valuation_list = [population]
    for i in range(sim_length):
        # print(valuation_list[i] * probe_data_frame.iloc[[i]])
        val = valuation_list[i] * probe_data_frame.iloc[i]
        val = stage_multiplier_list * val
        valuation_list.append(np.sum(val))
    print(valuation_list)
    # print("sim_length", sim_length)
    # total_investment = sum(inital_investment)
    # print("inital_investment", inital_investment_dict)

    probe_frame: pd.DataFrame
    probe_frame = pd.DataFrame.from_dict(probe_data)
    probe_frame.rename(columns=col_names, inplace=True)
    probe_frame["i"] = probe_frame["i"] + 1

    # Prepend the initial condition (all mass in the seed stage) as iteration 0.
    initial_condition = {
        'seed': population, 'a': 0, 'b': 0, 'c': 0, 'success': 0, 'failure': 0, 'i': 0,
        "p_seed": 1, "p_a": 0, "p_b": 0, "p_c": 0, "p_success": 0, "p_failure": 0
    }
    probe_frame = pd.concat(
        [pd.DataFrame(initial_condition, index=[0]), probe_frame[:]]).reset_index(drop=True)
    probe_frame.drop(columns=['i'], inplace=True)
    print(probe_frame)
    # d = probe_frame.iloc[0]
    # print(d)

    # for chart
    plot_data = probe_frame.iloc[0]
    # probe_names = probe_frame
    print(plot_data.values)
    print(list(plot_data.index))
    # data_source = ColumnDataSource(probe_frame)

    probe_frame.to_csv("data_file.csv")

    return probe_frame
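# Usage sketch: run the version-2 analysis, which prints the simulated portfolio valuation
# trajectory (valuation_list), writes the per-iteration frame to data_file.csv, and returns
# that frame; the parameter values below are the function defaults and are illustrative only.
v2_frame = analysis_Complex_version2(population=100, iteration=20)
print(v2_frame.head())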
def analysis_Complex_version1(tx_dict=None, population=10000, iteration=20):
    if tx_dict is None:
        tx_dict = dict(
            round_seed_a=0.3,
            round_seed_failure=0.67,
            round_a_b=0.2,
            round_a_failure=0.3,
            round_b_c=0.3,
            round_b_failure=0.2,
            round_c_success=0.3,
            round_c_failure=0.4,
            round_success_success=1,
            round_success_failure=0,
            round_failure_success=0,
            round_failure_failure=1,
            round_a_a=0.4,
            round_b_b=0.3,
            round_c_c=0.3,
        )

    tx_numpy_array = np.array([])

    old_p = GroupSizeProbe.by_attr(
        'stage', 'stage', ['seed', 'a', 'b', 'c', 'success', 'failure'],
        persistance=ProbePersistanceMem(),
        msg_mode=ProbeMsgMode.CUMUL)
    p = GroupSizeProbe.by_attr(
        'stage', 'stage', ['seed', 'a', 'b', 'c', 'success', 'failure'],
        persistance=ProbePersistanceDB())

    sim = (Simulation().
        add([UserStartupGrowthRuleVersion2(tx_dict), p, Group(m=population)]).
        run(iteration))

    print(sim.probes[0].get_msg())
    print()

    # Pull both the stage proportions (p0..p5) and the group counts (n0..n5) from the probe.
    series = [
        {'var': 'p0'},
        {'var': 'p1'},
        {'var': 'p2'},
        {'var': 'p3'},
        {'var': 'p4'},
        {'var': 'p5'},
        {'var': 'n0'},
        {'var': 'n1'},
        {'var': 'n2'},
        {'var': 'n3'},
        {'var': 'n4'},
        {'var': 'n5'},
    ]
    col_names = {
        'n0': 'seed',
        'n1': 'a',
        'n2': 'b',
        'n3': 'c',
        'n4': 'success',
        'n5': 'failure',
        "p0": "p_seed",
        "p1": "p_a",
        "p2": "p_b",
        "p3": "p_c",
        "p4": "p_success",
        "p5": "p_failure"
    }

    probe_data = p.probe_data(series)
    probe_frame: pd.DataFrame
    probe_frame = pd.DataFrame.from_dict(probe_data)
    probe_frame.rename(columns=col_names, inplace=True)
    probe_frame["i"] = probe_frame["i"] + 1

    # Prepend the initial condition (all mass in the seed stage) as iteration 0.
    initial_condition = {
        'seed': population, 'a': 0, 'b': 0, 'c': 0, 'success': 0, 'failure': 0, 'i': 0,
        "p_seed": 1, "p_a": 0, "p_b": 0, "p_c": 0, "p_success": 0, "p_failure": 0
    }
    probe_frame = pd.concat(
        [pd.DataFrame(initial_condition, index=[0]), probe_frame[:]]).reset_index(drop=True)
    probe_frame.drop(columns=['i'], inplace=True)
    print(probe_frame)
    # d = probe_frame.iloc[0]
    # print(d)

    # for chart
    plot_data = probe_frame.iloc[0]
    # probe_names = probe_frame
    print(plot_data.values)
    print(list(plot_data.index))
    # data_source = ColumnDataSource(probe_frame)

    probe_frame.to_csv("data_file.csv")

    return probe_frame
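# Usage sketch: version 1 runs UserStartupGrowthRuleVersion2 and returns both the stage counts
# and the stage proportions (p_*) in one frame, also written to data_file.csv; the call and
# column selection below are illustrative only.
v1_frame = analysis_Complex_version1(population=10000, iteration=20)
print(v1_frame[['p_seed', 'p_success', 'p_failure']].tail())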
from pram.data import GroupSizeProbe, ProbeMsgMode, ProbePersistanceDB
from pram.entity import Group
from pram.sim import Simulation

from Autogenerated1 import Autogenerated1


probe_grp_size_flu = GroupSizeProbe.by_attr(
    'flu', 'flu', ['S', 'I', 'R'],
    msg_mode=ProbeMsgMode.DISP,
    persistance=ProbePersistanceDB(),
    memo='Mass distribution across flu status')

p = GroupSizeProbe.by_attr('flu', 'flu', ['S', 'I', 'R'], persistance=ProbePersistanceDB())

(Simulation().
    add_probe(GroupSizeProbe.by_attr('flu', 'flu', ['S', 'I', 'R'], msg_mode=ProbeMsgMode.DISP)).
    add_probe(p).
    add_rule(Autogenerated1()).
    add_group(Group(m=1000, attr={'flu': 'S'})).
    run(26))

# (Simulation().
#     add_probe(GroupSizeProbe.by_attr('flu', 'flu', ['S', 'I', 'R'], msg_mode=ProbeMsgMode.DISP)).
#     add_rule(Autogenerated()).
#     add_probe(p).
#     run(26)
# )

series = [
    {'var': 'p0', 'lw': 0.75, 'linestyle': '-',  'marker': 'o', 'color': 'red',   'markersize': 0, 'lbl': 'S'},
    {'var': 'p1', 'lw': 0.75, 'linestyle': '--', 'marker': '+', 'color': 'blue',  'markersize': 0, 'lbl': 'I'},
    {'var': 'p2', 'lw': 0.75, 'linestyle': ':',  'marker': 'x', 'color': 'green', 'markersize': 0, 'lbl': 'R'}
]
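# Plotting sketch: the 'series' styling above can be fed to matplotlib by hand, using
# p.probe_data() (as in the analysis functions above) to pull the recorded proportions;
# the output file name is an assumption.
import matplotlib.pyplot as plt

data = p.probe_data([{'var': s['var']} for s in series])
fig, ax = plt.subplots(figsize=(8, 4))
for s in series:
    ax.plot(data['i'], data[s['var']],
            lw=s['lw'], linestyle=s['linestyle'], marker=s['marker'],
            color=s['color'], markersize=s['markersize'], label=s['lbl'])
ax.set_xlabel('iteration')
ax.set_ylabel('proportion of agents')
ax.legend()
fig.savefig('02-flu-proportions.png', dpi=300)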