def walkplot(cons):
    """ Interactive function to walk through the metabolites in the media.
    It prints a growth plot of the consortium for each four metabolites.
    INPUT -> cons: MMODES consortium object already simulated """
    # Call always after cons.run()!
    if not hasattr(cons, "outplot"):
        print("Consortium must run before analyzing the output of the run!")
        return
    global plt
    if not plt:
        import matplotlib.pyplot as plt
    # keep original inputs
    original_metplots = dcp(cons.mets_to_plot)
    original_outplot = dcp(cons.outplot)
    cons.outplot = "tmp.png"
    mets = [k for k in cons.media]
    # loop over metabolites, plot them and let the user close the window
    for m in range(0, len(mets), 4):
        # slice instead of indexing each element so the last (possibly
        # short) group of metabolites does not raise an IndexError
        cons.mets_to_plot = mets[m:m + 4]
        plot_comm(cons)
        plt.show()
        print("Image number", m // 4)
    # clean temporary files, return to original parameters
    plt.close("all")
    os.remove(cons.outplot)
    cons.mets_to_plot = original_metplots
    cons.outplot = original_outplot
    return
def main(input, trainset, testset):
    whole = pd.read_csv(input, sep="\t")
    whole["Mitarbeiter ID"] = whole["Mitarbeiter ID"].apply(json.loads)
    whole.loc[:, "Tag"] = whole["Tag"].apply(datetime.fromisoformat)
    # whole = pd.DataFrame(whole[np.logical_and(
    #     whole.Tag >= datetime(2019, 1, 1),
    #     whole.Tag < datetime(2019, 4, 1)
    # )])
    train, test = train_test_split(whole, random_state=341223)
    all_train_ma_ids = sorted(
        set(it for ll in train["Mitarbeiter ID"] for it in ll))
    should_not_belong_to_test = []
    for irow, row in tqdm(
            test.iterrows(), total=len(test),
            desc="check test set for rows that don't occur in training set"):
        for ma_id in row["Mitarbeiter ID"]:
            if not is_in(all_train_ma_ids, ma_id):
                should_not_belong_to_test.append(irow)
                # record each row index only once, even if several of its
                # IDs are missing from the training set
                break
    train = pd.concat([train, dcp(test.loc[should_not_belong_to_test])])
    test = dcp(test.loc[~test.index.isin(should_not_belong_to_test)])
    train.to_csv(trainset, index=False, sep="\t")
    test.to_csv(testset, index=False, sep="\t")
def __init__(self, _genome=None, _other=None, _isolated=False):
    import cortex.network as cn

    self.ID = 0
    # Increment the global species ID
    if not _isolated:
        Species.ID += 1
        self.ID = Species.ID
        Species.Populations[self.ID] = self
    # Species champion
    self.champion = None
    # Overall species fitness computed from the fitness
    # of networks belonging to this species
    self.fitness = Fitness()
    # Set of networks belonging to this species
    self.nets = set()
    # Species genome (list of layer definitions)
    self.genome = []
    if _other is None:
        # print("Creating genome from initial layer definitions")
        self.genome = dcp(cn.Net.Init.Layers) if _genome is None else dcp(_genome)
    else:
        # print("Copying genome from species", _other.ID)
        self.genome = dcp(_other.genome)
    print(">>> Species", self.ID, "created")
def __init__(self, data, model, keyList):
    self.helper = helper.Helper()
    self.data = dcp(data)
    self.initialModel = dcp(model)
    for key in keyList.keys():
        setattr(self, key, keyList[key])
    self.initialStuntingTrend = -0.0  # percentage decrease in stunting prevalence per year
    self.initialStuntingTrend = (
        self.initialStuntingTrend / 100.0 * self.timestep
    )  # fractional decrease in stunting prevalence per timestep
    self.referenceMortality = {}
    self.probStuntedIfPrevStunted = {}
    self.fracStuntedIfDiarrhea = {}
    self.probStuntedIfCovered = {}
    self.probCorrectlyBreastfedIfCovered = {}
    self.probStuntedComplementaryFeeding = {}
    self.probStuntedAtBirth = {}
    self.stuntingUpdateAfterInterventions = {}
    for ageName in self.ages:
        self.stuntingUpdateAfterInterventions[ageName] = 1.0
    self.setReferenceMortality()
    self.setProbStuntingProgression()
    self.setProbStuntedAtBirth()
def oneModelRunWithOutput(self, allocationDictionary):
    import costcov
    import data
    from copy import deepcopy as dcp

    costCov = costcov.Costcov()
    spreadsheetData = data.readSpreadsheet(self.dataSpreadsheetName,
                                           self.helper.keyList)
    model, derived, params = self.helper.setupModelConstantsParameters(
        spreadsheetData)
    costCoverageInfo = self.getCostCoverageInfo()
    # run the model
    modelList = []
    timestepsPre = 12
    for t in range(timestepsPre):
        model.moveOneTimeStep()
        modelThisTimeStep = dcp(model)
        modelList.append(modelThisTimeStep)
    # update coverages after 1 year
    targetPopSize = getTargetPopSizeFromModelInstance(
        self.dataSpreadsheetName, self.helper.keyList, model)
    newCoverages = {}
    for intervention in spreadsheetData.interventionList:
        newCoverages[intervention] = costCov.function(
            allocationDictionary[intervention],
            costCoverageInfo[intervention],
            targetPopSize[intervention]) / targetPopSize[intervention]
    model.updateCoverages(newCoverages)
    for t in range(self.numModelSteps - timestepsPre):
        model.moveOneTimeStep()
        modelThisTimeStep = dcp(model)
        modelList.append(modelThisTimeStep)
    return modelList
def connect_detec_point(self, A, EM, x, y, visual=0):
    # morphological reconstruction: grow the connected component of A
    # that contains the seed pixel (x, y), dilating until convergence
    Xk_1 = A * 0
    Xk = dcp(Xk_1)
    Xk[x, y] = 255
    if visual:
        winName = 'find connect'
        cv2.namedWindow(winName, cv2.WINDOW_NORMAL)
    while (Xk != Xk_1).any():
        if visual:
            cv2.imshow(winName, Xk)
            cv2.waitKey(1)
        Xk_1 = Xk
        temp = self.dilation(Xk_1, EM)
        Xk = cv2.bitwise_and(dcp(temp), dcp(A))
    X, Y = np.where(Xk == 255)
    mx = np.mean(X)
    my = np.mean(Y)
    area = len(X)
    data = [X, Y, area, mx, my]
    statics = pd.DataFrame([data], columns=['X', 'Y', 'Area', 'mx', 'my'])
    if visual:
        cv2.waitKey(1)
        cv2.destroyWindow(winName)
    return (statics, Xk)
def negamaxsearch(cfirst_player_pieces, csecond_player_pieces, cturn,
                  depth, possibal_piece=0):
    global all_pieces
    first_player_pieces = dcp(cfirst_player_pieces)
    second_player_pieces = dcp(csecond_player_pieces)
    turn = dcp(cturn)
    if depth <= 0:
        return evaluation(first_player_pieces, second_player_pieces,
                          turn), possibal_piece
    best = -50
    best_move = 0
    all_possible_pieces = set(all_pieces) - (set(first_player_pieces) |
                                             set(second_player_pieces))
    for possible_piece in all_possible_pieces:
        v_first_player_pieces, v_second_player_pieces, v_turn = game_move(
            first_player_pieces, second_player_pieces, turn, possible_piece)
        value, best_piece = negamaxsearch(v_first_player_pieces,
                                          v_second_player_pieces, v_turn,
                                          depth - 1, possible_piece)
        value *= -1
        if value > best:
            best = value
            best_move = possible_piece
    return best, best_move
def __init__(self,
             name,
             directory,
             out_irq_num=0,
             in_irq_num=0,
             mmio_num=0,
             pio_num=0,
             mmio=None,
             pio=None,
             **qomd_kw):
    super(SysBusDeviceType, self).__init__(name, directory, **qomd_kw)
    self.out_irq_num = out_irq_num
    self.in_irq_num = in_irq_num
    self.mmio_num = mmio_num
    self.pio_num = pio_num
    self.mmio_size_macros = []
    # Qemu requires that any RAM, ROM or ROM device has a unique name. Some
    # devices have such MMIOs. Also, different names simplify understanding
    # of `info mtree` HMP command output during debugging.
    self.mmio_name_macros = []
    self.pio_size_macros = []
    self.pio_address_macros = []
    self.mmio = {} if mmio is None else dcp(mmio)
    self.pio = {} if pio is None else dcp(pio)
    self.mmio_names = {}
    self.add_state_field_h("SysBusDevice", "parent_obj", save=False)
    for irqN in range(self.out_irq_num):
        self.add_state_field_h("qemu_irq", self.get_Ith_irq_name(irqN),
                               save=False)
    for mmioN in range(self.mmio_num):
        self.add_state_field_h("MemoryRegion", self.get_Ith_mmio_name(mmioN),
                               save=False)
        self.add_fields_for_mmio(self.mmio.get(mmioN, list()))
    for ioN in range(self.pio_num):
        self.add_state_field_h("MemoryRegion", self.get_Ith_io_name(ioN),
                               save=False)
        self.add_fields_for_regs(self.pio.get(ioN, list()))
    self.timer_declare_fields()
    self.char_declare_fields()
    self.block_declare_fields()
def getCostCoverageInfo(self):
    import data
    from copy import deepcopy as dcp

    spreadsheetData = data.readSpreadsheet(self.dataSpreadsheetName,
                                           self.helper.keyList)
    costCoverageInfo = {}
    for intervention in spreadsheetData.interventionList:
        costCoverageInfo[intervention] = {}
        costCoverageInfo[intervention]['unitcost'] = dcp(
            spreadsheetData.costSaturation[intervention]["unit cost"])
        costCoverageInfo[intervention]['saturation'] = dcp(
            spreadsheetData.costSaturation[intervention]["saturation coverage"])
    return costCoverageInfo
def get_target_grasp_pose(source_pcd):
    # copy the source pcd and transform into robot frame
    source_pcd_original = dcp(source_pcd)
    source_pcd_original.paint_uniform_color([0, 0, 1])
    source_pcd_original.transform(transform_cam_to_base_hand_calibrated)
    # copy one to be processed, and crop
    # (crop() returns a new point cloud, so keep the result)
    source_pcd_to_process = dcp(source_pcd_original)
    source_pcd_to_process = source_pcd_to_process.crop(workspace_bounding_box)
    # pre-processing for registration
    # transform the source pcd to an arbitrary pose far away from the target
    source_pcd_to_process.transform(init_transformation_for_global_registration)
    # compute fpfh
    source_down, source_fpfh = preprocess_point_cloud(source_pcd_to_process)
    # global registration
    result_fast = execute_fast_global_registration(source_down, target_down,
                                                   source_fpfh, target_fpfh)
    # icp refinement
    result_refine = refine_registration(source_down, target_down,
                                        result_fast.transformation)
    # final transformation should include the initial transformation
    transform_source_to_target = np.matmul(
        result_refine.transformation,
        init_transformation_for_global_registration)
    # visualizing result
    new_grasp_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.2)
    transform_target_to_source = np.linalg.inv(transform_source_to_target)
    transform_target_grasp = np.matmul(transform_target_to_source,
                                       transform_base_to_reference_grasp)
    new_grasp_frame.transform(transform_target_grasp)
    print('[INFO] Visualizing the found grasping pose')
    o3d.visualization.draw_geometries(
        [robot_frame, new_grasp_frame, source_pcd_original, target],
        window_name='Grasping pose proposal',
        width=1200, height=960)
    # convert transformation matrix to coordinate and quaternion
    rotation_quat = quat.from_rotation_matrix(
        transform_target_grasp[:-1, :-1])  # this is already normalized
    rotation_quat = quat.as_float_array(rotation_quat).tolist()  # wxyz
    translate_matrix = transform_target_grasp[:-1, -1].tolist()  # xyz
    # construct and return a PoseStamped msg
    pose = dcp(pose_msg)
    pose.pose.position.x = translate_matrix[0]
    pose.pose.position.y = translate_matrix[1]
    pose.pose.position.z = translate_matrix[2]
    pose.pose.orientation.w = rotation_quat[0]
    pose.pose.orientation.x = rotation_quat[1]
    pose.pose.orientation.y = rotation_quat[2]
    pose.pose.orientation.z = rotation_quat[3]
    return pose
def execute_body(body, s, env, stack):
    for stmt in body:
        if type(stmt) == AxiomCall:
            if type(s) == set:
                for f in env[stmt.ax]:
                    if type(f) == Val:
                        f = f.val
                    lasts = dcp(s)
                    s = {f(x, env, stack) for x in s}
                    if s != lasts:
                        break
            elif type(s) == str:
                for f in env[stmt.ax]:
                    if type(f) == Val:
                        f = f.val
                    lasts = dcp(s)
                    s = f(s, env, stack)
                    if s != lasts:
                        break
        elif type(stmt) == FixpointCall:
            last = None
            if type(s) == set:
                for f in env[stmt.ax]:
                    if type(f) == Val:
                        f = f.val
                    lasts = dcp(s)
                    s = {f(x, env, stack) for x in s}
                    if s != lasts:
                        while last != s:
                            last = s
                            s = {f(x, env, stack) for x in s}
                        break
                continue
            elif type(s) == str:
                for f in env[stmt.ax]:
                    if type(f) == Val:
                        f = f.val
                    lasts = dcp(s)
                    s = f(s, env, stack)
                    if lasts != s:
                        while last != s:
                            last = s
                            s = f(s, env, stack)
        elif type(stmt) == Sect:
            if stmt.var == 'str':
                s = env[stmt.args[0]][0].val
            elif stmt.var == 'strs':
                s = {env[a][0].val for a in stmt.args}
            continue
    return s
def init_V(S, goal, g=1.0, ng=None):
    V, V_ = {}, {}
    for state in S:
        s = tuple(state)
        if s not in V:
            V[s], V_[s] = 0.0, 0.0
        if s in goal:
            V[s], V_[s] = g, g
        if ng is not None and s in ng:
            V[s], V_[s] = 0.0, 0.0
    return dcp(V), dcp(V_)
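# A minimal usage sketch for init_V (hypothetical inputs, not from the
# original project): S is a list of [x, y] grid cells, goal/ng are sets of
# state tuples, and dcp is the deepcopy alias used throughout these snippets.
from copy import deepcopy as dcp

S = [[0, 0], [0, 1], [1, 0], [1, 1]]  # 2x2 grid
goal = {(1, 1)}                       # rewarded terminal state
ng = {(1, 0)}                         # "no-good" state forced back to 0.0
V, V_ = init_V(S, goal, g=1.0, ng=ng)
print(V)  # {(0, 0): 0.0, (0, 1): 0.0, (1, 0): 0.0, (1, 1): 1.0}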
def make_pipelines_mp(self):
    '''
    Core algorithm, coming soon.
    :return:
    '''
    ret_list = []
    for k1, v1 in self.process_dict['preprocessing'].items():
        steps = []
        parameters = {}
        k1_list = []
        if k1 != 'do_nothing':
            steps.append((k1, v1))
            if k1 in self.parameters_dict:
                for k, v in self.parameters_dict[k1].items():
                    key = k1 + "__" + k
                    parameters[key] = v
                    k1_list.append(key)
        for k2, v2 in self.process_dict['decomposition'].items():
            k2_list = []
            if k2 != 'do_nothing':
                steps.append((k2, v2))
                if k2 in self.parameters_dict:
                    for k, v in self.parameters_dict[k2].items():
                        key = k2 + "__" + k
                        parameters[key] = v
                        k2_list.append(key)
            for k3, v3 in self.process_dict['model'].items():
                k3_list = []
                if k3 in self.parameters_dict:
                    for k, v in self.parameters_dict[k3].items():
                        key = k3 + "__" + k
                        parameters[key] = v
                        k3_list.append(key)
                steps.append((k3, v3))
                # r_key = ((k1 + "+") if k1 != 'do_nothing' else "") + ((k2 + "+") if k2 != 'do_nothing' else "") + k3
                r_key = k1 + "+" + k2 + "+" + k3
                self.result[r_key] = {}
                ret_list.append(
                    [r_key, Pipeline(steps=dcp(steps)), dcp(parameters)])
                steps.remove((k3, v3))
                for key in k3_list:
                    del parameters[key]
            if k2 != 'do_nothing':
                steps.remove((k2, v2))
                for key in k2_list:
                    del parameters[key]
        if k1 != 'do_nothing':
            steps.remove((k1, v1))
            for key in k1_list:
                del parameters[key]
    return ret_list
def makePregnantWomen(self, inputData):
    import model

    annualPregnancies = dcp(inputData.demographics['number of pregnant women'])
    annualBirths = dcp(inputData.demographics['number of live births'])
    populationSize = annualPregnancies
    birthRate = annualBirths / annualPregnancies
    projectedBirths = dcp(inputData.projectedBirths)
    baseBirths = float(projectedBirths[0])
    numYears = len(projectedBirths) - 1
    annualGrowth = (projectedBirths[numYears] - baseBirths) / float(numYears) / baseBirths
    pregnantWomen = model.PregnantWomen(birthRate, populationSize, annualGrowth)
    return pregnantWomen
def __init__(self, gli_dict=None, **OGS_Config):
    super(GLI, self).__init__(**OGS_Config)
    self.file_ext = ".gli"
    self.force_writing = True
    if gli_dict is None:
        self.__dict = dcp(EMPTY_GLI)
    elif check_gli_dict(gli_dict):
        self.__dict = gli_dict
    else:
        print("given gli_dict is not valid.. will set default")
        self.__dict = dcp(EMPTY_GLI)
def reset(self):
    self.P_h_g = dcp(self.P_g)
    self.ctrl_belief_P1 = dcp(self.init_ctrl_belief)
    self.decoys = dcp(self.init_decoys)
    for goals in self.decoys:
        if self.decoys[goals][0] in self.true_goal:
            self.ctrl_belief_P1[goals] = 1.0
        else:
            self.ctrl_belief_P1[goals] = 0.0
    self.record = []
    self.true_record = []
    self.total_KL = 0
def cell_copy(cell):
    copy = {'secs': {}, 'secLists': {}, 'globals': {}}
    for sec in cell['secs']:
        copy['secs'][sec] = {'geom': dcp(cell['secs'][sec]['geom']),
                             'topol': dcp(cell['secs'][sec]['topol']),
                             'mechs': {},
                             'ions': {}}
        for mech in cell['secs'][sec]['mechs']:
            copy['secs'][sec]['mechs'][mech] = dcp(cell['secs'][sec]['mechs'][mech])
        for ion in cell['secs'][sec]['ions']:
            copy['secs'][sec]['ions'][ion] = dcp(cell['secs'][sec]['ions'][ion])
    return copy
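# A minimal usage sketch for cell_copy (hypothetical cell dict, shaped like
# the nested NetPyNE-style structure the function expects): mutating the
# copy leaves the original untouched because every sub-dict goes through dcp.
from copy import deepcopy as dcp

cell = {
    'secs': {
        'soma': {
            'geom': {'L': 18.8, 'diam': 18.8},
            'topol': {},
            'mechs': {'hh': {'gnabar': 0.12}},
            'ions': {'na': {'e': 50.0}},
        }
    },
    'secLists': {},
    'globals': {},
}
clone = cell_copy(cell)
clone['secs']['soma']['mechs']['hh']['gnabar'] = 0.0
print(cell['secs']['soma']['mechs']['hh']['gnabar'])  # 0.12: original intact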
def get_mnist_dataloaders(
    labeled_sample_num=10,
    unlabeled_class_sample_nums=None,
    train_transform=None,
    val_transform=None,
    dataloader_params={},
):
    train_set = MNIST(root=DATA_PATH, train=True, download=True)
    val_set = MNIST(root=DATA_PATH, train=False, download=True,
                    transform=val_transform)
    val_set_index = _draw_equal_dataset(
        val_set.targets, num_samples=4000, allowed_classes=[0, 1, 2, 3, 4]
    )
    val_set = Subset(val_set, val_set_index)
    labeled_index = _draw_equal_dataset(
        train_set.targets, labeled_sample_num, allowed_classes=[0, 1, 2, 3, 4]
    )
    labeled_set = Subset(dcp(train_set), labeled_index)
    _override_transformation(labeled_set, train_transform)
    unlabeled_index = _draw_inequal_dataset(
        train_set.targets,
        class_sample_nums=unlabeled_class_sample_nums,
        excluded_index=labeled_index.tolist(),
    )
    unlabeled_set = Subset(dcp(train_set), unlabeled_index)
    _override_transformation(unlabeled_set, train_transform)
    assert set(labeled_index.tolist()) & set(unlabeled_index.tolist()) == set()
    del train_set
    show_dataset(labeled_set)
    show_dataset(unlabeled_set)
    show_dataset(val_set)
    labeled_loader = DataLoader(
        labeled_set,
        sampler=RandomSampler(
            data_source=labeled_set, replacement=True, num_samples=int(1e5)
        ),
        **dataloader_params
    )
    unlabeled_loader = DataLoader(
        unlabeled_set,
        sampler=RandomSampler(
            data_source=unlabeled_set, replacement=True, num_samples=int(1e5)
        ),
        **dataloader_params
    )
    val_loader = DataLoader(val_set, num_workers=1, batch_size=16)
    return labeled_loader, unlabeled_loader, val_loader
def __init__(self,
             group2index,
             partion2index,
             group_sample_num=4,
             partition_sample_num=1,
             shuffle=False) -> None:
    self._group2index, self._partition2index = dcp(group2index), dcp(partion2index)
    assert 1 <= group_sample_num <= len(self._group2index.keys()), group_sample_num
    self._group_sample_num = group_sample_num
    self._partition_sample_num = partition_sample_num
    self._shuffle = shuffle
def _renew_goals(self):
    # this function refreshes goals
    block_r_pos = self.sim.data.get_site_xpos('block_r')
    block_b_pos = self.sim.data.get_site_xpos('block_b')
    block_r_ground_pos = dcp(block_r_pos)
    block_r_ground_pos[2] = self.block_ground_pos_z
    block_b_ground_pos = dcp(block_b_pos)
    block_b_ground_pos[2] = self.block_ground_pos_z
    # absolute position of blue block on top of red block
    block_b_pos_on_red = dcp(block_r_pos)
    block_b_pos_on_red[2] += self.block_height
    sub_goals = {
        "pick_blue": np.concatenate([
            # gripper state & position
            self.grasping_gripper_state,
            (block_b_ground_pos + GRASPING_HEIGHT_OFFSET).ravel(),
            # absolute positions of blocks
            block_r_ground_pos.ravel(),
            block_b_ground_pos.ravel(),
        ]),
        "blue_on_red": np.concatenate([
            # gripper state & position
            self.grasping_gripper_state,
            (block_b_pos_on_red + GRASPING_HEIGHT_OFFSET).ravel(),
            # absolute positions of blocks
            block_r_ground_pos.ravel(),
            block_b_pos_on_red.ravel(),
        ]),
        "ending_br": np.concatenate([
            # gripper state & position
            ending_gripper_state,
            GRIP_END_POS.ravel(),
            # absolute positions of blocks
            block_r_ground_pos.ravel(),
            block_b_pos_on_red.ravel(),
        ]),
    }
    # a final goal is a binary vector which represents the completion of subgoals
    final_goals = {}
    gripper_target_positions = {}
    object_target_positions = {}
    _ = np.zeros(len(sub_goals))
    for i, (k, v) in enumerate(sub_goals.items()):
        final_goals[k] = _.copy()
        final_goals[k][i] += 1.0
        gripper_target_positions[k] = v[2:5].copy()
        object_target_positions[k] = v[-6:].copy()
    if not self.binary_final_goal:
        final_goals = dcp(sub_goals)
    assert len(sub_goals) == len(final_goals) == len(
        gripper_target_positions) == len(object_target_positions)
    return dcp(sub_goals), dcp(final_goals), dcp(
        gripper_target_positions), dcp(object_target_positions)
def creating_session(self):
    if self.round_number == 1:
        # if you want to create a new ordering file;
        # gets executed if the 'create_ques_order' SESSION_CONFIG is True
        if self.session.config['create_ques_order']:
            # create question numbers 0 to 6 (7 questions)
            ques = [i for i in range(7)]
            # open writable file in folder 'storage' in otree root directory
            with open('storage/questOrder.csv', 'w', newline='') as csvfile:
                # create a csv writer object (tab delimited)
                storewriter = csv.writer(csvfile, delimiter='\t')
                # get player objects
                for p in self.get_players():
                    # make deep copy to avoid resetting values for all players
                    randomOrder = dcp(ques)
                    # randomize question list for each player
                    shuffle(randomOrder)
                    # assign order to player's participant vars
                    p.participant.vars['questOrder'] = randomOrder
                    # make a deep copy of the question list
                    line = dcp(randomOrder)
                    # add player ID to front of order list
                    line.insert(0, p.id)
                    # write each line from the list above to the csvfile
                    storewriter.writerow(line)
            # closing is done automatically
            '''Now every player has a question order in their participant
            vars 'questOrder'. You can use this list item and condition on
            each number in views.py or a template'''
        else:
            # if create_ques_order is False, open file for reading
            with open('storage/questOrder.csv', 'r') as csvfile:
                # create reader object
                storereader = csv.reader(csvfile, delimiter='\t')
                # read rows into a list of lists
                addrow = []
                for row in storereader:
                    addrow.append(row)
            j = 0
            # add vars to participant
            for p in self.get_players():
                # assign the stored questOrder to the player
                p.participant.vars['questOrder'] = addrow[j][1:]
                j = j + 1
def get_graph_for_feasible_solution(g: Graph):
    fg = dcp(g)
    b = 0
    neg = 0
    for n in fg.node_list:
        b += n.value
        if n.value < 0:
            neg += n.value
    if b != 0:
        return None
    s = Node(-neg)
    t = Node(neg)
    for n in fg.node_list:
        if n.value > 0:
            a = Arc(s, n, 0, n.value)
            s.outList.append(a)
            n.inList.append(a)
            fg.arc_list.append(a)
        elif n.value < 0:
            a = Arc(n, t, 0, -n.value)
            n.outList.append(a)
            t.inList.append(a)
            fg.arc_list.append(a)
    fg.node_list.insert(0, s)
    fg.node_list.append(t)
    fg.number()
    fg.s = s
    fg.t = t
    return fg
def set_P(S, A, epsilon):
    P = {}
    for state in S:
        s = tuple(state)
        explore = []
        for act in A.keys():
            temp = tuple(np.array(s) + np.array(A[act]))
            explore.append(temp)
        # print s, explore
        for a in A.keys():
            P[s, a] = {}
            P[s, a][s] = 0
            s_ = tuple(np.array(s) + np.array(A[a]))
            unit = epsilon / 3
            if list(s_) in S:
                P[s, a][s_] = 1 - epsilon
                for _s_ in explore:
                    if tuple(_s_) != s_:
                        if list(_s_) in S:
                            P[s, a][tuple(_s_)] = unit
                        else:
                            P[s, a][s] += unit
            else:
                P[s, a][s] = 1 - epsilon
                for _s_ in explore:
                    if _s_ != s_:
                        if list(_s_) in S:
                            P[s, a][tuple(_s_)] = unit
                        else:
                            P[s, a][s] += unit
    return dcp(P)
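# A minimal usage sketch for set_P (hypothetical grid and action set): the
# intended successor receives probability 1 - epsilon, each other in-grid
# neighbour gets epsilon / 3, and off-grid mass is reflected back onto s.
import numpy as np
from copy import deepcopy as dcp

S = [[x, y] for x in range(3) for y in range(3)]            # 3x3 grid
A = {'N': [0, 1], 'S': [0, -1], 'E': [1, 0], 'W': [-1, 0]}  # unit moves
P = set_P(S, A, epsilon=0.1)
# moving north from the centre: (1, 2) gets 0.9, the three other
# neighbours get 0.1 / 3 each, and the self-transition stays at 0
print(P[(1, 1), 'N'])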
def randomWander(self, fnum):
    for _it in range(self.max_iter):  # outermost loop (number of tours)
        self.probs = (self.phm ** self.a) * self.prefix
        for k in range(self.city):  # tour loop over the cities
            for i in range(self.ant_num):  # loop over every ant
                if k == self.city - 1:
                    self.ants[i].back2Start()
                    cost, path = self.ants[i].calculate(self.adjs)
                    if cost < self.cost:
                        self.cost = cost
                        self.shortest = dcp(path)
                else:
                    pos, choice_vec = self.choiceForAnt(i)
                    # print(pos, choice_vec)
                    next_pos = rd.choices(pos, choice_vec, k=1)[0]
                    self.ants[i].updatePos(next_pos)
        self.costs.append(self.cost)
        self.updateSecretion()
        for i in range(self.ant_num):
            self.ants[i].reset()
        print("Iter %d / %d" % (_it, self.max_iter))
    self.shortest = self.exchange(self.shortest)
    print("Result(%d):" % (fnum), self.shortest)
    print("Random wander(%d ants) for %d times completed." % (self.ant_num, self.max_iter))
    self.draw(fnum)
def getZa(self, incidence, breastfeedingDistribution):
    bfDistribution = dcp(breastfeedingDistribution)
    Za = {}
    for ageName in self.ages:
        riskSum = self.getDiarrheaRiskSum(ageName, bfDistribution)
        Za[ageName] = incidence[ageName] / riskSum
    return Za
def list_to_osm(data, destination_file_name, lane_widths=None):
    writer = osmwriter.OSMWriter(destination_file_name)
    version = int(1)
    uuid = 1
    for k in range(len(data)):
        way_ = data[k]
        way_ids = []
        way_node0_uuid = None
        print('First point in way: ', utm.from_latlon(way_[0][0], way_[0][1]))
        for kk in range(len(way_)):
            pt = way_[kk]  # lat, lon
            if lane_widths:
                tags = {'width': str(lane_widths[k][kk])}
            else:
                tags = None
            writer.node(uuid, pt[0], pt[1], tags=tags, version=version)
            way_ids.append(uuid)
            if len(way_ids) == 1:
                # this is the first node in the way. We want the way to be
                # closed since the track is circular, so save this uuid for
                # later to append it to the end of the way's uuid's.
                way_node0_uuid = dcp(uuid)
            uuid += 1
        # add the first node to the end since we know this is a nice perfect loop
        way_ids.append(way_node0_uuid)
        writer.way(uuid, {}, way_ids, version=version)
        uuid += 1
    writer.close()
def SemiSupervisedParallelDataLoaders(
    self,
    labeled_transforms: List[Callable[[Image.Image], Tensor]],
    unlabeled_transforms: List[Callable[[Image.Image], Tensor]],
    val_transforms: List[Callable[[Image.Image], Tensor]],
    test_transforms: List[Callable[[Image.Image], Tensor]],
    target_transform: Callable[[Tensor], Tensor] = None,
) -> Tuple[DataLoader, DataLoader, DataLoader, DataLoader]:
    _dataloader_params = dcp(self.dataloader_params)

    def _override_transforms(dataset, img_transform_list, target_transform_list):
        # here deep copying the datasets is needed.
        return [
            self.override_transforms(dcp(dataset), img_trans, target_trans)
            for img_trans, target_trans in zip(
                img_transform_list, target_transform_list
            )
        ]

    (
        labeled_set,
        unlabeled_set,
        val_set,
        test_set,
    ) = self._init_labeled_unlabled_val_and_test_sets()
    target_transform_list = repeat(target_transform)

    labeled_sets = _override_transforms(
        labeled_set, labeled_transforms, target_transform_list
    )
    unlabeled_sets = _override_transforms(
        unlabeled_set, unlabeled_transforms, target_transform_list
    )
    val_sets = _override_transforms(val_set, val_transforms, target_transform_list)
    test_sets = _override_transforms(
        test_set, test_transforms, target_transform_list
    )

    labeled_set = CombineDataset(*labeled_sets)
    unlabeled_set = CombineDataset(*unlabeled_sets)
    val_set = CombineDataset(*val_sets)
    test_set = CombineDataset(*test_sets)

    if self._if_use_indiv_bz:
        _dataloader_params.update(
            {"batch_size": self.batch_params.get("labeled_batch_size")}
        )
    labeled_loader = DataLoader(labeled_set, **_dataloader_params)
    if self._if_use_indiv_bz:
        _dataloader_params.update(
            {"batch_size": self.batch_params.get("unlabeled_batch_size")}
        )
    unlabeled_loader = DataLoader(unlabeled_set, **_dataloader_params)
    _dataloader_params.update({"shuffle": False, "drop_last": False})
    if self._if_use_indiv_bz:
        _dataloader_params.update(
            {"batch_size": self.batch_params.get("val_batch_size")}
        )
    val_loader = DataLoader(val_set, **_dataloader_params)
    test_loader = DataLoader(test_set, **_dataloader_params)
    return labeled_loader, unlabeled_loader, val_loader, test_loader
def _grouped_dataloader(
    self,
    dataset: MedicalImageSegmentationDataset,
    use_infinite_sampler: bool = False,
    **dataloader_params: Dict[str, Union[int, float, bool]],
) -> DataLoader:
    """
    Return a dataloader whose batches are grouped by the regex of the
    patient-name pattern.
    :param dataset: dataset to wrap
    :param use_infinite_sampler: whether the underlying sampler loops forever
    :return: grouped DataLoader
    """
    dataloader_params = dcp(dataloader_params)
    batch_sampler = PatientSampler(
        dataset=dataset,
        grp_regex=dataset._re_pattern,
        shuffle=dataloader_params.get("shuffle", False),
        verbose=self.verbose,
        infinite_sampler=True if use_infinite_sampler else False,
    )
    # a DataLoader with a batch_sampler cannot accept batch_size > 1
    dataloader_params["batch_size"] = 1
    dataloader_params["shuffle"] = False
    dataloader_params["drop_last"] = False
    return DataLoader(dataset, batch_sampler=batch_sampler, **dataloader_params)
def _generate_goal(self):
    block_pos, _ = self._p.getBasePositionAndOrientation(self.object_bodies['block'])
    block_pos = np.array(block_pos)

    end_effector_tip_initial_position = self.robot.end_effector_tip_initial_position.copy()
    block_target_position = end_effector_tip_initial_position + \
        self.np_random.uniform(-self.obj_range, self.obj_range, size=3)
    if self.target_one_table:
        # keep the target on the table by pinning its z-coordinate
        block_target_position[2] = self.object_initial_pos['block'][2]

    picking_grip_pos = block_pos.copy()
    picking_grip_pos[-1] += self.gripper_tip_offset
    placing_grip_pos = block_target_position.copy()
    placing_grip_pos[-1] += self.gripper_tip_offset

    sub_goals = {
        "pick": np.concatenate([
            # gripper state & position
            picking_grip_pos, [0.03],
            # absolute positions of blocks
            block_pos,
        ]),
        "place": np.concatenate([
            # gripper state & position
            placing_grip_pos, [0.03],
            # absolute positions of blocks
            block_target_position,
        ]),
    }
    final_goals = dcp(sub_goals)
    goal_images = {
        "pick": self._generate_goal_image(0.55, picking_grip_pos, block_pos),
        "place": self._generate_goal_image(0.55, placing_grip_pos, block_target_position),
    }
    return sub_goals, final_goals, goal_images
def reduce(self, decoy):
    self.P_h_g.pop(decoy)
    self.decoys.pop(decoy)
    self.ctrl_belief_P1.pop(decoy)
    temp_sum = sum(self.P_h_g.values())
    flag = False
    for goal in self.P_h_g.keys():
        if self.P_h_g[goal] < 0.01:
            flag = True
            break
    normalize_flag = True
    normalized_p = {}
    # do the normalization
    if flag:
        for goal in self.decoys:
            self.P_h_g[goal] = 1.0 / len(self.P_h_g)
    else:
        for goal in self.decoys:
            normalized_p[goal] = self.P_h_g[goal] / temp_sum
            if normalized_p[goal] > 0.99:
                new_temp_sum = temp_sum - self.P_h_g[goal]
                self.P_h_g[goal] = 0.99
                for g in self.P_h_g:
                    if not g == goal:
                        self.P_h_g[g] = (1 - 0.99) * self.P_h_g[g] / new_temp_sum
                normalize_flag = False
                break
        if normalize_flag:
            self.P_h_g = dcp(normalized_p)
def diffCost(p1: np.array, p2: np.array, col=True):
    r, c = p1.shape
    _p1 = dcp(p1)
    _p2 = dcp(p2)
    _p1 = _p1.astype(float)
    _p2 = _p2.astype(float)
    if col:
        # use the image columns as the basis for comparison
        pic1 = np.zeros((1, r))
        pic2 = np.zeros((1, r))
        for i in range(c):
            pic1 += i ** 2 / (c ** 3) * (_p1[:, i])
            pic2 += (1 - i ** 2 / c ** 2) / c * (_p2[:, i])
    else:
        # use the rows as the basis for comparison
        pic1 = p1[-1, :]
        pic2 = p2[0, :]
    return np.linalg.norm((pic1 - pic2))
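# A minimal usage sketch for diffCost (hypothetical patches): with col=True
# it compares column-weighted intensity profiles, so two identical patches
# cost 0.0 while a black/white pair yields a large positive norm.
import numpy as np
from copy import deepcopy as dcp

left = np.zeros((8, 8), dtype=np.uint8)
right = np.full((8, 8), 255, dtype=np.uint8)
print(diffCost(left, right))      # large: the weighted profiles differ
print(diffCost(left, dcp(left)))  # 0.0: identical patches match exactly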
def get_dataloader(name: str = None, aug: bool = False, DataLoader_DICT: dict = {}):
    DataLoader_DICT = dcp(DataLoader_DICT)
    assert name in ("cifar10", "svhn")
    print(f"data aug: {bool(aug)}.")
    if name == "cifar10":
        SemiDatasetHandler = Cifar10SemiSupervisedDatasetInterface(
            tra_img_transformation=default_cifar10_aug_transformation["train"]
            if aug else default_cifar10_transformation["train"],
            val_img_transformation=default_cifar10_aug_transformation["val"]
            if aug else default_cifar10_transformation["val"],
            verbose=True,
        )
    else:
        SemiDatasetHandler = SVHNSemiSupervisedDatasetInterface(
            tra_img_transformation=default_svhn_aug_transformation["train"]
            if aug else default_svhn_transformation["train"],
            val_img_transformation=default_svhn_aug_transformation["val"]
            if aug else default_svhn_transformation["val"],
            verbose=True,
        )
    DataLoader_DICT.pop("name", None)
    label_loader, unlabel_loader, val_loader = SemiDatasetHandler.SemiSupervisedDataLoaders(
        **DataLoader_DICT)
    return label_loader, unlabel_loader, val_loader
def getDiarrheaRiskSum(self, ageName, breastfeedingDistribution):
    bfDistribution = dcp(breastfeedingDistribution)
    riskSum = 0.0
    for breastfeedingCat in self.breastfeedingList:
        RDa = self.data.RRdiarrhea[ageName][breastfeedingCat]
        pab = bfDistribution[ageName][breastfeedingCat]
        riskSum += RDa * pab
    return riskSum
def publish(self):
    pub = rospy.Publisher('survey', Marker, latch=True)
    m = dcp(self.marker)
    m.id = 0
    m.pose.position.x = self.position[0]  # - float(self.UTMdatum['E'])
    m.pose.position.y = self.position[1]  # - float(self.UTMdatum['N'])
    m.pose.position.z = self.position[2]
    pub.publish(m)
def normalize(self):
    '''
    Return new normalized vector, i.e. length of 1
    '''
    # detach the protein back-reference so dcp() does not deep-copy the protein
    protein = self.protein
    self.protein = None
    v = dcp(self)
    v.protein = protein
    return v / abs(v)
def __neg__(self):
    '''
    Returns new Vector = self * -1
    '''
    protein = self.protein
    self.protein = None
    v = dcp(self)
    v.protein = protein
    return v * (-1)
def __loop(self):
    while True:
        cur_ext_ip = get_external_ip()
        changed = cur_ext_ip != self.ext_ip
        old_ext_ip = dcp(self.ext_ip)
        self.ext_ip = cur_ext_ip
        with self.lock:
            if self.is_kill:
                return
            if changed:
                old_ext_ip_info = dcp(self.ext_ip_info)
                self.ext_ip_info = geoip.geolite2.lookup(self.ext_ip).get_info_dict()
                print('IP has changed.')
                print(self.ext_ip)
                print(self.location_string())
                self.menu_item_extip.get_child().set_text(self.ext_ip)
                self.alert(old_info=old_ext_ip_info)
                self.menu_item_loc.get_child().set_text(self.location_string())
        sleep(5.0)
def setProbStuntedComplementaryFeeding(self, stuntingDistributionArg, coverageArg):
    coverage = dcp(coverageArg)
    stuntingDistribution = dcp(stuntingDistributionArg)
    coEffs = self.getComplementaryFeedingQuarticCoefficients(stuntingDistribution, coverage)
    baselineProbStuntingComplementaryFeeding = self.getBaselineProbabilityViaQuarticByAge(coEffs)
    probStuntedComplementaryFeeding = {}
    for ageName in self.ages:
        probStuntedComplementaryFeeding[ageName] = {}
        p0 = baselineProbStuntingComplementaryFeeding[ageName]
        probStuntedComplementaryFeeding[ageName]["Complementary feeding (food secure with promotion)"] = p0
        for group in self.data.foodSecurityGroups:
            OR = self.data.ORstuntingComplementaryFeeding[ageName][group]
            probStuntedComplementaryFeeding[ageName][group] = p0 * OR / (1.0 - p0 + OR * p0)
            pi = probStuntedComplementaryFeeding[ageName][group]
            if pi < 0.0 or pi > 1.0:
                raise ValueError(
                    "probability of stunting complementary feeding, at outcome %s, age %s, is out of range (%f)"
                    % (group, ageName, pi)
                )
    self.probStuntedComplementaryFeeding = probStuntedComplementaryFeeding
def minimizeTimes(self):
    """subtracts the lowest existing timestamp value from all msg times

    Designed to work on time-based architectures. Relies on the get_tmin
    function.
    """
    from copy import deepcopy as dcp

    tmin = self.get_tmin()
    for t in self.srcData:
        old = dcp(self.srcData[t])
        new_t = t - tmin
        self.outData[new_t] = old
def getTotalInitialAllocation(data, costCoverageInfo, targetPopSize):
    import costcov

    costCov = costcov.Costcov()
    allocation = []
    for intervention in data.interventionList:
        coverageFraction = array([dcp(data.interventionCoveragesCurrent[intervention])])
        coverageNumber = coverageFraction * targetPopSize[intervention]
        if coverageNumber == 0:
            spending = array([0.])
        else:
            spending = costCov.inversefunction(coverageNumber,
                                               costCoverageInfo[intervention],
                                               targetPopSize[intervention])
        allocation.append(spending)
    return allocation
def publish_array(self):
    pub = rospy.Publisher('survey', MarkerArray, latch=True)
    array = MarkerArray()
    m_id = 0
    for p in self.position:
        m = dcp(self.marker)
        m.id = m_id
        m.pose.position.x = p[0] - float(self.UTMdatum['E'])
        m.pose.position.y = p[1] - float(self.UTMdatum['N'])
        m.pose.position.z = p[2]
        array.markers.append(m)
        m_id += 1
    pub.publish(array)
def __truediv__(self, value):
    '''
    Returns new Vector = self / scalar
    Vector.desc contains history of operation with index position in protein.atoms
    '''
    protein = self.protein
    self.protein = None
    v = dcp(self)
    v.protein = protein
    v.x = self.x / float(value)
    v.y = self.y / float(value)
    v.z = self.z / float(value)
    v.desc = 'idx:' + str(v.desc) + '/' + str(value)
    return v
def __rmul__(self, value):
    '''
    Returns new Vector = scalar * self
    Vector.desc contains history of operation with index position in protein.atoms
    '''
    protein = self.protein
    self.protein = None
    v = dcp(self)
    v.protein = protein
    v.x = self.x * float(value)
    v.y = self.y * float(value)
    v.z = self.z * float(value)
    v.desc = 'idx:' + str(v.desc) + '*' + str(value)
    return v
def __mul__(self, value):
    """
    Returns new Vector = self * scalar
    Vector.desc contains history of operation with index position in protein.atoms
    """
    protein = self.protein
    self.protein = None
    v = dcp(self)
    v.protein = protein
    v.x = self.x * float(value)
    v.y = self.y * float(value)
    v.z = self.z * float(value)
    v.desc = "idx:" + str(v.desc) + "*" + str(value)
    return v
def getTotalInitialAllocation(data, costCoverageInfo, targetPopSize):
    import costcov
    from copy import deepcopy as dcp

    costCov = costcov.Costcov()
    allocation = []
    for intervention in data.interventionList:
        coverageFraction = dcp(data.coverage[intervention])
        coverageNumber = coverageFraction * targetPopSize[intervention]
        if coverageNumber == 0:
            spending = 0.
        else:
            spending = costCov.inversefunction(coverageNumber,
                                               costCoverageInfo[intervention],
                                               targetPopSize[intervention])
        allocation.append(spending)
    return allocation
def pchip(x, y, xnew, deriv=False, method='pchip'):
    xnew = [xnew]
    sortzip = dcp(sorted(zip(x, y)))
    xs = [a for a, b in sortzip]
    ys = [b for a, b in sortzip]
    x = dcp(xs)
    y = dcp(ys)
    if not isinstance(xnew, collections.abc.Sequence):  # Is this reliable enough...?
        raise Exception('Error: Values to interpolate for with PCHIP have not '
                        'been given in sequence form (e.g. list or array)!')
    xnew = dcp(sorted(xnew))
    # print x
    # print y
    if method == 'pchip':  # WARNING, need to rename this function something else...
        # Compute slopes used by piecewise cubic Hermite interpolator.
        m = pchip_slopes(x, y)
        # Use these slopes (along with the Hermite basis function) to interpolate.
        ynew = pchip_eval(x, y, m, xnew, deriv)
    elif method == 'smoothinterp':
        from utils import smoothinterp
        ynew = smoothinterp(xnew, x, y)
        if deriv:
            if len(xnew) == 1:
                print('WARNING, length 1 smooth interpolation derivative not implemented')
                ynew = [0.0]  # WARNING, temp
            else:
                ynew = (diff(ynew) / diff(xnew)).tolist()  # Calculate derivative explicitly
                ynew.append(ynew[-1])  # Duplicate the last element so the right length
    else:
        raise Exception('Interpolation method "%s" not understood' % method)
    if type(y) == type(array([])):
        ynew = array(ynew)  # Try to preserve original type
    return ynew[0]
def getComplementaryFeedingQuarticCoefficients(self, stuntingDistribution, coverageArg):
    coverage = dcp(coverageArg)
    coEffs = {}
    for iAge in range(len(self.ages)):
        ageName = self.ages[iAge]
        OR = [1.0] * 4
        OR[0] = 1.0
        OR[1] = self.data.ORstuntingComplementaryFeeding[ageName][
            "Complementary feeding (food secure without promotion)"]
        OR[2] = self.data.ORstuntingComplementaryFeeding[ageName][
            "Complementary feeding (food insecure with promotion and supplementation)"]
        OR[3] = self.data.ORstuntingComplementaryFeeding[ageName][
            "Complementary feeding (food insecure with neither promotion nor supplementation)"]
        FracSecure = 1.0 - self.data.demographics["fraction food insecure"]
        FracCoveredEduc = coverage["Complementary feeding (education)"]
        FracCoveredSupp = coverage["Complementary feeding (supplementation)"]
        Frac = [0.0] * 4
        Frac[0] = FracSecure * FracCoveredEduc
        Frac[1] = FracSecure * (1 - FracCoveredEduc)
        Frac[2] = (1 - FracSecure) * FracCoveredSupp
        Frac[3] = (1 - FracSecure) * (1 - FracCoveredSupp)
        FracStunted = self.helper.sumStuntedComponents(stuntingDistribution[ageName])
        # [i] will refer to the three non-baseline birth outcomes
        A = Frac[0] * (OR[1] - 1.0) * (OR[2] - 1.0) * (OR[3] - 1.0)
        B = ((OR[1] - 1.0) * (OR[2] - 1.0) * (OR[3] - 1.0) *
             (sum(Frac[0] / (OR[i] - 1.0) for i in (1, 2, 3))
              + sum(OR[i] * Frac[i] / (OR[i] - 1.0) for i in (1, 2, 3))
              - FracStunted))
        C = (sum(Frac[0] * (OR[i] - 1.0) for i in (1, 2, 3))
             + sum(OR[i] * Frac[i] * ((OR[1] - 1.0) + (OR[2] - 1.0) + (OR[3] - 1.0)
                                      - (OR[i] - 1.0)) for i in (1, 2, 3))
             - sum(FracStunted * (OR[1] - 1.0) * (OR[2] - 1.0) * (OR[3] - 1.0)
                   / (OR[i] - 1.0) for i in (1, 2, 3)))
        D = (Frac[0] + sum(OR[i] * Frac[i] for i in (1, 2, 3))
             - sum(FracStunted * (OR[i] - 1.0) for i in (1, 2, 3)))
        E = -FracStunted
        coEffs[ageName] = [A, B, C, D, E]
    return coEffs
def __mul__(self, other):
    '''
    Returns new Vector with old properties, but transformed by transformation matrix
    '''
    protein = other.protein
    other.protein = None  # must add to avoid deepcopy recursive ultra madness
    transformed_Vector = dcp(other)
    transformed_Vector.protein = protein
    transformed_Vector = transformed_Vector.translateBy(-self.source)
    new_x = (self.a * transformed_Vector.x + self.b * transformed_Vector.y +
             self.c * transformed_Vector.z)
    new_y = (self.d * transformed_Vector.x + self.e * transformed_Vector.y +
             self.f * transformed_Vector.z)
    new_z = (self.g * transformed_Vector.x + self.h * transformed_Vector.y +
             self.i * transformed_Vector.z)
    transformed_Vector.x = round(new_x, 7)
    transformed_Vector.y = round(new_y, 7)
    transformed_Vector.z = round(new_z, 7)
    transformed_Vector = transformed_Vector.translateBy(self.target)
    transformed_Vector.desc += 'Transformed'
    return transformed_Vector
def onTgtUpdate(self, msg):
    # if self.DEBUG: print('In ErrorNode::onRefUpdate')
    self.tgt_pose = dcp(msg)
    self.time_sync()
# read data into a list, with each item corresponding to a different lane center file
data = []
for center_file_name in lane_center_file_names:
    pts = []
    with open(center_file_name, 'r') as file:
        for line in file:
            pts.append([float(val) for val in line.split()])
    data.append(pts)

writer = osmwriter.OSMWriter(destination_file_name)
version = int(1)
uuid = 1
for way_ in data:
    way_ids = []
    way_node0_uuid = None
    for pt in way_:  # lat, lon
        writer.node(uuid, pt[0], pt[1], version=version)
        way_ids.append(uuid)
        if len(way_ids) == 1:
            # this is the first node in the way. We want the way to be closed
            # since the track is circular, so save this uuid for later to
            # append it to the end of the way's uuid's.
            way_node0_uuid = dcp(uuid)
        uuid += 1
    # add the first node to the end since we know this is a nice perfect loop
    way_ids.append(way_node0_uuid)
    writer.way(uuid, {}, way_ids, version=version)
    uuid += 1
writer.close()
# initialise
newCoverages = {}
for intervention in inputData.interventionList:
    newCoverages[intervention] = inputData.coverage[intervention]

# allocation of funding
investment = array([investmentIncrease])

# calculate coverage (%)
targetPopSize = {}
targetPopSize[chosenIntervention] = 0.
for iAge in range(numAgeGroups):
    ageName = helper.keyList['ages'][iAge]
    targetPopSize[chosenIntervention] += (
        inputData.targetPopulation[chosenIntervention][ageName] *
        modelX.listOfAgeCompartments[iAge].getTotalPopulation())
targetPopSize[chosenIntervention] += (
    inputData.targetPopulation[chosenIntervention]['pregnant women'] *
    modelX.pregnantWomen.populationSize)
costCovParams = {}
costCovParams['unitcost'] = array([dcp(inputData.costSaturation[chosenIntervention]["unit cost"])])
costCovParams['saturation'] = array([dcp(inputData.costSaturation[chosenIntervention]["saturation coverage"])])
additionalPeopleCovered = costCov.function(investment, costCovParams,
                                           targetPopSize[chosenIntervention])  # function from HIV
additionalFractionCovered = additionalPeopleCovered / targetPopSize[chosenIntervention]
print("additional coverage: %g" % additionalFractionCovered)
newCoverages[chosenIntervention] += additionalFractionCovered
print("new coverage: %g" % newCoverages[chosenIntervention])

# scale up intervention
modelX.updateCoverages(newCoverages)

# Run model
for t in range(numsteps - 1):
    modelX.moveOneTimeStep()
pickle.dump(modelX, outfile)
outfile.close()
def __init__(self, data, derived, keyList):
    self.derived = derived
    for key in keyList.keys():
        setattr(self, key, keyList[key])
    self.causesOfDeath = dcp(data.causesOfDeath)
    self.conditions = dcp(data.conditions)
    self.demographics = dcp(data.demographics)
    # self.rawMortality = dcp(data.rawMortality)
    self.causeOfDeathDist = dcp(data.causeOfDeathDist)
    self.stuntingDistribution = dcp(data.stuntingDistribution)
    self.wastingDistribution = dcp(data.wastingDistribution)
    self.breastfeedingDistribution = dcp(data.breastfeedingDistribution)
    self.RRdeathStunting = dcp(data.RRdeathStunting)
    self.RRdeathWasting = dcp(data.RRdeathWasting)
    self.RRdeathBreastfeeding = dcp(data.RRdeathBreastfeeding)
    self.RRdeathByBirthOutcome = dcp(data.RRdeathByBirthOutcome)
    # self.ORstuntingProgression = dcp(data.ORstuntingProgression)
    self.incidences = dcp(data.incidences)
    # self.RRdiarrhea = dcp(data.RRdiarrhea)
    # self.ORstuntingCondition = dcp(data.ORstuntingCondition)
    # self.ORstuntingBirthOutcome = dcp(data.ORstuntingBirthOutcome)
    self.birthOutcomeDist = dcp(data.birthOutcomeDist)
    # self.ORstuntingIntervention = dcp(data.ORstuntingIntervention)
    # self.ORappropriatebfIntervention = dcp(data.ORappropriatebfIntervention)
    self.ageAppropriateBreastfeeding = dcp(data.ageAppropriateBreastfeeding)
    self.coverage = dcp(data.coverage)
    self.effectivenessMortality = dcp(data.effectivenessMortality)
    self.affectedFraction = dcp(data.affectedFraction)
    self.effectivenessIncidence = dcp(data.effectivenessIncidence)
    self.interventionsMaternal = dcp(data.interventionsMaternal)
    self.foodSecurityGroups = dcp(data.foodSecurityGroups)
def __init__(self, name, dictOfBoxes, agingRate, keyList):
    self.name = name
    self.dictOfBoxes = dcp(dictOfBoxes)
    self.agingRate = agingRate
    for key in keyList.keys():
        setattr(self, key, keyList[key])
def updateCoverages(self, newCoverageArg):
    # newCoverage is a dictionary of coverages by intervention
    newCoverage = dcp(newCoverageArg)
    # call initialisation of probabilities related to interventions
    self.derived.setProbStuntedIfCovered(self.params.coverage, self.params.stuntingDistribution)
    self.derived.setProbCorrectlyBreastfedIfCovered(self.params.coverage, self.params.breastfeedingDistribution)
    self.derived.setProbStuntedIfDiarrhea(self.params.incidences, self.params.breastfeedingDistribution, self.params.stuntingDistribution)
    self.derived.setProbStuntedComplementaryFeeding(self.params.stuntingDistribution, self.params.coverage)
    # get combined reductions from all interventions
    mortalityUpdate = self.params.getMortalityUpdate(newCoverage)
    stuntingUpdate = self.params.getStuntingUpdate(newCoverage)
    incidenceUpdate = self.params.getIncidenceUpdate(newCoverage)
    birthUpdate = self.params.getBirthOutcomeUpdate(newCoverage)
    newFracCorrectlyBreastfed = self.params.getAppropriateBFNew(newCoverage)
    stuntingUpdateComplementaryFeeding = self.params.getStuntingUpdateComplementaryFeeding(newCoverage)
    # MORTALITY
    for ageGroup in self.listOfAgeCompartments:
        ageName = ageGroup.name
        # update mortality
        for cause in self.params.causesOfDeath:
            self.derived.referenceMortality[ageName][cause] *= mortalityUpdate[ageName][cause]
    # BREASTFEEDING
    for ageGroup in self.listOfAgeCompartments:
        ageName = ageGroup.name
        SumBefore = self.derived.getDiarrheaRiskSum(ageName, self.params.breastfeedingDistribution)
        correctPractice = self.params.ageAppropriateBreastfeeding[ageName]
        agePop = ageGroup.getTotalPopulation()
        numCorrectBefore = ageGroup.getNumberCorrectlyBreastfed(correctPractice)
        numCorrectAfter = agePop * newFracCorrectlyBreastfed[ageName]
        numShifting = numCorrectAfter - numCorrectBefore
        numIncorrectBefore = agePop - numCorrectBefore
        fracCorrecting = 0.
        if numIncorrectBefore > 0.01:
            fracCorrecting = numShifting / numIncorrectBefore
        # update breastfeeding distribution
        self.params.breastfeedingDistribution[ageName][correctPractice] = newFracCorrectlyBreastfed[ageName]
        incorrectPractices = [practice for practice in self.breastfeedingList if practice != correctPractice]
        for practice in incorrectPractices:
            self.params.breastfeedingDistribution[ageName][practice] *= 1. - fracCorrecting
        ageGroup.distribute(self.params.stuntingDistribution, self.params.wastingDistribution, self.params.breastfeedingDistribution)
        SumAfter = self.derived.getDiarrheaRiskSum(ageName, self.params.breastfeedingDistribution)
        # update incidence of diarrhea
        self.params.incidences[ageName]['Diarrhea'] *= SumAfter / SumBefore
    beta = self.derived.getFracDiarrheaFixedZ()
    stuntingUpdateDueToBreastfeeding = self.params.getStuntingUpdateDueToIncidence(beta)
    # INCIDENCE
    incidencesBefore = {}
    incidencesAfter = {}
    for ageGroup in self.listOfAgeCompartments:
        ageName = ageGroup.name
        # update incidence
        incidencesBefore[ageName] = self.params.incidences[ageName]['Diarrhea']
        self.params.incidences[ageName]['Diarrhea'] *= incidenceUpdate[ageName]['Diarrhea']
        incidencesAfter[ageName] = self.params.incidences[ageName]['Diarrhea']
    # get flow-on effect to stunting due to changing incidence
    Z0 = self.derived.getZa(incidencesBefore, self.params.breastfeedingDistribution)
    Zt = self.derived.getZa(incidencesAfter, self.params.breastfeedingDistribution)
    beta = self.derived.getFracDiarrhea(Z0, Zt)
    self.derived.updateProbStuntedIfDiarrheaNewZa(Zt)
    stuntingUpdateDueToIncidence = self.params.getStuntingUpdateDueToIncidence(beta)
    # STUNTING
    for ageGroup in self.listOfAgeCompartments:
        ageName = ageGroup.name
        totalUpdate = (stuntingUpdate[ageName] * stuntingUpdateDueToIncidence[ageName] *
                       stuntingUpdateComplementaryFeeding[ageName] *
                       stuntingUpdateDueToBreastfeeding[ageName])
        # save total stunting update for use in apply births and apply aging
        self.derived.stuntingUpdateAfterInterventions[ageName] *= totalUpdate
        # update stunting
        oldProbStunting = ageGroup.getStuntedFraction()
        newProbStunting = oldProbStunting * totalUpdate
        self.params.stuntingDistribution[ageName] = self.helper.restratify(newProbStunting)
        ageGroup.distribute(self.params.stuntingDistribution, self.params.wastingDistribution, self.params.breastfeedingDistribution)
    # BIRTH OUTCOME
    for outcome in self.birthOutcomes:
        self.params.birthOutcomeDist[outcome] *= birthUpdate[outcome]
    self.params.birthOutcomeDist['Term AGA'] = 1 - (self.params.birthOutcomeDist['Pre-term SGA'] +
                                                    self.params.birthOutcomeDist['Pre-term AGA'] +
                                                    self.params.birthOutcomeDist['Term SGA'])
    # UPDATE MORTALITY AFTER HAVING CHANGED: underlyingMortality and birthOutcomeDist
    self.updateMortalityRate()
    # set newCoverages as the coverages in interventions
    self.params.coverage = newCoverage