def test_simple_path_construction():
    """Build a small CAG by hand and exercise path/beta-cell printing."""
    G = AnalysisGraph.from_indra_statements_json_file(
        "tests/data/indra_statements_format.json"
    )
    for concept in ("c0", "c1", "c2"):
        G.add_node(concept)
    print("Nodes of the graph:")
    G.print_nodes()
    edges = [
        (("", 1, "c0"), ("", 1, "c1")),
        (("", 1, "c1"), ("", 1, "c2")),
        (("", 1, "c0"), ("", 1, "c2")),
        (("", 1, "c3"), ("", 1, "c1")),  # Creates a loop 1 -> 2 -> 3 -> 1
    ]
    for edge in edges:
        G.add_edge(edge)
    print("Edges of the graph:")
    G.print_edges()
    G.find_all_paths()
    G.print_all_paths()
    G.print_cells_affected_by_beta(0, 1)
    G.print_cells_affected_by_beta(1, 2)
    # Construct a second, independent graph from the same fixture.
    G2 = AnalysisGraph.from_indra_statements_json_file(
        "tests/data/indra_statements_format.json"
    )
def test_remove_edge():
    """Exercise remove_edge(): invalid endpoints raise, a real edge is removed."""
    fragments = [(("small", 1, tension), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    cag.print_all_paths()

    print("\nRemoving edge - invalid source")
    with pytest.raises(IndexError):
        cag.remove_edge(source="invalid", target=food_security)

    print("\nRemoving edge - invalid target")
    with pytest.raises(IndexError):
        cag.remove_edge(source=tension, target="invalid")

    print("\nRemoving edge - source and target inverted target")
    cag.remove_edge(source=food_security, target=tension)
    cag.print_nodes()

    print("\nRemoving edge - correct")
    cag.remove_edge(source=tension, target=food_security)
    cag.print_nodes()
    cag.print_edges()
    cag.to_png()
def test_remove_edges():
    """Batch edge removal: invalid endpoints are tolerated, the real edge goes."""
    fragments = [(("small", 1, tension), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_all_paths()
    # A mix of invalid sources/targets, an inverted pair, and the real edge.
    doomed = [
        ("invalid_src_1", food_security),
        ("invalid_src_2", food_security),
        (tension, "invalid_tgt1"),
        (tension, "invalid_tgt2"),
        ("invalid_src_2", "invalid_tgt_2"),
        ("invalid_src_3", "invalid_tgt3"),
        (food_security, tension),
        (tension, food_security),
    ]
    print("\nRemoving edges")
    cag.remove_edges(doomed)
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_all_paths()
def test_prune():
    """Build a small CAG and check that prune() drops paths past the cutoff."""
    fragments = [
        # Center node is n4
        (("small", 1, "n0"), ("large", -1, "n1")),
        (("small", 1, "n0"), ("large", -1, "n2")),
        (("small", 1, "n0"), ("large", -1, "n3")),
        (("small", 1, "n2"), ("large", -1, "n1")),
        (("small", 1, "n3"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n1")),
    ]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    print("\nBefore pruning")
    cag.print_all_paths()
    cag.prune(2)
    print("\nAfter pruning")
    cag.print_all_paths()
def test_remove_edges():
    """Batch edge removal with fully-qualified UN concept names.

    NOTE(review): this shadows another ``test_remove_edges`` definition if
    both live in the same module — pytest would only collect the later one.
    Confirm and rename one of them.
    """
    conflict = "UN/events/human/conflict"
    food_security = "UN/entities/human/food/food_security"
    causal_fragments = [(("small", 1, conflict), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    G = AnalysisGraph.from_causal_fragments(causal_fragments)
    G.find_all_paths()
    G.print_nodes()
    print("\nName to vertex ID map entries")
    G.print_name_to_vertex()
    G.print_all_paths()
    edges_to_remove = [
        ("invalid_src_1", food_security),
        ("invalid_src_2", food_security),
        (conflict, "invalid_tgt1"),
        (conflict, "invalid_tgt2"),
        ("invalid_src_2", "invalid_tgt_2"),
        ("invalid_src_3", "invalid_tgt3"),
        (food_security, conflict),
        (conflict, food_security),
    ]
    print("\nRemoving edges")
    G.remove_edges(edges_to_remove)
    G.print_nodes()
    print("\nName to vertex ID map entries")
    G.print_name_to_vertex()
    G.print_all_paths()
def test_cpp_extensions_preds():
    """Train a tiny two-node model and plot predictions for one indicator."""
    fragments = [
        (
            ("large", -1, "UN/entities/human/financial/economic/inflation"),
            ("small", 1, "UN/events/human/human_migration"),
        )
    ]
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.map_concepts_to_indicators()
    cag["UN/events/human/human_migration"].replace_indicator(
        "Net migration", "New asylum seeking applicants", "UNHCR"
    )
    cag.to_png()
    # Beta initialization options: InitialBeta.ZERO / ONE / HALF / MEAN /
    # RANDOM (a random value in [-1, 1]).
    cag.train_model(
        2015, 1, 2015, 12, 10, 10,
        initial_beta=InitialBeta.ZERO,
        use_continuous=False,
    )
    predictions = cag.generate_prediction(2015, 1, 2016, 12)
    pred_plot(predictions, "New asylum seeking applicants", save_as="pred_plot.pdf")
def test_delete_indicator():
    """Exercise set/replace/delete indicator operations on a node."""
    fragments = [
        (
            ("large", -1, "UN/entities/human/financial/economic/inflation"),
            ("small", 1, "UN/events/human/human_migration"),
        )
    ]
    migration = "UN/events/human/human_migration"
    cag = AnalysisGraph.from_causal_fragments(fragments)
    print("\n")
    cag.print_nodes()
    cag.map_concepts_to_indicators()
    cag.print_indicators()
    print("\n")
    cag[migration].replace_indicator(
        "Net migration", "New asylum seeking applicants", "UNHCR"
    )
    cag.print_indicators()
    print("\n")
    cag.set_indicator(migration, "Net Migration", "MITRE12")
    cag.print_indicators()
    print("\n")
    cag.delete_indicator(migration, "New asylum seeking applicants")
    cag.print_indicators()
    print("\n")
    cag.set_indicator(migration, "New asylum seeking applicants", "UNHCR")
    cag.delete_all_indicators(migration)
    cag.print_indicators()
def test_merge(): causal_fragments = [ (("small", 1, tension), ("large", -1, food_security)), (("small", 1, displacement), ("small", 1, tension)), (("small", 1, displacement), ("large", -1, food_security)), (("small", 1, tension), ("small", 1, crop_production)), (("large", -1, food_security), ("small", 1, crop_production)), ( ("small", 1, "UN/events/human/economic_crisis"), ("small", 1, tension), ), ( ("small", 1, "UN/events/weather/precipitation"), ("large", -1, food_security), ), (("large", -1, food_security), ("small", 1, inflation)), ] print("\n\n\n\n") print("\nCreating CAG") G = AnalysisGraph.from_causal_fragments(causal_fragments) G.find_all_paths() print("\nBefore merging") G.print_all_paths() G.print_nodes() print("\nAfter merging") G.merge_nodes(food_security, tension) G.print_all_paths() G.print_nodes()
def test_remove_node():
    """remove_node() on an invalid then a valid concept, dumping state after each.

    NOTE(review): unlike the pytest.raises-based variant of this test, this
    version expects remove_node('invalid') not to raise — confirm which API
    version is under test.
    """
    conflict = "UN/events/human/conflict"
    food_security = "UN/entities/human/food/food_security"

    def report(cag):
        # Shared state dump used after every mutation.
        cag.print_nodes()
        print("\nName to vertex ID map entries")
        cag.print_name_to_vertex()
        cag.print_all_paths()

    causal_fragments = [(("small", 1, conflict), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    G = AnalysisGraph.from_causal_fragments(causal_fragments)
    G.find_all_paths()
    report(G)
    print("\nRemoving an invalid concept")
    G.remove_node(concept="invalid")
    report(G)
    print("\nRemoving a valid concept")
    G.remove_node(concept=conflict)
    report(G)
def createNewModel():
    """ Create a new Delphi model. """
    data = json.loads(request.data)
    # Choose a sampling resolution for this run.
    if os.environ.get("CI") == "true":
        # Continuous-integration runs use a tiny resolution to avoid timeouts.
        res = 5
    elif os.environ.get("DELPHI_N_SAMPLES") is not None:
        # Development/testing override via the DELPHI_N_SAMPLES env variable.
        res = int(os.environ["DELPHI_N_SAMPLES"])
    else:
        # Default sampling resolution.
        # TODO - we might want to set the default sampling resolution with some
        # kind of heuristic, based on the number of nodes and edges. - Adarsh
        res = 1000
    graph = AnalysisGraph.from_causemos_json_string(request.data, res)
    record = DelphiModel(
        id=data["id"], model=graph.serialize_to_json_string(verbose=False)
    )
    db.session.merge(record)
    db.session.commit()
    response = json.loads(graph.generate_create_model_response())
    return jsonify(response)
def test_subgraph_between():
    """Extract the subgraph between two concepts with a hop cutoff."""
    fragments = [
        # Center node is n4
        (("small", 1, "n0"), ("large", -1, "n1")),
        (("small", 1, "n1"), ("large", -1, "n2")),
        (("small", 1, "n2"), ("large", -1, "n3")),
        (("small", 1, "n3"), ("large", -1, "n4")),
        (("small", 1, "n0"), ("large", -1, "n5")),
        (("small", 1, "n5"), ("large", -1, "n6")),
        (("small", 1, "n6"), ("large", -1, "n4")),
        (("small", 1, "n0"), ("large", -1, "n7")),
        (("small", 1, "n7"), ("large", -1, "n4")),
        (("small", 1, "n0"), ("large", -1, "n4")),
        (("small", 1, "n0"), ("large", -1, "n8")),
        (("small", 1, "n8"), ("large", -1, "n9")),
        (("small", 1, "n10"), ("large", -1, "n0")),
        (("small", 1, "n4"), ("large", -1, "n12")),
        (("small", 1, "n12"), ("large", -1, "n13")),
        (("small", 1, "n13"), ("large", -1, "n4")),
    ]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_nodes()
    cag.print_name_to_vertex()
    cutoff, src, tgt = 3, "n0", "n4"
    print(
        "\nSubgraph with inbetween hops less than or equal {} between source node {} and target node {}".format(
            cutoff, src, tgt
        )
    )
    try:
        sub = cag.get_subgraph_for_concept_pair(src, tgt, cutoff)
    except IndexError:
        print("Incorrect source or target concept")
        return
    print("\n\nTwo Graphs")
    print("The original")
    cag.print_nodes()
    cag.print_name_to_vertex()
    print()
    print("The subgraph")
    sub.print_nodes()
    sub.print_name_to_vertex()
def createNewICM():
    """ Create a new ICM"""
    payload = json.loads(request.data)
    graph = AnalysisGraph.from_uncharted_json_serialized_dict(payload)
    graph.assemble_transition_model_from_gradable_adjectives()
    graph.sample_from_prior()
    graph.to_sql(app=current_app)
    metadata = ICMMetadata.query.filter_by(id=graph.id).first().deserialize()
    # Strip the internal model_id field before returning the metadata.
    del metadata["model_id"]
    return jsonify(metadata)
def createNewModel():
    """ Create a new Delphi model. """
    payload = json.loads(request.data)
    graph = AnalysisGraph.from_causemos_json_string(request.data)
    graph.id = payload["id"]
    record = DelphiModel(id=payload["id"], model=graph.to_json_string())
    db.session.merge(record)
    db.session.commit()
    # Return the edge weights so CauseMos can render the graph.
    edge_weights = graph.get_edge_weights_for_causemos_viz()
    return jsonify({"status": "success", "relations": edge_weights})
def create_base_CAG(uncharted_json_file):
    """Build the base food-insecurity CAG from an Uncharted JSON export."""
    graph = AnalysisGraph.from_uncharted_json_file(uncharted_json_file)
    # Fold food_security into food_insecurity with flipped polarity.
    graph.merge_nodes(
        "wm/concept/causal_factor/condition/food_security",
        "wm/concept/causal_factor/condition/food_insecurity",
        same_polarity=False,
    )
    # Keep only the part of the graph feeding into food_insecurity.
    graph = graph.get_subgraph_for_concept(
        "wm/concept/causal_factor/condition/food_insecurity", inward=True
    )
    graph.map_concepts_to_indicators()
    return graph
def test_inference():
    """Smoke test: build a two-node CAG and show its name-to-vertex map."""
    fragments = [(("small", 1, tension), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
def getModelStatus(modelID):
    """Look up a stored Delphi model and report its status."""
    record = DelphiModel.query.filter_by(id=modelID).first()
    if not record:
        # Unknown model id: report it rather than 404-ing.
        return jsonify(json.loads('{"status": "invalid model id"}'))
    graph = AnalysisGraph.deserialize_from_json_string(record.model, verbose=False)
    return jsonify(json.loads(graph.generate_create_model_response()))
def test_simple_path_construction():
    """Build a small CAG by hand and exercise path construction.

    Fix: the original ended with a dangling, unterminated triple-quoted
    string (a lone ''' after the last statement) and carried large
    commented-out / dead triple-quoted blocks of old integer-based
    add_edge calls. The dead code and the stray quote are removed; the
    executable behavior is unchanged.
    """
    G = AnalysisGraph.from_json_file("tests/data/indra_statements_format.json")
    G.add_node('c0')
    G.add_node('c1')
    G.add_node('c2')
    print('Nodes of the graph:')
    G.print_nodes()
    G.add_edge((("", 1, "c0"), ("", 1, "c1")))
    G.add_edge((("", 1, "c1"), ("", 1, "c2")))
    G.add_edge((("", 1, "c0"), ("", 1, "c2")))
    # Creates a loop 1 -> 2 -> 3 -> 1
    G.add_edge((("", 1, "c3"), ("", 1, "c1")))
    print('Edges of the graph:')
    G.print_edges()
    G.find_all_paths()
    G.print_all_paths()
    G.print_cells_affected_by_beta(0, 1)
    G.print_cells_affected_by_beta(1, 2)
    G2 = AnalysisGraph.from_json_file("tests/data/indra_statements_format.json")
def createNewModel():
    """ Create a new Delphi model. """
    # Pick the KDE kernel count, sampling resolution, and burn-in length
    # based on the runtime environment.
    if os.environ.get("CI") == "true":
        # When running in a continuous integration run, we set the sampling
        # resolution to be small to prevent timeouts.
        kde_kernels = 5
        sampling_resolution = 5
        burn = 5
    elif os.environ.get("DELPHI_N_SAMPLES") is not None:
        # We also enable setting the sampling resolution through the
        # environment variable "DELPHI_N_SAMPLES", for development and testing
        # purposes.
        # NOTE: When creating, this environment variable is named incorrectly!
        # (it sets kde_kernels, not the number of samples — confirm intent.)
        kde_kernels = int(os.environ["DELPHI_N_SAMPLES"])
        sampling_resolution = 100
        burn = 100
    else:
        # If neither "CI" or "DELPHI_N_SAMPLES" is set, we default to a
        # sampling resolution of 1000.
        # TODO - we might want to set the default sampling resolution with some
        # kind of heuristic, based on the number of nodes and edges. - Adarsh
        kde_kernels = 1000
        sampling_resolution = 1000
        burn = 10000
    data = json.loads(request.data)
    G = AnalysisGraph.from_causemos_json_string(
        request.data,
        belief_score_cutoff=0,
        grounding_score_cutoff=0,
        kde_kernels=kde_kernels,
    )
    # Persist the (untrained) model before kicking off training.
    model = DelphiModel(
        id=data["id"], model=G.serialize_to_json_string(verbose=False)
    )
    db.session.merge(model)
    db.session.commit()
    response = json.loads(G.generate_create_model_response())
    # executor.submit_stored(data["id"], train_model, G, data["id"])
    # Train asynchronously in a separate process so the request returns fast.
    try:
        proc = multiprocessing.Process(
            target=train_model,
            args=(G, data["id"], sampling_resolution, burn, current_app),
            name='training',
        )
        proc.start()
    except multiprocessing.ProcessError:
        # Report the failure in the response instead of crashing the request.
        print("Error: unable to start training process")
        response['status'] = 'server error: training'
    return jsonify(response)
def create_base_CAG(causemos_create_model,
                    belief_score_cutoff=0,
                    grounding_score_cutoff=0,
                    kde_kernels=4):
    """Build a base CAG from a CauseMos JSON file, or a canned fragment if none given."""
    if causemos_create_model:
        graph = AnalysisGraph.from_causemos_json_file(
            causemos_create_model,
            belief_score_cutoff,
            grounding_score_cutoff,
            kde_kernels,
        )
    else:
        # Fallback: a single hard-coded crop-production -> food-insecurity edge.
        fragments = [
            (
                (
                    "large",
                    1,
                    "wm/concept/indicator_and_reported_property/agriculture/Crop_Production",
                ),
                ("small", -1, "wm/concept/causal_factor/condition/food_insecurity"),
            )
        ]
        graph = AnalysisGraph.from_causal_fragments(fragments)
    graph.map_concepts_to_indicators()
    curate_indicators(graph)
    return graph
def test_inference():
    """Smoke test: build a two-node CAG and print its name-to-vertex map."""
    fragments = [
        (
            ("small", 1, "UN/events/human/conflict"),
            ("large", -1, "UN/entities/human/food/food_security"),
        )
    ]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    print("\nSample from proposal debug")
def test_merge(): causal_fragments = [ (("small", 1, "UN/events/human/conflict"), ("large", -1, "UN/entities/human/food/food_security")), (("small", 1, "UN/events/human/human_migration"), ("small", 1, "UN/events/human/conflict")), (("small", 1, "UN/events/human/human_migration"), ("large", -1, "UN/entities/human/food/food_security")), (("small", 1, "UN/events/human/conflict"), ("small", 1, "UN/entities/natural/crop_technology/product")), (("large", -1, "UN/entities/human/food/food_security"), ("small", 1, "UN/entities/natural/crop_technology/product")), (("small", 1, "UN/events/human/economic_crisis"), ("small", 1, "UN/events/human/conflict")), (("small", 1, "UN/events/weather/precipitation"), ("large", -1, "UN/entities/human/food/food_security")), (("small", 1, "UN/entities/human/financial/economic/inflation"), ("small", 1, "UN/events/human/conflict")), (("large", -1, "UN/entities/human/food/food_security"), ("small", 1, "UN/entities/human/financial/economic/inflation")), ] ''' ("large", -1, "UN/entities/human/food/food_security") ("small", 1, "UN/events/human/conflict") ("small", 1, "UN/events/human/human_migration") ("small", 1, "UN/entities/natural/crop_technology/product") ("small", 1, "UN/events/human/economic_crisis") ("small", 1, "UN/events/weather/precipitation") ("small", 1, "UN/entities/human/financial/economic/inflation") ''' print('\n\n\n\n') print('\nCreating CAG') G = AnalysisGraph.from_causal_fragments(causal_fragments) G.find_all_paths() print('\nBefore merging') G.print_all_paths() G.print_nodes() print('\nAfter mergning') #G.merge_nodes( "UN/events/human/conflict", "UN/entities/human/food/food_security") G.merge_nodes("UN/entities/human/food/food_security", "UN/events/human/conflict") G.print_all_paths() G.print_nodes()
def test_remove_edge():
    """remove_edge() with invalid, inverted, and valid endpoints, dumping state each time."""
    conflict = "UN/events/human/conflict"
    food_security = "UN/entities/human/food/food_security"

    def report(cag):
        # Shared state dump used after every mutation.
        cag.print_nodes()
        print("\nName to vertex ID map entries")
        cag.print_name_to_vertex()
        cag.print_all_paths()

    causal_fragments = [(("small", 1, conflict), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    G = AnalysisGraph.from_causal_fragments(causal_fragments)
    G.find_all_paths()
    report(G)
    print("\nRemoving edge - invalid source")
    G.remove_edge(source="invalid", target=food_security)
    report(G)
    print("\nRemoving edge - invalid target")
    G.remove_edge(source=conflict, target="invalid")
    report(G)
    print("\nRemoving edge - source and target inverted target")
    G.remove_edge(source=food_security, target=conflict)
    report(G)
    print("\nRemoving edge - correct")
    G.remove_edge(source=conflict, target=food_security)
    report(G)
    G.to_png()
def test_remove_node():
    """remove_node() raises IndexError for unknown concepts and deletes known ones."""
    fragments = [(("small", 1, tension), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    print("\nRemoving an invalid concept")
    with pytest.raises(IndexError):
        cag.remove_node(concept="invalid")
    print("\nRemoving a valid concept")
    cag.remove_node(concept=tension)
    cag.print_nodes()
def runExperiment(request, experiment_id, experiment_type, model, app):
    """Dispatch an experiment run by type; only PROJECTION is implemented."""
    G = AnalysisGraph.deserialize_from_json_string(model, verbose=False)
    if experiment_type == "PROJECTION":
        runProjectionExperiment(request, experiment_id, G, app)
    # GOAL_OPTIMIZATION, SENSITIVITY_ANALYSIS, MODEL_VALIDATION, BACKCASTING,
    # and unknown experiment types are not yet implemented: fall through
    # silently, exactly as the original pass-only branches did.
def test_remove_nodes():
    """remove_nodes() tolerates invalid concepts and deletes the valid ones."""
    fragments = [(("small", 1, tension), ("large", -1, food_security))]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_all_paths()
    print("\nRemoving a several concepts, some valid, some invalid")
    cag.remove_nodes(concepts={"invalid1", tension, "invalid2"})
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_all_paths()
def runExperiment(request, modelID, experiment_id):
    """Run an experiment against a stored model; mark it failed if the model is missing."""
    request_body = request.get_json()
    experiment_type = request_body["experimentType"]
    query_result = DelphiModel.query.filter_by(id=modelID).first()
    if not query_result:
        # Model ID not in database. Should be an incorrect model ID
        result = CauseMosAsyncExperimentResult.query.filter_by(
            id=experiment_id).first()
        # NOTE(review): if no experiment row exists either, `result` is None
        # and the next line raises AttributeError — confirm the row is always
        # created before this function runs.
        result.status = "failed"
        db.session.merge(result)
        db.session.commit()
        return
    model = query_result.model
    # The serialized model records whether training has completed.
    trained = json.loads(model)["trained"]
    G = AnalysisGraph.deserialize_from_json_string(model, verbose=False)
    if experiment_type == "PROJECTION":
        runProjectionExperiment(request, modelID, experiment_id, G, trained)
    elif experiment_type == "GOAL_OPTIMIZATION":
        # Not yet implemented
        pass
    elif experiment_type == "SENSITIVITY_ANALYSIS":
        # Not yet implemented
        pass
    elif experiment_type == "MODEL_VALIDATION":
        # Not yet implemented
        pass
    elif experiment_type == "BACKCASTING":
        # Not yet implemented
        pass
    else:
        # Unknown experiment type
        pass
def test_cpp_extensions():
    """Smoke test: construct an AnalysisGraph from an INDRA statements fixture."""
    graph = AnalysisGraph.from_json_file("tests/data/indra_statements_format.json")
def test_subgraph():
    """Take hop-limited subgraphs (forward and inward) around a center node."""
    fragments = [
        # Center node is n4
        (("small", 1, "n0"), ("large", -1, "n1")),
        (("small", 1, "n1"), ("large", -1, "n2")),
        (("small", 1, "n2"), ("large", -1, "n3")),
        (("small", 1, "n3"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n5")),
        (("small", 1, "n5"), ("large", -1, "n6")),
        (("small", 1, "n6"), ("large", -1, "n7")),
        (("small", 1, "n7"), ("large", -1, "n8")),
        (("small", 1, "n0"), ("large", -1, "n9")),
        (("small", 1, "n9"), ("large", -1, "n2")),
        (("small", 1, "n2"), ("large", -1, "n10")),
        (("small", 1, "n10"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n11")),
        (("small", 1, "n11"), ("large", -1, "n6")),
        (("small", 1, "n6"), ("large", -1, "n12")),
        (("small", 1, "n12"), ("large", -1, "n8")),
        (("small", 1, "n13"), ("large", -1, "n14")),
        (("small", 1, "n14"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n15")),
        (("small", 1, "n15"), ("large", -1, "n16")),
        (("small", 1, "n5"), ("large", -1, "n3")),  # Creates a loop
    ]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_nodes()
    cag.print_name_to_vertex()
    hops, node = 2, "n4"
    print(
        "\nSubgraph of {} hops beginning at node {} graph".format(hops, node)
    )
    try:
        sub = cag.get_subgraph_for_concept(node, False, hops)
    except IndexError:
        print("Concept {} is not in the CAG!".format(node))
        return
    print("\n\nTwo Graphs")
    print("The original")
    cag.print_nodes()
    cag.print_name_to_vertex()
    print("The subgraph")
    sub.print_nodes()
    sub.print_name_to_vertex()
    print("\nSubgraph of {} hops ending at node {} graph".format(hops, node))
    sub = cag.get_subgraph_for_concept(node, True, hops)
    print("\n\nTwo Graphs")
    print("The original")
    cag.print_nodes()
    cag.print_name_to_vertex()
    print(
        "\nSubgraph of {} hops beginning at node {} graph".format(hops, node)
    )
    cag.get_subgraph_for_concept(node, False, hops)
def test_debug():
    """Debug helper: build a chain CAG and take a forward subgraph from n0.

    Fix: the original assigned ``causal_fragments`` three times in a row;
    the first two lists were dead stores, immediately overwritten before
    any use. Only the final list — the one actually used — is kept.
    """
    causal_fragments = [
        (("small", 1, "n0"), ("large", -1, "n1")),
        (("small", 1, "n1"), ("large", -1, "n2")),
        (("small", 1, "n2"), ("large", -1, "n3")),
        (("small", 1, "n3"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n5")),
        (("small", 1, "n5"), ("large", -1, "n6")),
        (("small", 1, "n6"), ("large", -1, "n7")),
        (("small", 1, "n7"), ("large", -1, "n8")),
        (("small", 1, "n0"), ("large", -1, "n3")),
    ]
    print("\n\n\n\n")
    print("\nCreating CAG")
    G = AnalysisGraph.from_causal_fragments(causal_fragments)
    G.find_all_paths()
    G.print_nodes()
    print("\nBefore pruning")
    G.print_all_paths()
    hops = 3
    node = "n0"
    print(f"\nSubgraph of {hops} hops beginning at node {node} graph")
    try:
        G_sub = G.get_subgraph_for_concept(node, False, hops)
    except IndexError:
        print(f"Concept {node} is not in the CAG!")
        return
    G_sub.find_all_paths()
    G_sub.print_nodes()
def test_subgraph():
    """Take 3-hop subgraphs (forward and inward) around center node n4."""
    fragments = [
        # Center node is n4
        (("small", 1, "n0"), ("large", -1, "n1")),
        (("small", 1, "n1"), ("large", -1, "n2")),
        (("small", 1, "n2"), ("large", -1, "n3")),
        (("small", 1, "n3"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n5")),
        (("small", 1, "n5"), ("large", -1, "n6")),
        (("small", 1, "n6"), ("large", -1, "n7")),
        (("small", 1, "n7"), ("large", -1, "n8")),
        (("small", 1, "n0"), ("large", -1, "n9")),
        (("small", 1, "n9"), ("large", -1, "n2")),
        (("small", 1, "n2"), ("large", -1, "n10")),
        (("small", 1, "n10"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n11")),
        (("small", 1, "n11"), ("large", -1, "n6")),
        (("small", 1, "n6"), ("large", -1, "n12")),
        (("small", 1, "n12"), ("large", -1, "n8")),
        (("small", 1, "n13"), ("large", -1, "n14")),
        (("small", 1, "n14"), ("large", -1, "n4")),
        (("small", 1, "n4"), ("large", -1, "n15")),
        (("small", 1, "n15"), ("large", -1, "n16")),
        (("small", 1, "n5"), ("large", -1, "n3")),  # Creates a loop
    ]
    print("\n\n\n\n")
    print("\nCreating CAG")
    cag = AnalysisGraph.from_causal_fragments(fragments)
    cag.find_all_paths()
    cag.print_nodes()
    print("\nName to vertex ID map entries")
    cag.print_name_to_vertex()
    cag.print_nodes()
    cag.print_name_to_vertex()
    hops, node = 3, "n4"
    print("\nSubgraph of {} hops beginning at node {} graph".format(hops, node))
    try:
        sub = cag.get_subgraph_for_concept(node, False, hops)
    except IndexError:
        print("Concept {} is not in the CAG!".format(node))
        return
    print("\n\nTwo Graphs")
    print("The original")
    cag.print_nodes()
    cag.print_name_to_vertex()
    print()
    print("The subgraph")
    sub.print_nodes()
    sub.print_name_to_vertex()
    print("\nSubgraph of {} hops ending at node {} graph".format(hops, node))
    sub = cag.get_subgraph_for_concept(node, True, hops)
    print("\n\nTwo Graphs")
    print("The original")
    cag.print_nodes()
    cag.print_name_to_vertex()
    print("\nSubgraph of {} hops beginning at node {} graph".format(hops, node))
    cag.get_subgraph_for_concept(node, False, hops)