def test_032_new_processor_declaration_convention(self):
    """
    Test the new convention for creation of Processors, using the
    BareProcessors command (see the descriptions in the input file).
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/19_naming_processors_new_convention.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # issues = prepare_and_solve_model(isess.state)
    # for idx, issue in enumerate(issues):
    #     print(f"Issue {idx + 1}/{len(issues)} = {issue}")

    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    processors = glb_idx.get(Processor.partial_key())
    # Gather every full hierarchical name of every Processor
    all_names = []
    for proc in processors:
        all_names.extend(proc.full_hierarchy_names(glb_idx))
    # "P2.C" and "P3.C" must resolve to the very same Processor
    first = glb_idx.get(Processor.partial_key("P2.C"))
    second = glb_idx.get(Processor.partial_key("P3.C"))
    self.assertEqual(first[0], second[0])
    # Exactly one Processor should match "P1.C.P2"
    matches = glb_idx.get(Processor.partial_key("P1.C.P2"))
    self.assertEqual(len(matches), 1)
    # Close interactive session
    isess.close_db_session()
def test_029_dataset_expansion2(self):
    """
    Test dataset expansion using an advanced expansion expression
    (function calls returning either InterfaceTypes or Processors).
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/18_dataset_expansion_2.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # issues = prepare_and_solve_model(isess.state)
    # for idx, issue in enumerate(issues):
    #     print(f"Issue {idx + 1}/{len(issues)} = {issue}")

    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # Exactly two Processors should result from the expansion
    processors = glb_idx.get(Processor.partial_key())
    self.assertEqual(len(processors), 2)
    p = processors[0]
    # Close interactive session
    isess.close_db_session()
def test_030_solving_flow_graph_matrix(self):
    """
    Solve the graph-solver example, dump the flow-graph matrix to CSV and
    build Plotly-style Sankey diagram dictionaries, one per (Period, Scenario).

    Fix: the original used ``Series.append`` to combine the source and
    target processor columns; that method was removed in pandas 2.0.
    A set union of the two columns produces the same (unordered) pool of
    unique processor names.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    file_path = base_dir + "/z_input_files/v2/15_graph_solver_example.xlsx"
    output_path = base_dir + "/tmp/flow_graph_matrix.csv"
    isess = execute_file(file_path, generator_type="spreadsheet")
    issues = prepare_and_solve_model(isess.state)
    for idx, issue in enumerate(issues):
        print(f"Issue {idx + 1}/{len(issues)} = {issue}")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    df = datasets.get("flow_graph_matrix").data
    df.to_csv(output_path, index=False)
    sankey = {}
    for period in set(df['Period']):
        df_period = df[df['Period'] == period]
        per_scenario = {}
        for scenario in set(df_period['Scenario']):
            ds_scenario = df_period[df_period['Scenario'] == scenario]
            # Unique processors appearing as source or target
            # (Series.append was removed in pandas 2.0 — use set union)
            processors = list(set(ds_scenario['source_processor']) |
                              set(ds_scenario['target_processor']))
            # Encode each link endpoint as an index into `processors`
            source = [processors.index(i)
                      for i in ds_scenario['source_processor']]
            target = [processors.index(i)
                      for i in ds_scenario['target_processor']]
            label = list(ds_scenario['source'] + ' to ' + ds_scenario['target'])
            per_scenario[scenario] = dict(
                type='sankey',
                node=dict(pad=50,
                          thickness=100,
                          line=dict(color="black", width=0.5),
                          label=processors),
                link=dict(source=source,
                          target=target,
                          value=list(ds_scenario['Value']),
                          label=label))
        sankey[period] = per_scenario
    print(sankey['2011']['Scenario1'])
    # Close interactive session
    isess.close_db_session()
def test_024_maddalena_dataset(self):
    """Parse the MAGIC n.1 climate-change (Spain) case study workbook."""
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/MAGIC_n_1_CC_Spain.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_022_processor_scalings(self):
    """Parse the processor-scalings example and export the model to JSON."""
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/14_processor_scalings_example.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    json_string = export_model_to_json(isess.state)
    print(json_string)
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_023_solving(self):
    """Execute the graph-solver example and print any solver issues."""
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/15_graph_solver_example.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    issues = prepare_and_solve_model(isess.state)
    for position, issue in enumerate(issues, start=1):
        print(f"Issue {position}/{len(issues)} = {issue}")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # Close interactive session
    isess.close_db_session()
def test_013_execute_file_v2_seven(self):
    """
    Parsing of Custom datasets.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/07_custom_datasets.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_021_export_to_json(self):
    """
    Testing model export to JSON.
    """
    base_dir = os.path.dirname(os.path.abspath(__file__))
    file_path = base_dir + "/z_input_files/v2/03_Soslaires_no_parameters.xlsx"
    # Alternative inputs, kept for manual experimentation:
    # file_path = base_dir + "/z_input_files/v2/02_declare_hierarchies_and_cloning_and_scaling.xlsx"
    # file_path = base_dir + "/z_input_files/v2/06_upscale_almeria.xlsx"
    # file_path = base_dir + "/z_input_files/test_spreadsheet_4.xlsx"
    # file_path = base_dir + "/z_input_files/v2/08_caso_energia_eu_new_commands.xlsx"
    # file_path = base_dir + "/z_input_files/v2/09_many_to_many_mapping.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    json_string = export_model_to_json(isess.state)
    print(json_string)
    isess.close_db_session()
def test_020_list_of_commands(self):
    """
    Testing the list-of-commands feature (Soslaires example).
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/13_list_of_commands_example_using_soslaires.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_011_execute_file_v2_five(self):
    """
    Dataset processing using old commands.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/05_caso_energia_eu_old_commands.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_009_execute_file_v2_three(self):
    """
    Soslaires, without parameters.
    With regard to the two previous tests, introduces the syntax of a
    Selector of many Processors.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/03_Soslaires_no_parameters.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_001_execute_file_one(self):
    """
    A file containing QQs for three different sets of processors: Crop,
    Farm, AgrarianRegion (extracted from the Almeria case study).
    Checks the number of processor sets read, using processor sets and
    PartialRetrievalDictionary.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/test_spreadsheet_1.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # Three processor sets
    self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_006_execute_file_five(self):
    """
    Parameters; simple expression evaluation in QQs.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/mapping_example_maddalena.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # A single processor set (the original comment said "Three", but the
    # assertion has always checked for one)
    self.assertEqual(len(p_sets), 1)
    # Close interactive session
    isess.close_db_session()
def test_007_execute_file_v2_one(self):
    """
    Two connected Processors.
    Parsing and execution of a file with basic commands, and only
    literals (very basic syntax).
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/01_declare_two_connected_processors.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_018_many_to_many_mappings(self):
    """
    Testing many-to-many mappings.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/09_many_to_many_mapping.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Dump the expanded dataset for manual inspection, then close session
    datasets["ds1"].data.to_csv(
        "/tmp/09_many_to_many_mapping_ds1_results.csv", index=False)
    isess.close_db_session()
def test_008_execute_file_v2_two(self):
    """
    Processors from Soslaires.
    Parsing and execution of a file with basic commands, and only
    literals (very basic syntax).
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/02_declare_hierarchies_and_cloning_and_scaling.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # List every Processor name found in the registry
    processor_dict = get_processor_names_to_processors_dictionary(glb_idx)
    for name in processor_dict:
        print(name)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_012_execute_file_v2_six(self):
    """
    Almeria upscaling with new syntax:
    * References
    * InterfaceTypes
    * BareProcessors
      * Dynamic attribute columns
    * Interfaces
    * Old Upscale (really efficient)
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/06_upscale_almeria.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Close interactive session
    isess.close_db_session()
def test_014_execute_file_v2_eight(self):
    """
    Dataset queries using Mappings.
    """
    file_path = os.path.dirname(
        os.path.abspath(__file__)
    ) + "/z_input_files/v2/08_caso_energia_eu_new_commands_CASE_SENSITIVE.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Check state of things
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        isess.state)
    # TODO Check things!!!
    # self.assertEqual(len(p_sets), 3)
    # Decorate the "ds2" dataset with label columns and show a preview
    name = "ds2"
    ds = datasets[name]  # type: Dataset
    frame = ds.data
    print("Preparing Dataset labels")
    frame = add_label_columns_to_dataframe(name, frame, glb_idx)
    print(frame.head())
    isess.close_db_session()
def test_005_execute_file_five(self):
    """
    Just Structure. From Soslaires.

    Executes the workbook, serializes then deserializes the resulting
    state, and dumps every kind of model object found in the registry.

    Fixes:
    * The serialized state was written to a hard-coded user home path
      ("/home/rnebot/..."), which fails on any other machine; it now goes
      to the system temporary directory.
    * The " \\/ " separators used the invalid escape sequence "\\/"
      (a SyntaxWarning since Python 3.12); escaping the backslash prints
      the exact same text.
    """
    import tempfile

    file_path = os.path.dirname(
        os.path.abspath(__file__)) + "/z_input_files/Soslaires.xlsx"
    isess = execute_file(file_path, generator_type="spreadsheet")
    # Save state; "serialize_state" returns a byte array (it is
    # compressed now), hence mode "wb"
    s = serialize_state(isess.state)
    with open(os.path.join(tempfile.gettempdir(), "Soslaires.serialized"),
              "wb") as f:
        f.write(s)
    local_state = deserialize_state(s)
    # Check state of things (on the round-tripped state)
    glb_idx, p_sets, hh, datasets, mappings = get_case_study_registry_objects(
        local_state)
    # Four processor sets
    self.assertEqual(len(p_sets), 4)
    # Obtain all Observers
    print("---- Observer ----")
    for i in glb_idx.get(Observer.partial_key()):
        print(i.name)
    # Obtain all Processors
    print("---- Processor ----")
    for i in glb_idx.get(Processor.partial_key()):
        print(i.name)
    # Obtain all FactorTypes
    print("---- FactorType ----")
    for i in glb_idx.get(FactorType.partial_key()):
        print(i.name)
    # Obtain all Factors
    print("---- Factor ----")
    for i in glb_idx.get(Factor.partial_key()):
        print(i.processor.name + ":" + i.taxon.name)
    # Obtain all Quantitative Observations
    print("---- Quantities ----")
    for i in glb_idx.get(FactorQuantitativeObservation.partial_key()):
        print(i.factor.processor.name + ":" + i.factor.taxon.name + "= " +
              str(i.value.expression if i.value else ""))
    # Obtain all part-of Relation Observations
    print("---- Part-of relations (P-P) ----")
    for i in glb_idx.get(ProcessorsRelationPartOfObservation.partial_key()):
        print(i.parent_processor.name + " \\/ " + i.child_processor.name)
    # Obtain all undirected flow Relation Observations
    print("---- Undirected flow relations (P-P) ----")
    for i in glb_idx.get(
            ProcessorsRelationUndirectedFlowObservation.partial_key()):
        print(i.source_processor.name + " <> " + i.target_processor.name)
    # Obtain all upscale Relation Observations
    print("---- Upscale relations (P-P) ----")
    for i in glb_idx.get(ProcessorsRelationUpscaleObservation.partial_key()):
        print(i.parent_processor.name + " \\/ " + i.child_processor.name +
              "(" + i.factor_name + ": " + str(i.quantity) + ")")
    # Obtain all directed flow Relation Observations
    print("---- Directed flow relations (F-F) ----")
    for i in glb_idx.get(FactorsRelationDirectedFlowObservation.partial_key()):
        print(i.source_factor.processor.name + ":" +
              i.source_factor.taxon.name + " -> " +
              i.target_factor.processor.name + ":" +
              i.target_factor.taxon.name +
              (" (" + str(i.weight) + ")" if i.weight else ""))
    # Obtain all hierarchies
    print("---- FactorType Hierarchies ----")
    for i in glb_idx.get(Hierarchy.partial_key()):
        print(i.name)
    # Close interactive session
    isess.close_db_session()