def test_io(self):
    """Round-trip two Maccor procedure files through ``Procedure`` and
    verify the re-serialized output is byte-identical to the original.

    The original body duplicated the same logic for each fixture; the two
    cases are now driven by a single loop, and files are opened with
    context managers so handles are not leaked.
    """
    cases = [
        ('xTESLADIAG_000003_CH68.000', 'xTESLADIAG_000003_CH68.json', 'test1.000'),
        ('xTESLADIAG_000004_CH69.000', 'xTESLADIAG_000004_CH69.json', 'test2.000'),
    ]
    for procedure_name, json_name, test_out in cases:
        test_file = os.path.join(TEST_FILE_DIR, procedure_name)
        json_file = os.path.join(TEST_FILE_DIR, json_name)
        # test_file is already an absolute path; no second join needed.
        procedure = Procedure.from_file(test_file)
        with ScratchDir('.'):
            dumpfn(procedure, json_file)
            procedure.to_file(test_out)
            hash1 = hash_file(test_file)
            hash2 = hash_file(test_out)
            if hash1 != hash2:
                # On mismatch, fail with the unified diff so the breaking
                # lines are visible in the test output.
                with open(test_file) as f:
                    original = f.readlines()
                with open(test_out) as f:
                    parsed = f.readlines()
                self.assertFalse(list(difflib.unified_diff(original, parsed)))
                for line in difflib.unified_diff(original, parsed):
                    self.assertIsNotNone(line)
def test_serial_conversion(self):
    """Convert every step of the diagnosticV3 template to an Arbin
    schedule step and check the control type and limits emitted for
    Maccor loop steps, plus the goto wiring of step 15."""
    procedure = Procedure()
    templates = PROCEDURE_TEMPLATE_DIR
    test_file = 'diagnosticV3.000'
    json_file = 'test.json'
    proc_dict = procedure.from_file(os.path.join(templates, test_file))
    step_list = proc_dict['MaccorTestProcedure']['ProcSteps']['TestStep']
    converter = ProcedureToSchedule(step_list)
    step_name_list, step_flow_ctrl = converter.create_metadata()
    for idx, current_step in enumerate(step_list):
        is_loop = 'Loop' in current_step['StepType']
        if is_loop:
            print(idx, current_step)
        arbin_step = converter.compile_to_arbin(
            step_list[idx], idx, step_name_list, step_flow_ctrl)
        if is_loop:
            # Loop steps become "Set Variable(s)" control steps with two limits.
            self.assertEqual(arbin_step['m_szStepCtrlType'], 'Set Variable(s)')
            self.assertEqual(arbin_step['m_uLimitNum'], '2')
        if idx == 15:
            self.assertEqual(arbin_step['Limit0']['m_szGotoStep'], '11-None')
            self.assertEqual(arbin_step['Limit1']['m_szGotoStep'], 'Next Step')
def test_serial_conversion(self):
    """Convert each diagnosticV3 step to an Arbin schedule step, checking
    loop-step control fields and the goto targets of step 15.

    NOTE(review): this is a duplicate of the other ``test_serial_conversion``
    in this file (only quoting style differs); at class-creation time the
    later definition shadows the earlier one.
    """
    procedure = Procedure()
    templates = PROCEDURE_TEMPLATE_DIR
    test_file = "diagnosticV3.000"
    json_file = "test.json"  # unused in this test
    proc_dict = procedure.from_file(os.path.join(templates, test_file))
    test_step_dict = proc_dict["MaccorTestProcedure"]["ProcSteps"][
        "TestStep"]
    converter = ProcedureToSchedule(test_step_dict)
    step_name_list, step_flow_ctrl = converter.create_metadata()
    for step_index, step in enumerate(test_step_dict):
        if "Loop" in step["StepType"]:
            print(step_index, step)
        step_arbin = converter.compile_to_arbin(test_step_dict[step_index],
                                                step_index,
                                                step_name_list,
                                                step_flow_ctrl)
        if "Loop" in step["StepType"]:
            # Maccor loop steps map to Arbin "Set Variable(s)" steps with
            # exactly two limits.
            self.assertEqual(step_arbin["m_szStepCtrlType"], "Set Variable(s)")
            self.assertEqual(step_arbin["m_uLimitNum"], "2")
        if step_index == 15:
            self.assertEqual(step_arbin["Limit0"]["m_szGotoStep"], "11-None")
            self.assertEqual(step_arbin["Limit1"]["m_szGotoStep"], "Next Step")
def biologic_mb_text_to_maccor_xml(cls, mps_text, maccor_header=_default_maccor_header):
    """Convert Biologic modulo-bat settings text into a Maccor procedure XML string.

    Args:
        mps_text (str): raw text of a Biologic .mps settings file.
        maccor_header: header mapping embedded under
            ``MaccorTestProcedure/header`` (defaults to ``_default_maccor_header``).

    Returns:
        str: Maccor procedure XML using CRLF line endings, beginning with the
        XML declaration and Maccor's custom processing instruction, and ending
        with a trailing newline.
    """
    schedule_dict = Settings.mps_text_to_schedule_dict(mps_text)
    seqs = cls._get_seqs(schedule_dict)
    steps = cls._create_steps(seqs)

    body = deepcopy(_blank_maccor_body)
    body["MaccorTestProcedure"]["header"] = maccor_header
    body["MaccorTestProcedure"]["ProcSteps"] = OrderedDict(
        {"TestStep": steps})

    newline = "\r\n"
    # Typo fixed: "elemnts" -> "elements".
    xml_with_dirty_empty_elements = xmltodict.unparse(
        body, encoding="utf-8", pretty=True, indent=" ", newl=newline)
    xml = Procedure.fixup_empty_elements(xml_with_dirty_empty_elements)

    # xmltodict's unparse does not accept custom processing instructions,
    # we must add Maccor's ourselves by replacing the first line (the XML
    # declaration emitted by unparse).
    # Character class fixed: [^\r^\n] contained a stray "^" which also
    # excluded literal carets; [^\r\n] matches everything up to the first
    # line break, which is the intent.
    xml_with_maccor_processing_instruction = re.sub(
        r"^[^\r\n]*",
        '<?xml version="1.0" encoding="UTF-8"?>\r\n<?maccor-application progid="Maccor Procedure File"?>',
        xml,
    )

    # newline at EOF
    return xml_with_maccor_processing_instruction + newline
def test_schedule_creation(self):
    """Generate an Arbin .sdu schedule from the diagnosticV3 procedure and
    spot-check one emitted section header.

    Fixes over the original: the output file is read via a context manager
    (no leaked handle), and the cleanup ``os.remove`` runs in a ``finally``
    so a failed assertion no longer leaves a stale file in TEST_FILE_DIR.
    """
    procedure = Procedure()
    templates = PROCEDURE_TEMPLATE_DIR
    test_file = 'diagnosticV3.000'
    sdu_test_input = os.path.join(SCHEDULE_TEMPLATE_DIR,
                                  '20170630-3_6C_9per_5C.sdu')
    sdu_test_output = os.path.join(TEST_FILE_DIR,
                                   'schedule_test_output.sdu')
    proc_dict = procedure.from_file(os.path.join(templates, test_file))
    test_step_dict = proc_dict['MaccorTestProcedure']['ProcSteps'][
        'TestStep']
    converter = ProcedureToSchedule(test_step_dict)
    converter.create_sdu(sdu_test_input, sdu_test_output)
    try:
        # .sdu files from this template are latin-1 encoded.
        with open(sdu_test_output, encoding='latin-1') as f:
            parsed = f.readlines()
        self.assertEqual(parsed[329], '[Schedule_Step3_Limit0]\n')
    finally:
        os.remove(sdu_test_output)
def test_single_step_conversion(self):
    """Convert two individual diagnosticV3 steps (indices 5 and 8) to Arbin
    schedule steps and check labels, goto wiring, and limit equations.

    NOTE(review): duplicate of the other ``test_single_step_conversion`` in
    this file (only quoting style differs); the later definition wins at
    class-creation time.
    """
    procedure = Procedure()
    templates = PROCEDURE_TEMPLATE_DIR
    test_file = "diagnosticV3.000"
    json_file = "test.json"  # unused in this test
    proc_dict = procedure.from_file(os.path.join(templates, test_file))
    test_step_dict = proc_dict["MaccorTestProcedure"]["ProcSteps"][
        "TestStep"]
    converter = ProcedureToSchedule(test_step_dict)
    step_index = 5
    step_name_list, step_flow_ctrl = converter.create_metadata()
    # Flow-control map: step index -> "<step number>-<step label>" target.
    self.assertEqual(step_flow_ctrl[7], "5-reset cycle C/20")
    self.assertEqual(step_flow_ctrl[68], "38-reset cycle")
    step_arbin = converter.compile_to_arbin(test_step_dict[step_index],
                                            step_index,
                                            step_name_list,
                                            step_flow_ctrl)
    self.assertEqual(step_arbin["m_szLabel"], "6-None")
    self.assertEqual(step_arbin["Limit0"]["m_szGotoStep"], "Next Step")
    self.assertEqual(step_arbin["Limit0"]["Equation0_szLeft"],
                     "PV_CHAN_Voltage")
    self.assertEqual(step_arbin["Limit2"]["m_szGotoStep"],
                     "70-These are the 2 reset cycles")
    step_index = 8
    step_arbin = converter.compile_to_arbin(test_step_dict[step_index],
                                            step_index,
                                            step_name_list,
                                            step_flow_ctrl)
    # Step 8's first end condition carries over into the Arbin limit value.
    self.assertEqual(step_arbin["Limit0"]["Equation0_szLeft"],
                     "PV_CHAN_CV_Stage_Current")
    self.assertEqual(
        step_arbin["Limit0"]["Equation0_szRight"],
        test_step_dict[step_index]["Ends"]["EndEntry"][0]["Value"],
    )
def test_single_step_conversion(self):
    """Check Arbin conversion of two individual steps (5 and 8) from the
    diagnosticV3 template: labels, goto-step wiring, and limit equations."""
    procedure = Procedure()
    templates = PROCEDURE_TEMPLATE_DIR
    test_file = 'diagnosticV3.000'
    json_file = 'test.json'
    proc_dict = procedure.from_file(os.path.join(templates, test_file))
    step_list = proc_dict['MaccorTestProcedure']['ProcSteps']['TestStep']
    converter = ProcedureToSchedule(step_list)
    idx = 5
    name_list, flow_ctrl = converter.create_metadata()
    self.assertEqual(flow_ctrl[7], '5-reset cycle C/20')
    self.assertEqual(flow_ctrl[68], '38-reset cycle')
    arbin_step = converter.compile_to_arbin(step_list[idx], idx,
                                            name_list, flow_ctrl)
    self.assertEqual(arbin_step['m_szLabel'], '6-None')
    limit0 = arbin_step['Limit0']
    self.assertEqual(limit0['m_szGotoStep'], 'Next Step')
    self.assertEqual(limit0['Equation0_szLeft'], 'PV_CHAN_Voltage')
    self.assertEqual(arbin_step['Limit2']['m_szGotoStep'],
                     '70-These are the 2 reset cycles')
    idx = 8
    arbin_step = converter.compile_to_arbin(step_list[idx], idx,
                                            name_list, flow_ctrl)
    limit0 = arbin_step['Limit0']
    self.assertEqual(limit0['Equation0_szLeft'], 'PV_CHAN_CV_Stage_Current')
    self.assertEqual(limit0['Equation0_szRight'],
                     step_list[idx]['Ends']['EndEntry'][0]['Value'])
def test_generate_proc_exp(self):
    """Build a procedure via ``Procedure.from_exp`` and verify it serializes
    byte-identically to the reference EXP.000 fixture.

    Fix over the original: files are opened with context managers so the
    handles are not leaked on the diff-reporting path.
    """
    test_file = os.path.join(TEST_FILE_DIR, 'EXP.000')
    json_file = os.path.join(TEST_FILE_DIR, 'EXP.json')
    test_out = 'test_EXP.000'
    # (cutoff_voltage, charge_rate, discharge_rate)
    test_parameters = ["4.2", "2.0C", "2.0C"]
    procedure = Procedure.from_exp(*test_parameters)
    with ScratchDir('.'):
        dumpfn(procedure, json_file)
        procedure.to_file(test_out)
        hash1 = hash_file(test_file)
        hash2 = hash_file(test_out)
        if hash1 != hash2:
            # Fail with the unified diff so mismatching lines are visible.
            with open(test_file) as f:
                original = f.readlines()
            with open(test_out) as f:
                parsed = f.readlines()
            self.assertFalse(list(difflib.unified_diff(original, parsed)))
            for line in difflib.unified_diff(original, parsed):
                self.assertIsNotNone(line)
def generate_protocol_files_from_csv(csv_filename, output_directory=None):
    """
    Generates a set of protocol files from csv filename input by
    reading protocol file input corresponding to each line of the csv file.
    Writes a csv file that.

    Args:
        csv_filename (str): CSV containing protocol file parameters.
        output_directory (str): directory in which to place the output files

    Returns:
        tuple: (new_files, result, message) where new_files is the list of
        written protocol paths, result is "success" or "error", and message
        is a dict with "comment" and "error" keys describing the outcome.
    """
    # Read csv file
    protocol_params_df = pd.read_csv(csv_filename)
    new_files = []
    names = []
    result = ""
    message = {"comment": "", "error": ""}
    if output_directory is None:
        output_directory = PROCEDURE_TEMPLATE_DIR
    for index, protocol_params in protocol_params_df.iterrows():
        template = protocol_params["template"]

        # Filename for the output: "<project>_<zero-padded seq_num>"
        filename_prefix = "_".join(
            [
                protocol_params["project_name"],
                "{:06d}".format(protocol_params["seq_num"]),
            ]
        )

        # Switch for template invocation
        if template == "EXP.000":
            protocol = Procedure.from_exp(
                **protocol_params[["cutoff_voltage", "charge_rate", "discharge_rate"]]
            )
            filename = "{}.000".format(filename_prefix)
            filename = os.path.join(output_directory, "procedures", filename)
        elif template == "diagnosticV2.000":
            diag_params_df = pd.read_csv(
                os.path.join(PROCEDURE_TEMPLATE_DIR, "PreDiag_parameters - DP.csv")
            )
            diagnostic_params = diag_params_df[
                diag_params_df["diagnostic_parameter_set"]
                == protocol_params["diagnostic_parameter_set"]
            ].squeeze()

            # TODO: should these be separated?
            protocol = Procedure.from_regcyclev2(protocol_params)
            protocol.add_procedure_diagcyclev2(
                protocol_params["capacity_nominal"], diagnostic_params
            )
            filename = "{}.000".format(filename_prefix)
            filename = os.path.join(output_directory, "procedures", filename)
        # TODO: how are these different?
        elif template in ["diagnosticV3.000", "diagnosticV4.000"]:
            diag_params_df = pd.read_csv(
                os.path.join(PROCEDURE_TEMPLATE_DIR, "PreDiag_parameters - DP.csv")
            )
            diagnostic_params = diag_params_df[
                diag_params_df["diagnostic_parameter_set"]
                == protocol_params["diagnostic_parameter_set"]
            ].squeeze()
            protocol = Procedure.generate_procedure_regcyclev3(index, protocol_params)
            protocol.generate_procedure_diagcyclev3(
                protocol_params["capacity_nominal"], diagnostic_params
            )
            filename = "{}.000".format(filename_prefix)
            filename = os.path.join(output_directory, "procedures", filename)
        elif template == "formationV1.mps":
            protocol = Settings.from_file(os.path.join(BIOLOGIC_TEMPLATE_DIR, template))
            protocol = protocol.formation_protocol_bcs(protocol, protocol_params)
            filename = "{}.mps".format(filename_prefix)
            filename = os.path.join(output_directory, "settings", filename)
        else:
            warnings.warn("Unsupported file template {}, skipping.".format(template))
            result = "error"
            message = {
                "comment": "Unable to find template: " + template,
                "error": "Not Found",
            }
            continue

        logger.info(filename, extra=s)
        if not os.path.isfile(filename):
            protocol.to_file(filename)
            new_files.append(filename)
            names.append(filename_prefix + "_")
        # NOTE(review): this branch appears unreachable — any template
        # containing ".sdu" is caught by the `else` above, which `continue`s
        # before execution reaches this point.
        elif ".sdu" in template:
            logger.warning("Schedule file generation not yet implemented", extra=s)
            result = "error"
            message = {
                "comment": "Schedule file generation is not yet implemented",
                "error": "Not Implemented",
            }

    # This block of code produces the file containing all of the run file
    # names produced in this function call. This is to make starting tests easier
    _, namefile = os.path.split(csv_filename)
    namefile = namefile.split("_")[0] + "_names_"
    namefile = namefile + datetime.datetime.now().strftime("%Y%m%d_%H%M") + ".csv"
    with open(
        os.path.join(output_directory, "names", namefile), "w", newline=""
    ) as outputfile:
        wr = csv.writer(outputfile)
        for name in names:
            wr.writerow([name])
        # Redundant: the `with` block closes the file on exit.
        outputfile.close()
    if not result:
        result = "success"
        message = {
            "comment": "Generated {} protocols".format(str(len(new_files))),
            "error": "",
        }
    return new_files, result, message
def generate_protocol_files_from_csv(csv_filename, output_directory=None):
    """
    Generates a set of protocol files from csv filename input by
    reading protocol file input corresponding to each line of the csv file.
    Writes a csv file that.

    Args:
        csv_filename (str): CSV containing protocol file parameters.
        output_directory (str): directory in which to place the output files

    Returns:
        tuple: (new_files, result, message) — written file paths, "success"
        or "error", and a dict with "comment"/"error" keys.

    NOTE(review): this is an older duplicate of the other
    ``generate_protocol_files_from_csv`` definitions in this file; only the
    last definition in the module is callable.
    """
    # Read csv file
    protocol_params_df = pd.read_csv(csv_filename)
    new_files = []
    names = []
    result = ''
    message = {'comment': '', 'error': ''}
    if output_directory is None:
        output_directory = PROCEDURE_TEMPLATE_DIR
    for index, protocol_params in protocol_params_df.iterrows():
        template = protocol_params['template']

        # Switch for template invocation
        if template == "EXP.000":
            procedure = Procedure.from_exp(**protocol_params[
                ["cutoff_voltage", "charge_rate", "discharge_rate"]])
        elif template == 'diagnosticV2.000':
            diag_params_df = pd.read_csv(
                os.path.join(PROCEDURE_TEMPLATE_DIR,
                             "PreDiag_parameters - DP.csv"))
            diagnostic_params = diag_params_df[
                diag_params_df['diagnostic_parameter_set'] ==
                protocol_params['diagnostic_parameter_set']].squeeze()

            # TODO: should these be separated?
            procedure = Procedure.from_regcyclev2(protocol_params)
            procedure.add_procedure_diagcyclev2(
                protocol_params["capacity_nominal"], diagnostic_params)
        # TODO: how are these different?
        elif template in ['diagnosticV3.000', 'diagnosticV4.000']:
            diag_params_df = pd.read_csv(
                os.path.join(PROCEDURE_TEMPLATE_DIR,
                             "PreDiag_parameters - DP.csv"))
            diagnostic_params = diag_params_df[
                diag_params_df['diagnostic_parameter_set'] ==
                protocol_params['diagnostic_parameter_set']].squeeze()
            procedure = Procedure.generate_procedure_regcyclev3(
                index, protocol_params)
            procedure.generate_procedure_diagcyclev3(
                protocol_params["capacity_nominal"], diagnostic_params)
        else:
            warnings.warn(
                "Unsupported file template {}, skipping.".format(template))
            result = "error"
            message = {
                'comment': 'Unable to find template: ' + template,
                'error': 'Not Found'
            }
            continue

        filename_prefix = '_'.join([
            protocol_params["project_name"],
            '{:06d}'.format(protocol_params["seq_num"])
        ])
        filename = "{}.000".format(filename_prefix)
        filename = os.path.join(output_directory, 'procedures', filename)
        logger.info(filename, extra=s)
        if not os.path.isfile(filename):
            procedure.to_file(filename)
            new_files.append(filename)
            names.append(filename_prefix + '_')
        # NOTE(review): this branch appears unreachable — a ".sdu" template
        # is caught by the `else` above and `continue`s before this point.
        elif '.sdu' in template:
            logger.warning('Schedule file generation not yet implemented',
                           extra=s)
            result = "error"
            message = {
                'comment': 'Schedule file generation is not yet implemented',
                'error': 'Not Implemented'
            }

    # This block of code produces the file containing all of the run file
    # names produced in this function call. This is to make starting tests easier
    _, namefile = os.path.split(csv_filename)
    namefile = namefile.split('_')[0] + '_names_'
    namefile = namefile + datetime.datetime.now().strftime(
        "%Y%m%d_%H%M") + '.csv'
    with open(os.path.join(output_directory, "names", namefile), 'w',
              newline='') as outputfile:
        wr = csv.writer(outputfile)
        for name in names:
            wr.writerow([name])
        # Redundant: the `with` block closes the file on exit.
        outputfile.close()
    if not result:
        result = "success"
        message = {
            'comment': 'Generated {} protocols'.format(str(len(new_files))),
            'error': ''
        }
    return new_files, result, message
def test_schedule_creation(self):
    """End-to-end: build a regcyclev3+diagcyclev3 procedure from inline
    parameters, convert it to an Arbin .sdu schedule with global safety
    ranges, and check section headers and parsed schedule fields.

    NOTE(review): this method has the same name as another
    ``test_schedule_creation`` in this file; the later definition shadows
    the earlier one at class-creation time.
    """
    protocol_params_dict = {
        'project_name': ['PreDiag'],
        'seq_num': [100],
        'template': ['diagnosticV3.000'],
        'charge_constant_current_1': [1],
        'charge_percent_limit_1': [30],
        'charge_constant_current_2': [1],
        'charge_cutoff_voltage': [3.6],
        'charge_constant_voltage_time': [30],
        'charge_rest_time': [5],
        'discharge_constant_current': [1],
        'discharge_cutoff_voltage': [3.0],
        'discharge_rest_time': [15],
        'cell_temperature_nominal': [25],
        'cell_type': ['Tesla_Model3_21700'],
        'capacity_nominal': [1.1],
        'diagnostic_type': ['HPPC+RPT'],
        'diagnostic_parameter_set': ['Tesla21700'],
        'diagnostic_start_cycle': [30],
        'diagnostic_interval': [100]
    }
    procedure_to_convert = 'test_procedure.000'
    with ScratchDir('.') as scratch_dir:
        protocol_params_df = pd.DataFrame.from_dict(protocol_params_dict)
        protocol_params = protocol_params_df.iloc[[0]].squeeze()
        diag_params_df = pd.read_csv(
            os.path.join(PROCEDURE_TEMPLATE_DIR,
                         "PreDiag_parameters - DP.csv"))
        # NOTE(review): unlike sibling tests, no .squeeze() here, so this is
        # a one-row DataFrame rather than a Series — confirm downstream
        # generate_procedure_diagcyclev3 accepts that shape.
        diagnostic_params = diag_params_df[
            diag_params_df['diagnostic_parameter_set'] == 'A123LFP']
        procedure = Procedure.generate_procedure_regcyclev3(
            0, protocol_params)
        procedure.generate_procedure_diagcyclev3(
            protocol_params["capacity_nominal"], diagnostic_params)
        # Safety skip-to-end thresholds: 3.8 V upper, 2.0 V lower.
        procedure.set_skip_to_end_diagnostic(3.8, 2.0, step_key='070')
        self.assertEqual(procedure['MaccorTestProcedure']['ProcSteps']['TestStep'][0]
                         ['Ends']['EndEntry'][1]['Value'], 3.8)
        self.assertEqual(procedure['MaccorTestProcedure']['ProcSteps']['TestStep'][0]
                         ['Ends']['EndEntry'][2]['Value'], 2.0)
        procedure.to_file(os.path.join(scratch_dir, procedure_to_convert))
        sdu_test_input = os.path.join(SCHEDULE_TEMPLATE_DIR,
                                      '20170630-3_6C_9per_5C.sdu')
        converted_sdu_name = 'schedule_test_20200724.sdu'
        proc_dict = procedure.from_file(
            os.path.join(scratch_dir, procedure_to_convert))
        sdu_test_output = os.path.join(TEST_FILE_DIR, converted_sdu_name)
        test_step_dict = proc_dict['MaccorTestProcedure']['ProcSteps'][
            'TestStep']
        converter = ProcedureToSchedule(test_step_dict)
        # Global current safety window: +/- 2 * 1.5C of nominal capacity.
        global_min_cur = -2 * 1.5 * protocol_params['capacity_nominal']
        global_max_cur = 2 * 1.5 * protocol_params['capacity_nominal']
        converter.create_sdu(
            sdu_test_input, sdu_test_output,
            current_range='Range2',
            global_v_range=[2.0, 3.8],
            global_temp_range=[0, 60],
            global_current_range=[global_min_cur, global_max_cur])
        # .sdu files are latin-1 encoded.
        parsed = open(sdu_test_output, encoding='latin-1').readlines()
        self.assertEqual(parsed[328], '[Schedule_Step3_Limit0]\n')
        self.assertEqual(parsed[6557], '[Schedule_UserDefineSafety15]\n')
        schedule = Schedule.from_file(os.path.join(sdu_test_output))
        self.assertEqual(schedule['Schedule']['Step15']['m_uLimitNum'], '2')
        self.assertEqual(schedule['Schedule']['Step14']['m_uLimitNum'], '6')
        self.assertEqual(schedule['Schedule']['m_uStepNum'], '96')
        self.assertEqual(schedule['Schedule']['Step86']['m_szCtrlValue'],
                         '15')
        self.assertEqual(
            schedule['Schedule']['Step86']['m_szExtCtrlValue1'], '1')
        self.assertEqual(
            schedule['Schedule']['Step86']['m_szExtCtrlValue2'], '0')
def test_waveform_from_csv(self):
    """Generate driving-waveform procedures for every row of the Drive
    parameters CSV, validating each generated .MWF waveform's power,
    current, and voltage bounds, the FastWave steps spliced into the
    procedure, and the final set of produced files/names."""
    csv_file = os.path.join(TEST_FILE_DIR, "data-share", "raw",
                            "parameters", "Drive_parameters - GP.csv")
    protocol_params_df = pd.read_csv(csv_file)

    # Max power in watts in the profiles (should be less than max power limit on cyclers)
    MAX_PROFILE_CURRENT = 15
    MIN_PROFILE_VOLTAGE = 2.7
    MAX_PROFILE_POWER = MAX_PROFILE_CURRENT * MIN_PROFILE_VOLTAGE

    new_files = []
    names = []
    waveform_names = []
    result = ""
    message = {"comment": "", "error": ""}
    with ScratchDir(".") as scratch_dir:
        output_directory = scratch_dir
        os.makedirs(os.path.join(output_directory, "procedures"))
        for index, protocol_params in protocol_params_df.iterrows():
            template = protocol_params["template"]
            filename_prefix = "_".join([
                protocol_params["project_name"],
                "{:06d}".format(protocol_params["seq_num"]),
            ])
            if template == "drivingV1.000":
                diag_params_df = pd.read_csv(
                    os.path.join(PROCEDURE_TEMPLATE_DIR,
                                 "PreDiag_parameters - DP.csv"))
                diagnostic_params = diag_params_df[
                    diag_params_df["diagnostic_parameter_set"] ==
                    protocol_params["diagnostic_parameter_set"]].squeeze()
                mwf_dir = os.path.join(scratch_dir, "mwf_files")
                # Writes the .MWF waveform file and returns its path.
                waveform_name = insert_driving_parametersv1(
                    protocol_params, waveform_directory=mwf_dir)
                template_fullpath = os.path.join(PROCEDURE_TEMPLATE_DIR,
                                                 template)
                protocol = Procedure.generate_procedure_drivingv1(
                    index, protocol_params, waveform_name,
                    template=template_fullpath)
                protocol.generate_procedure_diagcyclev3(
                    protocol_params["capacity_nominal"], diagnostic_params)
                filename = "{}.000".format(filename_prefix)
                filename = os.path.join(output_directory, "procedures",
                                        filename)
                waveform_names.append(os.path.split(waveform_name)[-1])
                if not os.path.isfile(filename):
                    protocol.to_file(filename)
                    new_files.append(filename)
                    names.append(filename_prefix + "_")
                # Validate the generated waveform: tab-separated, no header;
                # column 1 = mode flag ("P" power / "I" current), column 0 =
                # "D" for discharge rows.
                waveform_df = pd.read_csv(waveform_name, sep="\t",
                                          header=None)
                power_df = waveform_df[waveform_df[1] == "P"]
                self.assertLessEqual(power_df[2].abs().max(),
                                     MAX_PROFILE_POWER)
                self.assertGreaterEqual(power_df[2].abs().max(), 0)
                current_df = waveform_df[waveform_df[1] == "I"]
                self.assertLessEqual(current_df[2].abs().max(),
                                     MAX_PROFILE_CURRENT)
                self.assertGreaterEqual(current_df[2].abs().max(), 0)
                discharge_df = waveform_df[waveform_df[0] == "D"]
                self.assertGreaterEqual(discharge_df[4].abs().max(),
                                        MIN_PROFILE_VOLTAGE)
                # Steps 32 and 64 should have been replaced by FastWave
                # steps pointing at the waveform file (sans extension).
                self.assertEqual(
                    protocol.get(
                        "MaccorTestProcedure.ProcSteps.TestStep.32.StepType"),
                    'FastWave')
                self.assertEqual(
                    protocol.get(
                        "MaccorTestProcedure.ProcSteps.TestStep.64.StepType"),
                    'FastWave')
                wave_value = os.path.split(waveform_name)[-1].split(".")[0]
                self.assertEqual(
                    protocol.get(
                        "MaccorTestProcedure.ProcSteps.TestStep.32.StepValue"),
                    wave_value)
                self.assertEqual(
                    protocol.get(
                        "MaccorTestProcedure.ProcSteps.TestStep.64.StepValue"),
                    wave_value)
        # 36 procedures but only 18 distinct waveform files are expected.
        self.assertEqual(
            len(os.listdir(os.path.join(output_directory, "procedures"))),
            36)
        self.assertEqual(
            len(os.listdir(os.path.join(output_directory, "mwf_files"))),
            18)
        test_names = [
            'US06_x4_24W.MWF', 'LA4_x4_10W.MWF', 'US06_x4_32W.MWF',
            'LA4_x4_14W.MWF', 'US06_x4_40W.MWF', 'LA4_x4_18W.MWF',
            'US06_x8_24W.MWF', 'LA4_x8_10W.MWF', 'US06_x8_32W.MWF',
            'LA4_x8_14W.MWF', 'US06_x8_40W.MWF', 'LA4_x8_18W.MWF',
            'US06_x12_24W.MWF', 'LA4_x12_10W.MWF', 'US06_x12_32W.MWF',
            'LA4_x12_14W.MWF', 'US06_x12_40W.MWF', 'LA4_x12_18W.MWF',
            'US06_x4_24W.MWF', 'LA4_x4_10W.MWF', 'US06_x4_32W.MWF',
            'LA4_x4_14W.MWF', 'US06_x4_40W.MWF', 'LA4_x4_18W.MWF',
            'US06_x8_24W.MWF', 'LA4_x8_10W.MWF', 'US06_x8_32W.MWF',
            'LA4_x8_14W.MWF', 'US06_x8_40W.MWF', 'LA4_x8_18W.MWF',
            'US06_x12_24W.MWF', 'LA4_x12_10W.MWF', 'US06_x12_32W.MWF',
            'LA4_x12_14W.MWF', 'US06_x12_40W.MWF', 'LA4_x12_18W.MWF'
        ]
        self.assertListEqual(test_names, waveform_names)
def test_prediag_with_waveform(self):
    """Build a PreDiag procedure from CSV parameters, verify the two
    regular-cycling step sequences, then splice a Maccor waveform discharge
    into steps 32 and 64 and confirm they become FastWave steps."""
    maccor_waveform_file = os.path.join(TEST_FILE_DIR, "LA4_8rep_lim.MWF")
    test_file = os.path.join(PROCEDURE_TEMPLATE_DIR,
                             "diagnosticV3.000")  # unused in this test
    csv_file = os.path.join(TEST_FILE_DIR,
                            "PredictionDiagnostics_parameters.csv")
    protocol_params_df = pd.read_csv(csv_file)
    index = 1
    # Override the nominal capacity for row 1 before generating.
    protocol_params_df.iloc[
        index,
        protocol_params_df.columns.get_loc("capacity_nominal")] = 3.71
    protocol_params = protocol_params_df.iloc[index]
    diag_params_df = pd.read_csv(
        os.path.join(PROCEDURE_TEMPLATE_DIR,
                     "PreDiag_parameters - DP.csv"))
    diagnostic_params = diag_params_df[
        diag_params_df["diagnostic_parameter_set"] ==
        protocol_params["diagnostic_parameter_set"]].squeeze()
    procedure = Procedure.generate_procedure_regcyclev3(
        index, protocol_params)
    procedure.generate_procedure_diagcyclev3(
        protocol_params["capacity_nominal"], diagnostic_params)
    steps = [
        x["StepType"]
        for x in procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"]
    ]
    print(steps)
    # First regular-cycling loop starts at step 27.
    start = 27
    reg_cycle_steps = [
        "Do 1",
        "Charge",
        "Charge",
        "Charge",
        "Rest",
        "Dischrge",
        "Rest",
        "AdvCycle",
        "Loop 1",
    ]
    reg_steps_len = len(reg_cycle_steps)
    self.assertEqual(steps[start:start + reg_steps_len], reg_cycle_steps)
    # Second regular-cycling loop starts at step 59.
    start = 59
    reg_cycle_steps = [
        "Do 2",
        "Charge",
        "Charge",
        "Charge",
        "Rest",
        "Dischrge",
        "Rest",
        "AdvCycle",
        "Loop 2",
    ]
    reg_steps_len = len(reg_cycle_steps)
    self.assertEqual(steps[start:start + reg_steps_len], reg_cycle_steps)
    print(procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][32])
    print(procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][64])
    # Replace the discharge portions of both loops with the waveform.
    procedure.insert_maccor_waveform_discharge(32, maccor_waveform_file)
    procedure.insert_maccor_waveform_discharge(64, maccor_waveform_file)
    self.assertEqual(
        procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][32]
        ["StepType"],
        "FastWave",
    )
    self.assertEqual(
        procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][64]
        ["StepType"],
        "FastWave",
    )
    # Smoke-test serialization of the modified procedure.
    with ScratchDir(".") as scratch_dir:
        driving_test_name = "Drive_test20200716.000"
        procedure.to_file(driving_test_name)
def test_procedure_with_waveform(self):
    """Truncate the diagnosticV2 template to a small procedure, insert a
    Maccor waveform discharge at step 6, rewire the end-step targets, and
    verify the resulting step sequence serializes cleanly."""
    maccor_waveform_file = os.path.join(TEST_FILE_DIR, "LA4_8rep_lim.MWF")
    test_file = os.path.join(PROCEDURE_TEMPLATE_DIR, "diagnosticV2.000")
    procedure = Procedure.from_file(test_file)
    # Keep copies of a rest step and the terminal End step for re-insertion.
    rest_step = procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][
        2]
    end_step = procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][
        -1]
    # Truncate to the first 8 steps, then duplicate steps 4-7 into 5-8.
    procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"] = procedure[
        "MaccorTestProcedure"]["ProcSteps"]["TestStep"][:8]
    procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][
        5:9] = deepcopy(
            procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][4:8])
    procedure.set("MaccorTestProcedure.ProcSteps.TestStep.5",
                  deepcopy(rest_step))
    procedure.set("MaccorTestProcedure.ProcSteps.TestStep.9",
                  deepcopy(rest_step))
    procedure.set("MaccorTestProcedure.ProcSteps.TestStep.10",
                  deepcopy(end_step))
    # Re-point end-entry goto targets for the rearranged steps.
    procedure.set(
        "MaccorTestProcedure.ProcSteps.TestStep.5.Ends.EndEntry.0.Step",
        "007")
    procedure.set(
        "MaccorTestProcedure.ProcSteps.TestStep.8.Ends.EndEntry.Step",
        "010")
    procedure.set(
        "MaccorTestProcedure.ProcSteps.TestStep.9.Ends.EndEntry.0.Step",
        "011")
    procedure = procedure.insert_maccor_waveform_discharge(
        6, maccor_waveform_file)
    # Point the safety end-entries of every cycling step at the End step.
    for step in procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"]:
        print(step["Ends"])
        if step["StepType"] in ["Charge", "Dischrge", "Rest"]:
            step["Ends"]["EndEntry"][-1]["Step"] = "011"
            step["Ends"]["EndEntry"][-2]["Step"] = "011"
    steps = [
        x["StepType"]
        for x in procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"]
    ]
    self.assertEqual(
        steps,
        [
            "Rest",
            "Charge",
            "Rest",
            "Do 1",
            "Charge",
            "Rest",
            "FastWave",
            "AdvCycle",
            "Loop 1",
            "Rest",
            "End",
        ],
    )
    self.assertEqual(
        procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][5]
        ["Ends"]["EndEntry"][0]["Step"],
        "007",
    )
    self.assertEqual(
        procedure["MaccorTestProcedure"]["ProcSteps"]["TestStep"][6]
        ["StepType"],
        "FastWave",
    )
    # Smoke-test serialization of the assembled procedure.
    with ScratchDir(".") as scratch_dir:
        local_name = "test_mwf_LA4_lim.000"
        procedure.to_file(os.path.join(scratch_dir, local_name))
def test_conversion_with_updated(self):
    """Generate a diagnostic Maccor procedure from inline parameters,
    convert it to a Biologic modulo-bat .mps file with a custom sequence
    mapper and voltage limits, then check the emitted control rows and
    the global voltage bounds in the output file."""
    converter = MaccorToBiologicMb()
    with ScratchDir(".") as scratch_dir:
        # Generate a protocol that can be used with the existing cells for testing purposes
        reg_params = {
            'project_name': {0: 'FormDegrade'},
            'seq_num': {0: 0},
            'template': {0: 'diagnosticV5.000'},
            'charge_constant_current_1': {0: 2.0},
            'charge_percent_limit_1': {0: 30},
            'charge_constant_current_2': {0: 2.0},
            'charge_cutoff_voltage': {0: 4.4},
            'charge_constant_voltage_time': {0: 60},
            'charge_rest_time': {0: 5},
            'discharge_constant_current': {0: 1.0},
            'discharge_cutoff_voltage': {0: 3.0},
            'discharge_rest_time': {0: 15},
            'cell_temperature_nominal': {0: 25},
            'cell_type': {0: 'LiFun240'},
            'capacity_nominal': {0: 0.240},
            'diagnostic_type': {0: 'HPPC+RPT'},
            'diagnostic_parameter_set': {0: 'LiFunForm'},
            'diagnostic_start_cycle': {0: 30},
            'diagnostic_interval': {0: 100}
        }
        protocol_params_df = pd.DataFrame.from_dict(reg_params)
        index = 0
        protocol_params = protocol_params_df.iloc[index]
        diag_params_df = pd.read_csv(
            os.path.join(PROCEDURE_TEMPLATE_DIR,
                         "PreDiag_parameters - DP.csv"))
        diagnostic_params = diag_params_df[
            diag_params_df["diagnostic_parameter_set"] ==
            protocol_params["diagnostic_parameter_set"]].squeeze()
        procedure = Procedure.generate_procedure_regcyclev3(
            index, protocol_params)
        procedure.generate_procedure_diagcyclev3(
            protocol_params["capacity_nominal"], diagnostic_params)
        procedure.set_skip_to_end_diagnostic(4.5, 2.0, step_key="070",
                                             new_step_key="095")
        procedure.to_file(os.path.join(scratch_dir, "BioTest_000001.000"))

        # Setup the converter and run it
        def set_i_range(tech_num, seq, idx):
            # Force the current range of every converted sequence to 1 A.
            seq_copy = copy.deepcopy(seq)
            seq_copy["I Range"] = "1 A"
            return seq_copy

        converter.seq_mappers.append(set_i_range)
        converter.min_voltage_v = 2.0
        converter.max_voltage_v = 4.5
        converter.convert(os.path.join(scratch_dir, "BioTest_000001.000"),
                          TEST_FILE_DIR, "BioTest_000001")
        # NOTE(review): file handle is never closed — consider `with open`.
        f = open(os.path.join(TEST_FILE_DIR, "BioTest_000001.mps"),
                 encoding="ISO-8859-1")
        file = f.readlines()
        # Line 35 of the .mps holds the ctrl_type row; line 37 the
        # ctrl1_val row; lines 9/10 the global voltage bounds.
        control_list = [
            'ctrl_type', 'Rest', 'CC', 'Rest', 'CC', 'CV', 'CC', 'Loop',
            'CC', 'CV', 'Rest', 'CC', 'Rest', 'CC', 'CC', 'Loop', 'CV',
            'CC', 'CC', 'CV', 'CC', 'CC', 'CV', 'CC', 'CC', 'CV', 'CC',
            'CC', 'CC', 'CV', 'Rest', 'CC', 'Rest', 'Loop'
        ]
        self.assertListEqual(control_list, file[35].split())
        value_list = [
            'ctrl1_val', '240.000', '34.300', '4.400', '34.300',
            '100.000', '80.000', '4.400', '240.000', '180.000', '80.000',
            '100.000', '3.000', '80.000', '48.000', '4.400', '48.000',
            '48.000', '4.400', '240.000', '48.000', '4.400', '480.000',
            '480.000', '480.000', '4.400', '240.000', '100.000'
        ]
        self.assertListEqual(value_list, file[37].split())
        voltage_min = '\tEcell min = 2.00 V\n'
        self.assertEqual(voltage_min, file[9])
        voltage_max = '\tEcell max = 4.50 V\n'
        self.assertEqual(voltage_max, file[10])
def generate_protocol_files_from_csv(csv_filename, output_directory=None):
    """
    Generates a set of protocol files from csv filename input by
    reading protocol file input corresponding to each line of the csv file.
    Writes a csv file that.

    Args:
        csv_filename (str): CSV containing protocol file parameters.
        output_directory (str): directory in which to place the output files

    Returns:
        tuple: (successfully_generated_files, file_generation_failures,
        result, message).

    NOTE(review): this variant returns a 4-tuple, whereas the other
    ``generate_protocol_files_from_csv`` definitions in this file return a
    3-tuple — callers must match whichever definition is last in the module.
    """
    # Read csv file
    protocol_params_df = pd.read_csv(csv_filename)
    successfully_generated_files = []
    file_generation_failures = []
    names = []
    result = ""
    message = {"comment": "", "error": ""}
    if output_directory is None:
        output_directory = PROCEDURE_TEMPLATE_DIR
    for index, protocol_params in protocol_params_df.iterrows():
        template = protocol_params["template"]
        protocol = None

        # Filename for the output: "<project>_<zero-padded seq_num>"
        filename_prefix = "_".join(
            [
                protocol_params["project_name"],
                "{:06d}".format(protocol_params["seq_num"]),
            ]
        )

        if ".000" in template:  # Extension for maccor procedure files
            template_fullpath = os.path.join(PROCEDURE_TEMPLATE_DIR, template)
            # Template step count selects which generator applies below.
            template_length = template_detection(template_fullpath)
            if "diagnostic_parameter_set" in protocol_params:  # For parameters include diagnostics load those values
                diag_params_df = pd.read_csv(
                    os.path.join(PROCEDURE_TEMPLATE_DIR, "PreDiag_parameters - DP.csv")
                )
                diagnostic_params = diag_params_df[
                    diag_params_df["diagnostic_parameter_set"]
                    == protocol_params["diagnostic_parameter_set"]
                ].squeeze()

            if template_length == 23 and template == "EXP.000":  # length and name for initial procedure files
                protocol = Procedure.from_exp(
                    **protocol_params[["cutoff_voltage", "charge_rate", "discharge_rate"]]
                )
            elif template_length == 72:  # length for V1 and V1 diagnostic templates without ending diagnostics
                protocol = Procedure.from_regcyclev2(protocol_params)
                protocol.add_procedure_diagcyclev2(
                    protocol_params["capacity_nominal"], diagnostic_params
                )
            elif template_length == 96:  # template length for diagnostic type cycling
                mwf_dir = os.path.join(output_directory, "mwf_files")
                if protocol_params["project_name"] == "RapidC":  # Project with charging waveform
                    waveform_name = insert_charging_parametersv1(protocol_params,
                                                                 waveform_directory=mwf_dir)
                    protocol = Procedure.generate_procedure_chargingv1(index,
                                                                       protocol_params,
                                                                       waveform_name,
                                                                       template=template_fullpath)
                elif protocol_params["project_name"] == "Drive":  # Project with discharging waveform
                    waveform_name = insert_driving_parametersv1(protocol_params,
                                                                waveform_directory=mwf_dir)
                    protocol = Procedure.generate_procedure_drivingv1(index,
                                                                      protocol_params,
                                                                      waveform_name,
                                                                      template=template_fullpath)
                else:  # Use the default parameterization for PreDiag/Prediction Diagnostic projects
                    protocol = Procedure.generate_procedure_regcyclev3(index,
                                                                       protocol_params,
                                                                       template=template_fullpath)
                protocol.generate_procedure_diagcyclev3(
                    protocol_params["capacity_nominal"], diagnostic_params
                )
            else:  # Case where its not possible to match the procedure template
                failure = {
                    "comment": "Unable to find template: " + template,
                    "error": "Not Found",
                }
                file_generation_failures.append(failure)
                warnings.warn("Unsupported file template {}, skipping.".format(template))
                result = "error"
                continue

            filename = "{}.000".format(filename_prefix)
            filename = os.path.join(output_directory, "procedures", filename)
        elif ".mps" in template and template == "formationV1.mps":  # biologic settings template and formation project
            protocol = Settings.from_file(os.path.join(BIOLOGIC_TEMPLATE_DIR, template))
            protocol = protocol.formation_protocol_bcs(protocol_params)
            filename = "{}.mps".format(filename_prefix)
            filename = os.path.join(output_directory, "settings", filename)
        elif ".sdu" in template:  # No schedule file templates implemented
            failure = {
                "comment": "Schedule file generation is not yet implemented",
                "error": "Not Implemented"
            }
            file_generation_failures.append(failure)
            logger.warning("Schedule file generation not yet implemented", extra=s)
            result = "error"
            continue
        else:  # Unable to match to any known template format
            failure = {
                "comment": "Unable to find template: " + template,
                "error": "Not Found",
            }
            file_generation_failures.append(failure)
            warnings.warn("Unsupported file template {}, skipping.".format(template))
            result = "error"
            continue

        logger.info(filename, extra=s)
        protocol.to_file(filename)
        successfully_generated_files.append(filename)
        names.append(filename_prefix + "_")

    # This block of code produces the file containing all of the run file
    # names produced in this function call. This is to make starting tests easier
    _, namefile = os.path.split(csv_filename)
    namefile = namefile.split("_")[0] + "_names_"
    namefile = namefile + datetime.datetime.now().strftime("%Y%m%d_%H%M") + ".csv"

    names_dir = os.path.join(output_directory, "names")
    os.makedirs(names_dir, exist_ok=True)

    with open(os.path.join(names_dir, namefile), "w", newline="") as outputfile:
        wr = csv.writer(outputfile)
        for name in names:
            wr.writerow([name])
        # Redundant: the `with` block closes the file on exit.
        outputfile.close()

    num_generated_files = len(successfully_generated_files)
    num_generation_failures = len(file_generation_failures)
    num_files = num_generated_files + num_generation_failures

    message = {
        "comment": "Generated {} of {} protocols".format(num_generated_files, num_files),
        "error": ""
    }
    if not result:
        result = "success"
    else:
        message["error"] = "Failed to generate {} of {} protocols".format(num_generation_failures,
                                                                          num_files)
        logger.error(message["error"])

    return successfully_generated_files, file_generation_failures, result, message