def main(): """ Main function of this module, takes in arguments of an input and output filename corresponding to featurized run data and creates a predictor object output for analysis/ML processing """ # Parse args and construct initial cycler run logger.info("starting", extra=s) logger.info("Running version=%s", __version__, extra=s) try: args = docopt(__doc__) input_json = args["INPUT_JSON"] if args["--fit"]: print( process_file_list_from_json( input_json, predict_only=False, model_dir=MODEL_DIR ), end="", ) else: print(process_file_list_from_json(input_json, model_dir=MODEL_DIR), end="") except Exception as e: logger.error(str(e), extra=s) raise e logger.info("finish", extra=s) return None
def get_protocol_parameters(filepath, parameters_path="data-share/raw/parameters"):
    """
    Helper function to get the project parameters for a file, given the filename.

    Args:
        filepath (str): full path to the file
        parameters_path (str): location to look for parameter files

    Returns:
        pandas.DataFrame: single row DataFrame corresponding to the parameters for this file
        pandas.DataFrame: DataFrame with all of the parameters for the project
    """
    project_name_list = get_project_sequence(filepath)
    project_name = project_name_list[0]
    path = os.path.join(os.environ.get("BEEP_PROCESSING_DIR", "/"), parameters_path)
    project_parameter_files = glob(os.path.join(path, project_name + "*"))
    assert len(project_parameter_files) <= 1, (
        "Found too many parameter files for: " + project_name
    )

    if len(project_parameter_files) == 1:
        df = pd.read_csv(project_parameter_files[0])
        parameter_row = df[df.seq_num == int(project_name_list[1])]
        if parameter_row.empty:
            logger.error("Unable to get project parameters for: %s", filepath, extra=s)
            parameter_row = None
            df = None
    else:
        parameter_row = None
        df = None

    return parameter_row, df
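# A minimal usage sketch for get_protocol_parameters. The file path below is
# hypothetical; it assumes BEEP_PROCESSING_DIR is set and that a matching
# parameter CSV for the project exists under data-share/raw/parameters.
def _example_get_protocol_parameters():
    example_file = "/data-share/raw/cycler_files/PreDiag_000287_000128.csv"  # hypothetical
    parameter_row, project_df = get_protocol_parameters(example_file)
    if parameter_row is not None:
        # parameter_row is the single row for this file; project_df holds the
        # full parameter table for the project
        print(parameter_row.to_dict("records")[0])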
def dummy(ctx, test):
    """Dummy command that logs a message at every level, optionally raises an
    error, and writes an example status JSON."""
    logger.debug(f"Test msg: {test}")
    logger.info("Info msg")
    logger.warning("Warning message")
    logger.error("Error message")
    logger.critical("CRITICAL MESSAGE!")

    if test == "throw_error":
        raise ValueError("Some error!")

    dumpfn({"example": "status"}, ctx.obj.output_status_json)
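# A minimal sketch invoking dummy() with a stand-in context object; in the
# real CLI, ctx would be supplied by the framework (the ctx/ctx.obj pattern
# suggests click). If dummy is wrapped as a click command, invoke it through
# click's CliRunner or ctx.invoke rather than calling it directly.
# SimpleNamespace and the output path here are purely illustrative.
from types import SimpleNamespace

def _example_dummy():
    ctx = SimpleNamespace(obj=SimpleNamespace(output_status_json="/tmp/status.json"))  # hypothetical
    dummy(ctx, "hello")  # logs at every level, then writes {"example": "status"}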
def main():
    """Main function for running validation on a list of files from a JSON input."""
    logger.info('starting', extra=s)
    logger.info('Running version=%s', __version__, extra=s)
    try:
        args = docopt(__doc__)
        input_json = args['INPUT_JSON']
        print(validate_file_list_from_json(input_json), end="")
    except Exception as e:
        logger.error(str(e), extra=s)
        raise e
    logger.info('finish', extra=s)
    return None
def main(): """Main function for the script""" logger.info("starting", extra=s) logger.info("Running version=%s", __version__, extra=s) try: args = docopt(__doc__) input_json = args["INPUT_JSON"] print(process_csv_file_list_from_json(input_json), end="") except Exception as e: logger.error(str(e), extra=s) raise e logger.info("finish", extra=s) return None
def main(): """Main function of this module, takes in arguments of an input and output filename and uses the input file to create a structured data output for analysis/ML processing. """ logger.info("starting", extra=SERVICE_CONFIG) logger.info("Running version=%s", __version__, extra=SERVICE_CONFIG) try: args = docopt(__doc__) input_json = args["INPUT_JSON"] print(process_file_list_from_json(input_json)) except Exception as e: logger.error(str(e), extra=SERVICE_CONFIG) raise e logger.info("finish", extra=SERVICE_CONFIG) return None
def main(): """ Main function for running of this module as a script Returns: (None) """ logger.info("starting", extra=s) logger.info("Running version=%s", __version__, extra=s) try: args = docopt(__doc__) input_json = args["INPUT_JSON"] print(validate_file_list_from_json(input_json), end="") except Exception as e: logger.error(str(e), extra=s) raise e logger.info("finish", extra=s) return None
def main(): """ Main function of this module, takes in arguments of an input and output filename corresponding to structured cycler run data and creates a predictor object output for analysis/ML processing Returns: None """ # Parse args and construct initial cycler run logger.info('starting', extra=s) logger.info('Running version=%s', __version__, extra=s) try: args = docopt(__doc__) input_json = args['INPUT_JSON'] print(process_file_list_from_json(input_json), end="") except Exception as e: logger.error(str(e), extra=s) raise e logger.info('finish', extra=s) return None
def generate_protocol_files_from_csv(csv_filename, output_directory=None):
    """
    Generates a set of protocol files from csv filename input by
    reading protocol file input corresponding to each line of the csv file.
    Also writes a csv file listing the names of the generated files.

    Args:
        csv_filename (str): CSV containing protocol file parameters.
        output_directory (str): directory in which to place the output files
    """
    # Read csv file
    protocol_params_df = pd.read_csv(csv_filename)

    successfully_generated_files = []
    file_generation_failures = []
    names = []
    result = ""
    message = {"comment": "", "error": ""}
    if output_directory is None:
        output_directory = PROCEDURE_TEMPLATE_DIR

    for index, protocol_params in protocol_params_df.iterrows():
        template = protocol_params["template"]
        protocol = None

        # Filename for the output
        filename_prefix = "_".join(
            [
                protocol_params["project_name"],
                "{:06d}".format(protocol_params["seq_num"]),
            ]
        )

        if ".000" in template:
            # Extension for maccor procedure files
            template_fullpath = os.path.join(PROCEDURE_TEMPLATE_DIR, template)
            template_length = template_detection(template_fullpath)
            if "diagnostic_parameter_set" in protocol_params:
                # For parameters that include diagnostics, load those values
                diag_params_df = pd.read_csv(
                    os.path.join(PROCEDURE_TEMPLATE_DIR, "PreDiag_parameters - DP.csv")
                )
                diagnostic_params = diag_params_df[
                    diag_params_df["diagnostic_parameter_set"]
                    == protocol_params["diagnostic_parameter_set"]
                ].squeeze()

            if template_length == 23 and template == "EXP.000":
                # Length and name for initial procedure files
                protocol = Procedure.from_exp(
                    **protocol_params[["cutoff_voltage", "charge_rate", "discharge_rate"]]
                )
            elif template_length == 72:
                # Length for V1 and V1 diagnostic templates without ending diagnostics
                protocol = Procedure.from_regcyclev2(protocol_params)
                protocol.add_procedure_diagcyclev2(
                    protocol_params["capacity_nominal"], diagnostic_params
                )
            elif template_length == 96:
                # Template length for diagnostic type cycling
                mwf_dir = os.path.join(output_directory, "mwf_files")
                if protocol_params["project_name"] == "RapidC":
                    # Project with charging waveform
                    waveform_name = insert_charging_parametersv1(
                        protocol_params, waveform_directory=mwf_dir
                    )
                    protocol = Procedure.generate_procedure_chargingv1(
                        index, protocol_params, waveform_name, template=template_fullpath
                    )
                elif protocol_params["project_name"] == "Drive":
                    # Project with discharging waveform
                    waveform_name = insert_driving_parametersv1(
                        protocol_params, waveform_directory=mwf_dir
                    )
                    protocol = Procedure.generate_procedure_drivingv1(
                        index, protocol_params, waveform_name, template=template_fullpath
                    )
                else:
                    # Use the default parameterization for PreDiag/Prediction Diagnostic projects
                    protocol = Procedure.generate_procedure_regcyclev3(
                        index, protocol_params, template=template_fullpath
                    )
                protocol.generate_procedure_diagcyclev3(
                    protocol_params["capacity_nominal"], diagnostic_params
                )
            else:
                # Case where it is not possible to match the procedure template
                failure = {
                    "comment": "Unable to find template: " + template,
                    "error": "Not Found",
                }
                file_generation_failures.append(failure)
                warnings.warn("Unsupported file template {}, skipping.".format(template))
                result = "error"
                continue

            filename = "{}.000".format(filename_prefix)
            filename = os.path.join(output_directory, "procedures", filename)
        elif ".mps" in template and template == "formationV1.mps":
            # Biologic settings template and formation project
            protocol = Settings.from_file(os.path.join(BIOLOGIC_TEMPLATE_DIR, template))
            protocol = protocol.formation_protocol_bcs(protocol_params)
            filename = "{}.mps".format(filename_prefix)
            filename = os.path.join(output_directory, "settings", filename)
        elif ".sdu" in template:
            # No schedule file templates implemented
            failure = {
                "comment": "Schedule file generation is not yet implemented",
                "error": "Not Implemented",
            }
            file_generation_failures.append(failure)
            logger.warning("Schedule file generation not yet implemented", extra=s)
            result = "error"
            continue
        else:
            # Unable to match to any known template format
            failure = {
                "comment": "Unable to find template: " + template,
                "error": "Not Found",
            }
            file_generation_failures.append(failure)
            warnings.warn("Unsupported file template {}, skipping.".format(template))
            result = "error"
            continue

        logger.info(filename, extra=s)
        protocol.to_file(filename)
        successfully_generated_files.append(filename)
        names.append(filename_prefix + "_")

    # This block of code produces the file containing all of the run file
    # names produced in this function call. This is to make starting tests easier
    _, namefile = os.path.split(csv_filename)
    namefile = namefile.split("_")[0] + "_names_"
    namefile = namefile + datetime.datetime.now().strftime("%Y%m%d_%H%M") + ".csv"

    names_dir = os.path.join(output_directory, "names")
    os.makedirs(names_dir, exist_ok=True)

    with open(os.path.join(names_dir, namefile), "w", newline="") as outputfile:
        wr = csv.writer(outputfile)
        for name in names:
            wr.writerow([name])

    num_generated_files = len(successfully_generated_files)
    num_generation_failures = len(file_generation_failures)
    num_files = num_generated_files + num_generation_failures

    message = {
        "comment": "Generated {} of {} protocols".format(num_generated_files, num_files),
        "error": "",
    }
    if not result:
        result = "success"
    else:
        message["error"] = "Failed to generate {} of {} protocols".format(
            num_generation_failures, num_files
        )
        logger.error(message["error"])

    return successfully_generated_files, file_generation_failures, result, message