def test_console(self):
    """
    End-to-end console test: chains the console entry points by feeding the
    JSON output of each stage into the next, essentially simulating
    >>> collate | validate | structure | featurize | run_model
    """
    # The first stage takes no JSON payload.
    payload = subprocess.check_output("collate", shell=True).decode('utf-8')

    # Each subsequent stage consumes the previous stage's JSON output.
    for stage in ("validate", "structure", "featurize"):
        previous = json.loads(payload)
        paths = previous['file_list']  # list of file paths, e.g. ['path/test1.csv', 'path/test2.csv']
        stage_input = {
            "mode": self.events_mode,             # mode run|test|events_off
            "file_list": paths,                   # list of file paths
            'run_list': list(range(len(paths))),  # list of run_ids [0, 1]
        }
        if stage == "structure":
            # The structuring stage additionally requires per-file validity
            # flags produced by validation, e.g. ['valid', 'invalid'].
            stage_input["validity"] = previous['validity']
        payload = subprocess.check_output(
            "{} {}".format(stage, os_format(json.dumps(stage_input))),
            shell=True).decode('utf-8')

    # Fitting console test is currently disabled. It would consume the
    # featurize output exactly like the stages above, then run:
    #   subprocess.check_output("run_model {}".format(fitting_input), shell=True)

    # Validate output files
    self._check_result_file_validity()
def test_simple_conversion(self):
    """Run the `structure` console command on a single valid file and check
    that a loadable ProcessedCyclerRun structure file is produced."""
    with ScratchDir('.'):
        # Point the pipeline at the scratch directory.
        os.environ['BEEP_PROCESSING_DIR'] = os.getcwd()

        # The structure step expects this output tree to already exist.
        os.mkdir("data-share")
        os.mkdir(os.path.join("data-share", "structure"))

        # Minimal single-file request for the structure console command.
        request = {
            "mode": self.events_mode,
            "file_list": [self.arbin_file],
            'run_list': [0],
            "validity": ['valid'],
        }
        command = "structure {}".format(os_format(json.dumps(request)))
        self.assertEqual(subprocess.check_call(command, shell=True), 0)

        print(os.listdir(os.path.join("data-share", "structure")))
        structured = loadfn(os.path.join(
            "data-share", "structure",
            "2017-12-04_4_65C-69per_6C_CH29_structure.json"))
        self.assertIsInstance(structured, ProcessedCyclerRun)
def test_console_script(self):
    """
    Smoke-test the `generate_protocol` console script on a parameter CSV.

    Runs the script inside a scratch directory and asserts that the
    expected number of procedure files is generated.
    """
    csv_file = os.path.join(TEST_FILE_DIR, "parameter_test.csv")

    # Test script functionality
    with ScratchDir('.'):
        # Set BEEP_ROOT directory to the scratch dir
        os.environ['BEEP_ROOT'] = os.getcwd()
        procedures_path = os.path.join("data-share", "protocols", "procedures")
        names_path = os.path.join("data-share", "protocols", "names")
        makedirs_p(procedures_path)
        makedirs_p(names_path)

        # Test the script
        json_input = json.dumps(
            {"file_list": [csv_file], "mode": self.events_mode})
        # Use subprocess.check_call (not os.system) so a non-zero exit
        # status fails the test immediately instead of being silently
        # ignored; this also matches how the other console tests invoke
        # their commands.
        subprocess.check_call(
            "generate_protocol {}".format(os_format(json_input)), shell=True)
        self.assertEqual(len(os.listdir(procedures_path)), 3)