def _load_datasets(self): """ Load five persons data for each test hpo """ # expected_tables is for testing output # it maps table name to list of expected records ex: "unioned_ehr_visit_occurrence" -> [{}, {}, ...] expected_tables = dict() running_jobs = [] for cdm_table in common.CDM_TABLES: cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH, cdm_table + '.csv') output_table = ehr_union.output_table_for(cdm_table) expected_tables[output_table] = [] for hpo_id in self.hpo_ids: # upload csv into hpo bucket bucket = gcs_utils.get_hpo_bucket(hpo_id) if os.path.exists(cdm_file_name): test_util.write_cloud_file(bucket, cdm_file_name) csv_rows = resources._csv_to_list(cdm_file_name) else: # results in empty table test_util.write_cloud_str(bucket, cdm_table + '.csv', 'dummy\n') csv_rows = [] # load table from csv result = bq_utils.load_cdm_csv(hpo_id, cdm_table) running_jobs.append(result['jobReference']['jobId']) expected_tables[output_table] += list(csv_rows) incomplete_jobs = bq_utils.wait_on_jobs(running_jobs) if len(incomplete_jobs) > 0: message = "Job id(s) %s failed to complete" % incomplete_jobs raise RuntimeError(message) self.expected_tables = expected_tables
def test_copy_five_persons(self, mock_check_cron):
    folder_prefix = 'dummy-prefix-2018-03-22-v1/'
    # upload all five_persons files
    for cdm_file in test_util.FIVE_PERSONS_FILES:
        test_util.write_cloud_file(self.hpo_bucket, cdm_file, prefix=folder_prefix)
        # also upload a second copy under a nested folder prefix
        test_util.write_cloud_file(self.hpo_bucket, cdm_file, prefix=folder_prefix + folder_prefix)

    main.app.testing = True
    with main.app.test_client() as c:
        c.get(test_util.COPY_HPO_FILES_URL)
        prefix = test_util.FAKE_HPO_ID + '/' + self.hpo_bucket + '/' + folder_prefix
        expected_bucket_items = [
            prefix + item.split(os.sep)[-1]
            for item in test_util.FIVE_PERSONS_FILES
        ]
        expected_bucket_items.extend([
            prefix + folder_prefix + item.split(os.sep)[-1]
            for item in test_util.FIVE_PERSONS_FILES
        ])
        list_bucket_result = gcs_utils.list_bucket(gcs_utils.get_drc_bucket())
        actual_bucket_items = [item['name'] for item in list_bucket_result]
        self.assertSetEqual(set(expected_bucket_items), set(actual_bucket_items))
def _load_datasets(self):
    load_jobs = []
    self.expected_tables = dict()
    for cdm_table in common.CDM_TABLES:
        cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH, cdm_table + '.csv')
        result_table = ehr_merge.result_table_for(cdm_table)
        if os.path.exists(cdm_file_name):
            # one copy for chs, the other for pitt
            csv_rows = resources._csv_to_list(cdm_file_name)
            self.expected_tables[result_table] = csv_rows + list(csv_rows)
            test_util.write_cloud_file(self.chs_bucket, cdm_file_name)
            test_util.write_cloud_file(self.pitt_bucket, cdm_file_name)
        else:
            self.expected_tables[result_table] = []
            test_util.write_cloud_str(self.chs_bucket, cdm_table + '.csv', 'dummy\n')
            test_util.write_cloud_str(self.pitt_bucket, cdm_table + '.csv', 'dummy\n')
        chs_load_results = bq_utils.load_cdm_csv(CHS_HPO_ID, cdm_table)
        pitt_load_results = bq_utils.load_cdm_csv(PITT_HPO_ID, cdm_table)
        chs_load_job_id = chs_load_results['jobReference']['jobId']
        pitt_load_job_id = pitt_load_results['jobReference']['jobId']
        load_jobs.append(chs_load_job_id)
        load_jobs.append(pitt_load_job_id)
    incomplete_jobs = bq_utils.wait_on_jobs(load_jobs)
    if len(incomplete_jobs) > 0:
        raise RuntimeError('BigQuery jobs %s failed to complete' % incomplete_jobs)
def _load_dataset(self):
    for cdm_table in common.CDM_TABLES:
        cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH, cdm_table + '.csv')
        if os.path.exists(cdm_file_name):
            test_util.write_cloud_file(self.hpo_bucket, cdm_file_name)
        else:
            test_util.write_cloud_str(self.hpo_bucket, cdm_table + '.csv', 'dummy\n')
        bq_utils.load_cdm_csv(FAKE_HPO_ID, cdm_table)
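# The single-HPO loader above kicks off the BigQuery load jobs without waiting on
# them. A minimal sketch (not part of the original suite; assumes the same return
# shape from load_cdm_csv seen above) of a variant that blocks until the loads
# finish, mirroring the wait_on_jobs pattern used by the multi-HPO _load_datasets
# helpers in these tests:
def _load_dataset_and_wait(self):
    load_jobs = []
    for cdm_table in common.CDM_TABLES:
        cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH, cdm_table + '.csv')
        if os.path.exists(cdm_file_name):
            test_util.write_cloud_file(self.hpo_bucket, cdm_file_name)
        else:
            test_util.write_cloud_str(self.hpo_bucket, cdm_table + '.csv', 'dummy\n')
        load_results = bq_utils.load_cdm_csv(FAKE_HPO_ID, cdm_table)
        load_jobs.append(load_results['jobReference']['jobId'])
    # block until all loads complete, as the other loaders do
    incomplete_jobs = bq_utils.wait_on_jobs(load_jobs)
    if len(incomplete_jobs) > 0:
        raise RuntimeError('BigQuery jobs %s failed to complete' % incomplete_jobs)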
def test_validate_five_persons_success(self, mock_check_cron):
    prefix = 'dummy-prefix-2018-03-22/'
    expected_result_items = resources._csv_to_list(
        test_util.FIVE_PERSONS_SUCCESS_RESULT_CSV)
    json_export_files = self.get_json_export_files(test_util.FAKE_HPO_ID)

    # upload all five_persons files
    for cdm_file in test_util.FIVE_PERSONS_FILES:
        test_util.write_cloud_file(self.hpo_bucket, cdm_file, prefix=prefix)

    expected_tables = [
        'person', 'visit_occurrence', 'condition_occurrence',
        'procedure_occurrence', 'drug_exposure', 'measurement'
    ]
    cdm_files = [table + '.csv' for table in expected_tables]

    main.app.testing = True
    with main.app.test_client() as c:
        c.get(test_util.VALIDATE_HPO_FILES_URL)

        # check the result file was put in bucket
        expected_object_names = cdm_files + common.IGNORE_LIST + json_export_files
        expected_objects = [prefix + item for item in expected_object_names]
        list_bucket_result = gcs_utils.list_bucket(self.hpo_bucket)
        actual_objects = [item['name'] for item in list_bucket_result]
        self.assertSetEqual(set(expected_objects), set(actual_objects))

        # result says file found, parsed, loaded
        actual_result = test_util.read_cloud_file(self.hpo_bucket, prefix + common.RESULT_CSV)
        actual_result_file = StringIO.StringIO(actual_result)
        actual_result_items = resources._csv_file_to_list(actual_result_file)
        expected_result_items.sort()
        actual_result_items.sort()
        self.assertListEqual(expected_result_items, actual_result_items)
        self.assertTrue(
            main.all_required_files_loaded(test_util.FAKE_HPO_ID, folder_prefix=prefix))

        # check tables exist and are clustered as expected
        for table in expected_tables:
            fields_file = os.path.join(resources.fields_path, table + '.json')
            table_id = bq_utils.get_table_id(test_util.FAKE_HPO_ID, table)
            table_info = bq_utils.get_table_info(table_id)
            with open(fields_file, 'r') as fp:
                fields = json.load(fp)
                field_names = [field['name'] for field in fields]
                if 'person_id' in field_names:
                    self.table_has_clustering(table_info)
def test_validate_five_persons_success(self, mock_check_cron):
    prefix = 'dummy-prefix-2018-03-22/'
    expected_result_items = resources._csv_to_list(
        test_util.FIVE_PERSONS_SUCCESS_RESULT_CSV)
    json_export_files = self.get_json_export_files(test_util.FAKE_HPO_ID)

    # upload all five_persons files
    for cdm_file in test_util.FIVE_PERSONS_FILES:
        test_util.write_cloud_file(self.hpo_bucket, cdm_file, prefix=prefix)

    main.app.testing = True
    with main.app.test_client() as c:
        c.get(test_util.VALIDATE_HPO_FILES_URL)

        # check the result file was put in bucket
        expected_bucket_items = common.REQUIRED_FILES + common.IGNORE_LIST + json_export_files
        # keep this test unchanged by including all of the previously required files
        expected_bucket_items = expected_bucket_items + [
            'measurement.csv', 'procedure_occurrence.csv', 'drug_exposure.csv',
            'condition_occurrence.csv', 'visit_occurrence.csv'
        ]
        expected_bucket_items = [prefix + item for item in expected_bucket_items]
        list_bucket_result = gcs_utils.list_bucket(self.hpo_bucket)
        actual_bucket_items = [item['name'] for item in list_bucket_result]
        self.assertSetEqual(set(expected_bucket_items), set(actual_bucket_items))

        # result says file found, parsed, loaded
        actual_result = test_util.read_cloud_file(self.hpo_bucket, prefix + common.RESULT_CSV)
        actual_result_file = StringIO.StringIO(actual_result)
        actual_result_items = resources._csv_file_to_list(actual_result_file)
        expected_result_items.sort()
        actual_result_items.sort()
        self.assertListEqual(expected_result_items, actual_result_items)
        self.assertTrue(
            main.all_required_files_loaded(test_util.FAKE_HPO_ID, folder_prefix=prefix))
def test_five_person_data_retraction(self):
    folder_prefix = 'dummy-prefix-2018-03-22/'
    pid = 17
    expected_result = {}
    for file_path in test_util.FIVE_PERSONS_FILES:
        # generate expected results files
        file_name = file_path.split('/')[-1]
        expected_result[file_name] = []
        with open(file_path) as f:
            for line in f:
                line = line.strip()
                # keep only rows whose person_id column does not match the retracted pid
                if (file_name in rd.PID_IN_COL1 and rd.get_integer(line.split(",")[0]) != pid) or \
                        (file_name in rd.PID_IN_COL2 and rd.get_integer(line.split(",")[1]) != pid):
                    expected_result[file_name].append(line)
        # write file to cloud for testing
        test_util.write_cloud_file(self.hpo_bucket, file_path, prefix=folder_prefix)

    with mock.patch('__builtin__.raw_input', return_value='Y') as _raw_input:
        retract_result = rd.run_retraction(pid, self.hpo_bucket,
                                           folder=folder_prefix, force=True)

    actual_result = {}
    for file_path in test_util.FIVE_PERSONS_FILES:
        file_name = file_path.split('/')[-1]
        actual_result_contents = test_util.read_cloud_file(
            self.hpo_bucket, folder_prefix + file_name)
        # split into a list of lines and drop the trailing empty string left by the final newline
        actual_result[file_name] = actual_result_contents.split('\n')[:-1]

    for key in expected_result.keys():
        self.assertListEqual(expected_result[key], actual_result[key])

    # metadata for each updated file is returned
    # TODO test that files lacking records for PID are not updated
    self.assertEqual(len(retract_result[folder_prefix]), len(expected_result.keys()))
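# A small helper sketch (not in the original test; name is hypothetical) that captures
# the per-line rule the expected results above are built from: a row survives retraction
# when the pid in the file's person_id column (column 1 or 2, per rd.PID_IN_COL1 /
# rd.PID_IN_COL2) does not match the retracted pid.
def _line_survives_retraction(file_name, line, pid):
    fields = line.split(',')
    if file_name in rd.PID_IN_COL1:
        return rd.get_integer(fields[0]) != pid
    if file_name in rd.PID_IN_COL2:
        return rd.get_integer(fields[1]) != pid
    # files with no person_id column are not considered here
    return False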
def load_test_data(self, hpo_id=None):
    """
    Load test achilles heel results data from a csv file into BigQuery

    :param hpo_id: if specified, prefix to use on the csv test file and bq table,
        otherwise no prefix is used
    :return: contents of the file as a list of objects
    """
    schema_path = os.path.join(resources.fields_path, common.ACHILLES_HEEL_RESULTS + '.json')
    table_id = common.ACHILLES_HEEL_RESULTS
    if hpo_id is not None:
        table_id = bq_utils.get_table_id(hpo_id, common.ACHILLES_HEEL_RESULTS)
    test_file_name = table_id + '.csv'
    test_file_path = os.path.join(test_util.TEST_DATA_PATH, test_file_name)
    test_util.write_cloud_file(self.bucket, test_file_path)
    gcs_path = 'gs://' + self.bucket + '/' + test_file_name
    load_results = bq_utils.load_csv(schema_path, gcs_path, self.app_id,
                                     self.dataset_id, table_id)
    job_id = load_results['jobReference']['jobId']
    bq_utils.wait_on_jobs([job_id])
    return resources._csv_to_list(test_file_path)
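# A minimal usage sketch (hypothetical; not part of the original tests) of the helper
# above: load the HPO-prefixed heel results fixture and keep the returned rows as the
# expected contents of the corresponding BigQuery table.
def test_heel_results_fixture_loaded(self):
    expected_rows = self.load_test_data(hpo_id=test_util.FAKE_HPO_ID)
    # the fixture csv should contain at least one heel result row
    self.assertGreater(len(expected_rows), 0)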
def _load_datasets(self):
    load_jobs = []
    for cdm_table in common.CDM_TABLES:
        cdm_file_name = os.path.join(test_util.FIVE_PERSONS_PATH, cdm_table + '.csv')
        if os.path.exists(cdm_file_name):
            test_util.write_cloud_file(self.chs_bucket, cdm_file_name)
            test_util.write_cloud_file(self.pitt_bucket, cdm_file_name)
        else:
            test_util.write_cloud_str(self.chs_bucket, cdm_table + '.csv', 'dummy\n')
            test_util.write_cloud_str(self.pitt_bucket, cdm_table + '.csv', 'dummy\n')
        chs_load_results = bq_utils.load_cdm_csv(CHS_HPO_ID, cdm_table)
        pitt_load_results = bq_utils.load_cdm_csv(PITT_HPO_ID, cdm_table)
        chs_load_job_id = chs_load_results['jobReference']['jobId']
        pitt_load_job_id = pitt_load_results['jobReference']['jobId']
        load_jobs.append(chs_load_job_id)
        load_jobs.append(pitt_load_job_id)
    incomplete_jobs = bq_utils.wait_on_jobs(load_jobs)
    if len(incomplete_jobs) > 0:
        raise RuntimeError('BigQuery jobs %s failed to complete' % incomplete_jobs)
def test_pii_files_loaded(self, mock_check_cron):
    # tests if pii files are loaded
    folder_prefix = 'dummy-prefix-2018-03-22/'
    expected_result_items = resources._csv_to_list(test_util.PII_FILE_LOAD_RESULT_CSV)
    test_util.write_cloud_file(self.hpo_bucket, test_util.PII_NAME_FILE,
                               prefix=folder_prefix)
    test_util.write_cloud_file(self.hpo_bucket, test_util.PII_MRN_BAD_PERSON_ID_FILE,
                               prefix=folder_prefix)

    main.app.testing = True
    with main.app.test_client() as c:
        c.get(test_util.VALIDATE_HPO_FILES_URL)
        actual_result = test_util.read_cloud_file(
            self.hpo_bucket, folder_prefix + common.RESULT_CSV)
        actual_result_file = StringIO.StringIO(actual_result)
        actual_result_items = resources._csv_file_to_list(actual_result_file)
        # sort in order to compare
        expected_result_items.sort()
        actual_result_items.sort()
        self.assertListEqual(expected_result_items, actual_result_items)