def test_monitor2_images(self):
    """
    Test the POST, GET and DELETE monitor2 images APIs.
    """
    # Upload image stack
    url = _MON_IMAGES_URL
    url = add_url_argument(url, STACK_TYPE, MONITOR2, True)
    url = add_url_argument(url, NAME, 'monitor2_images_name')
    url = add_url_argument(url, DESCRIPTION, 'monitor2 images description')
    response = upload_file(self, _TEST_DIR, url, _MON2_IMAGES, 200)
    image_stack_uuid = response[UUID]

    # Ensure duplicate image stacks cannot be uploaded
    upload_file(self, _TEST_DIR, url, _MON2_IMAGES, 403)

    # Ensure image stack exists in the database and can be retrieved
    response = get_data(self, _MON_IMAGES_URL, 200)
    record_found = False
    for record in response[MONITOR_IMAGES]:
        if record[UUID] == image_stack_uuid:
            record_found = True
    msg = "Monitor image stack %s doesn't exist in the database." % \
          image_stack_uuid
    self.assertTrue(record_found, msg)

    # Delete image stack
    url = add_url_argument(_IMAGES_URL, UUID, image_stack_uuid, True)
    delete_data(self, url, 200)

    # Ensure image stack no longer exists in the database
    response = get_data(self, _MON_IMAGES_URL, 200)
    for record in response[MONITOR_IMAGES]:
        msg = "Monitor image stack %s still exists in the database." % \
              record[UUID]
        self.assertNotEqual(image_stack_uuid, record[UUID], msg)
def test_tags_api(self):
    """
    Test TagsGetFunction, TagsDeleteFunction, and TagsPostFunction.
    """
    report_uuid = str(uuid4())
    _DB_CONNECTOR.insert(RUN_REPORT_COLLECTION,
                         [{UUID: report_uuid, 'name': 'dummy run report'}])

    # Test TagsPostFunction: add tags to a run report
    post_url = _RUN_INFO_TAG_URL
    post_url = add_url_argument(post_url, UUID, report_uuid, True)
    post_url = add_url_argument(post_url, TAGS, ','.join(["BRAF e15", "FFPE"]))
    post_data(self, post_url, 200)

    # Test TagsGetFunction: get tags from all run reports (tags are
    # returned lowercased)
    response = get_data(self, _RUN_INFO_TAG_URL, 200)
    self.assertIn("braf e15", response[TAGS])

    # Test TagsDeleteFunction: delete a tag from a run report
    delete_url = _RUN_INFO_TAG_URL
    delete_url = add_url_argument(delete_url, UUID, report_uuid, True)
    delete_url = add_url_argument(delete_url, TAGS, "ffpe")
    delete_data(self, delete_url, 200)

    response = get_data(self, _RUN_INFO_TAG_URL, 200)
    self.assertIn("braf e15", response[TAGS])
    self.assertNotIn("ffpe", response[TAGS])

    _DB_CONNECTOR.remove(RUN_REPORT_COLLECTION, {UUID: report_uuid})
def test_images(self):
    """
    Test the POST, GET and DELETE images APIs.
    """
    # Upload image stack
    url = _IMAGES_URL
    url = add_url_argument(url, EXP_DEF, 'Beta_24b_p1_V6', True)
    url = add_url_argument(url, NAME, 'golden_run')
    url = add_url_argument(url, DESCRIPTION, 'Short description.')
    response = upload_file(self, _TEST_DIR, url, _PNG_IMAGES, 200)
    image_stack_uuid = response[UUID]

    # Ensure duplicate image stacks cannot be uploaded
    upload_file(self, _TEST_DIR, url, _PNG_IMAGES, 403)

    # Ensure image stack exists in the database and can be retrieved
    response = get_data(self, _IMAGES_URL, 200)
    record_found = False
    for record in response[IMAGES]:
        if record[UUID] == image_stack_uuid:
            record_found = True
    msg = "Image stack %s doesn't exist in the database." % image_stack_uuid
    self.assertTrue(record_found, msg)

    # Delete image stack
    url = add_url_argument(_IMAGES_URL, UUID, image_stack_uuid, True)
    delete_data(self, url, 200)

    # Ensure image stack no longer exists in the database
    response = get_data(self, _IMAGES_URL, 200)
    for record in response[IMAGES]:
        msg = "Image stack %s still exists in the database." % record[UUID]
        self.assertNotEqual(image_stack_uuid, record[UUID], msg)
def exercise_file_upload_api(self, url, filename):
    response_key = url.split("/")[-1]

    # Test successful file upload
    response = upload_file(self, _TEST_DIR, url, filename, 200)
    uuid = response[UUID]

    # Test error code 403: file already exists
    upload_file(self, _TEST_DIR, url, filename, 403)

    # Test error code 415: file is not a valid FASTA file
    upload_file(self, _TEST_DIR, url, _INVALID_FASTA_FILENAME, 415)

    # Test successful retrieval of the uploaded file
    response = get_data(self, url, 200)
    retrieved_uuid = response[response_key][0][UUID]
    msg = "Expected uuid (%s) doesn't match observed uuid (%s) for %s" % \
          (uuid, retrieved_uuid, url)
    self.assertEqual(uuid, retrieved_uuid, msg)

    # Test successful deletion of the uploaded file
    delete_data(self, url + "?uuid=%s" % uuid, 200)

    # Test unsuccessful deletion of a non-existent file
    delete_data(self, url + "?uuid=%s" % str(uuid4()), 404)
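# A minimal usage sketch (hypothetical test method; it assumes the
# _PROBES_URL and _PROBES_FILENAME constants used by test_absorption
# below): each file-upload endpoint gets a thin test that simply
# delegates to the helper above.
def test_probes_upload_api(self):
    self.exercise_file_upload_api(_PROBES_URL, _PROBES_FILENAME)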
def test_hdf5_process(self):
    # Construct url
    url = self.construct_process_url(_HDF5_DATASET,
                                     'test_HDF5_pa_process_job')

    # Submit process job
    response = post_data(self, url, 200)
    process_uuid = response[_PROCESS][0][UUID]

    # Test that submitting two jobs with the same name fails and returns
    # the appropriate error code.
    post_data(self, url, 403)

    # Poll until the job leaves the running state
    job_details = None
    running = True
    while running:
        time.sleep(10)
        response = get_data(self, _PROCESS_URL, 200)
        for job in response[_PROCESS]:
            if process_uuid == job[UUID]:
                job_details = job
        running = job_details[_STATUS] == 'running'

    # Ensure the HDF5 dataset was converted to a primary analysis file
    self.assertTrue(_RESULT in job_details,
                    'Unable to locate primary analysis file')
    if _RESULT in job_details:
        analysis_txt_path = job_details[_RESULT]
        msg = 'Expected HDF5 to be converted to file %s, but this file ' \
              'was not found.' % analysis_txt_path
        self.assertTrue(os.path.exists(analysis_txt_path), msg)

    # Ensure the conversion created a config file
    self.assertTrue(_CONFIG in job_details, 'Unable to locate config file')
    if _CONFIG in job_details:
        config_path = job_details[_CONFIG]
        msg = 'Expected HDF5 conversion to create config file %s, but ' \
              'this file was not found.' % config_path
        self.assertTrue(os.path.exists(config_path), msg)

    error = ""
    if 'error' in job_details:
        error = job_details['error']
    msg = "Expected pa process job status succeeded, but found %s. " \
          "Error: %s" % (job_details[_STATUS], error)
    self.assertEqual(job_details[_STATUS], "succeeded", msg)

    # Delete process job
    delete_data(self, _PROCESS_URL + "?uuid=%s" % process_uuid, 200)

    # Ensure job no longer exists in the database
    response = get_data(self, _PROCESS_URL, 200)
    for job in response[_PROCESS]:
        msg = "PA process job %s still exists in database." % process_uuid
        self.assertNotEqual(process_uuid, job[UUID], msg)
def test_bin_images(self):
    # Upload image stack
    url = _IMAGES_URL
    url = add_url_argument(url, EXP_DEF, 'Beta_24b_p1_V6', True)
    url = add_url_argument(url, NAME, 'bin_images')
    url = add_url_argument(url, DESCRIPTION, 'Binary image stack.')
    response = upload_file(self, _TEST_DIR, url, _BIN_IMAGES, 200)
    msg = "Expected number of images (%d) not equal to observed (%d)." % \
          (4, response[NUM_IMAGES])
    self.assertEqual(response[NUM_IMAGES], 4, msg)

    # Delete image stack
    url = add_url_argument(_IMAGES_URL, UUID, response[UUID], True)
    delete_data(self, url, 200)
def test_absorption(self):
    # Upload targets and probes files
    response = upload_file(self, _TEST_DIR, _PROBES_URL, _PROBES_FILENAME, 200)
    probes_uuid = response[UUID]
    response = upload_file(self, _TEST_DIR, _TARGETS_URL, _TARGETS_FILENAME, 200)
    targets_uuid = response[UUID]

    # Post absorption job
    url = _ABSORPTION_URL + "?probes=%s&targets=%s&job_name=%s&%s=%s" % \
          (probes_uuid, targets_uuid, _JOB_NAME, STRICT, _STRICT)
    response = post_data(self, url, 200)
    abs_job_uuid = response[UUID]

    # Poll until the job leaves the running state
    running = True
    job_details = None
    while running:
        time.sleep(10)
        response = get_data(self, _ABSORPTION_URL, 200)
        for job in response[_ABSORPTION]:
            if abs_job_uuid == job[UUID]:
                job_details = job
        running = job_details[_STATUS] == 'running'

    # Copy result file to cwd for bamboo to ingest as an artifact
    absorption_path = None
    if _RESULT in job_details:
        absorption_path = job_details[_RESULT]
    if absorption_path and os.path.isfile(absorption_path):
        shutil.copy(absorption_path, "observed_absorption.txt")

    # Clean up by removing targets and probes files
    delete_data(self, _PROBES_URL + "?uuid=%s" % probes_uuid, 200)
    delete_data(self, _TARGETS_URL + "?uuid=%s" % targets_uuid, 200)

    msg = "Expected absorption job status succeeded, but found: %s" % \
          job_details[_STATUS]
    self.assertEqual(job_details[_STATUS], "succeeded", msg)

    exp_result_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                   _EXPECTED_RESULT_FILENAME)
    msg = "Observed result (%s) doesn't match expected result (%s)." % \
          (absorption_path, exp_result_path)
    self.assertTrue(filecmp.cmp(exp_result_path, absorption_path), msg)

    # Delete absorption job
    delete_data(self, _ABSORPTION_URL + "?uuid=%s" % abs_job_uuid, 200)

    # Ensure job no longer exists in the database
    response = get_data(self, _ABSORPTION_URL, 200)
    for job in response[_ABSORPTION]:
        msg = "Absorption job %s still exists in database." % abs_job_uuid
        self.assertNotEqual(abs_job_uuid, job[UUID], msg)
def test_replay(self):
    """
    Test creating a replay image stack from existing ham and monitor
    image stacks via the POST replay images API.
    """
    # Upload ham image stack
    ham_url = _IMAGES_URL
    ham_url = add_url_argument(ham_url, EXP_DEF, 'Beta_24b_p1_V6', True)
    ham_url = add_url_argument(ham_url, NAME, 'golden_run')
    ham_url = add_url_argument(ham_url, DESCRIPTION, 'Short description.')
    ham_response = upload_file(self, _TEST_DIR, ham_url, _HAM_IMAGES, 200)
    ham_uuid = ham_response[UUID]

    # Upload monitor 1 image stack
    mon1_url = _MON_IMAGES_URL
    mon1_url = add_url_argument(mon1_url, STACK_TYPE, MONITOR1, True)
    mon1_url = add_url_argument(mon1_url, NAME, 'monitor1_images_name')
    mon1_url = add_url_argument(mon1_url, DESCRIPTION, 'monitor1 images description')
    mon1_response = upload_file(self, _TEST_DIR, mon1_url, _MON1_IMAGES, 200)
    mon1_uuid = mon1_response[UUID]

    # Upload monitor 2 image stack
    mon2_url = _MON_IMAGES_URL
    mon2_url = add_url_argument(mon2_url, STACK_TYPE, MONITOR2, True)
    mon2_url = add_url_argument(mon2_url, NAME, 'monitor2_images_name')
    mon2_url = add_url_argument(mon2_url, DESCRIPTION, 'monitor2 images description')
    mon2_response = upload_file(self, _TEST_DIR, mon2_url, _MON2_IMAGES, 200)
    mon2_uuid = mon2_response[UUID]

    # Create a replay image stack from the existing stacks
    replay_url = _REPLAY_IMAGES_URL
    replay_url = add_url_argument(replay_url, NAME, 'replay_images_name', True)
    replay_url = add_url_argument(replay_url, HAM_NAME, 'golden_run')
    replay_url = add_url_argument(replay_url, MON1_NAME, 'monitor1_images_name')
    replay_url = add_url_argument(replay_url, MON2_NAME, 'monitor2_images_name')
    replay_url = add_url_argument(replay_url, DESCRIPTION, 'replay images description')
    replay_response = post_data(self, replay_url, 200)
    replay_uuid = replay_response[UUID]

    # Ensure a replay image stack with the same name cannot be added
    replay_url = _REPLAY_IMAGES_URL
    replay_url = add_url_argument(replay_url, NAME, 'replay_images_name', True)
    replay_url = add_url_argument(replay_url, HAM_NAME, 'golden_run')
    replay_url = add_url_argument(replay_url, MON1_NAME, 'monitor1_images_name')
    replay_url = add_url_argument(replay_url, MON2_NAME, 'monitor2_images_name')
    replay_url = add_url_argument(replay_url, DESCRIPTION, 'replay images description')
    post_data(self, replay_url, 403)

    # Ensure a replay image stack comprised of the same ham/monitor image
    # stacks cannot be added under a different name
    replay_url = _REPLAY_IMAGES_URL
    replay_url = add_url_argument(replay_url, NAME, 'different_replay_images_name', True)
    replay_url = add_url_argument(replay_url, HAM_NAME, 'golden_run')
    replay_url = add_url_argument(replay_url, MON1_NAME, 'monitor1_images_name')
    replay_url = add_url_argument(replay_url, MON2_NAME, 'monitor2_images_name')
    replay_url = add_url_argument(replay_url, DESCRIPTION, 'replay images description')
    post_data(self, replay_url, 403)

    # Delete image stacks
    for stack_uuid in (ham_uuid, mon1_uuid, mon2_uuid, replay_uuid):
        url = add_url_argument(_IMAGES_URL, UUID, stack_uuid, True)
        delete_data(self, url, 200)
def test_full_analysis_exploratory(self):
    """
    Test the POST and DELETE full analysis APIs with an exploratory
    experiment definition.
    """
    # Construct url
    url = _FULL_ANALYSIS_URL
    url = add_url_argument(url, PA_DATA_SOURCE, _ARCHIVE_NAME, True)
    url = add_url_argument(url, JOB_NAME, _FA_EXPLORATORY_JOBNAME)
    url = add_url_argument(url, EXP_DEF, _EXP_DEF_EXPLORATORY_NAME)
    url = add_url_argument(url, OFFSETS, _OFFSETS)
    url = add_url_argument(url, UI_THRESHOLD, _UI_THRESHOLD)
    url = add_url_argument(url, MAX_UNINJECTED_RATIO, _MAX_UI_RATIO)
    url = add_url_argument(url, AC_TRAINING_FACTOR, _AC_TRAINING_FACTOR)
    url = add_url_argument(url, CTRL_THRESH, _CTRL_THRESH)
    url = add_url_argument(url, REQUIRED_DROPS, _REQUIRED_DROPS)

    # Submit full analysis job
    response = post_data(self, url, 200)
    fa_uuid = response[FULL_ANALYSIS][0][UUID]

    # Test that submitting two jobs with the same name fails and returns
    # the appropriate error code.
    post_data(self, url, 403)

    # Poll until the job leaves the running state
    job_details = None
    running = True
    while running:
        time.sleep(10)
        response = get_data(self, _FULL_ANALYSIS_URL, 200)
        for job in response[FULL_ANALYSIS]:
            if fa_uuid == job[UUID]:
                job_details = job
        running = job_details[STATUS] == 'running'

    # Ensure each analysis document exists and reports no error
    for doc_key in (PA_DOCUMENT, ID_DOCUMENT, AC_DOCUMENT, EP_DOCUMENT):
        msg = "%s doesn't exist in job_details." % doc_key
        self.assertTrue(doc_key in job_details, msg)
        if ERROR in job_details[doc_key]:
            self.fail(job_details[doc_key][ERROR])

    msg = "%s doesn't exist in job_details." % UNIFIED_PDF
    self.assertTrue(UNIFIED_PDF in job_details, msg)
    msg = "%s doesn't exist in job_details." % UNIFIED_PDF_URL
    self.assertTrue(UNIFIED_PDF_URL in job_details, msg)
    if ERROR in job_details:
        self.fail(job_details[ERROR])

    # Delete full analysis job
    delete_url = add_url_argument(_FULL_ANALYSIS_URL, UUID, fa_uuid, True)
    delete_data(self, delete_url, 200)

    # Ensure job no longer exists in the database
    response = get_data(self, _FULL_ANALYSIS_URL, 200)
    for job in response[FULL_ANALYSIS]:
        msg = "Full analysis job %s still exists in database." % fa_uuid
        self.assertNotEqual(fa_uuid, job[UUID], msg)
def test_full_analysis(self):
    """
    Test the POST, GET and DELETE full analysis APIs.
    """
    # Run these to ensure that the instance of the mongo database used by
    # bamboo is updated with the latest image stacks and HDF5 archives
    get_data(self, _ARCHIVES_URL + '?refresh=true&format=json', 200)
    get_data(self, _HDF5S_URL + '?refresh=true&format=json', 200)

    # Construct url
    url = _FULL_ANALYSIS_URL
    url = add_url_argument(url, PA_DATA_SOURCE, _ARCHIVE_NAME, True)
    url = add_url_argument(url, JOB_NAME, _FA_HOTSPOT_JOBNAME)
    url = add_url_argument(url, EXP_DEF, _EXP_DEF_HOTSPOT_NAME)
    url = add_url_argument(url, OFFSETS, _OFFSETS)
    url = add_url_argument(url, UI_THRESHOLD, _UI_THRESHOLD)
    url = add_url_argument(url, MAX_UNINJECTED_RATIO, _MAX_UI_RATIO)
    url = add_url_argument(url, AC_TRAINING_FACTOR, _AC_TRAINING_FACTOR)
    url = add_url_argument(url, CTRL_THRESH, _CTRL_THRESH)
    url = add_url_argument(url, REQUIRED_DROPS, _REQUIRED_DROPS)

    # Submit full analysis job
    response = post_data(self, url, 200)
    fa_uuid = response[FULL_ANALYSIS][0][UUID]

    # Test that submitting two jobs with the same name fails and returns
    # the appropriate error code.
    post_data(self, url, 403)

    # Poll until the job leaves the running state
    job_details = None
    running = True
    while running:
        time.sleep(10)
        response = get_data(self, _FULL_ANALYSIS_URL, 200)
        for job in response[FULL_ANALYSIS]:
            if fa_uuid == job[UUID]:
                job_details = job
        running = job_details[STATUS] == 'running'

    # Ensure each analysis document exists and reports no error
    for doc_key in (PA_DOCUMENT, ID_DOCUMENT, AC_DOCUMENT, GT_DOCUMENT):
        msg = "%s doesn't exist in job_details." % doc_key
        self.assertTrue(doc_key in job_details, msg)
        if ERROR in job_details[doc_key]:
            self.fail(job_details[doc_key][ERROR])

    msg = "%s doesn't exist in job_details." % UNIFIED_PDF
    self.assertTrue(UNIFIED_PDF in job_details, msg)
    msg = "%s doesn't exist in job_details." % UNIFIED_PDF_URL
    self.assertTrue(UNIFIED_PDF_URL in job_details, msg)
    if ERROR in job_details:
        self.fail(job_details[ERROR])

    # Delete full analysis job
    delete_url = add_url_argument(_FULL_ANALYSIS_URL, UUID, fa_uuid, True)
    delete_data(self, delete_url, 200)

    # Ensure job no longer exists in the database
    response = get_data(self, _FULL_ANALYSIS_URL, 200)
    for job in response[FULL_ANALYSIS]:
        msg = "Full analysis job %s still exists in database." % fa_uuid
        self.assertNotEqual(fa_uuid, job[UUID], msg)
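# Both full-analysis tests above run the same per-document verification
# loop. A consolidation sketch (hypothetical helper, not part of the
# original suite) that either test could call with its document keys:
def _assert_documents_succeeded(self, job_details, doc_keys):
    """Assert that each document key exists in job_details with no error."""
    for doc_key in doc_keys:
        msg = "%s doesn't exist in job_details." % doc_key
        self.assertTrue(doc_key in job_details, msg)
        if ERROR in job_details[doc_key]:
            self.fail(job_details[doc_key][ERROR])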
def test_process(self):
    # Construct url
    url = self.construct_process_url(_ARCHIVE)

    # Submit process job
    response = post_data(self, url, 200)
    process_uuid = response[_PROCESS][0][UUID]

    # Test that submitting two jobs with the same name fails and returns
    # the appropriate error code.
    post_data(self, url, 403)

    # Poll until the job leaves the running state
    job_details = None
    running = True
    while running:
        time.sleep(10)
        response = get_data(self, _PROCESS_URL, 200)
        for job in response[_PROCESS]:
            if process_uuid == job[UUID]:
                job_details = job
        running = job_details[_STATUS] == 'running'

    # Copy result files to cwd for bamboo to ingest as artifacts
    analysis_txt_path = None
    if _RESULT in job_details:
        analysis_txt_path = job_details[_RESULT]
        if os.path.isfile(analysis_txt_path):
            shutil.copy(analysis_txt_path, "observed_analysis.txt")
    config_path = None
    if _CONFIG in job_details:
        config_path = job_details[_CONFIG]
        if os.path.isfile(config_path):
            shutil.copy(config_path, "observed.cfg")

    error = ""
    if 'error' in job_details:
        error = job_details['error']
    msg = "Expected pa process job status succeeded, but found %s. " \
          "Error: %s" % (job_details[_STATUS], error)
    self.assertEqual(job_details[_STATUS], "succeeded", msg)

    exp_analysis_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                     _EXPECTED_ANALYSIS_RESULT)
    msg = "Observed result (%s) doesn't match expected result (%s)." % \
          (analysis_txt_path, exp_analysis_path)
    self.assertTrue(filecmp.cmp(exp_analysis_path, analysis_txt_path), msg)

    exp_config_path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
                                   _EXPECTED_CONFIG_RESULT)
    msg = "Observed result (%s) doesn't match expected result (%s)." % \
          (config_path, exp_config_path)
    self.assertTrue(filecmp.cmp(exp_config_path, config_path), msg)

    # Delete process job
    delete_data(self, _PROCESS_URL + "?uuid=%s" % process_uuid, 200)

    # Ensure job no longer exists in the database
    response = get_data(self, _PROCESS_URL, 200)
    for job in response[_PROCESS]:
        msg = "PA process job %s still exists in database." % process_uuid
        self.assertNotEqual(process_uuid, job[UUID], msg)
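# Several tests above poll a job endpoint with the same sleep/fetch/scan
# loop. A minimal consolidation sketch (hypothetical helper, not part of
# the original suite); status_key is a parameter because some responses
# are keyed by STATUS and others by _STATUS:
def _wait_for_job(self, url, collection_key, job_uuid, status_key,
                  poll_secs=10):
    """Poll url until the job identified by job_uuid stops running."""
    job_details = None
    running = True
    while running:
        time.sleep(poll_secs)
        response = get_data(self, url, 200)
        for job in response[collection_key]:
            if job[UUID] == job_uuid:
                job_details = job
        running = job_details[status_key] == 'running'
    return job_details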