Example #1
    def test_monitor2_images(self):
        """
        Test the POST, GET and DELETE images APIs.
        """
        # Upload image stack
        url = _MON_IMAGES_URL
        url = add_url_argument(url, STACK_TYPE, MONITOR2, True)
        url = add_url_argument(url, NAME, 'monitor2_images_name')
        url = add_url_argument(url, DESCRIPTION, 'monitor2 images description')

        response = upload_file(self, _TEST_DIR, url, _MON2_IMAGES, 200)
        image_stack_uuid = response[UUID]

        # Ensure duplicate image stacks cannot be uploaded
        upload_file(self, _TEST_DIR, url, _MON2_IMAGES, 403)

        # Ensure image stack exists in the database and can be retrieved
        response = get_data(self, _MON_IMAGES_URL, 200)
        record_found = False
        for record in response[MONITOR_IMAGES]:
            if record[UUID] == image_stack_uuid:
                record_found = True
        msg = "Monitor image stack %s doesn't exist in the database." % image_stack_uuid
        self.assertTrue(record_found, msg)

        # Delete image stack
        url = add_url_argument(_IMAGES_URL, UUID, image_stack_uuid, True)
        delete_data(self, url, 200)

        # Ensure image stack no longer exists in the database
        response = get_data(self, _MON_IMAGES_URL, 200)
        for record in response[MONITOR_IMAGES]:
            msg = "Monitor image stack %s still exists in the database." % record[UUID]
            self.assertNotEqual(image_stack_uuid, record[UUID], msg)
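
The tests in this listing build query strings with an add_url_argument helper whose definition is never shown. A minimal sketch consistent with how the tests call it (the fourth argument marks the first, '?'-separated parameter) might be:

def add_url_argument(url, key, value, first=False):
    # Append '?key=value' for the first query parameter, '&key=value' after.
    separator = '?' if first else '&'
    return "%s%s%s=%s" % (url, separator, key, value)
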
Example #2
    def test_tags_api(self):
        """
        test TagsGetFunction, TagsDeleteFunction, and TagsPostFunction.
        """
        report_uuid = str(uuid4())
        _DB_CONNECTOR.insert(RUN_REPORT_COLLECTION, [{
            UUID: report_uuid,
            'name': 'dummy run report'
        }])

        # test TagsPostFunction, add tags to a run report
        post_url = _RUN_INFO_TAG_URL
        post_url = add_url_argument(post_url, UUID, report_uuid, True)
        post_url = add_url_argument(post_url, TAGS,
                                    ','.join(["BRAF e15", "FFPE"]))

        post_data(self, post_url, 200)

        # test TagsGetFunction, get tags from all run reports
        response = get_data(self, _RUN_INFO_TAG_URL, 200)
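        # note: the API returns tags lowercased ("BRAF e15" -> "braf e15")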
        self.assertIn("braf e15", response[TAGS])

        # test TagsDeleteFunction, delete a tag from a run report
        delete_url = _RUN_INFO_TAG_URL
        delete_url = add_url_argument(delete_url, UUID, report_uuid, True)
        delete_url = add_url_argument(delete_url, TAGS, "ffpe")

        delete_data(self, delete_url, 200)
        response = get_data(self, _RUN_INFO_TAG_URL, 200)
        self.assertIn("braf e15", response[TAGS])
        self.assertNotIn("ffpe", response[TAGS])

        _DB_CONNECTOR.remove(RUN_REPORT_COLLECTION, {UUID: report_uuid})
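
get_data, post_data, delete_data, and upload_file are likewise helpers that are not part of the listing. A hypothetical sketch of the first three, assuming each issues a request through the Flask test client created in setUp, asserts the expected HTTP status code, and returns the decoded JSON body (the real helpers may differ):

import json

def _request(test_case, method, url, expected_code):
    # Issue the request, check the status code, and decode the JSON payload.
    response = getattr(test_case._client, method)(url)
    test_case.assertEqual(response.status_code, expected_code)
    return json.loads(response.data)

def get_data(test_case, url, expected_code):
    return _request(test_case, 'get', url, expected_code)

def post_data(test_case, url, expected_code):
    return _request(test_case, 'post', url, expected_code)

def delete_data(test_case, url, expected_code):
    return _request(test_case, 'delete', url, expected_code)
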
Example #3
    def test_images(self):
        """
        Test the POST, GET and DELETE images APIs.
        """
        # Upload image stack
        url = _IMAGES_URL
        url = add_url_argument(url, EXP_DEF, 'Beta_24b_p1_V6', True)
        url = add_url_argument(url, NAME, 'golden_run')
        url = add_url_argument(url, DESCRIPTION, 'Short description.')

        response = upload_file(self, _TEST_DIR, url, _PNG_IMAGES, 200)
        image_stack_uuid = response[UUID]

        # Ensure duplicate image stacks cannot be uploaded
        response = upload_file(self, _TEST_DIR, url, _PNG_IMAGES, 403)

        # Ensure image stack exists in the database and can be retrieved
        response = get_data(self, _IMAGES_URL, 200)
        record_found = False
        for record in response[IMAGES]:
            if record[UUID] == image_stack_uuid:
                record_found = True
        msg = "Image stack %s doesn't exist in the database." % image_stack_uuid
        self.assertTrue(record_found, msg)

        # Delete image stack
        url = add_url_argument(_IMAGES_URL, UUID, image_stack_uuid, True)
        delete_data(self, url, 200)

        # Ensure image stack no longer exists in the database
        response = get_data(self, _IMAGES_URL, 200)
        for record in response[IMAGES]:
            msg = "Image stack %s still exists in the database." % record[UUID]
            self.assertNotEqual(image_stack_uuid, record[UUID], msg)
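
The linear scan for the uploaded stack's UUID can be written more compactly with any(); this is an equivalent expression, not a behavior change:

        record_found = any(record[UUID] == image_stack_uuid
                           for record in response[IMAGES])
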
Example #4
    def test_update_report_by_dates(self):
        """
        test RunInfoGetFunction with start and end parameters
        """
        # test when both start and end dates are specified
        param_str = '?refresh=true&start={0}&end={1}'.format(
            _START_DATE.strftime("%Y_%m_%d"), _END_DATE.strftime("%Y_%m_%d"))
        response = get_data(self, _RUN_INFO_GET_URL + param_str, 200)
        len_expected_reports = len(
            _DB_CONNECTOR.find(
                RUN_REPORT_COLLECTION, {
                    UUID: {
                        '$exists': True
                    },
                    DEVICE_NAME: {
                        '$ne': ''
                    },
                    EXP_DEF_NAME: {
                        '$ne': None
                    },
                    IMAGE_STACKS: {
                        '$ne': None,
                        '$not': {
                            '$size': 0
                        }
                    }
                }))
        len_observed_reports = len(response['run_report'])
        msg = "Numebr of observed run reports (%s) doesn't match expected number (%s)." \
                % (len_observed_reports, len_expected_reports)
        self.assertEqual(len_expected_reports, len_observed_reports, msg)

        # test when only start date is specified
        param_str = '?refresh=true&start={0}'.format(
            _START_DATE.strftime("%Y_%m_%d"))
        response = get_data(self, _RUN_INFO_GET_URL + param_str, 200)
        len_expected_reports = len(
            _DB_CONNECTOR.find(
                RUN_REPORT_COLLECTION, {
                    UUID: {
                        '$exists': True
                    },
                    DEVICE_NAME: {
                        '$ne': ''
                    },
                    EXP_DEF_NAME: {
                        '$ne': None
                    },
                    IMAGE_STACKS: {
                        '$ne': None,
                        '$not': {
                            '$size': 0
                        }
                    }
                }))
        len_observed_reports = len(response['run_report'])
        msg = "Numebr of observed run reports (%s) doesn't match expected number (%s)." \
                % (len_observed_reports, len_expected_reports)
        self.assertEqual(len_expected_reports, len_observed_reports, msg)
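
The MongoDB filter above is written out twice; hoisting it into a module-level constant keeps the two assertions in sync. A sketch using the same field-name constants the tests already import:

# Query matching run reports that have a UUID, a device name, an experiment
# definition, and a non-empty image stack list.
_VALID_REPORT_QUERY = {
    UUID: {'$exists': True},
    DEVICE_NAME: {'$ne': ''},
    EXP_DEF_NAME: {'$ne': None},
    IMAGE_STACKS: {'$ne': None, '$not': {'$size': 0}},
}

Each count would then come from _DB_CONNECTOR.find(RUN_REPORT_COLLECTION, _VALID_REPORT_QUERY).
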
Example #5
    def test_absorption(self):
        # Upload targets and probes files
        response = upload_file(self, _TEST_DIR, _PROBES_URL, _PROBES_FILENAME,
                               200)
        probes_uuid = response[UUID]
        response = upload_file(self, _TEST_DIR, _TARGETS_URL,
                               _TARGETS_FILENAME, 200)
        targets_uuid = response[UUID]

        # Post absorption job
        url = _ABSORPTION_URL + "?probes=%s&targets=%s&job_name=%s&%s=%s" % \
             (probes_uuid, targets_uuid, _JOB_NAME, STRICT, _STRICT)
        response = post_data(self, url, 200)
        abs_job_uuid = response[UUID]

        running = True
        job_details = None
        while running:
            time.sleep(10)
            response = get_data(self, _ABSORPTION_URL, 200)
            for job in response[_ABSORPTION]:
                if abs_job_uuid == job[UUID]:
                    job_details = job
                    running = job_details[_STATUS] == 'running'

        # Copy result file to cwd for bamboo to ingest as an artifact
        absorption_path = None
        if _RESULT in job_details:
            absorption_path = job_details[_RESULT]
            if absorption_path and os.path.isfile(absorption_path):
                shutil.copy(absorption_path, "observed_absorption.txt")

        # Clean up by removing targets and probes files
        delete_data(self, _PROBES_URL + "?uuid=%s" % probes_uuid, 200)
        delete_data(self, _TARGETS_URL + "?uuid=%s" % targets_uuid, 200)

        msg = "Expected absorption job status succeeded, but found: %s" % \
              job_details[_STATUS]
        self.assertEqual(job_details[_STATUS], "succeeded", msg)

        exp_result_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            _EXPECTED_RESULT_FILENAME)
        msg = "Observed result (%s) doesn't match expected result (%s)." % \
              (absorption_path, exp_result_path)
        self.assertTrue(filecmp.cmp(exp_result_path, absorption_path), msg)

        # Delete absorption job
        delete_data(self, _ABSORPTION_URL + "?uuid=%s" % abs_job_uuid, 200)

        # Ensure job no longer exists in the database
        response = get_data(self, _ABSORPTION_URL, 200)
        for job in response[_ABSORPTION]:
            msg = "Absorption job %s still exists in database." % abs_job_uuid
            self.assertNotEqual(abs_job_uuid, job[UUID], msg)
Example #6
    def test_hdf5_process(self):
        # Construct url
        url = self.construct_process_url(_HDF5_DATASET,
                                         'test_HDF5_pa_process_job')

        # Submit process job
        response = post_data(self, url, 200)
        process_uuid = response[_PROCESS][0][UUID]

        # Test that submitting two jobs with the same name fails and returns
        # the appropriate error code.
        post_data(self, url, 403)

        running = True
        while running:
            time.sleep(10)
            response = get_data(self, _PROCESS_URL, 200)
            for job in response[_PROCESS]:
                if process_uuid == job[UUID]:
                    job_details = job
                    running = job_details[_STATUS] == 'running'

        # Ensure the primary analysis result file was created
        self.assertTrue(_RESULT in job_details,
                        'Unable to locate primary analysis file')
        if _RESULT in job_details:
            analysis_txt_path = job_details[_RESULT]
            msg = 'Expected HDF5 to be converted to file %s, but this file was not found.' \
                  % analysis_txt_path
            self.assertTrue(os.path.exists(analysis_txt_path), msg)

        # Ensure the config file was created
        self.assertTrue(_CONFIG in job_details, 'Unable to locate config file')
        if _CONFIG in job_details:
            config_path = job_details[_CONFIG]
            msg = 'Expected HDF5 conversion to create config file %s, but this file was not found.' \
                  % config_path
            self.assertTrue(os.path.exists(config_path), msg)

        error = ""
        if 'error' in job_details:
            error = job_details['error']
        msg = "Expected pa process job status succeeded, but found %s. " \
              "Error: %s" % (job_details[_STATUS], error)
        self.assertEqual(job_details[_STATUS], "succeeded", msg)

        # Delete process job
        delete_data(self, _PROCESS_URL + "?uuid=%s" % process_uuid, 200)

        # Ensure job no longer exists in the database
        response = get_data(self, _PROCESS_URL, 200)
        for job in response[_PROCESS]:
            msg = "PA process job %s still exists in database." % process_uuid
            self.assertNotEqual(process_uuid, job[UUID], msg)
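
Both jobs above, and the full analysis tests later in this listing, poll with the same ten-second busy-wait. A hypothetical wait_for_job helper with a timeout would fail a hung job instead of looping forever; get_data, UUID, and _STATUS are the names the tests already use:

import time

def wait_for_job(test_case, url, response_key, job_uuid, timeout=600):
    # Poll the job listing until the job leaves the 'running' state.
    deadline = time.time() + timeout
    while time.time() < deadline:
        time.sleep(10)
        response = get_data(test_case, url, 200)
        for job in response[response_key]:
            if job[UUID] == job_uuid and job[_STATUS] != 'running':
                return job
    test_case.fail("Job %s still running after %d seconds." % (job_uuid, timeout))
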
Example #7
    def test_get_run_info(self):
        """
        test RunInfoGetFunction
        """
        response = get_data(self,
                            _RUN_INFO_GET_URL + '?refresh=true&format=json',
                            200)
        len_expected_reports = len(
            _DB_CONNECTOR.find(
                RUN_REPORT_COLLECTION, {
                    UUID: {
                        '$exists': True
                    },
                    DEVICE_NAME: {
                        '$ne': ''
                    },
                    EXP_DEF_NAME: {
                        '$ne': None
                    },
                    IMAGE_STACKS: {
                        '$ne': None,
                        '$not': {
                            '$size': 0
                        }
                    }
                }))
        len_observed_reports = len(response['run_report'])

        msg = "Numebr of observed run reports (%s) doesn't match expected number (%s)." \
                % (len_observed_reports, len_expected_reports)
        self.assertEqual(len_expected_reports, len_observed_reports, msg)
Example #8
    def setUp(self):
        self._client = app.test_client(self)
        get_data(self, _HDF5S_URL + '?refresh=true&format=json', 200)
        get_data(self, _ARCHIVES_URL + '?refresh=true&format=json', 200)

        # insert HDF5 record
        hdf5_record = {
            HDF5_PATH:
            'run_reports/08_02_16/Tue02_1842_pilot1_unittest/id1470144257.h5',
            HDF5_DATASET: _HDF5_DATASET
        }
        _DB_CONNECTOR.insert(HDF5_COLLECTION, [hdf5_record])

        # insert archive record
        archive_record = {ARCHIVE: _ARCHIVE, ARCHIVE_PATH: _ARCHIVE}
        _DB_CONNECTOR.insert(ARCHIVES_COLLECTION, [archive_record])
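
This setUp inserts records, but the listing shows no matching tearDown. A sketch that removes them so repeated runs stay isolated, assuming the same field names used in the inserts above:

    def tearDown(self):
        # Remove the records inserted in setUp.
        _DB_CONNECTOR.remove(HDF5_COLLECTION, {HDF5_DATASET: _HDF5_DATASET})
        _DB_CONNECTOR.remove(ARCHIVES_COLLECTION, {ARCHIVE: _ARCHIVE})
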
Example #9
    def exercise_file_upload_api(self, url, filename):
        response_key = url.split("/")[-1]

        # Test successful file upload
        response = upload_file(self, _TEST_DIR, url, filename, 200)
        uuid = response[UUID]

        # Test error code 403: File already exists.
        upload_file(self, _TEST_DIR, url, filename, 403)

        # Test error code 415: File is not a valid FASTA file.
        upload_file(self, _TEST_DIR, url, _INVALID_FASTA_FILENAME, 415)

        # Test successful retrieval of uploaded file
        response = get_data(self, url, 200)
        retrieved_uuid = response[response_key][0][UUID]
        msg = "Expected uuid (%s) doesn't match observed uuid (%s) for %s" % \
              (uuid, retrieved_uuid, url)
        self.assertEqual(uuid, retrieved_uuid, msg)

        # Test successful deletion of uploaded file
        delete_data(self, url + "?uuid=%s" % uuid, 200)

        # Test unsuccessful deletion of non-existent file
        delete_data(self, url + "?uuid=%s" % str(uuid4()), 404)
Example #10
    def test_get_exp_defs(self):
        """
        test ExpDefGetFunction
        """
        response = get_data(self, _EXP_DEF_URL + '/ExpDef', 200)
        len_expected_defs = len(_DB_CONNECTOR.find(EXP_DEF_COLLECTION, {}))
        len_observed_defs = len(response['ExpDef'])

        msg = "Numebr of observed definitions (%s) doesn't match expected number (%s)." \
                % (len_observed_defs, len_expected_defs)
        self.assertEqual(len_expected_defs, len_observed_defs, msg)

        response = get_data(self, _EXP_DEF_URL + '/ExpDef?refresh=true', 200)
        len_expected_defs = len(_DB_CONNECTOR.find(EXP_DEF_COLLECTION, {}))
        len_observed_defs = len(response['ExpDef'])

        msg = "Numebr of observed definitions (%s) doesn't match expected number (%s)." \
                % (len_observed_defs, len_expected_defs)
        self.assertEqual(len_expected_defs, len_observed_defs, msg)
Example #11
    def test_dyes(self):
        response = get_data(self, _DYES_URL + '?refresh=true&format=json', 200)
        dyes = read_yaml(os.path.join(_TEST_DIR, 'dyes.yaml'))

        observed_dyes = set([x['dye'] for x in response['Dyes']])
        expected_dyes = set([x['dye'] for x in dyes['Dyes']])

        msg = "Expected dyes (%s) not a subset of observed (%s)." % \
              (expected_dyes, observed_dyes)
        self.assertTrue(expected_dyes.issubset(observed_dyes), msg)
Example #12
    def test_devices(self):
        response = get_data(self, _DEVICES_URL + '?refresh=true&format=json',
                            200)
        devices = read_yaml(os.path.join(_TEST_DIR, 'devices.yaml'))
        observed_devices = ", ".join(
            map(lambda x: x['device'], response['Devices']))
        expected_devices = ", ".join(
            map(lambda x: x['device'], devices['Devices']))
        msg = "Observed devices (%s) don't match expected (%s)." % \
              (observed_devices, expected_devices)
        self.assertEqual(observed_devices, expected_devices, msg)
Example #13
    def testMeltingTemperatures(self):
        url = _IDT_URL + "?sequence_name=%s&sequence=%s" % \
            (",".join(self.probes_dict.keys()),
             ",".join(self.probes_dict.values()))
        response = get_data(self, url, 200)
        results = {x[_NAME]: (x[_TM], x[_SEQUENCE]) for x in response[_IDT]}
        with open(_OBSERVED_RESULT_FILENAME, 'w') as f:
            print >>f, "\t".join([_NAME, _SEQUENCE, _TM])
            for name in sorted(results.keys()):
                tm = str(results[name][0])
                seq = results[name][1]
                print >>f, "\t".join([name, seq, tm])

        msg = "Expected Tm's (%s) don't match observed Tm's (%s)." % \
            (_EXPECTED_RESULT_FILENAME, _OBSERVED_RESULT_FILENAME)
        self.assertTrue(filecmp.cmp(self.expected_result_path,
                                    _OBSERVED_RESULT_FILENAME), msg)
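
The print >>f statements above are Python 2 syntax; under Python 3 the same writes would read:

        with open(_OBSERVED_RESULT_FILENAME, 'w') as f:
            # print(..., file=f) replaces the Python 2 chevron form.
            print("\t".join([_NAME, _SEQUENCE, _TM]), file=f)
            for name in sorted(results):
                tm = str(results[name][0])
                seq = results[name][1]
                print("\t".join([name, seq, tm]), file=f)
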
Example #14
    def test_full_analysis_exploratory(self):
        """
        Test the POST and DELETE full analysis API with exploratory experiment definition.
        """
        # Construct url
        url = _FULL_ANALYSIS_URL
        url = add_url_argument(url, PA_DATA_SOURCE, _ARCHIVE_NAME, True)
        url = add_url_argument(url, JOB_NAME, _FA_EXPLORATORY_JOBNAME)
        url = add_url_argument(url, EXP_DEF, _EXP_DEF_EXPLORATORY_NAME)
        url = add_url_argument(url, OFFSETS, _OFFSETS)
        url = add_url_argument(url, UI_THRESHOLD, _UI_THRESHOLD)
        url = add_url_argument(url, MAX_UNINJECTED_RATIO, _MAX_UI_RATIO)
        url = add_url_argument(url, AC_TRAINING_FACTOR, _AC_TRAINING_FACTOR)
        url = add_url_argument(url, CTRL_THRESH, _CTRL_THRESH)
        url = add_url_argument(url, REQUIRED_DROPS, _REQUIRED_DROPS)

        # Submit full analysis job
        response = post_data(self, url, 200)
        fa_uuid = response[FULL_ANALYSIS][0][UUID]

        # Test that submitting two jobs with the same name fails and returns
        # the appropriate error code.
        post_data(self, url, 403)

        running = True
        while running:
            time.sleep(10)
            response = get_data(self, _FULL_ANALYSIS_URL, 200)
            for job in response[FULL_ANALYSIS]:
                if fa_uuid == job[UUID]:
                    job_details = job
                    running = job_details[STATUS] == 'running'

        msg = "%s doesn't exist in job_details." % PA_DOCUMENT
        self.assertTrue(PA_DOCUMENT in job_details, msg)
        if ERROR in job_details[PA_DOCUMENT]:
            self.fail(job_details[PA_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % ID_DOCUMENT
        self.assertTrue(ID_DOCUMENT in job_details, msg)
        if ERROR in job_details[ID_DOCUMENT]:
            self.fail(job_details[ID_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % AC_DOCUMENT
        self.assertTrue(AC_DOCUMENT in job_details, msg)
        if ERROR in job_details[AC_DOCUMENT]:
            self.fail(job_details[AC_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % EP_DOCUMENT
        self.assertTrue(EP_DOCUMENT in job_details, msg)
        if ERROR in job_details[EP_DOCUMENT]:
            self.fail(job_details[EP_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % UNIFIED_PDF
        self.assertTrue(UNIFIED_PDF in job_details, msg)

        msg = "%s doesn't exist in job_details." % UNIFIED_PDF_URL
        self.assertTrue(UNIFIED_PDF_URL in job_details, msg)

        if ERROR in job_details:
            self.fail(job_details[ERROR])

        # Delete full analysis job
        delete_url = add_url_argument(_FULL_ANALYSIS_URL, UUID, fa_uuid, True)
        delete_data(self, delete_url, 200)

        # Ensure job no longer exists in the database
        response = get_data(self, _FULL_ANALYSIS_URL, 200)
        for job in response[FULL_ANALYSIS]:
            msg = "Full analysis job %s still exists in database." % fa_uuid
            self.assertNotEqual(fa_uuid, job[UUID], msg)
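
The four per-document checks above follow one pattern; a hypothetical helper method (assert_documents_ok is an invented name) could serve both this test and Example #15, which checks GT_DOCUMENT in place of EP_DOCUMENT:

    def assert_documents_ok(self, job_details, doc_keys):
        # Each expected analysis document must be present and error-free.
        for doc_key in doc_keys:
            msg = "%s doesn't exist in job_details." % doc_key
            self.assertTrue(doc_key in job_details, msg)
            if ERROR in job_details[doc_key]:
                self.fail(job_details[doc_key][ERROR])

This test would then call self.assert_documents_ok(job_details, (PA_DOCUMENT, ID_DOCUMENT, AC_DOCUMENT, EP_DOCUMENT)).
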
Example #15
    def test_full_analysis(self):
        """
        Test the POST, GET and DELETE full analysis APIs
        """
        # run these to ensure that the instance of mongo database used by
        # bamboo is updated with the latest image stacks and HDF5 archives
        get_data(self, _ARCHIVES_URL + '?refresh=true&format=json', 200)
        get_data(self, _HDF5S_URL + '?refresh=true&format=json', 200)

        # Construct url
        url = _FULL_ANALYSIS_URL
        url = add_url_argument(url, PA_DATA_SOURCE, _ARCHIVE_NAME, True)
        url = add_url_argument(url, JOB_NAME, _FA_HOTSPOT_JOBNAME)
        url = add_url_argument(url, EXP_DEF, _EXP_DEF_HOTSPOT_NAME)
        url = add_url_argument(url, OFFSETS, _OFFSETS)
        url = add_url_argument(url, UI_THRESHOLD, _UI_THRESHOLD)
        url = add_url_argument(url, MAX_UNINJECTED_RATIO, _MAX_UI_RATIO)
        url = add_url_argument(url, AC_TRAINING_FACTOR, _AC_TRAINING_FACTOR)
        url = add_url_argument(url, CTRL_THRESH, _CTRL_THRESH)
        url = add_url_argument(url, REQUIRED_DROPS, _REQUIRED_DROPS)

        # Submit full analysis job
        response = post_data(self, url, 200)
        fa_uuid = response[FULL_ANALYSIS][0][UUID]

        # Test that submitting two jobs with the same name fails and returns
        # the appropriate error code.
        post_data(self, url, 403)

        running = True
        while running:
            time.sleep(10)
            response = get_data(self, _FULL_ANALYSIS_URL, 200)
            for job in response[FULL_ANALYSIS]:
                if fa_uuid == job[UUID]:
                    job_details = job
                    running = job_details[STATUS] == 'running'

        msg = "%s doesn't exist in job_details." % PA_DOCUMENT
        self.assertTrue(PA_DOCUMENT in job_details, msg)
        if ERROR in job_details[PA_DOCUMENT]:
            self.fail(job_details[PA_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % ID_DOCUMENT
        self.assertTrue(ID_DOCUMENT in job_details, msg)
        if ERROR in job_details[ID_DOCUMENT]:
            self.fail(job_details[ID_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % AC_DOCUMENT
        self.assertTrue(AC_DOCUMENT in job_details, msg)
        if ERROR in job_details[AC_DOCUMENT]:
            self.fail(job_details[AC_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % GT_DOCUMENT
        self.assertTrue(GT_DOCUMENT in job_details, msg)
        if ERROR in job_details[GT_DOCUMENT]:
            self.fail(job_details[GT_DOCUMENT][ERROR])

        msg = "%s doesn't exist in job_details." % UNIFIED_PDF
        self.assertTrue(UNIFIED_PDF in job_details, msg)

        msg = "%s doesn't exist in job_details." % UNIFIED_PDF_URL
        self.assertTrue(UNIFIED_PDF_URL in job_details, msg)

        if ERROR in job_details:
            self.fail(job_details[ERROR])

        # Delete full analysis job
        delete_url = add_url_argument(_FULL_ANALYSIS_URL, UUID, fa_uuid, True)
        delete_data(self, delete_url, 200)

        # Ensure job no longer exists in the database
        response = get_data(self, _FULL_ANALYSIS_URL, 200)
        for job in response[FULL_ANALYSIS]:
            msg = "Full analysis job %s still exists in database." % fa_uuid
            self.assertNotEqual(fa_uuid, job[UUID], msg)
Example #16
    def test_process(self):
        # Construct url
        url = self.construct_process_url(_ARCHIVE)

        # Submit process job
        response = post_data(self, url, 200)
        process_uuid = response[_PROCESS][0][UUID]

        # Test that submitting two jobs with the same name fails and returns
        # the appropriate error code.
        post_data(self, url, 403)

        running = True
        while running:
            time.sleep(10)
            response = get_data(self, _PROCESS_URL, 200)
            for job in response[_PROCESS]:
                if process_uuid == job[UUID]:
                    job_details = job
                    running = job_details[_STATUS] == 'running'

        # Copy result files to cwd for bamboo to ingest as artifacts
        analysis_txt_path = None
        if _RESULT in job_details:
            analysis_txt_path = job_details[_RESULT]
            if os.path.isfile(analysis_txt_path):
                shutil.copy(analysis_txt_path, "observed_analysis.txt")

        config_path = None
        if _CONFIG in job_details:
            config_path = job_details[_CONFIG]
            if os.path.isfile(config_path):
                shutil.copy(config_path, "observed.cfg")

        error = ""
        if 'error' in job_details:
            error = job_details['error']
        msg = "Expected pa process job status succeeded, but found %s. " \
              "Error: %s" % (job_details[_STATUS], error)
        self.assertEqual(job_details[_STATUS], "succeeded", msg)

        exp_analysis_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            _EXPECTED_ANALYSIS_RESULT)
        msg = "Observed result (%s) doesn't match expected result (%s)." % \
              (analysis_txt_path, exp_analysis_path)
        self.assertTrue(filecmp.cmp(exp_analysis_path, analysis_txt_path), msg)

        exp_config_path = os.path.join(
            os.path.abspath(os.path.dirname(__file__)),
            _EXPECTED_CONFIG_RESULT)
        msg = "Observed result (%s) doesn't match expected result (%s)." % \
              (config_path, exp_config_path)
        self.assertTrue(filecmp.cmp(exp_config_path, config_path), msg)

        # Delete process job
        delete_data(self, _PROCESS_URL + "?uuid=%s" % process_uuid, 200)

        # Ensure job no longer exists in the database
        response = get_data(self, _PROCESS_URL, 200)
        for job in response[_PROCESS]:
            msg = "PA process job %s still exists in database." % process_uuid
            self.assertNotEqual(process_uuid, job[UUID], msg)
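
A caveat on the file comparisons used throughout this listing: filecmp.cmp defaults to shallow=True, which can declare two files equal from their os.stat() signatures (file type, size, modification time) without reading the contents. Forcing a byte-by-byte comparison is more robust when validating generated results:

import filecmp

def files_match(expected_path, observed_path):
    # shallow=False forces a content comparison instead of a stat comparison.
    return filecmp.cmp(expected_path, observed_path, shallow=False)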