def test_sample_template_handler_delete_request(self):
        """Checks error handling and success of the sample-template delete
        request handler."""
        # A user without access to study 1 must be rejected.
        # assertRaisesRegexp is a removed Python 2 alias; use the Python 3
        # name assertRaisesRegex (consistent with the other tests here)
        with self.assertRaisesRegex(HTTPError,
                                    'User does not have access to study'):
            sample_template_handler_delete_request(
                1, User('*****@*****.**'))

        # An unknown study id raises a "does not exist" error
        user = User('*****@*****.**')
        with self.assertRaisesRegex(HTTPError, 'Study does not exist'):
            sample_template_handler_delete_request(1000000, user)

        # A study without sample information cannot have it deleted
        new_study = self._create_study('Study for deleting test')
        with self.assertRaisesRegex(HTTPError, "Study %s doesn't have sample "
                                               "information" % new_study.id):
            sample_template_handler_delete_request(new_study.id, user)

        # Success: the handler returns a dict with only the 'job' key and
        # stores job info in redis under 'sample_template_<study_id>'
        user = User('*****@*****.**')
        obs = sample_template_handler_delete_request(1, user)
        # dict.keys() returns a view in Python 3, never equal to a list;
        # compare contents instead
        self.assertCountEqual(obs.keys(), ['job'])
        job_info = r_client.get('sample_template_1')
        self.assertIsNotNone(job_info)

        # Wait until the job is done so the test DB can be cleaned up safely
        wait_for_processing_job(loads(job_info)['job_id'])
Beispiel #2
0
    def test_post_job_success(self):
        """Completing a running validate job with a success payload creates
        the new artifact and marks the job as successful."""
        # Creating a prep template with an unrecognized column emits a
        # QiitaDBWarning; capture it so the test output stays clean
        prep_template = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning,
            qdb.metadata_template.prep_template.PrepTemplate.create,
            pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}),
            qdb.study.Study(1), '16S')
        validate_job = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('*****@*****.**'),
            qdb.software.Parameters.load(
                qdb.software.Command.get_validator('BIOM'),
                values_dict={'template': prep_template.id,
                             'files': dumps({'BIOM': ['file']}),
                             'artifact_type': 'BIOM'}))
        validate_job._set_status('running')

        # Create a dummy biom file for the new artifact to point at
        fildes, biom_fp = mkstemp(suffix='_table.biom')
        close(fildes)
        with open(biom_fp, 'w') as f:
            f.write('\n')

        self._clean_up_files.append(biom_fp)

        expected_artifacts = qdb.util.get_count('qiita.artifact') + 1
        request_body = dumps(
            {'success': True, 'error': '',
             'artifacts': {'OTU table': {'filepaths': [(biom_fp, 'biom')],
                                         'artifact_type': 'BIOM'}}})
        response = self.post(
            '/qiita_db/jobs/%s/complete/' % validate_job.id,
            request_body, headers=self.header)
        wait_for_processing_job(validate_job.id)
        self.assertEqual(response.code, 200)
        self.assertEqual(validate_job.status, 'success')
        # Exactly one artifact must have been added
        self.assertEqual(qdb.util.get_count('qiita.artifact'),
                         expected_artifacts)
Beispiel #3
0
    def test_post_job_failure(self):
        """Completing a running validate job with a failure payload marks
        the job as errored and records the message in the log."""
        prep_template = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning,
            qdb.metadata_template.prep_template.PrepTemplate.create,
            pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}),
            qdb.study.Study(1), '16S')
        validate_job = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('*****@*****.**'),
            qdb.software.Parameters.load(
                qdb.software.Command.get_validator('BIOM'),
                values_dict={'template': prep_template.id,
                             'files': dumps({'BIOM': ['file']}),
                             'artifact_type': 'BIOM'}))
        validate_job._set_status('running')

        request_body = dumps({'success': False, 'error': 'Job failure'})
        response = self.post(
            '/qiita_db/jobs/%s/complete/' % validate_job.id,
            request_body, headers=self.header)
        self.assertEqual(response.code, 200)
        wait_for_processing_job(validate_job.id)
        self.assertEqual(validate_job.status, 'error')
        # The failure must be stored as the newest log entry
        self.assertEqual(validate_job.log,
                         qdb.logger.LogEntry.newest_records(numrecords=1)[0])
        self.assertEqual(validate_job.log.msg, 'Job failure')
 def test_delete(self):
     """DELETE on the sample template endpoint returns the created job."""
     response = self.delete('/study/description/sample_template/',
                            {'study_id': 1})
     self.assertEqual(response.code, 200)
     self.assertIsNotNone(response.body)
     obs = loads(response.body)
     # dict.keys() returns a view in Python 3, never equal to a list;
     # compare contents instead
     self.assertCountEqual(obs.keys(), ['job'])
     # Wait until the job is done
     wait_for_processing_job(obs['job'])
 def test_post(self):
     """POST on the sample template endpoint returns the created job."""
     response = self.post('/study/description/sample_template/',
                          {'study_id': 1,
                           'filepath': 'uploaded_file.txt',
                           'data_type': ''})
     self.assertEqual(response.code, 200)
     self.assertIsNotNone(response.body)
     obs = loads(response.body)
     # dict.keys() returns a view in Python 3, never equal to a list;
     # compare contents instead
     self.assertCountEqual(obs.keys(), ['job'])
     # Wait until the job is done
     wait_for_processing_job(obs['job'])
 def test_patch(self):
     """PATCH on the sample template endpoint returns the created job."""
     response = self.patch('/study/description/sample_template/',
                           {'op': 'replace',
                            'path': '/1/data',
                            'value': 'uploaded_file.txt'})
     self.assertEqual(response.code, 200)
     self.assertIsNotNone(response.body)
     obs = loads(response.body)
     # dict.keys() returns a view in Python 3, never equal to a list;
     # compare contents instead
     self.assertCountEqual(obs.keys(), ['job'])
     # Wait until the job is done
     wait_for_processing_job(obs['job'])
Beispiel #7
0
 def test_post_job_failure(self):
     """Posting a failure payload to the complete endpoint errors the job
     and records the message in the log."""
     job_id = 'bcc7ebcd-39c1-43e4-af2d-822e3589f14d'
     failure_payload = dumps({'success': False, 'error': 'Job failure'})
     response = self.post('/qiita_db/jobs/%s/complete/' % job_id,
                          failure_payload, headers=self.header)
     self.assertEqual(response.code, 200)
     wait_for_processing_job(job_id)
     job = qdb.processing_job.ProcessingJob(job_id)
     self.assertEqual(job.status, 'error')
     # The failure must be stored as the newest log entry
     self.assertEqual(job.log,
                      qdb.logger.LogEntry.newest_records(numrecords=1)[0])
     self.assertEqual(job.log.msg, 'Job failure')
Beispiel #8
0
    def test_sample_template_handler_post_request(self):
        """Exercises error handling and both success paths of the sample
        template POST request handler."""
        # Users without access to the study are rejected
        with self.assertRaisesRegex(HTTPError,
                                    'User does not have access to study'):
            sample_template_handler_post_request(
                1, User('*****@*****.**'), 'ignored')

        # Unknown study ids are rejected
        user = User('*****@*****.**')
        with self.assertRaisesRegex(HTTPError, 'Study does not exist'):
            sample_template_handler_post_request(1000000, user, 'ignored')

        # Missing upload files are rejected
        with self.assertRaisesRegex(HTTPError, 'Filepath not found'):
            sample_template_handler_post_request(1, user, 'DoesNotExist.txt')

        # A QIIME mapping file without a data type is rejected; create a
        # minimal mapping file inside the study's uploads directory
        uploads_dir = join(get_mountpoint('uploads')[0][1], '1')
        fildes, mapping_fp = mkstemp(suffix='.txt', dir=uploads_dir)
        self._clean_up_files.append(mapping_fp)
        close(fildes)

        with open(mapping_fp, 'w') as f:
            f.write('#SampleID\tCol1\nSample1\tVal1')

        with self.assertRaisesRegex(
                HTTPError, 'Please, choose a data type if uploading a QIIME '
                           'mapping file'):
            sample_template_handler_post_request(1, user, mapping_fp)

        # Success: the handler returns only the created job and stores the
        # job info in redis
        result = sample_template_handler_post_request(
            1, user, 'uploaded_file.txt')
        self.assertCountEqual(result.keys(), ['job'])
        job_info = r_client.get('sample_template_1')
        self.assertIsNotNone(job_info)

        # Block until the job finishes so the test DB can be reset
        wait_for_processing_job(loads(job_info)['job_id'])

        # Success: direct upload of the mapping file with a data type
        result = sample_template_handler_post_request(
            1, user, mapping_fp, data_type='16S', direct_upload=True)
        self.assertCountEqual(result.keys(), ['job'])
        job_info = r_client.get('sample_template_1')
        self.assertIsNotNone(job_info)

        # Block until the job finishes so the test DB can be reset
        wait_for_processing_job(loads(job_info)['job_id'])
    def test_post_create_analysis_handler(self):
        """POST /analysis/create/ redirects to the new analysis page."""
        user = User('*****@*****.**')
        dflt_analysis = user.default_analysis
        dflt_analysis.add_samples(
            {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196',
                 '1.SKM9.640192', '1.SKM4.640180']})
        args = {'name': 'New Test Analysis',
                'description': 'Test Analysis Description'}
        response = self.post('/analysis/create/', args)
        # assertRegexpMatches is a deprecated alias removed in Python 3.12;
        # use the Python 3 name assertRegex
        self.assertRegex(
            response.effective_url,
            r"http://localhost:\d+/analysis/description/\d+/")
        self.assertEqual(response.code, 200)

        # The new analysis id is located at the -2 position (see regex above)
        new_id = response.effective_url.split('/')[-2]
        a = Analysis(new_id)
        # Make sure that all jobs have completed before we exit this tests
        for j in a.jobs:
            wait_for_processing_job(j.id)
Beispiel #10
0
 def _wait_for_jobs(self, analysis):
     """Block until every job of *analysis* finishes, printing any error."""
     for job in analysis.jobs:
         wait_for_processing_job(job.id)
         if job.status == 'error':
             print(job.log.msg)
Beispiel #11
0
 def _wait_for_parallel_job(self, key):
     """Block until the redis-tracked job stored under *key* completes.

     Needed so clean-up works: this is a distributed system, so all
     processes must be done before the test database is reset.
     """
     job_info = r_client.get(key)
     wait_for_processing_job(loads(job_info)['job_id'])
Beispiel #12
0
 def _wait_for_jobs(self, analysis):
     """Block until every job of *analysis* finishes, printing any error."""
     for j in analysis.jobs:
         wait_for_processing_job(j.id)
         if j.status == 'error':
             # 'print x' is Python 2 statement syntax (a SyntaxError in
             # Python 3); use the function form
             print(j.log.msg)
Beispiel #13
0
                             dumps({'BIOM': ['file']}),
                             'artifact_type': 'BIOM'}))
        job._set_status('running')

        payload = dumps({'success': False, 'error': 'Job failure'})
        obs = self.post(
            '/qiita_db/jobs/%s/complete/' % job.id,
            payload, headers=self.header)
        self.assertEqual(obs.code, 200)
<<<<<<< HEAD
        exp = {'success': True, 'error': ''}
        self.assertEqual(loads(obs.body), exp)
        job = qdb.processing_job.ProcessingJob(
            'bcc7ebcd-39c1-43e4-af2d-822e3589f14d')
=======
        wait_for_processing_job(job.id)
>>>>>>> 405cbef0c9f71c620da95a0c1ba6c7d3d588b3ed
        self.assertEqual(job.status, 'error')
        self.assertEqual(job.log,
                         qdb.logger.LogEntry.newest_records(numrecords=1)[0])
        self.assertEqual(job.log.msg, 'Job failure')

    def test_post_job_success(self):
        pt = npt.assert_warns(
            qdb.exceptions.QiitaDBWarning,
            qdb.metadata_template.prep_template.PrepTemplate.create,
            pd.DataFrame({'new_col': {'1.SKD6.640190': 1}}),
            qdb.study.Study(1), '16S')
        job = qdb.processing_job.ProcessingJob.create(
            qdb.user.User('*****@*****.**'),
            qdb.software.Parameters.load(
Beispiel #14
0
    def test_sample_template_handler_patch_request(self):
        """Exercises error handling and success of the PATCH handler for
        the 'remove' and 'replace' operations on sample information."""
        user = User('*****@*****.**')

        # Test user doesn't have access
        # assertRaisesRegexp is a removed Python 2 alias; use the Python 3
        # name assertRaisesRegex throughout
        with self.assertRaisesRegex(HTTPError,
                                    'User does not have access to study'):
            sample_template_handler_patch_request(
                User('*****@*****.**'), "remove",
                "/1/columns/season_environment/")

        # Test study doesn't exist
        with self.assertRaisesRegex(HTTPError, 'Study does not exist'):
            sample_template_handler_patch_request(
                user, "remove", "/10000/columns/season_environment/")

        # Test sample template doesn't exist
        new_study = self._create_study('Patching test')
        with self.assertRaisesRegex(HTTPError,
                                    "Study %s doesn't have sample information"
                                    % new_study.id):
            sample_template_handler_patch_request(
                user, "remove", "/%s/columns/season_environment/"
                                % new_study.id)

        # Test wrong operation value
        with self.assertRaisesRegex(
                HTTPError, 'Operation add not supported. Current supported '
                           'operations: remove.'):
            sample_template_handler_patch_request(
                user, 'add', '/1/columns/season_environment')

        # Test wrong path parameter < 2
        with self.assertRaisesRegex(HTTPError, 'Incorrect path parameter'):
            sample_template_handler_patch_request(user, 'ignored', '1')

        # TESTS FOR OPERATION: remove
        # Test wrong path parameter
        with self.assertRaisesRegex(HTTPError, 'Incorrect path parameter'):
            sample_template_handler_patch_request(
                user, 'remove', '/1/season_environment/')

        # Add sample information to the new study so we can delete one column
        # without affecting the other tests
        md = pd.DataFrame.from_dict(
            {'Sample1': {'col1': 'val1', 'col2': 'val2'}},
            orient='index', dtype=str)
        st = SampleTemplate.create(md, new_study)

        # Test success
        obs = sample_template_handler_patch_request(
            user, "remove", "/%s/columns/col2/"
                            % new_study.id)
        # dict.keys() returns a view in Python 3, never equal to a list;
        # compare contents instead
        self.assertCountEqual(obs.keys(), ['job'])
        job_info = r_client.get('sample_template_%s' % new_study.id)
        self.assertIsNotNone(job_info)

        # Wait until the job is done
        wait_for_processing_job(loads(job_info)['job_id'])
        self.assertNotIn('col2', st.categories())

        # TESTS FOR OPERATION: replace
        # Test incorrect path parameter with replace
        with self.assertRaisesRegex(HTTPError, 'Incorrect path parameter'):
            sample_template_handler_patch_request(user, "replace", "/1/")

        # Test attribute not found
        with self.assertRaisesRegex(HTTPError, 'Attribute name not found'):
            sample_template_handler_patch_request(user, "replace", "/1/name")

        # Test missing value
        with self.assertRaisesRegex(HTTPError,
                                    'Value is required when updating sample '
                                    'information'):
            sample_template_handler_patch_request(user, "replace", "/1/data")

        # Test file doesn't exist
        with self.assertRaisesRegex(HTTPError, 'Filepath not found'):
            sample_template_handler_patch_request(user, "replace", "/1/data",
                                                  req_value='DoesNotExist')

        # Test success
        obs = sample_template_handler_patch_request(
            user, "replace", "/1/data", req_value='uploaded_file.txt')
        self.assertCountEqual(obs.keys(), ['job'])
        job_info = r_client.get('sample_template_1')
        self.assertIsNotNone(job_info)

        # Wait until the job is done
        wait_for_processing_job(loads(job_info)['job_id'])
Beispiel #15
0
    def test_get_analysis_graph_handler(self):
        """Checks the analysis graph endpoint: a known analysis, a fresh
        analysis with two starting BIOMs, a workflow attached to one of its
        artifacts, and the error raised when two workflows exist."""
        # making sure we load the plugins
        activate_or_update_plugins(update=True)

        response = self.get('/analysis/description/1/graph/')
        self.assertEqual(response.code, 200)
        # The job id is randomly generated in the test environment. Gather
        # it here. There is only 1 job in the first artifact of the analysis
        job_id = Analysis(1).artifacts[0].jobs()[0].id
        obs = loads(response.body)
        exp = {
            'edges': [[8, job_id], [job_id, 9]],
            'nodes': [['job', 'job', job_id, 'Single Rarefaction', 'success'],
                      ['artifact', 'BIOM', 9, 'noname\n(BIOM)', 'artifact'],
                      ['artifact', 'BIOM', 8, 'noname\n(BIOM)', 'artifact']],
            'workflow':
            None
        }
        self.assertCountEqual(obs, exp)
        self.assertCountEqual(obs['edges'], exp['edges'])
        self.assertCountEqual(obs['nodes'], exp['nodes'])
        self.assertIsNone(obs['workflow'])

        # Create a new analysis with 2 starting BIOMs to be able to test
        # the different if statements of the request
        BaseHandler.get_current_user = Mock(
            return_value=User('*****@*****.**'))
        user = User('*****@*****.**')
        dflt_analysis = user.default_analysis
        dflt_analysis.add_samples({
            4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
            6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']
        })
        args = {'name': 'New Test Graph Analysis', 'description': 'Desc'}
        response = self.post('/analysis/create/', args)
        new_id = response.effective_url.split('/')[-2]
        a = Analysis(new_id)
        # Wait until all the jobs are done so the BIOM tables exist
        for j in a.jobs:
            wait_for_processing_job(j.id)

        artifacts = a.artifacts
        self.assertEqual(len(artifacts), 2)

        # Create a new workflow starting on the first artifact
        # Magic number 9 -> Summarize Taxa command
        params = Parameters.load(Command(9),
                                 values_dict={
                                     'metadata_category': 'None',
                                     'sort': 'False',
                                     'biom_table': artifacts[0].id
                                 })
        wf = ProcessingWorkflow.from_scratch(user, params)

        # There is only one job in the workflow
        job_id = list(wf.graph.nodes())[0].id

        response = self.get('/analysis/description/%s/graph/' % new_id)
        self.assertEqual(response.code, 200)
        obs = loads(response.body)
        exp = {
            'edges': [[artifacts[0].id, job_id],
                      [job_id, '%s:taxa_summary' % job_id]],
            'nodes':
            [['job', 'job', job_id, 'Summarize Taxa', 'in_construction'],
             [
                 'artifact', 'BIOM', artifacts[0].id, 'noname\n(BIOM)',
                 'artifact'
             ],
             [
                 'artifact', 'BIOM', artifacts[1].id, 'noname\n(BIOM)',
                 'artifact'
             ],
             [
                 'type', 'taxa_summary',
                 '%s:taxa_summary' % job_id, 'taxa_summary\n(taxa_summary)',
                 'type'
             ]],
            'workflow':
            wf.id
        }
        # Check that the keys are the same
        self.assertCountEqual(obs, exp)
        # Check the edges
        self.assertCountEqual(obs['edges'], exp['edges'])
        # Check the nodes
        self.assertCountEqual(obs['nodes'], exp['nodes'])
        # Check the workflow
        self.assertEqual(obs['workflow'], exp['workflow'])

        # Add a job to the second BIOM to make sure that the edges and nodes
        # are respected. Magic number 12 -> Single Rarefaction
        # NOTE(review): the comment says 12 but DefaultParameters(16) is
        # used; confirm which id is the intended Single Rarefaction preset
        job2 = wf.add(DefaultParameters(16),
                      req_params={
                          'depth': '100',
                          'biom_table': artifacts[1].id
                      })
        job_id_2 = job2.id

        response = self.get('/analysis/description/%s/graph/' % new_id)
        self.assertEqual(response.code, 200)
        obs = loads(response.body)
        exp = {
            'edges': [[artifacts[0].id, job_id],
                      [job_id, '%s:taxa_summary' % job_id],
                      [artifacts[1].id, job_id_2],
                      [job_id_2, '%s:rarefied_table' % job_id_2]],
            'nodes':
            [['job', 'job', job_id, 'Summarize Taxa', 'in_construction'],
             ['job', 'job', job_id_2, 'Single Rarefaction', 'in_construction'],
             [
                 'artifact', 'BIOM', artifacts[0].id, 'noname\n(BIOM)',
                 'artifact'
             ],
             [
                 'artifact', 'BIOM', artifacts[1].id, 'noname\n(BIOM)',
                 'artifact'
             ],
             [
                 'type', 'taxa_summary',
                 '%s:taxa_summary' % job_id, 'taxa_summary\n(taxa_summary)',
                 'type'
             ],
             [
                 'type', 'BIOM',
                 '%s:rarefied_table' % job_id_2, 'rarefied_table\n(BIOM)',
                 'type'
             ]],
            'workflow':
            wf.id
        }
        # Check that the keys are the same
        self.assertCountEqual(obs, exp)
        # Check the edges
        self.assertCountEqual(obs['edges'], exp['edges'])
        # Check the nodes
        self.assertCountEqual(obs['nodes'], exp['nodes'])
        # Check the workflow
        self.assertEqual(obs['workflow'], exp['workflow'])

        # Add a second Workflow to the second artifact to force the raise of
        # the error. This situation should never happen when using
        # the interface
        wf.remove(job2)
        params = Parameters.load(Command(9),
                                 values_dict={
                                     'metadata_category': 'None',
                                     'sort': 'False',
                                     'biom_table': artifacts[1].id
                                 })
        wf = ProcessingWorkflow.from_scratch(user, params)
        response = self.get('/analysis/description/%s/graph/' % new_id)
        self.assertEqual(response.code, 500)
Beispiel #16
0
 def _wait_for_parallel_job(self, key):
     """Block on the job whose info redis stores under *key*.

     Required for clean-up in this distributed system: every process must
     finish before the test database can be reset.
     """
     cached = r_client.get(key)
     wait_for_processing_job(loads(cached)['job_id'])
Beispiel #17
0
    def test_sample_template_handler_patch_request(self):
        """Exercises error handling and success of the PATCH handler for
        the 'remove' and 'replace' operations on sample information."""
        user = User('*****@*****.**')

        # Test user doesn't have access
        # assertRaisesRegexp is a removed Python 2 alias; use the Python 3
        # name assertRaisesRegex throughout
        with self.assertRaisesRegex(HTTPError,
                                    'User does not have access to study'):
            sample_template_handler_patch_request(
                User('*****@*****.**'), "remove",
                "/1/columns/season_environment/")

        # Test study doesn't exist
        with self.assertRaisesRegex(HTTPError, 'Study does not exist'):
            sample_template_handler_patch_request(
                user, "remove", "/10000/columns/season_environment/")

        # Test sample template doesn't exist
        new_study = self._create_study('Patching test')
        with self.assertRaisesRegex(
                HTTPError,
                "Study %s doesn't have sample information" % new_study.id):
            sample_template_handler_patch_request(
                user, "remove",
                "/%s/columns/season_environment/" % new_study.id)

        # Test wrong operation value
        with self.assertRaisesRegex(
                HTTPError, 'Operation add not supported. Current supported '
                'operations: remove.'):
            sample_template_handler_patch_request(
                user, 'add', '/1/columns/season_environment')

        # Test wrong path parameter < 2
        with self.assertRaisesRegex(HTTPError, 'Incorrect path parameter'):
            sample_template_handler_patch_request(user, 'ignored', '1')

        # TESTS FOR OPERATION: remove
        # Test wrong path parameter
        with self.assertRaisesRegex(HTTPError, 'Incorrect path parameter'):
            sample_template_handler_patch_request(user, 'remove',
                                                  '/1/season_environment/')

        # Add sample information to the new study so we can delete one column
        # without affecting the other tests
        md = pd.DataFrame.from_dict(
            {'Sample1': {
                'col1': 'val1',
                'col2': 'val2'
            }},
            orient='index',
            dtype=str)
        st = SampleTemplate.create(md, new_study)

        # Test success
        obs = sample_template_handler_patch_request(
            user, "remove", "/%s/columns/col2/" % new_study.id)
        # dict.keys() returns a view in Python 3, never equal to a list;
        # compare contents instead
        self.assertCountEqual(obs.keys(), ['job'])
        job_info = r_client.get('sample_template_%s' % new_study.id)
        self.assertIsNotNone(job_info)

        # Wait until the job is done
        wait_for_processing_job(loads(job_info)['job_id'])
        self.assertNotIn('col2', st.categories())

        # TESTS FOR OPERATION: replace
        # Test incorrect path parameter with replace
        with self.assertRaisesRegex(HTTPError, 'Incorrect path parameter'):
            sample_template_handler_patch_request(user, "replace", "/1/")

        # Test attribute not found
        with self.assertRaisesRegex(HTTPError, 'Attribute name not found'):
            sample_template_handler_patch_request(user, "replace", "/1/name")

        # Test missing value
        with self.assertRaisesRegex(
                HTTPError, 'Value is required when updating sample '
                'information'):
            sample_template_handler_patch_request(user, "replace", "/1/data")

        # Test file doesn't exist
        with self.assertRaisesRegex(HTTPError, 'Filepath not found'):
            sample_template_handler_patch_request(user,
                                                  "replace",
                                                  "/1/data",
                                                  req_value='DoesNotExist')

        # Test success
        obs = sample_template_handler_patch_request(
            user, "replace", "/1/data", req_value='uploaded_file.txt')
        self.assertCountEqual(obs.keys(), ['job'])
        job_info = r_client.get('sample_template_1')
        self.assertIsNotNone(job_info)

        # Wait until the job is done
        wait_for_processing_job(loads(job_info)['job_id'])
Beispiel #18
0
    def test_get_analysis_graph_handler(self):
        """Checks the analysis graph endpoint: a known analysis, a fresh
        analysis with two starting BIOMs, a workflow attached to one of its
        artifacts, and the error raised when two workflows exist."""
        response = self.get('/analysis/description/1/graph/')
        self.assertEqual(response.code, 200)
        # The job id is randomly generated in the test environment. Gather
        # it here. There is only 1 job in the first artifact of the analysis
        job_id = Analysis(1).artifacts[0].jobs()[0].id
        obs = loads(response.body)
        exp = {'edges': [[8, job_id], [job_id, 9]],
               'nodes': [
                    ['job', 'job', job_id, 'Single Rarefaction', 'success'],
                    ['artifact', 'BIOM', 9, 'noname\n(BIOM)', 'artifact'],
                    ['artifact', 'BIOM', 8, 'noname\n(BIOM)', 'artifact']],
               'workflow': None}
        # assertItemsEqual does not exist in Python 3's unittest; use the
        # renamed assertCountEqual throughout
        self.assertCountEqual(obs, exp)
        self.assertCountEqual(obs['edges'], exp['edges'])
        self.assertCountEqual(obs['nodes'], exp['nodes'])
        self.assertIsNone(obs['workflow'])

        # Create a new analysis with 2 starting BIOMs to be able to test
        # the different if statements of the request
        BaseHandler.get_current_user = Mock(
            return_value=User('*****@*****.**'))
        user = User('*****@*****.**')
        dflt_analysis = user.default_analysis
        dflt_analysis.add_samples(
            {4: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196'],
             6: ['1.SKB8.640193', '1.SKD8.640184', '1.SKB7.640196']})
        args = {'name': 'New Test Graph Analysis', 'description': 'Desc'}
        response = self.post('/analysis/create/', args)
        new_id = response.effective_url.split('/')[-2]
        a = Analysis(new_id)
        # Wait until all the jobs are done so the BIOM tables exist
        for j in a.jobs:
            wait_for_processing_job(j.id)

        artifacts = a.artifacts
        self.assertEqual(len(artifacts), 2)

        # Create a new workflow starting on the first artifact
        # Magic number 9 -> Summarize Taxa command
        params = Parameters.load(
            Command(9), values_dict={'metadata_category': 'None',
                                     'sort': 'False',
                                     'biom_table': artifacts[0].id})
        wf = ProcessingWorkflow.from_scratch(user, params)

        # There is only one job in the workflow. networkx >= 2 returns a
        # non-subscriptable NodeView from Graph.nodes(); materialize it as
        # a list before indexing
        job_id = list(wf.graph.nodes())[0].id

        response = self.get('/analysis/description/%s/graph/' % new_id)
        self.assertEqual(response.code, 200)
        obs = loads(response.body)
        exp = {'edges': [[artifacts[0].id, job_id],
                         [job_id, '%s:taxa_summary' % job_id]],
               'nodes': [
                    ['job', 'job', job_id, 'Summarize Taxa',
                     'in_construction'],
                    ['artifact', 'BIOM', artifacts[0].id, 'noname\n(BIOM)',
                     'artifact'],
                    ['artifact', 'BIOM', artifacts[1].id, 'noname\n(BIOM)',
                     'artifact'],
                    ['type', 'taxa_summary', '%s:taxa_summary' % job_id,
                     'taxa_summary\n(taxa_summary)', 'type']],
               'workflow': wf.id}
        # Check that the keys are the same
        self.assertCountEqual(obs, exp)
        # Check the edges
        self.assertCountEqual(obs['edges'], exp['edges'])
        # Check the nodes
        self.assertCountEqual(obs['nodes'], exp['nodes'])
        # Check the workflow
        self.assertEqual(obs['workflow'], exp['workflow'])

        # Add a job to the second BIOM to make sure that the edges and nodes
        # are respected. Magic number 12 -> Single Rarefaction
        # NOTE(review): the comment says 12 but DefaultParameters(16) is
        # used; confirm which id is the intended Single Rarefaction preset
        job2 = wf.add(
            DefaultParameters(16), req_params={'depth': '100',
                                               'biom_table': artifacts[1].id})
        job_id_2 = job2.id

        response = self.get('/analysis/description/%s/graph/' % new_id)
        self.assertEqual(response.code, 200)
        obs = loads(response.body)
        exp = {'edges': [[artifacts[0].id, job_id],
                         [job_id, '%s:taxa_summary' % job_id],
                         [artifacts[1].id, job_id_2],
                         [job_id_2, '%s:rarefied_table' % job_id_2]],
               'nodes': [
                    ['job', 'job', job_id, 'Summarize Taxa',
                     'in_construction'],
                    ['job', 'job', job_id_2, 'Single Rarefaction',
                     'in_construction'],
                    ['artifact', 'BIOM', artifacts[0].id, 'noname\n(BIOM)',
                     'artifact'],
                    ['artifact', 'BIOM', artifacts[1].id, 'noname\n(BIOM)',
                     'artifact'],
                    ['type', 'taxa_summary', '%s:taxa_summary' % job_id,
                     'taxa_summary\n(taxa_summary)', 'type'],
                    ['type', 'BIOM', '%s:rarefied_table' % job_id_2,
                     'rarefied_table\n(BIOM)', 'type']],
               'workflow': wf.id}
        # Check that the keys are the same
        self.assertCountEqual(obs, exp)
        # Check the edges
        self.assertCountEqual(obs['edges'], exp['edges'])
        # Check the nodes
        self.assertCountEqual(obs['nodes'], exp['nodes'])
        # Check the workflow
        self.assertEqual(obs['workflow'], exp['workflow'])

        # Add a second Workflow to the second artifact to force the raise of
        # the error. This situation should never happen when using
        # the interface
        wf.remove(job2)
        params = Parameters.load(
            Command(9), values_dict={'metadata_category': 'None',
                                     'sort': 'False',
                                     'biom_table': artifacts[1].id})
        wf = ProcessingWorkflow.from_scratch(user, params)
        response = self.get('/analysis/description/%s/graph/' % new_id)
        self.assertEqual(response.code, 500)