Esempio n. 1
0
    def post(self):
        """Fetches archived feature values for the requested job.

        Returns
        -------
        dict
            The contents of the analysis keyed by sample id

        Notes
        -----
            The "job_id" argument is the Qiita job id from which the
            merging scheme is inferred. The "features" argument is a list
            of feature identifiers (e.g. Deblur sequences); identifiers
            not present in the archive are omitted from the response.
        """
        requested_job = self.get_argument('job_id')
        requested_features = self.request.arguments['features']

        merging_scheme = Archive.get_merging_scheme_from_job(
            ProcessingJob(requested_job))
        values = Archive.retrieve_feature_values(
            archive_merging_scheme=merging_scheme,
            features=requested_features)

        self.write(values)
Esempio n. 2
0
def execute(job_id):
    """Launches a job through the plugin system.

    Parameters
    ----------
    job_id : str
        The id of the job to execute
    """
    # Resolve the job and the plugin (software) that implements its command
    job = ProcessingJob(job_id)
    work_dir = join(get_work_base_dir(), job.id)
    plugin = job.command.software

    # Assemble the launcher invocation; every argument is quoted
    launch_cmd = '%s "%s" "%s" "%s" "%s" "%s"' % (
        qiita_config.plugin_launcher, plugin.environment_script,
        plugin.start_script, qiita_config.base_url, job.id, work_dir)

    # Run the launcher and record any start-up failure on the job
    out, err, exit_code = system_call(launch_cmd)
    if exit_code != 0:
        job.status = 'error'
        job.log = LogEntry.create(
            'Runtime',
            "Error starting plugin '%s':\nStd output:%s\nStd error:%s"
            % (plugin.name, out, err))
Esempio n. 3
0
def prep_template_jobs_get_req(prep_id, user_id):
    """Collects job status information for a prep template.

    Parameters
    ----------
    prep_id : int
        Prep template ID to get graph for
    user_id : str
        User making the request

    Returns
    -------
    dict with the jobs information

    Notes
    -----
    Nodes are identified by the corresponding Artifact ID.
    """
    prep = PrepTemplate(int(prep_id))
    denied = check_access(prep.study_id, user_id)
    if denied:
        return denied

    result = {}
    raw_info = r_client.get(PREP_TEMPLATE_KEY_FORMAT % prep_id)
    if raw_info:
        # missing keys default to the empty string
        info = defaultdict(lambda: '', loads(raw_info))
        job = ProcessingJob(info['job_id'])
        log = job.log
        result[job.id] = {'status': job.status, 'step': job.step,
                          'error': log.msg if log else ""}

    return result
Esempio n. 4
0
def job_ajax_get_req(job_id):
    """Builds the information dictionary for a single job.

    Parameters
    ----------
    job_id : str
        The job id

    Returns
    -------
    dict of objects
        A dictionary containing the job information
        {'status': str,
         'message': str,
         'job_id': str,
         'job_status': str,
         'job_step': str,
         'job_parameters': dict of {str: str}}
    """
    job = ProcessingJob(job_id)
    info = {'status': 'success', 'message': ''}
    info['job_id'] = job.id
    info['job_status'] = job.status
    info['job_step'] = job.step
    info['job_parameters'] = job.parameters.values
    return info
Esempio n. 5
0
def wait_for_processing_job(job_id):
    """Blocks until a processing job reaches a terminal state.

    Parameters
    ----------
    job_id : str
        Job id
    """
    poll_interval = 0.05
    job = ProcessingJob(job_id)
    while job.status not in {'success', 'error'}:
        sleep(poll_interval)
    # one extra pause after completion (mirrors the polling interval;
    # original gives no rationale — presumably lets bookkeeping settle)
    sleep(poll_interval)
Esempio n. 6
0
    def test_artifact_post_request(self):
        """Artifact delete request: rejected without access; with access the
        delete job runs and its failure is recorded in the job log."""
        # No access
        with self.assertRaises(QiitaHTTPError):
            artifact_post_req(User('*****@*****.**'), 1)

        artifact_post_req(User('*****@*****.**'), 2)
        # Wait until the job is completed
        wait_for_prep_information_job(1)
        # Check that the delete function has been actually called
        job = ProcessingJob(loads(r_client.get('prep_template_1'))['job_id'])
        # presumably artifact 2 cannot be deleted in the test fixture;
        # the specific error message below proves the delete was attempted
        self.assertEqual(job.status, 'error')
        self.assertIn('Cannot delete artifact 2', job.log.msg)
Esempio n. 7
0
File: upload.py Progetto: jlab/qiita
    def display_template(self, study_id, msg):
        """Renders the upload page; shared by several handlers."""
        study_id = int(study_id)
        study = Study(study_id)
        user = self.current_user
        check_access(user, study, no_public=True, raise_error=True)

        # Defaults used when no upload-related job is tracked in redis
        level, message = 'info', ''
        remote_url, remote_files = '', []

        job_info = r_client.get(UPLOAD_STUDY_FORMAT % study_id)
        if job_info:
            job_info = defaultdict(lambda: '', loads(job_info))
            job = ProcessingJob(job_info['job_id'])
            status = job.status
            url = job.parameters.values['url']
            if status not in ('success', 'error'):
                # still running: report which remote operation is ongoing
                if job.command.name == 'list_remote_files':
                    message = 'Retrieving remote files: listing %s' % url
                else:
                    message = 'Retrieving remote files: download %s' % url
            elif status == 'error':
                level = 'danger'
                message = job.log.msg.replace('\n', '</br>')
                # making errors nicer for users
                if 'No such file' in message:
                    message = 'URL not valid: <i>%s</i>, please review.' % url
            else:
                remote_url = job_info['url']
                remote_files = job_info['files']
                level = job_info['alert_type']
                message = job_info['alert_msg'].replace('\n', '</br>')

        # getting the ontologies
        self.render('upload.html',
                    study_title=study.title,
                    study_info=study.info,
                    study_id=study_id,
                    is_admin=user.level == 'admin',
                    extensions=','.join(qiita_config.valid_upload_extension),
                    max_upload_size=qiita_config.max_upload_size,
                    level=level,
                    message=message,
                    remote_url=remote_url,
                    remote_files=remote_files,
                    files=get_files_from_uploads_folders(str(study_id)))
Esempio n. 8
0
def wait_for_processing_job(job_id):
    """Polls a processing job until it completes.

    Parameters
    ----------
    job_id : str
        Job id
    """
    poll_interval = 0.8
    job = ProcessingJob(job_id)
    while job.status not in {'success', 'error'}:
        sleep(poll_interval)
    if job.status == 'error':
        # printing the failure message is useful for debugging
        print("==> %s: %s" % (job.id, job.log.msg))
    sleep(poll_interval)
Esempio n. 9
0
    def patch(self):
        """Stores or updates feature information in the archive.

        Notes
        -----
            The "path" argument must be a Qiita job_id, from which the
            merging scheme is inferred. The "value" argument is a JSON
            string (i.e. the result of a json.dump(obj) of a dictionary)
            keyed by feature identifiers.
        """
        path_arg = self.get_argument('path')
        value_arg = self.get_argument('value')

        scheme = Archive.get_merging_scheme_from_job(ProcessingJob(path_arg))
        inserted = Archive.insert_features(scheme, loads(value_arg))
        self.write(inserted)
Esempio n. 10
0
    def test_patch(self):
        """PATCH /study/process/job/: removing an 'in_construction' job is
        rejected; removal succeeds once the job is in 'error' status."""
        # Create a new job - through a workflow since that is the only way
        # of creating jobs in the interface
        exp_command = Command(1)
        json_str = (
            '{"input_data": 1, "max_barcode_errors": 1.5, '
            '"barcode_type": "golay_12", "max_bad_run_length": 3, '
            '"rev_comp": false, "phred_quality_threshold": 3, '
            '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
            '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0}')
        exp_params = Parameters.load(exp_command, json_str=json_str)
        exp_user = User('*****@*****.**')
        name = "Test processing workflow"

        # build a workflow so there is a job (graph node) to operate on
        wf = ProcessingWorkflow.from_scratch(exp_user,
                                             exp_params,
                                             name=name,
                                             force=True)

        graph = wf.graph
        nodes = graph.nodes()
        job_id = nodes[0].id

        # removing a job that is still 'in_construction' must be rejected
        response = self.patch('/study/process/job/', {
            'op': 'remove',
            'path': job_id
        })
        self.assertEqual(response.code, 200)
        exp = {
            'status':
            'error',
            'message':
            "Can't delete job %s. It is 'in_construction' "
            "status. Please use /study/process/workflow/" % job_id
        }
        self.assertEqual(loads(response.body), exp)

        # Test success: once the job is in 'error' status it can be removed
        ProcessingJob(job_id)._set_error('Killed for testing')
        response = self.patch('/study/process/job/', {
            'op': 'remove',
            'path': job_id
        })
        self.assertEqual(response.code, 200)
        exp = {'status': 'success', 'message': ''}
        self.assertEqual(loads(response.body), exp)
Esempio n. 11
0
    def get(self):
        """Renders the analyses listing page for the current user.

        Gathers the user's private and shared analyses plus the public
        ones, and surfaces any pending or failed analysis-deletion jobs
        (tracked in redis) as alert messages.
        """
        user = self.current_user
        is_local_request = is_localhost(self.request.headers['host'])

        uanalyses = user.shared_analyses | user.private_analyses
        user_analysis_ids = set(a.id for a in uanalyses)

        panalyses = Analysis.get_by_status('public')
        # public analyses the user already has access to are not repeated
        public_analysis_ids = set(a.id for a in panalyses) - user_analysis_ids

        user_analyses = generate_analysis_list(user_analysis_ids)
        public_analyses = generate_analysis_list(public_analysis_ids, True)

        dlop = partial(download_link_or_path, is_local_request)

        messages = {'info': '', 'danger': ''}
        for analysis_id in user_analysis_ids:
            job_info = r_client.get('analysis_delete_%d' % analysis_id)
            if not job_info:
                continue
            job_info = defaultdict(lambda: '', loads(job_info))
            job = ProcessingJob(job_info['job_id'])
            job_status = job.status
            if job_status not in ('success', 'error'):
                messages['info'] += ('Analysis %s is being deleted<br/>' %
                                     analysis_id)
            elif job_status == 'error':
                messages['danger'] += (job.log.msg.replace('\n', '<br/>') +
                                       '<br/>')
            else:
                # BUG FIX: unseen alert types were initialized to a list
                # and then had a string +='d onto them, which extended the
                # list with individual characters; accumulate a string.
                if job_info['alert_type'] not in messages:
                    messages[job_info['alert_type']] = ''
                # NOTE(review): on success job.log may be None (sibling
                # handlers read job_info['alert_msg'] here) — confirm.
                messages[job_info['alert_type']] += (
                    job.log.msg.replace('\n', '<br/>') + '<br/>')

        self.render("list_analyses.html",
                    user_analyses=user_analyses,
                    public_analyses=public_analyses,
                    messages=messages,
                    dlop=dlop)
Esempio n. 12
0
def job_ajax_get_req(job_id):
    """Builds the extended information dictionary for a single job.

    Parameters
    ----------
    job_id : str
        The job id

    Returns
    -------
    dict of objects
        A dictionary containing the job information
        {'status': str,
         'message': str,
         'job_id': str,
         'job_status': str,
         'job_step': str,
         'job_parameters': dict of {str: str}}
    """
    job = ProcessingJob(job_id)
    command = job.command
    software = command.software
    log_entry = job.log
    return {
        'status': 'success',
        'message': '',
        'job_id': job.id,
        'job_external_id': job.external_id,
        'job_status': job.status,
        'job_step': job.step,
        'job_parameters': job.parameters.values,
        'job_error': log_entry.msg if log_entry is not None else None,
        'command': command.name,
        'command_description': command.description,
        'software': software.name,
        'software_version': software.version
    }
Esempio n. 13
0
def get_sample_template_processing_status(st_id):
    """Looks up the redis-tracked processing state of a sample template.

    Parameters
    ----------
    st_id : int
        The sample template id

    Returns
    -------
    (bool, str, str)
        Whether the template is still processing, the alert type and the
        alert message
    """
    raw = r_client.get(SAMPLE_TEMPLATE_KEY_FORMAT % st_id)
    if not raw:
        # no job tracked for this template
        return False, '', ''

    info = defaultdict(lambda: '', loads(raw))
    job = ProcessingJob(info['job_id'])
    status = job.status
    processing = status not in ('success', 'error')
    if processing:
        return (processing, 'info',
                'This sample template is currently being processed')
    if status == 'error':
        return processing, 'danger', job.log.msg.replace('\n', '</br>')
    return (processing, info['alert_type'],
            info['alert_msg'].replace('\n', '</br>'))
Esempio n. 14
0
def workflow_handler_patch_req(req_op,
                               req_path,
                               req_value=None,
                               req_from=None):
    """Patches a workflow

    Parameters
    ----------
    req_op : str
        The operation to perform on the workflow ('add' or 'remove')
    req_path : str
        Path parameter with the workflow to patch
    req_value : str, optional
        The value that needs to be modified
    req_from : str, optional
        The original path of the element

    Returns
    -------
    dict of {str: str}
        A dictionary of the form: {'status': str, 'message': str} in which
        status is the status of the request ('error' or 'success') and message
        is a human readable string with the error message in case that status
        is 'error'.
    """
    if req_op == 'add':
        # the path must contain exactly one element: the workflow id
        req_path = [v for v in req_path.split('/') if v]
        if len(req_path) != 1:
            return {'status': 'error', 'message': 'Incorrect path parameter'}
        req_path = req_path[0]
        try:
            wf = ProcessingWorkflow(req_path)
        except QiitaDBUnknownIDError:
            return {
                'status': 'error',
                'message': 'Workflow %s does not exist' % req_path
            }

        req_value = loads(req_value)
        dflt_params = DefaultParameters(req_value['dflt_params'])
        req_params = req_value.get('req_params', None)
        opt_params = req_value.get('opt_params', None)
        # connections maps each parent job to its output -> input wiring
        connections = {
            ProcessingJob(k): v
            for k, v in req_value['connections'].items()
        }
        job = wf.add(dflt_params,
                     connections=connections,
                     req_params=req_params,
                     opt_params=opt_params)
        job_cmd = job.command
        return {
            'status': 'success',
            'message': '',
            'job': {
                'id': job.id,
                'inputs': list(req_value['connections'].keys()),
                'label': job_cmd.name,
                'outputs': job_cmd.outputs
            }
        }
    elif req_op == 'remove':
        # the path must contain the workflow id and the job id
        req_path = [v for v in req_path.split('/') if v]
        if len(req_path) != 2:
            return {'status': 'error', 'message': 'Incorrect path parameter'}
        wf_id = req_path[0]
        job_id = req_path[1]
        wf = ProcessingWorkflow(wf_id)
        job = ProcessingJob(job_id)
        wf.remove(job, cascade=True)
        return {'status': 'success', 'message': ''}
    else:
        # BUG FIX: the message previously advertised only 'add' even though
        # 'remove' is implemented above
        return {
            'status':
            'error',
            'message':
            'Operation "%s" not supported. Current supported '
            'operations: add, remove' % req_op
        }
Esempio n. 15
0
    def test_complete_job(self):
        """Exercises the 'complete_job' private task for three outcomes:
        successful completion that registers an artifact, a failure
        reported by the plugin, and an internal error while creating the
        artifacts."""
        # Complete success
        pt = npt.assert_warns(QiitaDBWarning, PrepTemplate.create,
                              pd.DataFrame({'new_col': {
                                  '1.SKD6.640190': 1
                              }}), Study(1), '16S')
        c_job = ProcessingJob.create(
            User('*****@*****.**'),
            Parameters.load(Command.get_validator('BIOM'),
                            values_dict={
                                'template': pt.id,
                                'files': dumps({'BIOM': ['file']}),
                                'artifact_type': 'BIOM'
                            }), True)
        c_job._set_status('running')
        # create a placeholder file to act as the job's biom output
        fd, fp = mkstemp(suffix='_table.biom')
        close(fd)
        with open(fp, 'w') as f:
            f.write('\n')
        self._clean_up_files.append(fp)
        # completing the job should add exactly one artifact
        exp_artifact_count = get_count('qiita.artifact') + 1
        payload = dumps({
            'success': True,
            'error': '',
            'artifacts': {
                'OTU table': {
                    'filepaths': [(fp, 'biom')],
                    'artifact_type': 'BIOM'
                }
            }
        })
        job = self._create_job('complete_job', {
            'job_id': c_job.id,
            'payload': payload
        })
        private_task(job.id)
        self.assertEqual(job.status, 'success')
        self.assertEqual(c_job.status, 'success')
        self.assertEqual(get_count('qiita.artifact'), exp_artifact_count)

        # Complete job error: the plugin reports failure, so the target job
        # is flagged as error and the message ends up in the log
        payload = dumps({'success': False, 'error': 'Job failure'})
        job = self._create_job('complete_job', {
            'job_id': 'bcc7ebcd-39c1-43e4-af2d-822e3589f14d',
            'payload': payload
        })
        private_task(job.id)
        self.assertEqual(job.status, 'success')
        c_job = ProcessingJob('bcc7ebcd-39c1-43e4-af2d-822e3589f14d')
        self.assertEqual(c_job.status, 'error')
        self.assertEqual(c_job.log, LogEntry.newest_records(numrecords=1)[0])
        self.assertEqual(c_job.log.msg, 'Job failure')

        # Complete internal error: the plugin claims success but points to
        # a non-existent filepath, so artifact creation fails server-side
        pt = npt.assert_warns(QiitaDBWarning, PrepTemplate.create,
                              pd.DataFrame({'new_col': {
                                  '1.SKD6.640190': 1
                              }}), Study(1), '16S')
        c_job = ProcessingJob.create(
            User('*****@*****.**'),
            Parameters.load(Command.get_validator('BIOM'),
                            values_dict={
                                'template': pt.id,
                                'files': dumps({'BIOM': ['file']}),
                                'artifact_type': 'BIOM'
                            }), True)
        c_job._set_status('running')
        fp = '/surprised/if/this/path/exists.biom'
        payload = dumps({
            'success': True,
            'error': '',
            'artifacts': {
                'OTU table': {
                    'filepaths': [(fp, 'biom')],
                    'artifact_type': 'BIOM'
                }
            }
        })
        job = self._create_job('complete_job', {
            'job_id': c_job.id,
            'payload': payload
        })
        private_task(job.id)
        self.assertEqual(job.status, 'success')
        self.assertEqual(c_job.status, 'error')
        self.assertIn('No such file or directory', c_job.log.msg)
Esempio n. 16
0
    def test_job_ajax_patch_req(self):
        """Covers the error paths of job_ajax_patch_req ('remove' with bad
        paths, unknown ids, wrong job status, unsupported ops) plus the
        successful removal of a job in 'error' status."""
        # Create a new job - through a workflow since that is the only way
        # of creating jobs in the interface
        exp_command = Command(1)
        json_str = (
            '{"input_data": 1, "max_barcode_errors": 1.5, '
            '"barcode_type": "golay_12", "max_bad_run_length": 3, '
            '"rev_comp": false, "phred_quality_threshold": 3, '
            '"rev_comp_barcode": false, "rev_comp_mapping_barcodes": false, '
            '"min_per_read_length_fraction": 0.75, "sequence_max_n": 0}')
        exp_params = Parameters.load(exp_command, json_str=json_str)
        exp_user = User('*****@*****.**')
        name = "Test processing workflow"

        # build a workflow whose single job we can patch
        wf = ProcessingWorkflow.from_scratch(exp_user,
                                             exp_params,
                                             name=name,
                                             force=True)

        graph = wf.graph
        nodes = list(graph.nodes())
        job_id = nodes[0].id

        # Incorrect path parameter
        obs = job_ajax_patch_req('remove', '/%s/somethingelse' % job_id)
        exp = {
            'status': 'error',
            'message': 'Incorrect path parameter: missing job id'
        }
        self.assertEqual(obs, exp)

        obs = job_ajax_patch_req('remove', '/')
        exp = {
            'status': 'error',
            'message': 'Incorrect path parameter: missing job id'
        }
        self.assertEqual(obs, exp)

        # Job id is not like a job id
        obs = job_ajax_patch_req('remove', '/notAJobId')
        exp = {
            'status':
            'error',
            'message':
            'Incorrect path parameter: '
            'notAJobId is not a recognized job id'
        }
        self.assertEqual(obs, exp)

        # Job doesn't exist
        obs = job_ajax_patch_req('remove',
                                 '/6d368e16-2242-4cf8-87b4-a5dc40bc890b')
        exp = {
            'status':
            'error',
            'message':
            'Incorrect path parameter: '
            '6d368e16-2242-4cf8-87b4-a5dc40bc890b is not a '
            'recognized job id'
        }
        self.assertEqual(obs, exp)

        # in_construction job
        obs = job_ajax_patch_req('remove', '/%s' % job_id)
        exp = {
            'status':
            'error',
            'message':
            "Can't delete job %s. It is 'in_construction' "
            "status. Please use /study/process/workflow/" % job_id
        }
        self.assertEqual(obs, exp)

        # job status != 'error'
        job = ProcessingJob(job_id)
        job._set_status('queued')
        obs = job_ajax_patch_req('remove', '/%s' % job_id)
        exp = {
            'status': 'error',
            'message': 'Only jobs in "error" status can be deleted.'
        }
        self.assertEqual(obs, exp)

        # Operation not supported
        job._set_status('queued')
        obs = job_ajax_patch_req('add', '/%s' % job_id)
        exp = {
            'status':
            'error',
            'message':
            'Operation "add" not supported. Current supported '
            'operations: remove'
        }
        self.assertEqual(obs, exp)

        # Test success: jobs in 'error' status may be removed
        job._set_error('Killed for testing')
        obs = job_ajax_patch_req('remove', '/%s' % job_id)
        exp = {'status': 'success', 'message': ''}
        self.assertEqual(obs, exp)
Esempio n. 17
0
def prep_template_ajax_get_req(user_id, prep_id):
    """Returns the prep template information needed for the AJAX handler

    Parameters
    ----------
    user_id : str
        The user id
    prep_id : int
        The prep template id

    Returns
    -------
    dict of {str: object}
        A dictionary with the following keys:
        - status: str, whether the request is successful or not
        - message: str, if the request is unsuccessful, a human readable error
        - name: str, the name of the prep template
        - files: list of str, the files available to update the prep template
        - download_prep: int, the filepath_id of the prep file
        - download_qiime, int, the filepath_id of the qiime mapping file
        - num_samples: int, the number of samples present in the template
        - num_columns: int, the number of columns present in the template
        - investigation_type: str, the investigation type of the template
        - ontology: str, dict of {str, list of str} containing the information
        of the ENA ontology
        - artifact_attached: bool, whether the template has an artifact
        attached
        - study_id: int, the study id of the template
        - editable: bool, whether the user can edit the template
        - data_type: str, the data type of the template
        - alert_type: str, the level of the alert to display (if any)
        - alert_message: str, the text of the alert to display (if any)
        - is_submitted_to_ebi: bool, whether the template was sent to EBI
    """
    # Currently there is no name attribute, but it will be soon
    name = "Prep information %d" % prep_id
    pt = PrepTemplate(prep_id)

    # redis tracks the last job that touched this prep template
    job_info = r_client.get(PREP_TEMPLATE_KEY_FORMAT % prep_id)
    if job_info:
        job_info = loads(job_info)
        job_id = job_info['job_id']
        if job_id:
            if job_info['is_qiita_job']:
                # job state lives in the Qiita processing-job tables
                job = ProcessingJob(job_id)
                processing = job.status in ('queued', 'running')
                success = job.status == 'success'
                alert_type = 'info' if processing or success else 'danger'
                alert_msg = (job.log.msg.replace('\n', '</br>')
                             if job.log is not None else "")
            else:
                # non-Qiita jobs store their state directly in redis
                redis_info = loads(r_client.get(job_id))
                processing = redis_info['status_msg'] == 'Running'
                success = redis_info['status_msg'] == 'Success'
                if redis_info['return'] is not None:
                    alert_type = redis_info['return']['status']
                    alert_msg = redis_info['return']['message'].replace(
                        '\n', '</br>')
                else:
                    alert_type = 'info'
                    alert_msg = ''

            if processing:
                alert_type = 'info'
                alert_msg = 'This prep template is currently being updated'
            elif success:
                # on success, clear the job id in redis but keep the alert
                # so it is shown once more on the next page load
                payload = {
                    'job_id': None,
                    'status': alert_type,
                    'message': alert_msg,
                    'is_qiita_job': job_info['is_qiita_job']
                }
                r_client.set(PREP_TEMPLATE_KEY_FORMAT % prep_id,
                             dumps(payload))
        else:
            # no active job: redis holds a previously stored alert
            processing = False
            alert_type = job_info['status']
            alert_msg = job_info['message'].replace('\n', '</br>')
    else:
        processing = False
        alert_type = ''
        alert_msg = ''

    artifact_attached = pt.artifact is not None
    study_id = pt.study_id
    # only tab-separated text files in the upload folders can be used to
    # update the prep template
    files = [
        f for _, f in get_files_from_uploads_folders(study_id)
        if f.endswith(('.txt', '.tsv'))
    ]

    # The call to list is needed because keys is an iterator
    num_samples = len(list(pt.keys()))
    num_columns = len(pt.categories())
    investigation_type = pt.investigation_type

    # Retrieve the information to download the prep template and QIIME
    # mapping file. See issue https://github.com/biocore/qiita/issues/1675
    download_prep = []
    download_qiime = []
    for fp_id, fp in pt.get_filepaths():
        if 'qiime' in basename(fp):
            download_qiime.append(fp_id)
        else:
            download_prep.append(fp_id)
    # NOTE(review): assumes at least one prep file and one qiime mapping
    # file exist, otherwise this raises IndexError — confirm invariant
    download_prep = download_prep[0]
    download_qiime = download_qiime[0]

    ontology = _get_ENA_ontology()

    editable = Study(study_id).can_edit(User(user_id)) and not processing

    return {
        'status': 'success',
        'message': '',
        'name': name,
        'files': files,
        'download_prep': download_prep,
        'download_qiime': download_qiime,
        'num_samples': num_samples,
        'num_columns': num_columns,
        'investigation_type': investigation_type,
        'ontology': ontology,
        'artifact_attached': artifact_attached,
        'study_id': study_id,
        'editable': editable,
        'data_type': pt.data_type(),
        'alert_type': alert_type,
        'is_submitted_to_ebi': pt.is_submitted_to_ebi,
        'alert_message': alert_msg
    }
Esempio n. 18
0
def study_get_req(study_id, user_id):
    """Returns information available for the given study

    Parameters
    ----------
    study_id : int
        Study id to get prep template info for
    user_id : str
        User requesting the info

    Returns
    -------
    dict
        {'status': str,
         'message': str,
         'study_info': dict of objects,
         'editable': bool}
        status can be success, warning, or error depending on result;
        message has the warnings or errors; study_info contains the study
        information in the form {col_name: value, ...} with value being a
        string, int, or list of strings or ints
    """
    access_error = check_access(study_id, user_id)
    if access_error:
        return access_error
    # Can only pass ids over API, so need to instantiate object
    study = Study(study_id)
    study_info = study.info
    # Add needed info that is not part of the initial info pull
    study_info['publication_doi'] = []
    study_info['publication_pid'] = []
    # split publications into DOIs and non-DOI (e.g. PubMed) identifiers
    for pub, is_doi in study.publications:
        if is_doi:
            study_info['publication_doi'].append(pub)
        else:
            study_info['publication_pid'].append(pub)
    study_info['study_id'] = study.id
    study_info['study_title'] = study.title
    study_info['shared_with'] = [s.id for s in study.shared_with]
    study_info['status'] = study.status
    study_info['ebi_study_accession'] = study.ebi_study_accession
    study_info['ebi_submission_status'] = study.ebi_submission_status
    study_info['public_raw_download'] = study.public_raw_download
    study_info['notes'] = study.notes

    # Clean up StudyPerson objects to string for display
    pi = study_info['principal_investigator']
    study_info['principal_investigator'] = {
        'name': pi.name,
        'email': pi.email,
        'affiliation': pi.affiliation
    }

    lab_person = study_info['lab_person']
    if lab_person:
        study_info['lab_person'] = {
            'name': lab_person.name,
            'email': lab_person.email,
            'affiliation': lab_person.affiliation
        }

    samples = study.sample_template
    study_info['num_samples'] = 0 if samples is None else len(list(samples))
    study_info['owner'] = study.owner.id
    # Study.has_access no_public=True, will return True only if the user_id is
    # the owner of the study or if the study is shared with the user_id; this
    # with study.public_raw_download will define has_access_to_raw_data
    study_info['has_access_to_raw_data'] = study.has_access(
        User(user_id), True) or study.public_raw_download

    # show the BIOM download button only if the study has BIOM artifacts
    study_info['show_biom_download_button'] = 'BIOM' in [
        a.artifact_type for a in study.artifacts()
    ]
    # show the raw download button if any prep template has an artifact
    study_info['show_raw_download_button'] = any(
        [True for pt in study.prep_templates() if pt.artifact is not None])

    # getting study processing status from redis
    processing = False
    study_info['level'] = ''
    study_info['message'] = ''
    job_info = r_client.get(STUDY_KEY_FORMAT % study_id)
    if job_info:
        job_info = defaultdict(lambda: '', loads(job_info))
        job_id = job_info['job_id']
        job = ProcessingJob(job_id)
        job_status = job.status
        processing = job_status not in ('success', 'error')
        if processing:
            study_info['level'] = 'info'
            study_info['message'] = 'This study is currently being processed'
        elif job_status == 'error':
            study_info['level'] = 'danger'
            study_info['message'] = job.log.msg.replace('\n', '</br>')
        else:
            # completed successfully: surface the alert stored in redis
            study_info['level'] = job_info['alert_type']
            study_info['message'] = job_info['alert_msg'].replace(
                '\n', '</br>')

    return {
        'status': 'success',
        'message': '',
        'study_info': study_info,
        'editable': study.can_edit(User(user_id))
    }
Esempio n. 19
0
def prep_template_ajax_get_req(user_id, prep_id):
    """Returns the prep template information needed for the AJAX handler

    Parameters
    ----------
    user_id : str
        The user id
    prep_id : int
        The prep template id

    Returns
    -------
    dict of {str: object}
        A dictionary with the following keys:
        - status: str, whether the request is successful or not
        - message: str, if the request is unsuccessful, a human readable error
        - name: str, the name of the prep template
        - files: list of str, the files available to update the prep template
        - download_prep_id: int, the filepath_id of the prep file
        - download_qiime_id: int, the filepath_id of the qiime mapping file
        - other_filepaths: list of str, additional prep files attached
        - num_samples: int, the number of samples present in the template
        - num_columns: int, the number of columns present in the template
        - investigation_type: str, the investigation type of the template
        - ontology: str, dict of {str, list of str} containing the information
        of the ENA ontology
        - artifact_attached: bool, whether the template has an artifact
        attached
        - study_id: int, the study id of the template
        - editable: bool, whether the user can edit the template
        - data_type: str, the data type of the template
        - alert_type: str, the level of the current alert, if any
        - is_submitted_to_ebi: bool, whether the template was sent to EBI
        - alert_message: str, the text of the current alert, if any
    """
    pt = PrepTemplate(prep_id)

    # Pull any job information for this prep template from redis so the
    # interface can render progress / error / warning banners
    is_processing = False
    alert_type = ''
    alert_msg = ''
    redis_info = r_client.get(PREP_TEMPLATE_KEY_FORMAT % prep_id)
    if redis_info:
        redis_info = defaultdict(lambda: '', loads(redis_info))
        job = ProcessingJob(redis_info['job_id'])
        status = job.status
        is_processing = status not in ('success', 'error')
        if is_processing:
            alert_type = 'info'
            alert_msg = 'This prep template is currently being updated'
        elif status == 'error':
            alert_type = 'danger'
            alert_msg = job.log.msg.replace('\n', '</br>')
        else:
            alert_type = redis_info['alert_type']
            alert_msg = redis_info['alert_msg'].replace('\n', '</br>')

    study_id = pt.study_id
    # Only plain-text files from the upload folder can update the template
    files = [fn for _, fn, _ in get_files_from_uploads_folders(study_id)
             if fn.endswith(('.txt', '.tsv'))]

    # keys() returns an iterator, hence the list() before len()
    num_samples = len(list(pt.keys()))
    num_columns = len(pt.categories())

    # The first non-qiime filepath is the downloadable prep file and the
    # first qiime one is the mapping file; any further non-qiime files are
    # reported separately
    download_prep_id = None
    download_qiime_id = None
    other_filepaths = []
    for fp_id, fp in pt.get_filepaths():
        fname = basename(fp)
        if 'qiime' in fname:
            if download_qiime_id is None:
                download_qiime_id = fp_id
        elif download_prep_id is None:
            download_prep_id = fp_id
        else:
            other_filepaths.append(fname)

    return {'status': 'success',
            'message': '',
            'name': pt.name,
            'files': files,
            'download_prep_id': download_prep_id,
            'download_qiime_id': download_qiime_id,
            'other_filepaths': other_filepaths,
            'num_samples': num_samples,
            'num_columns': num_columns,
            'investigation_type': pt.investigation_type,
            'ontology': _get_ENA_ontology(),
            'artifact_attached': pt.artifact is not None,
            'study_id': study_id,
            'editable': (Study(study_id).can_edit(User(user_id))
                         and not is_processing),
            'data_type': pt.data_type(),
            'alert_type': alert_type,
            'is_submitted_to_ebi': pt.is_submitted_to_ebi,
            'alert_message': alert_msg}
Esempio n. 20
0
def job_ajax_patch_req(req_op, req_path, req_value=None, req_from=None):
    """Patches a job

    Parameters
    ----------
    req_op : str
        The operation to perform on the job; only 'remove' is supported
    req_path : str
        Path parameter with the job to patch
    req_value : str, optional
        The value that needs to be modified (currently unused)
    req_from : str, optional
        The original path of the element (currently unused)

    Returns
    -------
    dict of {str: str}
        A dictionary of the form: {'status': str, 'message': str} in which
        status is the status of the request ('error' or 'success') and message
        is a human readable string with the error message in case that status
        is 'error'.
    """
    if req_op != 'remove':
        return {'status': 'error',
                'message': 'Operation "%s" not supported. Current supported '
                           'operations: remove' % req_op}

    # The path must contain exactly one element: the job id
    path = [v for v in req_path.split('/') if v]
    if len(path) != 1:
        return {'status': 'error',
                'message': 'Incorrect path parameter: missing job id'}

    job_id = path[0]
    try:
        job = ProcessingJob(job_id)
    except QiitaDBUnknownIDError:
        return {'status': 'error',
                'message': 'Incorrect path parameter: '
                           '%s is not a recognized job id' % job_id}
    except Exception as e:
        e = str(e)
        # A malformed id surfaces as a postgres uuid syntax error rather
        # than an unknown-id error; report it the same way
        if "invalid input syntax for uuid" in e:
            return {'status': 'error',
                    'message': 'Incorrect path parameter: '
                               '%s is not a recognized job id' % job_id}
        # fix: "occured" -> "occurred" in the user-facing message
        return {'status': 'error',
                'message': 'An error occurred while accessing the '
                           'job: %s' % e}

    job_status = job.status
    if job_status == 'in_construction':
        # A job that is in construction is in a workflow. Use the methods
        # defined for workflows to keep everything consistent. This message
        # should never be presented to the user, but rather to the
        # developer if it makes a mistake during changes in the interface
        return {'status': 'error',
                'message': "Can't delete job %s. It is 'in_construction' "
                           "status. Please use /study/process/workflow/"
                           % job_id}
    elif job_status == 'error':
        # When the job is in error status, we just need to hide it
        job.hide()
        return {'status': 'success', 'message': ''}

    # In any other state, we currently fail. Keeping this explicit because
    # it can be useful for fixing issue #2307
    return {'status': 'error',
            'message': 'Only jobs in "error" status can be deleted.'}
Esempio n. 21
0
def correct_redis_data(key, cmd, values_dict, user):
    """Corrects the data stored in the redis DB

    Parameters
    ----------
    key: str
        The redis key to fix
    cmd : qiita_db.software.Command
        Command to use to create the processing job
    values_dict : dict
        Dictionary used to instantiate the parameters of the command
    user : qiita_db.user. User
        The user that will own the job
    """
    info = r_client.get(key)
    if not info:
        # The key doesn't contain any information. Delete the key
        r_client.delete(key)
        return

    info = loads(info)

    if info['job_id'] is not None:
        if info.get('is_qiita_job'):
            try:
                # Confirm the job still exists before rewriting the key
                ProcessingJob(info['job_id'])
                payload = {'job_id': info['job_id'],
                           'alert_type': info['status'],
                           'alert_msg': info['alert_msg']}
                r_client.set(key, dumps(payload))
            except (QiitaDBUnknownIDError, KeyError):
                # We somehow lost the information of this job
                # Simply delete the key
                r_client.delete(key)
        else:
            # These jobs don't contain any information on the live
            # dump. We can safely delete the key
            r_client.delete(key)
        return

    # Job is null, we have the information here
    status = info['status']
    if status == 'success':
        # In the success case no information is stored. We can
        # safely delete the key
        r_client.delete(key)
    elif status == 'warning':
        # In case of warning the key message stores the warning
        # message. We need to create a new job, mark it as
        # successful and store the error message as expected by
        # the new structure
        params = Parameters.load(cmd, values_dict=values_dict)
        job = ProcessingJob.create(user, params)
        job._set_status('success')
        r_client.set(key, dumps({'job_id': job.id,
                                 'alert_type': 'warning',
                                 'alert_msg': info['message']}))
    else:
        # The status is error. The key message stores the error
        # message. We need to create a new job and mark it as
        # failed with the given error message
        params = Parameters.load(cmd, values_dict=values_dict)
        job = ProcessingJob.create(user, params)
        job._set_error(info['message'])
        r_client.set(key, dumps({'job_id': job.id}))