Code example #1
def update_project(experiment_id):
    '''Saves modifications of the pipeline and module descriptions to the
    corresponding `.pipe` and `.handles` files.
    '''
    logger.info('save jterator project of experiment %d', experiment_id)
    data = json.loads(request.data)
    # Parse the YAML-encoded project; yaml.load() without an explicit
    # Loader is deprecated and raises an error in PyYAML >= 6.
    project = yaml.safe_load(data['project'])
    pipeline_description = PipelineDescription(
        **project['pipe']['description'])
    handles_descriptions = dict()
    for h in project['handles']:
        logger.debug('check handles of module "%s"', h['name'])
        handles_descriptions[h['name']] = HandleDescriptions(
            **h['description'])
    jt = ImageAnalysisPipelineEngine(
        experiment_id,
        pipeline_description=pipeline_description,
        handles_descriptions=handles_descriptions,
    )
    try:
        jt.project.save()
        return jsonify({'success': True})
    except Exception as err:
        raise MalformedRequestError(
            'Project could not be saved: {err}'.format(err=err))
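For context, a minimal sketch of the request body this endpoint parses. It assumes only the structure the handler actually reads (project['pipe']['description'] plus a project['handles'] list of {'name': ..., 'description': ...} mappings); the empty description mappings are placeholders, not TmServer's actual schema:

import json
import yaml

# Hypothetical payload shape; the description contents are placeholders.
project = {
    'pipe': {'description': {}},
    'handles': [
        {'name': 'my_module', 'description': {}},
    ],
}
# The 'project' value travels as a YAML string inside a JSON body.
body = json.dumps({'project': yaml.safe_dump(project)})
print(body)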
Code example #2
File: api.py  Project: dvischi/TmServer
def check_project(experiment_id):
    '''Checks pipeline and module descriptions.
    '''
    logger.info('check description of jterator project of experiment %d',
                experiment_id)
    data = json.loads(request.data)
    project = yaml.safe_load(data['project'])
    pipeline_description = PipelineDescription(
        **project['pipe']['description'])
    handles_descriptions = {
        h['name']: HandleDescriptions(**h['description'])
        for h in project['handles']
    }
    try:
        jt = ImageAnalysisPipelineEngine(
            experiment_id,
            pipeline_description=pipeline_description,
            handles_descriptions=handles_descriptions,
        )
        return jsonify(success=True)
    except Exception as err:
        raise MalformedRequestError('Pipeline check failed:\n%s' % str(err))
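Both endpoints report failure by raising MalformedRequestError. A minimal sketch, assuming a plain Flask app, of how such an exception could be converted into a JSON error response; the error class below is a stand-in, not TmServer's implementation:

from flask import Flask, jsonify

app = Flask(__name__)

class MalformedRequestError(Exception):
    '''Stand-in for the error type raised by the handlers above.'''
    status_code = 400

@app.errorhandler(MalformedRequestError)
def handle_malformed_request(err):
    # Serialize the error message as JSON and attach the 400 status.
    response = jsonify(success=False, error=str(err))
    response.status_code = MalformedRequestError.status_code
    return response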
Code example #3
def run_jobs(experiment_id):
    '''Runs one or more jobs of the current project with pipeline and module
    descriptions provided by the UI.

    This requires the pipeline and module descriptions to be saved to *pipe*
    and *handles* files, respectively.
    '''
    logger.info('submit jobs for jterator project of experiment %d',
                experiment_id)
    data = json.loads(request.data)
    job_ids = [int(j) for j in data['job_ids']]
    project = yaml.safe_load(data['project'])
    pipeline_description = PipelineDescription(
        **project['pipe']['description'])
    handles_descriptions = {
        h['name']: HandleDescriptions(**h['description'])
        for h in project['handles']
    }
    jt = ImageAnalysisPipelineEngine(
        experiment_id,
        pipeline_description=pipeline_description,
        handles_descriptions=handles_descriptions,
    )

    # 1. Delete figures and logs from previous submission
    #    since they are not tracked per submission.
    jt.remove_previous_pipeline_output()
    # TODO: remove figure files of previous runs!!

    # 2. Build job descriptions
    channel_names = [
        ch.name for ch in jt.project.pipe.description.input.channels
    ]
    job_descriptions = list()
    with tm.utils.ExperimentSession(experiment_id) as session:
        sites = session.query(tm.Site.id).\
            order_by(tm.Site.id).\
            all()
        for j in job_ids:
            site_id = sites[j].id
            image_file_count = session.query(tm.ChannelImageFile.id).\
                join(tm.Channel).\
                filter(tm.Channel.name.in_(channel_names)).\
                filter(tm.ChannelImageFile.site_id == site_id).\
                count()
            if image_file_count == 0:
                raise MalformedRequestError(
                    'No images found for job ID {j}.'.format(j=j))
            job_descriptions.append({'site_id': site_id, 'plot': True})

    with tm.utils.MainSession() as session:
        submission = tm.Submission(experiment_id=experiment_id,
                                   program='jtui',
                                   user_id=current_identity.id)
        session.add(submission)
        session.flush()

        SubmitArgs = get_step_args('jterator')[1]
        submit_args = SubmitArgs()
        job_collection = jt.create_debug_run_phase(submission.id)
        jobs = jt.create_debug_run_jobs(user_name=current_identity.name,
                                        batches=job_descriptions,
                                        job_collection=job_collection,
                                        verbosity=2,
                                        duration=submit_args.duration,
                                        memory=submit_args.memory,
                                        cores=submit_args.cores)

    # 3. Store jobs in session
    gc3pie.store_task(jobs)
    # session.remove(data['previousSubmissionId'])
    gc3pie.submit_task(jobs)
    return jsonify(submission_id=jobs.submission_id)
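The core of step 2 is mapping the 0-based job indices onto the experiment's sites, ordered by id. A minimal sketch of that mapping with plain Python data, plus an explicit bounds check (the original would raise an IndexError for an out-of-range job ID); the function name is hypothetical:

def site_ids_for_jobs(job_ids, site_ids):
    '''Maps 0-based job indices onto the ordered list of site ids and
    builds one job description per requested job.
    '''
    descriptions = []
    for j in job_ids:
        if not 0 <= j < len(site_ids):
            raise ValueError('job ID {j} is out of range'.format(j=j))
        descriptions.append({'site_id': site_ids[j], 'plot': True})
    return descriptions

# Example: three sites, run jobs 0 and 2.
print(site_ids_for_jobs([0, 2], [101, 102, 103]))
# [{'site_id': 101, 'plot': True}, {'site_id': 103, 'plot': True}]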