Code example #1
    def test_build_combine_archive_for_model_xpp_with_plot(self):
        model_filename = os.path.join(os.path.dirname(__file__), '..',
                                      'fixtures', 'xpp', 'wilson-cowan.ode')
        archive_filename = os.path.join(self.dir_name, 'archive.omex')

        model_utils.build_combine_archive_for_model(
            model_filename,
            ModelLanguage.XPP,
            UniformTimeCourseSimulation,
            archive_filename,
        )

        archive_dirname = os.path.join(self.dir_name, 'archive')
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)

        doc = SedmlSimulationReader().run(
            os.path.join(archive_dirname, 'simulation.sedml'))
        output = doc.outputs[-1]
        self.assertIsInstance(output, Plot2D)
        self.assertEqual(len(output.curves), 1)
        self.assertEqual(len(output.curves[0].x_data_generator.variables), 1)
        self.assertEqual(len(output.curves[0].y_data_generator.variables), 1)
        self.assertEqual(output.curves[0].x_data_generator.variables[0].target,
                         'U')
        self.assertEqual(output.curves[0].y_data_generator.variables[0].target,
                         'V')
        self.assertEqual(output.curves[0].x_scale, AxisScale.linear)
        self.assertEqual(output.curves[0].y_scale, AxisScale.linear)
Code example #2
    def test_build_combine_archive_for_model_smoldyn(self):
        model_filename = os.path.join(os.path.dirname(__file__), '..',
                                      'fixtures', 'smoldyn', 'bounce1.txt')
        archive_filename = os.path.join(self.dir_name, 'archive.omex')
        extra_filename = os.path.join(self.dir_name, 'extra.txt')
        with open(extra_filename, 'w') as file:
            file.write('extra content')

        model_utils.build_combine_archive_for_model(
            model_filename,
            ModelLanguage.Smoldyn,
            UniformTimeCourseSimulation,
            archive_filename,
            extra_contents={
                extra_filename:
                CombineArchiveContent(
                    location='extra.txt',
                    format=CombineArchiveContentFormat.TEXT,
                ),
            },
        )

        archive_dirname = os.path.join(self.dir_name, 'archive')
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)

        doc = SedmlSimulationReader().run(
            os.path.join(archive_dirname, 'simulation.sedml'))
        sim = doc.simulations[0]
        self.assertEqual(sim.initial_time, 0.)
        self.assertEqual(sim.output_start_time, 0.)
        self.assertEqual(sim.output_end_time, 100.)
        self.assertEqual(sim.number_of_steps, 10000)
        self.assertEqual(sim.algorithm.kisao_id, 'KISAO_0000057')
Code example #3
    def test_hdf5_response(self):
        endpoint = '/run/run'
        archive_filename = os.path.join(
            self.FIXTURES_DIRNAME,
            'Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.omex')
        data = MultiDict([
            ('_type', 'SimulationRun'),
            ('archiveUrl', 'https://web.site/archive.omex'),
            ('simulator', 'copasi'),
        ])
        with app.app.app.test_client() as client:
            with open(archive_filename, 'rb') as archive_file:
                with mock.patch('requests.get',
                                return_value=mock.Mock(
                                    raise_for_status=lambda: None,
                                    content=archive_file.read())):
                    response = client.post(
                        endpoint,
                        data=data,
                        content_type="multipart/form-data",
                        headers={'Accept': 'application/x-hdf'})

        self.assertEqual(response.status_code, 200, response.json)
        h5_filename = os.path.join(self.tmp_dirname, 'reports.h5')
        with open(h5_filename, 'wb') as file:
            file.write(response.data)

        self.assertEqual([
            'BIOMD0000000912_sim.sedml/BIOMD0000000912_report',
            'BIOMD0000000912_sim.sedml/plot_1'
        ],
                         ReportReader().get_ids(self.tmp_dirname))

        archive_dirname = os.path.join(self.tmp_dirname, 'archive')
        CombineArchiveReader().run(archive_filename, archive_dirname)
        sed_doc = SedmlSimulationReader().run(
            os.path.join(archive_dirname, 'BIOMD0000000912_sim.sedml'))
        report = next(output for output in sed_doc.outputs
                      if output.id == 'BIOMD0000000912_report')

        data_set_results = ReportReader().run(
            report, self.tmp_dirname,
            'BIOMD0000000912_sim.sedml/BIOMD0000000912_report')

        self.assertEqual(
            set(data_set_results.keys()),
            set([
                'data_set_time',
                'data_set_T',
                'data_set_E',
                'data_set_I',
            ]),
        )

        numpy.testing.assert_allclose(data_set_results['data_set_time'],
                                      numpy.linspace(0., 1000., 5001))
        for values in data_set_results.values():
            self.assertEqual(values.shape, (5001, ))
            self.assertFalse(numpy.any(numpy.isnan(values)))
Code example #4
File: cli.py Project: virtualcell/vcell_cli_utils
def gen_sedml_2d_3d(omex_file_path, base_out_path):
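    # Read the COMBINE/OMEX archive and, for every 2D/3D plot in its SED-ML files,
    # add a pseudo-report ('__plot__<plot id>') that mirrors the plot's data
    # generators, then write the augmented SED-ML documents to a temp directory.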

    temp_path = os.path.join(base_out_path, "temp")
    if not os.path.exists(temp_path):
        os.mkdir(temp_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    # defining archive
    archive = CombineArchiveReader().run(
        in_file=omex_file_path,
        out_dir=temp_path,
        try_reading_as_plain_zip_archive=False)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    for i_content, content in enumerate(sedml_contents):
        content_filename = os.path.join(temp_path, content.location)
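        # NOTE: assumes SED-ML locations have the form '<dir>/<name>.sedml'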
        sedml_name = content.location.split('/')[1].split('.')[0]

        doc = SedmlSimulationReader().run(content_filename)
        for output in doc.outputs:
            if isinstance(output, (Plot2D, Plot3D)):
                report = Report(id='__plot__' + output.id, name=output.name)

                data_generators = {}
                if isinstance(output, Plot2D):
                    for curve in output.curves:
                        data_generators[curve.x_data_generator.id] = curve.x_data_generator
                        data_generators[curve.y_data_generator.id] = curve.y_data_generator

                elif isinstance(output, Plot3D):
                    for surface in output.surfaces:
                        data_generators[surface.x_data_generator.id] = surface.x_data_generator
                        data_generators[surface.y_data_generator.id] = surface.y_data_generator
                        data_generators[surface.z_data_generator.id] = surface.z_data_generator

                for data_generator in data_generators.values():
                    report.data_sets.append(
                        DataSet(
                            id='__data_set__{}_{}'.format(
                                output.id, data_generator.id),
                            name=data_generator.name,
                            label=data_generator.id,
                            data_generator=data_generator,
                        ))

                report.data_sets.sort(key=lambda data_set: data_set.id)
                doc.outputs.append(report)

        filename_with_reports_for_plots = os.path.join(
            temp_path, f'simulation_{sedml_name}.sedml')
        SedmlSimulationWriter().run(doc,
                                    filename_with_reports_for_plots,
                                    validate_models_with_languages=False)
Code example #5
File: cli.py Project: virtualcell/vcell
def exec_sed_doc(omex_file_path, base_out_path):
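    # NOTE: `tmp_dir` used below is assumed to be a module-level temporary
    # directory defined elsewhere in cli.py; it is not a parameter of this function.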
    # defining archive
    config = Config(VALIDATE_OMEX_MANIFESTS=False)
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         config=config)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    report_results = ReportResults()
    for i_content, content in enumerate(sedml_contents):
        content_filename = os.path.join(tmp_dir, content.location)

        doc = SedmlSimulationReader().run(content_filename)

        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            report_id = os.path.splitext(os.path.basename(report_filename))[0]

            # read report from CSV file produced by VCell
            data_set_df = pd.read_csv(report_filename).transpose()
            data_set_df.columns = data_set_df.iloc[0]
            data_set_df = data_set_df.drop(data_set_df.iloc[0].name)
            data_set_df = data_set_df.reset_index()
            data_set_df = data_set_df.rename(
                columns={'index': data_set_df.columns.name})
            data_set_df = data_set_df.transpose()
            data_set_df.index.name = None

            report = next(report for report in doc.outputs
                          if report.id == report_id)

            data_set_results = DataSetResults()

            # print("report: ", report, file=sys.stderr)
            # print("report Type: ", type(report), file=sys.stderr)
            # print("Plot Type: ", Plot2D, file=sys.stderr)
            if type(report) != Plot2D and type(report) != Plot3D:
                # Considering the scenario where it has the datasets in sedml
                for data_set in report.data_sets:
                    data_set_results[data_set.id] = data_set_df.loc[
                        data_set.label, :].to_numpy(dtype='float64')
                    # print("DF for report: ", data_set_results[data_set.id], file=sys.stderr)
                    # print("df.types: ", data_set_results[data_set.id].dtype, file=sys.stderr)
            else:
                data_set_df = pd.read_csv(report_filename, header=None).T
                data_set_df.columns = data_set_df.iloc[0]
                data_set_df.drop(0, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)
                # print("DF for plot: ", data_set_df, file=sys.stderr)
                # Considering the scenario where it doesn't have datasets in sedml (pseudo sedml for plots)
                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].values

            # append to data structure of report results
            report_results[report_id] = data_set_results

            # save file in desired BioSimulators format(s)
            # for report_format in report_formats:
            # print("HDF report: ", report, file=sys.stderr)
            # print("HDF dataset results: ", data_set_results, file=sys.stderr)
            # print("HDF base_out_path: ", base_out_path,file=sys.stderr)
            # print("HDF path: ", os.path.join(content.location, report.id), file=sys.stderr)

            rel_path = os.path.join(content.location, report.id)

            if len(rel_path.split("./")) > 1:
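                # keep only the part after the first './' so the stored path is relative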
                rel_path = rel_path.split("./")[1]

            if type(report) != Plot2D and type(report) != Plot3D:
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5')
            else:
                datasets = []
                for col in list(data_set_df.columns):
                    datasets.append(DataSet(id=col, label=col, name=col))
                report.data_sets = datasets
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5')

    # Remove temp directory
    shutil.rmtree(tmp_dir)
Code example #6
File: run.py Project: biosimulations/Biosimulations
def handler(body, archiveFile=None):
    """ Execute the SED-ML files in a COMBINE/OMEX archive.

    Args:
        body (:obj:`dict`): dictionary with schema ``SimulationRun`` with the
            specifications of the COMBINE/OMEX archive to execute and the simulator to execute it with
        archiveFile (:obj:`werkzeug.datastructures.FileStorage`, optional): COMBINE/OMEX file

    Returns:
        :obj:`werkzeug.wrappers.response.Response`: response with the results and log of the run in the
            ``SimulationRunResults`` schema
    """
    archive_file = archiveFile
    archive_url = body.get('archiveUrl', None)
    simulator_id = body['simulator']
    env_vars = body.get('environment', {}).get('variables', [])

    # set up environment (i.e. options)
    env = {}
    for env_var in env_vars:
        key = env_var['key']
        if key not in IGNORED_ENV_VARS:
            env[key] = env_var['value']

    if 'REPORT_FORMATS' not in env:
        env['REPORT_FORMATS'] = 'h5'

    with mock.patch.dict('os.environ', env):
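        # get_config() reads os.environ, which is temporarily patched with the requested variables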
        config = get_config()

    # process requested return type
    accept = connexion.request.headers.get('Accept', 'application/json')
    if accept in ['application/json']:
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
        config.COLLECT_SED_DOCUMENT_RESULTS = True
        config.REPORT_FORMATS = []
        config.VIZ_FORMATS = []
        config.BUNDLE_OUTPUTS = False
        config.KEEP_INDIVIDUAL_OUTPUTS = True
        config.LOG_PATH = ''
        return_type = 'json'

    elif accept in ['application/x-hdf', 'application/x-hdf5']:
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
        config.COLLECT_SED_DOCUMENT_RESULTS = False
        config.REPORT_FORMATS = [
            ReportFormat[format.strip().lower()]
            for format in env.get('REPORT_FORMATS', 'h5').split(',')
        ]
        config.VIZ_FORMATS = []
        config.BUNDLE_OUTPUTS = False
        config.KEEP_INDIVIDUAL_OUTPUTS = True
        config.LOG_PATH = ''
        return_type = 'h5'

    elif accept in ['application/zip']:
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
        config.COLLECT_SED_DOCUMENT_RESULTS = False
        config.REPORT_FORMATS = [
            ReportFormat[format.strip().lower()]
            for format in env.get('REPORT_FORMATS', 'h5').split(',')
        ]
        config.VIZ_FORMATS = [
            VizFormat[format.strip().lower()]
            for format in env.get('VIZ_FORMATS', 'pdf').split(',')
        ]
        config.BUNDLE_OUTPUTS = False
        config.KEEP_INDIVIDUAL_OUTPUTS = True
        return_type = 'zip'

    else:
        raise BadRequestException(
            title=('`Accept` header must be one of `application/json`, '
                   '`application/x-hdf`, `application/x-hdf5`, or `application/zip`.'),
            instance=NotImplementedError(),
        )

    # get the COMBINE/OMEX archive
    if archive_file and archive_url:
        raise BadRequestException(
            title=
            'Only one of `archiveFile` or `archiveUrl` can be used at a time.',
            instance=ValueError(),
        )

    # get COMBINE/OMEX archive
    archive_filename = get_temp_file(suffix='.omex')

    if archive_file:
        archive_file.save(archive_filename)

    else:
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file
        with open(archive_filename, 'wb') as file:
            file.write(response.content)

    # get the simulator
    simulator = next(
        (simulator
         for simulator in get_simulators() if simulator['id'] == simulator_id),
        None)
    if simulator is None:
        raise BadRequestException(
            title=
            '`{}` is not a BioSimulators id of a simulation tool that is available for execution.'
            .format(simulator_id),
            instance=ValueError(),
        )

    # execute the simulation
    out_dir = get_temp_dir()

    with mock.patch.dict('os.environ', env):
        results, log = exec_in_subprocess(
            use_simulator_api_to_exec_sedml_docs_in_combine_archive,
            simulator['api']['module'],
            archive_filename,
            out_dir,
            timeout=TIMEOUT,
            config=config)

    # transform the results
    if return_type == 'json':
        archive_dirname = get_temp_dir()
        with zipfile.ZipFile(archive_filename, 'r') as zip_file:
            zip_file.extractall(archive_dirname)

        outputs = []
        for sed_doc_location, sed_doc_outputs_results in (results
                                                          or {}).items():
            sed_doc = SedmlSimulationReader().run(
                os.path.join(archive_dirname, sed_doc_location))

            for output in sed_doc.outputs:
                if output.id not in sed_doc_outputs_results:
                    continue
                output_results = sed_doc_outputs_results.get(output.id, None)

                if isinstance(output, Report):
                    type = 'SedReport'
                    report = output
                elif isinstance(output, Plot2D):
                    type = 'SedPlot2D'
                    report = get_report_for_plot2d(output)
                elif isinstance(output, Plot3D):
                    type = 'SedPlot3D'
                    report = get_report_for_plot3d(output)
                else:  # pragma: no cover #
                    raise NotImplementedError(
                        'Outputs of type `{}` are not supported.'.format(
                            output.__class__.__name__))

                data = []
                for data_set in report.data_sets:
                    if data_set.id not in output_results:
                        continue
                    data_set_results = output_results[data_set.id]

                    data.append({
                        '_type': 'SimulationRunOutputDatum',
                        'id': data_set.id,
                        'label': data_set.label,
                        'name': data_set.name,
                        'shape': ('' if data_set_results is None
                                  else ','.join(str(dim_len)
                                                for dim_len in data_set_results.shape)),
                        'type': ('__None__' if data_set_results is None
                                 else data_set_results.dtype.name),
                        'values': (None if data_set_results is None
                                   else data_set_results.tolist()),
                    })

                outputs.append({
                    '_type': 'SimulationRunOutput',
                    'outputId': sed_doc_location + '/' + output.id,
                    'name': output.name,
                    'type': type,
                    'data': data,
                })

        # return
        return {
            '_type': 'SimulationRunResults',
            'outputs': outputs,
            'log': log,
        }

    elif return_type == 'h5':
        h5_filename = os.path.join(out_dir, get_config().H5_REPORTS_PATH)
        return flask.send_file(h5_filename,
                               mimetype=accept,
                               as_attachment=True,
                               attachment_filename='outputs.h5')

    else:
        zip_filename = get_temp_file()
        with zipfile.ZipFile(zip_filename, 'w') as zip_file:
            for root, dirs, files in os.walk(out_dir):
                for file in files:
                    zip_file.write(
                        os.path.join(root, file),
                        os.path.relpath(os.path.join(root, file), out_dir))

        return flask.send_file(zip_filename,
                               mimetype=accept,
                               as_attachment=True,
                               attachment_filename='outputs.zip')
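
A minimal client-side sketch (not part of the original source) of how the handler above might be invoked once it is exposed at the `/run/run` endpoint used in code example #3, assuming the multipart file field is named `archiveFile` to match the handler parameter; the host URL and output path are hypothetical:

import requests

with open('archive.omex', 'rb') as archive_file:
    response = requests.post(
        'https://example.org/run/run',  # hypothetical host serving the handler above
        data={'_type': 'SimulationRun', 'simulator': 'copasi'},
        files={'archiveFile': archive_file},
        headers={'Accept': 'application/x-hdf'},  # request the HDF5 report bundle
    )
response.raise_for_status()

with open('outputs.h5', 'wb') as file:  # hypothetical local output path
    file.write(response.content)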
Code example #7
    def test_create_combine_archive_with_model_at_url(self):
        endpoint = '/combine/create'

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        file_0_url = 'https://models.org/model.xml'
        file_1_url = 'https://models.org/file.txt'
        archive_specs['contents'][0]['location']['value'].pop('filename')
        archive_specs['contents'][1]['location']['value'].pop('filename')
        archive_specs['contents'][0]['location']['value'][
            '_type'] = 'CombineArchiveContentUrl'
        archive_specs['contents'][1]['location']['value'][
            '_type'] = 'CombineArchiveContentUrl'
        archive_specs['contents'][0]['location']['value']['url'] = file_0_url
        archive_specs['contents'][1]['location']['value']['url'] = file_1_url

        fid_0 = open(file_0_path, 'rb')
        fid_1 = open(file_1_path, 'rb')

        data = MultiDict([
            ('specs', json.dumps(archive_specs)),
        ])
        with app.app.app.test_client() as client:
            archive_filename = os.path.join(self.temp_dirname, 'archive.omex')

            def save_temporary_combine_archive_to_s3_bucket(
                    filename, public=True, archive_filename=archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):

                def requests_get(url):
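                    # return the local fixture contents in place of the remote model/file URLs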
                    assert url in [file_0_url, file_1_url]
                    if url == file_0_url:
                        return mock.Mock(raise_for_status=lambda: None,
                                         content=fid_0.read())
                    else:
                        return mock.Mock(raise_for_status=lambda: None,
                                         content=fid_1.read())

                with mock.patch('requests.get', side_effect=requests_get):
                    response = client.post(endpoint,
                                           data=data,
                                           content_type="multipart/form-data")

        fid_0.close()
        fid_1.close()

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.json, archive_filename)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 3)

        for content, expected_content in zip(archive.contents,
                                             archive_specs['contents']):
            self.assertEqual(content.location,
                             expected_content['location']['path'])
            self.assertEqual(content.format, expected_content['format'])
            self.assertEqual(content.master, expected_content['master'])

        sed_doc = SedmlSimulationReader().run(
            os.path.join(contents_dirname,
                         archive_specs['contents'][2]['location']['path']),
            validate_models_with_languages=False)
        sed_doc_specs = archive_specs['contents'][2]['location']['value']
        self.assertEqual(sed_doc.level, sed_doc_specs['level'])
        self.assertEqual(sed_doc.version, sed_doc_specs['version'])

        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].kisao_id,
            'KISAO_0000488')
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].new_value, '10')
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].target,
            "/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='x']"
        )
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].
            target_namespaces,
            {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                "sbml": "http://www.sbml.org/sbml/level3/version1/core",
                "qual":
                "http://www.sbml.org/sbml/level3/version1/qual/version1"
            },
        )
Code example #8
    def test_create_combine_archive_with_uploaded_model_file(self):
        endpoint = '/combine/create'

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        archive_specs['contents'][0]['location']['value'][
            'filename'] = file_0_path
        archive_specs['contents'][1]['location']['value'][
            'filename'] = file_1_path

        fid_0 = open(file_0_path, 'rb')
        fid_1 = open(file_1_path, 'rb')

        data = MultiDict([
            ('specs', json.dumps(archive_specs)),
            ('files', fid_0),
            ('files', fid_1),
        ])
        with app.app.app.test_client() as client:
            archive_filename = os.path.join(self.temp_dirname, 'archive.omex')

            def save_temporary_combine_archive_to_s3_bucket(
                    filename, public=True, archive_filename=archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        fid_0.close()
        fid_1.close()

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.json, archive_filename)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 3)

        for content, expected_content in zip(archive.contents,
                                             archive_specs['contents']):
            self.assertEqual(content.location,
                             expected_content['location']['path'])
            self.assertEqual(content.format, expected_content['format'])
            self.assertEqual(content.master, expected_content['master'])

        with self.assertRaisesRegex(ValueError,
                                    'Missing a required XML attribute'):
            sed_doc = SedmlSimulationReader().run(
                os.path.join(contents_dirname,
                             archive_specs['contents'][2]['location']['path']),
                validate_models_with_languages=True)
        sed_doc = SedmlSimulationReader().run(
            os.path.join(contents_dirname,
                         archive_specs['contents'][2]['location']['path']),
            validate_models_with_languages=False)
        sed_doc_specs = archive_specs['contents'][2]['location']['value']
        self.assertEqual(sed_doc.level, sed_doc_specs['level'])
        self.assertEqual(sed_doc.version, sed_doc_specs['version'])

        self.assertEqual(sed_doc.styles[0].id,
                         sed_doc_specs['styles'][0]['id'])
        self.assertEqual(sed_doc.styles[0].name,
                         sed_doc_specs['styles'][0].get('name', None))
        self.assertEqual(sed_doc.styles[0].base,
                         sed_doc_specs['styles'][0].get('base', None))
        self.assertEqual(
            sed_doc.styles[0].line.type.value,
            sed_doc_specs['styles'][0].get('line', None).get('type', None))
        self.assertEqual(
            sed_doc.styles[0].line.color,
            sed_doc_specs['styles'][0].get('line', None).get('color', None))
        self.assertEqual(
            sed_doc.styles[0].line.thickness,
            sed_doc_specs['styles'][0].get('line',
                                           None).get('thickness', None))
        self.assertEqual(
            sed_doc.styles[0].marker.type.value,
            sed_doc_specs['styles'][0].get('marker', None).get('type', None))
        self.assertEqual(
            sed_doc.styles[0].marker.size,
            sed_doc_specs['styles'][0].get('marker', None).get('size', None))
        self.assertEqual(
            sed_doc.styles[0].marker.line_color,
            sed_doc_specs['styles'][0].get('marker',
                                           None).get('lineColor', None))
        self.assertEqual(
            sed_doc.styles[0].marker.line_thickness,
            sed_doc_specs['styles'][0].get('marker',
                                           None).get('lineThickness', None))
        self.assertEqual(
            sed_doc.styles[0].marker.fill_color,
            sed_doc_specs['styles'][0].get('marker',
                                           None).get('fillColor', None))
        self.assertEqual(
            sed_doc.styles[0].fill.color,
            sed_doc_specs['styles'][0].get('fill', None).get('color', None))

        self.assertEqual(sed_doc.styles[1].id,
                         sed_doc_specs['styles'][1]['id'])
        self.assertEqual(sed_doc.styles[1].name,
                         sed_doc_specs['styles'][1].get('name', None))
        self.assertEqual(sed_doc.styles[1].base.id,
                         sed_doc_specs['styles'][1].get('base', None))
        self.assertEqual(sed_doc.styles[1].line,
                         sed_doc_specs['styles'][1].get('line', None))
        self.assertEqual(
            sed_doc.styles[1].marker.type.value,
            sed_doc_specs['styles'][1].get('marker', None).get('type', None))
        self.assertEqual(
            sed_doc.styles[1].marker.size,
            sed_doc_specs['styles'][1].get('marker', None).get('size', None))
        self.assertEqual(
            sed_doc.styles[1].marker.line_color,
            sed_doc_specs['styles'][1].get('marker',
                                           None).get('lineColor', None))
        self.assertEqual(
            sed_doc.styles[1].marker.line_thickness,
            sed_doc_specs['styles'][1].get('marker',
                                           None).get('lineThickness', None))
        self.assertEqual(
            sed_doc.styles[1].marker.fill_color,
            sed_doc_specs['styles'][1].get('marker',
                                           None).get('fillColor', None))
        self.assertEqual(
            sed_doc.styles[1].fill.color,
            sed_doc_specs['styles'][1].get('fill', None).get('color', None))

        self.assertEqual(sed_doc.outputs[1].curves[0].style.id,
                         sed_doc_specs['outputs'][1]['curves'][0]['style'])
        self.assertEqual(sed_doc.outputs[2].surfaces[0].style.id,
                         sed_doc_specs['outputs'][2]['surfaces'][0]['style'])

        self.assertEqual(sed_doc.tasks[0].model, sed_doc.models[0])
        self.assertEqual(len(sed_doc.models[0].changes), 2)
        self.assertEqual(
            sed_doc.models[0].changes[0].target,
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k1']/@value"
        )
        self.assertEqual(sed_doc.models[0].changes[0].new_value, '1.2')
        self.assertEqual(
            sed_doc.models[0].changes[0].target_namespaces, {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                'sbml': 'http://www.sbml.org/sbml/level3/version1/core',
                'qual':
                'http://www.sbml.org/sbml/level3/version1/qual/version1',
            })

        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].kisao_id,
            'KISAO_0000488')
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].new_value, '10')
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].target,
            "/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='x']"
        )
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].
            target_namespaces,
            {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                "sbml": "http://www.sbml.org/sbml/level3/version1/core",
                "qual":
                "http://www.sbml.org/sbml/level3/version1/qual/version1"
            },
        )
Code example #9
    def test_modify_combine_archive_with_uploaded_model_file(self):
        endpoint = '/combine/modify'

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        file_2_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, self.TEST_CASE + '.omex'))
        archive_specs['contents'][0]['location']['path'] = 'Caravagna2010.xml'
        archive_specs['contents'][0]['location']['value'][
            'filename'] = file_0_path
        archive_specs['contents'][1]['location'][
            'path'] = 'subdir/' + archive_specs['contents'][1]['location'][
                'path']
        archive_specs['contents'][1]['location']['value'][
            'filename'] = file_1_path

        fid_0 = open(file_0_path, 'rb')
        fid_1 = open(file_1_path, 'rb')
        fid_2 = open(file_2_path, 'rb')

        data = MultiDict([
            ('archive', json.dumps({'filename': file_2_path})),
            ('specs', json.dumps(archive_specs)),
            ('files', fid_0),
            ('files', fid_1),
            ('files', fid_2),
        ])
        with app.app.app.test_client() as client:
            archive_filename = os.path.join(self.temp_dirname, 'archive.omex')

            def save_temporary_combine_archive_to_s3_bucket(
                    filename, public=True, archive_filename=archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        fid_0.close()
        fid_1.close()
        fid_2.close()

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.json, archive_filename)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 5)

        for expected_content in archive_specs['contents']:
            found = False
            for content in archive.contents:
                if os.path.relpath(content.location, '.') == os.path.relpath(
                        expected_content['location']['path'], '.'):
                    found = True
                    self.assertEqual(content.format,
                                     expected_content['format'])
                    self.assertEqual(content.master,
                                     expected_content['master'])
                    break
            self.assertTrue(found)

        sed_doc = SedmlSimulationReader().run(
            os.path.join(contents_dirname,
                         archive_specs['contents'][2]['location']['path']),
            validate_models_with_languages=False)
        sed_doc_specs = archive_specs['contents'][2]['location']['value']
        self.assertEqual(sed_doc.level, sed_doc_specs['level'])
        self.assertEqual(sed_doc.version, sed_doc_specs['version'])

        self.assertEqual(sed_doc.tasks[0].model, sed_doc.models[0])
        self.assertEqual(len(sed_doc.models[0].changes), 2)
        self.assertEqual(
            sed_doc.models[0].changes[0].target,
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k1']/@value"
        )
        self.assertEqual(sed_doc.models[0].changes[0].new_value, '1.2')
        self.assertEqual(
            sed_doc.models[0].changes[0].target_namespaces, {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                'sbml': 'http://www.sbml.org/sbml/level3/version1/core',
                'qual':
                'http://www.sbml.org/sbml/level3/version1/qual/version1',
            })

        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].kisao_id,
            'KISAO_0000488')
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].new_value, '10')
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].target,
            "/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='x']"
        )
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].
            target_namespaces,
            {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                "sbml": "http://www.sbml.org/sbml/level3/version1/core",
                "qual":
                "http://www.sbml.org/sbml/level3/version1/qual/version1"
            },
        )
Code example #10
def handler(body, file=None):
    ''' Validate a simulation experiment (SED-ML) file

    Args:
        body (:obj:`dict`): dictionary in schema ``FileOrUrl`` with keys

            * ``url`` whose value has schema ``Url`` with the URL for a simulation experiment (SED-ML) file

        file (:obj:`werkzeug.datastructures.FileStorage`, optional): SED-ML file

    Returns:
        ``ValidationReport``: information about the validity or
            lack thereof of the simulation experiment
    '''
    simulation_file = file
    simulation_url = body.get('url', None)
    if simulation_url and simulation_file:
        raise BadRequestException(
            title='Only one of `file` or `url` can be used at a time.',
            instance=ValueError(),
        )
    if not simulation_url and not simulation_file:
        raise BadRequestException(
            title='One of `file` or `url` must be used.',
            instance=ValueError(),
        )

    # create temporary file
    simulation_filename = get_temp_file()

    # get simulation experiment
    if simulation_file:
        simulation_file.save(simulation_filename)

    else:
        try:
            response = requests.get(simulation_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'Simulation experiment could not be loaded from `{}`'.format(
                simulation_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save simulation experiment to local temporary file
        with open(simulation_filename, 'wb') as file:
            file.write(response.content)

    # validate simulation experiment
    reader = SedmlSimulationReader()
    try:
        reader.run(simulation_filename,
                   validate_models_with_languages=False,
                   validate_targets_with_model_sources=False)
    except Exception:
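        # swallow the exception only if the reader recorded validation errors; otherwise re-raise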
        if not reader.errors:
            raise
    return make_validation_report(reader.errors,
                                  reader.warnings,
                                  filenames=[simulation_filename])

def handler(body, file=None):
    ''' Get the specifications of the SED-ML files in a COMBINE/OMEX archive

    Args:
        body (:obj:`dict`): dictionary with keys

            * ``url`` whose value has schema ``Url`` with the
              URL for a COMBINE/OMEX archive

        file (:obj:`werkzeug.datastructures.FileStorage`, optional): COMBINE/OMEX archive file

    Returns:
        ``CombineArchive``: specifications of the SED-ML
            files in the COMBINE/OMEX archive
    '''
    archive_file = file
    archive_url = body.get('url', None)
    if archive_url and archive_file:
        raise BadRequestException(
            title='Only one of `file` or `url` can be used at a time.',
            instance=ValueError(),
        )
    if not archive_url and not archive_file:
        raise BadRequestException(
            title='One of `file` or `url` must be used.',
            instance=ValueError(),
        )

    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # get COMBINE/OMEX archive
    if archive_file:
        archive_file.save(archive_filename)

    else:
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file
        with open(archive_filename, 'wb') as file:
            file.write(response.content)

    # read archive
    archive_dirname = os.path.join(temp_dirname, 'archive')
    try:
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)
    except Exception as exception:
        # return exception
        raise BadRequestException(
            title='`{}` is not a valid COMBINE/OMEX archive'.format(
                archive_url if archive_url else archive_file.filename),
            instance=exception,
        )

    # get specifications of SED-ML outputs
    contents_specs = []

    sedml_contents = get_sedml_contents(archive)
    for content in sedml_contents:
        sed_doc_filename = os.path.join(archive_dirname, content.location)
        try:
            sed_doc = SedmlSimulationReader().run(
                sed_doc_filename,
                validate_semantics=False,
                validate_models_with_languages=False)
        except Exception:
            traceback.print_exc()
            continue

        sed_doc_specs = get_sed_document_specs(sed_doc)

        content_specs = {
            '_type': 'CombineArchiveSedDocSpecsContent',
            'location': {
                '_type': 'CombineArchiveSedDocSpecsLocation',
                'path': content.location,
                'value': sed_doc_specs,
            },
            'format': content.format,
            'master': content.master,
        }
        contents_specs.append(content_specs)

    # format response
    response = {
        '_type': 'CombineArchiveSedDocSpecs',
        'contents': contents_specs
    }

    # return response
    return response