# Example #1
    def validate_archive(self, filename):
        """Validate a COMBINE/OMEX archive.

        Emits a :obj:`BioSimulatorsWarning` when the validator reports
        warnings and raises :obj:`ValueError` when it reports errors.

        Args:
            filename (:obj:`str`): path to the COMBINE/OMEX archive
        """
        # extract the archive into a per-example subdirectory of the
        # instance's temporary directory
        rel_name = os.path.relpath(filename, EXAMPLES_DIR)
        out_dir = os.path.join(self.temp_dirname, rel_name)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        archive = CombineArchiveReader().run(filename, out_dir)

        config = Config(
            OMEX_METADATA_SCHEMA=OmexMetadataSchema.biosimulations, )

        # validate every known content format
        all_formats = list(CombineArchiveContentFormat.__members__.values())
        error_msgs, warning_msgs = validate(
            archive,
            out_dir,
            formats_to_validate=all_formats,
            config=config,
        )

        if warning_msgs:
            msg = 'The COMBINE/OMEX archive may be invalid.\n  {}'.format(
                flatten_nested_list_of_strings(warning_msgs).replace(
                    '\n', '\n  '))
            warnings.warn(msg, BioSimulatorsWarning)

        if error_msgs:
            msg = 'The COMBINE/OMEX archive is not valid.\n  {}'.format(
                flatten_nested_list_of_strings(error_msgs).replace(
                    '\n', '\n  '))
            raise ValueError(msg)
    def test_build_combine_archive_for_model_xpp_with_plot(self):
        """Building an archive for an XPP model should produce a 2D plot whose
        single curve maps ``U`` (x) against ``V`` (y) on linear axes."""
        ode_path = os.path.join(os.path.dirname(__file__), '..',
                                'fixtures', 'xpp', 'wilson-cowan.ode')
        omex_path = os.path.join(self.dir_name, 'archive.omex')

        model_utils.build_combine_archive_for_model(
            ode_path,
            ModelLanguage.XPP,
            UniformTimeCourseSimulation,
            omex_path,
        )

        # unpack the archive and read back the generated SED-ML document
        extract_dir = os.path.join(self.dir_name, 'archive')
        CombineArchiveReader().run(omex_path, extract_dir)
        doc = SedmlSimulationReader().run(
            os.path.join(extract_dir, 'simulation.sedml'))

        plot = doc.outputs[-1]
        self.assertIsInstance(plot, Plot2D)
        self.assertEqual(len(plot.curves), 1)

        curve = plot.curves[0]
        self.assertEqual(len(curve.x_data_generator.variables), 1)
        self.assertEqual(len(curve.y_data_generator.variables), 1)
        self.assertEqual(curve.x_data_generator.variables[0].target, 'U')
        self.assertEqual(curve.y_data_generator.variables[0].target, 'V')
        self.assertEqual(curve.x_scale, AxisScale.linear)
        self.assertEqual(curve.y_scale, AxisScale.linear)
    def test_build_combine_archive_for_model_bngl(self):
        """Build an archive for a Smoldyn model with an extra content item and
        check the generated simulation settings."""
        # NOTE(review): despite the `bngl` suffix, this test exercises a
        # Smoldyn model (`smoldyn/bounce1.txt`, `ModelLanguage.Smoldyn`) —
        # confirm whether the method name is a copy/paste leftover.
        model_filename = os.path.join(os.path.dirname(__file__), '..',
                                      'fixtures', 'smoldyn', 'bounce1.txt')
        archive_filename = os.path.join(self.dir_name, 'archive.omex')
        # create an extra file to bundle into the archive
        extra_filename = os.path.join(self.dir_name, 'extra.txt')
        with open(extra_filename, 'w') as file:
            file.write('extra content')

        model_utils.build_combine_archive_for_model(
            model_filename,
            ModelLanguage.Smoldyn,
            UniformTimeCourseSimulation,
            archive_filename,
            extra_contents={
                extra_filename:
                CombineArchiveContent(
                    location='extra.txt',
                    format=CombineArchiveContentFormat.TEXT,
                ),
            },
        )

        archive_dirname = os.path.join(self.dir_name, 'archive')
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)

        # the generated SED-ML document should carry the expected time course
        doc = SedmlSimulationReader().run(
            os.path.join(archive_dirname, 'simulation.sedml'))
        sim = doc.simulations[0]
        self.assertEqual(sim.initial_time, 0.)
        self.assertEqual(sim.output_start_time, 0.)
        self.assertEqual(sim.output_end_time, 100.)
        self.assertEqual(sim.number_of_steps, 10000)
        self.assertEqual(sim.algorithm.kisao_id, 'KISAO_0000057')
    def test_modify_combine_archive_with_uploaded_model_file_and_download(
            self):
        """POSTing an archive plus modified specs to ``/combine/modify`` with
        ``download=True`` should return the modified archive as a zip with
        five content items."""
        endpoint = '/combine/modify'

        archive_filename = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, self.TEST_CASE + '.omex'))
        with open(archive_filename, 'rb') as file:
            archive_content = file.read()

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        # point the specs at the files uploaded below
        archive_specs['contents'][0]['location'][
            'path'] = './Caravagna2010.xml'
        archive_specs['contents'][0]['location']['value'][
            'filename'] = file_0_path
        archive_specs['contents'][1]['location']['value'][
            'filename'] = file_1_path

        # context managers ensure the upload handles are closed even if the
        # request raises (previously they leaked on failure)
        with open(file_0_path, 'rb') as fid_0, \
                open(file_1_path, 'rb') as fid_1:
            data = MultiDict([
                ('archive', json.dumps({'url': 'archive.omex'})),
                ('specs', json.dumps(archive_specs)),
                ('files', fid_0),
                ('files', fid_1),
                ('download', True),
            ])
            with app.app.app.test_client() as client:
                # serve the local fixture bytes for the mocked `requests.get`
                with mock.patch('requests.get',
                                return_value=mock.Mock(
                                    raise_for_status=lambda: None,
                                    content=archive_content)):
                    response = client.post(endpoint,
                                           data=data,
                                           content_type="multipart/form-data")

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.content_type, 'application/zip')
        downloaded_archive_filename = os.path.join(self.temp_dirname,
                                                   'archive-downloaded.omex')
        with open(downloaded_archive_filename, 'wb') as file:
            file.write(response.data)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(downloaded_archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 5)
# Example #5
def gen_sedml_2d_3d(omex_file_path, base_out_path):
    """Augment each SED-ML document of a COMBINE/OMEX archive with
    pseudo-reports for its 2D/3D plots.

    For every :obj:`Plot2D`/:obj:`Plot3D` output, a :obj:`Report` named
    ``__plot__<output id>`` is appended whose data sets mirror the plot's
    data generators, and the augmented document is written to
    ``<base_out_path>/temp/simulation_<name>.sedml``.

    Args:
        omex_file_path (:obj:`str`): path to the COMBINE/OMEX archive
        base_out_path (:obj:`str`): directory in which to create ``temp``
    """
    temp_path = os.path.join(base_out_path, "temp")
    if not os.path.exists(temp_path):
        os.mkdir(temp_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    # extract the archive into the temporary directory
    archive = CombineArchiveReader().run(
        in_file=omex_file_path,
        out_dir=temp_path,
        try_reading_as_plain_zip_archive=False)

    # determine the SED-ML files to process
    sedml_contents = get_sedml_contents(archive)

    for content in sedml_contents:
        content_filename = os.path.join(temp_path, content.location)
        # base name of the SED-ML file without its extension; unlike the
        # previous `location.split('/')[1].split('.')[0]`, this does not
        # raise for locations without a directory component and picks the
        # file name (not the first directory) for nested locations
        sedml_name = os.path.splitext(os.path.basename(content.location))[0]

        doc = SedmlSimulationReader().run(content_filename)
        # iterate over a snapshot since reports are appended to doc.outputs
        # inside the loop
        for output in list(doc.outputs):
            if isinstance(output, (Plot2D, Plot3D)):
                report = Report(id='__plot__' + output.id, name=output.name)

                # collect the plot's data generators, de-duplicated by id
                data_generators = {}
                if isinstance(output, Plot2D):
                    for curve in output.curves:
                        data_generators[curve.x_data_generator.id] = \
                            curve.x_data_generator
                        data_generators[curve.y_data_generator.id] = \
                            curve.y_data_generator
                elif isinstance(output, Plot3D):
                    for surface in output.surfaces:
                        data_generators[surface.x_data_generator.id] = \
                            surface.x_data_generator
                        data_generators[surface.y_data_generator.id] = \
                            surface.y_data_generator
                        data_generators[surface.z_data_generator.id] = \
                            surface.z_data_generator

                for data_generator in data_generators.values():
                    report.data_sets.append(
                        DataSet(
                            id='__data_set__{}_{}'.format(
                                output.id, data_generator.id),
                            name=data_generator.name,
                            label=data_generator.id,
                            data_generator=data_generator,
                        ))

                # deterministic ordering for reproducible output
                report.data_sets.sort(key=lambda data_set: data_set.id)
                doc.outputs.append(report)

        filename_with_reports_for_plots = os.path.join(
            temp_path, f'simulation_{sedml_name}.sedml')
        SedmlSimulationWriter().run(doc,
                                    filename_with_reports_for_plots,
                                    validate_models_with_languages=False)
# Example #6
    def test_hdf5_response(self):
        """``/run/run`` with ``Accept: application/x-hdf`` should return the
        simulation reports as an HDF5 file with the expected ids, keys,
        shapes, and time values."""
        endpoint = '/run/run'
        archive_filename = os.path.join(
            self.FIXTURES_DIRNAME,
            'Caravagna-J-Theor-Biol-2010-tumor-suppressive-oscillations.omex')
        data = MultiDict([
            ('_type', 'SimulationRun'),
            ('archiveUrl', 'https://web.site/archive.omex'),
            ('simulator', 'copasi'),
        ])
        # mock `requests.get` so the handler "downloads" the local fixture
        with app.app.app.test_client() as client:
            with open(archive_filename, 'rb') as archive_file:
                with mock.patch('requests.get',
                                return_value=mock.Mock(
                                    raise_for_status=lambda: None,
                                    content=archive_file.read())):
                    response = client.post(
                        endpoint,
                        data=data,
                        content_type="multipart/form-data",
                        headers={'Accept': 'application/x-hdf'})

        self.assertEqual(response.status_code, 200, response.json)
        # persist the returned HDF5 payload so ReportReader can open it
        h5_filename = os.path.join(self.tmp_dirname, 'reports.h5')
        with open(h5_filename, 'wb') as file:
            file.write(response.data)

        self.assertEqual([
            'BIOMD0000000912_sim.sedml/BIOMD0000000912_report',
            'BIOMD0000000912_sim.sedml/plot_1'
        ],
                         ReportReader().get_ids(self.tmp_dirname))

        # re-extract the fixture archive to read the report definition from
        # its SED-ML document
        archive_dirname = os.path.join(self.tmp_dirname, 'archive')
        CombineArchiveReader().run(archive_filename, archive_dirname)
        sed_doc = SedmlSimulationReader().run(
            os.path.join(archive_dirname, 'BIOMD0000000912_sim.sedml'))
        report = next(output for output in sed_doc.outputs
                      if output.id == 'BIOMD0000000912_report')

        data_set_results = ReportReader().run(
            report, self.tmp_dirname,
            'BIOMD0000000912_sim.sedml/BIOMD0000000912_report')

        self.assertEqual(
            set(data_set_results.keys()),
            set([
                'data_set_time',
                'data_set_T',
                'data_set_E',
                'data_set_I',
            ]),
        )

        # 5001 equally spaced time points from 0 to 1000, and no NaNs in
        # any data set
        numpy.testing.assert_allclose(data_set_results['data_set_time'],
                                      numpy.linspace(0., 1000., 5001))
        for values in data_set_results.values():
            self.assertEqual(values.shape, (5001, ))
            self.assertFalse(numpy.any(numpy.isnan(values)))
# Example #7
def handler(url, location):
    ''' Retrieve a file at a location in a COMBINE/OMEX archive

    Args:
        url (:obj:`str`): URL for a COMBINE/OMEX archive
        location (:obj:`str`): location of the COMBINE/OMEX archive to retrieve

    Returns:
        :obj:`werkzeug.wrappers.response.Response`: response with file from COMBINE/OMEX
            archive

    Raises:
        :obj:`BadRequestException`: if the archive could not be downloaded or
            read, or if ``location`` is not a file within the archive
    '''
    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # get COMBINE/OMEX archive
    try:
        response = requests.get(url)
        response.raise_for_status()
    except requests.exceptions.RequestException as exception:
        title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
            url)
        raise BadRequestException(
            title=title,
            instance=exception,
        )

    # save archive to local temporary file
    with open(archive_filename, 'wb') as file:
        file.write(response.content)

    # read archive
    archive_dirname = os.path.join(temp_dirname, 'archive')
    try:
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)
    except Exception as exception:
        # return exception
        raise BadRequestException(
            title='`{}` is not a valid COMBINE/OMEX archive'.format(url),
            instance=exception,
        )

    # reject locations that do not resolve to an extracted file
    filename = os.path.join(archive_dirname, location)
    if not os.path.isfile(filename):
        msg = '`{}` is not a valid location in the COMBINE/OMEX archive.'.format(location)
        raise BadRequestException(
            title=msg,
            instance=ValueError())

    # look up the media type declared for this location in the manifest;
    # locations are normalized (`./a.txt` == `a.txt`) before comparison
    mimetype = None
    for content in archive.contents:
        if os.path.relpath(content.location, '.') == os.path.relpath(location, '.'):
            if content.format.startswith('http://purl.org/NET/mediatypes/'):
                mimetype = content.format[len('http://purl.org/NET/mediatypes/'):]
            break

    # NOTE(review): `attachment_filename` was renamed `download_name` in
    # Flask 2.0 and removed in 2.2 — confirm the pinned Flask version
    return flask.send_file(filename, mimetype=mimetype, as_attachment=False, attachment_filename=os.path.basename(location))
    def test_validate_no_metadata(self):
        """An archive without metadata only fails validation when OMEX
        metadata validation is enabled AND all content formats are
        requested for validation."""
        os.remove(os.path.join(self.tmp_dir, 'thumbnail.png'))

        fixture = os.path.join(self.FIXTURES_DIR, 'no-metadata.omex')
        all_formats = list(CombineArchiveContentFormat.__members__.values())

        def read_archive(config):
            # extract a fresh copy of the fixture with the given config
            return CombineArchiveReader().run(fixture, self.tmp_dir,
                                              config=config)

        # default formats: valid regardless of the metadata-validation flag
        config = Config(VALIDATE_OMEX_METADATA=True)
        errors, warnings = validate(read_archive(config), self.tmp_dir,
                                    config=config)
        self.assertEqual(errors, [])

        config = Config(VALIDATE_OMEX_METADATA=False)
        errors, warnings = validate(read_archive(config), self.tmp_dir,
                                    config=config)
        self.assertEqual(errors, [])

        # all formats + metadata validation on: missing metadata is an error
        config = Config(VALIDATE_OMEX_METADATA=True)
        errors, warnings = validate(
            read_archive(config),
            self.tmp_dir,
            formats_to_validate=all_formats,
            config=config)
        self.assertNotEqual(errors, [])

        # all formats + metadata validation off: no error
        config = Config(VALIDATE_OMEX_METADATA=False)
        errors, warnings = validate(
            read_archive(config),
            self.tmp_dir,
            formats_to_validate=all_formats,
            config=config)
        self.assertEqual(errors, [])
# Example #9
def exec_plot_output_sed_doc(omex_file_path, base_out_path):
    """Convert the plot CSV outputs (filenames containing ``__plot__``) of
    each SED-ML document in a COMBINE/OMEX archive into HDF5 pseudo-reports,
    then strip the ``__plot__`` marker from the CSV filename.

    Args:
        omex_file_path (:obj:`str`): path to the COMBINE/OMEX archive
        base_out_path (:obj:`str`): directory with the per-document CSV
            output, and target for the HDF5 output

    NOTE(review): relies on a module-level ``tmp_dir`` (not defined in this
    function) as the extraction directory — confirm it is set by the caller.
    """
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         try_reading_as_plain_zip_archive=True)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    # NOTE(review): `report_results` and `content_filename` are assigned but
    # never used in this function — possibly leftovers.
    report_results = ReportResults()
    for i_content, content in enumerate(sedml_contents):
        content_filename = os.path.join(tmp_dir, content.location)

        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            # only CSV files produced for plots carry the `__plot__` marker
            if report_filename.find('__plot__') != -1:
                report_id = os.path.splitext(
                    os.path.basename(report_filename))[0]

                # read report from CSV file produced by tellurium
                # data_set_df = pd.read_csv(report_filename).transpose()

                # transpose so each data set is a column; row 0 holds the ids,
                # which become the column labels and are then dropped
                data_set_df = pd.read_csv(report_filename, header=None).T
                data_set_df.columns = data_set_df.iloc[0]
                data_set_df.drop(0, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)

                # create pseudo-report for ReportWriter
                datasets = []
                for col in list(data_set_df.columns):
                    datasets.append(DataSet(id=col, label=col, name=col))
                #report.data_sets = datasets
                report = Report(id=report_id,
                                name=report_id,
                                data_sets=datasets)

                data_set_results = DataSetResults()

                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].values

                # append to data structure of report results

                # save file in desired BioSimulators format(s)
                # strip the `__plot__` marker for the exported report id
                export_id = report_id.replace('__plot__', '')
                report.id = export_id
                rel_path = os.path.join(content.location, report.id)
                # drop a leading `./` from the relative path, if present
                if len(rel_path.split("./")) > 1:
                    rel_path = rel_path.split("./")[1]
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5')
                os.rename(report_filename,
                          report_filename.replace('__plot__', ''))
    def test_manifest_in_manifest(self):
        """Archives whose manifests list themselves — directly or across
        multiple manifests — validate without errors but with a warning."""
        fixtures_dir = os.path.join(os.path.dirname(__file__), '..',
                                    'fixtures')
        out_dir = os.path.join(self.tmp_dir, 'out')

        # both fixtures should behave identically
        for fixture in ('manifest-in-manifest.omex',
                        'multiple-manifests.omex'):
            archive = CombineArchiveReader().run(
                os.path.join(fixtures_dir, fixture), out_dir)
            errors, warnings = validate(archive, out_dir)
            self.assertEqual(errors, [])
            self.assertIn(
                'manifests should not contain content entries for themselves',
                flatten_nested_list_of_strings(warnings))
# Example #11
    def test_create_combine_archive_with_uploaded_model_file_and_download(
            self):
        """POSTing archive specs plus the referenced files to
        ``/combine/create`` with ``download=True`` should return the new
        archive as a zip with three content items."""
        endpoint = '/combine/create'

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        # point the specs at the files uploaded below
        archive_specs['contents'][0]['location']['value'][
            'filename'] = file_0_path
        archive_specs['contents'][1]['location']['value'][
            'filename'] = file_1_path

        # context managers ensure the upload handles are closed even if the
        # request raises (previously they leaked on failure)
        with open(file_0_path, 'rb') as fid_0, \
                open(file_1_path, 'rb') as fid_1:
            data = MultiDict([
                ('specs', json.dumps(archive_specs)),
                ('files', fid_0),
                ('files', fid_1),
                ('download', True),
            ])
            with app.app.app.test_client() as client:
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.content_type, 'application/zip')
        downloaded_archive_filename = os.path.join(self.temp_dirname,
                                                   'archive-downloaded.omex')
        with open(downloaded_archive_filename, 'wb') as file:
            file.write(response.data)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(downloaded_archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 3)
    def test_validate(self):
        """A valid fixture passes validation; duplicated or missing content
        items produce the corresponding errors."""
        os.remove(os.path.join(self.tmp_dir, 'thumbnail.png'))

        archive = CombineArchiveReader().run(self.OMEX_FIXTURE, self.tmp_dir)
        errors, warnings = validate(archive, self.tmp_dir)
        self.assertEqual(errors, [])
        self.assertNotEqual(warnings, [])

        # duplicating every content item must be reported as an error
        duplicated = copy.deepcopy(archive)
        duplicated.contents.extend(archive.contents)
        errors, warnings = validate(duplicated, self.tmp_dir)
        self.assertIn('contains repeated content items',
                      flatten_nested_list_of_strings(errors))

        # an archive with no content items at all is also invalid
        emptied = copy.deepcopy(archive)
        emptied.contents = []
        errors, warnings = validate(emptied, self.tmp_dir)
        self.assertIn('does not contain content items',
                      flatten_nested_list_of_strings(errors))
# Example #13
def exec_plot_output_sed_doc(omex_file_path, base_out_path):
    """Convert the CSV outputs of each SED-ML document in a COMBINE/OMEX
    archive into HDF5: plot CSVs (filenames containing ``__plot__``) become
    pseudo-reports, all other CSVs become ordinary reports.

    After transposition, the first rows of each CSV column are expected to
    hold the data-set id/label/name headers.

    Args:
        omex_file_path (:obj:`str`): path to the COMBINE/OMEX archive
        base_out_path (:obj:`str`): directory with the per-document CSV
            output, and target for the HDF5 output

    NOTE(review): relies on a module-level ``tmp_dir`` (not defined in this
    function) as the extraction directory — confirm it is set by the caller.
    """
    # skip manifest validation when reading the archive
    config = Config(VALIDATE_OMEX_MANIFESTS=False)
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         config=config)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    # NOTE(review): `report_results` and `content_filename` are assigned but
    # never used in this function — possibly leftovers.
    report_results = ReportResults()
    for i_content, content in enumerate(sedml_contents):
        content_filename = os.path.join(tmp_dir, content.location)

        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            if report_filename.find('__plot__') != -1:
                report_id = os.path.splitext(
                    os.path.basename(report_filename))[0]

                # read report from CSV file produced by tellurium
                # data_set_df = pd.read_csv(report_filename).transpose()

                data_set_df = pd.read_csv(report_filename, header=None).T

                # rows 0/1/2 of each column hold the data set id/label/name
                datasets = []
                for col in data_set_df.columns:
                    datasets.append(
                        DataSet(id=data_set_df.loc[0, col],
                                label=data_set_df.loc[1, col],
                                name=data_set_df.loc[2, col]))
                report = Report(id=report_id,
                                name=report_id,
                                data_sets=datasets)

                # use the id row as column labels, then drop the three
                # header rows so only numeric data remains
                data_set_df.columns = data_set_df.iloc[0]
                data_set_df.drop(0, inplace=True)
                data_set_df.drop(1, inplace=True)
                data_set_df.drop(2, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)

                # create pseudo-report for ReportWriter

                data_set_results = DataSetResults()

                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].to_numpy(
                        dtype='float64')

                # append to data structure of report results

                # save file in desired BioSimulators format(s)
                # strip the `__plot__` marker for the exported report id
                export_id = report_id.replace('__plot__', '')
                report.id = export_id
                rel_path = os.path.join(content.location, report.id)
                # drop a leading `./` from the relative path, if present
                if len(rel_path.split("./")) > 1:
                    rel_path = rel_path.split("./")[1]
                # print("base: ", base_out_path, file=sys.stdout)
                # print("rel: ", rel_path, file=sys.stdout)
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5',
                                   type=Plot2D)
                os.rename(report_filename,
                          report_filename.replace('__plot__', ''))

            else:
                print("report   : ", report_filename, file=sys.stdout)
                report_id = os.path.splitext(
                    os.path.basename(report_filename))[0]
                data_set_df = pd.read_csv(report_filename, header=None).T

                # rows 0/1 of each column hold the data set id/label;
                # ordinary reports carry no name row
                datasets = []
                for col in data_set_df.columns:
                    datasets.append(
                        DataSet(id=data_set_df.loc[0, col],
                                label=data_set_df.loc[1, col],
                                name=""))
                report = Report(id=report_id,
                                name=report_id,
                                data_sets=datasets)

                data_set_df.columns = data_set_df.iloc[0]  # use ids
                data_set_df.drop(0, inplace=True)
                data_set_df.drop(1, inplace=True)
                data_set_df.drop(2, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)

                data_set_results = DataSetResults()
                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].to_numpy(
                        dtype='float64')

                rel_path = os.path.join(content.location, report.id)
                if len(rel_path.split("./")) > 1:
                    rel_path = rel_path.split("./")[1]
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5',
                                   type=Report)
# Example #14
def exec_sed_doc(omex_file_path, base_out_path):
    """Convert the CSV reports produced for each SED-ML document of a
    COMBINE/OMEX archive into HDF5 via :obj:`ReportWriter`.

    For ordinary reports the data sets declared in the SED-ML document are
    used; for plots (pseudo-reports) the CSV header row supplies the data-set
    ids. The temporary extraction directory is removed afterwards.

    Args:
        omex_file_path (:obj:`str`): path to the COMBINE/OMEX archive
        base_out_path (:obj:`str`): directory with the per-document CSV
            output, and target for the HDF5 output

    NOTE(review): relies on a module-level ``tmp_dir`` (not defined in this
    function) as the extraction directory — confirm it is set by the caller.
    """
    # read the archive, skipping manifest validation
    config = Config(VALIDATE_OMEX_MANIFESTS=False)
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         config=config)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    report_results = ReportResults()
    for content in sedml_contents:
        content_filename = os.path.join(tmp_dir, content.location)

        doc = SedmlSimulationReader().run(content_filename)

        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            report_id = os.path.splitext(os.path.basename(report_filename))[0]

            # read report from CSV file produced by VCell; the round trip
            # leaves the data-set labels as the row index
            data_set_df = pd.read_csv(report_filename).transpose()
            data_set_df.columns = data_set_df.iloc[0]
            data_set_df = data_set_df.drop(data_set_df.iloc[0].name)
            data_set_df = data_set_df.reset_index()
            data_set_df = data_set_df.rename(
                columns={'index': data_set_df.columns.name})
            data_set_df = data_set_df.transpose()
            data_set_df.index.name = None

            report = next(report for report in doc.outputs
                          if report.id == report_id)

            data_set_results = DataSetResults()

            # `isinstance` instead of `type(...) !=` so subclasses of the
            # plot classes are also treated as plots
            is_plot = isinstance(report, (Plot2D, Plot3D))
            if not is_plot:
                # report declared in the SED-ML document: look rows up by label
                for data_set in report.data_sets:
                    data_set_results[data_set.id] = data_set_df.loc[
                        data_set.label, :].to_numpy(dtype='float64')
            else:
                # pseudo-report for a plot: no data sets in the SED-ML, so
                # re-read the CSV and use its header row as column ids
                data_set_df = pd.read_csv(report_filename, header=None).T
                data_set_df.columns = data_set_df.iloc[0]
                data_set_df.drop(0, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)
                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].values

            # append to data structure of report results
            report_results[report_id] = data_set_results

            # save file in desired BioSimulators format(s);
            # drop a leading `./` from the relative path, if present
            rel_path = os.path.join(content.location, report.id)
            if len(rel_path.split("./")) > 1:
                rel_path = rel_path.split("./")[1]

            if is_plot:
                # synthesize data sets from the CSV columns for the writer
                datasets = []
                for col in list(data_set_df.columns):
                    datasets.append(DataSet(id=col, label=col, name=col))
                report.data_sets = datasets
            ReportWriter().run(report,
                               data_set_results,
                               base_out_path,
                               rel_path,
                               format='h5')

    # remove the temporary extraction directory
    shutil.rmtree(tmp_dir)
    def test_add_file_to_combine_archive_download(self):
        """POSTing an archive and a new content item to ``/combine/file`` with
        ``download=True`` should return a modified archive containing the
        original contents plus the new file."""
        endpoint = '/combine/file'

        archive_filename = os.path.join(self.FIXTURES_DIR,
                                        self.TEST_CASE + '.omex')
        new_content_filename = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        new_content = {
            '_type': 'CombineArchiveContent',
            'location': './NewLocation.txt',
            "filename": new_content_filename,
            'format': 'http://purl.org/NET/mediatypes/text/plain',
            'master': False,
        }

        # context managers ensure the upload handles are closed even if the
        # request raises (previously they leaked on failure)
        with open(archive_filename, 'rb') as archive_fid, \
                open(new_content_filename, 'rb') as new_content_fid:
            data = MultiDict([
                ('archive', json.dumps({"filename": archive_filename})),
                ('newContent', json.dumps(new_content)),
                ('files', archive_fid),
                ('files', new_content_fid),
                ('download', True),
            ])
            with app.app.app.test_client() as client:
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.content_type, 'application/zip')
        modified_archive_filename = os.path.join(self.temp_dirname,
                                                 'archive-downloaded.omex')
        with open(modified_archive_filename, 'wb') as file:
            file.write(response.data)

        modified_archive_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(modified_archive_filename,
                                             modified_archive_dirname)

        # the modified archive has the original contents plus the new file
        self.assertEqual(
            set(
                os.path.relpath(content.location, '.')
                for content in archive.contents),
            set([
                'BIOMD0000000912_sim.sedml',
                'Caravagna2010.xml',
                'metadata.rdf',
                'NewLocation.txt',
            ]))

        # the new entry carries the requested format and master flag, and
        # the file was actually extracted
        content = next(content for content in archive.contents
                       if content.location == 'NewLocation.txt')
        self.assertEqual(content.format,
                         'http://purl.org/NET/mediatypes/text/plain')
        self.assertEqual(content.master, False)
        self.assertTrue(
            os.path.isfile(
                os.path.join(modified_archive_dirname, 'NewLocation.txt')))
# Example #16
def handler(body, files=None):
    ''' Add one or more files to a COMBINE/OMEX archive

    Args:
        body (:obj:`dict`): dictionary with schema ``CombineArchiveAndAdditionalContent`` with the
            specifications of the desired additions to the COMBINE/OMEX archive
        files (:obj:`list` of :obj:`werkzeug.datastructures.FileStorage`, optional): files (e.g., SBML
            file)

    Returns:
        :obj:`werkzeug.wrappers.response.Response` or :obj:`str`: response with COMBINE/OMEX
            archive or a URL to a COMBINE/OMEX archive

    Raises:
        :obj:`BadRequestException`: if the archive cannot be fetched or parsed, or a
            referenced upload is missing
    '''
    archive = body.get('archive')
    new_content = body.get('newContent')
    # uploaded files come from the multipart form data, not the ``files`` argument
    files = connexion.request.files.getlist('files')
    filename_map = {file.filename: file for file in files}
    overwrite_locations = body.get('overwriteLocations', True)
    download = body.get('download', False)

    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # get COMBINE/OMEX archive, either from a URL or from an uploaded file
    archive_url = archive.get('url', None)
    if archive_url:
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file
        with open(archive_filename, 'wb') as file:
            file.write(response.content)
    else:
        archive_file = filename_map.get(archive['filename'], None)
        if archive_file is None:
            raise BadRequestException(
                title='Archive file with name `{}` was not uploaded'.format(
                    archive['filename']),
                instance=ValueError(),
            )
        archive_file.save(archive_filename)

    # read archive
    archive_dirname = os.path.join(temp_dirname, 'archive')
    try:
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)
    except Exception as exception:
        # return exception
        raise BadRequestException(
            title='COMBINE/OMEX archive is not valid.',
            instance=exception,
        )

    # locate an existing content item at the requested location, if any
    location_content_map = {
        os.path.relpath(content.location, '.'): content
        for content in archive.contents
    }
    new_location = os.path.relpath(new_content['location'], '.')
    content = location_content_map.get(new_location, None)
    if content is None:
        content = CombineArchiveContent(location=new_location)
        archive.contents.append(content)

    else:
        if not overwrite_locations:
            # find a free location by appending `_1`, `_2`, ... before the extension
            new_location_parts = os.path.splitext(new_location)
            i_file = 0
            while True:
                i_file += 1
                temp_new_location = new_location_parts[0] + '_' + str(
                    i_file) + new_location_parts[1]
                if temp_new_location not in location_content_map:
                    new_location = temp_new_location
                    break

            content = CombineArchiveContent(location=new_location)
            archive.contents.append(content)

    content_filename = new_content['filename']
    file = filename_map.get(content_filename, None)
    if file:
        # ensure parent directories exist so contents can be saved at nested
        # locations (consistent with the modify-archive handler)
        save_filename = os.path.join(archive_dirname, new_location)
        if not os.path.isdir(os.path.dirname(save_filename)):
            os.makedirs(os.path.dirname(save_filename))
        file.save(save_filename)
    else:
        raise BadRequestException(
            title='File with name `{}` was not uploaded'.format(
                content_filename),
            instance=ValueError(),
        )

    content.format = new_content['format']
    content.master = new_content['master']

    # package COMBINE/OMEX archive
    CombineArchiveWriter().run(archive, archive_dirname, archive_filename)

    if download:
        return flask.send_file(archive_filename,
                               mimetype='application/zip',
                               as_attachment=True,
                               attachment_filename='archive.omex')

    else:
        # save COMBINE/OMEX archive to S3 bucket
        archive_url = src.s3.save_temporary_combine_archive_to_s3_bucket(
            archive_filename, public=True)

        # return URL for archive in S3 bucket
        return archive_url
def handler(body, file=None):
    ''' Get the specifications of the SED-ML files in a COMBINE/OMEX archive

    Args:
        body (:obj:`dict`): dictionary with keys

            * ``url`` whose value has schema ``Url`` with the
              URL for a COMBINE/OMEX archive

        file (:obj:`werkzeug.datastructures.FileStorage`, optional): COMBINE/OMEX archive file

    Returns:
        ``CombineArchive``: specifications of the SED-ML
            files in the COMBINE/OMEX archive
    '''
    archive_file = file
    archive_url = body.get('url', None)

    # exactly one source — an uploaded file or a URL — must be provided
    if archive_url:
        if archive_file:
            raise BadRequestException(
                title='Only one of `file` or `url` can be used at a time.',
                instance=ValueError(),
            )
    elif not archive_file:
        raise BadRequestException(
            title='One of `file` or `url` must be used.',
            instance=ValueError(),
        )

    # temporary workspace for the archive and its extracted contents
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # materialize the archive as a local file
    if archive_file:
        archive_file.save(archive_filename)
    else:
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        with open(archive_filename, 'wb') as local_file:
            local_file.write(response.content)

    # unpack and parse the archive
    archive_dirname = os.path.join(temp_dirname, 'archive')
    try:
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)
    except Exception as exception:
        raise BadRequestException(
            title='`{}` is not a valid COMBINE/OMEX archive'.format(
                archive_url if archive_url else archive_file.filename),
            instance=exception,
        )

    # collect specifications for each SED-ML document; documents that cannot be
    # parsed are logged and skipped rather than failing the whole request
    contents_specs = []
    for content in get_sedml_contents(archive):
        sed_doc_filename = os.path.join(archive_dirname, content.location)
        try:
            sed_doc = SedmlSimulationReader().run(
                sed_doc_filename,
                validate_semantics=False,
                validate_models_with_languages=False)
        except Exception:
            traceback.print_exc()
            continue

        contents_specs.append({
            '_type': 'CombineArchiveSedDocSpecsContent',
            'location': {
                '_type': 'CombineArchiveSedDocSpecsLocation',
                'path': content.location,
                'value': get_sed_document_specs(sed_doc),
            },
            'format': content.format,
            'master': content.master,
        })

    # return response
    return {
        '_type': 'CombineArchiveSedDocSpecs',
        'contents': contents_specs,
    }
    def test_add_file_to_combine_archive_at_existing_location_overwrite(self):
        # POST an archive plus a new file whose location collides with an existing
        # entry (`Caravagna2010.xml`); with `overwriteLocations=True` the existing
        # entry should be replaced in place rather than saved at a derived location.
        endpoint = '/combine/file'

        archive_filename = os.path.join(self.FIXTURES_DIR,
                                        self.TEST_CASE + '.omex')
        new_content_filename = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        new_content = {
            '_type': 'CombineArchiveContent',
            'location': 'Caravagna2010.xml',
            "filename": new_content_filename,
            'format': 'http://purl.org/NET/mediatypes/text/plain',
            'master': False,
        }

        # NOTE(review): archive_specs is loaded but never used in this test —
        # confirm whether it can be removed
        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        archive_fid = open(archive_filename, 'rb')
        new_content_fid = open(new_content_filename, 'rb')

        # multipart form: JSON parts plus the two uploaded files
        data = MultiDict([
            ('archive', json.dumps({"filename": archive_filename})),
            ('newContent', json.dumps(new_content)),
            ('files', archive_fid),
            ('files', new_content_fid),
            ('overwriteLocations', True),
        ])

        modified_archive_filename = os.path.join(self.temp_dirname,
                                                 'archive.omex')
        with app.app.app.test_client() as client:

            # stub S3 upload: copy the archive locally and return its path as the "URL"
            def save_temporary_combine_archive_to_s3_bucket(
                    filename,
                    public=True,
                    archive_filename=modified_archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        archive_fid.close()
        new_content_fid.close()

        self.assertEqual(response.status_code, 200, response.json)
        # handler returns the (mocked) S3 URL of the modified archive
        self.assertEqual(response.json, modified_archive_filename)

        modified_archive_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(modified_archive_filename,
                                             modified_archive_dirname)

        # no new location was added — the archive still has exactly its original entries
        self.assertEqual(
            set(
                os.path.relpath(content.location, '.')
                for content in archive.contents),
            set([
                'BIOMD0000000912_sim.sedml',
                'Caravagna2010.xml',
                'metadata.rdf',
            ]))

        # the overwritten entry carries the new format/master flags and the new bytes
        content = next(
            content for content in archive.contents
            if os.path.relpath(content.location, '.') == 'Caravagna2010.xml')
        self.assertEqual(content.format,
                         'http://purl.org/NET/mediatypes/text/plain')
        self.assertEqual(content.master, False)
        with open(os.path.join(modified_archive_dirname, 'Caravagna2010.xml'),
                  'r') as file:
            self.assertEqual(file.read(), 'A text file\n')
Пример #19
0
def handler(body, file=None):
    ''' Validate a COMBINE/OMEX archive

    Args:
        body (:obj:`dict`): dictionary in schema ``ValidateCombineArchiveFileOrUrl`` with keys

            * ``url`` whose value has schema ``Url`` with the URL for a COMBINE/OMEX archive
            * ``omexMetadataFormat`` (:obj:`str`): format of the OMEX Metadata files
            * ``omexMetadataSchema`` (:obj:`str`): schema for validating the OMEX Metadata files
            * ``validateOmexManifest`` (:obj:`bool`, optional): Whether to validate the OMEX manifest file in the archive
            * ``validateSedml`` (:obj:`bool`, optional): Whether to validate the SED-ML files in the archive
            * ``validateSedmlModels`` (:obj:`bool`, optional): Whether to validate the sources of the models in the SED-ML files in the archive
            * ``validateOmexMetadata`` (:obj:`bool`, optional): Whether to validate the OMEX metdata files in the archive according to
                `BioSimulators' conventions <https://docs.biosimulations.org/concepts/conventions/simulation-project-metadata/>`_
            * ``validateImages`` (:obj:`bool`, optional): Whether to validate the images (BMP, GIF, JPEG, PNG, TIFF WEBP) files in the archive

        file (:obj:`werkzeug.datastructures.FileStorage`): COMBINE/OMEX archive file

    Returns:
        ``ValidationReport``: information about the validity or
            lack thereof of a COMBINE/OMEX archive

    Raises:
        :obj:`BadRequestException`: if the request options are invalid or the archive
            cannot be fetched
    '''
    try:
        omexMetadataInputFormat = OmexMetadataInputFormat(
            body['omexMetadataFormat'])
    except ValueError as exception:
        # fix: pass the exception as ``instance`` — consistent with every other
        # BadRequestException call (``exception`` is not a recognized argument)
        raise BadRequestException(
            title='`omexMetadataFormat` must be a recognized format.',
            instance=exception)

    try:
        omexMetadataSchema = OmexMetadataSchema(body['omexMetadataSchema'])
    except ValueError as exception:
        raise BadRequestException(
            title='`omexMetadataSchema` must be a recognized schema.',
            instance=exception)

    # all validation aspects default to enabled
    config = Config(
        OMEX_METADATA_INPUT_FORMAT=omexMetadataInputFormat,
        OMEX_METADATA_SCHEMA=omexMetadataSchema,
        VALIDATE_OMEX_MANIFESTS=body.get('validateOmexManifest', True),
        VALIDATE_SEDML=body.get('validateSedml', True),
        VALIDATE_SEDML_MODELS=body.get('validateSedmlModels', True),
        VALIDATE_OMEX_METADATA=body.get('validateOmexMetadata', True),
        VALIDATE_IMAGES=body.get('validateImages', True),
    )

    # exactly one source — an uploaded file or a URL — must be provided
    archive_file = file
    archive_url = body.get('url', None)
    if archive_url and archive_file:
        raise BadRequestException(
            title='Only one of `file` or `url` can be used at a time.',
            instance=ValueError(),
        )
    if not archive_url and not archive_file:
        raise BadRequestException(
            title='One of `file` or `url` must be used.',
            instance=ValueError(),
        )

    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # get COMBINE/OMEX archive
    if archive_file:
        archive_file.save(archive_filename)

    else:
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file
        with open(archive_filename, 'wb') as file:
            file.write(response.content)

    # read archive; a parse failure becomes part of the validation report
    # rather than an HTTP error
    archive_dirname = os.path.join(temp_dirname, 'archive')
    reader = CombineArchiveReader()
    error_msgs = []
    warning_msgs = []
    try:
        archive = reader.run(archive_filename, archive_dirname, config=config)
    except Exception as exception:
        error_msgs = [[
            'The file could not be parsed as a COMBINE/OMEX archive.',
            [[str(exception)]]
        ]]

    if not error_msgs:
        error_msgs, warning_msgs = validate(
            archive,
            archive_dirname,
            formats_to_validate=list(
                CombineArchiveContentFormat.__members__.values()),
            config=config,
        )

    return make_validation_report(error_msgs,
                                  warning_msgs,
                                  filenames=[archive_filename])
Пример #20
0
def handler(body, files=None):
    ''' Modify a COMBINE/OMEX archive.

    Args:
        body (:obj:`dict`): dictionary with schema ``ModifyCombineArchiveSpecsAndFiles`` with the
            specifications of the COMBINE/OMEX archive to create
        files (:obj:`list` of :obj:`werkzeug.datastructures.FileStorage`, optional): files (e.g., SBML
            file)

    Returns:
        :obj:`werkzeug.wrappers.response.Response` or :obj:`str`: response with COMBINE/OMEX
            archive or a URL to a COMBINE/OMEX archive

    Raises:
        :obj:`BadRequestException`: if the archive cannot be fetched or parsed, a
            referenced upload is missing, or a content type is unsupported
    '''
    download = body.get('download', False)
    archive_filename_or_url = body['archive']
    archive_specs = body['specs']
    # uploaded files come from the multipart form data, not the ``files`` argument
    files = connexion.request.files.getlist('files')

    # build map from model filenames to file objects
    filename_map = {
        file.filename: file
        for file in files
    }

    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # save COMBINE/OMEX archive to local temporary file;
    # exactly one of `filename` (an upload) or `url` must be given
    if 'filename' in archive_filename_or_url and 'url' in archive_filename_or_url:
        raise BadRequestException(
            title='Only one of `filename` or `url` can be used at a time.',
            instance=ValueError(),
        )

    elif 'filename' not in archive_filename_or_url and 'url' not in archive_filename_or_url:
        raise BadRequestException(
            title='One of `filename` or `url` must be used.',
            instance=ValueError(),
        )

    elif 'filename' in archive_filename_or_url:
        # get COMBINE/OMEX archive
        # NOTE(review): a missing upload raises KeyError here rather than a
        # BadRequestException like the other handlers — confirm intended
        archive_file = filename_map[archive_filename_or_url['filename']]

        # save archive to local temporary file
        archive_file.save(archive_filename)

    else:
        # get COMBINE/OMEX archive
        archive_url = archive_filename_or_url['url']
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file
        with open(archive_filename, 'wb') as file:
            file.write(response.content)

    # read archive
    archive_dirname = os.path.join(temp_dirname, 'archive')
    try:
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)
    except Exception as exception:
        # return exception
        raise BadRequestException(
            title='`{}` is not a valid COMBINE/OMEX archive'.format(
                archive_filename_or_url.get('filename', None) or archive_filename_or_url.get('url', None)
            ),
            instance=exception,
        )

    # build map of locations in archive to contents
    archive_location_to_contents = {
        os.path.relpath(content.location, '.'): content
        for content in archive.contents
    }

    # add files to archive or modify existing files; each spec's payload is
    # materialized on disk first, then the manifest entry is created/updated
    for content in archive_specs['contents']:
        content_type = content['location']['value']['_type']
        if content_type == 'SedDocument':
            # build a SED-ML document from its JSON specification
            sed_doc = export_sed_doc(content['location']['value'])

            # save SED-ML document to file
            try:
                SedmlSimulationWriter().run(
                    sed_doc,
                    os.path.join(archive_dirname, content['location']['path']),
                    validate_models_with_languages=False)
            except ValueError as exception:
                raise BadRequestException(
                    title='`{}` does not contain a configuration for a valid SED-ML document.'.format(
                        content['location']['value']),
                    instance=exception,
                )

        elif content_type == 'CombineArchiveContentFile':
            # copy an uploaded file into the archive
            file = filename_map.get(
                content['location']['value']['filename'], None)
            if not file:
                raise BadRequestException(
                    title='File with name `{}` was not uploaded'.format(
                        content['location']['value']['filename']),
                    instance=ValueError(),
                )
            filename = os.path.join(archive_dirname,
                                    content['location']['path'])
            # ensure parent directories exist for nested locations
            if not os.path.isdir(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))
            file.save(filename)

        elif content_type == 'CombineArchiveContentUrl':
            # download the content from a URL into the archive
            filename = os.path.join(archive_dirname,
                                    content['location']['path'])
            if not os.path.isdir(os.path.dirname(filename)):
                os.makedirs(os.path.dirname(filename))

            content_url = content['location']['value']['url']
            try:
                response = requests.get(content_url)
                response.raise_for_status()
            except requests.exceptions.RequestException as exception:
                title = 'COMBINE/OMEX archive content could not be loaded from `{}`'.format(
                    content_url)
                raise BadRequestException(
                    title=title,
                    instance=exception,
                )
            with open(filename, 'wb') as file:
                file.write(response.content)

        else:
            raise BadRequestException(
                title='Content of type `{}` is not supported'.format(
                    content_type),
                instance=NotImplementedError('Invalid content')
            )  # pragma: no cover: unreachable due to schema validation

        # upsert the manifest entry: reuse the existing content at this
        # location if present, otherwise append a new one
        combine_archive_content = archive_location_to_contents.get(os.path.relpath(content['location']['path'], '.'), None)
        if combine_archive_content is None:
            combine_archive_content = CombineArchiveContent(
                location=content['location']['path'],
                format=content['format'],
                master=content['master'],
            )

            archive.contents.append(combine_archive_content)

        else:
            combine_archive_content.format = content['format']
            combine_archive_content.master = content['master']

    # package COMBINE/OMEX archive
    CombineArchiveWriter().run(archive, archive_dirname, archive_filename)

    if download:
        return flask.send_file(archive_filename,
                               mimetype='application/zip',
                               as_attachment=True,
                               attachment_filename='archive.omex')

    else:
        # save COMBINE/OMEX archive to S3 bucket
        archive_url = src.s3.save_temporary_combine_archive_to_s3_bucket(archive_filename, public=True)

        # return URL for archive in S3 bucket
        return archive_url
    def test_modify_combine_archive_with_uploaded_model_file(self):
        # POST an existing archive plus a specs document that replaces one model
        # file and relocates another into a subdirectory; verify the modified
        # archive's manifest and the round-tripped SED-ML document.
        endpoint = '/combine/modify'

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        file_2_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, self.TEST_CASE + '.omex'))
        # point the specs at the fixture files and move content 1 into a subdir
        archive_specs['contents'][0]['location']['path'] = 'Caravagna2010.xml'
        archive_specs['contents'][0]['location']['value'][
            'filename'] = file_0_path
        archive_specs['contents'][1]['location'][
            'path'] = 'subdir/' + archive_specs['contents'][1]['location'][
                'path']
        archive_specs['contents'][1]['location']['value'][
            'filename'] = file_1_path

        fid_0 = open(file_0_path, 'rb')
        fid_1 = open(file_1_path, 'rb')
        fid_2 = open(file_2_path, 'rb')

        # multipart form: the archive to modify, its specs, and the uploads
        data = MultiDict([
            ('archive', json.dumps({'filename': file_2_path})),
            ('specs', json.dumps(archive_specs)),
            ('files', fid_0),
            ('files', fid_1),
            ('files', fid_2),
        ])
        with app.app.app.test_client() as client:
            archive_filename = os.path.join(self.temp_dirname, 'archive.omex')

            # stub S3 upload: copy the archive locally and return its path as the "URL"
            def save_temporary_combine_archive_to_s3_bucket(
                    filename, public=True, archive_filename=archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        fid_0.close()
        fid_1.close()
        fid_2.close()

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.json, archive_filename)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 5)

        # every spec'd content must appear in the manifest with its format/master flags
        for expected_content in archive_specs['contents']:
            found = False
            for content in archive.contents:
                if os.path.relpath(content.location, '.') == os.path.relpath(
                        expected_content['location']['path'], '.'):
                    found = True
                    self.assertEqual(content.format,
                                     expected_content['format'])
                    self.assertEqual(content.master,
                                     expected_content['master'])
                    break
            self.assertTrue(found)

        # re-read the generated SED-ML document and spot-check it against the specs
        sed_doc = SedmlSimulationReader().run(
            os.path.join(contents_dirname,
                         archive_specs['contents'][2]['location']['path']),
            validate_models_with_languages=False)
        sed_doc_specs = archive_specs['contents'][2]['location']['value']
        self.assertEqual(sed_doc.level, sed_doc_specs['level'])
        self.assertEqual(sed_doc.version, sed_doc_specs['version'])

        self.assertEqual(sed_doc.tasks[0].model, sed_doc.models[0])
        self.assertEqual(len(sed_doc.models[0].changes), 2)
        self.assertEqual(
            sed_doc.models[0].changes[0].target,
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k1']/@value"
        )
        self.assertEqual(sed_doc.models[0].changes[0].new_value, '1.2')
        self.assertEqual(
            sed_doc.models[0].changes[0].target_namespaces, {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                'sbml': 'http://www.sbml.org/sbml/level3/version1/core',
                'qual':
                'http://www.sbml.org/sbml/level3/version1/qual/version1',
            })

        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].kisao_id,
            'KISAO_0000488')
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].new_value, '10')
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].target,
            "/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='x']"
        )
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].
            target_namespaces,
            {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                "sbml": "http://www.sbml.org/sbml/level3/version1/core",
                "qual":
                "http://www.sbml.org/sbml/level3/version1/qual/version1"
            },
        )
Пример #22
0
    def test_create_combine_archive_with_model_at_url(self):
        # Create an archive whose two model contents are given as URLs (not uploads);
        # requests.get is mocked to serve the fixture files, and the S3 upload is
        # stubbed to a local copy.
        endpoint = '/combine/create'

        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        file_0_url = 'https://models.org/model.xml'
        file_1_url = 'https://models.org/file.txt'
        # switch the first two contents from uploaded files to URL sources
        archive_specs['contents'][0]['location']['value'].pop('filename')
        archive_specs['contents'][1]['location']['value'].pop('filename')
        archive_specs['contents'][0]['location']['value'][
            '_type'] = 'CombineArchiveContentUrl'
        archive_specs['contents'][1]['location']['value'][
            '_type'] = 'CombineArchiveContentUrl'
        archive_specs['contents'][0]['location']['value']['url'] = file_0_url
        archive_specs['contents'][1]['location']['value']['url'] = file_1_url

        # these handles back the mocked HTTP responses; nothing is uploaded
        fid_0 = open(file_0_path, 'rb')
        fid_1 = open(file_1_path, 'rb')

        data = MultiDict([
            ('specs', json.dumps(archive_specs)),
        ])
        with app.app.app.test_client() as client:
            archive_filename = os.path.join(self.temp_dirname, 'archive.omex')

            # stub S3 upload: copy the archive locally and return its path as the "URL"
            def save_temporary_combine_archive_to_s3_bucket(
                    filename, public=True, archive_filename=archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):

                # serve the fixture bytes for the two expected URLs only
                def requests_get(url):
                    assert url in [file_0_url, file_1_url]
                    if url == file_0_url:
                        return mock.Mock(raise_for_status=lambda: None,
                                         content=fid_0.read())
                    else:
                        return mock.Mock(raise_for_status=lambda: None,
                                         content=fid_1.read())

                with mock.patch('requests.get', side_effect=requests_get):
                    response = client.post(endpoint,
                                           data=data,
                                           content_type="multipart/form-data")

        fid_0.close()
        fid_1.close()

        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.json, archive_filename)

        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 3)

        # manifest entries must match the specs, in order
        for content, expected_content in zip(archive.contents,
                                             archive_specs['contents']):
            self.assertEqual(content.location,
                             expected_content['location']['path'])
            self.assertEqual(content.format, expected_content['format'])
            self.assertEqual(content.master, expected_content['master'])

        # re-read the generated SED-ML document and spot-check it against the specs
        sed_doc = SedmlSimulationReader().run(
            os.path.join(contents_dirname,
                         archive_specs['contents'][2]['location']['path']),
            validate_models_with_languages=False)
        sed_doc_specs = archive_specs['contents'][2]['location']['value']
        self.assertEqual(sed_doc.level, sed_doc_specs['level'])
        self.assertEqual(sed_doc.version, sed_doc_specs['version'])

        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].kisao_id,
            'KISAO_0000488')
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].new_value, '10')
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].target,
            "/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='x']"
        )
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].
            target_namespaces,
            {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                "sbml": "http://www.sbml.org/sbml/level3/version1/core",
                "qual":
                "http://www.sbml.org/sbml/level3/version1/qual/version1"
            },
        )
Пример #23
0
    def test_create_combine_archive_with_uploaded_model_file(self):
        """POSTing archive specs plus uploaded model/data files to
        ``/combine/create`` should return the location of a 3-entry
        COMBINE/OMEX archive whose SED-ML document round-trips the
        requested styles, model changes, algorithm parameters, and
        data-generator targets from the fixture specs.
        """
        endpoint = '/combine/create'

        # load the archive specification fixture
        archive_specs_filename = os.path.join(self.FIXTURES_DIR,
                                              'archive-specs.json')
        with open(archive_specs_filename, 'rb') as file:
            archive_specs = json.load(file)

        # point the first two archive entries at local fixture files
        # that will be uploaded as multipart form fields
        file_0_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'model.xml'))
        file_1_path = os.path.abspath(
            os.path.join(self.FIXTURES_DIR, 'file.txt'))
        archive_specs['contents'][0]['location']['value'][
            'filename'] = file_0_path
        archive_specs['contents'][1]['location']['value'][
            'filename'] = file_1_path

        fid_0 = open(file_0_path, 'rb')
        fid_1 = open(file_1_path, 'rb')

        # MultiDict allows repeated 'files' keys in one multipart request
        data = MultiDict([
            ('specs', json.dumps(archive_specs)),
            ('files', fid_0),
            ('files', fid_1),
        ])
        with app.app.app.test_client() as client:
            archive_filename = os.path.join(self.temp_dirname, 'archive.omex')

            # capture the archive that would be uploaded to S3 into a
            # local file so its contents can be inspected below
            def save_temporary_combine_archive_to_s3_bucket(
                    filename, public=True, archive_filename=archive_filename):
                shutil.copy(filename, archive_filename)
                return archive_filename

            with mock.patch(
                    'src.s3.save_temporary_combine_archive_to_s3_bucket',
                    side_effect=save_temporary_combine_archive_to_s3_bucket):
                response = client.post(endpoint,
                                       data=data,
                                       content_type="multipart/form-data")

        fid_0.close()
        fid_1.close()

        # the endpoint returns the (mocked) S3 location of the archive
        self.assertEqual(response.status_code, 200, response.json)
        self.assertEqual(response.json, archive_filename)

        # unpack the produced archive and check its manifest matches the specs
        contents_dirname = os.path.join(self.temp_dirname, 'archive')
        archive = CombineArchiveReader().run(archive_filename,
                                             contents_dirname)

        self.assertEqual(len(archive.contents), 3)

        for content, expected_content in zip(archive.contents,
                                             archive_specs['contents']):
            self.assertEqual(content.location,
                             expected_content['location']['path'])
            self.assertEqual(content.format, expected_content['format'])
            self.assertEqual(content.master, expected_content['master'])

        # strict model validation must reject the fixture SED-ML document
        # (it is missing a required XML attribute) ...
        with self.assertRaisesRegex(ValueError,
                                    'Missing a required XML attribute'):
            sed_doc = SedmlSimulationReader().run(
                os.path.join(contents_dirname,
                             archive_specs['contents'][2]['location']['path']),
                validate_models_with_languages=True)
        # ... but the document should still parse without model validation
        sed_doc = SedmlSimulationReader().run(
            os.path.join(contents_dirname,
                         archive_specs['contents'][2]['location']['path']),
            validate_models_with_languages=False)
        sed_doc_specs = archive_specs['contents'][2]['location']['value']
        self.assertEqual(sed_doc.level, sed_doc_specs['level'])
        self.assertEqual(sed_doc.version, sed_doc_specs['version'])

        # first style: fully specified line, marker, and fill attributes
        self.assertEqual(sed_doc.styles[0].id,
                         sed_doc_specs['styles'][0]['id'])
        self.assertEqual(sed_doc.styles[0].name,
                         sed_doc_specs['styles'][0].get('name', None))
        self.assertEqual(sed_doc.styles[0].base,
                         sed_doc_specs['styles'][0].get('base', None))
        self.assertEqual(
            sed_doc.styles[0].line.type.value,
            sed_doc_specs['styles'][0].get('line', None).get('type', None))
        self.assertEqual(
            sed_doc.styles[0].line.color,
            sed_doc_specs['styles'][0].get('line', None).get('color', None))
        self.assertEqual(
            sed_doc.styles[0].line.thickness,
            sed_doc_specs['styles'][0].get('line',
                                           None).get('thickness', None))
        self.assertEqual(
            sed_doc.styles[0].marker.type.value,
            sed_doc_specs['styles'][0].get('marker', None).get('type', None))
        self.assertEqual(
            sed_doc.styles[0].marker.size,
            sed_doc_specs['styles'][0].get('marker', None).get('size', None))
        self.assertEqual(
            sed_doc.styles[0].marker.line_color,
            sed_doc_specs['styles'][0].get('marker',
                                           None).get('lineColor', None))
        self.assertEqual(
            sed_doc.styles[0].marker.line_thickness,
            sed_doc_specs['styles'][0].get('marker',
                                           None).get('lineThickness', None))
        self.assertEqual(
            sed_doc.styles[0].marker.fill_color,
            sed_doc_specs['styles'][0].get('marker',
                                           None).get('fillColor', None))
        self.assertEqual(
            sed_doc.styles[0].fill.color,
            sed_doc_specs['styles'][0].get('fill', None).get('color', None))

        # second style: derives from a base style and has no line settings
        self.assertEqual(sed_doc.styles[1].id,
                         sed_doc_specs['styles'][1]['id'])
        self.assertEqual(sed_doc.styles[1].name,
                         sed_doc_specs['styles'][1].get('name', None))
        self.assertEqual(sed_doc.styles[1].base.id,
                         sed_doc_specs['styles'][1].get('base', None))
        self.assertEqual(sed_doc.styles[1].line,
                         sed_doc_specs['styles'][1].get('line', None))
        self.assertEqual(
            sed_doc.styles[1].marker.type.value,
            sed_doc_specs['styles'][1].get('marker', None).get('type', None))
        self.assertEqual(
            sed_doc.styles[1].marker.size,
            sed_doc_specs['styles'][1].get('marker', None).get('size', None))
        self.assertEqual(
            sed_doc.styles[1].marker.line_color,
            sed_doc_specs['styles'][1].get('marker',
                                           None).get('lineColor', None))
        self.assertEqual(
            sed_doc.styles[1].marker.line_thickness,
            sed_doc_specs['styles'][1].get('marker',
                                           None).get('lineThickness', None))
        self.assertEqual(
            sed_doc.styles[1].marker.fill_color,
            sed_doc_specs['styles'][1].get('marker',
                                           None).get('fillColor', None))
        self.assertEqual(
            sed_doc.styles[1].fill.color,
            sed_doc_specs['styles'][1].get('fill', None).get('color', None))

        # styles must be resolved onto the curves/surfaces that reference them
        self.assertEqual(sed_doc.outputs[1].curves[0].style.id,
                         sed_doc_specs['outputs'][1]['curves'][0]['style'])
        self.assertEqual(sed_doc.outputs[2].surfaces[0].style.id,
                         sed_doc_specs['outputs'][2]['surfaces'][0]['style'])

        # model changes from the specs survive the round trip
        self.assertEqual(sed_doc.tasks[0].model, sed_doc.models[0])
        self.assertEqual(len(sed_doc.models[0].changes), 2)
        self.assertEqual(
            sed_doc.models[0].changes[0].target,
            "/sbml:sbml/sbml:model/sbml:listOfParameters/sbml:parameter[@id='k1']/@value"
        )
        self.assertEqual(sed_doc.models[0].changes[0].new_value, '1.2')
        self.assertEqual(
            sed_doc.models[0].changes[0].target_namespaces, {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                'sbml': 'http://www.sbml.org/sbml/level3/version1/core',
                'qual':
                'http://www.sbml.org/sbml/level3/version1/qual/version1',
            })

        # algorithm parameters and data-generator targets survive as well
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].kisao_id,
            'KISAO_0000488')
        self.assertEqual(
            sed_doc.tasks[0].simulation.algorithm.changes[0].new_value, '10')
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].target,
            "/sbml:sbml/sbml:model/qual:listOfQualitativeSpecies/qual:qualitativeSpecies[@qual:id='x']"
        )
        self.assertEqual(
            sed_doc.outputs[1].curves[0].x_data_generator.variables[0].
            target_namespaces,
            {
                None: 'http://sed-ml.org/sed-ml/level1/version4',
                "sbml": "http://www.sbml.org/sbml/level3/version1/core",
                "qual":
                "http://www.sbml.org/sbml/level3/version1/qual/version1"
            },
        )
Пример #24
0
def handler(body, file=None,
            omexMetadataSchema=OmexMetadataSchema.biosimulations.value):
    ''' Get the metadata about a COMBINE/OMEX archive and its contents

    Args:
        body (:obj:`dict`): dictionary in schema ``GetCombineArchiveMetadataFileOrUrl`` with keys

            * ``url`` whose value has schema ``Url``
              with the URL for a COMBINE/OMEX archive
            * ``omexMetadataFormat`` whose value is a value of :obj:`OmexMetadataInputFormat`

        file (:obj:`werkzeug.datastructures.FileStorage`, optional): COMBINE/OMEX archive file
        omexMetadataSchema (:obj:`str`, optional): schema for validating the OMEX Metadata files

    Returns:
        :obj:`list` of ``BioSimulationsCombineArchiveLocationMetadata``
            or ``RdfTriple``: metadata about a COMBINE/OMEX archive
            and its contents

    Raises:
        :obj:`BadRequestException`: if the request arguments are invalid, the
            archive cannot be obtained or read, or its metadata is invalid
    '''
    # validate the requested metadata input format (default: RDF/XML)
    try:
        omexMetadataInputFormat = OmexMetadataInputFormat(body.get('omexMetadataFormat', 'rdfxml'))
    except ValueError as exception:
        raise BadRequestException(title='`omexMetadataFormat` must be a recognized format.', exception=exception)

    # validate the requested metadata schema
    try:
        omexMetadataSchema = OmexMetadataSchema(omexMetadataSchema)
    except ValueError as exception:
        raise BadRequestException(title='`omexMetadataSchema` must be a recognized schema.', exception=exception)

    # exactly one of `file` and `url` must be provided
    archive_file = file
    archive_url = body.get('url', None)
    if archive_url and archive_file:
        raise BadRequestException(
            title='Only one of `file` or `url` can be used at a time.',
            instance=ValueError(),
        )
    if not archive_url and not archive_file:
        raise BadRequestException(
            title='One of `file` or `url` must be used.',
            instance=ValueError(),
        )

    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_filename = os.path.join(temp_dirname, 'archive.omex')

    # get COMBINE/OMEX archive (from the upload or by downloading the URL)
    if archive_file:
        archive_file.save(archive_filename)

    else:
        try:
            response = requests.get(archive_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'COMBINE/OMEX archive could not be loaded from `{}`'.format(
                archive_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file; a distinct handle name
        # avoids shadowing the `file` parameter
        with open(archive_filename, 'wb') as archive_fh:
            archive_fh.write(response.content)

    # read (unpack) the archive into a temporary directory
    archive_dirname = os.path.join(temp_dirname, 'archive')
    try:
        archive = CombineArchiveReader().run(archive_filename, archive_dirname)
    except Exception as exception:
        # return exception
        raise BadRequestException(
            title='`{}` is not a valid COMBINE/OMEX archive'.format(archive_url if archive_url else archive_file.filename),
            instance=exception,
        )

    config = Config(
        OMEX_METADATA_INPUT_FORMAT=omexMetadataInputFormat,
        OMEX_METADATA_SCHEMA=omexMetadataSchema,
    )

    # `warning_msgs` avoids shadowing the :obj:`warnings` module
    metadata, errors, warning_msgs = read_omex_meta_files_for_archive(archive, archive_dirname, config=config)
    shutil.rmtree(archive_dirname)

    if errors:
        raise BadRequestException(
            title='The metadata for the COMBINE/OMEX archive is not valid.',
            instance=ValueError(),
            validation_report=make_validation_report(errors, warning_msgs, filenames=[archive_filename]),
        )

    # return response
    return metadata
Пример #25
0
def handler(body, file=None):
    ''' Get the manifest of a COMBINE/OMEX archive

    Args:
        body (:obj:`dict`): dictionary with keys

            * ``url`` whose value has schema ``Url`` with the
              URL for a COMBINE/OMEX archive or a manifest for a COMBINE/OMEX archive

        file (:obj:`werkzeug.datastructures.FileStorage`, optional): COMBINE/OMEX archive or OMEX manifest file

    Returns:
        ``CombineArchive``: manifest of the COMBINE/OMEX archive

    Raises:
        :obj:`BadRequestException`: if the request arguments are invalid,
            the file cannot be obtained, or it does not contain a valid
            manifest
    '''
    # exactly one of `file` and `url` must be provided
    archive_or_manifest_file = file
    archive_or_manifest_url = body.get('url', None)
    if archive_or_manifest_url and archive_or_manifest_file:
        raise BadRequestException(
            title='Only one of `file` or `url` can be used at a time.',
            instance=ValueError(),
        )
    if not archive_or_manifest_url and not archive_or_manifest_file:
        raise BadRequestException(
            title='One of `file` or `url` must be used.',
            instance=ValueError(),
        )

    # create temporary working directory
    temp_dirname = get_temp_dir()
    archive_or_manifest_filename = os.path.join(temp_dirname, 'archive.omex')
    manifest_filename = os.path.join(temp_dirname, 'manifest.xml')

    # get COMBINE/OMEX archive or manifest (from the upload or by
    # downloading the URL)
    if archive_or_manifest_file:
        archive_or_manifest_file.save(archive_or_manifest_filename)

    else:
        try:
            response = requests.get(archive_or_manifest_url)
            response.raise_for_status()
        except requests.exceptions.RequestException as exception:
            title = 'File could not be loaded from `{}`'.format(
                archive_or_manifest_url)
            raise BadRequestException(
                title=title,
                instance=exception,
            )

        # save archive to local temporary file; a distinct handle name
        # avoids shadowing the `file` parameter
        with open(archive_or_manifest_filename, 'wb') as download_fh:
            download_fh.write(response.content)

    # if the file is a zip archive, extract its manifest; a non-zip file
    # is assumed to be a bare manifest
    is_archive = False
    try:
        with zipfile.ZipFile(archive_or_manifest_filename, 'r') as zip_file:
            is_archive = True
            zip_file.extract('manifest.xml', temp_dirname)
    except zipfile.BadZipFile:
        # not a zip file: treat the input as a manifest itself
        pass
    except KeyError as exception:
        # zip file without a `manifest.xml` entry
        raise BadRequestException(
            title='COMBINE/OMEX archive does not contain a manifest.',
            instance=exception)

    if not is_archive:
        manifest_filename = archive_or_manifest_filename

    # parse and validate the manifest
    reader = CombineArchiveReader()
    contents = reader.read_manifest(manifest_filename,
                                    archive_or_manifest_filename)
    if reader.errors:
        raise BadRequestException(
            title=
            'File is not a valid manifest or a COMBINE/OMEX which contains a valid manifest.\n  {}'
            .format(
                flatten_nested_list_of_strings(reader.errors).replace(
                    '\n', '\n  ')),
            instance=ValueError())

    # serialize each manifest entry into the API's response schema
    contents_specs = []
    for content in contents:
        content_specs = {
            '_type': 'CombineArchiveManifestContent',
            'location': {
                '_type': 'CombineArchiveManifestLocation',
                'path': content.location,
                'value': {
                    '_type': 'CombineArchiveContentFile',
                    'filename': os.path.relpath(content.location, '.'),
                },
            },
            'format': content.format,
            'master': content.master,
        }
        contents_specs.append(content_specs)

    # format response
    response = {'_type': 'CombineArchiveManifest', 'contents': contents_specs}

    # return response
    return response