Beispiel #1
0
    def test_read_write_error_handling(self):
        """Check the warnings/errors raised when a report's data sets and the
        stored HDF5 data disagree."""
        report_1 = Report(
            id='report_1',
            data_sets=[DataSet(id=ds_id, label=ds_id.upper())
                       for ds_id in ['x', 'y', 'z']],
        )
        data_set_results_1 = data_model.DataSetResults({
            'x': numpy.array([1., 2.]),
            'y': numpy.array([3., 4.]),
            'z': numpy.array([5., 6.]),
        })

        rel_path_1 = os.path.join('a/b/c.sedml', report_1.id)

        io.ReportWriter().run(report_1, data_set_results_1, self.dirname,
                              rel_path_1, format=data_model.ReportFormat.h5)

        # a data set with no stored data -> missing-data warning on read
        report_1.data_sets.append(DataSet(id='w', label='W'))
        with self.assertWarns(MissingDataWarning):
            io.ReportReader().run(report_1, self.dirname, rel_path_1,
                                  format=data_model.ReportFormat.h5)

        # fewer data sets than stored -> extra-data warning on read
        report_1.data_sets.pop()
        report_1.data_sets.pop()
        with self.assertWarns(ExtraDataWarning):
            io.ReportReader().run(report_1, self.dirname, rel_path_1,
                                  format=data_model.ReportFormat.h5)

        # an object-dtype array cannot be written
        data_set_results_1['x'] = numpy.array([1., 2.],
                                              dtype=numpy.dtype('object'))
        with self.assertRaisesRegex(TypeError, 'NumPy dtype should be '):
            io.ReportWriter().run(report_1, data_set_results_1, self.dirname,
                                  rel_path_1,
                                  format=data_model.ReportFormat.h5)
Beispiel #2
0
def gen_sedml_2d_3d(omex_file_path, base_out_path):
    """Rewrite each SED-ML file in a COMBINE archive so every 2D/3D plot also
    has a companion report listing the plot's data generators.

    Args:
        omex_file_path: path to the COMBINE/OMEX archive to read.
        base_out_path: directory under which a ``temp`` working directory is
            created to hold the extracted archive and rewritten SED-ML files.
    """
    temp_path = os.path.join(base_out_path, "temp")
    if not os.path.exists(temp_path):
        # world-accessible scratch directory for the extracted archive
        os.mkdir(temp_path, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)

    # extract the archive
    archive = CombineArchiveReader().run(
        in_file=omex_file_path,
        out_dir=temp_path,
        try_reading_as_plain_zip_archive=False)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    for content in sedml_contents:
        content_filename = os.path.join(temp_path, content.location)
        # Derive the simulation name from the file name itself.
        # (The previous ``content.location.split('/')[1]`` raised IndexError
        # when the location had no '/' and picked the wrong path component
        # for locations nested more than one directory deep.)
        sedml_name = os.path.splitext(os.path.basename(content.location))[0]

        doc = SedmlSimulationReader().run(content_filename)
        # iterate over a snapshot: reports are appended to doc.outputs below,
        # and mutating a list while iterating it is unsafe
        for output in list(doc.outputs):
            if isinstance(output, (Plot2D, Plot3D)):
                report = Report(id='__plot__' + output.id, name=output.name)

                # collect the data generators referenced by the plot, keyed
                # by id so each appears in the report only once
                data_generators = {}
                if isinstance(output, Plot2D):
                    for curve in output.curves:
                        data_generators[curve.x_data_generator.id] = \
                            curve.x_data_generator
                        data_generators[curve.y_data_generator.id] = \
                            curve.y_data_generator

                elif isinstance(output, Plot3D):
                    for surface in output.surfaces:
                        data_generators[surface.x_data_generator.id] = \
                            surface.x_data_generator
                        data_generators[surface.y_data_generator.id] = \
                            surface.y_data_generator
                        data_generators[surface.z_data_generator.id] = \
                            surface.z_data_generator

                for data_generator in data_generators.values():
                    report.data_sets.append(
                        DataSet(
                            id='__data_set__{}_{}'.format(
                                output.id, data_generator.id),
                            name=data_generator.name,
                            label=data_generator.id,
                            data_generator=data_generator,
                        ))

                # deterministic data-set order regardless of dict iteration
                report.data_sets.sort(key=lambda data_set: data_set.id)
                doc.outputs.append(report)

        filename_with_reports_for_plots = os.path.join(
            temp_path, f'simulation_{sedml_name}.sedml')
        SedmlSimulationWriter().run(doc,
                                    filename_with_reports_for_plots,
                                    validate_models_with_languages=False)
Beispiel #3
0
def exec_plot_output_sed_doc(omex_file_path, base_out_path):
    """Convert tellurium plot CSV outputs under *base_out_path* into HDF5
    pseudo-reports, then strip the ``__plot__`` marker from the CSV names.

    Only CSV files whose name contains ``__plot__`` are processed.
    NOTE(review): relies on a module-level ``tmp_dir`` for extracting the
    archive — confirm it is defined at import time.

    Args:
        omex_file_path: path to the COMBINE/OMEX archive to read.
        base_out_path: directory containing the per-SED-ML-file CSV outputs;
            HDF5 output is written beneath it as well.
    """
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         try_reading_as_plain_zip_archive=True)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    for content in sedml_contents:
        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            if '__plot__' not in report_filename:
                continue

            report_id = os.path.splitext(
                os.path.basename(report_filename))[0]

            # read the report produced by tellurium: transpose so each
            # column is one data set, then promote the first row to the
            # column labels and drop it from the data
            data_set_df = pd.read_csv(report_filename, header=None).T
            data_set_df.columns = data_set_df.iloc[0]
            data_set_df.drop(0, inplace=True)
            data_set_df.reset_index(inplace=True)
            data_set_df.drop('index', axis=1, inplace=True)

            # create a pseudo-report for ReportWriter: one data set per
            # CSV column, labeled by the column header
            datasets = [DataSet(id=col, label=col, name=col)
                        for col in data_set_df.columns]
            report = Report(id=report_id, name=report_id, data_sets=datasets)

            data_set_results = DataSetResults()
            for col in data_set_df.columns:
                data_set_results[col] = data_set_df[col].values

            # save as HDF5 under a path without the '__plot__' marker
            report.id = report_id.replace('__plot__', '')
            rel_path = os.path.join(content.location, report.id)
            if len(rel_path.split("./")) > 1:
                rel_path = rel_path.split("./")[1]
            ReportWriter().run(report,
                               data_set_results,
                               base_out_path,
                               rel_path,
                               format='h5')
            # rename the source CSV to match the exported report id
            os.rename(report_filename,
                      report_filename.replace('__plot__', ''))
Beispiel #4
0
    def test_write_error_handling(self):
        """Verify the error for unsupported formats and the warning for
        multidimensional data in tabular formats."""
        # an unknown format string is rejected outright
        with self.assertRaisesRegex(NotImplementedError, 'is not supported'):
            io.ReportWriter().run(Report(), None, None, 'a', format='TSV')

        report = Report(data_sets=[DataSet(id='x', label='x')])

        # a 1-D data set exports to CSV without complaint
        data_set_results = data_model.DataSetResults({'x': numpy.zeros((3, ))})
        io.ReportWriter().run(report, data_set_results, self.dirname, '.',
                              format=data_model.ReportFormat.csv)

        # reshaping the same data to 2-D triggers the
        # multidimensional-table warning
        data_set_results['x'] = data_set_results['x'].reshape((3, 1))
        with self.assertWarnsRegex(
                CannotExportMultidimensionalTableWarning,
                'Multidimensional reports cannot be exported'):
            io.ReportWriter().run(report, data_set_results, self.dirname, '.',
                                  format=data_model.ReportFormat.csv)
Beispiel #5
0
def exec_plot_output_sed_doc(omex_file_path, base_out_path):
    """Convert CSV outputs under *base_out_path* into HDF5 reports.

    CSV files whose name contains ``__plot__`` are exported as Plot2D-typed
    pseudo-reports and then renamed without the marker; all other CSV files
    are exported as plain reports.

    NOTE(review): relies on a module-level ``tmp_dir`` for extracting the
    archive — confirm it is defined at import time.

    Args:
        omex_file_path: path to the COMBINE/OMEX archive to read.
        base_out_path: directory containing the per-SED-ML-file CSV outputs;
            HDF5 output is written beneath it as well.
    """
    config = Config(VALIDATE_OMEX_MANIFESTS=False)
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         config=config)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    for content in sedml_contents:
        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            if report_filename.find('__plot__') != -1:
                report_id = os.path.splitext(
                    os.path.basename(report_filename))[0]

                # read the report produced by tellurium; transpose so each
                # column is one data set. Rows 0-2 are taken as the
                # id/label/name header rows — TODO confirm with the producer.
                data_set_df = pd.read_csv(report_filename, header=None).T

                datasets = []
                for col in data_set_df.columns:
                    datasets.append(
                        DataSet(id=data_set_df.loc[0, col],
                                label=data_set_df.loc[1, col],
                                name=data_set_df.loc[2, col]))
                report = Report(id=report_id,
                                name=report_id,
                                data_sets=datasets)

                # promote the id row to column labels and drop the three
                # header rows, leaving only the numeric data
                data_set_df.columns = data_set_df.iloc[0]
                data_set_df.drop(0, inplace=True)
                data_set_df.drop(1, inplace=True)
                data_set_df.drop(2, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)

                data_set_results = DataSetResults()
                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].to_numpy(
                        dtype='float64')

                # save as HDF5 under a path without the '__plot__' marker
                export_id = report_id.replace('__plot__', '')
                report.id = export_id
                rel_path = os.path.join(content.location, report.id)
                if len(rel_path.split("./")) > 1:
                    rel_path = rel_path.split("./")[1]
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5',
                                   type=Plot2D)
                # rename the source CSV to match the exported report id
                os.rename(report_filename,
                          report_filename.replace('__plot__', ''))

            else:
                print("report   : ", report_filename, file=sys.stdout)
                report_id = os.path.splitext(
                    os.path.basename(report_filename))[0]
                data_set_df = pd.read_csv(report_filename, header=None).T

                # plain reports: only rows 0 (ids) and 1 (labels) are read;
                # data sets get an empty name
                datasets = []
                for col in data_set_df.columns:
                    datasets.append(
                        DataSet(id=data_set_df.loc[0, col],
                                label=data_set_df.loc[1, col],
                                name=""))
                report = Report(id=report_id,
                                name=report_id,
                                data_sets=datasets)

                # drops rows 0-2 like the plot branch even though only rows
                # 0-1 were read above — presumably the same 3-row header
                # format; TODO confirm
                data_set_df.columns = data_set_df.iloc[0]  # use ids
                data_set_df.drop(0, inplace=True)
                data_set_df.drop(1, inplace=True)
                data_set_df.drop(2, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)

                data_set_results = DataSetResults()
                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].to_numpy(
                        dtype='float64')

                rel_path = os.path.join(content.location, report.id)
                if len(rel_path.split("./")) > 1:
                    rel_path = rel_path.split("./")[1]
                ReportWriter().run(report,
                                   data_set_results,
                                   base_out_path,
                                   rel_path,
                                   format='h5',
                                   type=Report)
Beispiel #6
0
def exec_sed_doc(omex_file_path, base_out_path):
    """Read VCell CSV outputs for each SED-ML file in the archive and save
    them as HDF5 reports; plot outputs get pseudo data sets named after the
    CSV columns.

    NOTE(review): relies on a module-level ``tmp_dir`` for extracting the
    archive, and removes that directory when done.

    Args:
        omex_file_path: path to the COMBINE/OMEX archive to read.
        base_out_path: directory containing the per-SED-ML-file CSV outputs;
            HDF5 output is written beneath it as well.
    """
    # defining archive
    config = Config(VALIDATE_OMEX_MANIFESTS=False)
    archive = CombineArchiveReader().run(in_file=omex_file_path,
                                         out_dir=tmp_dir,
                                         config=config)

    # determine files to execute
    sedml_contents = get_sedml_contents(archive)

    report_results = ReportResults()
    for content in sedml_contents:
        content_filename = os.path.join(tmp_dir, content.location)

        doc = SedmlSimulationReader().run(content_filename)

        for report_filename in glob.glob(
                os.path.join(base_out_path, content.location, '*.csv')):
            report_id = os.path.splitext(os.path.basename(report_filename))[0]

            # read report from CSV file produced by VCell: promote the first
            # row to column labels, then restore the row-per-variable layout
            data_set_df = pd.read_csv(report_filename).transpose()
            data_set_df.columns = data_set_df.iloc[0]
            data_set_df = data_set_df.drop(data_set_df.iloc[0].name)
            data_set_df = data_set_df.reset_index()
            data_set_df = data_set_df.rename(
                columns={'index': data_set_df.columns.name})
            data_set_df = data_set_df.transpose()
            data_set_df.index.name = None

            # the SED-ML output this CSV belongs to
            report = next(report for report in doc.outputs
                          if report.id == report_id)

            data_set_results = DataSetResults()

            # isinstance (rather than ``type(...) !=``) also covers
            # subclasses of the plot types; computed once and reused below
            is_plot = isinstance(report, (Plot2D, Plot3D))
            if not is_plot:
                # the SED-ML document declares the data sets: look each row
                # up by its label
                for data_set in report.data_sets:
                    data_set_results[data_set.id] = data_set_df.loc[
                        data_set.label, :].to_numpy(dtype='float64')
            else:
                # pseudo-SED-ML for plots has no declared data sets: re-read
                # the CSV and use the raw columns directly
                data_set_df = pd.read_csv(report_filename, header=None).T
                data_set_df.columns = data_set_df.iloc[0]
                data_set_df.drop(0, inplace=True)
                data_set_df.reset_index(inplace=True)
                data_set_df.drop('index', axis=1, inplace=True)
                for col in list(data_set_df.columns):
                    data_set_results[col] = data_set_df[col].values

            # append to data structure of report results
            report_results[report_id] = data_set_results

            # save file in desired BioSimulators format(s)
            rel_path = os.path.join(content.location, report.id)
            if len(rel_path.split("./")) > 1:
                rel_path = rel_path.split("./")[1]

            if is_plot:
                # attach pseudo data sets so ReportWriter can label columns
                report.data_sets = [DataSet(id=col, label=col, name=col)
                                    for col in data_set_df.columns]
            ReportWriter().run(report,
                               data_set_results,
                               base_out_path,
                               rel_path,
                               format='h5')

    # Remove temp directory
    shutil.rmtree(tmp_dir)
Beispiel #7
0
    def test_get_ids(self):
        """Writing several reports and then listing ids should return every
        path that was written, for each supported format."""
        report = Report(data_sets=[
            DataSet(id='A', label='A'),
            DataSet(id='B', label='A'),
        ], )

        results = data_model.DataSetResults({
            report.data_sets[0].id: numpy.array([1, 2, 3]),
            report.data_sets[1].id: numpy.array([4, 5, 6]),
        })

        rel_paths = [
            'a/b/c.sedml/report1',
            'a/b/c.sedml/report2',
            'a/b/c.sedml/report3',
            'a/b/d.sedml/report4',
            'a/b/report5',
            'a/b/report6',
        ]

        for format in [
                data_model.ReportFormat.h5, data_model.ReportFormat.csv
        ]:
            filename = os.path.join(self.dirname, 'test')
            # write the same report under each relative path
            for rel_path in rel_paths:
                io.ReportWriter().run(report, results, filename, rel_path,
                                      format=format)

            self.assertEqual(
                set(io.ReportReader().get_ids(filename, format=format)),
                set(rel_paths))

        # an unrecognized format raises
        with self.assertRaisesRegex(NotImplementedError, 'is not supported'):
            io.ReportReader().get_ids(filename, format=None)
Beispiel #8
0
    def test_overwrite_report(self):
        """Writing a report twice to the same path should replace the stored
        data with the newer values."""
        report_1 = Report(
            id='report_1',
            data_sets=[
                DataSet(id='x', label='X'),
                DataSet(id='y', label='Y'),
                DataSet(id='z', label='Z'),
            ],
        )
        base_values = {
            'x': numpy.array([1., 2.]),
            'y': numpy.array([3., 4.]),
            'z': numpy.array([5., 6.]),
        }
        rel_path_1 = os.path.join('a/b/c.sedml', report_1.id)

        # first pass writes the base values; second pass overwrites with
        # values shifted by 1 and must read back the shifted values
        for offset in [0., 1.]:
            results_in = data_model.DataSetResults(
                {key: values + offset for key, values in base_values.items()})
            io.ReportWriter().run(report_1, results_in, self.dirname,
                                  rel_path_1,
                                  format=data_model.ReportFormat.h5)
            results_out = io.ReportReader().run(
                report_1,
                self.dirname,
                rel_path_1,
                format=data_model.ReportFormat.h5)
            for key, values in base_values.items():
                numpy.testing.assert_allclose(results_out[key],
                                              values + offset)
Beispiel #9
0
    def test_read_write_duplicate_labels(self):
        """CSV round-trips with duplicate labels: readable when the data-set
        order matches the written order, ambiguous otherwise."""
        # duplicate labels, data sets in the same order as written:
        # every data set is recoverable
        report_1 = Report(
            id='report_1',
            data_sets=[DataSet(id=ds_id, label='A')
                       for ds_id in ['x', 'y', 'z']],
        )
        expected = {
            'x': numpy.array([1., 2.]),
            'y': numpy.array([3., 4.]),
            'z': numpy.array([5., 6.]),
        }
        data_set_results_1 = data_model.DataSetResults(dict(expected))

        rel_path_1 = os.path.join('a/b/c.sedml', report_1.id)

        io.ReportWriter().run(report_1, data_set_results_1, self.dirname,
                              rel_path_1, format=data_model.ReportFormat.csv)
        data_set_results_2 = io.ReportReader().run(
            report_1,
            self.dirname,
            rel_path_1,
            format=data_model.ReportFormat.csv)

        for ds_id, values in expected.items():
            numpy.testing.assert_allclose(data_set_results_2[ds_id], values)

        # duplicate labels read back in a different order: only data sets
        # with unambiguous labels can be recovered
        report_1 = Report(
            id='report_1',
            data_sets=[
                DataSet(id='x', label='X'),
                DataSet(id='y', label='X'),
                DataSet(id='z', label='Z'),
            ],
        )
        data_set_results_1 = data_model.DataSetResults({
            'x': numpy.array([1., 2.]),
            'y': numpy.array([3., 4.]),
            'z': numpy.array([5., 6.]),
        })

        rel_path_1 = os.path.join('a/b/c.sedml', report_1.id)

        io.ReportWriter().run(report_1, data_set_results_1, self.dirname,
                              rel_path_1, format=data_model.ReportFormat.csv)

        report_2 = Report(
            id='report_1',
            data_sets=[
                DataSet(id='x', label='X'),
                DataSet(id='z', label='Z'),
                DataSet(id='y', label='X'),
            ],
        )
        data_set_results_2 = io.ReportReader().run(
            report_2,
            self.dirname,
            rel_path_1,
            format=data_model.ReportFormat.csv)

        self.assertEqual(set(data_set_results_2.keys()), {'z'})
        numpy.testing.assert_allclose(data_set_results_2['z'],
                                      numpy.array([5., 6.]))
Beispiel #10
0
    def test_read_write(self):
        report_1 = Report(
            id='report_1',
            name='report 1',
            data_sets=[
                DataSet(id='w', label='W'),
                DataSet(id='x', label='X'),
                DataSet(id='y', label='Y'),
                DataSet(id='z', label='Z'),
            ],
        )
        report_2 = Report(
            id='report_2',
            name='report 2',
            data_sets=[
                DataSet(id='a', label='A'),
                DataSet(id='b', label='B'),
                DataSet(id='c', label='C'),
                DataSet(id='d', label='D'),
            ],
        )
        report_3 = Report(
            id='report_3',
            data_sets=[
                DataSet(id='a', label='A'),
                DataSet(id='b', label='B'),
                DataSet(id='c', label='C'),
                DataSet(id='d', label='D'),
            ],
        )
        data_set_results_1 = data_model.DataSetResults({
            'w':
            None,
            'x':
            numpy.array([1, 2, 3]),
            'y':
            numpy.array([4., numpy.nan]),
            'z':
            numpy.array(6.),
        })
        data_set_results_2 = data_model.DataSetResults({
            'a':
            numpy.array([1, 2]),
            'b':
            numpy.array([7., 8., 9.]),
            'c':
            numpy.array(True),
            'd':
            None,
        })
        data_set_results_3 = data_model.DataSetResults({
            'a':
            numpy.array([[1, 2], [3, 4], [5, 6]]),
            'b':
            numpy.array([7., 8., 9.]),
            'c':
            numpy.array(True),
            'd':
            None,
        })

        # CSV, TSV
        for format in [
                data_model.ReportFormat.csv, data_model.ReportFormat.tsv,
                data_model.ReportFormat.xlsx
        ]:
            rel_path_1 = os.path.join(format.value, 'a/b/c.sedml', report_1.id)
            rel_path_2 = os.path.join(format.value, 'a/d.sedml', report_2.id)
            rel_path_3 = os.path.join(format.value, 'e.sedml', report_2.id)

            io.ReportWriter().run(report_1,
                                  data_set_results_1,
                                  self.dirname,
                                  rel_path_1,
                                  format=format)
            io.ReportWriter().run(report_2,
                                  data_set_results_2,
                                  self.dirname,
                                  rel_path_2,
                                  format=format)
            with self.assertWarnsRegex(
                    CannotExportMultidimensionalTableWarning,
                    'Multidimensional reports cannot be exported'):
                io.ReportWriter().run(report_3,
                                      data_set_results_3,
                                      self.dirname,
                                      rel_path_3,
                                      format=format)
            data_set_results_1_b = io.ReportReader().run(report_1,
                                                         self.dirname,
                                                         rel_path_1,
                                                         format=format)
            data_set_results_2_b = io.ReportReader().run(report_2,
                                                         self.dirname,
                                                         rel_path_2,
                                                         format=format)

            self.assertEqual(
                set(io.ReportReader().get_ids(self.dirname, format=format)),
                set([rel_path_1, rel_path_2]))

            numpy.testing.assert_allclose(
                data_set_results_1_b['w'],
                numpy.array([numpy.nan, numpy.nan, numpy.nan]))
            numpy.testing.assert_allclose(data_set_results_1_b['x'],
                                          numpy.array([1., 2., 3.]))
            numpy.testing.assert_allclose(
                data_set_results_1_b['y'],
                numpy.array([4., numpy.nan, numpy.nan]))
            numpy.testing.assert_allclose(
                data_set_results_1_b['z'],
                numpy.array([6., numpy.nan, numpy.nan]))

            self.assertEqual(data_set_results_1_b['w'].dtype.name, 'float64')
            self.assertEqual(data_set_results_1_b['x'].dtype.name, 'float64')
            self.assertEqual(data_set_results_1_b['y'].dtype.name, 'float64')
            self.assertEqual(data_set_results_1_b['z'].dtype.name, 'float64')

            numpy.testing.assert_allclose(data_set_results_2_b['a'],
                                          numpy.array([1., 2., numpy.nan]))
            numpy.testing.assert_allclose(data_set_results_2_b['b'],
                                          numpy.array([7., 8., 9.]))
            numpy.testing.assert_allclose(
                data_set_results_2_b['c'],
                numpy.array([1., numpy.nan, numpy.nan]))
            numpy.testing.assert_allclose(
                data_set_results_2_b['d'],
                numpy.array([numpy.nan, numpy.nan, numpy.nan]))

            self.assertEqual(data_set_results_2_b['a'].dtype.name, 'float64')
            self.assertEqual(data_set_results_2_b['b'].dtype.name, 'float64')
            self.assertEqual(data_set_results_2_b['c'].dtype.name, 'float64')
            self.assertEqual(data_set_results_2_b['d'].dtype.name, 'float64')

        # HDF
        for format in [data_model.ReportFormat.h5]:
            rel_path_1 = os.path.join(format.value, 'a/b/c.sedml', report_1.id)
            rel_path_2 = os.path.join(format.value, 'a/d.sedml', report_2.id)
            rel_path_3 = os.path.join(format.value, 'e.sedml', report_3.id)

            io.ReportWriter().run(report_1,
                                  data_set_results_1,
                                  self.dirname,
                                  rel_path_1,
                                  format=format)
            io.ReportWriter().run(report_2,
                                  data_set_results_2,
                                  self.dirname,
                                  rel_path_2,
                                  format=format)
            io.ReportWriter().run(report_3,
                                  data_set_results_3,
                                  self.dirname,
                                  rel_path_3,
                                  format=format)
            data_set_results_1_b = io.ReportReader().run(report_1,
                                                         self.dirname,
                                                         rel_path_1,
                                                         format=format)
            data_set_results_2_b = io.ReportReader().run(report_2,
                                                         self.dirname,
                                                         rel_path_2,
                                                         format=format)
            data_set_results_3_b = io.ReportReader().run(report_3,
                                                         self.dirname,
                                                         rel_path_3,
                                                         format=format)

            self.assertEqual(
                set(io.ReportReader().get_ids(self.dirname, format=format)),
                set([rel_path_1, rel_path_2, rel_path_3]))

            self.assertEqual(data_set_results_1_b['w'], None)
            numpy.testing.assert_allclose(data_set_results_1_b['x'],
                                          numpy.array([1, 2, 3]))
            numpy.testing.assert_allclose(data_set_results_1_b['y'],
                                          numpy.array([4., numpy.nan]))
            numpy.testing.assert_allclose(data_set_results_1_b['z'],
                                          numpy.array(6.))

            self.assertEqual(data_set_results_1_b['x'].dtype.name, 'int64')
            self.assertEqual(data_set_results_1_b['y'].dtype.name, 'float64')
            self.assertEqual(data_set_results_1_b['z'].dtype.name, 'float64')

            numpy.testing.assert_allclose(data_set_results_2_b['a'],
                                          numpy.array([1, 2]))
            numpy.testing.assert_allclose(data_set_results_2_b['b'],
                                          numpy.array([7., 8., 9.]))
            numpy.testing.assert_allclose(data_set_results_2_b['c'],
                                          numpy.array(True))
            self.assertEqual(data_set_results_2_b['d'], None)

            self.assertEqual(data_set_results_2_b['a'].dtype.name, 'int64')
            self.assertEqual(data_set_results_2_b['b'].dtype.name, 'float64')
            self.assertEqual(data_set_results_2_b['c'].dtype.name, 'bool')

            numpy.testing.assert_allclose(
                data_set_results_3_b['a'], numpy.array([[1, 2], [3, 4], [5,
                                                                         6]]))
            numpy.testing.assert_allclose(data_set_results_3_b['b'],
                                          numpy.array([7., 8., 9.]))
            numpy.testing.assert_allclose(data_set_results_3_b['c'],
                                          numpy.array(True))
            self.assertEqual(data_set_results_3_b['d'], None)

            self.assertEqual(data_set_results_3_b['a'].dtype.name, 'int64')
            self.assertEqual(data_set_results_3_b['b'].dtype.name, 'float64')
            self.assertEqual(data_set_results_3_b['c'].dtype.name, 'bool')

            with h5py.File(os.path.join(self.dirname, 'reports.h5'),
                           'r') as file:
                self.assertEqual(
                    file[format.value + '/a'].attrs, {
                        'uri': format.value + '/a',
                        'combineArchiveLocation': format.value + '/a',
                    })
                self.assertEqual(
                    file[format.value + '/a/b'].attrs, {
                        'uri': format.value + '/a/b',
                        'combineArchiveLocation': format.value + '/a/b',
                    })
                self.assertEqual(
                    file[format.value + '/a/b/c.sedml'].attrs, {
                        'uri': format.value + '/a/b/c.sedml',
                        'combineArchiveLocation':
                        format.value + '/a/b/c.sedml',
                    })
                self.assertEqual(
                    file[format.value + '/a/d.sedml'].attrs, {
                        'uri': format.value + '/a/d.sedml',
                        'combineArchiveLocation': format.value + '/a/d.sedml',
                    })
                self.assertEqual(
                    file[format.value + '/e.sedml'].attrs, {
                        'uri': format.value + '/e.sedml',
                        'combineArchiveLocation': format.value + '/e.sedml',
                    })

                self.assertEqual(
                    file[format.value + '/a/b/c.sedml/' +
                         report_1.id].attrs['uri'],
                    format.value + '/a/b/c.sedml/' + report_1.id)
                self.assertEqual(
                    file[format.value + '/a/b/c.sedml/' +
                         report_1.id].attrs['sedmlId'], report_1.id)
                self.assertEqual(
                    file[format.value + '/a/b/c.sedml/' +
                         report_1.id].attrs['sedmlName'], report_1.name)

                self.assertEqual(
                    file[format.value + '/a/d.sedml/' +
                         report_2.id].attrs['uri'],
                    format.value + '/a/d.sedml/' + report_2.id)
                self.assertEqual(
                    file[format.value + '/a/d.sedml/' +
                         report_2.id].attrs['sedmlId'], report_2.id)
                self.assertEqual(
                    file[format.value + '/a/d.sedml/' +
                         report_2.id].attrs['sedmlName'], report_2.name)

                self.assertEqual(
                    file[format.value + '/e.sedml/' +
                         report_3.id].attrs['uri'],
                    format.value + '/e.sedml/' + report_3.id)
                self.assertEqual(
                    file[format.value + '/e.sedml/' +
                         report_3.id].attrs['sedmlId'], report_3.id)
                self.assertNotIn(
                    'sedmlName',
                    file[format.value + '/e.sedml/' + report_3.id].attrs)
    def test_exec_sedml_docs_in_archive_without_log(self):
        """Exercise ``exec.exec_sedml_docs_in_archive`` with logging/result
        collection enabled and disabled, for both a working task executer and
        one that raises, checking results, log contents, and error handling.

        NOTE(review): the phases below share one ``config`` object that is
        mutated between runs, so statement order matters throughout.
        """
        # COMBINE archive manifest: one SED-ML document and one SBML model file
        archive = CombineArchive(contents=[
            CombineArchiveContent(
                location='sim.sedml',
                format='http://identifiers.org/combine.specifications/sed-ml',
            ),
            CombineArchiveContent(
                location='model.xml',
                format='http://identifiers.org/combine.specifications/sbml',
            ),
        ], )

        # SED-ML document: one model + uniform time course simulation joined
        # by one task, two data generators (one variable each), one report
        sed_doc = SedDocument()
        model = Model(id='model_1',
                      source='model.xml',
                      language=ModelLanguage.SBML.value)
        sed_doc.models.append(model)
        sim = UniformTimeCourseSimulation(
            id='sim_1',
            initial_time=0.,
            output_start_time=0.,
            output_end_time=10.,
            number_of_points=10,
            algorithm=Algorithm(kisao_id='KISAO_0000019'))
        sed_doc.simulations.append(sim)
        task = Task(id='task_1', model=model, simulation=sim)
        sed_doc.tasks.append(task)
        sed_doc.data_generators.append(
            DataGenerator(
                id='data_gen_1',
                variables=[
                    Variable(
                        id='var_1',
                        target=
                        "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Trim']",
                        target_namespaces={
                            'sbml': 'http://www.sbml.org/sbml/level2/version4'
                        },
                        task=task)
                ],
                math='var_1',
            ))
        sed_doc.data_generators.append(
            DataGenerator(
                id='data_gen_2',
                variables=[
                    Variable(
                        id='var_2',
                        target=
                        "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clb']",
                        target_namespaces={
                            'sbml': 'http://www.sbml.org/sbml/level2/version4'
                        },
                        task=task)
                ],
                math='var_2',
            ))
        report = Report(id='output_1')
        sed_doc.outputs.append(report)
        report.data_sets.append(
            DataSet(id='data_set_1',
                    label='data_set_1',
                    data_generator=sed_doc.data_generators[0]))
        report.data_sets.append(
            DataSet(id='data_set_2',
                    label='data_set_2',
                    data_generator=sed_doc.data_generators[1]))

        # stage the model fixture and the SED-ML document on disk, then zip
        # them into an OMEX archive
        archive_dirname = os.path.join(self.tmp_dir, 'archive')
        os.makedirs(archive_dirname)

        shutil.copyfile(
            os.path.join(os.path.dirname(__file__), '..', 'fixtures',
                         'BIOMD0000000297.xml'),
            os.path.join(archive_dirname, 'model.xml'))
        SedmlSimulationWriter().run(sed_doc,
                                    os.path.join(archive_dirname, 'sim.sedml'))

        archive_filename = os.path.join(self.tmp_dir, 'archive.omex')
        CombineArchiveWriter().run(archive, archive_dirname, archive_filename)

        # stub task executer: returns linearly spaced values for both
        # variables; records the algorithm and simulator details on the log
        # when one is supplied
        def sed_task_executer(task, variables, log=None, config=None):
            if log:
                log.algorithm = task.simulation.algorithm.kisao_id
                log.simulator_details = {
                    'attrib': 'value',
                }

            return VariableResults({
                'var_1':
                numpy.linspace(0., 10., task.simulation.number_of_points + 1),
                'var_2':
                numpy.linspace(10., 20., task.simulation.number_of_points + 1),
            }), log

        # stub task executer that always fails
        def sed_task_executer_error(task, variables, log=None, config=None):
            raise ValueError('Big error')

        out_dir = os.path.join(self.tmp_dir, 'outputs')

        # no report/viz exports; collect in-memory results and keep a log
        config = get_config()
        config.REPORT_FORMATS = []
        config.VIZ_FORMATS = []
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
        config.LOG = True

        # with log
        sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                             sed_task_executer)
        results, log = exec.exec_sedml_docs_in_archive(
            sed_doc_executer,
            archive_filename,
            out_dir,
            apply_xml_model_changes=False,
            config=config)
        # results are keyed by SED-ML location -> output id -> data set id
        self.assertEqual(set(results.keys()), set(['sim.sedml']))
        self.assertEqual(set(results['sim.sedml'].keys()), set(['output_1']))
        self.assertEqual(set(results['sim.sedml']['output_1'].keys()),
                         set(['data_set_1', 'data_set_2']))
        numpy.testing.assert_allclose(
            results['sim.sedml']['output_1']['data_set_1'],
            numpy.linspace(0., 10., 11))
        numpy.testing.assert_allclose(
            results['sim.sedml']['output_1']['data_set_2'],
            numpy.linspace(10., 20., 11))
        # successful run: no exception, and the stub's log entries are present
        self.assertEqual(log.exception, None)
        self.assertEqual(
            log.sed_documents['sim.sedml'].tasks['task_1'].algorithm,
            task.simulation.algorithm.kisao_id)
        self.assertEqual(
            log.sed_documents['sim.sedml'].tasks['task_1'].simulator_details,
            {'attrib': 'value'})

        # failing executer with DEBUG off: error is captured on the log
        sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                             sed_task_executer_error)
        results, log = exec.exec_sedml_docs_in_archive(
            sed_doc_executer,
            archive_filename,
            out_dir,
            apply_xml_model_changes=False,
            config=config)
        self.assertIsInstance(log.exception, CombineArchiveExecutionError)

        # failing executer with DEBUG on: the original error is re-raised
        config.DEBUG = True
        sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                             sed_task_executer_error)
        with self.assertRaisesRegex(ValueError, 'Big error'):
            exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                            archive_filename,
                                            out_dir,
                                            apply_xml_model_changes=False,
                                            config=config)

        # without log
        config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
        config.LOG = False
        config.DEBUG = False

        # with collection and logging disabled, both outputs are None
        sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                             sed_task_executer)
        results, log = exec.exec_sedml_docs_in_archive(
            sed_doc_executer,
            archive_filename,
            out_dir,
            apply_xml_model_changes=False,
            config=config)
        self.assertEqual(results, None)
        self.assertEqual(log, None)

        # failing executer without a log: error surfaces as a wrapped
        # CombineArchiveExecutionError (DEBUG off) ...
        sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                             sed_task_executer_error)
        with self.assertRaisesRegex(CombineArchiveExecutionError, 'Big error'):
            exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                            archive_filename,
                                            out_dir,
                                            apply_xml_model_changes=False,
                                            config=config)

        # ... or as the original ValueError (DEBUG on)
        config.DEBUG = True
        sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                             sed_task_executer_error)
        with self.assertRaisesRegex(ValueError, 'Big error'):
            exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                            archive_filename,
                                            out_dir,
                                            apply_xml_model_changes=False,
                                            config=config)
Beispiel #12
0
    def _build_combine_archive(self):
        """Build a COMBINE/OMEX archive containing one SED-ML document that
        simulates the ``bounce1.txt`` Smoldyn example model.

        The archive is staged in ``self.dirname/archive`` and zipped into
        ``self.dirname/archive.omex``.

        Returns:
            :obj:`tuple`:

                * ``SedDocument``: SED-ML document packaged in the archive
                * :obj:`str`: path to the written OMEX archive
        """
        # one model + uniform time course simulation, joined by a single task
        task = Task(
            id='task',
            model=Model(
                id='model',
                source='bounce1.txt',
                language=ModelLanguage.Smoldyn.value,
            ),
            simulation=UniformTimeCourseSimulation(
                id='sim',
                initial_time=0.,
                output_start_time=0.1,
                output_end_time=0.2,
                number_of_points=10,
                algorithm=Algorithm(kisao_id='KISAO_0000057',
                                    changes=[
                                        AlgorithmParameterChange(
                                            kisao_id='KISAO_0000488',
                                            new_value='10'),
                                    ])),
        )

        # NOTE: the original version also built an unused ``variables`` list
        # here; it was dead code and has been removed.

        # data generators for time and the two molecule counts
        doc = SedDocument(
            models=[task.model],
            simulations=[task.simulation],
            tasks=[task],
            data_generators=[
                DataGenerator(
                    id='data_gen_time',
                    variables=[
                        Variable(id='var_time',
                                 symbol=Symbol.time.value,
                                 task=task)
                    ],
                    math='var_time',
                ),
                DataGenerator(
                    id='data_gen_red',
                    variables=[
                        Variable(id='var_red',
                                 target='molcount red',
                                 task=task)
                    ],
                    math='var_red',
                ),
                DataGenerator(
                    id='data_gen_green',
                    variables=[
                        Variable(id='var_green',
                                 target='molcount green',
                                 task=task)
                    ],
                    math='var_green',
                ),
            ],
        )
        # one report with a data set per data generator
        doc.outputs.append(
            Report(id='report',
                   data_sets=[
                       DataSet(id='data_set_time',
                               label='time',
                               data_generator=doc.data_generators[0]),
                       DataSet(id='data_set_red',
                               label='red',
                               data_generator=doc.data_generators[1]),
                       DataSet(id='data_set_green',
                               label='green',
                               data_generator=doc.data_generators[2]),
                   ]))

        # stage the model file and the SED-ML document, then zip them into
        # an OMEX archive
        archive_dirname = os.path.join(self.dirname, 'archive')
        os.makedirs(archive_dirname)
        shutil.copyfile(
            os.path.join(self.EXAMPLES_DIRNAME, 'S1_intro', 'bounce1.txt'),
            os.path.join(archive_dirname, 'bounce1.txt'))
        sim_filename = os.path.join(archive_dirname, 'sim_1.sedml')
        SedmlSimulationWriter().run(doc, sim_filename)

        archive = CombineArchive(contents=[
            CombineArchiveContent('bounce1.txt',
                                  CombineArchiveContentFormat.Smoldyn.value),
            CombineArchiveContent('sim_1.sedml',
                                  CombineArchiveContentFormat.SED_ML.value),
        ], )
        archive_filename = os.path.join(self.dirname, 'archive.omex')
        CombineArchiveWriter().run(archive, archive_dirname, archive_filename)

        return doc, archive_filename
Beispiel #13
0
def export_sed_doc(sed_doc_specs):
    """ Export the specifications of SED-ML document to SED-ML

    Args:
        sed_doc_specs (``SedDocument``)

    Returns:
        :obj:`SedDocument`
    """
    sed_doc = SedDocument(
        level=sed_doc_specs['level'],
        version=sed_doc_specs['version'],
    )

    # add styles to SED-ML document
    style_id_map = {}
    for style_spec in sed_doc_specs['styles']:
        style = Style(
            id=style_spec.get('id'),
            name=style_spec.get('name', None),
        )
        sed_doc.styles.append(style)
        style_id_map[style.id] = style

        if style_spec.get('line', None) is not None:
            style.line = LineStyle(
                type=style_spec['line'].get('type', None),
                color=style_spec['line'].get('color', None),
                thickness=style_spec['line'].get('thickness', None),
            )
            if style_spec['line'].get('type', None) is not None:
                style.line.type = LineStyleType[style_spec['line']['type']]
            if style_spec['line'].get('color', None) is not None:
                style.line.color = Color(style_spec['line']['color'])

        if style_spec.get('marker', None) is not None:
            style.marker = MarkerStyle(
                type=style_spec['marker'].get('type', None),
                size=style_spec['marker'].get('size', None),
                line_color=style_spec['marker'].get('lineColor', None),
                line_thickness=style_spec['marker'].get('lineThickness', None),
                fill_color=style_spec['marker'].get('fillColor', None),
            )
            if style_spec['marker'].get('type', None) is not None:
                style.marker.type = MarkerStyleType[style_spec['marker']
                                                    ['type']]
            if style_spec['marker'].get('lineColor', None) is not None:
                style.marker.line_color = Color(
                    style_spec['marker']['lineColor'])
            if style_spec['marker'].get('fillColor', None) is not None:
                style.marker.fill_color = Color(
                    style_spec['marker']['fillColor'])

        if style_spec.get('fill', None) is not None:
            style.fill = FillStyle(color=style_spec['fill'].get('color',
                                                                None), )
            if style_spec['fill'].get('color', None) is not None:
                style.fill.color = Color(style_spec['fill']['color'])

    for style_spec, style in zip(sed_doc_specs['styles'], sed_doc.styles):
        if style_spec.get('base', None) is not None:
            style.base = style_id_map.get(style_spec['base'], None)
            if style.base is None:
                raise BadRequestException(
                    title='Base style `{}` for style `{}` does not exist'.
                    format(style_spec['base'], style.id),
                    instance=ValueError('Style does not exist'),
                )

    # add models to SED-ML document
    model_id_map = {}
    for model_spec in sed_doc_specs['models']:
        model = Model(
            id=model_spec.get('id'),
            name=model_spec.get('name', None),
            language=model_spec.get('language'),
            source=model_spec.get('source'),
        )
        sed_doc.models.append(model)
        model_id_map[model.id] = model

        for change_spec in model_spec['changes']:
            if change_spec['_type'] == 'SedModelAttributeChange':
                change = ModelAttributeChange(
                    new_value=change_spec.get('newValue'), )

            elif change_spec['_type'] == 'SedAddElementModelChange':
                change = AddElementModelChange(
                    new_elements=change_spec.get('newElements'), )

            elif change_spec['_type'] == 'SedReplaceElementModelChange':
                change = ReplaceElementModelChange(
                    new_elements=change_spec.get('newElements'), )

            elif change_spec['_type'] == 'SedRemoveElementModelChange':
                change = RemoveElementModelChange()

            elif change_spec['_type'] == 'SedComputeModelChange':
                change = ComputeModelChange(
                    parameters=[],
                    variables=[],
                    math=change_spec.get('math'),
                )
                for parameter_spec in change_spec.get('parameters', []):
                    change.parameters.append(
                        Parameter(
                            id=parameter_spec.get('id'),
                            name=parameter_spec.get('name', None),
                            value=parameter_spec.get('value'),
                        ))
                for variable_spec in change_spec.get('variables', []):
                    change.variables.append(
                        Variable(
                            id=variable_spec.get('id'),
                            name=variable_spec.get('name', None),
                            model=variable_spec.get('model', None),
                            target=variable_spec.get('target',
                                                     {}).get('value', None),
                            target_namespaces={
                                namespace['prefix']: namespace['uri']
                                for namespace in variable_spec.get(
                                    'target', {}).get('namespaces', [])
                            },
                            symbol=variable_spec.get('symbol', None),
                            task=variable_spec.get('task', None),
                        ))

            else:
                raise BadRequestException(
                    title='Changes of type `{}` are not supported'.format(
                        change_spec['_type']),
                    instance=NotImplementedError('Invalid change'))

            change.target = change_spec.get('target').get('value')
            for ns in change_spec.get('target').get('namespaces', []):
                change.target_namespaces[ns.get('prefix', None)] = ns['uri']

            model.changes.append(change)

    # add simulations to SED-ML document
    simulation_id_map = {}
    for sim_spec in sed_doc_specs['simulations']:
        if sim_spec['_type'] == 'SedOneStepSimulation':
            sim = OneStepSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
                step=sim_spec.get('step'),
            )
        elif sim_spec['_type'] == 'SedSteadyStateSimulation':
            sim = SteadyStateSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
            )
        elif sim_spec['_type'] == 'SedUniformTimeCourseSimulation':
            sim = UniformTimeCourseSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
                initial_time=sim_spec.get('initialTime'),
                output_start_time=sim_spec.get('outputStartTime'),
                output_end_time=sim_spec.get('outputEndTime'),
                number_of_steps=sim_spec.get('numberOfSteps'),
            )
        else:
            raise BadRequestException(
                title='Simulations of type `{}` are not supported'.format(
                    sim_spec['_type']),
                instance=NotImplementedError('Invalid simulation')
            )  # pragma: no cover: unreachable due to schema validation

        alg_spec = sim_spec.get('algorithm')
        sim.algorithm = Algorithm(kisao_id=alg_spec.get('kisaoId'))
        for change_spec in alg_spec.get('changes'):
            sim.algorithm.changes.append(
                AlgorithmParameterChange(
                    kisao_id=change_spec.get('kisaoId'),
                    new_value=change_spec.get('newValue'),
                ))

        sed_doc.simulations.append(sim)
        simulation_id_map[sim.id] = sim

    # add tasks to SED-ML document
    task_id_map = {}
    for task_spec in sed_doc_specs['tasks']:
        if task_spec['_type'] == 'SedTask':
            model_id = task_spec.get('model')
            sim_id = task_spec.get('simulation')
            model = model_id_map.get(model_id, None)
            sim = simulation_id_map.get(sim_id, None)

            if not model:
                raise BadRequestException(
                    title='Model `{}` for task `{}` does not exist'.format(
                        model_id, task_spec.get('id')),
                    instance=ValueError('Model does not exist'),
                )
            if not sim:
                raise BadRequestException(
                    title='Simulation `{}` for task `{}` does not exist'.
                    format(sim_id, task_spec.get('id')),
                    instance=ValueError('Simulation does not exist'),
                )

            task = Task(
                id=task_spec.get('id'),
                name=task_spec.get('name', None),
                model=model,
                simulation=sim,
            )
        else:
            # TODO: support repeated tasks
            raise BadRequestException(
                title='Tasks of type `{}` are not supported'.format(
                    task_spec['_type']),
                instance=NotImplementedError('Invalid task')
            )  # pragma: no cover: unreachable due to schema validation

        sed_doc.tasks.append(task)
        task_id_map[task.id] = task

    # add data generators to SED-ML document
    data_gen_id_map = {}
    for data_gen_spec in sed_doc_specs['dataGenerators']:
        data_gen = DataGenerator(
            id=data_gen_spec.get('id'),
            name=data_gen_spec.get('name', None),
            math=data_gen_spec.get('math'),
        )

        for var_spec in data_gen_spec['variables']:
            task_id = var_spec.get('task')
            task = task_id_map.get(task_id, None)

            if not task:
                raise BadRequestException(
                    title='Task `{}` for variable `{}` does not exist'.format(
                        task_id, var_spec.get('id')),
                    instance=ValueError('Task does not exist'),
                )

            var = Variable(
                id=var_spec.get('id'),
                name=var_spec.get('name', None),
                task=task,
                symbol=var_spec.get('symbol', None),
            )

            target_spec = var_spec.get('target', None)
            if target_spec:
                var.target = target_spec['value']
                for ns in target_spec.get('namespaces', []):
                    var.target_namespaces[ns.get('prefix', None)] = ns['uri']

            data_gen.variables.append(var)

        sed_doc.data_generators.append(data_gen)
        data_gen_id_map[data_gen.id] = data_gen

    # add outputs to SED-ML document
    for output_spec in sed_doc_specs['outputs']:
        if output_spec['_type'] == 'SedReport':
            output = Report(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for data_set_spec in output_spec['dataSets']:
                data_gen_id = data_set_spec['dataGenerator']
                data_gen = data_gen_id_map.get(data_gen_id, None)

                if not data_gen:
                    raise BadRequestException(
                        title=
                        'Data generator `{}` for output `{}` does not exist'.
                        format(data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )

                data_set = DataSet(
                    id=data_set_spec.get('id'),
                    name=data_set_spec.get('name', None),
                    label=data_set_spec.get('label', None),
                    data_generator=data_gen,
                )
                output.data_sets.append(data_set)

        elif output_spec['_type'] == 'SedPlot2D':
            output = Plot2D(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for curve_spec in output_spec['curves']:
                x_data_gen_id = curve_spec['xDataGenerator']
                y_data_gen_id = curve_spec['yDataGenerator']
                style_id = curve_spec.get('style', None)
                x_data_gen = data_gen_id_map.get(x_data_gen_id, None)
                y_data_gen = data_gen_id_map.get(y_data_gen_id, None)
                style = style_id_map.get(style_id, None)

                if not x_data_gen:
                    raise BadRequestException(
                        title=
                        'X data generator `{}` for curve `{}` does not exist'.
                        format(x_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not y_data_gen:
                    raise BadRequestException(
                        title=
                        'Y data generator `{}` for curve `{}` does not exist'.
                        format(y_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if style_id is not None and style is None:
                    raise BadRequestException(
                        title='Style `{}` for curve `{}` does not exist'.
                        format(style_id, output_spec.get('id')),
                        instance=ValueError('Style does not exist'),
                    )

                curve = Curve(
                    id=curve_spec.get('id'),
                    name=curve_spec.get('name', None),
                    x_data_generator=x_data_gen,
                    y_data_generator=y_data_gen,
                    x_scale=AxisScale[output_spec['xScale']],
                    y_scale=AxisScale[output_spec['yScale']],
                    style=style,
                )
                output.curves.append(curve)

        elif output_spec['_type'] == 'SedPlot3D':
            output = Plot3D(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            # Resolve each surface's X/Y/Z data generator ids and optional
            # style id against the previously built lookup maps, raising a
            # BadRequestException for any id that does not resolve.
            for surface_spec in output_spec['surfaces']:
                x_data_gen_id = surface_spec['xDataGenerator']
                y_data_gen_id = surface_spec['yDataGenerator']
                z_data_gen_id = surface_spec['zDataGenerator']
                style_id = surface_spec.get('style', None)
                x_data_gen = data_gen_id_map.get(x_data_gen_id, None)
                y_data_gen = data_gen_id_map.get(y_data_gen_id, None)
                z_data_gen = data_gen_id_map.get(z_data_gen_id, None)
                style = style_id_map.get(style_id, None)

                if not x_data_gen:
                    raise BadRequestException(
                        title=
                        'X data generator `{}` for surface `{}` does not exist'
                        .format(x_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not y_data_gen:
                    raise BadRequestException(
                        title=
                        'Y data generator `{}` for surface `{}` does not exist'
                        .format(y_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                # Same check for the Z axis. Bug fix: the error title
                # previously said "X data generator" although this branch
                # validates the Z data generator (copy/paste error from the
                # X-axis check above).
                if not z_data_gen:
                    raise BadRequestException(
                        title=
                        'Z data generator `{}` for surface `{}` does not exist'
                        .format(z_data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                # A missing style (style_id is None) is allowed; it is only an
                # error when an explicit style id fails to resolve.
                if style_id is not None and style is None:
                    raise BadRequestException(
                        title='Style `{}` for surface `{}` does not exist'.
                        format(style_id, output_spec.get('id')),
                        instance=ValueError('Style does not exist'),
                    )

                # All references resolved: build the surface and attach it to
                # the 3D plot. The three axis scales are read from the
                # plot-level spec (output_spec), so every surface on this
                # plot shares them.
                surface = Surface(
                    id=surface_spec.get('id'),
                    name=surface_spec.get('name', None),
                    x_data_generator=x_data_gen,
                    y_data_generator=y_data_gen,
                    z_data_generator=z_data_gen,
                    x_scale=AxisScale[output_spec['xScale']],
                    y_scale=AxisScale[output_spec['yScale']],
                    z_scale=AxisScale[output_spec['zScale']],
                    style=style,
                )
                output.surfaces.append(surface)

        # Any other output type is rejected; upstream schema validation is
        # expected to prevent this branch from being reached.
        else:
            raise BadRequestException(
                title='Outputs of type `{}` are not supported'.format(
                    output_spec['_type']),
                instance=NotImplementedError('Invalid output')
            )  # pragma: no cover: unreachable due to schema validation

        sed_doc.outputs.append(output)

    # deserialize references
    model_map = {}
    for model in sed_doc.models:
        model_map[model.id] = model

    task_map = {}
    for task in sed_doc.tasks:
        task_map[task.id] = task

    for model in sed_doc.models:
        for change in model.changes:
            if isinstance(change, ComputeModelChange):
                for variable in change.variables:
                    if variable.model:
                        variable.model = model_map[variable.model]
                    if variable.task:
                        variable.task = task_map[variable.task]

    return sed_doc