def test_read_write_duplicate_labels(self):
    """Round-trip a CSV report whose data sets share labels.

    CSV files are keyed by column label, so duplicate labels are only
    recoverable when the reader's report lists the data sets in the same
    order as the writer's. When the order differs, only data sets whose
    label is unambiguous (unique) can be read back.
    """
    # Case 1: all three data sets share the label 'A', but the reader uses
    # the same Report object (same order) — every value should round-trip.
    report_1 = Report(
        id='report_1',
        data_sets=[
            DataSet(id='x', label='A'),
            DataSet(id='y', label='A'),
            DataSet(id='z', label='A'),
        ],
    )
    data_set_results_1 = data_model.DataSetResults({
        'x': numpy.array([1., 2.]),
        'y': numpy.array([3., 4.]),
        'z': numpy.array([5., 6.]),
    })
    rel_path_1 = os.path.join('a/b/c.sedml', report_1.id)
    io.ReportWriter().run(report_1, data_set_results_1, self.dirname, rel_path_1,
                          format=data_model.ReportFormat.csv)
    data_set_results_2 = io.ReportReader().run(
        report_1, self.dirname, rel_path_1, format=data_model.ReportFormat.csv)
    numpy.testing.assert_allclose(data_set_results_2['x'], numpy.array([1., 2.]))
    numpy.testing.assert_allclose(data_set_results_2['y'], numpy.array([3., 4.]))
    numpy.testing.assert_allclose(data_set_results_2['z'], numpy.array([5., 6.]))

    # Case 2: write with data sets ordered (x, y, z) where x and y share
    # label 'X', then read with a Report that lists them in a different
    # order (x, z, y). Only 'z' has a unique label, so only 'z' should be
    # recovered.
    report_1 = Report(
        id='report_1',
        data_sets=[
            DataSet(id='x', label='X'),
            DataSet(id='y', label='X'),
            DataSet(id='z', label='Z'),
        ],
    )
    data_set_results_1 = data_model.DataSetResults({
        'x': numpy.array([1., 2.]),
        'y': numpy.array([3., 4.]),
        'z': numpy.array([5., 6.]),
    })
    rel_path_1 = os.path.join('a/b/c.sedml', report_1.id)
    io.ReportWriter().run(report_1, data_set_results_1, self.dirname, rel_path_1,
                          format=data_model.ReportFormat.csv)
    report_2 = Report(
        id='report_1',
        data_sets=[
            DataSet(id='x', label='X'),
            DataSet(id='z', label='Z'),
            DataSet(id='y', label='X'),
        ],
    )
    data_set_results_2 = io.ReportReader().run(
        report_2, self.dirname, rel_path_1, format=data_model.ReportFormat.csv)
    self.assertEqual(set(data_set_results_2.keys()), set(['z']))
    numpy.testing.assert_allclose(data_set_results_2['z'], numpy.array([5., 6.]))
def test_read_write(self):
    """Round-trip reports through every supported output format.

    Table formats (CSV, TSV, XLSX) flatten everything to float columns,
    pad ragged data sets with NaN, and refuse multidimensional data.
    HDF5 preserves dtypes, shapes, ``None`` entries, and multidimensional
    arrays, and records SED-ML metadata as HDF5 attributes.
    """
    report_1 = Report(
        id='report_1',
        name='report 1',
        data_sets=[
            DataSet(id='w', label='W'),
            DataSet(id='x', label='X'),
            DataSet(id='y', label='Y'),
            DataSet(id='z', label='Z'),
        ],
    )
    report_2 = Report(
        id='report_2',
        name='report 2',
        data_sets=[
            DataSet(id='a', label='A'),
            DataSet(id='b', label='B'),
            DataSet(id='c', label='C'),
            DataSet(id='d', label='D'),
        ],
    )
    # report_3 deliberately has no name (exercises the missing-sedmlName
    # attribute case below).
    report_3 = Report(
        id='report_3',
        data_sets=[
            DataSet(id='a', label='A'),
            DataSet(id='b', label='B'),
            DataSet(id='c', label='C'),
            DataSet(id='d', label='D'),
        ],
    )
    # Mixed shapes/dtypes: missing (None), int vector, float vector with
    # NaN, and a 0-d scalar.
    data_set_results_1 = data_model.DataSetResults({
        'w': None,
        'x': numpy.array([1, 2, 3]),
        'y': numpy.array([4., numpy.nan]),
        'z': numpy.array(6.),
    })
    data_set_results_2 = data_model.DataSetResults({
        'a': numpy.array([1, 2]),
        'b': numpy.array([7., 8., 9.]),
        'c': numpy.array(True),
        'd': None,
    })
    # 'a' is 2-D, which table formats cannot represent.
    data_set_results_3 = data_model.DataSetResults({
        'a': numpy.array([[1, 2], [3, 4], [5, 6]]),
        'b': numpy.array([7., 8., 9.]),
        'c': numpy.array(True),
        'd': None,
    })

    # CSV, TSV, XLSX (flat table formats)
    for format in [
            data_model.ReportFormat.csv, data_model.ReportFormat.tsv,
            data_model.ReportFormat.xlsx
    ]:
        rel_path_1 = os.path.join(format.value, 'a/b/c.sedml', report_1.id)
        rel_path_2 = os.path.join(format.value, 'a/d.sedml', report_2.id)
        # Fixed: previously built from report_2.id, although report_3 is
        # what is written to this path.
        rel_path_3 = os.path.join(format.value, 'e.sedml', report_3.id)
        io.ReportWriter().run(report_1, data_set_results_1, self.dirname,
                              rel_path_1, format=format)
        io.ReportWriter().run(report_2, data_set_results_2, self.dirname,
                              rel_path_2, format=format)
        # Multidimensional data cannot be flattened into a table.
        with self.assertWarnsRegex(
                CannotExportMultidimensionalTableWarning,
                'Multidimensional reports cannot be exported'):
            io.ReportWriter().run(report_3, data_set_results_3, self.dirname,
                                  rel_path_3, format=format)
        data_set_results_1_b = io.ReportReader().run(report_1, self.dirname,
                                                     rel_path_1, format=format)
        data_set_results_2_b = io.ReportReader().run(report_2, self.dirname,
                                                     rel_path_2, format=format)
        # report_3 was not exported, so only two ids are discoverable.
        self.assertEqual(
            set(io.ReportReader().get_ids(self.dirname, format=format)),
            set([rel_path_1, rel_path_2]))
        # Table round-trip: everything comes back as float64, padded with
        # NaN to the longest data set.
        numpy.testing.assert_allclose(
            data_set_results_1_b['w'],
            numpy.array([numpy.nan, numpy.nan, numpy.nan]))
        numpy.testing.assert_allclose(data_set_results_1_b['x'],
                                      numpy.array([1., 2., 3.]))
        numpy.testing.assert_allclose(
            data_set_results_1_b['y'],
            numpy.array([4., numpy.nan, numpy.nan]))
        numpy.testing.assert_allclose(
            data_set_results_1_b['z'],
            numpy.array([6., numpy.nan, numpy.nan]))
        self.assertEqual(data_set_results_1_b['w'].dtype.name, 'float64')
        self.assertEqual(data_set_results_1_b['x'].dtype.name, 'float64')
        self.assertEqual(data_set_results_1_b['y'].dtype.name, 'float64')
        self.assertEqual(data_set_results_1_b['z'].dtype.name, 'float64')
        numpy.testing.assert_allclose(data_set_results_2_b['a'],
                                      numpy.array([1., 2., numpy.nan]))
        numpy.testing.assert_allclose(data_set_results_2_b['b'],
                                      numpy.array([7., 8., 9.]))
        numpy.testing.assert_allclose(
            data_set_results_2_b['c'],
            numpy.array([1., numpy.nan, numpy.nan]))
        numpy.testing.assert_allclose(
            data_set_results_2_b['d'],
            numpy.array([numpy.nan, numpy.nan, numpy.nan]))
        self.assertEqual(data_set_results_2_b['a'].dtype.name, 'float64')
        self.assertEqual(data_set_results_2_b['b'].dtype.name, 'float64')
        self.assertEqual(data_set_results_2_b['c'].dtype.name, 'float64')
        self.assertEqual(data_set_results_2_b['d'].dtype.name, 'float64')

    # HDF5
    for format in [data_model.ReportFormat.h5]:
        rel_path_1 = os.path.join(format.value, 'a/b/c.sedml', report_1.id)
        rel_path_2 = os.path.join(format.value, 'a/d.sedml', report_2.id)
        rel_path_3 = os.path.join(format.value, 'e.sedml', report_3.id)
        io.ReportWriter().run(report_1, data_set_results_1, self.dirname,
                              rel_path_1, format=format)
        io.ReportWriter().run(report_2, data_set_results_2, self.dirname,
                              rel_path_2, format=format)
        # Multidimensional data is fine in HDF5 — no warning expected.
        io.ReportWriter().run(report_3, data_set_results_3, self.dirname,
                              rel_path_3, format=format)
        data_set_results_1_b = io.ReportReader().run(report_1, self.dirname,
                                                     rel_path_1, format=format)
        data_set_results_2_b = io.ReportReader().run(report_2, self.dirname,
                                                     rel_path_2, format=format)
        data_set_results_3_b = io.ReportReader().run(report_3, self.dirname,
                                                     rel_path_3, format=format)
        self.assertEqual(
            set(io.ReportReader().get_ids(self.dirname, format=format)),
            set([rel_path_1, rel_path_2, rel_path_3]))
        # HDF5 round-trip preserves None entries, dtypes, and shapes.
        self.assertEqual(data_set_results_1_b['w'], None)
        numpy.testing.assert_allclose(data_set_results_1_b['x'],
                                      numpy.array([1, 2, 3]))
        numpy.testing.assert_allclose(data_set_results_1_b['y'],
                                      numpy.array([4., numpy.nan]))
        numpy.testing.assert_allclose(data_set_results_1_b['z'],
                                      numpy.array(6.))
        self.assertEqual(data_set_results_1_b['x'].dtype.name, 'int64')
        self.assertEqual(data_set_results_1_b['y'].dtype.name, 'float64')
        self.assertEqual(data_set_results_1_b['z'].dtype.name, 'float64')
        numpy.testing.assert_allclose(data_set_results_2_b['a'],
                                      numpy.array([1, 2]))
        numpy.testing.assert_allclose(data_set_results_2_b['b'],
                                      numpy.array([7., 8., 9.]))
        numpy.testing.assert_allclose(data_set_results_2_b['c'],
                                      numpy.array(True))
        self.assertEqual(data_set_results_2_b['d'], None)
        self.assertEqual(data_set_results_2_b['a'].dtype.name, 'int64')
        self.assertEqual(data_set_results_2_b['b'].dtype.name, 'float64')
        self.assertEqual(data_set_results_2_b['c'].dtype.name, 'bool')
        numpy.testing.assert_allclose(
            data_set_results_3_b['a'],
            numpy.array([[1, 2], [3, 4], [5, 6]]))
        numpy.testing.assert_allclose(data_set_results_3_b['b'],
                                      numpy.array([7., 8., 9.]))
        numpy.testing.assert_allclose(data_set_results_3_b['c'],
                                      numpy.array(True))
        self.assertEqual(data_set_results_3_b['d'], None)
        self.assertEqual(data_set_results_3_b['a'].dtype.name, 'int64')
        self.assertEqual(data_set_results_3_b['b'].dtype.name, 'float64')
        self.assertEqual(data_set_results_3_b['c'].dtype.name, 'bool')

        # The writer mirrors the COMBINE archive hierarchy as HDF5 groups,
        # annotating each with its uri/location, and each report group with
        # SED-ML id/name (no sedmlName when the report is unnamed).
        with h5py.File(os.path.join(self.dirname, 'reports.h5'), 'r') as file:
            self.assertEqual(
                file[format.value + '/a'].attrs, {
                    'uri': format.value + '/a',
                    'combineArchiveLocation': format.value + '/a',
                })
            self.assertEqual(
                file[format.value + '/a/b'].attrs, {
                    'uri': format.value + '/a/b',
                    'combineArchiveLocation': format.value + '/a/b',
                })
            self.assertEqual(
                file[format.value + '/a/b/c.sedml'].attrs, {
                    'uri': format.value + '/a/b/c.sedml',
                    'combineArchiveLocation': format.value + '/a/b/c.sedml',
                })
            self.assertEqual(
                file[format.value + '/a/d.sedml'].attrs, {
                    'uri': format.value + '/a/d.sedml',
                    'combineArchiveLocation': format.value + '/a/d.sedml',
                })
            self.assertEqual(
                file[format.value + '/e.sedml'].attrs, {
                    'uri': format.value + '/e.sedml',
                    'combineArchiveLocation': format.value + '/e.sedml',
                })
            self.assertEqual(
                file[format.value + '/a/b/c.sedml/' + report_1.id].attrs['uri'],
                format.value + '/a/b/c.sedml/' + report_1.id)
            self.assertEqual(
                file[format.value + '/a/b/c.sedml/' + report_1.id].attrs['sedmlId'],
                report_1.id)
            self.assertEqual(
                file[format.value + '/a/b/c.sedml/' + report_1.id].attrs['sedmlName'],
                report_1.name)
            self.assertEqual(
                file[format.value + '/a/d.sedml/' + report_2.id].attrs['uri'],
                format.value + '/a/d.sedml/' + report_2.id)
            self.assertEqual(
                file[format.value + '/a/d.sedml/' + report_2.id].attrs['sedmlId'],
                report_2.id)
            self.assertEqual(
                file[format.value + '/a/d.sedml/' + report_2.id].attrs['sedmlName'],
                report_2.name)
            self.assertEqual(
                file[format.value + '/e.sedml/' + report_3.id].attrs['uri'],
                format.value + '/e.sedml/' + report_3.id)
            self.assertEqual(
                file[format.value + '/e.sedml/' + report_3.id].attrs['sedmlId'],
                report_3.id)
            # report_3 has no name, so no sedmlName attribute is written.
            self.assertNotIn(
                'sedmlName',
                file[format.value + '/e.sedml/' + report_3.id].attrs)
def test_exec_sedml_docs_in_archive_without_log(self):
    """Execute a COMBINE archive with and without logging/result collection.

    NOTE(review): despite the name, this test exercises both the
    with-log and without-log configurations — consider renaming.

    Builds a real archive (SBML model + SED-ML doc with one time course,
    two data generators, and one report), then runs it with stub task
    executers and checks collected results, log contents, and
    error-propagation behavior (wrapped vs. re-raised under DEBUG).
    """
    archive = CombineArchive(contents=[
        CombineArchiveContent(
            location='sim.sedml',
            format='http://identifiers.org/combine.specifications/sed-ml',
        ),
        CombineArchiveContent(
            location='model.xml',
            format='http://identifiers.org/combine.specifications/sbml',
        ),
    ], )

    # Build the SED-ML document: one model, one time course, one task,
    # two data generators, one report with two data sets.
    sed_doc = SedDocument()
    model = Model(id='model_1', source='model.xml',
                  language=ModelLanguage.SBML.value)
    sed_doc.models.append(model)
    sim = UniformTimeCourseSimulation(
        id='sim_1',
        initial_time=0.,
        output_start_time=0.,
        output_end_time=10.,
        number_of_points=10,
        algorithm=Algorithm(kisao_id='KISAO_0000019'))
    sed_doc.simulations.append(sim)
    task = Task(id='task_1', model=model, simulation=sim)
    sed_doc.tasks.append(task)
    sed_doc.data_generators.append(
        DataGenerator(
            id='data_gen_1',
            variables=[
                Variable(
                    id='var_1',
                    target=
                    "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Trim']",
                    target_namespaces={
                        'sbml': 'http://www.sbml.org/sbml/level2/version4'
                    },
                    task=task)
            ],
            math='var_1',
        ))
    sed_doc.data_generators.append(
        DataGenerator(
            id='data_gen_2',
            variables=[
                Variable(
                    id='var_2',
                    target=
                    "/sbml:sbml/sbml:model/sbml:listOfSpecies/sbml:species[@id='Clb']",
                    target_namespaces={
                        'sbml': 'http://www.sbml.org/sbml/level2/version4'
                    },
                    task=task)
            ],
            math='var_2',
        ))
    report = Report(id='output_1')
    sed_doc.outputs.append(report)
    report.data_sets.append(
        DataSet(id='data_set_1',
                label='data_set_1',
                data_generator=sed_doc.data_generators[0]))
    report.data_sets.append(
        DataSet(id='data_set_2',
                label='data_set_2',
                data_generator=sed_doc.data_generators[1]))

    # Assemble the archive on disk from a fixture model plus the SED-ML doc.
    archive_dirname = os.path.join(self.tmp_dir, 'archive')
    os.makedirs(archive_dirname)
    shutil.copyfile(
        os.path.join(os.path.dirname(__file__), '..', 'fixtures',
                     'BIOMD0000000297.xml'),
        os.path.join(archive_dirname, 'model.xml'))
    SedmlSimulationWriter().run(sed_doc,
                                os.path.join(archive_dirname, 'sim.sedml'))
    archive_filename = os.path.join(self.tmp_dir, 'archive.omex')
    CombineArchiveWriter().run(archive, archive_dirname, archive_filename)

    # Stub executer: records algorithm/simulator details in the log (when
    # given one) and returns deterministic linspace results.
    def sed_task_executer(task, variables, log=None, config=None):
        if log:
            log.algorithm = task.simulation.algorithm.kisao_id
            log.simulator_details = {
                'attrib': 'value',
            }
        return VariableResults({
            'var_1':
            numpy.linspace(0., 10., task.simulation.number_of_points + 1),
            'var_2':
            numpy.linspace(10., 20., task.simulation.number_of_points + 1),
        }), log

    # Stub executer that always fails.
    def sed_task_executer_error(task, variables, log=None, config=None):
        raise ValueError('Big error')

    out_dir = os.path.join(self.tmp_dir, 'outputs')
    config = get_config()
    config.REPORT_FORMATS = []
    config.VIZ_FORMATS = []
    config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
    config.LOG = True

    # with log
    sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                         sed_task_executer)
    results, log = exec.exec_sedml_docs_in_archive(
        sed_doc_executer,
        archive_filename,
        out_dir,
        apply_xml_model_changes=False,
        config=config)
    self.assertEqual(set(results.keys()), set(['sim.sedml']))
    self.assertEqual(set(results['sim.sedml'].keys()), set(['output_1']))
    self.assertEqual(set(results['sim.sedml']['output_1'].keys()),
                     set(['data_set_1', 'data_set_2']))
    numpy.testing.assert_allclose(
        results['sim.sedml']['output_1']['data_set_1'],
        numpy.linspace(0., 10., 11))
    numpy.testing.assert_allclose(
        results['sim.sedml']['output_1']['data_set_2'],
        numpy.linspace(10., 20., 11))
    self.assertEqual(log.exception, None)
    # The details recorded by the stub executer surface in the task log.
    self.assertEqual(
        log.sed_documents['sim.sedml'].tasks['task_1'].algorithm,
        task.simulation.algorithm.kisao_id)
    self.assertEqual(
        log.sed_documents['sim.sedml'].tasks['task_1'].simulator_details,
        {'attrib': 'value'})

    # Failure with LOG on and DEBUG off: exception is captured in the log.
    sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                         sed_task_executer_error)
    results, log = exec.exec_sedml_docs_in_archive(
        sed_doc_executer,
        archive_filename,
        out_dir,
        apply_xml_model_changes=False,
        config=config)
    self.assertIsInstance(log.exception, CombineArchiveExecutionError)

    # Failure with DEBUG on: the original exception is re-raised.
    config.DEBUG = True
    sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                         sed_task_executer_error)
    with self.assertRaisesRegex(ValueError, 'Big error'):
        exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                        archive_filename,
                                        out_dir,
                                        apply_xml_model_changes=False,
                                        config=config)

    # without log
    config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
    config.LOG = False
    config.DEBUG = False
    sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                         sed_task_executer)
    results, log = exec.exec_sedml_docs_in_archive(
        sed_doc_executer,
        archive_filename,
        out_dir,
        apply_xml_model_changes=False,
        config=config)
    # With collection and logging disabled, both outputs are None.
    self.assertEqual(results, None)
    self.assertEqual(log, None)

    # Failure without a log: the wrapped execution error is raised.
    sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                         sed_task_executer_error)
    with self.assertRaisesRegex(CombineArchiveExecutionError, 'Big error'):
        exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                        archive_filename,
                                        out_dir,
                                        apply_xml_model_changes=False,
                                        config=config)

    # Failure without a log but with DEBUG: original exception re-raised.
    config.DEBUG = True
    sed_doc_executer = functools.partial(sedml_exec.exec_sed_doc,
                                         sed_task_executer_error)
    with self.assertRaisesRegex(ValueError, 'Big error'):
        exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                        archive_filename,
                                        out_dir,
                                        apply_xml_model_changes=False,
                                        config=config)
def test_1(self):
    """Exercise exec_sedml_docs_in_archive with a mocked SED-ML executer.

    Uses mock.patch so no real simulation runs: a fake ``exec_sed_doc``
    writes canned report files and returns canned results. Checks result
    collection on/off, output bundling on disk, the no-SED-ML error, and
    error propagation (captured in the log vs. raised).
    """
    archive = CombineArchive(contents=[
        CombineArchiveContent(
            location='sim.sedml',
            format='http://identifiers.org/combine.specifications/sed-ml',
        ),
        CombineArchiveContent(
            location='model.xml',
            format='http://identifiers.org/combine.specifications/sbml',
        ),
    ], )
    in_dir = os.path.join(self.tmp_dir, 'archive')
    archive_filename = os.path.join(self.tmp_dir, 'archive.omex')
    CombineArchiveWriter().run(archive, in_dir, archive_filename)

    # Task executer is never called for real — everything is mocked below.
    def sed_task_executer(task, variables):
        pass

    out_dir = os.path.join(self.tmp_dir, 'outputs')

    # Fake SED-ML doc executer: writes canned report files into the
    # per-document output dir plus a placeholder reports.h5, and returns
    # canned results.
    def exec_sed_doc(task_executer,
                     filename,
                     working_dir,
                     base_out_dir,
                     rel_path,
                     apply_xml_model_changes=False,
                     indent=0,
                     log=None,
                     log_level=None,
                     config=None):
        out_dir = os.path.join(base_out_dir, rel_path)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        with open(os.path.join(out_dir, 'report1.csv'), 'w') as file:
            file.write('ABC')
        with open(os.path.join(out_dir, 'report2.csv'), 'w') as file:
            file.write('DEF')
        with open(os.path.join(base_out_dir, 'reports.h5'), 'w') as file:
            file.write('DEF')
        return ReportResults({
            'report1': 'ABC',
            'report2': 'DEF',
        }), None

    with mock.patch('biosimulators_utils.sedml.exec.exec_sed_doc',
                    side_effect=exec_sed_doc):
        sed_doc = SedDocument(
            tasks=[Task(id='task_1')],
            outputs=[Report(id='output_1')],
        )
        with mock.patch.object(SedmlSimulationReader, 'run',
                               return_value=sed_doc):
            sed_doc_executer = functools.partial(exec_sed_doc,
                                                 sed_task_executer)
            config = get_config()
            config.REPORT_FORMATS = [ReportFormat.h5, ReportFormat.csv]
            config.VIZ_FORMATS = []
            config.BUNDLE_OUTPUTS = True
            config.KEEP_INDIVIDUAL_OUTPUTS = True

            # Collection off: no results returned.
            config.COLLECT_COMBINE_ARCHIVE_RESULTS = False
            results, _ = exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                                         archive_filename,
                                                         out_dir,
                                                         config=config)
            self.assertEqual(results, None)

            # Collection on: canned results are returned per SED-ML doc.
            config.COLLECT_COMBINE_ARCHIVE_RESULTS = True
            results, _ = exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                                         archive_filename,
                                                         out_dir,
                                                         config=config)
            self.assertEqual(
                results,
                SedDocumentResults({
                    'sim.sedml':
                    ReportResults({
                        'report1': 'ABC',
                        'report2': 'DEF',
                    })
                }))
    # Bundled + individual outputs are all present on disk.
    self.assertEqual(
        sorted(os.listdir(out_dir)),
        sorted(['reports.h5', 'reports.zip', 'sim.sedml', 'log.yml']))
    self.assertEqual(
        sorted(os.listdir(os.path.join(out_dir, 'sim.sedml'))),
        sorted(['report1.csv', 'report2.csv']))

    # Re-mark the SED-ML entry as plain text: the archive then contains no
    # executable SED-ML and execution must fail.
    archive.contents[0].format = CombineArchiveContentFormat.TEXT
    CombineArchiveWriter().run(archive, in_dir, archive_filename)
    with self.assertRaisesRegex(
            NoSedmlError, 'does not contain any executing SED-ML files'):
        with mock.patch('biosimulators_utils.sedml.exec.exec_sed_doc',
                        side_effect=exec_sed_doc):
            sed_doc_executer = functools.partial(exec_sed_doc,
                                                 sed_task_executer)
            config = get_config()
            config.REPORT_FORMATS = [ReportFormat.h5, ReportFormat.csv]
            config.VIZ_FORMATS = []
            config.BUNDLE_OUTPUTS = True
            config.KEEP_INDIVIDUAL_OUTPUTS = True
            config.DEBUG = True
            exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                            archive_filename,
                                            out_dir,
                                            config=config)

    # Restore the SED-ML format and test the failing-executer path.
    archive.contents[
        0].format = 'http://identifiers.org/combine.specifications/sed-ml'
    CombineArchiveWriter().run(archive, in_dir, archive_filename)
    out_dir = os.path.join(self.tmp_dir, 'outputs-with-error')

    # Same fake executer, but it writes its outputs and then fails.
    def exec_sed_doc(task_executer,
                     filename,
                     working_dir,
                     base_out_dir,
                     rel_path,
                     apply_xml_model_changes=False,
                     indent=0,
                     log=None,
                     log_level=None,
                     config=None):
        out_dir = os.path.join(base_out_dir, rel_path)
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)
        with open(os.path.join(out_dir, 'report1.csv'), 'w') as file:
            file.write('ABC')
        with open(os.path.join(out_dir, 'report2.csv'), 'w') as file:
            file.write('DEF')
        with open(os.path.join(base_out_dir, 'reports.h5'), 'w') as file:
            file.write('DEF')
        raise ValueError('An error')

    sed_doc = SedDocument(
        tasks=[Task(id='task_1')],
        outputs=[Report(id='output_1')],
    )
    with mock.patch.object(SedmlSimulationReader, 'run',
                           return_value=sed_doc):
        sed_doc_executer = functools.partial(exec_sed_doc, sed_task_executer)
        with self.assertRaisesRegex(CombineArchiveExecutionError,
                                    'An error'):
            config = get_config()
            config.REPORT_FORMATS = [ReportFormat.h5, ReportFormat.csv]
            config.VIZ_FORMATS = []
            config.BUNDLE_OUTPUTS = True
            config.KEEP_INDIVIDUAL_OUTPUTS = True
            _, log = exec.exec_sedml_docs_in_archive(sed_doc_executer,
                                                     archive_filename,
                                                     out_dir,
                                                     config=config)
            # The error is captured in the log; re-raise it so the
            # assertRaisesRegex above can match it.
            if log.exception:
                raise log.exception
    # Outputs written before the failure are still bundled and kept.
    self.assertEqual(
        sorted(os.listdir(out_dir)),
        sorted(['reports.h5', 'reports.zip', 'sim.sedml', 'log.yml']))
    self.assertEqual(
        sorted(os.listdir(os.path.join(out_dir, 'sim.sedml'))),
        sorted(['report1.csv', 'report2.csv']))
def _build_combine_archive(self):
    """Build a COMBINE archive for a Smoldyn time-course simulation.

    Creates a SED-ML document with one Smoldyn model/task (time, red, and
    green molecule-count data generators feeding one report), copies the
    ``bounce1.txt`` example model next to it, and packs both into an OMEX
    archive under ``self.dirname``.

    Returns:
        tuple: the :obj:`SedDocument` and the path of the written archive.
    """
    task = Task(
        id='task',
        model=Model(
            id='model',
            source='bounce1.txt',
            language=ModelLanguage.Smoldyn.value,
        ),
        simulation=UniformTimeCourseSimulation(
            id='sim',
            initial_time=0.,
            output_start_time=0.1,
            output_end_time=0.2,
            number_of_points=10,
            algorithm=Algorithm(
                kisao_id='KISAO_0000057',
                changes=[
                    AlgorithmParameterChange(kisao_id='KISAO_0000488',
                                             new_value='10'),
                ])),
    )
    # NOTE(review): `variables` is unused below — the data generators
    # build their own Variable objects; consider removing.
    variables = [
        Variable(id='time', symbol=Symbol.time.value, task=task),
        Variable(id='red', target='molcount red', task=task),
        Variable(id='green', target='molcount green', task=task),
    ]
    doc = SedDocument(
        models=[task.model],
        simulations=[task.simulation],
        tasks=[task],
        data_generators=[
            DataGenerator(
                id='data_gen_time',
                variables=[
                    Variable(id='var_time',
                             symbol=Symbol.time.value,
                             task=task)
                ],
                math='var_time',
            ),
            DataGenerator(
                id='data_gen_red',
                variables=[
                    Variable(id='var_red', target='molcount red', task=task)
                ],
                math='var_red',
            ),
            DataGenerator(
                id='data_gen_green',
                variables=[
                    Variable(id='var_green',
                             target='molcount green',
                             task=task)
                ],
                math='var_green',
            ),
        ],
    )
    doc.outputs.append(
        Report(id='report',
               data_sets=[
                   DataSet(id='data_set_time',
                           label='time',
                           data_generator=doc.data_generators[0]),
                   DataSet(id='data_set_red',
                           label='red',
                           data_generator=doc.data_generators[1]),
                   DataSet(id='data_set_green',
                           label='green',
                           data_generator=doc.data_generators[2]),
               ]))
    # Stage the archive contents on disk, then pack them into an OMEX file.
    archive_dirname = os.path.join(self.dirname, 'archive')
    os.makedirs(archive_dirname)
    shutil.copyfile(
        os.path.join(self.EXAMPLES_DIRNAME, 'S1_intro', 'bounce1.txt'),
        os.path.join(archive_dirname, 'bounce1.txt'))
    sim_filename = os.path.join(archive_dirname, 'sim_1.sedml')
    SedmlSimulationWriter().run(doc, sim_filename)
    archive = CombineArchive(contents=[
        CombineArchiveContent('bounce1.txt',
                              CombineArchiveContentFormat.Smoldyn.value),
        CombineArchiveContent('sim_1.sedml',
                              CombineArchiveContentFormat.SED_ML.value),
    ], )
    archive_filename = os.path.join(self.dirname, 'archive.omex')
    CombineArchiveWriter().run(archive, archive_dirname, archive_filename)
    return doc, archive_filename
def export_sed_doc(sed_doc_specs):
    """ Export the specifications of a SED-ML document to a SED-ML document.

    Builds a :obj:`SedDocument` from a JSON-style specification dict:
    styles, models (with changes), simulations, tasks, data generators, and
    outputs are created in that order, each validated against the ids
    defined earlier; finally, string references inside compute-model-change
    variables are resolved to the corresponding model/task objects.

    Args:
        sed_doc_specs (:obj:`dict`): specifications of the SED-ML document

    Returns:
        :obj:`SedDocument`

    Raises:
        :obj:`BadRequestException`: if a referenced id does not exist or a
            spec uses an unsupported ``_type``
    """
    sed_doc = SedDocument(
        level=sed_doc_specs['level'],
        version=sed_doc_specs['version'],
    )

    # add styles to SED-ML document
    style_id_map = {}
    for style_spec in sed_doc_specs['styles']:
        style = Style(
            id=style_spec.get('id'),
            name=style_spec.get('name', None),
        )
        sed_doc.styles.append(style)
        style_id_map[style.id] = style

        if style_spec.get('line', None) is not None:
            style.line = LineStyle(
                type=style_spec['line'].get('type', None),
                color=style_spec['line'].get('color', None),
                thickness=style_spec['line'].get('thickness', None),
            )
            # Re-assign type/color as enum/Color objects when present.
            if style_spec['line'].get('type', None) is not None:
                style.line.type = LineStyleType[style_spec['line']['type']]
            if style_spec['line'].get('color', None) is not None:
                style.line.color = Color(style_spec['line']['color'])

        if style_spec.get('marker', None) is not None:
            style.marker = MarkerStyle(
                type=style_spec['marker'].get('type', None),
                size=style_spec['marker'].get('size', None),
                line_color=style_spec['marker'].get('lineColor', None),
                line_thickness=style_spec['marker'].get('lineThickness', None),
                fill_color=style_spec['marker'].get('fillColor', None),
            )
            if style_spec['marker'].get('type', None) is not None:
                style.marker.type = MarkerStyleType[style_spec['marker']
                                                    ['type']]
            if style_spec['marker'].get('lineColor', None) is not None:
                style.marker.line_color = Color(
                    style_spec['marker']['lineColor'])
            if style_spec['marker'].get('fillColor', None) is not None:
                style.marker.fill_color = Color(
                    style_spec['marker']['fillColor'])

        if style_spec.get('fill', None) is not None:
            style.fill = FillStyle(color=style_spec['fill'].get('color', None), )
            if style_spec['fill'].get('color', None) is not None:
                style.fill.color = Color(style_spec['fill']['color'])

    # Resolve base-style references in a second pass so a style may extend
    # one declared after it.
    for style_spec, style in zip(sed_doc_specs['styles'], sed_doc.styles):
        if style_spec.get('base', None) is not None:
            style.base = style_id_map.get(style_spec['base'], None)
            if style.base is None:
                raise BadRequestException(
                    title='Base style `{}` for style `{}` does not exist'.
                    format(style_spec['base'], style.id),
                    instance=ValueError('Style does not exist'),
                )

    # add models to SED-ML document
    model_id_map = {}
    for model_spec in sed_doc_specs['models']:
        model = Model(
            id=model_spec.get('id'),
            name=model_spec.get('name', None),
            language=model_spec.get('language'),
            source=model_spec.get('source'),
        )
        sed_doc.models.append(model)
        model_id_map[model.id] = model

        for change_spec in model_spec['changes']:
            if change_spec['_type'] == 'SedModelAttributeChange':
                change = ModelAttributeChange(
                    new_value=change_spec.get('newValue'), )
            elif change_spec['_type'] == 'SedAddElementModelChange':
                change = AddElementModelChange(
                    new_elements=change_spec.get('newElements'), )
            elif change_spec['_type'] == 'SedReplaceElementModelChange':
                change = ReplaceElementModelChange(
                    new_elements=change_spec.get('newElements'), )
            elif change_spec['_type'] == 'SedRemoveElementModelChange':
                change = RemoveElementModelChange()
            elif change_spec['_type'] == 'SedComputeModelChange':
                change = ComputeModelChange(
                    parameters=[],
                    variables=[],
                    math=change_spec.get('math'),
                )
                for parameter_spec in change_spec.get('parameters', []):
                    change.parameters.append(
                        Parameter(
                            id=parameter_spec.get('id'),
                            name=parameter_spec.get('name', None),
                            value=parameter_spec.get('value'),
                        ))
                # Variable model/task are kept as id strings here and
                # resolved to objects in the deserialization pass below.
                for variable_spec in change_spec.get('variables', []):
                    change.variables.append(
                        Variable(
                            id=variable_spec.get('id'),
                            name=variable_spec.get('name', None),
                            model=variable_spec.get('model', None),
                            target=variable_spec.get('target',
                                                     {}).get('value', None),
                            target_namespaces={
                                namespace['prefix']: namespace['uri']
                                for namespace in variable_spec.get(
                                    'target', {}).get('namespaces', [])
                            },
                            symbol=variable_spec.get('symbol', None),
                            task=variable_spec.get('task', None),
                        ))
            else:
                raise BadRequestException(
                    title='Changes of type `{}` are not supported'.format(
                        change_spec['_type']),
                    instance=NotImplementedError('Invalid change'))

            # Every change kind carries a target XPath with namespaces.
            change.target = change_spec.get('target').get('value')
            for ns in change_spec.get('target').get('namespaces', []):
                change.target_namespaces[ns.get('prefix', None)] = ns['uri']
            model.changes.append(change)

    # add simulations to SED-ML document
    simulation_id_map = {}
    for sim_spec in sed_doc_specs['simulations']:
        if sim_spec['_type'] == 'SedOneStepSimulation':
            sim = OneStepSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
                step=sim_spec.get('step'),
            )
        elif sim_spec['_type'] == 'SedSteadyStateSimulation':
            sim = SteadyStateSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
            )
        elif sim_spec['_type'] == 'SedUniformTimeCourseSimulation':
            sim = UniformTimeCourseSimulation(
                id=sim_spec.get('id'),
                name=sim_spec.get('name', None),
                initial_time=sim_spec.get('initialTime'),
                output_start_time=sim_spec.get('outputStartTime'),
                output_end_time=sim_spec.get('outputEndTime'),
                number_of_steps=sim_spec.get('numberOfSteps'),
            )
        else:
            raise BadRequestException(
                title='Simulations of type `{}` are not supported'.format(
                    sim_spec['_type']),
                instance=NotImplementedError('Invalid simulation')
            )  # pragma: no cover: unreachable due to schema validation

        alg_spec = sim_spec.get('algorithm')
        sim.algorithm = Algorithm(kisao_id=alg_spec.get('kisaoId'))
        for change_spec in alg_spec.get('changes'):
            sim.algorithm.changes.append(
                AlgorithmParameterChange(
                    kisao_id=change_spec.get('kisaoId'),
                    new_value=change_spec.get('newValue'),
                ))

        sed_doc.simulations.append(sim)
        simulation_id_map[sim.id] = sim

    # add tasks to SED-ML document
    task_id_map = {}
    for task_spec in sed_doc_specs['tasks']:
        if task_spec['_type'] == 'SedTask':
            model_id = task_spec.get('model')
            sim_id = task_spec.get('simulation')
            model = model_id_map.get(model_id, None)
            sim = simulation_id_map.get(sim_id, None)
            if not model:
                raise BadRequestException(
                    title='Model `{}` for task `{}` does not exist'.format(
                        model_id, task_spec.get('id')),
                    instance=ValueError('Model does not exist'),
                )
            if not sim:
                raise BadRequestException(
                    title='Simulation `{}` for task `{}` does not exist'.
                    format(sim_id, task_spec.get('id')),
                    instance=ValueError('Simulation does not exist'),
                )
            task = Task(
                id=task_spec.get('id'),
                name=task_spec.get('name', None),
                model=model,
                simulation=sim,
            )
        else:
            # TODO: support repeated tasks
            raise BadRequestException(
                title='Tasks of type `{}` are not supported'.format(
                    task_spec['_type']),
                instance=NotImplementedError('Invalid task')
            )  # pragma: no cover: unreachable due to schema validation

        sed_doc.tasks.append(task)
        task_id_map[task.id] = task

    # add data generators to SED-ML document
    data_gen_id_map = {}
    for data_gen_spec in sed_doc_specs['dataGenerators']:
        data_gen = DataGenerator(
            id=data_gen_spec.get('id'),
            name=data_gen_spec.get('name', None),
            math=data_gen_spec.get('math'),
        )
        for var_spec in data_gen_spec['variables']:
            task_id = var_spec.get('task')
            task = task_id_map.get(task_id, None)
            if not task:
                raise BadRequestException(
                    title='Task `{}` for variable `{}` does not exist'.format(
                        task_id, var_spec.get('id')),
                    instance=ValueError('Task does not exist'),
                )
            var = Variable(
                id=var_spec.get('id'),
                name=var_spec.get('name', None),
                task=task,
                symbol=var_spec.get('symbol', None),
            )
            target_spec = var_spec.get('target', None)
            if target_spec:
                var.target = target_spec['value']
                for ns in target_spec.get('namespaces', []):
                    var.target_namespaces[ns.get('prefix', None)] = ns['uri']
            data_gen.variables.append(var)
        sed_doc.data_generators.append(data_gen)
        data_gen_id_map[data_gen.id] = data_gen

    # add outputs to SED-ML document
    for output_spec in sed_doc_specs['outputs']:
        if output_spec['_type'] == 'SedReport':
            output = Report(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for data_set_spec in output_spec['dataSets']:
                data_gen_id = data_set_spec['dataGenerator']
                data_gen = data_gen_id_map.get(data_gen_id, None)
                if not data_gen:
                    raise BadRequestException(
                        title='Data generator `{}` for output `{}` does not exist'.
                        format(data_gen_id, output_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                data_set = DataSet(
                    id=data_set_spec.get('id'),
                    name=data_set_spec.get('name', None),
                    label=data_set_spec.get('label', None),
                    data_generator=data_gen,
                )
                output.data_sets.append(data_set)

        elif output_spec['_type'] == 'SedPlot2D':
            output = Plot2D(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for curve_spec in output_spec['curves']:
                x_data_gen_id = curve_spec['xDataGenerator']
                y_data_gen_id = curve_spec['yDataGenerator']
                style_id = curve_spec.get('style', None)

                x_data_gen = data_gen_id_map.get(x_data_gen_id, None)
                y_data_gen = data_gen_id_map.get(y_data_gen_id, None)
                style = style_id_map.get(style_id, None)

                # Fixed: these messages previously interpolated the output
                # id where they say "curve `{}`".
                if not x_data_gen:
                    raise BadRequestException(
                        title='X data generator `{}` for curve `{}` does not exist'.
                        format(x_data_gen_id, curve_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not y_data_gen:
                    raise BadRequestException(
                        title='Y data generator `{}` for curve `{}` does not exist'.
                        format(y_data_gen_id, curve_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                # style_id may legitimately be absent; only a dangling
                # reference is an error.
                if style_id is not None and style is None:
                    raise BadRequestException(
                        title='Style `{}` for curve `{}` does not exist'.
                        format(style_id, curve_spec.get('id')),
                        instance=ValueError('Style does not exist'),
                    )

                curve = Curve(
                    id=curve_spec.get('id'),
                    name=curve_spec.get('name', None),
                    x_data_generator=x_data_gen,
                    y_data_generator=y_data_gen,
                    x_scale=AxisScale[output_spec['xScale']],
                    y_scale=AxisScale[output_spec['yScale']],
                    style=style,
                )
                output.curves.append(curve)

        elif output_spec['_type'] == 'SedPlot3D':
            output = Plot3D(
                id=output_spec.get('id'),
                name=output_spec.get('name', None),
            )
            for surface_spec in output_spec['surfaces']:
                x_data_gen_id = surface_spec['xDataGenerator']
                y_data_gen_id = surface_spec['yDataGenerator']
                z_data_gen_id = surface_spec['zDataGenerator']
                style_id = surface_spec.get('style', None)

                x_data_gen = data_gen_id_map.get(x_data_gen_id, None)
                y_data_gen = data_gen_id_map.get(y_data_gen_id, None)
                z_data_gen = data_gen_id_map.get(z_data_gen_id, None)
                style = style_id_map.get(style_id, None)

                # Fixed: these messages previously interpolated the output
                # id where they say "surface `{}`", and the Z branch said
                # "X data generator".
                if not x_data_gen:
                    raise BadRequestException(
                        title='X data generator `{}` for surface `{}` does not exist'.
                        format(x_data_gen_id, surface_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not y_data_gen:
                    raise BadRequestException(
                        title='Y data generator `{}` for surface `{}` does not exist'.
                        format(y_data_gen_id, surface_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if not z_data_gen:
                    raise BadRequestException(
                        title='Z data generator `{}` for surface `{}` does not exist'.
                        format(z_data_gen_id, surface_spec.get('id')),
                        instance=ValueError('Data generator does not exist'),
                    )
                if style_id is not None and style is None:
                    raise BadRequestException(
                        title='Style `{}` for surface `{}` does not exist'.
                        format(style_id, surface_spec.get('id')),
                        instance=ValueError('Style does not exist'),
                    )

                surface = Surface(
                    id=surface_spec.get('id'),
                    name=surface_spec.get('name', None),
                    x_data_generator=x_data_gen,
                    y_data_generator=y_data_gen,
                    z_data_generator=z_data_gen,
                    x_scale=AxisScale[output_spec['xScale']],
                    y_scale=AxisScale[output_spec['yScale']],
                    z_scale=AxisScale[output_spec['zScale']],
                    style=style,
                )
                output.surfaces.append(surface)

        else:
            raise BadRequestException(
                title='Outputs of type `{}` are not supported'.format(
                    output_spec['_type']),
                instance=NotImplementedError('Invalid output')
            )  # pragma: no cover: unreachable due to schema validation

        sed_doc.outputs.append(output)

    # deserialize references: compute-model-change variables hold model/task
    # ids (strings) until now; swap them for the actual objects.
    model_map = {}
    for model in sed_doc.models:
        model_map[model.id] = model
    task_map = {}
    for task in sed_doc.tasks:
        task_map[task.id] = task
    for model in sed_doc.models:
        for change in model.changes:
            if isinstance(change, ComputeModelChange):
                for variable in change.variables:
                    if variable.model:
                        variable.model = model_map[variable.model]
                    if variable.task:
                        variable.task = task_map[variable.task]

    return sed_doc