def test_bad_load(self):
    from jsonasobj import load
    json_fname = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'filex.json')
    with self.assertRaises(FileNotFoundError):
        json_obj = load(json_fname)
    with self.assertRaises(TypeError):
        load(dict())
def __init__(self, source: Union[fname, json_txt]) -> None:
    """ Construct a FHIR StructuredDefinition from a source file name, file or string

    :param source: JSON source
    """
    valueset_directory = None
    if hasattr(source, 'read'):                     # an open file
        self._obj = load(source)
    elif source.strip().startswith('{'):            # a dictionary in text form
        self._obj = loads(source)
    else:                                           # a file name
        self._obj = load(open(source))
        valueset_directory = os.path.dirname(source)
    self.elements = [FHIRElement(self._obj, e, valueset_directory)
                     for e in self._obj.snapshot.element
                     if '.' in e.path and self._significant_differential(e)]
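# A minimal usage sketch for the constructor above. The class name StructuredDefinition and
# the file name are hypothetical (not from the original source); it illustrates the three
# source forms the constructor accepts: an open file, JSON text, or a file name.
sd1 = StructuredDefinition(open("observation.profile.json"))      # open file
sd2 = StructuredDefinition('{"snapshot": {"element": []}}')       # JSON text
sd3 = StructuredDefinition("observation.profile.json")            # file name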
def sdo_valueset_file(self) -> Optional[JsonObj]:
    """ Retrieve the value set from the FHIR downloads """
    if self._vsdir:
        vsfile = os.path.join(self._vsdir,
                              "valueset-" + self._reference.split('ValueSet/')[1] + ".json")
        if os.path.exists(vsfile):
            return load(open(vsfile))
    return None
def __init__(self, vocabulary: Graph, json_fname: Optional[str], base_uri: str,
             data: Optional[JsonObj] = None, add_ontology_header: Optional[bool] = True,
             replace_narrative_text: Optional[bool] = False, target: Optional[Graph] = None):
    """ Convert a JSON collection into RDF.

    :param vocabulary: fhir metadata vocabulary
    :param json_fname: name or URI of the FHIR json collection to convert
    :param base_uri: URI to use as a base for identifiers
    :param data: JsonObj to use if json_fname is not present
    :param add_ontology_header: Include the OWL:Ontology declaration
    :param replace_narrative_text: Replace long narrative text with REPLACED_NARRATIVE_TEXT
    :param target: Target graph -- load everything into this if present
    """
    collection = load(json_fname) if json_fname else data
    self.entries = []           # type: List[FHIRResource]
    for entry in collection.entry:
        if 'resource' in entry:
            self.entries.append(
                FHIRResource(vocabulary, None, base_uri, data=entry.resource,
                             add_ontology_header=add_ontology_header,
                             replace_narrative_text=replace_narrative_text,
                             target=target))
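# A minimal usage sketch for the collection converter above, which appears to be the
# FHIRCollection __init__ invoked from fhir_json_to_rdf further below. The vocabulary path
# and bundle file name are assumptions, not from the original source.
mv = Graph()
mv.parse("fhir.ttl", format="turtle")                 # FHIR metadata vocabulary (assumed local copy)
coll = FHIRCollection(mv, "synthea_bundle.json", "http://hl7.org/fhir/")
for resource in coll.entries:
    print(str(resource))                              # each entry renders as Turtle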
def __init__(self, source, base: Optional[str] = None, debug: Optional[bool] = False,
             debug_slurps: Optional[bool] = False, over_slurp: Optional[bool] = True) -> None:
    """ Load a manifest

    :param source: file name, URI or file-like object that carries the manifest description
    :param base: RDF and ShEx base directory or URL.  If omitted, source file name/URI will be used
    :param debug: default debug setting for evaluate function
    :param debug_slurps: default debug_slurps setting for evaluate function
    :param over_slurp: default over_slurp setting for evaluate function
    """
    self.manifest = load(source)
    self.base = base
    if not self.base:
        if isinstance(source, str):
            if '://' in source:
                self.base = urlsplit(source).path.split('/')[-1]
            else:
                self.base = os.path.dirname(source)
    self.debug = debug
    self.debug_slurps = debug_slurps
    self.over_slurp = over_slurp
    for entry in self.manifest:
        entry._manifest = self
def test_as_json(self):
    schema = self.fix_schema_metadata(load_raw_schema(os.path.join(inputdir, 'schema6.yaml')))
    outfile = os.path.join(outputdir, 'schema6.json')
    if not os.path.exists(outfile):
        with open(outfile, 'w') as f:
            f.write(as_json(schema))
        self.fail(f"Generated {outfile} - run test again")
    else:
        self.assertEqual(load(outfile), loads(as_json(schema)))
def focus_nodes(self) -> List[URIRef]:
    result = self.endpoint.query()
    processed_results = jsonasobj.load(result.response)
    if self.print_results:
        print('\t' + ('\n\t'.join([row.item.value for row in processed_results.results.bindings[:10]])))
        if len(processed_results.results.bindings) > 10:
            print('\n\t ...')
        print('\n')
    return [URIRef(row.item.value) for row in processed_results.results.bindings]
def get_sparql_dataframe(service, query):
    """ Run a SPARQL query and return the list of ?item binding values.

    Note: despite the name, this variant returns a plain list rather than a Pandas data frame.
    """
    sparql = SPARQLWrapperWithAgent(service)
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    result = sparql.query()
    processed_results = jsonasobj.load(result.response)
    return [row.item.value for row in processed_results.results.bindings]
def _load_index(self) -> None:
    """ Update the memory image from the disk file """
    with open(self._cache_directory_index, 'r') as f:
        try:
            self._cache = jsonasobj.load(f)
        except json.decoder.JSONDecodeError:
            self._cache = None
    if self._cache is None:
        raise CacheError(f"cache index has been damaged.  Remove {self.cache_directory} and try again")
def check_types(s: SchemaDefinition) -> None:
    output = os.path.join(outputdir, 'schema4.json')
    if not os.path.exists(output):
        with open(output, 'w') as f:
            f.write(as_json(JsonObj(**{k: as_dict(loads(as_json(v))) for k, v in s.types.items()})))
        self.fail(f"File {output} created - rerun test")
    with open(output) as f:
        expected = as_dict(load(f))
    self.assertEqual(expected, {k: as_dict(loads(as_json(v))) for k, v in s.types.items()})
    s.types = None
def test_element_slots(self):
    """ Test all element slots and their inheritance """
    schema = SchemaLoader(env.input_path('resolver3.yaml')).resolve()
    x = {k: v for k, v in as_dict(schema.slots['s1']).items() if v is not None and v != []}
    outfile = env.expected_path('resolver3.json')
    if not os.path.exists(outfile):
        with open(outfile, 'w') as f:
            f.write(as_json(JsonObj(**x)))
    with open(outfile) as f:
        expected = as_dict(load(f))
    self.assertEqual(expected, x)
def check_types(s: SchemaDefinition) -> None:
    output = env.expected_path('schema4.json')
    if not os.path.exists(output):
        with open(output, 'w') as f:
            f.write(as_json(JsonObj(**{k: as_dict(loads(as_json(v))) for k, v in s.types.items()})))
    with open(output) as f:
        expected = as_dict(load(f))
    self.assertEqual(expected, {k: as_dict(loads(as_json(v))) for k, v in s.types.items()})
    s.types = None
def get_sparql_dataframe(service, query):
    """ Helper function to convert SPARQL results into a Pandas data frame. """
    sparql = SPARQLWrapper(service)
    sparql.setQuery(query)
    sparql.setReturnFormat(JSON)
    result = sparql.query()
    processed_results = jsonasobj.load(result.response)

    cols = processed_results.head.vars
    out = []
    for row in processed_results.results.bindings:
        item = []
        for c in cols:
            item.append(row._as_dict.get(c, {}).get('value'))
        out.append(item)
    return pd.DataFrame(out, columns=cols)
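# A minimal usage sketch for get_sparql_dataframe above. The endpoint URL and query are
# assumptions for illustration only (any SPARQL endpoint and SELECT query should work):
# run a small query and inspect the resulting data frame.
SAMPLE_ENDPOINT = "https://dbpedia.org/sparql"        # assumed public endpoint
SAMPLE_QUERY = """
    SELECT ?item WHERE { ?item a dbo:Country } LIMIT 5
"""
df = get_sparql_dataframe(SAMPLE_ENDPOINT, SAMPLE_QUERY)
print(df.head())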
def test_data_entry(self):
    save_output = False
    from fhirtordf.loaders.fhirresourceloader import FHIRResource
    with open(os.path.join(self.base_dir, 'synthea_data', 'Adams301_Keyshawn30_74.json')) as f:
        collection = load(f)
    source = FHIRResource(self.fhir_ontology, None, "http://standardhealthrecord.org/fhir/",
                          data=collection.entry[0].resource)
    turtle_fname = os.path.join(self.base_dir, 'synthea_data', 'Adams301_Keyshawn30_74_entry0.ttl')
    if save_output:
        with open(turtle_fname, 'w') as output:
            output.write(str(source))
    target = PrettyGraph()
    target.load(turtle_fname, format="turtle")
    # Note: This will fail if we use the pure turtle serializer (vs our changes in this package)
    self.maxDiff = None
    self.assertEqual('', rdf_compare(source.graph, target, ignore_owl_version=True))
    self.assertFalse(save_output, "Update output file always fails")
def fhir_json_to_rdf(json_fname: str,
                     base_uri: str = "http://hl7.org/fhir/",
                     target_graph: Optional[Graph] = None,
                     add_ontology_header: bool = True,
                     do_continuations: bool = True,
                     replace_narrative_text: bool = False,
                     metavoc: Optional[Union[Graph, FHIRMetaVoc]] = None) -> Graph:
    """ Convert a FHIR JSON resource image to RDF

    :param json_fname: Name or URI of the file to convert
    :param base_uri: Base URI to use for relative references.
    :param target_graph: If supplied, add RDF to this graph. If not, start with an empty graph.
    :param add_ontology_header: True means add owl:Ontology declaration to output
    :param do_continuations: True means follow continuation records on bundles and queries
    :param replace_narrative_text: True means replace any narrative text longer than 120 characters with
        '<div xmlns="http://www.w3.org/1999/xhtml">(removed)</div>'
    :param metavoc: FHIR Metadata Vocabulary (fhir.ttl) graph
    :return: resulting graph
    """
    def check_for_continuation(data_: JsonObj) -> Optional[str]:
        if do_continuations and 'link' in data_ and isinstance(data_.link, list):
            for link_e in data_.link:
                if 'relation' in link_e and link_e.relation == 'next':
                    return link_e.url
        return None

    if target_graph is None:
        target_graph = Graph()

    if metavoc is None:
        metavoc = FHIRMetaVoc().g
    elif isinstance(metavoc, FHIRMetaVoc):
        metavoc = metavoc.g

    page_fname = json_fname
    while page_fname:
        data = load(page_fname)
        if 'resourceType' in data and data.resourceType != 'Bundle':
            FHIRResource(metavoc, None, base_uri, data, target=target_graph,
                         add_ontology_header=add_ontology_header,
                         replace_narrative_text=replace_narrative_text)
            page_fname = check_for_continuation(data)
        elif 'entry' in data and isinstance(data.entry, list) and 'resource' in data.entry[0]:
            FHIRCollection(metavoc, None, base_uri, data, target=target_graph,
                           add_ontology_header=add_ontology_header if 'resourceType' in data else False,
                           replace_narrative_text=replace_narrative_text)
            page_fname = check_for_continuation(data)
        else:
            page_fname = None
            target_graph = None
    return target_graph
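# A minimal usage sketch for fhir_json_to_rdf above. The input file name and output path are
# assumptions, not from the original source: convert a single FHIR JSON resource to RDF and
# serialize the result as Turtle.
g = fhir_json_to_rdf("patient-example.json",          # assumed local FHIR resource file
                     base_uri="http://hl7.org/fhir/",
                     replace_narrative_text=True)
if g is not None:                                     # None signals that the input was neither a resource nor a bundle
    g.serialize("patient-example.ttl", format="turtle")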
import os

import jsonasobj
from rdflib import Graph

from scripts.metadata import METADATA_DIR, CONTEXT_DIR

CONTEXT = os.path.join(CONTEXT_DIR, 'metadata.context.json')
context_j = jsonasobj.load(CONTEXT)
BASE = context_j['@context']['@base']

n_converted = 0
for fname in os.listdir(METADATA_DIR):
    basename, ext = os.path.splitext(fname)
    if ext == '.json':
        g = Graph()
        g.parse(os.path.join(METADATA_DIR, fname), format="json-ld", context=CONTEXT, base=BASE)
        g.serialize(os.path.join(METADATA_DIR, basename + '.ttl'), format='ttl')
        n_converted += 1
print(f"*** {n_converted} files converted ***")
def test_load_uri(self):
    from jsonasobj import load
    # A relatively stable JSON file
    json_obj = load("http://hl7.org/fhir/STU3/account-example.json")
    self.assertEqual('Coverage/7546D', json_obj.coverage[0].coverage.reference)
def test_load_redirect(self):
    from jsonasobj import load
    json_obj = load("http://hl7.org/fhir/Patient/f001")
    self.assertEqual('male', json_obj.gender)
def test_load_file(self):
    from jsonasobj import load
    json_fname = os.path.join(os.path.split(os.path.abspath(__file__))[0], 'file.json')
    json_obj = load(json_fname)
    self.assertEqual([1, False, -12.7, "qwert"], json_obj.a_dict.vals)
def test_bytearray(self):
    from jsonasobj import load
    load("http://hl7.org/fhir/Patient/f201")