def generate_or_check(manifest, args, path, func):
    """Generate/check a file with a single generator

    Return True if successful; False if a comparison failed.
    """
    buffer = io.StringIO()
    func(manifest, args, buffer)
    new_text = buffer.getvalue()
    current_text = path.read_text()

    # Nothing to do when the generator reproduces the file exactly.
    if new_text == current_text:
        return True

    if args.generate:
        # Regeneration mode: overwrite the stale file and report success.
        path.write_text(new_text)
        return True

    # Check mode: show a unified diff (generated vs. on-disk) and fail.
    print(f'File {path} differs from expected!')
    diff_lines = difflib.unified_diff(
        new_text.splitlines(),
        current_text.splitlines(),
        str(path),
        '<expected>',
        lineterm='',
    )
    for diff_line in diff_lines:
        print(diff_line)
    return False
def write_yaml(dictionary, filename, logger=None, sort_keys=True):
    """Write YAML file.

    Parameters
    ----------
    dictionary : dict
        Python dictionary
    filename : `~pathlib.Path`
        Filename
    logger : `~logging.Logger`
        Logger
    sort_keys : bool
        Whether to sort keys. Default True, matching the previous
        behavior (``yaml.safe_dump`` sorts keys by default).
    """
    # sort_keys is forwarded so callers can preserve insertion order.
    text = yaml.safe_dump(dictionary, default_flow_style=False, sort_keys=sort_keys)
    path = make_path(filename)
    # Only creates the immediate parent; deeper missing parents still raise.
    path.parent.mkdir(exist_ok=True)
    if logger is not None:
        logger.info(f"Writing {path}")
    path.write_text(text)
def run(language: str, struct_file, destination_path: pathlib.Path):
    """Generate code for *language* from a structy definition file.

    Parameters:
        language: target-language key understood by the template registry.
        struct_file: open file object containing the structy source.
        destination_path: directory that receives the generated files.

    Raises:
        StructyError: if the destination is missing (and the user declines
            to create it, or stdout is not a TTY) or is not a directory.
    """
    template = structy_generator.templates.template_for(language)

    if not destination_path.exists():
        if sys.stdout.isatty():
            # Fix: prompt typo ("do want" -> "do you want") and accept
            # answers like "Y" or " y " instead of only a bare lowercase "y".
            create = input(
                f"Hey, {destination_path} doesn't exist, do you want me to create it? (y/n)"
            )
            if create.strip().lower() == "y":
                destination_path.mkdir(parents=True)
            else:
                raise StructyError(
                    f"Well, fine, {destination_path} doesn't exist and you don't want me to make it so I give up."
                )
        else:
            raise StructyError(f"C'mon, {destination_path} doesn't exist!")

    if not destination_path.is_dir():
        raise StructyError(
            f"Hey, {destination_path} isn't a path. I can't put files into another file."
        )

    source_file_name = pathlib.Path(struct_file.name).name
    source_file_stem = pathlib.Path(struct_file.name).stem
    struct = structy_generator.parser.parse(struct_file.read())
    outputs = template.render(
        source_file_stem,
        date=datetime.datetime.utcnow(),
        source=source_file_name,
        source_stem=source_file_stem,
        struct=struct,
        kinds=structy_generator.kinds.get_kinds(),
    )

    # Templates may emit several files (e.g. header + implementation).
    for output_name, output_contents in outputs.items():
        path = destination_path / output_name
        path.write_text(output_contents)
        print(f"Generated {path}.")
def write_yaml(dictionary, filename, logger=None, sort_keys=True):
    """Serialize a dictionary to YAML and write it to disk.

    Parameters
    ----------
    dictionary : dict
        Python dictionary
    filename : `~pathlib.Path`
        Filename
    logger : `~logging.Logger`
        Logger
    sort_keys : bool
        Whether to sort keys.
    """
    serialized = yaml.safe_dump(
        dictionary, default_flow_style=False, sort_keys=sort_keys
    )
    destination = make_path(filename)
    destination.parent.mkdir(exist_ok=True)
    if logger is not None:
        logger.info(f"Writing {destination}")
    destination.write_text(serialized)
def copy_pub(self):
    """Copy the built HTML tree from bld/<htm_dir> into pub/.

    Skips hidden entries, ensures a .nojekyll marker exists, and writes a
    CNAME file when self.cname is set (GitHub-Pages style publishing).

    NOTE(review): indentation reconstructed from a collapsed source. The
    .nojekyll/CNAME steps are placed under ``if src.exists():`` because
    ``dst`` is only defined there — confirm against the original layout.
    """
    htm_dir = self.get_htm_dir()
    if htm_dir:
        src = self.home / 'bld' / htm_dir
        if src.exists():
            # hidden files inside copied subtrees are excluded as well
            ignore = shutil.ignore_patterns('.*')
            dst = self.home / 'pub'
            for child in src.iterdir():
                if child.name.startswith('.'):
                    # skip hidden top-level entries explicitly
                    continue
                if child.is_dir():
                    shutil.copytree(child, dst / child.name, ignore=ignore)
                else:
                    shutil.copy2(child, dst / child.name)
            # create empty files
            for name in ['.nojekyll']:
                path = dst / name
                if not path.exists():
                    path.write_text('')
            # create CNAME
            if self.cname:
                (dst / 'CNAME').write_text(self.cname)
def cacheDict(filePath, dictionary):
    """Write *dictionary* to *filePath* as JSON."""
    target = Path(filePath)
    serialized = json.dumps(dictionary)
    target.write_text(serialized)
def main():
    """Build a FHIR R4 CapabilityStatement from an xlsx workbook.

    Reads the config/meta/profiles/resources/interactions/sps/sp_combos/ops
    sheets, assembles the CapabilityStatement model, validates it against a
    FHIR terminology server, renders an HTML narrative via Jinja2, and
    writes the result as JSON in the current working directory.

    NOTE(review): indentation below is reconstructed from a collapsed
    source; nesting of a few statements should be confirmed against the
    original file.
    """
    if (len(sys.argv) < 2):
        print(
            "Error: missing xlsx file - correct usage is:\n\tpython3 R4CapStatement_Maker.py [xlsx file]"
        )
        return
    xls = sys.argv[1]
    print('....creating CapabilityStatement.....')
    # Read the config sheet from the spreadsheet
    # use the index_col = 0 for setting the first row as the index
    config_df = read_excel(xls, 'config', na_filter=False, index_col=0)
    # --------- ig specific variable -------------------
    pre = config_df.Value.pre  # for Titles - not sure this is actually used
    canon = config_df.Value.canon  # don't forget the slash - fix using os.join or path
    publisher = config_df.Value.publisher
    restinteraction = config_df.Value.rest
    publisher_endpoint = dict(
        system=config_df.Value.publishersystem,
        value=config_df.Value.publishervalue,
    )
    definitions_file = config_df.Value.definitions_file  # source of spec.internal file manually extracted from downloaded spec
    # Read the meta sheet from the spreadsheet
    meta_df = read_excel(xls, 'meta', na_filter=False)
    meta_dict = dict(zip(meta_df.Element, meta_df.Value))
    # meta is an ad-hoc namedtuple whose fields mirror the sheet's Element column
    meta = namedtuple("Meta", meta_dict.keys())(*meta_dict.values())
    # Create the CapabilityStatement
    cs = create_capabilitystatement(meta, canon, publisher, publisher_endpoint, xls)
    rest = CS.CapabilityStatementRest(
        dict(mode=meta.mode,
             documentation=meta.documentation,
             security=dict(
                 description=meta.security) if meta.security else None,
             interaction=get_rest_ints(xls) if restinteraction else None,
             operation=get_sys_op(xls)))
    cs.rest = [rest]
    # rows whose first column starts with '!' are treated as commented out
    df_profiles = read_excel(xls, 'profiles', na_filter=False)
    df_profiles = df_profiles[df_profiles.Profile.str[0] != '!']
    resources_df = read_excel(xls, 'resources', na_filter=False)
    resources_df = resources_df[resources_df.type.str[0] != '!']
    df_i = read_excel(xls, 'interactions', na_filter=False)
    df_sp = read_excel(xls, 'sps', na_filter=False)
    df_combos = read_excel(xls, 'sp_combos', na_filter=False)
    df_op = read_excel(xls, 'ops', na_filter=False)
    rest.resource = []
    for r in resources_df.itertuples(index=True):
        supported_profile = [
            p.Profile for p in df_profiles.itertuples(index=True)
            if p.Type == r.type
        ]
        # spreadsheet cells whose value is in none_list mean "not supplied"
        res = CS.CapabilityStatementRestResource(
            dict(type=r.type,
                 documentation=r.documentation
                 if r.documentation not in none_list else None,
                 versioning=r.versioning
                 if r.versioning not in none_list else None,
                 readHistory=r.readHistory
                 if r.readHistory not in none_list else None,
                 updateCreate=r.updateCreate
                 if r.updateCreate not in none_list else None,
                 conditionalCreate=r.conditionalCreate
                 if r.conditionalCreate not in none_list else None,
                 conditionalRead=r.conditionalRead
                 if r.conditionalRead not in none_list else None,
                 conditionalUpdate=r.conditionalUpdate
                 if r.conditionalUpdate not in none_list else None,
                 conditionalDelete=r.conditionalDelete
                 if r.conditionalDelete not in none_list else None,
                 referencePolicy=[
                     re.sub('\s+', '', x)
                     for x in r.referencePolicy.split(",") if x
                 ],
                 searchInclude=[
                     re.sub('\s+', '', x) for x in
                     r.shall_include.split(",") + r.should_include.split(",")
                     if x
                 ],
                 searchRevInclude=[
                     re.sub('\s+', '', x)
                     for x in r.shall_revinclude.split(",") +
                     r.should_revinclude.split(",") if x
                 ],
                 interaction=get_i(r.type, df_i),
                 searchParam=get_sp(r.type, df_sp, pre, canon),
                 operation=get_op(r.type, df_op),
                 supportedProfile=supported_profile))
        res.extension = get_conf(r.conformance)
        combos = {(i.combo, i.combo_conf)
                  for i in df_combos.itertuples(index=True)
                  if i.base == r.type}
        # convert list to lst of combo extension
        res.extension = res.extension + get_combo_ext(r.type, combos)
        rest.resource.append(res)
    rest.resource = sorted(rest.resource, key=lambda x: x.type)  # sort resources
    cs.rest = [rest]
    # add in conformance expectations for primitives
    # convert to dict since model can't handle primitive extensions
    # NOTE(review): `res` and `r` below are whatever the LAST loop iteration
    # left behind — confirm this only-last-resource handling is intentional.
    resttype_dict = res.as_json()
    for i in ['Include', 'RevInclude']:
        element = f'_search{i}'
        resttype_dict[element] = []
        for expectation in ['should', 'shall']:  # list all should includes first
            sp_attr = f'{expectation}_{i.lower()}'
            includes = getattr(r, sp_attr).split(',')
            for include in includes:
                if include not in none_list:
                    conf = get_conf(expectation.upper(), as_dict=True)
                    resttype_dict[element].append(conf)
        # drop the element entirely when no expectations were collected
        if not resttype_dict[element]:
            del (resttype_dict[element])
    print(resttype_dict)
    print(dumps(cs.as_json(), indent=3))
    # %% [markdown]
    print('.............validating..............')
    r = validate(cs)
    if (r.status_code != 200):
        print("Error: Unable to validate - status code {}".format(
            r.status_code))
    # write the validator's narrative as a standalone HTML page
    path = Path.cwd() / 'validation.html'
    path.write_text(
        f'<h1>Validation output</h1><h3>Status Code = {r.status_code}</h3> {r.json()["text"]["div"]}'
    )
    print(f"HTML webpage of validation saved to:\n\t {path}")
    # get from package (json) file in local .fhir directory
    si = get_si2(definitions_file)
    path_map = si['paths']
    path_map  # no-op expression — leftover notebook-cell echo
    in_path = ''
    in_file = 'R4capabilitystatement-server.j2'
    env = Environment(loader=FileSystemLoader(searchpath=in_path),
                      autoescape=select_autoescape(
                          ['html', 'xml', 'xhtml', 'j2', 'md']))
    env.filters['markdown'] = markdown
    template = env.get_template(in_file)
    # lookup maps consumed by the Jinja2 narrative template
    sp_map = {sp.code: sp.type for sp in df_sp.itertuples(index=True)}
    pname_map = {p.Profile: p.Name for p in df_profiles.itertuples(index=True)}
    sp_url_map = {sp.code: sp.rel_url for sp in df_sp.itertuples(index=True)}
    purl_map = {
        p.Profile: p.url if p.url not in none_list else p.Profile
        for p in df_profiles.itertuples(index=True)
    }
    # below taken from source code. Not clear what these are, perhaps from an updated spreadsheet source.
    #igname_map = {ig.canonical:ig.name for ig in df_igs.itertuples(index=True)}
    #igurl_map = {ig.canonical:ig.url if ig.url not in none_list else ig.canonical for ig in df_igs.itertuples(index=True)}
    #csname_map = {cs.canonical:cs.name for cs in df_capstatements.itertuples(index=True)}
    #csurl_map = {cs.canonical:cs.url if cs.url not in none_list else cs.canonical for cs in df_capstatements.itertuples(index=True)
    print(pname_map)
    rendered = template.render(cs=cs,
                               path_map=path_map,
                               pname_map=pname_map,
                               purl_map=purl_map,
                               sp_map=sp_map,
                               sp_url_map=sp_url_map)
    # print(HTML(rendered))
    # extract the narrative <div> from the rendered XHTML
    parser = etree.XMLParser(remove_blank_text=True)
    root = etree.fromstring(rendered, parser=parser)
    div = (etree.tostring(root[1][0], encoding='unicode', method='html'))
    narr = N.Narrative()
    narr.status = 'generated'
    narr.div = div
    cs.text = narr
    # save to file
    print('...........saving to file............')
    # path = Path.cwd() / f'capabilitystatement-{cs.id.lower()}.json'
    path = Path.cwd() / f'capabilitystatement-{meta.title.lower()}.json'
    path.write_text(dumps(cs.as_json(), indent=4))
    print(f"CapabilityStatement saved to:\n\t {path}")
def main():
    """Generate and attach an HTML narrative to an R4 CapabilityStatement.

    Loads a CapabilityStatement JSON file, resolves artifact display names
    (from an optional local artifacts folder and, when reachable, from
    hl7.org/fhir), renders the narrative with Jinja2, embeds it, writes the
    updated JSON, and validates the result.

    NOTE(review): indentation below is reconstructed from a collapsed
    source; nesting of a few statements should be confirmed against the
    original file.
    """
    if (len(sys.argv) < 2):
        print(
            "Error: missing json file - correct usage is:\n\tpython3 R4CapStatement_NarrativeMaker.py [json file] {[Artifacts Folder]}")
        return
    xls = sys.argv[1]  # NOTE(review): appears unused; in_json_file holds the same value
    in_json_file = sys.argv[1]
    artifacts_folder = ""
    if len(sys.argv) > 2:
        artifacts_folder = sys.argv[2]
    print('....Generating CapabilityStatement Narrative.....')
    with open(in_json_file, 'r') as h:
        pjs = json.load(h)
    capStatement = CS.CapabilityStatement(pjs)
    #print(dumps(capStatement.as_json(), indent=3))
    # %% [markdown]
    # CapabilityStatement loaded
    in_path = ''
    in_file = 'R4capabilitystatement-server.j2'
    env = Environment(
        loader=FileSystemLoader(searchpath=in_path),
        autoescape=select_autoescape(['html', 'xml', 'xhtml', 'j2', 'md'])
    )
    env.filters['markdown'] = markdown
    template = env.get_template(in_file)
    pname_map = {}
    igname_map = {}
    csname_map = {}
    # Load name maps from local artifact JSON files, if a folder was given
    if artifacts_folder != "":
        print('....Retrieving Artifact Names .....')
        artifacts_folder = os.path.abspath(artifacts_folder)
        struct_def_files = glob.glob(artifacts_folder + "/StructureDefinition-*.json")
        imp_guide_files = glob.glob(artifacts_folder + "/ImplementationGuide-*.json")
        cap_stmt_files = glob.glob(artifacts_folder + "/CapabilityStatement-*.json")
        pname_map = get_pname_map(struct_def_files)
        igname_map = get_igname_map(imp_guide_files)
        csname_map = get_csname_map(cap_stmt_files)
    # Check access to hl7.org/fhir
    r = get(fhir_base_url)
    if r.status_code == 200:
        print('....Retrieving Online Artifact Names .....')
        # Loop through all references in the CapabilityStatement and attempt
        # to retrieve the artifacts to load the names into the map
        # Instantiates
        if capStatement.instantiates:
            for url in capStatement.instantiates:
                if url not in csname_map:
                    csname_map[url] = get_url_title(url, "instantiates CapabilityStatement")
        # Imports
        if capStatement.imports:
            for url in capStatement.imports:
                if url not in csname_map:
                    csname_map[url] = get_url_title(url, "imports CapabilityStatement")
        # Implementation Guides
        if capStatement.implementationGuide:
            for url in capStatement.implementationGuide:
                if url not in igname_map:
                    igname_map[url] = get_url_title(url, "ImplementationGuide")
        # Iterate through rest resources
        if capStatement.rest:
            for rest in capStatement.rest:
                if rest.resource:
                    for resource in rest.resource:
                        if resource.profile:
                            url = resource.profile
                            if url not in pname_map:
                                pname_map[url] = get_url_title(url, resource.type + " profile")
                        if resource.supportedProfile:
                            for url in resource.supportedProfile:
                                if url not in pname_map:
                                    pname_map[url] = get_url_title(url, resource.type + " supported profile")
    else:
        print("Unable to connect to " + fhir_base_url + ". Will not attempt to load online artifacts to retrieve artifact names.")
    # the template tolerates '' placeholders for maps this script doesn't build
    rendered = template.render(cs=capStatement, path_map='', pname_map=pname_map,
                               purl_map='', sp_map='', csname_map=csname_map,
                               csurl_map='', sp_url_map='',
                               igname_map=igname_map, igurl_map='')
    #template.render(cs=cs, path_map=path_map, pname_map=pname_map, purl_map=purl_map, sp_map=sp_map,
    #                csname_map=csname_map, csurl_map=csurl_map, igname_map=igname_map, igurl_map=igurl_map)
    # debug copy of the raw rendered page
    tempPath = Path.cwd() / "test.html"
    tempPath.write_text(rendered)
    #print(rendered)
    # extract the narrative <div> from the rendered XHTML
    parser = etree.XMLParser(remove_blank_text=True)
    root = etree.fromstring(rendered, parser=parser)
    div = (etree.tostring(root[1][0], encoding='unicode', method='html'))
    print("\n####################################################\n")
    #print(etree.tostring(root[1][0], encoding='unicode', method='html'))
    print("\n####################################################\n")
    narr = N.Narrative()
    narr.status = 'generated'
    #div = re.sub('https://paciowg.github.io/advance-directives-ig/StructureDefinition-', 'SSSSSSSSSSSSSSSSS', div)
    # replace all of the supported profile urls in link text with just the profile name from inside the cononical url
    #######div = re.sub('">\(https?://.*/StructureDefinition-(.*)\.html\)</a>', '">\\1</a>', div)
    #div = re.sub('">\(https://paciowg.github.io/advance-directives-ig/StructureDefinition-PADI-(PersonalGoal.html)</a>', '\\1', div)
    # For some reason <br /> is being replaced with <br></br>, which is wrong. Convert it back.
    #div = div.replace("<br></br>", "<br />")
    #print(div)
    narr.div = div
    #print(dumps(narr.div, indent=3))
    # %% [markdown]
    capStatement.text = narr
    outfile = 'Narrative-' + in_json_file
    path = Path.cwd() / outfile
    tempOut = dumps(capStatement.as_json(), indent=4)
    # restore the dagger glyph that upstream tooling rendered as '+'
    tempOut = tempOut.replace("<sup>+</sup>", "<sup>†</sup>")
    #tempOut = tempOut.replace(“<sup>t</sup>”, “<sup>†</sup>”)
    #print(tempOut)
    path.write_text(tempOut)
    print('.............validating..............')
    r = validate(capStatement)
    if (r.status_code != 200):
        print("Error: Unable to validate - status code {}".format(r.status_code))
    # write the validator's narrative as a standalone HTML page
    path = Path.cwd() / 'validation.html'
    path.write_text(
        f'<h1>Validation output</h1><h3>Status Code = {r.status_code}</h3> {r.json()["text"]["div"]}')
    print(f"HTML webpage of validation saved to:\n\t {path}")
    # get from package (json) file in local .fhir directory
    # save to file
    print('...........done............')
def test_info_path_like(tmpdir):
    """exists() should accept a pathlib.Path, not only str paths."""
    target = Path(tmpdir / "test_info")
    target.write_text("fsspec")
    filesystem = LocalFileSystem()
    assert filesystem.exists(target)
def cacheDict(filePath, dictionary):
    '''utility function to cache a dictionary to disk as JSON
    '''
    Path(filePath).write_text(json.dumps(dictionary))
def _process_file(path: Path, func: Callable[[str], str]): orig_content = path.read_text() new_content = func(orig_content) path.write_text(new_content)