def test_capitalcase(self):
    """capitalcase maps '' to '' and 'fooBar' to 'FooBar'."""
    from stringcase import capitalcase

    self.assertEqual('', capitalcase(''))
    self.assertEqual('FooBar', capitalcase('fooBar'))
def test_capitalcase(self):
    """Verify capitalcase on the empty string and on a camelCase word."""
    from stringcase import capitalcase

    for expected, given in (('', ''), ('FooBar', 'fooBar')):
        self.assertEqual(expected, capitalcase(given))
def CallbackName(attr: Attribute, cluster: Cluster, known_enum_types: List[matter_idl_types.Enum]) -> str:
    """Return the JNI attribute-callback class name for *attr*.

    Fields that resolve to a global type share one callback class per type;
    all other attributes get a cluster+attribute specific callback class.
    """
    global_name = FieldToGlobalName(attr.definition, known_enum_types)
    if global_name:
        return f'CHIP{capitalcase(global_name)}AttributeCallback'
    return f'CHIP{capitalcase(cluster.name)}{capitalcase(attr.definition.name)}AttributeCallback'
def nameScrub(text1, text2):
    """Normalize an irregularly cased first and last name (e.g. "BOB sMiTH").

    Each part is lowercased and then capital-cased via stringcase, and the
    two parts are joined with a space.

    NOTE(review): the returned string carries a trailing comma
    (e.g. "Bob Smith,"), which the original docstring's example omitted;
    the comma is preserved here since callers may rely on it — confirm.
    """
    cleaned = [
        stringcase.capitalcase(stringcase.lowercase(part))
        for part in (text1, text2)
    ]
    return " ".join(cleaned) + ","
def convertCase(self, data):
    """Convert the input widget's text to the case style named by *data*.

    The converted text is written to the result label and copied to the
    clipboard. An unrecognized *data* value leaves the text unchanged.
    """
    converters = {
        'Alpha Num Case': stringcase.alphanumcase,
        'Camel Case': stringcase.camelcase,
        'Capital Case': stringcase.capitalcase,
        'Const Case': stringcase.constcase,
        'Lower Case': stringcase.lowercase,
        'Pascal Case': stringcase.pascalcase,
        'Path Case': stringcase.pathcase,
        'Sentence Case': stringcase.sentencecase,
        'Snake Case': stringcase.snakecase,
        'Spinal Case': stringcase.spinalcase,
        'Title Case': stringcase.titlecase,
        'Trim Case': stringcase.trimcase,
        'Upper Case': stringcase.uppercase,
    }
    txt = self.txtInput.text()
    converter = converters.get(data)
    result = converter(txt) if converter is not None else txt
    self.lblResult.setText(result)
    pyperclip.copy(result)
def generate_variations(name):
    """Return casing variants of *name*: camelCase, CONST_CASE, Capitalcase."""
    camel = stringcase.camelcase(name)
    const = stringcase.uppercase(stringcase.snakecase(name))
    capital = stringcase.capitalcase(name)
    return [camel, const, capital]
def to_proto_map(self):
    """Serialize this model instance into a dict of proto-ready field values.

    Walks every field of the model, skipping fields whose value is None.
    Related (FK) fields are serialized by their primary key under
    '<name>_id'; choice fields are mapped to the index of the matching
    Choices entry; all other fields go through ProtoGen's per-type
    serializers.
    """
    # Imported locally to avoid an import cycle with the management command.
    from .management.commands.genproto import ProtoGen
    proto_map = {}
    for field in self._meta.get_fields():
        field_type = type(field)
        field_name = field.name
        # Unset fields are omitted from the proto map entirely.
        if getattr(self, field_name) is None:
            continue
        if field.related_model:
            # FK: serialize the related model's PK under '<name>_id',
            # using the serializer registered for the PK's field type.
            field_type = type(field.related_model._meta.pk)
            field_name = field.name + '_id'
            proto_map[field_name] = ProtoGen.type_map[
                field_type].serialize(getattr(self, field_name))
        elif field.choices:
            # Choice field: a Choices class is expected to live on this
            # instance under the CapitalCamel form of the field name;
            # store the index of the entry keyed by the field's value.
            camel_capital_name = stringcase.capitalcase(
                stringcase.camelcase(field.name))
            choices: Choices = getattr(self, camel_capital_name)
            proto_map[field_name] = choices.get_by_key(
                getattr(self, field.name)).index
        else:
            # Plain field: serialize via the type-specific serializer.
            proto_map[field_name] = ProtoGen.type_map[
                field_type].serialize(getattr(self, field_name))
    return proto_map
def __init__(self, m: re.Match):
    """Wrap a regex match and derive extra casings for '*_name' groups.

    For every named group whose key ends with '_name', two derived entries
    are added to the group dict: 'jni_<key>' (camelCase) and
    'cap_<key>' (Capitalcase).
    """
    self.match = m
    self.group_dict = m.groupdict()
    name_keys = [key for key in self.group_dict if key.endswith('_name')]
    for key in name_keys:
        value = self.group_dict[key]
        self.group_dict[f'jni_{key}'] = stringcase.camelcase(value)
        self.group_dict[f'cap_{key}'] = stringcase.capitalcase(value)
def cli_class(self, class_name:ParserArgType(type=str, required=True), cli_path:ParserArgType(type=str)=''):
    """Generate a new CLI class module from the bundled template.

    Reads resources/cli_class_template.txt (next to this file), substitutes
    the Capitalcased class name and the given CLI path, and writes the
    result to '<trimmed-class-name>.py' in the current directory.

    :param class_name: name of the class to generate (required).
    :param cli_path: CLI path inserted into the template; defaults to ''.
    """
    template_path = os.path.join(
        os.path.dirname(os.path.realpath(__file__)),
        "resources", "cli_class_template.txt")
    with open(template_path, 'r') as f:
        file_content = f.read().format(
            name=stringcase.capitalcase(class_name), cli_path=cli_path)
    # Fix: the output file was previously opened with bare open()/close(),
    # leaking the handle if write() raised; a context manager guarantees
    # the file is closed on every path.
    with open(f'{stringcase.trimcase(class_name)}.py', "w") as out:
        out.write(file_content)
def CallbackName(attr: Attribute, cluster: Cluster, context: TypeLookupContext) -> str:
    """Pick the read-callback class name for an attribute.

    Native/global types (e.g. Boolean/Float/Double/CharString) share a
    single callback class per type; any other type (e.g. a struct) gets a
    callback class named after its cluster and attribute.
    """
    global_name = FieldToGlobalName(attr.definition, context)
    if global_name:
        return f'CHIP{capitalcase(global_name)}AttributeCallback'
    cluster_part = capitalcase(cluster.name)
    attr_part = capitalcase(attr.definition.name)
    return f'CHIP{cluster_part}{attr_part}AttributeCallback'
def get_proto(
    api_model: models.Model
) -> Tuple[List[Tuple[str, List[api_models.Choice]]], List[Tuple[str, str]]]:
    """Collect proto enum and data-field descriptors for a Django model.

    Returns a pair (enums, data_fields):
      * enums — (ChoicesClassName, choice list) per choice field;
      * data_fields — (proto type name or ChoicesClassName, field name)
        per serializable field.
    Fields whose type is unknown to ProtoGen, or choice fields without a
    matching Choices class on the model, are skipped with a console
    message.
    """
    data_fields: List[Tuple[str, str]] = []
    enums: List[Tuple[str, List[api_models.Choice]]] = []
    # Map snake_cased class-attribute name -> Choices subclass found on
    # the model class body.
    choices_classes = {}
    for field, value in api_model.__dict__.items():
        if inspect.isclass(value) and issubclass(value, api_models.Choices):
            choices_classes[stringcase.snakecase(field)] = value
    for field in api_model._meta.get_fields():
        field_type = type(field)
        field_name = field.name
        if field.related_model:
            # FK fields are represented by their related PK as '<name>_id'.
            field_type = type(field.related_model._meta.pk)
            field_name += '_id'
        if field_type not in ProtoGen.type_map:
            print('WARNING:', field_type, 'it not in known types! Ignoring.')
            continue
        if field.choices:
            # todo, support imported enums (eg. related = 'app.models.ModelName.ChoicesClass')
            camel_capital_name = stringcase.capitalcase(
                stringcase.camelcase(field.name))
            # NOTE(review): lookup is by field.name, while choices_classes
            # keys are snake_cased attribute names — confirm these always
            # coincide for choice fields.
            if field.name not in choices_classes:
                print(
                    'ERROR:', field.name,
                    'has choices, but {} Choices class does not exist! Ignoring.'
                    .format(camel_capital_name))
                continue
            enums.append(
                (camel_capital_name, choices_classes[field.name].enum()))
            # Choice fields carry the Choices class name as their type.
            data_fields.append((camel_capital_name, field_name))
        else:
            data_fields.append((ProtoGen.type_map[field_type], field_name))
    return enums, data_fields
def case_conversion(source, style: StringStyle) -> str:
    """Case conversion of the input (usually a fully qualified VSS node
    including the path) into a supported string style representation.

    Args:
        source: Source string to apply conversion to.
        style: Target string style to convert source to.

    Returns:
        Converted source string according to provided string style;
        the source is returned unchanged for unrecognized styles.
    """
    converters = {
        StringStyle.ALPHANUM_CASE: stringcase.alphanumcase,
        StringStyle.CAMEL_CASE: camel_case,
        StringStyle.CAMEL_BACK: camel_back,
        StringStyle.CAPITAL_CASE: stringcase.capitalcase,
        StringStyle.CONST_CASE: stringcase.constcase,
        StringStyle.LOWER_CASE: stringcase.lowercase,
        StringStyle.PASCAL_CASE: stringcase.pascalcase,
        StringStyle.SENTENCE_CASE: stringcase.sentencecase,
        StringStyle.SNAKE_CASE: stringcase.snakecase,
        StringStyle.SPINAL_CASE: stringcase.spinalcase,
        StringStyle.TITLE_CASE: stringcase.titlecase,
        StringStyle.TRIM_CASE: stringcase.trimcase,
        StringStyle.UPPER_CASE: stringcase.uppercase,
    }
    converter = converters.get(style)
    return converter(source) if converter is not None else source
from ddlparse.ddlparse import DdlParse
import stringcase
import json

# Parse a dump of semicolon-separated DDL statements and, for each
# CREATE TABLE, print a logical id (alphanumeric Capital/camel form of the
# table name), the raw table name, and the columns as compact JSON.
#
# Fix: the input file is now closed via a context manager (it was left
# open), and the except block no longer references the undefined name
# `value` (which raised NameError inside the handler) — it prints the
# offending DDL statement instead.
with open("./vmdm_public.dmp", "r") as f:
    ddl_text = f.read()

ddls = ddl_text.split(";")
for index, ddl in enumerate(ddls):
    if ddl.find("CREATE TABLE") != -1:
        try:
            table = DdlParse().parse(ddl=ddl)
            logical_id = stringcase.alphanumcase(
                stringcase.capitalcase(stringcase.camelcase(table.name))
            )
            table_name = table.name
            columns = [
                {"Name": col.name.lower(), "Type": "string"}
                for col in table.columns.values()
            ]
            columns_json = json.dumps(columns, separators=(',', ':'))
            print("{logical_id} {table_name} {columns_json}".format(
                logical_id=logical_id,
                table_name=table_name,
                columns_json=columns_json))
        except Exception as e:
            print(index)
            print(ddl)
            print(e)
def _parse_value_to_rdf_type(value):
    """Map *value* to an RDF term in the LDAP namespace.

    The value is reduced to alphanumeric characters and Capitalcased
    before the term lookup.
    """
    cleaned = stringcase.alphanumcase(value)
    return LDAP.term(stringcase.capitalcase(cleaned))
def generate_c_files(png_file, header_file_name, cimplementation_file_name, verbose):
    """Serialize a PNG into a C header/implementation pair.

    The image is read as RGBA8888, converted pixel-by-pixel to RGB565
    (little-endian), LZ4-compressed, and emitted as a
    `compressedPixelData` byte array inside an `Ion::<PngName>` namespace.

    :param png_file: path to the source PNG.
    :param header_file_name: output path for the generated .h file.
    :param cimplementation_file_name: output path for the generated .cpp file.
    :param verbose: accepted for interface compatibility; not used here.
    """
    png_reader = png.Reader(filename=png_file)
    width, height, data, info = png_reader.asRGBA8()

    # Derive identifier casings from the bare file name (no dir, no ext).
    png_file = os.path.basename(png_file)
    png_file = os.path.splitext(png_file)[0]
    png_name_snake_case = stringcase.snakecase(png_file)
    png_name_upper_snake_case = stringcase.uppercase(png_name_snake_case)
    png_name_camel_case = stringcase.capitalcase(
        stringcase.camelcase(png_file))

    # Convert RGBA8888 to RGB565, two little-endian bytes per pixel.
    dataRGB565 = []
    for row in data:
        for i in range(0, len(row), 4):
            r, g, b, a = row[i], row[i + 1], row[i + 2], row[i + 3]
            dataRGB565.extend(rgba8882rgb565(r, g, b, a).to_bytes(2, 'little'))

    # Compress data.
    compressed_data = lz4.block.compress(bytes(dataRGB565),
                                         compression=12,
                                         mode='high_compression',
                                         store_size=False)
    compressed_data_len = len(compressed_data)

    # Generate header file. Fix: files were previously opened with bare
    # open()/close(), leaking handles if any write raised; context
    # managers guarantee closure on every path.
    with open(header_file_name, "w") as header_file:
        header_file.write(
            "// This file is auto-generated by PNG serializer. Do not edit manually.\n"
        )
        header_file.write("#ifndef " + png_name_upper_snake_case + "_H\n")
        header_file.write("#define " + png_name_upper_snake_case + "_H\n\n")
        header_file.write("#include <stdint.h>\n\n")
        header_file.write("namespace Ion {\n")
        header_file.write("namespace " + png_name_camel_case + " {\n\n")
        header_file.write("constexpr uint32_t k_compressedPixelSize = " +
                          str(compressed_data_len) + ";\n\n")
        header_file.write("constexpr uint32_t k_width = " + str(width) + ";\n\n")
        header_file.write("constexpr uint32_t k_height = " + str(height) + ";\n\n")
        header_file.write("extern const uint8_t compressedPixelData[];\n\n")
        header_file.write("}\n")
        header_file.write("}\n")
        header_file.write("#endif\n")

    # Generate cimplementation file.
    with open(cimplementation_file_name, "w") as cimplementation_file:
        cimplementation_file.write(
            "// This file is auto-generated by Inliner. Do not edit manually.\n")
        cimplementation_file.write("#include \"" + header_file_name + "\"\n\n")
        cimplementation_file.write("namespace Ion {\n")
        cimplementation_file.write("namespace " + png_name_camel_case + " {\n\n")
        cimplementation_file.write("// Compressed " + str(width * height) +
                                   " pixels into " + str(compressed_data_len) +
                                   " bytes (" +
                                   str(100.0 * compressed_data_len / len(dataRGB565)) +
                                   "% compression ratio)\n\n")
        cimplementation_file.write("const uint8_t compressedPixelData[" +
                                   str(compressed_data_len) + "] = {")
        for b in compressed_data:
            cimplementation_file.write(hex(b) + ", ")
        cimplementation_file.write("\n};\n\n")
        cimplementation_file.write("}\n")
        cimplementation_file.write("}\n")
def create_recipe(path_to_instances: Union[str, pathlib.Path],
                  savedir: pathlib.Path,
                  wf_name: str,
                  cutoff: int = 4000,
                  verbose: bool = False,
                  runs: int = 1):
    """
    Creates a recipe for a workflow application by automatically replacing
    custom information from the recipe skeleton.

    :param path_to_instances: name (for samples available in WfCommons) or
        path to the real workflow instances.
    :type path_to_instances: str or pathlib.Path
    :param savedir: path to save the recipe.
    :type savedir: pathlib.Path
    :param wf_name: name of the workflow application.
    :type wf_name: str
    :param cutoff: when set, only consider instances of smaller or equal
        sizes.
    :type cutoff: int
    :param verbose: when set, prints status messages.
    :type verbose: bool
    :param runs: number of times to repeat the err calculation process
        (due to randomization).
    :type runs: int
    """
    # The Capitalcased workflow name is used for the generated Recipe class.
    camelname = capitalcase(wf_name)
    savedir.mkdir(exist_ok=True, parents=True)
    dst = pathlib.Path(savedir, f"{savedir.stem}_recipes", wf_name).resolve()
    dst.mkdir(exist_ok=True, parents=True)

    # Extract microstructures from the instances into the recipe tree.
    if verbose:
        print(f"Finding microstructures")
    microstructures_path = dst.joinpath("microstructures")
    save_microstructures(path_to_instances, microstructures_path,
                         img_type=None, cutoff=cutoff)

    # Compute the error table over `runs` repetitions and save it as CSV.
    if verbose:
        print(f"Generating Error Table")
    err_savepath = microstructures_path.joinpath("metric", "err.csv")
    err_savepath.parent.mkdir(exist_ok=True, parents=True)
    df = find_err(microstructures_path, runs=runs)
    err_savepath.write_text(df.to_csv())

    # Recipe: specialize the skeleton source for this workflow.
    with skeleton_path.joinpath("recipe.py").open() as fp:
        skeleton_str = fp.read()
    if verbose:
        print(f"Generating Recipe Code")
    skeleton_str = skeleton_str.replace("Skeleton", camelname)
    skeleton_str = skeleton_str.replace("skeleton", wf_name)
    with this_dir.joinpath(dst.joinpath("recipe.py")).open("w+") as fp:
        fp.write(skeleton_str)

    # recipe __init__.py
    dst.joinpath("__init__.py").write_text(
        f"from .recipe import {camelname}Recipe")

    # setup.py: specialize the packaging skeleton with the package name.
    with skeleton_path.joinpath("setup.py").open() as fp:
        skeleton_str = fp.read()
    skeleton_str = skeleton_str.replace("PACKAGE_NAME", savedir.stem)
    with this_dir.joinpath(
            dst.parent.parent.joinpath("setup.py")).open("w+") as fp:
        fp.write(skeleton_str)

    # __init__.py: append this recipe's export to the package init.
    dst.parent.joinpath("__init__.py").touch(exist_ok=True)
    with dst.parent.joinpath("__init__.py").open("a") as fp:
        fp.write(f"from .{wf_name} import {camelname}Recipe\n")

    # MANIFEST: include the microstructure data in the distribution.
    with dst.parent.parent.joinpath("MANIFEST.in").open("a+") as fp:
        fp.write(
            f"graft {savedir.stem}_recipes/{wf_name}/microstructures/**\n")
        fp.write(f"graft {savedir.stem}_recipes/{wf_name}/microstructures\n")
        fp.write(f"graft {savedir.stem}_recipes/{wf_name}\n")

    # workflow_recipes: register the recipe entry point.
    with this_dir.joinpath(dst.parent.parent.joinpath(
            "workflow_recipes.txt")).open("a+") as fp:
        fp.write(
            f"{wf_name}_recipe = {savedir.stem}_recipes.{wf_name}:{camelname}Recipe\n"
        )

    # Summarize task-type statistics alongside the recipe.
    if verbose:
        print(f"Analyzing Workflow Statistics")
    stats = analyzer_summary(path_to_instances)
    dst.joinpath("task_type_stats.json").write_text(json.dumps(stats))
def component_name(self) -> str:
    """Return the component name: the Capitalcased name suffixed with 'Button'."""
    return f"{stringcase.capitalcase(self.name)}Button"