def update(self):
    """Refresh ``self.data`` / ``self.attributes`` from the SolarEdge Monitoring API."""
    from stringcase import snakecase

    try:
        details = self.api.get_details(self.site_id)['details']
    except KeyError:
        _LOGGER.error("Missing details data, skipping update")
        return
    except (ConnectTimeout, HTTPError):
        _LOGGER.error("Could not retrieve data, skipping update")
        return

    self.data = None
    self.attributes = {}
    plain_keys = ('peak_power', 'type', 'name', 'last_update_time',
                  'installation_date')
    for raw_key, value in details.items():
        attr_key = snakecase(raw_key)
        if attr_key == 'primary_module':
            # Flatten the nested module description into top-level attributes.
            for module_key, module_value in value.items():
                self.attributes[snakecase(module_key)] = module_value
        elif attr_key in plain_keys:
            self.attributes[attr_key] = value
        elif attr_key == 'status':
            # "status" becomes the sensor state rather than an attribute.
            self.data = value

    _LOGGER.debug("Updated SolarEdge details: %s, %s",
                  self.data, self.attributes)
def __call__(self):
    """Validate naming, ensure the benchmark pipeline exists, then generate submission files."""
    if self.benchmark_name != sc.snakecase(self.benchmark_name):
        raise ValueError(
            "This benchmark name is not in snake_case. See https://medium.com/better-programming/string-case-styles-camel-pascal-snake-and-kebab-case-981407998841 for more information."
        )
    if self.submission_name != sc.snakecase(self.submission_name):
        raise ValueError(
            "This submission name is not in snake_case. See https://medium.com/better-programming/string-case-styles-camel-pascal-snake-and-kebab-case-981407998841 for more information."
        )
    pipeline_dir = Path(self.root_path / "mcs_benchmark_data/pipelines" / self.benchmark_name)
    if not pipeline_dir.exists():
        raise FileNotFoundError(
            "The benchmark that corresponds to the given benchmark name does not have an existing pipeline. Please follow the steps in the README to add the pipeline before proceeding with the submission."
        )
    # Templates read from either the test or the real data directory.
    data_dir = "TEST_DATA_DIR_PATH" if self.using_test_data else "DATA_DIR_PATH"
    is_first_submission = self._make_submission_directories()
    self._create_files_from_template(
        data_dir=data_dir, is_first_submission=is_first_submission)
def resolve_accounts(self, info, account_ids):
    """GraphQL resolver: bulk-load accounts, optionally enriched with clan-membership fields."""
    requested_fields = get_fields(info).keys()
    # The client requests clan-specific fields in camelCase form.
    clan_specific_account_fields = list(map(camelcase, ('joined_at', 'role', 'role_i18n',)))
    request_clan_account_fields = [field for field in requested_fields if field in clan_specific_account_fields]
    loaders = info.context['data_loaders']
    # Batched account fetch: the loader takes a comma-joined id list.
    accounts_promise = loaders.account_loader.load(','.join(account_ids)).then(
        lambda result: list(result['data'].values())
    )
    accounts_data = accounts_promise.get()
    if request_clan_account_fields:
        # Distinct clan ids, skipping accounts that have no clan.
        clan_ids = {str(account_data['clan_id']) for account_data in accounts_data if account_data.get('clan_id')}
        clans_data = None
        if clan_ids:
            clans_promise = loaders.clan_loader.load(','.join(clan_ids)).then(lambda result: result['data'])
            clans_data = clans_promise.get()
        for account in accounts_data:
            account_clan_data = None
            if account.get('clan_id'):
                # The (assumed unique) member entry for this account within its clan.
                account_clan_data = [
                    member for member in clans_data[str(account['clan_id'])]['members']
                    if member['account_id'] == account['account_id']
                ][0]
            # Clan fields default to None for clanless accounts.
            account_clans_data = {
                snakecase(field): account_clan_data[snakecase(field)] if account_clan_data else None
                for field in request_clan_account_fields
            }
            account.update(account_clans_data)
    # Only fields the client actually asked for (camelCased) are passed to Account.
    return list(map(lambda data: Account(**{
        'clan_id': data['clan_id'],
        **{field: value for field, value in data.items() if camelcase(field) in requested_fields}
    }), accounts_data))
def __setattr__(self, name, value):
    """Set an attribute on the wrapped target, auto-translating snake_case names.

    Resolution order: the proxy's own bookkeeping attributes, then an exact
    name match on the target, then camelCase, then PascalCase. Successful
    resolutions are memoised when caching is enabled.
    """
    # The proxy's own attributes (stored under their name-mangled forms) go
    # through the normal object machinery instead of being forwarded.
    if name == "_Snaked__target" or name == "_Snaked__resolution_cache" or name == "_Snaked__use_cache":
        return super().__setattr__(name, value)
    # Fast path: a previous resolution already mapped this name.
    if self.__use_cache and name in self.__resolution_cache:
        return setattr(self.__target, self.__resolution_cache[name], value)
    if hasattr(self.__target, name):
        if self.__use_cache:
            self.__resolution_cache[name] = name
        return setattr(self.__target, name, value)
    else:
        camel_case = camelcase(name)
        if hasattr(self.__target, camel_case) and snakecase(name) == name:
            # snakecase(name) == name makes sure that we're only auto-converting snake-case names. Anything else may lead to unexpected results.
            if self.__use_cache:
                self.__resolution_cache[name] = camel_case
            return setattr(self.__target, camel_case, value)
        else:
            pascal_case = pascalcase(name)
            if hasattr(self.__target, pascal_case) and snakecase(name) == name:
                # snakecase(name) == name makes sure that we're only auto-converting snake-case names. Anything else may lead to unexpected results.
                if self.__use_cache:
                    self.__resolution_cache[name] = pascal_case
                return setattr(self.__target, pascal_case, value)
            else:
                # NOTE(review): returning self.__clear_cache from a *setter*
                # looks like a leftover copied from a __getattr__ twin —
                # confirm whether this branch is reachable/intended.
                if name == "__clear_cache":
                    return self.__clear_cache
                return setattr(
                    self.__target, name, value
                )  # go through the usual (now potentially error-throwing) routine of obtaining an attribute
def _generate_and_record_expert_action(self):
    """Generate the next greedy expert action and save it to the `expert_action_list`."""
    # Guard against generating twice for the same step.
    # NOTE(review): the guard compares num_steps_taken() to len(list) + 1,
    # while the assert below requires equality — confirm the off-by-one is
    # the intended "already generated" condition.
    if self.task.num_steps_taken() == len(self.expert_action_list) + 1:
        get_logger().warning(
            f"Already generated the expert action at step {self.task.num_steps_taken()}"
        )
        return
    # Invariant: one recorded expert action per step taken so far.
    assert self.task.num_steps_taken() == len(
        self.expert_action_list
    ), f"{self.task.num_steps_taken()} != {len(self.expert_action_list)}"
    expert_action_dict = self._generate_expert_action_dict()
    # e.g. "PickupObject" -> "pickup_object" to match the task's action names.
    action_str = stringcase.snakecase(expert_action_dict["action"])
    if action_str not in self.task.action_names():
        # Object-targeted actions are suffixed with the snake_cased object type
        # taken from the objectId prefix (text before the first "|").
        obj_type = stringcase.snakecase(
            expert_action_dict["objectId"].split("|")[0])
        action_str = f"{action_str}_{obj_type}"
    try:
        self.expert_action_list.append(
            self.task.action_names().index(action_str))
    except ValueError:
        # Record None so the list stays aligned with the step count.
        get_logger().error(
            f"{action_str} is not a valid action for the given task.")
        self.expert_action_list.append(None)
def update(self):
    """Update the data from the SolarEdge Monitoring API."""
    try:
        details = self.api.get_details(self.site_id)["details"]
    except KeyError as ex:
        raise UpdateFailed("Missing details data, skipping update") from ex

    self.data = None
    self.attributes = {}
    simple_keys = (
        "peak_power",
        "type",
        "name",
        "last_update_time",
        "installation_date",
    )
    for raw_key, value in details.items():
        attr_key = snakecase(raw_key)
        if attr_key == "primary_module":
            # Flatten the nested module description into top-level attributes.
            for module_key, module_value in value.items():
                self.attributes[snakecase(module_key)] = module_value
        elif attr_key in simple_keys:
            self.attributes[attr_key] = value
        elif attr_key == "status":
            # "status" becomes the entity state rather than an attribute.
            self.data = value

    _LOGGER.debug("Updated SolarEdge details: %s, %s", self.data, self.attributes)
def custom_context(view: DetailView):
    """Build a template context for *view*: its field values plus connected rows.

    NOTE(review): this function references `self.fields` / `self.connections`
    but takes no `self` parameter — it appears to be defined inside a method
    and to close over `self`; confirm against the enclosing scope.
    """
    obj = view.get_object()
    # Lightweight record describing a to-one foreign relation.
    Foreign = namedtuple('Foreign', ['model_name', 'id', 'to_one'])
    _custom_context = {
        'fields': [
            field.set(
                value=getattr(obj, field.name)
            ) if not field.is_foreign else field.set(
                value=Foreign(
                    model_name=field.type.field.related_model.__name__.lower(),
                    # e.g. related model "Author" -> obj.author_id
                    id=getattr(obj, field.type.field.related_model.__name__.lower() + '_id'),
                    to_one=getattr(obj, field.type.field.related_model.__name__.lower()),
                ))
            for field in self.fields
        ],
        'connections': []
    }
    for connection in self.connections:
        # Rows of the connecting model that point back at *obj*
        # (FK column name derived from the view's model name).
        connection_objects = connection.using.objects.all().filter(
            Q(**{snakecase(view.model.__name__) + '_id': obj.id})
        )
        # Annotate each row with display metadata for the template.
        for i in range(len(connection_objects)):
            connection_objects[i].model_name = connection.using.__name__.lower()
            connection_objects[i].connected_model_name = connection.to_one.__name__.lower()
            connection_objects[i].connected_object_id = \
                getattr(connection_objects[i], snakecase(connection.to_one.__name__) + '_id')
        _custom_context['connections'] += connection_objects
    return _custom_context
def request_members_additional_info(accounts, fields, loader):
    """Enrich each account dict in *accounts*, in place, with *fields* fetched via *loader*."""
    joined_ids = ','.join(str(member['account_id']) for member in accounts)
    fetched = loader.load(joined_ids).then(
        lambda result: list(result['data'].values())
    ).get()
    # Index the fetched records by account id for O(1) lookups below.
    by_id = {entry['account_id']: entry for entry in fetched}
    for member in accounts:
        member.update({
            snakecase(field): by_id[member['account_id']][snakecase(field)]
            for field in fields
        })
def test_load_commands(self):
    """Test we are able to load commands via load_commands."""
    self.core_module.load_commands([self.FakeCommand])
    expected = {
        stringcase.snakecase(ModuleCommand.__name__): ModuleCommand,
        stringcase.snakecase(self.FakeCommand.__name__): self.FakeCommand,
    }
    self.assertEqual(self.core_module.get_commands(), expected)
async def sync_account_data(account_id, catalog, selected_streams):
    """Sync core objects and/or reports for *account_id*, driven by the selected streams."""
    selected = set(selected_streams)
    # Core object streams are pluralised snake_case names; reports are plain snake_case.
    core_streams = {stringcase.snakecase(obj) + "s" for obj in TOP_LEVEL_CORE_OBJECTS}
    report_streams = {stringcase.snakecase(rpt) for rpt in reports.REPORT_WHITELIST}
    if core_streams & selected:
        LOGGER.info("Syncing core objects")
        sync_core_objects(account_id, selected_streams)
    if report_streams & selected:
        LOGGER.info("Syncing reports")
        await sync_reports(account_id, catalog)
def create_recipe(path: Union[str, pathlib.Path], dst: Union[str, pathlib.Path], runs: int = 1) -> WorkflowRecipe:
    """Create a workflow-recipe package at *dst* from the microstructure data at *path*.

    Copies the error metrics, summary and per-microstructure artifacts, then
    renders the skeleton package files (recipe, setup.py, MANIFEST.in) with
    the workflow name substituted in.

    Parameters
    ----------
    path: source workflow directory (must exist).
    dst:  parent directory for the generated package.
    runs: number of runs passed through to find_err.
    """
    # BUG FIX: normalize *path* first — the annotation admits str, and
    # everything below (joinpath, glob, .stem) requires a pathlib.Path.
    path = pathlib.Path(path).resolve(strict=True)

    err_savepath = path.joinpath("metric", "err.csv")
    err_savepath.parent.mkdir(exist_ok=True, parents=True)
    df = find_err(path, runs=runs)
    err_savepath.write_text(df.to_csv())

    wf_name = f"Workflow{camelcase(path.stem)}"
    dst = pathlib.Path(dst, snakecase(wf_name)).resolve()
    dst.mkdir(exist_ok=True, parents=True)

    dst_metric_path = dst.joinpath("metric", "err.csv")
    dst_metric_path.parent.mkdir(parents=True, exist_ok=True)
    shutil.copy(path.joinpath("metric", "err.csv"), dst_metric_path)

    summary_path = dst.joinpath("microstructures", "summary.json")
    summary_path.parent.mkdir(exist_ok=True, parents=True)
    shutil.copy(path.joinpath("summary.json"), summary_path)

    for filename in ["base_graph.pickle", "microstructures.json"]:
        # BUG FIX: the glob must interpolate the loop's filename; the previous
        # pattern was a literal placeholder and matched nothing useful.
        for p in path.glob(f"*/{filename}"):
            dst_path = dst.joinpath("microstructures", p.parent.stem, filename)
            dst_path.parent.mkdir(exist_ok=True, parents=True)
            shutil.copy(p, dst_path)

    def _render_skeleton(template_name: str, output_path: pathlib.Path) -> None:
        # Copy a skeleton file, substituting the workflow name in both casings
        # ("Skeleton" first so the later lowercase pass cannot clobber it).
        text = skeleton_path.joinpath(template_name).read_text()
        text = text.replace("Skeleton", wf_name)
        text = text.replace("skeleton", snakecase(wf_name))
        with this_dir.joinpath(output_path).open("w+") as fp:
            fp.write(text)

    _render_skeleton("recipe.py", dst.joinpath("__init__.py"))        # Recipe
    _render_skeleton("setup.py", dst.parent.joinpath("setup.py"))     # setup.py
    _render_skeleton("MANIFEST.in", dst.parent.joinpath("MANIFEST.in"))  # MANIFEST
def resolve_update_accessmod_analysis(_, info, **kwargs):
    """GraphQL mutation resolver: apply permitted field/fileset updates to an analysis.

    Returns a {success, analysis, errors} payload instead of raising;
    error codes: NAME_DUPLICATE, PERMISSION_DENIED, NOT_FOUND.
    """
    request: HttpRequest = info.context["request"]
    principal = request.user
    update_input = kwargs["input"]
    try:
        # Visibility-scoped lookup; raises Analysis.DoesNotExist when the
        # analysis is absent OR not visible to the principal.
        analysis = Analysis.objects.filter_for_user(principal).get_subclass(
            id=update_input["id"])
        changes = {}
        # Scalar inputs arrive camelCased from GraphQL; model fields are snake_case.
        for scalar_field in [
            "name",
            "invertDirection",
            "maxTravelTime",
            "movingSpeeds",
            "waterAllTouched",
            "algorithm",
            "knightMove",
            "stackPriorities",
        ]:
            if scalar_field in update_input:
                changes[snakecase(scalar_field)] = update_input[scalar_field]
        # Fileset references are re-fetched with the principal's visibility
        # filter before their ids are accepted.
        for fileset_field in [
            "landCoverId",
            "demId",
            "stackId",
            "transportNetworkId",
            "waterId",
            "barrierId",
            "healthFacilitiesId",
        ]:
            if fileset_field in update_input:
                fileset = Fileset.objects.filter_for_user(principal).get(
                    id=update_input[fileset_field])
                changes[snakecase(fileset_field)] = fileset.id
        if len(changes) > 0:
            try:
                analysis.update_if_has_perm(principal, **changes)
            except IntegrityError:
                # Unique-name collision on the analysis.
                return {
                    "success": False,
                    "analysis": analysis,
                    "errors": ["NAME_DUPLICATE"],
                }
            except PermissionDenied:
                return {
                    "success": False,
                    "analysis": analysis,
                    "errors": ["PERMISSION_DENIED"],
                }
        return {"success": True, "analysis": analysis, "errors": []}
    except Analysis.DoesNotExist:
        return {"success": False, "analysis": None, "errors": ["NOT_FOUND"]}
def post_datadog(dataset, tags, metric_name):
    """Push the first dataset entry's metric values to Datadog, tagged with *tags*.

    Always returns 0. Entries flagged 'METRIC DATA NOT FOUND' are skipped.
    """
    first = dataset[0]
    if first['metricName'] == 'METRIC DATA NOT FOUND':
        return 0
    # Normalise the metric path to snake_case and scrub tag values out of it.
    # NOTE(review): `metric` is computed but the send below uses metric_name —
    # presumably intentional; confirm.
    metric = stringcase.snakecase(first['metricPath'].replace("|", "")).replace("__", "_")
    for tag in tags:
        tag_value = tag.split(":")[1]
        metric = metric.replace(tag_value.lower(), "")
        metric = metric.replace(stringcase.snakecase(tag_value), "")
        metric = metric.replace("__", "_")
    stats = first['metricValues'][0]
    wanted = {'value', 'min', 'max', 'sum', 'count', 'standardDeviation'}
    for stat_name in stats:
        if stat_name in wanted:
            response = api.Metric.send(
                metric=metric_name + "." + stat_name,
                points=stats[stat_name],
                tags=tags)
            if DEBUG:
                print(response)
    return 0
def to_snakecase(self, data, **kwargs):
    """Return *data* with its keys — and the keys of one nested dict level — snake_cased."""
    converted = {}
    for key, value in data.items():
        if isinstance(value, dict):
            # One level deep only: deeper nesting is left untouched.
            value = {snakecase(inner_key): inner_value
                     for inner_key, inner_value in value.items()}
        converted[snakecase(key)] = value
    return converted
def __build_func(verb, args, kwargs, _locals):
    """Build a lambda ``f(self, ...)`` that forwards its arguments to ``self._<verb>(...)``.

    Parameter names are snake_cased in the generated signature; each argument
    is forwarded to the underlying call keyed by its *original* name.
    """
    params = ['self']
    # Required parameters, snake_cased.
    params += ['%s' % stringcase.snakecase(k) for k in args]
    # Defaulted parameters; string defaults are quoted so they survive the eval.
    params += [
        '%s=%s' % (stringcase.snakecase(k),
                   "'%s'" % v if isinstance(v, six.string_types) else v)
        for k, v in kwargs.items()
    ]
    # Everything forwarded into the target call: locals, args and kwargs.
    largs = list(_locals.keys()) + list(args) + list(kwargs.keys())
    # NOTE(review): eval-built source — safe only if verb/args/kwargs come from
    # trusted (non-user) input; confirm at the call sites.
    fn = eval(
        'lambda %s: self._%s(%s)' % (','.join(params), verb, ','.join(
            ['%s=%s' % (k, stringcase.snakecase(k)) for k in largs])),
        _locals)
    return fn
def wrapper(path, controller):
    """Resolve a 'ClassName@method' controller string to a bound method, then register the route.

    Non-string controllers are passed through unchanged.
    """
    if isinstance(controller, str):
        try:
            _class_name, _method_name = controller.split('@')
        # BUG FIX: narrowed the bare `except:` — split('@') can only raise
        # ValueError here (wrong number of '@'-separated parts), and a bare
        # except also swallowed KeyboardInterrupt/SystemExit.
        except ValueError as err:
            # BUG FIX: corrected the "calss@method" typo in the message.
            raise Exception(
                'Controller string should be like class@method') from err
        # Import controllers.<snake_case_module> and pull the class out of it.
        # todo: Read __import__ docs
        _module = __import__('controllers.' + stringcase.snakecase(_class_name))
        _sub_module = getattr(_module, stringcase.snakecase(_class_name))
        _class = getattr(_sub_module, _class_name)
        controller = getattr(_class(), _method_name)
    return func(path, controller)
def get_accounts(fields):
    """Fetch all accounts and project each onto *fields*, with values stringified.

    Field names are given camelCased; the API payload keys are snake_case.
    """
    payload = requests.get('http://testserver/accounts').json()
    return [
        {field: str(account[snakecase(field)]) for field in fields}
        for account in payload
    ]
def __init__(
    self,
    reduction: str = tf.keras.losses.Reduction.SUM_OVER_BATCH_SIZE,
    name: Optional[str] = None) -> None:
    """Initialise the loss; *name* defaults to the snake_cased class name."""
    if name is None:
        # e.g. class MeanPairwiseError -> "mean_pairwise_error"
        name = snakecase(self.__class__.__name__)
    super().__init__(reduction=reduction, name=name)
def sc(s):
    """Snake-case *s*, except all-uppercase strings, which are simply lowercased.

    FIXME: hack — otherwise stringcase.snakecase would convert ABC => a_b_c.
    """
    return s.lower() if s.isupper() else stringcase.snakecase(s)
def __new__(mcs, name, bases, d):
    """Metaclass constructor: inject naming metadata into the class dict.

    Adds ``name``, its snake_case form, and a numeric marker key before
    delegating class creation to the parent metaclass.
    """
    _d = d.copy()
    _d.update(
        dict(name=name,
             snake_case=stringcase.snakecase(name),
             # NOTE(review): keyword-argument names are not name-mangled, so
             # this key is stored literally as "__instance_check_key" —
             # presumably a sentinel for an isinstance-style check; confirm.
             __instance_check_key=12345678))
    return super().__new__(mcs, name, bases, _d)
def get_parameter_type(
    self, parameter: Dict[str, Union[str, Dict[str, str]]], snake_case: bool
) -> Argument:
    """Convert an OpenAPI parameter spec into an Argument for code generation.

    When *snake_case* is set, the python-side name is snake_cased and a
    FastAPI ``Query(..., alias=<original>)`` default preserves the wire name.
    """
    schema: JsonSchemaObject = JsonSchemaObject.parse_obj(parameter["schema"])
    format_ = schema.format or "default"
    type_ = json_schema_data_formats[schema.type][format_]
    name: str = parameter["name"]  # type: ignore
    orig_name = name
    if snake_case:
        name = stringcase.snakecase(name)
    field = DataModelField(
        name=name,
        data_type=type_map[type_],
        # Path parameters are always required, regardless of the spec flag.
        required=parameter.get("required") or parameter.get("in") == "path",
    )
    self.imports.extend(field.imports)
    if orig_name != name:
        # Renamed: alias back to the original wire name via fastapi.Query.
        default: Optional[
            str
        ] = f"Query({'...' if field.required else repr(schema.default)}, alias='{orig_name}')"
        self.imports.append(Import(from_='fastapi', import_='Query'))
    else:
        # Plain default literal, or None when the spec gives no default.
        default = repr(schema.default) if 'default' in parameter["schema"] else None
    return Argument(
        name=field.name,
        type_hint=field.type_hint,
        default=default,  # type: ignore
        default_value=schema.default,
        required=field.required,
    )
def get_open_api_data(open_api):
    """Group every operation in the OpenAPI spec under its snake_cased API name."""
    data = defaultdict(list)
    paths = open_api["paths"]
    for endpoint, methods in paths.items():
        for http_method in methods:
            operation = paths[endpoint][http_method]
            response_types = [
                resp.get("x-response-type")
                for resp in get_response_data(open_api, endpoint, http_method)
            ]
            # operationId is "<ApiName>.<method_name>".
            api_name, method_name = operation["operationId"].split(".")
            data[snakecase(api_name)].append({
                "endpoint": endpoint,
                # Drop falsy entries (operations without x-response-type).
                "response_type": [rt for rt in response_types if rt],
                "method_name": method_name,
                "arguments": get_parameter(open_api, endpoint, http_method),
                "http_method": http_method,
            })
    return dict(data)
def main(argv):
    """Entry point: resolve the task class named in argv, run it on Spark, and time it.

    Returns 0 on success; raises ValueError for an unknown task name.
    """
    args = parse_args(argv)

    # Load the task module/class by naming convention,
    # e.g. GoodpathTask -> tasks.goodpath_task
    name = stringcase.snakecase(args.task)
    try:
        task_module = importlib.import_module(f'tasks.{name}')
    except ImportError as err:
        raise ValueError(f'Invalid Task: {args.task} task not found') from err

    start = time.time()
    klass = getattr(task_module, args.task)
    sc = None  # BUG FIX: keep the finally clause safe if session creation fails
    try:
        spark = create_spark_session(name)
        sc = spark.sparkContext
        sc.setLogLevel('WARN')
        klass(spark).run()
    finally:
        # BUG FIX: dropped `except Exception as e: raise (e)` — it only
        # obscured the traceback; exceptions now propagate unchanged.
        if sc is not None:
            sc.stop()

    end = time.time()
    print(f'Task {name} took {end-start} seconds')
    return 0
def func(self, *args, model=model):
    """Collect args typed '<model>__<attr>' into kwargs and get-or-create a *model* object.

    NOTE(review): `model=model` binds the surrounding scope's `model` at
    definition time (the usual late-binding-closure workaround) — presumably
    this def sits inside a loop over models; confirm against the enclosing code.
    """
    kwargs = {}
    for arg in args:
        # arg.type is expected to look like "ClassName__attr".
        cls_name, _, attr_name = arg.type.partition('__')
        if cls_name.lower() == stringcase.snakecase(model.__name__):
            kwargs[attr_name.lower()] = arg.value
    return self.get_or_create_model_obj(model, **kwargs)
def function_name(self) -> str:
    """Derive a snake_case function name from operationId, or from the HTTP method + path."""
    if self.operationId:
        raw_name: str = self.operationId
    else:
        # e.g. "/pets/{petId}" -> "_pets_petId" (slashes to underscores, braces dropped)
        sanitized_path = re.sub(r'/{|/', '_', self.snake_case_path).replace('}', '')
        raw_name = f"{self.type}{sanitized_path}"
    return stringcase.snakecase(raw_name)
def convert_to_snakecase(data, delete_empty_values=True):
    """Recursively convert dict keys to snake_case.

    Values under keys in EXCEPTIONS_CHILD are left untouched; when
    *delete_empty_values* is true, falsy non-int dict values are dropped.
    Note: stringcase.snakecase('fooBarBaz') # => "_foo_bar_baz"
    """
    EXCEPTIONS_CHILD = ['dockerLabels']
    if isinstance(data, dict):
        converted = {}
        for original_key, value in data.items():
            # Skip empty values (but keep 0/False, which are ints).
            if delete_empty_values and not isinstance(value, int) and not value:
                continue
            if original_key in EXCEPTIONS_CHILD:
                new_value = value
            else:
                new_value = convert_to_snakecase(value, delete_empty_values)
            converted[stringcase.snakecase(original_key)] = new_value
        return converted
    if isinstance(data, list):
        return [convert_to_snakecase(item, delete_empty_values) for item in data]
    return data
def repl(m: Match) -> str:
    """Regex substitution callback: rewrite a matched call into snake_case method syntax."""
    method = stringcase.snakecase(m.group(1))
    call_args = rewrite_tr_args(m.group(2))
    return f".{method}({call_args})"
def GenerateServers(self, spec, url):
    """Render C++ broker header/source pairs for every server in the spec.

    Returns a GeneratedFiles aggregate listing the emitted files.
    """
    genFiles = GeneratedFiles()
    if 'servers' in spec:
        for serverName, serverObj in spec['servers'].items():
            # JSON-pointer-style path the resolver uses to derive file names.
            path = "{}#/servers/{}".format(url, serverName)
            headerFilename = self.resolver.cpp_get_header(path)
            sourceFilename = "{}.cpp".format(
                self.resolver.cpp_get_filename_base(path))
            # snakecase first so arbitrary server names pascalcase cleanly.
            name = stringcase.pascalcase(stringcase.snakecase(serverName))
            self.srcGenerator.render_template(
                template_name="broker.cpp.jinja2",
                output_name=sourceFilename,
                usings=self.resolver.cpp_get_usings(),
                ns=self.namespace,
                resolver=self.resolver,
                includes=[headerFilename],
                Name=name,
                server=serverObj)
            genFiles += GeneratedFiles(cppFile=sourceFilename)
            self.headerGenerator.render_template(
                template_name="broker.hpp.jinja2",
                output_name=headerFilename,
                ns=self.namespace,
                resolver=self.resolver,
                Name=name,
                server=serverObj)
            genFiles += GeneratedFiles(hppFile=headerFilename)
    return genFiles
def curie_lookup(curie: str) -> Optional[str]:
    """
    Given a CURIE, find its label.

    Looks in the predefined curie map first, then falls back to the
    preloaded ontologies held by the CurieLookupService. CURIEs from a
    handful of well-known prefixes are labelled by snake_casing their
    reference part instead.

    Parameters
    ----------
    curie: str
        A CURIE

    Returns
    -------
    Optional[str]
        The label corresponding to the given CURIE
    """
    lookup_service = get_curie_lookup_service()
    label: Optional[str] = None
    prefix = PrefixManager.get_prefix(curie)
    if prefix in {'OIO', 'OWL', 'owl', 'OBO', 'rdfs'}:
        # e.g. "owl:SomeThing" -> snakecase("SomeThing")
        label = stringcase.snakecase(curie.split(':', 1)[1])
    elif curie in lookup_service.curie_map:
        label = lookup_service.curie_map[curie]
    elif curie in lookup_service.ontology_graph:
        label = lookup_service.ontology_graph.nodes()[curie]['name']
    return label
def build(self):
    """Build a Prefect flow with one embulk sync task per Salesforce sobject, then run it."""
    tasks = []
    with open('sobjects.txt') as sobjects_file:
        for sobject in sobjects_file.readlines():
            target = sobject.strip()
            # Credentials and per-task settings are passed via the environment.
            env = {
                'SALESFORCE_USER': os.environ.get('SALESFORCE_USER'),
                'SALESFORCE_PASSWORD': os.environ.get('SALESFORCE_PASSWORD'),
                'SALESFORCE_TOKEN': os.environ.get('SALESFORCE_TOKEN'),
                'POSTGRES_USER': os.environ.get('POSTGRES_USER'),
                'POSTGRES_PASSWORD': os.environ.get('POSTGRES_PASSWORD'),
                'SALESFORCE_SOBJECT': target,
                # e.g. "Account" -> table "sf_account"
                'POSTGRES_TABLE': 'sf_' + snakecase(target),
            }
            command = 'java -jar /bin/embulk run /work/salesforce.yml.liquid -c /work/diff/{}.diff.yml'.format(
                target)
            task = ShellTask(name=target, command=command, log_stdout=True, return_all=True, env=env)
            tasks.append(task)
    # Tasks are injected at construction; the flow body itself is empty.
    with Flow("build source", tasks=tasks) as f:
        pass
    # NOTE(review): the run state is captured but unused and nothing is
    # returned — confirm whether callers are meant to receive it.
    out = f.run()
def _convert_json(d):
    """Convert the dict to turn all keys into snake case, recursively.

    Returns a new dict; nested dicts and dicts inside lists are converted,
    all other values are passed through unchanged.
    """
    new_d = {}
    for k, v in d.items():
        new_key = stringcase.snakecase(k)
        if isinstance(v, dict):
            new_d[new_key] = _convert_json(v)
        elif isinstance(v, list):
            # BUG FIX: the old code inspected v[0] to decide whether to recurse,
            # which raised IndexError on an empty list and crashed on lists
            # mixing dicts with other values. Convert element-wise instead.
            new_d[new_key] = [
                _convert_json(item) if isinstance(item, dict) else item
                for item in v
            ]
        else:
            new_d[new_key] = v
    return new_d
def test_snakecase(self):
    """snakecase should normalise camel/kebab/dot separators and handle edge inputs."""
    from stringcase import snakecase
    cases = [
        ('foo_bar', 'fooBar'),
        ('foo_bar', 'foo_bar'),
        ('foo_bar', 'foo-bar'),
        ('foo_bar', 'foo.bar'),
        ('_bar_baz', '_bar_baz'),
        ('_bar_baz', '.bar_baz'),
        ('', ''),
        ('none', None),
    ]
    for expected, raw in cases:
        self.assertEqual(expected, snakecase(raw))
def update(self):
    """Get the monitored data from firebase.

    Best-effort: any fetch/parse failure logs a warning and leaves state as-is.
    """
    from stringcase import camelcase, snakecase
    try:
        values = self.mgr.data(self.serial)
        # set state from data based on type of sensor
        # (payload keys are camelCased; self.type is the snake_case name)
        self._state = values.get(camelcase(self.type))
        # set units
        self.update_unit()
        # set basic attributes for all sensors
        self._attributes = {
            'time': values['time'],
            'localtime': values['localtime']
        }
        # set extended attributes for main probe sensors
        if self.type in [PROBE_1, PROBE_2]:
            for key, val in values.items():
                # add all attributes that don't contain any probe name
                # or contain a matching probe name
                if (
                        (self.type == PROBE_1 and key.find(PROBE_2) == -1) or
                        (self.type == PROBE_2 and key.find(PROBE_1) == -1)
                ):
                    if key == BATTERY_LEVEL:
                        key = ATTR_BATTERY_LEVEL
                    else:
                        # strip probe label and convert to snake_case
                        key = snakecase(key.replace(self.type, ''))
                    # add to attrs (skip empty keys and excluded ones)
                    if key and key not in EXCLUDE_KEYS:
                        self._attributes[key] = val
        # store actual unit because attributes are not converted
        self._attributes['unit_of_min_max'] = self._unit_of_measurement
    except (RequestException, ValueError, KeyError):
        _LOGGER.warning("Could not update status for %s", self.name)