def pixel_drill(task_id=None):
    parameters = parse_parameters_from_task(task_id=task_id)
    validate_parameters(parameters, task_id=task_id)
    task = CustomMosaicToolTask.objects.get(pk=task_id)
    if task.status == "ERROR":
        return None

    dc = DataAccessApi(config=task.config_path)
    single_pixel = dc.get_stacked_datasets_by_extent(**parameters).isel(latitude=0, longitude=0)
    clear_mask = task.satellite.get_clean_mask_func()(single_pixel)
    single_pixel = single_pixel.where(single_pixel != task.satellite.no_data_value)

    dates = single_pixel.time.values
    if len(dates) < 2:
        task.update_status("ERROR", "There is only a single acquisition for your parameter set.")
        return None

    exclusion_list = ['satellite', 'pixel_qa']
    plot_measurements = [band for band in single_pixel.data_vars if band not in exclusion_list]

    datasets = [single_pixel[band].values.transpose() for band in plot_measurements] + [clear_mask]
    data_labels = [stringcase.titlecase("{} Units".format(band)) for band in plot_measurements] + ["Clear"]
    titles = [stringcase.titlecase("{} Band".format(band)) for band in plot_measurements] + ["Clear Mask"]
    style = ['r-o', 'g-o', 'b-o', 'c-o', 'm-o', 'y-o', '.']

    task.plot_path = os.path.join(task.get_result_path(), "plot_path.png")
    create_2d_plot(task.plot_path, dates=dates, datasets=datasets, data_labels=data_labels, titles=titles, style=style)

    task.complete = True
    task.update_status("OK", "Done processing pixel drill.")
def send(to, title, desc, status, workType, points=0, url=None):
    workType = titlecase(workType.lower())
    status = titlecase(status.lower())
    embed = {
        "color": 3447003,
        "title": status + " - " + workType + " " + title,
        "description": workType + " details: " + ((desc[:75] + "..") if len(desc) > 75 else desc),
        "fields": [
            {
                "name": "Points",
                "value": points,
            },
        ],
    }
    if url:
        embed["url"] = url
    data = {
        "embeds": [embed],
        "username": "******",
        "avatar_url": "https://lh3.googleusercontent.com/jdcCuHVB2NoCEdDqj1fNV05G8MC3TyBX6jY93v_Sba2ViqrVXIW-efKjVk3BR-41VhwV8gD0x0EHmlXK2UqvCCQLDTqOs2N1AXjppA=w1440-v1",
    }
    print(requests.post(to, json=data).text)
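A hypothetical invocation of send, assuming a valid Discord webhook endpoint; the URL and work item details below are placeholders, not values from the source:

WEBHOOK_URL = "https://discord.com/api/webhooks/<id>/<token>"  # placeholder webhook URL
send(WEBHOOK_URL, "Fix login flow", "Resolves the session timeout on the login page.",
     "in progress", "bug fix", points=3)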
def _export_tags(self) -> list:
    # todo https://github.com/OAI/OpenAPI-Specification/blob/master/versions/3.0.0.md#tagObject
    tags = []
    for item in self.models:
        if item.description:
            tags.append({
                'name': stringcase.titlecase(item.name),
                'description': item.description
            })
        else:
            tags.append({
                'name': stringcase.titlecase(item.name)
            })
    return tags
def process_metrics(self, metric):
    """Process metric text"""
    metric = stringcase.titlecase(metric)
    metric = self.change_to_acronym(metric)
    words = metric.split()
    metric = " ".join([self.process_words(w) for w in words])
    return metric
def add_text_plugin_to_placeholder(self, page, slot, body=None):
    placeholder = page.placeholders.get(slot=slot)
    if not body:
        body = "<h1>{} ({})</h1>\n<p>{}</p>\n".format(
            self.get_page_title(page), stringcase.titlecase(slot), lorem.paragraph())
    api.add_plugin(placeholder, "TextPlugin", "de", body=body)
def convertCase(self, data):
    txt = self.txtInput.text()
    result = txt
    if data == 'Alpha Num Case':
        result = stringcase.alphanumcase(txt)
    elif data == 'Camel Case':
        result = stringcase.camelcase(txt)
    elif data == 'Capital Case':
        result = stringcase.capitalcase(txt)
    elif data == 'Const Case':
        result = stringcase.constcase(txt)
    elif data == 'Lower Case':
        result = stringcase.lowercase(txt)
    elif data == 'Pascal Case':
        result = stringcase.pascalcase(txt)
    elif data == 'Path Case':
        result = stringcase.pathcase(txt)
    elif data == 'Sentence Case':
        result = stringcase.sentencecase(txt)
    elif data == 'Snake Case':
        result = stringcase.snakecase(txt)
    elif data == 'Spinal Case':
        result = stringcase.spinalcase(txt)
    elif data == 'Title Case':
        result = stringcase.titlecase(txt)
    elif data == 'Trim Case':
        result = stringcase.trimcase(txt)
    elif data == 'Upper Case':
        result = stringcase.uppercase(txt)
    self.lblResult.setText(result)
    pyperclip.copy(result)
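For reference, a minimal standalone sketch of what several of the stringcase conversions dispatched above produce for a snake_case input; expected outputs follow stringcase's documented behavior:

import stringcase

sample = "foo_bar"
print(stringcase.camelcase(sample))   # fooBar
print(stringcase.pascalcase(sample))  # FooBar
print(stringcase.constcase(sample))   # FOO_BAR
print(stringcase.spinalcase(sample))  # foo-bar
print(stringcase.pathcase(sample))    # foo/bar
print(stringcase.titlecase(sample))   # Foo Bar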
def get_schemes():
    schemes = []
    modules = [controls]
    for item in pkgutil.iter_modules([os.path.dirname(plugins.__file__)]):
        modules.append(import_module(f"frictionless.plugins.{item.name}"))
    for module in modules:
        # discover Control classes by their "<Name>Control" naming convention
        for name, Control in vars(module).items():
            match = re.match(r"(.+)Control", name)
            if not match:
                continue
            name = match.group(1)
            data = parse(Control.__doc__)
            scheme = {"name": name, "options": []}
            for param in data.params:
                if param.arg_name.startswith("descriptor"):
                    continue
                type = param.type_name
                text = param.description.capitalize()
                name = stringcase.titlecase(param.arg_name.replace("?", ""))
                scheme["options"].append({"name": name, "text": text, "type": type})
            schemes.append(scheme)
    return schemes
def get_attribute_list(attributes):
    atts = [{
        'name': 'id',
        'type': 'int'
    }, {
        'name': 'createdAt',
        'type': 'String'
    }]
    # convert mm data type to Dart data type
    # (atype is titlecased before lookup, so the key for booleans is 'Bool', not 'bool')
    data_types = {'String': 'String', 'Int': 'int', 'Double': 'double', 'Bool': 'bool'}
    for attribute in attributes:
        try:
            aname, atype = attribute['@TEXT'].split(':')
            atype = atype.lstrip()
            atype = stringcase.titlecase(atype)
            atype = data_types[atype]
        except ValueError:
            aname = attribute['@TEXT']
            atype = 'String'
        aname = stringcase.lowercase(aname)
        aname = utils.to_camel_case(aname)
        atts.append({'name': aname, 'type': atype})
    return atts
def build_formats_reference():
    TEMPLATE = """
    ---
    title: Formats Reference
    ---

    It's a formats reference supported by the main Frictionless package. If you
    have installed external plugins, there can be more formats available. Below
    we're listing a format group name (or a parser name) like Excel, which is
    used, for example, for `xlsx`, `xls` etc formats. Options can be used for
    creating dialects, for example, `dialect = ExcelDialect(sheet=1)`.

    {% for format in formats %}
    ## {{ format.name }}

    {% if format.options %}
    {% for option in format.options %}
    ### {{ option.name }}

    > Type: {{ option.type }}

    {{ option.text }}
    {% endfor %}
    {% else %}
    There are no options available.
    {% endif %}
    {% endfor %}
    """

    # Input
    formats = []
    modules = []
    for item in pkgutil.iter_modules([os.path.dirname(plugins.__file__)]):
        modules.append(import_module(f"frictionless.plugins.{item.name}"))
    for module in modules:
        for name, Dialect in vars(module).items():
            match = re.match(r"(.+)Dialect", name)
            if not match:
                continue
            name = match.group(1)
            data = parse(Dialect.__doc__)
            format = {"name": name, "options": []}
            for param in data.params:
                if param.arg_name.startswith("descriptor"):
                    continue
                type = param.type_name
                text = param.description.capitalize()
                name = stringcase.titlecase(param.arg_name.replace("?", ""))
                format["options"].append({"name": name, "text": text, "type": type})
            formats.append(format)

    # Output
    template = Template(inspect.cleandoc(TEMPLATE))
    document = template.render(formats=formats).strip()
    write_file(os.path.join("docs", "references", "formats-reference.md"), document)
    print("Built: Formats Reference")
def get_applicant_details(self, advisor_id):
    applicant = AdvisorApplicant.query.get(advisor_id)
    if not applicant:
        return jsonify({'error': 'Advisor not found'}), 404
    return jsonify({titlecase(k): v for k, v in applicant.to_json().items()}), 200
def build_schemes_reference():
    TEMPLATE = """
    ---
    title: Schemes Reference
    ---

    It's a schemes reference supported by the main Frictionless package. If you
    have installed external plugins, there can be more schemes available. Below
    we're listing a scheme group name (or a loader name) like Remote, which is
    used, for example, for `http`, `https` etc schemes. Options can be used for
    creating controls, for example, `control = RemoteControl(http_timeout=1)`.

    {% for scheme in schemes %}
    ## {{ scheme.name }}

    {% if scheme.options %}
    {% for option in scheme.options %}
    ### {{ option.name }}

    > Type: {{ option.type }}

    {{ option.text }}
    {% endfor %}
    {% else %}
    There are no options available.
    {% endif %}
    {% endfor %}
    """

    # Input
    schemes = []
    modules = []
    for item in pkgutil.iter_modules([os.path.dirname(plugins.__file__)]):
        modules.append(import_module(f"frictionless.plugins.{item.name}"))
    for module in modules:
        for name, Control in vars(module).items():
            match = re.match(r"(.+)Control", name)
            if not match:
                continue
            name = match.group(1)
            data = parse(Control.__doc__)
            scheme = {"name": name, "options": []}
            for param in data.params:
                if param.arg_name.startswith("descriptor"):
                    continue
                type = param.type_name
                text = param.description.capitalize()
                name = stringcase.titlecase(param.arg_name.replace("?", ""))
                scheme["options"].append({"name": name, "text": text, "type": type})
            schemes.append(scheme)

    # Output
    template = Template(inspect.cleandoc(TEMPLATE))
    document = template.render(schemes=schemes).strip()
    write_file(os.path.join("docs", "references", "schemes-reference.md"), document)
    print("Built: Schemes Reference")
def arcgis(catalog_record, url):
    if 'FeatureServer' in url:
        service_name = url.split('/services/')[-1].split('/FeatureServer')[0]
    else:
        service_name = url.split('/services/')[-1].split('/MapServer')[0]
    headers = {
        'Content-Type': 'application/json',
        'Accept': 'application/json',
    }
    response = requests.get(url + "?f=pjson", headers=headers)
    md = response.json()

    from pprint import pprint
    pprint(md.keys())
    assert 'currentVersion' in md, 'currentVersion not found in json response'

    if not catalog_record.id:
        catalog_record.save()

    for index, column in enumerate(md.get('fields', [])):
        rc = RecordColumn.objects.get_or_create(
            catalog_record=catalog_record, field_name=column['name'])[0]
        rc.data_type = column['type']
        rc.label = column['alias']
        rc.catalog_record_order = index
        # rc.render_type = column['domain'] ? length?
        rc.save()

    dataset = Dataset.objects.get_or_create(catalog_record=catalog_record)[0]
    dataset.last_sync = datetime.now()
    dataset.sourced_meta_data = md
    dataset.title = md.get('name', stringcase.titlecase(service_name))
    dataset.description = md.get('description', '') or md.get('serviceDescription', '')
    # dataset.issued = datetime.utcfromtimestamp(md['publicationDate'])
    # dataset.modified = datetime.utcfromtimestamp(md['indexUpdatedAt'])
    dataset.identifier = md.get('id', service_name)
    # if 'tags' in md:
    #     dataset.keyword = ', '.join(md['tags'])
    dataset.theme = md.get('category', '')
    dataset.save()

    # only record one distribution: ArcGIS
    dist = Distribution.objects.get_or_create(dataset=dataset)[0]
    dist.license = md.get('license', '')
    dist.rights = md.get('copyrightText', '')
    dist.download_url = url
    dist.access_url = catalog_record.landing_page or catalog_record.distribution_fields
    dist.media_type = 'application/json'
    dist.format = 'arcgis'
    # blobFilename?
    dist.save()
def test_titlecase(self):
    from stringcase import titlecase
    eq = self.assertEqual
    eq('Foo Bar', titlecase('fooBar'))
    eq('Foo Bar', titlecase('foo_bar'))
    eq('Foo Bar', titlecase('foo-bar'))
    eq('Foo Bar', titlecase('foo.bar'))
    eq(' Bar Baz', titlecase('_bar_baz'))
    eq(' Bar Baz', titlecase('.bar_baz'))
    eq('', titlecase(''))
    eq('None', titlecase(None))
def print_state_message(self, msg):
    if msg['job_id'] not in self.ids_to_name:
        return
    time = util.str_timestamp(msg['time'])
    state = "[%s] --> %s" % (time, msg['state'])
    output = "%s%s" % (self._message_prefix(msg), color_string(state, color="bold_white"))
    print(output)
    for key in ['reason', 'message', 'exit_code']:
        if msg.get(key, None) is not None:
            message = "[{}] {}: {}".format(time, stringcase.titlecase(key), msg[key])
            print("{}{}".format(self._message_prefix(msg), color_string(message, color="bold_white")))
def create_new_dashboard(self, measurement):
    with open('templates/dashboard_template.json') as dashboardfile:
        self.dashboard = json.load(dashboardfile)
    self.current_id = 1
    self.dashboard['title'] = stringcase.titlecase(measurement)
    self.dashboard['uid'] = measurement
    self.dashboard['tags'] = [self.generated_tag]
    self.history[measurement] = {}
    # With every new dashboard we add a summary panel first
    self.add_summary_panel_to_dashboard(measurement)
def pixel_drill(task_id=None):
    parameters = parse_parameters_from_task(task_id=task_id)
    validate_parameters(parameters, task_id=task_id)
    task = FractionalCoverTask.objects.get(pk=task_id)
    if task.status == "ERROR":
        return None

    dc = DataAccessApi(config=task.config_path)
    single_pixel = dc.get_stacked_datasets_by_extent(**parameters)
    clear_mask = task.satellite.get_clean_mask_func()(single_pixel.isel(latitude=0, longitude=0))
    single_pixel = single_pixel.where(single_pixel != task.satellite.no_data_value)

    dates = single_pixel.time.values
    if len(dates) < 2:
        task.update_status("ERROR", "There is only a single acquisition for your parameter set.")
        return None

    def _apply_band_math(ds, idx):
        # mask out water manually. Necessary for frac. cover.
        wofs = wofs_classify(ds, clean_mask=clear_mask[idx], mosaic=True)
        clear_mask[idx] = False if wofs.wofs.values[0] == 1 else clear_mask[idx]
        fractional_cover = frac_coverage_classify(ds, clean_mask=clear_mask[idx], no_data=task.satellite.no_data_value)
        return fractional_cover

    fractional_cover = xr.concat(
        [
            _apply_band_math(single_pixel.isel(time=data_point, drop=True), data_point)
            for data_point in range(len(dates))
        ],
        dim='time')
    fractional_cover = fractional_cover.where(fractional_cover != task.satellite.no_data_value).isel(
        latitude=0, longitude=0)

    exclusion_list = []
    plot_measurements = [band for band in fractional_cover.data_vars if band not in exclusion_list]

    datasets = [fractional_cover[band].values.transpose() for band in plot_measurements] + [clear_mask]
    data_labels = [stringcase.titlecase("%{}".format(band)) for band in plot_measurements] + ["Clear"]
    titles = [
        'Bare Soil Percentage', 'Photosynthetic Vegetation Percentage',
        'Non-Photosynthetic Vegetation Percentage', 'Clear Mask'
    ]
    style = ['ro', 'go', 'bo', '.']

    task.plot_path = os.path.join(task.get_result_path(), "plot_path.png")
    create_2d_plot(task.plot_path, dates=dates, datasets=datasets, data_labels=data_labels, titles=titles, style=style)

    task.complete = True
    task.update_status("OK", "Done processing pixel drill.")
async def nick_maker(self, guild: discord.Guild, old_shit_nick):
    old_shit_nick = self.strip_accs(old_shit_nick)
    new_cool_nick = re.sub(r"[^a-zA-Z0-9 \n.]", "", old_shit_nick)
    new_cool_nick = " ".join(new_cool_nick.split())
    new_cool_nick = stringcase.lowercase(new_cool_nick)
    new_cool_nick = stringcase.titlecase(new_cool_nick)
    default_name = await self.config.guild(guild).new_custom_nick()
    if len(new_cool_nick.replace(" ", "")) <= 1 or len(new_cool_nick) > 32:
        if default_name == "random":
            new_cool_nick = await self.get_random_nick(2)
        elif default_name:
            new_cool_nick = default_name
        else:
            new_cool_nick = "simp name"
    return new_cool_nick
def set_levels_dropdown_options(variable_dropdown_value):
    # Check whether single or multi variable
    single_var, multi_var = sort_variables_by_type(philly)
    if variable_dropdown_value in multi_var:
        levels = get_multi_var_levels(philly, variable_dropdown_value)
        options = [{'label': titlecase(e), 'value': e} for e in levels]
        value = options[0]['value']
        return options, value, {'display': 'block'}
    else:
        return [], [], {'display': 'none'}
def handle_many_to_many_field(field: models.ManyToManyField) -> dict:
    data = {
        "name": ("m2m" + (field.model.class_name()) + stringcase.titlecase(field.name))[:-1],
        "columns": [
            {
                'column_name': stringcase.snakecase(field.model.class_name()) + "_id",
                'column_type': 'integer',
                'column_settings': [f"ref: > {field.model.class_name()}.id"]
            },
            {
                'column_name': stringcase.snakecase(field.related_model.class_name()) + "_id",
                'column_type': 'integer',
                'column_settings': [f"ref: > {field.related_model.class_name()}.id"]
            }
        ],
        "many_to_many": []
    }
    data['name'] = data['name'].replace(" ", "")
    return data
def plot_hists_from_dir(model_root, columns=10, scale=20):
    """Plot all the histories in `model_root`.

    For each named property, creates a plot with all the model histories
    that had that named property (loss or metric).

    :returns: fig, axes
    """
    history_fnames = glob(join(model_root, '*history.json'))
    logger.debug(f"history_fnames: {history_fnames}")
    if not history_fnames:
        logger.warning(f"no history saved at {model_root}")
        return None, None

    hist_data = {}  # {property_name -> {model_name -> [values]}}
    for fname in history_fnames:
        hist = utils.json_load(fname)
        model_name = pascalcase(basename(fname).replace('_history.json', ''))
        for prop_name, values in hist.items():
            if not isinstance(values, list):
                continue
            if hist_data.get(prop_name) is None:
                hist_data[prop_name] = {}
            hist_data[prop_name][model_name] = values

    columns = min(columns, len(hist_data))
    rows = max(1, len(hist_data) // columns)
    fig, axes = plt.subplots(rows, columns, squeeze=False, figsize=(scale, scale * rows / columns))
    for i, (prop_name, prop_data) in enumerate(hist_data.items()):
        ax = axes[i // columns, i % columns]
        for model_name, values in prop_data.items():
            ax.plot(values, '-', label=model_name)
        ax.set_title(titlecase(prop_name))
        ax.set_xlabel('Epoch')
        ax.set_ylabel('Loss')
    fig.suptitle("Training")
    plt.legend()
    return fig, axes
def set_levels_dropdown_options(df_store, variable_dropdown_value):
    # Load city data
    city_data = json.loads(df_store)
    # Check whether single or multi variable
    single_var, multi_var = sort_variables_by_type(city_data)
    if variable_dropdown_value in multi_var:
        levels = get_multi_var_levels(city_data, variable_dropdown_value)
        options = [{'label': titlecase(e), 'value': e} for e in levels]
        value = options[0]['value']
        return options, value, {'display': 'block'}
    else:
        return [], [], {'display': 'none'}
def main(argv):
    if len(sys.argv) != 2:
        print('{} <ion-manifest.json>'.format(sys.argv[0]))
        sys.exit(2)
    manifestPath = sys.argv[1]
    with open(manifestPath, 'r') as manifestFile:
        manifest = json.loads(manifestFile.read())
    names = []
    cases = []
    codePoints = []
    for icon in manifest['icons']:
        names.append(r'"%s"' % titlecase(icon['name']).replace('Md ', 'Material ').replace('Ios ', 'iOS '))
        cases.append(camelcase(icon['name'].replace('-', '_')))
        codePoints.append(r'"\u{%s}"' % (icon['code'][2:]))
    print(', '.join(names))
    print(', '.join(cases))
    print(', '.join(codePoints))
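To illustrate the name transformations above, a small sketch with hypothetical icon names (real ones come from the Ionicons manifest):

from stringcase import camelcase, titlecase

for name in ('md-heart', 'ios-alarm'):  # hypothetical sample names
    display = titlecase(name).replace('Md ', 'Material ').replace('Ios ', 'iOS ')
    case = camelcase(name.replace('-', '_'))
    print(display, '->', case)  # Material Heart -> mdHeart, then iOS Alarm -> iosAlarm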
def generate_graph(self, postal_code, country_code, start, end, selected_metrics, graph_type, grid):
    observations = Observations.Observations(postal_code, country_code, start, end)
    figure(num=None, figsize=(18, 6), dpi=80, facecolor='w', edgecolor='k')
    vs_titles = ''
    for metric in selected_metrics:
        title = stringcase.titlecase(metric)
        metric_values = observations.get_values_by_key(metric)
        label = title + ' ' + metric_values['unit_code']
        if graph_type == 'bar':
            plt.bar(metric_values['timestamps'], metric_values['values'], label=label)
        elif graph_type == 'barh':
            plt.barh(metric_values['timestamps'], metric_values['values'], label=label)
        elif graph_type == 'scatter':
            plt.scatter(metric_values['timestamps'], metric_values['values'], label=label)
        else:
            plt.plot(metric_values['timestamps'], metric_values['values'], label=label)
        if vs_titles == '':
            vs_titles += title
        else:
            vs_titles += ' vs. ' + title
    plt.xlabel("Date")
    plt.ylabel("Value")
    plt.title(f"{vs_titles}\n{postal_code}, {country_code}\nFrom {start} to {end}")
    plt.legend()
    plt.grid(grid)
    plt.show()
    self.update_status('Graph Successfully Created!', 'green')
def case_conversion(source, style: StringStyle) -> str:
    """Case conversion of the input (usually a fully qualified VSS node including the path)
    into a supported string style representation.

    Args:
        source: Source string to apply conversion to.
        style: Target string style to convert source to.

    Returns:
        Converted source string according to provided string style.
    """
    if style == StringStyle.ALPHANUM_CASE:
        return stringcase.alphanumcase(source)
    elif style == StringStyle.CAMEL_CASE:
        return camel_case(source)
    elif style == StringStyle.CAMEL_BACK:
        return camel_back(source)
    elif style == StringStyle.CAPITAL_CASE:
        return stringcase.capitalcase(source)
    elif style == StringStyle.CONST_CASE:
        return stringcase.constcase(source)
    elif style == StringStyle.LOWER_CASE:
        return stringcase.lowercase(source)
    elif style == StringStyle.PASCAL_CASE:
        return stringcase.pascalcase(source)
    elif style == StringStyle.SENTENCE_CASE:
        return stringcase.sentencecase(source)
    elif style == StringStyle.SNAKE_CASE:
        return stringcase.snakecase(source)
    elif style == StringStyle.SPINAL_CASE:
        return stringcase.spinalcase(source)
    elif style == StringStyle.TITLE_CASE:
        return stringcase.titlecase(source)
    elif style == StringStyle.TRIM_CASE:
        return stringcase.trimcase(source)
    elif style == StringStyle.UPPER_CASE:
        return stringcase.uppercase(source)
    else:
        return source
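A sketch of how this might be called, assuming the StringStyle enum defined alongside this function:

# Assumes StringStyle and case_conversion from the module above.
print(case_conversion("door_count", StringStyle.TITLE_CASE))  # Door Count
print(case_conversion("door_count", StringStyle.CONST_CASE))  # DOOR_COUNT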
def __init__(self, file_name='', *args, **kwargs):
    """
    Args:
        file_name (str): name of the file to be parsed.
        *args: unused.
        **kwargs: parser configuration; see the attribute assignments below.
    """
    self.file_name = file_name
    self.raw_data = kwargs.get('raw_data', '')
    self.mapper = {}
    self.soup = None
    self.site_name = titlecase(self.__class__.__name__.replace('Parser', ''))
    self.snake_site_name = snakecase(self.__class__.__name__.replace('Parser', ''))
    self.log_level = kwargs.get('log_level', 10)
    # convert startup_dir to PurePath
    self.startup_dir = Path(str(kwargs.get('startup_dir', '/tmp')))
    # the basic yaml holds configs shared among all yaml files
    self.basic_yaml = kwargs.get('basic_yaml', '')
    # the site yaml file is where all of the site's selectors live
    self.startup_yaml = kwargs.get('startup_yaml', self.snake_site_name)
    self.startup_yaml_config = kwargs.get('startup_yaml_config')
    # if is_test_mode, raise exceptions instead of logging them
    self.is_test_mode = kwargs.get('is_test_mode', False)
    self.test_keys = kwargs.get('test_keys', [])
    # bs4 basic configs
    self.encoding = kwargs.get('encoding', '')
    self.features = kwargs.get('features', 'lxml')
    self.reserved_yaml_keys = kwargs.get('reserved_yaml_keys', [])
    self.elems_default_index = kwargs.get('elems_default_index', 0)
    self.selected_keys = kwargs.get('selected_keys', [])
    # where the parsed data lives
    self._data = {}
    self._spawn()
def _export_paths(self) -> dict:
    data = {}
    for model in self.models:
        if model.name in get_avishan_config().get_openapi_ignored_path_models():
            continue
        for direct_callable in model.direct_callables:
            direct_callable: DirectCallable
            if direct_callable.hide_in_redoc or direct_callable.documentation is None:
                continue
            if direct_callable.url not in data.keys():
                data[direct_callable.url] = Path(url=direct_callable.url)
            setattr(data[direct_callable.url], direct_callable.method.name.lower(), Operation(
                summary=direct_callable.documentation.title,
                description=direct_callable.documentation.description,
                request_body=Operation.extract_request_body_from_direct_callable(direct_callable),
                responses=Operation.extract_responses_from_direct_callable(direct_callable),
                tags=[stringcase.titlecase(model.name)]
            ))
    for key, value in data.items():
        data[key] = value.export()
    return data
header = '''
This table maps the {schema_name} descriptive statistic to the OMH to FHIR additional Observation codings (code system `http://www.fhir.org/guides/mfhir/omh_fhir_observation_codes`). If the schema element is absent, the default measure is a single measurement and there is no additional coding.

|body.descriptive_statistic|Observation.coding.code|Observation.coding.display|
|---|---|---|'''

stat_codes = [
    'average', 'maximum', 'minimum', 'count', 'totalcount', 'median',
    'std-dev', 'sum', 'variance', '20-percent', '80-percent', '4-lower',
    '4-upper', '4-dev', '5-1', '5-2', '5-3', '5-4', 'skew', 'kurtosis',
    'regression'
]

for k, v in sorted(f.omh_datatype_mapping.items()):
    if v[6]:
        schema_name = spinalcase(k)
        schema_title = titlecase(k)
        print(header.format(schema_name=schema_name, schema_title=schema_title))
        for stat_code in stat_codes:
            print('|{stat_code}|{obs_code}|{obs_display}|'.format(
                stat_code=stat_code,
                obs_code='{schema_name}-{stat_code}'.format(
                    schema_name=schema_name, stat_code=stat_code),
                obs_display='{schema_title} {stat_code}'.format(
                    schema_title=schema_title, stat_code=titlecase(stat_code))))
        print('{: .grid}\n')

# create omh component mapping tables
print("|{0}|{1}|{2}|{3}|{4}|".format('Component Name', 'Component Code System',
def omh_to_fhir(data):
    # create a python object from the json schema
    x = json2obj(data)
    # get schema name (e.g. 'step_count')
    schema_id = x.header.schema_id.name
    # map code and category from schema_id.name and set some default values
    observation_category_code = f.omh_datatype_mapping[schema_id][0]
    observation_category_display = f.omh_datatype_mapping[schema_id][1]
    observation_code_system = f.omh_datatype_mapping[schema_id][2]
    observation_code_code = f.omh_datatype_mapping[schema_id][3]
    observation_code_display = f.omh_datatype_mapping[schema_id][4]
    descriptive_statistic = f.omh_datatype_mapping[schema_id][6]
    descriptive_statistic_denominator = f.omh_datatype_mapping[schema_id][7]
    print('Observation.category = {category}'.format(category=observation_category_code))
    print('Observation.code = {code} ({display})'.format(
        code=observation_code_code, display=observation_code_display))

    # get actual numeric value - may need to inspect schema path to find it.
    try:
        observation_value_quantity_value = getattr(x.body, schema_id)
        try:
            quantity = observation_value_quantity_value
            # read the unit before replacing the quantity object with its numeric value
            observation_value_quantity_unit = getattr(quantity, 'unit')
            observation_value_quantity_value = getattr(quantity, 'value')
            observation_value_quantity_system = 'http://unitsofmeasure.org'
            observation_value_quantity_code = maps.concept_maps[observation_value_quantity_unit][1]
        except AttributeError:
            # no value and unit element like in step_count
            observation_value_quantity_unit = f.omh_datatype_mapping[schema_id][5][0]  # use default
            observation_value_quantity_system = 'http://unitsofmeasure.org'
            observation_value_quantity_code = maps.concept_maps[observation_value_quantity_unit][1]
    except AttributeError:
        # no value element like in blood_pressure panel
        observation_value_quantity_value = -1  # assign as a null value for now
        observation_value_quantity_system = 'None'
        observation_value_quantity_unit = 'None'
        observation_value_quantity_code = 'None'
    # print(observation_value_quantity_value)

    # todo map to appropriate ucum units, create table lookup.
    try:
        # mapping unit from descriptive_statistic_denominator for valueQuantity
        statistic_key = x.body.descriptive_statistic_denominator
        observation_value_quantity_unit = '{numerator}{denominator}'.format(
            numerator=observation_value_quantity_unit,
            denominator=f.omh_denominator_value[statistic_key][0])
        observation_value_quantity_code = '{numerator}{denominator}'.format(
            numerator=observation_value_quantity_code,
            denominator=f.omh_denominator_value[statistic_key][1])
    except AttributeError:
        pass  # use default units variables

    try:
        # additional codings for stats
        addl_observation_coding_system = 'http://hl7.org/fhir/omh_fhir_observation_codes'
        addl_observation_coding_code = '{}-{}'.format(
            spinalcase(schema_id), spinalcase(x.body.descriptive_statistic))
        addl_observation_coding_display = '{} {}'.format(
            titlecase(schema_id), titlecase(x.body.descriptive_statistic))
    except AttributeError:
        addl_observation_coding_system = 'None'
        addl_observation_coding_code = 'None'
        addl_observation_coding_display = 'None'

    # map specimen_source codes to specimen code extension using the concepts
    # from the specimen_source valueset.
    try:
        observation_specimen_code_extension_url = 'http://www.fhir.org/mfhir/StructureDefinition/omh_fhir_extension_observation_specimen_code'
        observation_specimen_code_system = maps.concept_maps[x.body.specimen_source][0]
        observation_specimen_code_code = maps.concept_maps[x.body.specimen_source][1]
        observation_specimen_code_display = maps.concept_maps[x.body.specimen_source][2]
        observation_specimen_code_text = x.body.specimen_source
    except AttributeError:
        # no specimen_source
        observation_specimen_code_system = 'None'
        observation_specimen_code_code = 'None'
        observation_specimen_code_display = 'None'
        observation_specimen_code_extension_url = 'None'
        observation_specimen_code_text = 'None'

    # add components
    observation_componentx = []
    for component in f.omh_component_mapping:
        try:
            component_name = getattr(x.body, component)
            observation_component_code_system = f.omh_component_mapping[component][0]
            observation_component_code_code = f.omh_component_mapping[component][1]
            observation_component_code_display = f.omh_component_mapping[component][2]
            if f.omh_component_mapping[component][3] == 'valueCodeableConcept':
                # look up code mappings (e.g. temporal_relationship_to_sleep)
                observation_component_value_codeableconcept_system = maps.concept_maps[component_name][0]
                observation_component_value_codeableconcept_code = maps.concept_maps[component_name][1]
                observation_component_value_codeableconcept_display = maps.concept_maps[component_name][2]
                observation_component_value_codeableconcept_text = component_name
                observation_component_value_quantity_value = -1
                observation_component_value_quantity_unit = 'None'
                observation_component_value_quantity_system = 'None'
                observation_component_value_quantity_code = 'None'
            else:
                # enter values directly (e.g. blood pressure)
                observation_component_value_quantity_value = getattr(component_name, 'value')
                # print(component, observation_component_value_quantity_value)
                observation_component_value_quantity_unit = getattr(component_name, 'unit')
                observation_component_value_quantity_system = 'http://unitsofmeasure.org'
                observation_component_value_quantity_code = maps.concept_maps[observation_component_value_quantity_unit][1]
                observation_component_value_codeableconcept_system = 'None'
                observation_component_value_codeableconcept_code = 'None'
                observation_component_value_codeableconcept_display = 'None'
                observation_component_value_codeableconcept_text = 'None'
            observation_componentx.append(
                fmt.format(
                    f.observation_componentx_template,
                    observation_component_code_system=observation_component_code_system,
                    observation_component_code_code=observation_component_code_code,
                    observation_component_code_display=observation_component_code_display,
                    observation_component_value_codeableconcept_system=observation_component_value_codeableconcept_system,
                    observation_component_value_codeableconcept_code=observation_component_value_codeableconcept_code,
                    observation_component_value_codeableconcept_display=observation_component_value_codeableconcept_display,
                    observation_component_value_codeableconcept_text=observation_component_value_codeableconcept_text,
                    observation_component_value_quantity_value=observation_component_value_quantity_value,
                    observation_component_value_quantity_unit=observation_component_value_quantity_unit,
                    observation_component_value_quantity_system=observation_component_value_quantity_system,
                    observation_component_value_quantity_code=observation_component_value_quantity_code))
        except AttributeError:
            # no component in this instance
            pass
    print('observation_componentx ={}'.format(','.join(observation_componentx)))
    observation_componentx = ','.join(observation_componentx)  # change to string for string formatting

    omh_obs_json = fmt.format(
        f.omh_obs_templ,
        server_assigned_resource_id=None,
        # additional codings for stats
        addl_observation_coding_system=addl_observation_coding_system,
        addl_observation_coding_display=addl_observation_coding_display,
        addl_observation_coding_code=addl_observation_coding_code,
        # specimen_source mappings
        observation_specimen_code_system=observation_specimen_code_system,
        observation_specimen_code_code=observation_specimen_code_code,
        observation_specimen_code_display=observation_specimen_code_display,
        # add components
        observation_componentx=observation_componentx,
        # header mappings
        header=x.header,
        # body mappings (time mappings)
        body=x.body,
        # codes, value, units
        observation_category_code=observation_category_code,
        observation_category_display=observation_category_display,
        observation_code_system=observation_code_system,
        observation_code_code=observation_code_code,
        observation_code_display=observation_code_display,
        # measure value
        observation_value_quantity_value=observation_value_quantity_value,
        observation_value_quantity_unit=observation_value_quantity_unit,
        observation_value_quantity_system=observation_value_quantity_system,
        observation_value_quantity_code=observation_value_quantity_code,
        descriptive_statistic=descriptive_statistic,
        descriptive_statistic_denominator=descriptive_statistic_denominator,
    )
    # print(omh_obs_json)
    # delete Nulls using the scrub_dict function (one way; could also load the fhir python library)
    # convert json to dict
    omh_obs_dict = json.loads(omh_obs_json)
    omh_obs_dict = scrub_dict(omh_obs_dict)
    # ...and back to json
    print('*********************this OMH datapoint file**************')
    print(json_string)
    print('******maps to this FHIR Observation**********')
    print(json.dumps(omh_obs_dict, indent=3))
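To make the stat-coding derivation above concrete, a minimal standalone sketch; the schema id and statistic are sample values, not taken from a real datapoint:

from stringcase import spinalcase, titlecase

schema_id = 'step_count'  # sample value
statistic = 'average'     # sample value
print('{}-{}'.format(spinalcase(schema_id), spinalcase(statistic)))  # step-count-average
print('{} {}'.format(titlecase(schema_id), titlecase(statistic)))    # Step Count Average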
def _player_embed_2(self, player: BSPlayer):
    """New player embed."""
    description = '{}'.format(player.tag.upper())
    em = discord.Embed(title=player.name, description=description, color=random_discord_color())

    # club
    player_club_name = player.club.name
    player_club_tag = player.club.tag
    if not player_club_name:
        player_club_name = 'Not in club'
    if not player_club_tag:
        player_club_tag = 'N/A'
    em.add_field(name=player_club_name, value=player_club_tag, inline=False)

    # fields
    em.add_field(
        name='Trophies',
        value="{} / {} PB \n{} XP".format(
            player.trophies or 0,
            player.highestTrophies or 0,
            player.expLevel or 0,
        ),
        inline=False)
    em.add_field(
        name='Time',
        value="Big Brawler: {} \nRobo Rumble: {}".format(
            format_time(player.bestTimeAsBigBrawler or 0),
            format_time(player.bestRoboRumbleTime or 0),
        ),
        inline=False)
    em.add_field(name="Brawlers Unlocked", value=str(len(player.brawlers)))
    em.add_field(
        name='Victories',
        value="**{}** 3v3, **{}** Solo SD, **{}** Duo SD".format(
            player['3vs3Victories'] or 0,
            player.soloVictories or 0,
            player.duoVictories or 0),
        inline=False)

    # brawlers
    brawlers = sorted(
        player.brawlers,
        key=lambda x: (x.get('trophies', 0), x.get('highestTrophies', 0)),
        reverse=True,
    )
    o = []
    for b in brawlers or []:
        emoji_name = b.name.lower().replace(' ', '').replace('-', '').replace('.', '')
        emoji = self.get_emoji(emoji_name)
        o.append(
            '{emoji} `{trophies: >3} / {pb: >3} Lvl {level: >2}`\u2800 {name}'.format(
                emoji=emoji,
                trophies=b.trophies,
                pb=b.highestTrophies,
                level=b.power,
                name=stringcase.titlecase(b.name.lower()),
            ))
    f = 5
    for index, group in enumerate(grouper(o, f)):
        i0 = (index * f) + 1
        i1 = (index + 1) * f
        grp = [g for g in group if g]
        em.add_field(
            name="Brawlers {}-{}".format(i0, i1),
            value='\n'.join(grp),
            inline=False,
        )
    return em
def generate_name(postfix: str) -> str:
    dir_name = stringcase.titlecase(os.path.basename(os.getcwd()))
    return f"{dir_name} {postfix}"
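A usage sketch, assuming the process runs from a hypothetical directory named my_project:

print(generate_name("Dashboard"))  # "My Project Dashboard"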