def patch_project(
    self,
    db_session: sqlalchemy.orm.Session,
    name: str,
    project: dict,
    patch_mode: mlrun.api.schemas.PatchMode = mlrun.api.schemas.PatchMode.replace,
    projects_role: typing.Optional[mlrun.api.schemas.ProjectsRole] = None,
    leader_session: typing.Optional[str] = None,
    wait_for_completion: bool = True,
) -> typing.Tuple[typing.Optional[mlrun.api.schemas.Project], bool]:
    if self._is_request_from_leader(projects_role):
        # No real scenario for this to be useful currently - in iguazio, patch is
        # transformed to a store request
        raise NotImplementedError("Patch operation not supported from leader")
    current_project = self.get_project(db_session, name, leader_session)
    strategy = patch_mode.to_mergedeep_strategy()
    current_project_dict = current_project.dict(exclude_unset=True)
    mergedeep.merge(current_project_dict, project, strategy=strategy)
    patched_project = mlrun.api.schemas.Project(**current_project_dict)
    return self.store_project(
        db_session,
        name,
        patched_project,
        projects_role,
        leader_session,
        wait_for_completion,
    )
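# A minimal sketch (not mlrun's actual PatchMode implementation) of how a patch
# mode can map onto mergedeep strategies: REPLACE overwrites lists wholesale,
# while ADDITIVE concatenates them. The project dicts below are made up.
import mergedeep

current = {"metadata": {"labels": {"owner": "admin"}}, "spec": {"functions": ["a"]}}
patch = {"spec": {"functions": ["b"]}}

replaced = mergedeep.merge({}, current, patch, strategy=mergedeep.Strategy.REPLACE)
assert replaced["spec"]["functions"] == ["b"]

added = mergedeep.merge({}, current, patch, strategy=mergedeep.Strategy.ADDITIVE)
assert added["spec"]["functions"] == ["a", "b"]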
def create_event(event_type: str, body: dict = None) -> dict:
    """
    Return dict contents based on "event_type".

    Allowed values for "event_type":
        "aws:alexa-skill-event"
        "aws:alexa-smart-home-event"
        "aws:api-gateway-event"
        "aws:cloud-watch-event"
        "aws:cloud-watch-log-event"
        "aws:cognito-user-pool-event"
        "aws:dynamo-stream-event"
        "aws:kinesis"
        "aws:s3"
        "aws:scheduled"
        "aws:sns"
        "aws:sqs"

    :param event_type: Event type specification, e.g. "aws:s3", "aws:sns", ...
    :param body: Additional body merged into the returned event
    :return: Dict contents based on "event_type"
    """
    json_path = _event_type_to_json_path(event_type)
    with json_path.open() as fh:
        event = json.load(fh)
    merge(event, body or {}, strategy=Strategy.ADDITIVE)
    return event
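# A self-contained sketch of the overlay step above, without the JSON fixture
# files: Strategy.ADDITIVE deep-merges the caller's body into the template
# event, extending lists rather than replacing them. The event shape is made up.
from mergedeep import merge, Strategy

template = {"Records": [{"eventSource": "aws:s3"}]}
body = {"Records": [{"eventSource": "aws:s3", "s3": {"bucket": {"name": "my-bucket"}}}]}
event = merge({}, template, body, strategy=Strategy.ADDITIVE)
# ADDITIVE concatenates the two "Records" lists, so event["Records"] has 2 items
assert len(event["Records"]) == 2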
def update_data(file_path_to_write: str, file_path_to_read: str, file_ending: str) -> None:
    """
    Collects specially chosen fields from file_path_to_read and writes them into file_path_to_write.

    :param file_path_to_write: The output file path to add the special fields to.
    :param file_path_to_read: The input file path to read the special fields from.
    :param file_ending: The file's ending.
    :return: None
    """
    pack_obj_data, _ = get_dict_from_file(file_path_to_read)
    fields: list = DELETED_YML_FIELDS_BY_DEMISTO if file_ending == 'yml' else DELETED_JSON_FIELDS_BY_DEMISTO
    # Creates a nested-complex dict of all fields to be deleted by Demisto.
    # We need the dict to be nested, to easily merge it later into the file data.
    preserved_data: dict = unflatten(
        {field: dictor(pack_obj_data, field) for field in fields if dictor(pack_obj_data, field)},
        splitter='dot',
    )

    if file_ending == 'yml':
        with open(file_path_to_write, 'r') as yf:
            file_yaml_object = yaml.load(yf)
        if pack_obj_data:
            merge(file_yaml_object, preserved_data)
        with open(file_path_to_write, 'w') as yf:
            yaml.dump(file_yaml_object, yf)
    elif file_ending == 'json':
        file_data: dict = get_json(file_path_to_write)
        if pack_obj_data:
            merge(file_data, preserved_data)
        with open(file_path_to_write, 'w') as jf:
            json.dump(obj=file_data, fp=jf, indent=4)
def merge(a, b, path=None, update=True):
    """Merges b into a.

    Based on http://stackoverflow.com/questions/7204805/python-dictionaries-of-dictionaries-merge
    """
    if path is None:
        path = []
    for key in b:
        if key in a:
            if isinstance(a[key], dict) and isinstance(b[key], dict):
                merge(a[key], b[key], path + [str(key)])
            elif a[key] == b[key]:
                pass  # same leaf value
            elif isinstance(a[key], list) and isinstance(b[key], list):
                for idx, val in enumerate(b[key]):
                    a[key][idx] = merge(a[key][idx], b[key][idx], path + [str(key), str(idx)], update=update)
            elif update:
                a[key] = b[key]
            else:
                raise Exception('Conflict at %s' % '.'.join(path + [str(key)]))
        else:
            a[key] = b[key]
    return a
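# A quick exercise of the merge() above: nested dicts merge recursively, equal
# leaves are left alone, and (with update=True) conflicting leaves are taken
# from b. The config dicts are made up.
a = {"db": {"host": "localhost", "port": 5432}, "debug": False}
b = {"db": {"port": 5433, "name": "prod"}, "debug": True}
merge(a, b)
assert a == {"db": {"host": "localhost", "port": 5433, "name": "prod"}, "debug": True}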
def filter_and_format_grouped_by_project_runtime_resources_output(
    self,
    grouped_by_project_runtime_resources_output: mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
    allowed_projects: typing.List[str],
    group_by: typing.Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
) -> typing.Union[
    mlrun.api.schemas.RuntimeResourcesOutput,
    mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
    mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
]:
    runtime_resources_by_kind = {}
    for (
        project,
        kind_runtime_resources_map,
    ) in grouped_by_project_runtime_resources_output.items():
        for kind, runtime_resources in kind_runtime_resources_map.items():
            if project in allowed_projects:
                runtime_resources_by_kind.setdefault(kind, []).append(runtime_resources)
    runtimes_resources_output = [] if group_by is None else {}
    for kind, runtime_resources_list in runtime_resources_by_kind.items():
        runtime_handler = mlrun.runtimes.get_runtime_handler(kind)
        resources = runtime_handler.build_output_from_runtime_resources(
            runtime_resources_list, group_by
        )
        if group_by is None:
            runtimes_resources_output.append(
                mlrun.api.schemas.KindRuntimeResources(kind=kind, resources=resources)
            )
        else:
            mergedeep.merge(runtimes_resources_output, resources)
    return runtimes_resources_output
def list_runtime_resources(
    self,
    project: str,
    kind: typing.Optional[str] = None,
    object_id: typing.Optional[str] = None,
    label_selector: typing.Optional[str] = None,
    group_by: typing.Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
) -> typing.Union[
    mlrun.api.schemas.RuntimeResourcesOutput,
    mlrun.api.schemas.GroupedByJobRuntimeResourcesOutput,
    mlrun.api.schemas.GroupedByProjectRuntimeResourcesOutput,
]:
    response = [] if group_by is None else {}
    kinds = mlrun.runtimes.RuntimeKinds.runtime_with_handlers()
    if kind is not None:
        self.validate_runtime_resources_kind(kind)
        kinds = [kind]
    for kind in kinds:
        runtime_handler = mlrun.runtimes.get_runtime_handler(kind)
        resources = runtime_handler.list_resources(
            project, object_id, label_selector, group_by
        )
        if group_by is None:
            response.append(
                mlrun.api.schemas.KindRuntimeResources(kind=kind, resources=resources)
            )
        else:
            mergedeep.merge(response, resources)
    return response
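# A sketch of why the grouped branch uses mergedeep.merge rather than
# dict.update: per-kind results that share a project key must merge one level
# down instead of clobbering each other. The result shapes here are made up.
import mergedeep

response = {}
job_resources = {"project-a": {"job": {"pod_resources": ["pod-1"]}}}
dask_resources = {"project-a": {"dask": {"pod_resources": ["pod-2"]}}}
mergedeep.merge(response, job_resources)
mergedeep.merge(response, dask_resources)
# Both kinds survive under the shared project key
assert set(response["project-a"]) == {"job", "dask"}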
def test_should_raise_TypeError_using_typesafe_strategy_if_types_differ(self):
    expected = {  # never reached: the merge below raises before producing this
        "a": {"b": {"c": 5, "_c": 15}, "B": {"C": 10}},
        "d": 3,
        "e": {1: 2, "a": {"f": 2}},
        "f": [4, 5, 6],
    }
    a = {"a": {"b": {"c": 5}}, "d": 1, "e": {2: 3}, "f": [1, 2, 3]}
    b = {"a": {"B": {"C": 10}}, "d": 2, "e": 2, "f": [4, 5, 6]}
    c = {"a": {"b": {"_c": 15}}, "d": 3, "e": {1: 2, "a": {"f": 2}}}

    with self.assertRaises(TypeError):
        merge({}, a, b, c, strategy=Strategy.TYPESAFE)
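# A minimal repro of the behavior under test, assuming mergedeep's documented
# TYPESAFE semantics: merging values of different types at the same key (here
# a dict and an int under "e") raises TypeError.
from mergedeep import merge, Strategy

try:
    merge({}, {"e": {2: 3}}, {"e": 2}, strategy=Strategy.TYPESAFE)
except TypeError as exc:
    print(f"typesafe merge refused: {exc}")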
def _create_gradient_line(self, x, y, u, colormap, name):
    xs, ys, us = self._get_segments(x, y, u)
    color_mapper = LinearColorMapper(palette=colormap, low=min(us), high=max(us))
    data_source = ColumnDataSource(dict(xs=xs, ys=ys, us=us))
    lkw = dict(
        line_width=2,
        name=name,
        line_color={"field": "us", "transform": color_mapper},
    )
    line_kw = self._kwargs.get("line_kw", dict())
    glyph = MultiLine(xs="xs", ys="ys", **merge({}, lkw, line_kw))
    # default options
    cbkw = dict(width=8)
    # user defined options
    colorbar_kw = self._kwargs.get("colorbar_kw", dict())
    colorbar = ColorBar(color_mapper=color_mapper, title=name, **merge({}, cbkw, colorbar_kw))
    return data_source, glyph, colorbar
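# The merge({}, defaults, user_kw) idiom above in isolation: user-supplied
# plotting options override the library defaults without mutating either dict.
from mergedeep import merge

defaults = {"line_width": 2, "line_color": "navy"}
user_kw = {"line_color": "crimson"}
options = merge({}, defaults, user_kw)
assert options == {"line_width": 2, "line_color": "crimson"}
assert defaults["line_color"] == "navy"  # defaults left untouched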
def _curate_amenities(amenities):
    curated = {}
    for item in amenities:
        if item and isinstance(item, dict):
            curated.update(item)
        elif item and isinstance(item, list):
            merge(curated, {'room': [amenity.lower() for amenity in item]})
    return curated
def loads(contents, **kw):  # noqa
    config = configparser.ConfigParser()
    config.read_string(contents)
    data = {}
    for section in config.sections():
        for option in config.options(section):
            merge(data, path_to_dict(section, option, config.get(section, option)))
    return data
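# path_to_dict isn't shown above; a plausible sketch (hypothetical, not the
# original helper) nests a section/option pair into a dict so that merge()
# can fold each INI entry into one tree:
from mergedeep import merge

def path_to_dict(section, option, value):
    # "db.primary" / "host" / "localhost" -> {"db": {"primary": {"host": "localhost"}}}
    node = {option: value}
    for part in reversed(section.split(".")):
        node = {part: node}
    return node

data = {}
merge(data, path_to_dict("db.primary", "host", "localhost"))
merge(data, path_to_dict("db.primary", "port", "5432"))
assert data == {"db": {"primary": {"host": "localhost", "port": "5432"}}}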
def patch_project(
    self,
    session: sqlalchemy.orm.Session,
    name: str,
    project: dict,
    patch_mode: mlrun.api.schemas.PatchMode = mlrun.api.schemas.PatchMode.replace,
):
    existing_project_dict = self._projects[name].dict()
    strategy = patch_mode.to_mergedeep_strategy()
    mergedeep.merge(existing_project_dict, project, strategy=strategy)
    self._projects[name] = mlrun.api.schemas.Project(**existing_project_dict)
def load_config(self, cfg_fname):
    config = read_json(cfg_fname)

    # apply inheritance through the config hierarchy
    descendant, ancestors = config, []
    while "inherit_from" in descendant:
        parent_config = read_json(Path(descendant["inherit_from"]))
        ancestors.append(parent_config)
        descendant = parent_config
    for ancestor in ancestors:
        merge(ancestor, config, strategy=Strategy.REPLACE)
        config = ancestor
    return config
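# A sketch of the precedence this loop produces, with inline dicts standing in
# for the JSON files: each ancestor is merged with the already-resolved
# descendant config on top, so values closest to the leaf win.
from mergedeep import merge, Strategy

root = {"trainer": {"epochs": 10, "lr": 0.1}}
child = {"trainer": {"lr": 0.01}, "inherit_from": "root.json"}
resolved = merge({}, root, child, strategy=Strategy.REPLACE)
assert resolved["trainer"] == {"epochs": 10, "lr": 0.01}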
def resolve(self, app_config):
    jsonpath_expr = parse(f'$..{self.key}.`parent`')
    results = jsonpath_expr.find(app_config)
    count = len(results)
    if count > 0:
        logging.info(f'Needs to resolve {count} values by {self.key} module')
        provider = self.provider()
        resolved = {}
        # build one nested dict of resolved values, keyed by full JSON path
        for match in results:
            merge(
                resolved,
                unflatten({f'{match.full_path}': self.fetch(match.value[self.key], provider)}),
                strategy=Strategy.ADDITIVE,
            )
        return merge(nested_delete(app_config, self.key), resolved, strategy=Strategy.ADDITIVE)
    else:
        return app_config
def apply_stack_instance(config_file, params, tags, secrets, service_params,
                         service_secrets, replicas, services, stackl_context,
                         instance_name, show_progress):
    final_params = {}
    for item in params:
        final_params = {**final_params, **json.loads(item)}
    config_doc = yaml.load(config_file.read(), Loader=yaml.FullLoader)
    final_params = {**config_doc['params'], **final_params}
    tags = json.loads(tags)
    replicas = json.loads(replicas)
    if "replicas" in config_doc:
        replicas = {**config_doc['replicas'], **replicas}
    secrets = json.loads(secrets)
    service_params = json.loads(service_params)
    if "service_params" in config_doc:
        service_params = merge(config_doc['service_params'], service_params)
    service_secrets = json.loads(service_secrets)
    if "service_secrets" in config_doc:
        service_secrets = merge(config_doc['service_secrets'], service_secrets)
    if "secrets" in config_doc:
        secrets = {**config_doc['secrets'], **secrets}
    if "tags" in config_doc:
        tags = {**config_doc['tags'], **tags}
    if "services" in config_doc:
        services = config_doc['services']
    # .get() avoids a NameError when the config document has no "stages" key
    stages = config_doc.get("stages")
    invocation = stackl_client.StackInstanceInvocation(
        stack_instance_name=instance_name,
        stack_infrastructure_template=config_doc["stack_infrastructure_template"],
        stack_application_template=config_doc["stack_application_template"],
        params=final_params,
        replicas=replicas,
        service_params=service_params,
        service_secrets=service_secrets,
        secrets=secrets,
        services=services,
        stages=stages,
        tags=tags,
    )
    try:
        stackl_context.stack_instances_api.get_stack_instance(instance_name)
        res = stackl_context.stack_instances_api.put_stack_instance(invocation)
    except stackl_client.exceptions.ApiException:
        res = stackl_context.stack_instances_api.post_stack_instance(invocation)
    click.echo(res)
    if show_progress:
        show_progress_bar(stackl_context, instance_name)
def get_multiple_assignment_tasks(weeks, done=0):
    mydb, cursor = create_connection()
    a_dict = {}
    weeks = [int(x.replace('W', '')) for x in weeks]
    weeks = [f'W{x:02}' for x in range(weeks[0], weeks[1] + 1)]
    for each in weeks:
        merge(a_dict, get_assignment_tasks(each, done))
        print(each)
    # return a_dict
    # return dict(sorted(a_dict.items()))
    # sorted(a_dict.items(), key=lambda x: x[1])
    # a_dict.sort(key=lambda x: x[1])
    # order_dict(a_dict)
    return a_dict
def _assert_project(
    expected_project: mlrun.api.schemas.Project,
    project: mlrun.api.schemas.Project,
    extra_exclude: dict = None,
):
    exclude = {"id": ..., "metadata": {"created"}, "status": {"state"}}
    if extra_exclude:
        mergedeep.merge(exclude, extra_exclude, strategy=mergedeep.Strategy.ADDITIVE)
    assert (
        deepdiff.DeepDiff(
            expected_project.dict(exclude=exclude),
            project.dict(exclude=exclude),
            ignore_order=True,
        )
        == {}
    )
def add_value_in_mdb_doc(mdb_doc, doc, value_typed):
    fname, ftype, fvalue = value_typed
    sub_doc1 = {}
    sub_doc2 = {}
    fields = fname.split('.')
    try:
        # interpret the raw value as a Python literal where possible...
        value = eval(fvalue)
    except NameError:
        # ...and fall back to the raw string (e.g. unquoted text)
        value = fvalue
    # build a nested dict from the dotted field name, innermost key first
    sub_doc1[fields.pop()] = parse(value, ftype)
    for f in reversed(fields):
        sub_doc2[f] = sub_doc1
        sub_doc1 = sub_doc2
        sub_doc2 = {}
    merge(mdb_doc, sub_doc1)
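# The reversed-fields trick above in isolation: a dotted path becomes a nested
# dict that merge() can then fold into the target document.
node = {"c": 42}
for part in reversed("a.b".split(".")):
    node = {part: node}
assert node == {"a": {"b": {"c": 42}}}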
def _read_reccap2_products(flist, fname_specs=[]):
    import re

    import mergedeep
    import xarray as xr
    from munch import Munch

    from .preprocess import preprocess

    def flatten_list(list_of_lists):
        if len(list_of_lists) == 0:
            return list_of_lists
        if isinstance(list_of_lists[0], list):
            return flatten_list(list_of_lists[0]) + flatten_list(list_of_lists[1:])
        return list_of_lists[:1] + flatten_list(list_of_lists[1:])

    def build_tree(tree_list):
        if tree_list:
            if len(tree_list) > 2:
                return {tree_list[0]: build_tree(tree_list[1:])}
            try:
                xds = xr.open_mfdataset(
                    tree_list[1],
                    decode_times=False,
                    preprocess=preprocess(decode_times=True, center_months=True),
                )
            except OSError:
                return {}
            if len(xds.data_vars) == 1:
                xds = xds[list(xds.data_vars.keys())[0]]
            elif len(xds.data_vars) > 1:
                for key in xds:
                    if key in tree_list[1]:
                        xds = xds[key].assign_attrs(
                            processing=xds.attrs.get('processing', '')
                        )
                        break
            return {tree_list[0]: xds}
        return {}

    flist = flatten_list(flist)

    output = []
    failed = []
    for f in flist:
        if not f.endswith('.nc'):
            continue
        matches = []
        for spec in fname_specs:
            matches += [m for m in spec if re.findall(spec[m] if spec[m] else m, f)]
        if len(matches) == len(fname_specs):
            tree = [re.sub("[^0-9a-zA-Z]+", "_", m) for m in matches] + [f]
            output += (build_tree(tree),)
        else:
            failed += (f,)

    merged = mergedeep.merge({}, *output)
    merged['not_matched'] = failed

    obj = _RECCAP_dict.fromDict(merged)
    return obj
def build_valid_urls(self) -> None:
    """Guides stuff"""
    self.match_groups = []
    for group, contents in self.bot.match_groups.items():
        for match in contents:
            url_pattern = match['url']
            url_pattern = url_pattern.replace(r'.', r'\.')
            url_pattern = url_pattern.replace(r'*', r'(.*?)')
            self.match_groups.append(MatchGroup(group, url_pattern, match['guide']))

    for guide_type, v in self.bot.guides.items():
        for guide_name, guide_content in v.items():
            if 'inherits' not in guide_content:
                continue
            guide_to_inherit = guide_content['inherits'].split('/')
            source_guide = self.bot.guides[guide_type][guide_name]
            if len(guide_to_inherit) > 1:
                target_guide = self.bot.guides[guide_to_inherit[0]][guide_to_inherit[1]]
            else:
                target_guide = self.bot.guides[guide_type][guide_to_inherit[0]]
            combined_guide = merge({}, target_guide, source_guide)
            self.bot.guides[guide_type][guide_name] = combined_guide
def format_media_set(media_set):
    merged = merge({}, *media_set, strategy=Strategy.ADDITIVE)
    if "directories" in merged:
        for directory in merged["directories"]:
            os.makedirs(directory, exist_ok=True)
        merged.pop("directories")
    return merged
    # NOTE: everything below the return above is unreachable legacy code
    media_set = list(chain(*media_set))
    media_set.sort(key=lambda x: x["type"])
    media_set = [list(g) for k, g in groupby(media_set, key=lambda x: x["type"])]
    new_list = []
    for item in media_set:
        item2 = {k: [d[k] for d in item] for k in item[0]}
        item2["type"] = item2['type'][0].title()
        item2["valid"] = list(chain(*item2["valid"]))
        item2["invalid"] = list(chain(*item2["invalid"]))
        if item2["valid"]:
            seen = set()
            item2["valid"] = [
                x for x in item2["valid"]
                if x["filename"] not in seen and not seen.add(x["filename"])
            ]
            seen = set()
            location_directories = [
                x["directory"] for x in item2["valid"]
                if x["directory"] not in seen and not seen.add(x["directory"])
            ]
            for location_directory in location_directories:
                os.makedirs(location_directory + os.sep, exist_ok=True)
            item2["valid"] = [list(g) for k, g in groupby(item2["valid"], key=lambda x: x["post_id"])]
        new_list.append(item2)
    return new_list
def read_table_config(self, path: str):
    yaml_conf = os.path.abspath(path)
    logger.info("Opening YAML config file: %s", yaml_conf)
    with open(yaml_conf) as f:
        documents = yaml.safe_load_all(f)
        for conf in documents:
            self.table_config = merge({}, conf, self.table_config)
def test_should_merge_3_dicts_into_new_dict_using_typesafe_strategy_and_only_mutate_target_if_types_are_compatible(self):
    expected = {
        "a": {"b": {"c": 5, "_c": 15}, "B": {"C": 10}},
        "d": 3,
        "f": [4, 5, 6],
    }
    a = {"a": {"b": {"c": 5}}, "d": 1, "f": [1, 2, 3]}
    a_copy = deepcopy(a)
    b = {"a": {"B": {"C": 10}}, "d": 2, "f": [4, 5, 6]}
    b_copy = deepcopy(b)
    c = {"a": {"b": {"_c": 15}}, "d": 3}
    c_copy = deepcopy(c)

    actual = merge({}, a, b, c, strategy=Strategy.TYPESAFE)

    self.assertEqual(actual, expected)
    self.assertEqual(a, a_copy)
    self.assertEqual(b, b_copy)
    self.assertEqual(c, c_copy)
def format_media_set(media_set):
    merged = merge({}, *media_set, strategy=Strategy.ADDITIVE)
    if "directories" in merged:
        for directory in merged["directories"]:
            os.makedirs(directory, exist_ok=True)
        merged.pop("directories")
    return merged
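# The collapse step in isolation: Strategy.ADDITIVE concatenates the
# "directories" lists from every per-media dict while still deep-merging the
# rest. The media-set shape here is made up for illustration.
from mergedeep import merge, Strategy

media_set = [
    {"directories": ["downloads/images"], "images": {"count": 2}},
    {"directories": ["downloads/videos"], "videos": {"count": 1}},
]
merged = merge({}, *media_set, strategy=Strategy.ADDITIVE)
assert merged["directories"] == ["downloads/images", "downloads/videos"]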
def test_should_not_copy_references(self):
    before = 1
    after = 99
    o1 = {"key1": before}
    o2 = {"key2": before}
    expected = {"list": deepcopy([o1, o2]), "tuple": deepcopy((o1, o2))}
    a = {"list": [o1], "tuple": (o1,)}
    b = {"list": [o2], "tuple": (o2,)}

    actual = merge({}, a, b, strategy=Strategy.ADDITIVE)
    o1["key1"] = after
    o2["key2"] = after

    self.assertEqual(actual, expected)

    # Copied dicts should `not` mutate
    self.assertEqual(actual["list"][0]["key1"], before)
    self.assertEqual(actual["list"][1]["key2"], before)
    self.assertEqual(actual["tuple"][0]["key1"], before)
    self.assertEqual(actual["tuple"][1]["key2"], before)

    # Non-copied dicts should mutate
    self.assertEqual(a["list"][0]["key1"], after)
    self.assertEqual(b["list"][0]["key2"], after)
    self.assertEqual(a["tuple"][0]["key1"], after)
    self.assertEqual(b["tuple"][0]["key2"], after)
def test_should_merge_3_dicts_into_new_dict_using_replace_strategy_and_only_mutate_target(self):
    expected = {
        "a": {"b": {"c": 5, "_c": 15}, "B": {"C": 10}},
        "d": 3,
        "e": {1: 2, "a": {"f": 2}},
        "f": [4, 5, 6],
        "g": (100, 200),
    }
    a = {"a": {"b": {"c": 5}}, "d": 1, "e": {2: 3}, "f": [1, 2, 3], "g": (2, 4, 6)}
    a_copy = deepcopy(a)
    b = {"a": {"B": {"C": 10}}, "d": 2, "e": 2, "f": [4, 5, 6], "g": (100, 200)}
    b_copy = deepcopy(b)
    c = {"a": {"b": {"_c": 15}}, "d": 3, "e": {1: 2, "a": {"f": 2}}}
    c_copy = deepcopy(c)

    actual = merge({}, a, b, c, strategy=Strategy.REPLACE)

    self.assertEqual(actual, expected)
    self.assertEqual(a, a_copy)
    self.assertEqual(b, b_copy)
    self.assertEqual(c, c_copy)
def patch_project(
    self,
    session_cookie: str,
    name: str,
    project: dict,
    patch_mode: mlrun.api.schemas.PatchMode = mlrun.api.schemas.PatchMode.replace,
    wait_for_completion: bool = True,
) -> typing.Tuple[mlrun.api.schemas.Project, bool]:
    logger.debug("Patching project in leader", name=name, project=project)
    current_project = self.get_project(session_cookie, name)
    strategy = patch_mode.to_mergedeep_strategy()
    current_project_dict = current_project.dict(exclude_unset=True)
    mergedeep.merge(current_project_dict, project, strategy=strategy)
    patched_project = mlrun.api.schemas.Project(**current_project_dict)
    return self.store_project(
        session_cookie, name, patched_project, wait_for_completion
    )
def content_config(self):
    return config.ConfigTree(
        merge(
            config.ConfigTree(),
            self.provider.config.content,
            self.config.get_value().content,
            # strategy=Strategy.ADDITIVE
        )
    )
def resolve_ruleset(ruleset):
    # create return variable
    export = {}
    try:
        # try to parse the ruleset as json
        export = json.loads(ruleset)
        merged = dict(rulesets["default"])
        # fill missing keys in dict
        merge(merged, export)
        export = merged
    except ValueError:
        # if the ruleset is a name like 'default' or 'columns+2',
        # read it from the predefined rulesets
        if ruleset in rulesets:
            export = rulesets[ruleset]
    if not export:
        export = rulesets["default"]
    return export
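# The default-filling step in isolation: user-supplied JSON overrides only the
# keys it names, and everything else falls back to the default ruleset. The
# ruleset keys below are made up.
import json
from mergedeep import merge

default_ruleset = {"columns": 4, "spacing": {"x": 10, "y": 10}}
user_ruleset = json.loads('{"spacing": {"x": 20}}')
merged = merge({}, default_ruleset, user_ruleset)
assert merged == {"columns": 4, "spacing": {"x": 20, "y": 10}}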
def list_runtimes(
    self,
    project: str,
    label_selector: str = None,
    group_by: typing.Optional[mlrun.api.schemas.ListRuntimeResourcesGroupByField] = None,
) -> typing.Union[typing.Dict, mlrun.api.schemas.GroupedRuntimeResourcesOutput]:
    runtimes = [] if group_by is None else {}
    for kind in mlrun.runtimes.RuntimeKinds.runtime_with_handlers():
        runtime_handler = mlrun.runtimes.get_runtime_handler(kind)
        resources = runtime_handler.list_resources(project, label_selector, group_by)
        if group_by is None:
            runtimes.append({"kind": kind, "resources": resources})
        else:
            mergedeep.merge(runtimes, resources)
    return runtimes
def _do_sum(self, other):
    """Differently from Plot.extend, this method creates a new plot object,
    which uses the series of both plots and merges the _kwargs dictionary
    of `self` with the one of `other`.
    """
    series = []
    series.extend(self.series)
    series.extend(other.series)
    kwargs = merge({}, self._kwargs, other._kwargs)
    return type(self)(*series, **kwargs)