def without_doi(cls, **kwargs):
    changes = request.json['changes']
    authors = request.json['authors']
    datasetUrl = request.json['datasetUrl']
    article = {}
    article['title'] = {changes['title_lang']: changes['title_val']}
    article['abstract'] = {
        changes['abstract_lang']: changes['abstract_val']
    }
    article['authors'] = authors
    article['document_type'] = changes['document_type']
    always_merger.merge(article, {
        "_primary_community": 'cesnet',
        "access_right_category": "success"
    })
    article['datasets'] = [datasetUrl]
    print(article)
    record_uuid = uuid.uuid4()
    pid = article_minter(record_uuid, article)
    record = cls.create(data=article, id_=record_uuid)
    indexer = cls.DOCUMENT_INDEXER()
    indexer.index(record)
    PersistentIdentifier.create('dpsart',
                                pid.pid_value,
                                object_type='rec',
                                object_uuid=record_uuid,
                                status=PIDStatus.REGISTERED)
    db.session.commit()
    return Response(status=302, headers={"Location": record.canonical_url})
def add_firmware(firm):
    """add firmware from info file"""
    ports = firm['devices']
    path = Path(firm['path']).parent
    versions = []
    updated_firm = firm.copy()
    for p in ports:
        fware = Firmware(port=p, firmware_info=firm)
        licenses = firm.get('licenses', [])
        for file in licenses:
            repo, file_path = file.split(':')
            fware.retrieve_license(path, repository=repo, repo_path=file_path)
        compat = fware.get_compatible_tags()
        for cmp in compat:
            prev_compat = next(
                (v for v in versions if v['version'] == cmp['version']), None)
            if prev_compat:
                always_merger.merge(prev_compat, cmp)
            else:
                versions.append(cmp)
    updated_firm['versions'] = versions
    for vers in versions:
        devices = vers.get('devices')
        # Guard against a missing or empty device list (the original
        # `len(...) >= 0` check was always true).
        if devices:
            v_dir = path / vers['git_tag']
            for dev in devices:
                (v_dir / dev).mkdir(exist_ok=True, parents=True)
    new = update_file(firm, updated_firm)
    fware_index = INFO['firmware'].index(firm)
    INFO['firmware'].pop(fware_index)
    INFO['firmware'].append(new)
    return new
def merge_network_info(composefile_yaml, network_id, genesis_json=None):
    network_info_yml = yaml.safe_load(f"""
services:
  core:
    environment:
      RADIXDLT_NETWORK_ID: {network_id}
""")
    if genesis_json:
        genesis_info_yml = yaml.safe_load(f"""
services:
  core:
    environment:
      RADIXDLT_GENESIS_FILE: "/home/radixdlt/genesis.json"
    volumes:
      - "{genesis_json}:/home/radixdlt/genesis.json"
""")
        # network_info_yml = Helpers.merge(genesis_info_yml, network_info_yml)
        network_info_yml = always_merger.merge(network_info_yml,
                                               genesis_info_yml)
    volumes = composefile_yaml["services"]["core"]["volumes"]
    hardcoded_key_volume = "./node-keystore.ks:/home/radixdlt/node-keystore.ks"
    if hardcoded_key_volume in volumes:
        volumes.remove(hardcoded_key_volume)
    composefile_yaml["services"]["core"]["environment"].pop(
        "RADIXDLT_NETWORK_ID")
    yml_to_return = always_merger.merge(network_info_yml, composefile_yaml)
    return yml_to_return
def get_claims(self, token: RefreshToken) -> dict[str, Any]:
    """Get a dictionary of claims from scopes that the token
    requires and are assigned to the provider."""
    scopes_from_client = token.scope
    final_claims = {}
    for scope in ScopeMapping.objects.filter(
            provider=token.provider,
            scope_name__in=scopes_from_client).order_by("scope_name"):
        value = None
        try:
            value = scope.evaluate(
                user=token.user,
                request=self.request,
                provider=token.provider,
                token=token,
            )
        except PropertyMappingExpressionException as exc:
            Event.new(
                EventAction.CONFIGURATION_ERROR,
                message=f"Failed to evaluate property-mapping: {str(exc)}",
                mapping=scope,
            ).from_http(self.request)
        if value is None:
            continue
        if not isinstance(value, dict):
            LOGGER.warning(
                "Scope returned a non-dict value, ignoring",
                scope=scope,
                value=value,
            )
            continue
        LOGGER.debug("updated scope", scope=scope)
        always_merger.merge(final_claims, value)
    return final_claims
def test_example():
    base = {"foo": "value", "baz": ["a"]}
    next = {"bar": "value2", "baz": ["b"]}
    always_merger.merge(base, next)
    assert base == {"foo": "value", "bar": "value2", "baz": ["a", "b"]}
def transformInterface(self, interfaces):
    for interface in interfaces:
        intName = self.getInterfaceName(interface)
        self._data[intName]["name"] = intName
        print("processing {0}".format(intName))
        for child in interface.re_search_children(r".*"):
            line = child.text.strip()
            found = False
            for regex in self._tgt.matrix:
                match = self.getRegex(regex).match(line)
                if match is not None:
                    found = True
                    retval = self._tgt.matrix[regex](line, match, interface)
                    if retval:
                        if isinstance(retval, list):
                            for item in retval:
                                self._data[intName] = always_merger.merge(
                                    self._data[intName], item)
                        else:
                            self._data[intName] = always_merger.merge(
                                self._data[intName], retval)
            if not found:
                raise Exception("Unknown config line '{0}'".format(line))
def merge(sdm):
    from deepmerge import always_merger
    new_dict = _read_dict()
    for key, value in new_dict.items():
        with sdm.mutable(key, {}) as conf:
            always_merger.merge(conf, value)
def action_download(args):
    init_api_args(args)
    result = []
    for i, lang in enumerate(args.langs):
        # load all translations
        data = request_export(lang, **{**vars(args), **{'tag': None}})
        # load master translations
        data_master = request_export(
            lang, **{**vars(args), **{'tag': args.main_tag}}
        )
        # load tag translations if needed
        data_tag = {}
        if args.tag != args.main_tag:
            data_tag = request_export(lang, **vars(args))
        data = always_merger.merge(data, data_master)
        data = always_merger.merge(data, data_tag)
        result.append(data)
        if args.write:
            if args.files:
                write_json(args.files[i], data)
            else:
                write_sliced_i18n_json(lang, data)
    if not args.write:
        return dump_json_output(result)
def get(self, request: HttpRequest) -> HttpResponse:
    """Apply data to the current flow based on a URL"""
    stage: InvitationStage = self.executor.current_stage
    token = self.get_token()
    if not token:
        # No Invitation was given, raise error or continue
        if stage.continue_flow_without_invitation:
            return self.executor.stage_ok()
        return self.executor.stage_invalid()
    invite: Invitation = Invitation.objects.filter(pk=token).first()
    if not invite:
        LOGGER.debug("invalid invitation", token=token)
        if stage.continue_flow_without_invitation:
            return self.executor.stage_ok()
        return self.executor.stage_invalid()
    self.executor.plan.context[INVITATION_IN_EFFECT] = True
    self.executor.plan.context[INVITATION] = invite

    context = {}
    always_merger.merge(
        context, self.executor.plan.context.get(PLAN_CONTEXT_PROMPT, {}))
    always_merger.merge(context, invite.fixed_data)
    self.executor.plan.context[PLAN_CONTEXT_PROMPT] = context

    invitation_used.send(sender=self, request=request, invitation=invite)
    if invite.single_use:
        self.executor.plan.append_stage(
            in_memory_stage(InvitationFinalStageView))
    return self.executor.stage_ok()
def multilingual_analysis(type=None, resource=None, id=None, json_pointer=None,
                          app=None, content=None, root=None,
                          content_pointer=None):
    """Use this function as handler."""
    languages = list(app.config.get("MULTILINGUAL_SUPPORTED_LANGUAGES", []))
    analyzer = app.config.get("ELASTICSEARCH_LANGUAGE_ANALYSIS", {})
    analysis_list = []
    for language in languages:
        if id is not None:
            language_with_context = language + '#' + id
            if language_with_context in analyzer:
                analysis_list.append(analyzer[language_with_context])
            elif language in analyzer:
                analysis_list.append(analyzer[language])
    if not analysis_list:
        return {}
    result = {}
    for i in analysis_list:
        always_merger.merge(result, i)
    return result
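# Hypothetical usage sketch for multilingual_analysis (config values made up,
# and assuming the elif falls back to the plain language key when no
# field-specific "language#id" override exists): "cs" picks the "cs#title"
# override, "en" falls back to the plain "en" entry, and the fragments are
# deep-merged into one mapping.
class _FakeApp:
    config = {
        "MULTILINGUAL_SUPPORTED_LANGUAGES": ["cs", "en"],
        "ELASTICSEARCH_LANGUAGE_ANALYSIS": {
            "cs#title": {"cs": {"type": "text", "analyzer": "czech"}},
            "en": {"en": {"type": "text", "analyzer": "english"}},
        },
    }

print(multilingual_analysis(id="title", app=_FakeApp()))
# -> {'cs': {'type': 'text', 'analyzer': 'czech'},
#     'en': {'type': 'text', 'analyzer': 'english'}}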
def url_helper(meths):
    no_rule = True
    route_rule = ""
    method = {}
    permissions = allow_all
    serializers_list = []
    serializers = {}
    detail = True
    for meth in meths:
        if no_rule:
            if 'detail' in meth[2]:
                pom = meth[3]
                route_rule = pom['list_route'] + str(meth[0])
                no_rule = False
                detail = False
            else:
                pom = meth[3]
                route_rule = pom['item_route'] + str(meth[0])
                no_rule = False
        method.update({meth[1]: meth[4]})
        try:
            serializers_list.append(meth[3]['serializers'])
        except KeyError:
            pass
    if not serializers_list:
        serializers = default_serializer
    else:
        for i in serializers_list:
            always_merger.merge(serializers, i)
    return route_rule, method, permissions, serializers, detail
def deep_merge_dictionaries(items: list):
    base = {}
    for item in items:
        # Copy before each merge so nested values pulled in from earlier
        # items are never mutated in place by later merges.
        base = deepcopy(base)
        always_merger.merge(base, item)
    return base
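# Hypothetical usage sketch (inputs made up): later items win on scalar
# conflicts, lists are concatenated, and the input dicts stay untouched.
defaults = {"db": {"host": "localhost", "opts": ["tls"]}}
overrides = {"db": {"host": "prod.example.com", "opts": ["pool"]}}
merged = deep_merge_dictionaries([defaults, overrides])
assert merged == {"db": {"host": "prod.example.com", "opts": ["tls", "pool"]}}
assert defaults == {"db": {"host": "localhost", "opts": ["tls"]}}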
def stage_change(self, entity_path, payload, update=True):
    if update:
        always_merger.merge(
            self.staged_changes.setdefault(entity_path, {}), payload
        )
    else:
        self.staged_changes[entity_path] = payload
def group_attributes(self) -> dict[str, Any]:
    """Get a dictionary containing the attributes from all groups the user
    belongs to, including the user's attributes"""
    final_attributes = {}
    for group in self.ak_groups.all().order_by("name"):
        always_merger.merge(final_attributes, group.attributes)
    always_merger.merge(final_attributes, self.attributes)
    return final_attributes
def load(self, build_name):
    self.name = build_name

    # Read the configuration
    with open(self.config_file, encoding="utf-8") as f:
        config = yaml.safe_load(f)

    # The configuration root should contain a mandatory element "builds", and
    # it should not be empty.
    if not config.get("builds", None):
        raise AssertionError(
            "Invalid configuration file {}: the \"builds\" element is missing or empty"
            .format(str(self.config_file)))

    # Check the target build has an entry in the configuration file
    build = config["builds"].get(self.name, None)
    if not build:
        raise AssertionError(
            "{} is not a valid build identifier. Valid identifiers are {}".
            format(self.name, list(config["builds"].keys())))

    # Get a list of the templates, if any
    templates = config.get("templates", {})

    # If the build references some templates, merge all the configurations.
    # The merge is applied in the same order as the templates are declared
    # in the template list.
    template_config = {}
    template_names = build.get("templates", [])
    for template_name in template_names:
        # Raise an error if the template does not exist
        if template_name not in templates:
            raise AssertionError(
                "Build {} configuration inherits from template {}, but the template does not exist."
                .format(self.name, template_name))
        always_merger.merge(template_config, templates.get(template_name))
    self.config = always_merger.merge(template_config, build)

    # Create the build directory as needed
    self.build_directory = Path(
        self.project_root.joinpath('abc-ci-builds', self.name))

    # Define the junit and logs directories
    self.junit_reports_dir = self.build_directory.joinpath("test/junit")
    self.test_logs_dir = self.build_directory.joinpath("test/log")
    self.functional_test_logs = self.build_directory.joinpath(
        "test/tmp/test_runner_*")

    # We will provide the required environment variables
    self.environment_variables = {
        "BUILD_DIR": str(self.build_directory),
        "CMAKE_PLATFORMS_DIR": self.project_root.joinpath("cmake", "platforms"),
        "THREADS": str(self.jobs),
        "TOPLEVEL": str(self.project_root),
    }
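# Illustrative sketch (data made up) of the merge order the loader above
# applies: template configurations are merged first, in declaration order,
# then the build's own values are merged last so they take precedence.
from deepmerge import always_merger

templates = {
    "common": {"env": {"THREADS": "4"}, "targets": ["all"]},
    "debug": {"env": {"CMAKE_BUILD_TYPE": "Debug"}},
}
build = {"templates": ["common", "debug"], "env": {"THREADS": "8"}}

merged = {}
for name in build["templates"]:
    always_merger.merge(merged, templates[name])
merged = always_merger.merge(merged, build)
assert merged["env"] == {"THREADS": "8", "CMAKE_BUILD_TYPE": "Debug"}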
def run(self, context):
    def evaluate_params(parameters, context):
        """Recursively evaluates parameter dict by parsing with Jinja2
        and reparsing with YAML
        """
        evaluated_params = {}
        for key, val in parameters.items():
            if isinstance(val, str):
                if not isinstance(val, scriptengine.yaml.NoParseJinjaString):
                    # Make sure that a NoParseString is still a
                    # NoParseString after this!
                    val = type(val)(scriptengine.jinja.render(val, context))
                if not isinstance(val, scriptengine.yaml.NoParseYamlString):
                    # Try to reload the item through YAML, in order to
                    # get the right type
                    # Otherwise, everything would be just a string
                    try:
                        val = yaml.full_load(val)
                    # However, it may really be a string that is
                    # no valid YAML!
                    except (yaml.parser.ParserError,
                            yaml.constructor.ConstructorError):
                        self.log_debug(f'Reparsing argument "{val}" '
                                       'with YAML failed')
                # For consistency, we always return plain strings
                if isinstance(val, scriptengine.yaml.NoParseString):
                    val = str(val)
                evaluated_params[key] = val
            elif isinstance(val, dict):
                evaluated_params[key] = evaluate_params(val, context)
            else:
                evaluated_params[key] = val
        return evaluated_params

    params = {
        key: val
        for key, val in self.__dict__.items()
        if not (isinstance(key, str) and key.startswith('_'))
    }
    self.log_info(f'{", ".join([f"{k}={params[k]}" for k in params])}')
    add_to_context = evaluate_params(params, context)
    self.log_debug(f'Adding to context: {add_to_context}')
    always_merger.merge(context, add_to_context)
def load(self, build_name):
    self.name = build_name

    # Read the configuration
    with open(self.config_file, encoding="utf-8") as f:
        config = json.load(f)

    # The configuration root should contain a mandatory element "builds", and
    # it should not be empty.
    if not config.get("builds", None):
        raise AssertionError(
            "Invalid configuration file {}: the \"builds\" element is missing or empty"
            .format(str(self.config_file)))

    # Check the target build has an entry in the configuration file
    build = config["builds"].get(self.name, None)
    if not build:
        raise AssertionError(
            "{} is not a valid build identifier. Valid identifiers are {}".
            format(self.name, list(config["builds"].keys())))

    # Get a list of the templates, if any
    templates = config.get("templates", {})

    # If the build references some templates, merge all the configurations.
    # The merge is applied in the same order as the templates are declared
    # in the template list.
    template_config = {}
    template_names = build.get("templates", [])
    for template_name in template_names:
        # Raise an error if the template does not exist
        if template_name not in templates:
            raise AssertionError(
                "Build {} configuration inherits from template {}, but the template does not exist."
                .format(self.name, template_name))
        always_merger.merge(template_config, templates.get(template_name))
    self.config = always_merger.merge(template_config, build)

    # Make sure there is a script file associated with the build...
    script = self.config.get("script", None)
    if script is None:
        raise AssertionError("No script provided for the build {}".format(
            self.name))

    # ... and that the script file can be executed
    self.script_path = Path(self.script_root.joinpath(script))
    if not self.script_path.is_file() or not os.access(
            self.script_path, os.X_OK):
        raise FileNotFoundError(
            "The script file {} does not exist or does not have execution permission"
            .format(str(self.script_path)))
def __prepare_xml(submission: dict) -> dict:
    submission_copy = copy.deepcopy(submission)
    for k, v in submission_copy.items():
        if '/' not in k:
            continue
        value = v
        for key in reversed(k.strip('/').split('/')):
            value = {key: value}
        always_merger.merge(submission, value)
        del submission[k]
    return submission
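# Illustrative walk-through (input made up, and assuming module-level access
# to the private helper above): a flat slash-delimited key is rebuilt
# inside-out into nested dicts, deep-merged into the submission, and the flat
# key is then dropped.
submission = {"sample/organism/name": "E. coli", "title": "run-1"}
print(__prepare_xml(submission))
# -> {'title': 'run-1', 'sample': {'organism': {'name': 'E. coli'}}}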
def getenvars(variables=None):
    defaults = {
        'bitfinex': {
            'api': {
                'key': '',
                'key_file': None,
                'secret': '',
                'secret_file': None,
            }
        },
        'influxdb': {
            'host': 'influxdb',
            'port': 8086,
            'timeout': 5,
            'username': None,
            'password': None,
            'password_file': None,
            'database': None,
            'measurement': None,
        },
        'logging': {
            'level': logging.INFO,
            'format': "[%(asctime)s] %(levelname)s [%(name)s.%(module)s.%(funcName)s:%(lineno)d] %(message)s",
            'datefmt': '%Y-%m-%d %H:%M:%S',
        },
        'mongodb': {
            'host': 'mongodb',
            'port': 27017,
            'username': None,
            'password': None,
            'password_file': None,
            'database': None,
        },
        'mysql': {
            'host': 'mysql',
            'port': 3306,
            'username': None,
            'password': None,
            'password_file': None,
            'database': None,
            "charset": "utf8mb4",
        },
        'delay': 30,
        'test': 'sometest',
    }
    # merge given dict with defaults
    always_merger.merge(defaults, variables if variables else {})
    variables = _flatten_vars_dict(defaults, '', {})
    return variables
def __init__(self, envirocar: bool = False, carfueldata: bool = False,
             matchedData: bool = False):
    envirocarSensorsInDB = Envirocar().get_sensors(
        ordered=True) if envirocar else []
    cfdCarsInDB = CarFuelData().get_cfd_data(
        ordered=True) if carfueldata else []
    self.combinedCars = {}
    always_merger.merge(self.combinedCars, envirocarSensorsInDB)
    always_merger.merge(self.combinedCars, cfdCarsInDB)
    self.processedCars = {}
    self.old_manufacturer_matches = {}
    self.total_progress = None
def add_schemas_from_descriptor(name: str, descriptor: dict,
                                swag: dict = None) -> dict:
    # A mutable default argument ({}) would be shared across calls, so the
    # default is created per call instead.
    swag = swag if swag is not None else {}
    lower_case_name = name.lower()
    sentence_case_name = name.capitalize()

    # construct each property from desc file
    generated_properties = generate_properties_from_desc(name, descriptor)

    # construct from descriptor file
    schemas = {
        "components": {
            "schemas": {
                **generated_properties,
                f"All{sentence_case_name}": {
                    "type": "object",
                    "properties": {
                        f"{lower_case_name}": {
                            "type": "array",
                            "items": {
                                "$ref": f"#/components/schemas/{sentence_case_name}"
                            },
                        },
                        "links": {
                            "type": "array",
                            "items": {"$ref": "#/components/schemas/Links"},
                        },
                    },
                },
            }
        }
    }
    swag = always_merger.merge(swag, schemas)
    return swag
def pager(items_count: int, item_per_page: int,
          func: Callable[[int, int], dict]) -> dict:
    """
    A pager which executes a function on every page with the start and end
    item indices and deep merges the returned responses.

    :param items_count: total number of items
    :param item_per_page: number of items per page
    :param func: the function called for each page with
        [start_index, end_index]; returns a response dict
    :return: all merged responses
    """
    page_count = items_count // item_per_page
    remainder = items_count % item_per_page
    # Only add a trailing page when a partial page of leftovers exists
    # (avoids an empty out-of-range page when items divide evenly).
    total_pages = page_count + (1 if remainder else 0)
    response_all = {}
    for page_index in range(total_pages):
        start_index = page_index * item_per_page
        end_index = start_index + item_per_page - 1
        if remainder and page_index == page_count:
            # Last partial page: the inclusive end index covers only the
            # leftover items.
            end_index = start_index + remainder - 1
        response_all = always_merger.merge(response_all,
                                           func(start_index, end_index))
    return response_all
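# Hypothetical usage sketch: a fake page fetcher (made up for illustration)
# returns a dict fragment per page; the pager deep-merges the fragments, so
# the per-page lists are concatenated into one.
def fetch_page(start: int, end: int) -> dict:
    return {"items": [f"item-{i}" for i in range(start, end + 1)]}

merged = pager(items_count=5, item_per_page=2, func=fetch_page)
assert merged == {"items": ["item-0", "item-1", "item-2",
                            "item-3", "item-4"]}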
def get(self, country_codes: dict) -> dict:
    """
    The entry point to get all the Eurostat data at once. First it ensures
    the temp folder is empty by cleaning and/or recreating it.

    :param country_codes: a valid CountryObject holding the country codes
    :return: a dictionary holding the desired Eurostat oil data, with values
        ranging from 1994 until today
    :rtype: dict
    """
    es_data = dict()
    file_management.create_directory(temp_folder)
    file_management.clean_directory(temp_folder)
    if self.history1994 and not self.history2005:
        es_data.update(HistoryOil1994().get(country_codes=country_codes))
    elif self.history2005 and not self.history1994:
        es_data.update(HistoryOil2005().get(country_codes=country_codes))
    elif self.history1994 and self.history2005:
        history1994 = HistoryOil1994().get(country_codes=country_codes)
        history2005 = HistoryOil2005().get(country_codes=country_codes)
        es_data.update(always_merger.merge(history1994, history2005))
    return es_data
def main():
    ################################################################################################
    #### Pre-processing
    ################################################################################################
    cli_args = docopt(__doc__, argv=None, help=True, version=None,
                      options_first=False)

    print("""
    ####################################
    #                                  #
    #     POSTMAN TO ROBOTFRAMEWORK    #
    #                                  #
    ####################################
    """)
    print("{tag} Use {color} postman2robot -h {reset} to learn how to use the command line"
          .format(tag=tag.info, color=fg.green, reset=ft.reset))

    rc_file = ".postman2robotrc"
    if os.path.exists(rc_file):
        print("{tag} Loading runtime config from the {rc_file} file {reset}"
              .format(tag=tag.info, rc_file=rc_file, reset=ft.reset))
        with open(rc_file) as f:
            cli_args_rc = json.load(f)
        cli_args = always_merger.merge(cli_args, cli_args_rc)
    else:
        print("{tag} You can use a {color}{rc_file} file{reset} to avoid typing options every time"
              .format(tag=tag.info, color=fg.green, rc_file=rc_file, reset=ft.reset))

    run(cli_args)
def setup(self, is_new_instance):
    """setup(is_new_instance)

    This is called before user-data and vendor-data have been processed.

    Unless the datasource has set mode to 'local', then networking
    per 'fallback' or per 'network_config' will have been written and
    brought up by the OS at this point.
    """
    # Get information about the host.
    host_info = get_host_info()
    LOG.info("got host-info: %s", host_info)

    # Reflect any possible local IPv4 or IPv6 addresses in the guest
    # info.
    advertise_local_ip_addrs(host_info)

    # Ensure the metadata gets updated with information about the
    # host, including the network interfaces, default IP addresses,
    # etc.
    self.metadata = always_merger.merge(self.metadata, host_info)

    # Persist the instance data for versions of cloud-init that support
    # doing so. This occurs here rather than in the get_data call in
    # order to ensure that the network interfaces are up and can be
    # persisted with the metadata.
    try:
        self.persist_instance_data()
    except AttributeError:
        pass
def read(self) -> Dict[str, Any]:
    """Read configuration from file

    Read and parse yaml configuration file by rule:
    `<directory_path>/<env>.<file_format>`

    :return: configuration dictionary
    """
    try:
        log.debug(f'Read config from {self.file_path}')
        with open(self.file_path) as config_file:
            self.__cfg = yaml.load(config_file, Loader=yaml.FullLoader)
        log.debug(f"Prod yml load: {self.__cfg}")
        meta = self.__cfg[META_CONFIG_FIELD].copy()
        if meta[CONFIG_SOURCE_TYPE_FIELD] in readers_types_map:
            self.__cfg = readers_types_map[
                meta[CONFIG_SOURCE_TYPE_FIELD]](self.__cfg, self.dbutils)
        if meta.get(PARENT_CONFIG_FIELD) is not None:
            self.__cfg = always_merger.merge(
                BaseYmlConfigReader(
                    env=meta.get(PARENT_CONFIG_FIELD),
                    directory_path=self.directory_path,
                    file_format=self.file_format).config,
                self.__cfg)
        log.debug(f"Prod yml res: {self.__cfg}")
        return self.__cfg
    except FileNotFoundError as ex:
        log.error(ex)
        raise ex
def merge(root, node):
    from deepmerge import always_merger
    if root is None:
        return node
    return always_merger.merge(root, node)
def run(self):
    with self.spoke_regional_client(self.client) as client:
        if self.use_paginator:
            paginator = client.get_paginator(self.call)
            result = {}
            for page in paginator.paginate(**self.arguments):
                always_merger.merge(result, page)
        else:
            f = getattr(client, self.call)
            result = f(**self.arguments)
        actual_result = jmespath.search(self.filter, result)
        if isinstance(actual_result, str):
            self.write_output(actual_result.strip())
        else:
            self.write_output(actual_result)
def single_recipe(id):
    if request.method == 'GET':
        # displays single recipe - first one that matches 'id'
        return next(recipe for recipe in data if recipe['id'] == id)
    elif request.method == 'DELETE':
        # return "Deleted"
        for ix, recipe in enumerate(data):
            if recipe['id'] == id:
                del data[ix]
        with open('data.json', 'w') as data_file:
            json.dump(data, data_file, indent=2)
        with open('data.json', 'r') as data_file:
            output = json.load(data_file)
        return jsonify(output)
    elif request.method == 'PUT':
        body = request.get_json()
        for ix, recipe in enumerate(data):
            if recipe['id'] == id:
                data[ix] = always_merger.merge(recipe, body)
        with open('data.json', 'w') as data_file:
            json.dump(data, data_file, indent=2)
        return jsonify(next(recipe for recipe in data if recipe['id'] == id))
def main():
    ''' Executed when this file is used as a program. '''
    metadata = {'network': {'config': {'dhcp': True}}}
    host_info = get_host_info()
    metadata = always_merger.merge(metadata, host_info)
    print(util.json_dumps(metadata))