def test_dict_merge_updates_value_when_same_key_exists():
    """A key present in both dicts takes the extending dict's value; other keys are untouched."""
    target = dict_to_objdict({'field1': 'test1', 'field2': 'val'})
    overlay = dict_to_objdict({'field1': 'test22'})

    merge_objdict(target, overlay)

    assert target.field1 == 'test22'
    assert target.field2 == 'val'
def test_dict_merge_adds_key_when_is_missing():
    """Keys only present in the extending dict are added to the base dict."""
    target = dict_to_objdict({'field1': 'test1', 'field2': 'val'})
    overlay = dict_to_objdict({'field3': 'test3', 'field4': 'test4'})

    merge_objdict(target, overlay)

    assert target.field3 == 'test3'
    assert target.field4 == 'test4'
def merge_with_defaults(provider, feature_kind, config_selector):
    """Return the config doc named *config_selector* merged on top of the 'default' doc.

    Loads all default YAML objects for the given provider/feature kind. When the
    selector is 'default' the default doc is returned as-is; otherwise the selected
    doc's values are merged into (and override) the default doc.
    """
    docs = load_all_yaml_objs(types.DEFAULT, provider, feature_kind)
    selected = select_first(docs, lambda doc: doc.name == config_selector)

    # Nothing to merge when the default itself was requested.
    if config_selector == 'default':
        return selected

    merged = select_first(docs, lambda doc: doc.name == 'default')
    merge_objdict(merged, selected)
    return merged
def merge_parent(self, files, doc):
    """Recursively merge *doc* onto its 'based_on' ancestry.

    Walks up the based_on chain, merging each descendant over its parent;
    a doc without 'based_on' is merged over the 'default' doc instead.
    Returns the fully merged document.
    """
    # Base case: no parent reference — merge over the 'default' doc.
    if not hasattr(doc, 'based_on'):
        base = select_first(files, lambda x: x.name == 'default')
        merge_objdict(base, doc)
        return base

    self.logger.info(doc.name + ' is based on: ' + doc.based_on)
    parent = select_first(files, lambda x: x.name == doc.based_on)
    merged = self.merge_parent(files, parent)
    merge_objdict(merged, doc)
    return merged
def test_dict_merge_replaces_list_when_same_key_exists():
    """A list value is replaced wholesale, not appended to or element-merged."""
    target = dict_to_objdict({
        'field1': 'test1',
        'list': ['base1', 'base2', 'base3']
    })
    overlay = dict_to_objdict({'list': ['replaced1', 'replaced2']})

    merge_objdict(target, overlay)

    assert target.field1 == 'test1'
    assert target.list == ['replaced1', 'replaced2']
def test_dict_merge_updates_nested_object():
    """Nested objects are merged recursively, updating their inner fields."""
    target = dict_to_objdict({
        'field1': 'test1',
        'complex1': {
            'nested_field1': 'nested_val1',
            'nested_field2': 'nested_val2'
        }
    })
    overlay = dict_to_objdict({
        'complex1': {
            'nested_field1': 'nested_val3',
            'nested_field2': 'nested_val4'
        }
    })

    merge_objdict(target, overlay)

    assert target.field1 == 'test1'
    assert target.complex1.nested_field1 == 'nested_val3'
    assert target.complex1.nested_field2 == 'nested_val4'
def upgrade(self):
    """Upgrade a backed-up Epiphany build to the current layout.

    Loads the backup's Ansible inventory, rebuilds it into the templating
    structure, reconstructs or reloads the cluster model depending on the
    backup's build version, ensures the repository roles exist, and saves
    the new inventory into the build directory.

    Returns:
        int: 0 on success.

    Raises:
        Exception: when no 'kubernetes_master' role exists to host the repository.
    """
    inventory_path = get_inventory_path_for_build(self.backup_build_dir)
    build_version = check_build_output_version(self.backup_build_dir)

    self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
    loaded_inventory = InventoryManager(loader=DataLoader(), sources=inventory_path)

    # Move loaded inventory to templating structure
    new_inventory = []
    for key in loaded_inventory.groups:
        if key != 'all' and key != 'ungrouped':
            group_hosts = loaded_inventory.groups[key].hosts
            new_hosts = []
            for host in group_hosts:
                new_hosts.append(
                    AnsibleHostModel(host.address, host.vars['ansible_host']))
            new_inventory.append(AnsibleInventoryItem(key, new_hosts))

    if build_version == BUILD_LEGACY:
        self.logger.info('Upgrading Ansible inventory Epiphany < 0.3.0')

        # Epiphany < 0.3.0 did not have manifest file in build folder so lets
        # create bare minimum cluster model from inventory.
        all_vars = loaded_inventory.groups['all'].vars
        self.cluster_model = dict_to_objdict({
            'provider': 'any',
            'specification': {
                'admin_user': {
                    'name': all_vars['ansible_user'],
                    'key_path': all_vars['ansible_ssh_private_key_file']
                }
            }
        })

        # Remap legacy role names to their current equivalents.
        self.rename_role(new_inventory, 'master', 'kubernetes_master')
        self.rename_role(new_inventory, 'worker', 'kubernetes_node')
        self.rename_role(new_inventory, 'deployments', 'applications')
        self.rename_role(new_inventory, 'elasticsearch-curator', 'elasticsearch_curator')
        self.rename_role(new_inventory, 'jmx-exporter', 'jmx_exporter')
        self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter')
        self.rename_role(new_inventory, 'haproxy_tls_termination', 'haproxy')

        # remove linux and reboot roles if present
        self.delete_role(new_inventory, 'linux')
        self.delete_role(new_inventory, 'reboot')
    else:
        self.logger.info('Upgrading Ansible inventory Epiphany => 0.3.0')

        # load cluster model from manifest
        self.manifest_docs = load_manifest_docs(self.backup_build_dir)
        self.cluster_model = select_single(
            self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Merge manifest cluster config with newer defaults
        default_cluster_model = load_yaml_obj(data_types.DEFAULT, 'common', 'epiphany-cluster')
        merge_objdict(default_cluster_model, self.cluster_model)
        self.cluster_model = default_cluster_model

    # Check if repo roles are present and if not add them
    master = self.get_role(new_inventory, 'kubernetes_master')
    if master is None:
        raise Exception('No kubernetes_master to use as repository')
    master_node = master.hosts[0]

    # Ensure both repository-related roles exist, hosted on the first master node.
    for role_name in ('image_registry', 'repository'):
        if self.get_role(new_inventory, role_name) is None:
            hosts = [AnsibleHostModel(master_node.name, master_node.ip)]
            new_inventory.append(AnsibleInventoryItem(role_name, hosts))

    # save new inventory
    save_inventory(new_inventory, self.cluster_model, self.build_dir)

    return 0