def test_dict_merge_replaces_list_when_same_key_exists():
    base = dict_to_objdict({
        'field1': 'test1',
        'list': ['base1', 'base2', 'base3']
    })
    extend_by = dict_to_objdict({'list': ['replaced1', 'replaced2']})
    merge_objdict(base, extend_by)
    assert base.field1 == 'test1'
    assert base.list == ['replaced1', 'replaced2']

def get_vm(self, component_key, component_value, vm_config, network_interface_name, index):
    vm = dict_to_objdict(deepcopy(vm_config))
    vm.specification.name = resource_name(self.cluster_prefix, self.cluster_name, 'vm' + '-' + str(index), component_key)
    vm.specification.admin_username = self.cluster_model.specification.admin_user.name
    vm.specification.network_interface_name = network_interface_name
    vm.specification.tags.append({'cluster': cluster_tag(self.cluster_prefix, self.cluster_name)})
    vm.specification.tags.append({component_key: ''})
    if vm.specification.os_type == 'linux':
        # For Linux we don't need a password since we only support SSH. We add something
        # random for Terraform to run and later disable password access in Ansible.
        vm.specification.admin_password = str(uuid.uuid4())
    if vm.specification.os_type == 'windows':
        raise NotImplementedError('Windows VMs are not supported yet.')
    pub_key_path = self.cluster_model.specification.admin_user.key_path + '.pub'
    if os.path.isfile(pub_key_path):
        vm.specification.public_key = pub_key_path
    else:
        raise Exception(f'SSH key path "{pub_key_path}" is not valid. Ansible run will fail.')
    return vm

def safe_load_all(stream):
    yaml = YAML()
    yaml.default_flow_style = False
    docs = list(yaml.load_all(stream))
    conv_docs = []
    for doc in docs:
        conv_docs.append(dict_to_objdict(doc))
    return conv_docs

def __init__(self, cluster_model, validation_docs):
    super().__init__(__name__)
    self.cluster_model = cluster_model
    self.validation_docs = validation_docs
    base = load_yaml_obj(types.VALIDATION, self.cluster_model.provider, 'core/base')
    self.definitions = load_yaml_obj(types.VALIDATION, self.cluster_model.provider, 'core/definitions')
    self.base_schema = dict_to_objdict(deepcopy(base))
    self.base_schema['definitions'] = self.definitions
    self.base_schema_no_provider = dict_to_objdict(deepcopy(base))
    self.base_schema_no_provider['definitions'] = self.definitions
    del self.base_schema_no_provider.required[0]
    del self.base_schema_no_provider.properties['provider']

def test_get_security_group_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    subnet = dict_to_objdict({'specification': {'cidr_block': '10.21.0.0/24'}})
    builder = InfrastructureBuilder([cluster_model])
    actual = builder.get_security_group(subnet, 'component', 'my-test-vpc', 1)
    assert actual.specification.name == 'prefix-testcluster-component-security-group-1'
    assert actual.specification.vpc_name == 'my-test-vpc'
    assert actual.specification.cidr_block == '10.21.0.0/24'

def test_dict_to_objdict():
    base = {'field1': {'field2': {'field3': {'field4': 'val'}}}}
    converted = dict_to_objdict(base)
    assert type(converted) is ObjDict
    assert type(converted.field1) is ObjDict
    assert type(converted.field1.field2) is ObjDict
    assert type(converted.field1.field2.field3) is ObjDict
    assert type(converted.field1.field2.field3.field4) is str
    assert converted.field1.field2.field3.field4 == 'val'

def test_get_launch_configuration_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    autoscaling_group = dict_to_objdict({'specification': {'size': 't2.micro.test'}})
    security_groups_to_create = dict_to_objdict({'specification': {'name': 'aws-security-group-test'}})
    builder = InfrastructureBuilder([cluster_model])
    actual = builder.get_launch_configuration(autoscaling_group, 'TestComponent', security_groups_to_create)
    assert actual.specification.name == 'prefix-testcluster-testcomponent-launch-config'
    assert actual.specification.size == 't2.micro.test'
    assert actual.specification.security_groups == ['aws-security-group-test']

def test_dict_merge_updates_nested_object():
    base = dict_to_objdict({
        'field1': 'test1',
        'complex1': {
            'nested_field1': 'nested_val1',
            'nested_field2': 'nested_val2'
        }
    })
    extend_by = dict_to_objdict({
        'complex1': {
            'nested_field1': 'nested_val3',
            'nested_field2': 'nested_val4'
        }
    })
    merge_objdict(base, extend_by)
    assert base.field1 == 'test1'
    assert base.complex1.nested_field1 == 'nested_val3'
    assert base.complex1.nested_field2 == 'nested_val4'

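# A minimal sketch of what merge_objdict must do to satisfy the two merge tests
# above: nested dicts are merged key by key, while any other value (including
# lists) replaces the existing one outright. The project's real implementation
# may differ in details; this sketch is illustrative only.
def merge_objdict_sketch(to_merge, extend_by):
    for key, value in extend_by.items():
        if key in to_merge and isinstance(to_merge[key], dict) and isinstance(value, dict):
            merge_objdict_sketch(to_merge[key], value)  # recurse into nested objects
        else:
            to_merge[key] = value  # lists and scalars are replaced, not appended
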
def test_get_autoscaling_group_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    component_value = dict_to_objdict({'machine': 'default', 'count': 4})
    builder = InfrastructureBuilder([cluster_model])
    actual = builder.get_autoscaling_group('TestComponent', component_value, 'my-test-subnet')
    assert actual.specification.name == 'prefix-testcluster-testcomponent-asg'
    assert actual.specification.count == 4
    assert actual.specification.subnet == 'my-test-subnet'

def test_get_subnet_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    subnet_definition = dict_to_objdict({
        'address_pool': '10.20.0.0/24',
        'availability_zone': 'eu-west-2a'
    })
    builder = InfrastructureBuilder([cluster_model])
    actual = builder.get_subnet(subnet_definition, 'component', 1)
    assert actual.specification.name == 'prefix-testcluster-component-subnet-1'
    assert actual.specification.address_prefix == subnet_definition['address_pool']
    assert actual.specification.cluster_name == 'testcluster'

def test_get_public_ip_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    builder = InfrastructureBuilder([cluster_model])
    component_value = dict_to_objdict({'machine': 'kubernetes-master-machine'})
    vm_config = builder.get_virtual_machine(component_value, cluster_model, [])
    actual = builder.get_public_ip('kubernetes_master', component_value, vm_config, 1)
    assert actual.specification.name == 'prefix-testcluster-kubernetes-master-pubip-1'
    assert actual.specification.allocation_method == 'Static'
    assert actual.specification.idle_timeout_in_minutes == 30
    assert actual.specification.sku == 'Standard'

def get_cluster_model(address_pool='10.22.0.0/22', cluster_name='EpiphanyTestCluster'):
    cluster_model = dict_to_objdict({
        'kind': 'epiphany-cluster',
        'provider': 'aws',
        'specification': {
            'name': cluster_name,
            'prefix': 'prefix',
            'cloud': {
                'vnet_address_pool': address_pool
            }
        }
    })
    return cluster_model

def test_dict_to_objdict_different_dict_types():
    base = {
        'field1': ObjDict({
            'field2': {
                'field3': OrderedDict({'field4': 'val'})
            }
        })
    }
    converted = dict_to_objdict(base)
    assert type(converted) is ObjDict
    assert type(converted.field1) is ObjDict
    assert type(converted.field1.field2) is ObjDict
    assert type(converted.field1.field2.field3) is ObjDict
    assert type(converted.field1.field2.field3.field4) is str
    assert converted.field1.field2.field3.field4 == 'val'

def get_cluster_model(address_pool='10.22.0.0/22', cluster_name='EpiphanyTestCluster'):
    cluster_model = dict_to_objdict({
        'kind': 'epiphany-cluster',
        'provider': 'azure',
        'specification': {
            'name': cluster_name,
            'prefix': 'prefix',
            'cloud': {
                'region': 'West Europe',
                'vnet_address_pool': address_pool,
                'use_public_ips': True
            }
        }
    })
    return cluster_model

def get_vm(self, component_key, component_value, vm_config, availability_set, network_interface_name, index):
    vm = dict_to_objdict(deepcopy(vm_config))
    vm.specification.name = resource_name(self.cluster_prefix, self.cluster_name, 'vm' + '-' + str(index), component_key)
    vm.specification.admin_username = self.cluster_model.specification.admin_user.name
    vm.specification.network_interface_name = network_interface_name
    vm.specification.tags.append({'cluster': cluster_tag(self.cluster_prefix, self.cluster_name)})
    vm.specification.tags.append({component_key: ''})
    if vm.specification.os_type == 'windows':
        raise NotImplementedError('Windows VMs are not supported yet.')
    pub_key_path = self.cluster_model.specification.admin_user.key_path + '.pub'
    if os.path.isfile(pub_key_path):
        vm.specification.public_key = pub_key_path
    else:
        raise Exception(f'SSH key path "{pub_key_path}" is not valid. Ansible run will fail.')
    if availability_set is not None:
        vm.specification.availability_set_name = availability_set.specification.name
    return vm

def get_vpc_id(self):
    vpc_config = dict_to_objdict(select_single(self.config_docs, lambda x: x.kind == 'infrastructure/vpc'))
    ec2 = self.session.resource('ec2')
    filters = [{'Name': 'tag:Name', 'Values': [vpc_config.specification.name]}]
    vpcs = list(ec2.vpcs.filter(Filters=filters))
    if len(vpcs) == 1:
        return vpcs[0].id
    raise Exception(f'Expected 1 VPC matching tag Name: {vpc_config.specification.name} but received: {len(vpcs)}')

def test_get_network_interface_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    builder = InfrastructureBuilder([cluster_model])
    component_value = dict_to_objdict({'machine': 'kubernetes-master-machine'})
    vm_config = builder.get_virtual_machine(component_value, cluster_model, [])
    actual = builder.get_network_interface(
        'kubernetes_master', component_value, vm_config,
        'prefix-testcluster-component-subnet-1',
        'prefix-testcluster-component-sg-1',
        'prefix-testcluster-kubernetes-master-pubip-1', 1)
    assert actual.specification.name == 'prefix-testcluster-kubernetes-master-nic-1'
    assert actual.specification.security_group_name == 'prefix-testcluster-component-sg-1'
    assert actual.specification.ip_configuration_name == 'prefix-testcluster-kubernetes-master-ipconf-1'
    assert actual.specification.subnet_name == 'prefix-testcluster-component-subnet-1'
    assert actual.specification.use_public_ip == True
    assert actual.specification.public_ip_name == 'prefix-testcluster-kubernetes-master-pubip-1'
    assert actual.specification.enable_accelerated_networking == False

def test_get_subnet_config_should_set_proper_values_to_model():
    cluster_model = get_cluster_model(cluster_name='TestCluster')
    component_value = dict_to_objdict({
        'address_pool': '10.20.0.0/24',
        'availability_zone': 'eu-west-2a'
    })
    builder = InfrastructureBuilder([cluster_model])
    actual = builder.get_subnet(component_value, 'component', 'my-test-vpc', 1)
    assert actual.specification.name == 'prefix-testcluster-component-subnet-1'
    assert actual.specification.vpc_name == 'my-test-vpc'
    assert actual.specification.cidr_block == '10.20.0.0/24'
    assert actual.specification.availability_zone == 'eu-west-2a'

def get_autoscaling_group(self, component_key, component_value, subnets_to_create, index):
    autoscaling_group = dict_to_objdict(deepcopy(self.get_virtual_machine(component_value, self.cluster_model, self.docs)))
    autoscaling_group.specification.cluster_name = self.cluster_name
    autoscaling_group.specification.name = resource_name(self.cluster_prefix, self.cluster_name, 'asg' + '-' + str(index), component_key)
    autoscaling_group.specification.count = component_value.count
    autoscaling_group.specification.subnet_names = [s.specification.name for s in subnets_to_create]
    autoscaling_group.specification.availability_zones = list(set(s.specification.availability_zone for s in subnets_to_create))
    autoscaling_group.specification.tags.append({'cluster_name': self.cluster_name})
    autoscaling_group.specification.tags.append({component_key: ''})
    return autoscaling_group

def test_dict_to_objdict_nested_with_lists():
    base = {
        'field1': [
            {
                'field2': {
                    'field3': [
                        {'field4': 'val'},
                    ]
                }
            },
        ]
    }
    converted = dict_to_objdict(base)
    assert type(converted) is ObjDict
    assert type(converted.field1) is list
    assert type(converted.field1[0]) is ObjDict
    assert type(converted.field1[0].field2) is ObjDict
    assert type(converted.field1[0].field2.field3) is list
    assert type(converted.field1[0].field2.field3[0]) is ObjDict
    assert type(converted.field1[0].field2.field3[0].field4) is str
    assert converted.field1[0].field2.field3[0].field4 == 'val'

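# A minimal sketch of a dict_to_objdict implementation consistent with the three
# conversion tests above: any mapping (dict, OrderedDict, ObjDict) becomes an
# ObjDict, recursing through nested dicts and through dicts held inside lists,
# while scalars pass through unchanged. The project's real implementation may differ.
def dict_to_objdict_sketch(value):
    if isinstance(value, dict):  # covers dict subclasses such as OrderedDict and ObjDict
        return ObjDict({key: dict_to_objdict_sketch(item) for key, item in value.items()})
    if isinstance(value, list):
        return [dict_to_objdict_sketch(item) for item in value]
    return value
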
def upgrade(self):
    inventory_path = get_inventory_path_for_build(self.backup_build_dir)
    build_version = check_build_output_version(self.backup_build_dir)
    self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
    loaded_inventory = InventoryManager(loader=DataLoader(), sources=inventory_path)

    # Move the loaded inventory into the templating structure.
    new_inventory = []
    for key in loaded_inventory.groups:
        if key != 'all' and key != 'ungrouped':
            group_hosts = loaded_inventory.groups[key].hosts
            new_hosts = []
            for host in group_hosts:
                new_hosts.append(AnsibleHostModel(host.address, host.vars['ansible_host']))
            new_inventory.append(AnsibleInventoryItem(key, new_hosts))

    if build_version == BUILD_LEGACY:
        self.logger.info('Upgrading Ansible inventory of Epiphany < 0.3.0')
        # Epiphany < 0.3.0 had no manifest file in the build folder, so create a
        # bare-minimum cluster model from the inventory.
        self.cluster_model = dict_to_objdict({
            'provider': 'any',
            'specification': {
                'admin_user': {
                    'name': loaded_inventory.groups['all'].vars['ansible_user'],
                    'key_path': loaded_inventory.groups['all'].vars['ansible_ssh_private_key_file']
                }
            }
        })

        # Remap roles.
        self.rename_role(new_inventory, 'master', 'kubernetes_master')
        self.rename_role(new_inventory, 'worker', 'kubernetes_node')
        self.rename_role(new_inventory, 'deployments', 'applications')
        self.rename_role(new_inventory, 'elasticsearch-curator', 'elasticsearch_curator')
        self.rename_role(new_inventory, 'jmx-exporter', 'jmx_exporter')
        self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter')
        self.rename_role(new_inventory, 'haproxy_tls_termination', 'haproxy')

        # Remove the linux and reboot roles if present.
        self.delete_role(new_inventory, 'linux')
        self.delete_role(new_inventory, 'reboot')
    else:
        self.logger.info('Upgrading Ansible inventory of Epiphany >= 0.3.0')
        # Load the cluster model from the manifest.
        self.manifest_docs = load_manifest_docs(self.backup_build_dir)
        self.cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Merge the manifest cluster config with the newer defaults.
        default_cluster_model = load_yaml_obj(data_types.DEFAULT, 'common', 'epiphany-cluster')
        merge_objdict(default_cluster_model, self.cluster_model)
        self.cluster_model = default_cluster_model

    # Check that the repository roles are present and add them if not.
    master = self.get_role(new_inventory, 'kubernetes_master')
    if master is None:
        raise Exception('No kubernetes_master to use as repository')
    master_node = master.hosts[0]

    # Add image_registry.
    image_registry = self.get_role(new_inventory, 'image_registry')
    if image_registry is None:
        hosts = [AnsibleHostModel(master_node.name, master_node.ip)]
        new_inventory.append(AnsibleInventoryItem('image_registry', hosts))

    # Add repository.
    repository = self.get_role(new_inventory, 'repository')
    if repository is None:
        hosts = [AnsibleHostModel(master_node.name, master_node.ip)]
        new_inventory.append(AnsibleInventoryItem('repository', hosts))

    # Save the new inventory.
    save_inventory(new_inventory, self.cluster_model, self.build_dir)
    return 0

def safe_load_all(stream):
    docs = list(yaml.safe_load_all(stream))
    conv_docs = []
    for doc in docs:
        conv_docs.append(dict_to_objdict(doc))
    return conv_docs

def safe_load(stream):
    doc = yaml.safe_load(stream)
    return dict_to_objdict(doc)

def load_json_obj(path_to_file):
    with open(path_to_file, 'r') as stream:
        obj = json.load(stream)
        return dict_to_objdict(obj)

def safe_load(stream):
    yaml = YAML()
    yaml.default_flow_style = False
    doc = yaml.load(stream)
    return dict_to_objdict(doc)
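
# A minimal usage sketch for the loaders above. The YAML snippet and key names
# are hypothetical; it assumes only that safe_load returns an ObjDict with
# attribute-style access, as the conversion tests earlier demonstrate.
doc = safe_load('specification:\n  name: demo\n')
assert doc.specification.name == 'demo'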