Example #1
    def run(self):
        configuration_docs = []

        def append_config(doc):
            doc['version'] = VERSION
            configuration_docs.append(doc)

        for document_kind in ConfigurationAppender.REQUIRED_DOCS:
            doc = select_first(self.input_docs, lambda x: x.kind == document_kind)
            if doc is None:
                doc = load_yaml_obj(types.DEFAULT, 'common', document_kind)
                self.logger.info("Adding: " + doc.kind)
            append_config(doc)

        for component_key, component_value in self.cluster_model.specification.components.items():
            if component_value.count < 1:
                continue

            features_map = select_first(configuration_docs, lambda x: x.kind == 'configuration/feature-mapping')
            config_selector = component_value.configuration
            for feature_key in features_map.specification.roles_mapping[component_key]:
                config = select_first(self.input_docs, lambda x: x.kind == 'configuration/' + feature_key and x.name == config_selector)
                if config is not None:
                    append_config(config)
                if config is None:
                    config = select_first(configuration_docs,
                                          lambda x: x.kind == 'configuration/' + feature_key and x.name == config_selector)
                if config is None:
                    config = merge_with_defaults('common', 'configuration/' + feature_key, config_selector)
                    self.logger.info("Adding: " + config.kind)
                    append_config(config)

        return configuration_docs
Example #2
    def apply_efs_filesystem_id_for_k8s_pv(self, proxy):
        efs_storage_config = select_first(self.docs, lambda x: x.kind == 'infrastructure/efs-storage')
        kubernetes_config = select_first(self.docs, lambda x: x.kind == 'configuration/kubernetes-master')

        if self.should_apply_storage_settings(efs_storage_config, kubernetes_config):
            fs_id = proxy.get_efs_id_for_given_token(efs_storage_config.specification.token)
            kubernetes_config.specification.storage.data = {'server': self.get_efs_server_url(fs_id)}
Example #3
def merge_with_defaults(provider, feature_kind, config_selector):
    files = load_all_yaml_objs(types.DEFAULT, provider, feature_kind)
    config_spec = select_first(files, lambda x: x.name == config_selector)
    if config_selector != 'default':
        default_config = select_first(files, lambda x: x.name == 'default')
        merge_objdict(default_config, config_spec)
        return default_config
    return config_spec
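
A hedged usage sketch (the feature kind and selector below are hypothetical names, not from the source): when the selector isn't 'default', the named doc is merged over the defaults document, so user values win.

# Hypothetical call: resolve a named 'configuration/kafka' doc for provider
# 'common'; since 'my-kafka' != 'default', it is merged over the defaults.
config = merge_with_defaults('common', 'configuration/kafka', 'my-kafka')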
Example #4
    def merge_parent(self, files, doc):
        if hasattr(doc, 'based_on'):
            self.logger.info(doc.name + ' is based on: ' + doc.based_on)
            parent = select_first(files, lambda x: x.name == doc.based_on)
            merged_parent = self.merge_parent(files, parent)
            merge_objdict(merged_parent, doc)
            return merged_parent
        default_doc = select_first(files, lambda x: x.name == 'default')
        merge_objdict(default_doc, doc)
        return default_doc
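
Examples #3 and #4 both lean on merge_objdict; a minimal sketch of its apparent contract (values from the second argument win, nested dicts merge recursively, the first argument is updated in place). This is an assumption inferred from usage, not the project's actual implementation.

def merge_objdict(to, from_):
    # Sketch only: recursive in-place merge where "from_" overrides "to".
    for key, value in from_.items():
        if isinstance(value, dict) and isinstance(to.get(key), dict):
            merge_objdict(to[key], value)
        else:
            to[key] = value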
Example #5
    def apply_file_share_for_k8s_pv(self, proxy):
        storage_share_config = select_first(
            self.docs, lambda x: x.kind == 'infrastructure/storage-share')
        kubernetes_config = select_first(
            self.docs, lambda x: x.kind == 'configuration/kubernetes-master')

        if self.should_apply_storage_settings(storage_share_config,
                                              kubernetes_config):
            primary_key = proxy.get_storage_account_primary_key(
                storage_share_config.specification.storage_account_name)
            kubernetes_config.specification.storage.data = {
                'storage_account_name': storage_share_config.specification.storage_account_name,
                'storage_account_key': primary_key
            }
Example #6
    def login_account(self):
        subscription_name = self.cluster_model.specification.cloud.subscription_name
        all_subscription = self.run(self, 'az login')
        subscription = select_first(all_subscription, lambda x: x['name'] == subscription_name)
        if subscription is None:
            raise Exception(f'User does not have access to subscription: "{subscription_name}"')
        return subscription
Example #7
    def get_public_key(self):
        public_key_config = self.get_config_or_default(
            self.docs, 'infrastructure/public-key')
        public_key_config.specification.name = self.cluster_model.specification.admin_user.name

        # To avoid key-pair collisions on AWS we generate a randomized key name. In order to successfully
        # re-run TF we need to re-use that randomized name, which we extract from the terraform.tfstate of
        # the previous run.
        tfstate_path = get_terraform_path(self.cluster_model.specification.name) + '/terraform.tfstate'
        if os.path.isfile(tfstate_path):
            tfstate = load_json_obj(tfstate_path)
            key_pair = select_first(tfstate['resources'], lambda x: x['type'] == 'aws_key_pair')
            public_key_config.specification.key_name = key_pair['instances'][0]['attributes']['id']
        else:
            public_key_config.specification.key_name = self.cluster_model.specification.admin_user.name + '-' \
                                                       + str(uuid.uuid4())
        pub_key_path = self.cluster_model.specification.admin_user.key_path + '.pub'
        if os.path.isfile(pub_key_path):
            with open(pub_key_path, 'r') as stream:
                public_key_config.specification.public_key = stream.read().rstrip()
        else:
            raise Exception(
                f'SSH key path "{pub_key_path}" is not valid. Ansible run will fail.'
            )
        return public_key_config
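
For orientation, the terraform.tfstate fragment this lookup expects would be shaped roughly as follows (abridged; the id value is made up):

# Abridged tfstate shape implied by the lookups above; values are hypothetical.
tfstate = {
    'resources': [
        {
            'type': 'aws_key_pair',
            'instances': [{'attributes': {'id': 'operations-8f14e45f-ceea-467f-9f26-c1d34d6e1d2b'}}]
        }
    ]
}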
Example #8
    def add_security_rules_inbound_efs(self, infrastructure, security_group):
        asgs_allowed_to_efs = select_all(
            infrastructure,
            lambda item: item.kind == 'infrastructure/virtual-machine'
            and item.specification.authorized_to_efs)

        for asg in asgs_allowed_to_efs:
            for subnet_in_asg in asg.specification.subnet_names:
                subnet = select_single(
                    infrastructure,
                    lambda item: item.kind == 'infrastructure/subnet'
                    and item.specification.name == subnet_in_asg)

                rule_defined = select_first(
                    security_group.specification.rules,
                    lambda item: item.source_address_prefix == subnet.specification.cidr_block
                    and item.destination_port_range == 2049)
                if rule_defined is None:
                    rule = self.get_config_or_default(
                        self.docs, 'infrastructure/security-group-rule')
                    rule.specification.name = 'sg-rule-nfs-default-from-' + subnet.specification.name
                    rule.specification.description = 'NFS inbound for ' + subnet.specification.name
                    rule.specification.direction = 'ingress'
                    rule.specification.protocol = 'tcp'
                    rule.specification.destination_port_range = 2049
                    rule.specification.source_address_prefix = subnet.specification.cidr_block
                    rule.specification.destination_address_prefix = '*'
                    security_group.specification.rules.append(rule.specification)

        security_group.specification.rules = [objdict_to_dict(rule)
                                              for rule in security_group.specification.rules]
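
Example #8 also calls select_all and select_single; hedged sketches consistent with their usage here and with the ExpectedSingleResultException caught in Example #10 (the actual implementations may differ):

class ExpectedSingleResultException(Exception):
    pass

def select_all(data, predicate):
    # All matching elements; mirrors select_first's handling of data=None.
    if data is None:
        return None
    return [item for item in data if predicate(item)]

def select_single(data, predicate):
    # Exactly one match expected; otherwise raise, as Example #10 anticipates.
    results = select_all(data, predicate)
    if results is None or len(results) != 1:
        raise ExpectedSingleResultException('expected exactly one matching element')
    return results[0]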
Example #9
    def run(self):
        enabled_roles = self.inventory_creator.get_enabled_roles()

        ansible_dir = get_ansible_path(self.cluster_model.specification.name)

        cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common',
                                                'vars', 'main.yml')
        clean_cluster_model = self.get_clean_cluster_model()
        self.populate_group_vars(ansible_dir)
        with open(cluster_config_file_path, 'w') as stream:
            dump(clean_cluster_model, stream)

        for role in enabled_roles:
            document = select_first(
                self.config_docs,
                lambda x: x.kind == 'configuration/' + to_feature_name(role))

            if document is None:
                self.logger.warn('No config document for enabled role: ' +
                                 role)
                continue

            document = self.add_provider_info(document)
            vars_dir = os.path.join(ansible_dir, 'roles', to_role_name(role),
                                    'vars')
            if not os.path.exists(vars_dir):
                os.makedirs(vars_dir)

            vars_file_name = 'main.yml'
            vars_file_path = os.path.join(vars_dir, vars_file_name)

            with open(vars_file_path, 'w') as stream:
                dump(document, stream)
Example #10
    def write_role_manifest_vars(self, ansible_dir, role, kind):
        enabled_kinds = {
            "configuration/haproxy", "configuration/node-exporter"
        }

        if kind not in enabled_kinds:
            return  # skip

        try:
            cluster_model = select_single(
                self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
        except ExpectedSingleResultException:
            return  # skip

        document = select_first(self.manifest_docs, lambda x: x.kind == kind)
        if document is None:
            # If there is no document provided by the user, then fallback to defaults
            document = load_yaml_obj(types.DEFAULT, 'common', kind)
            # Inject the required "version" attribute
            document['version'] = VERSION

        # Copy the "provider" value from the cluster model
        document['provider'] = cluster_model['provider']

        # Merge the document with defaults
        with DefaultMerger([document]) as doc_merger:
            document = doc_merger.run()[0]

        self.write_role_vars(ansible_dir,
                             role,
                             document,
                             vars_file_name='manifest.yml')
Example #11
    def generate(self):
        self.logger.info('Generate Ansible vars')
        if self.inventory_creator is not None:
            ansible_dir = get_ansible_path(self.cluster_model.specification.name)
        else:
            ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)

        self.populate_group_vars(ansible_dir)

        cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
        clean_cluster_model = self.get_clean_cluster_model()
        with open(cluster_config_file_path, 'w') as stream:
            dump(clean_cluster_model, stream)

        if self.inventory_creator is None:
            # For an upgrade, at this point we don't need any roles other than
            # common, upgrade, repository and image_registry:
            # - common is already provisioned from the cluster model constructed from the inventory,
            # - upgrade should not require any additional config.
            # Roles in the list below are provisioned for upgrade from defaults.
            enabled_roles = ['repository', 'image_registry']
        else:
            enabled_roles = self.inventory_creator.get_enabled_roles()

        for role in enabled_roles:
            document = select_first(self.config_docs, lambda x: x.kind == 'configuration/' + to_feature_name(role))

            if document is None:
                self.logger.warn('No config document for enabled role: ' + role)
                continue

            document.specification['provider'] = self.cluster_model.provider
            self.write_role_vars(ansible_dir, role, document)
Example #12
    def populate_group_vars(self, ansible_dir):
        main_vars = ObjDict()
        main_vars['admin_user'] = self.cluster_model.specification.admin_user
        main_vars['validate_certs'] = Config().validate_certs
        main_vars['offline_requirements'] = Config().offline_requirements
        main_vars['wait_for_pods'] = Config().wait_for_pods
        main_vars['is_upgrade_run'] = self.is_upgrade_run
        main_vars['roles_with_generated_vars'] = sorted(
            self.roles_with_generated_vars)

        if self.is_upgrade_run:
            shared_config_doc = self.get_shared_config_from_manifest()
        else:
            shared_config_doc = select_first(
                self.config_docs,
                lambda x: x.kind == 'configuration/shared-config')

        if shared_config_doc is None:
            shared_config_doc = load_yaml_obj(types.DEFAULT, 'common',
                                              'configuration/shared-config')

        self.set_vault_path(shared_config_doc)
        main_vars.update(shared_config_doc.specification)

        vars_dir = os.path.join(ansible_dir, 'group_vars')
        if not os.path.exists(vars_dir):
            os.makedirs(vars_dir)

        vars_file_name = 'all.yml'
        vars_file_path = os.path.join(vars_dir, vars_file_name)

        with open(vars_file_path, 'a') as stream:
            dump(main_vars, stream)
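
These snippets freely mix attribute access (doc.kind) with item access (doc['version']), and populate_group_vars builds an ObjDict directly; a minimal stand-in consistent with that behavior (the real class may do more, e.g. deep conversion of nested values):

class ObjDict(dict):
    # Sketch: a dict that also exposes its keys as attributes.
    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as err:
            raise AttributeError(name) from err

    def __setattr__(self, name, value):
        self[name] = value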
Example #13
    def efs_add_mount_target_config(efs_config, subnet):
        target = select_first(efs_config.specification.mount_targets,
                              lambda item: item['availability_zone'] == subnet.specification.availability_zone)
        if target is None:
            efs_config.specification.mount_targets.append(
                {'name': 'efs-' + subnet.specification.name + '-mount',
                 'subnet_name': subnet.specification.name,
                 'availability_zone': subnet.specification.availability_zone})
Example #14
    def get_availability_set(self, availability_set_name):
        availability_set = select_first(
            self.docs,
            lambda item: item.kind == 'infrastructure/availability-set' and item.name == availability_set_name,
        )
        if availability_set is not None:
            availability_set.specification.name = resource_name(self.cluster_prefix, self.cluster_name, availability_set_name + '-aset')
        return availability_set
Example #15
    def get_virtual_machine(component_value, cluster_model, docs):
        machine_selector = component_value.machine
        model_with_defaults = select_first(docs, lambda x: x.kind == 'infrastructure/virtual-machine' and
                                                                 x.name == machine_selector)
        if model_with_defaults is None:
            model_with_defaults = merge_with_defaults(cluster_model.provider, 'infrastructure/virtual-machine',
                                                      machine_selector)

        return model_with_defaults
Example #16
    def run(self):
        configuration_docs = []

        for component_key, component_value in self.cluster_model.specification.components.items():
            if component_value.count < 1:
                continue

            features_map = select_first(self.input_docs, lambda x: x.kind == 'configuration/feature-mapping')
            if features_map is None:
                features_map = select_first(configuration_docs, lambda x: x.kind == 'configuration/feature-mapping')

            if features_map is None:
                features_map = load_yaml_obj(types.DEFAULT, 'common', 'configuration/feature-mapping')
                self.logger.info("Adding: " + features_map.kind)
                configuration_docs.append(features_map)

            config_selector = component_value.configuration
            for feature_key in features_map.specification.roles_mapping[component_key]:
                config = select_first(self.input_docs,
                                      lambda x: x.kind == 'configuration/' + feature_key and x.name == config_selector)
                if config is None:
                    config = select_first(configuration_docs,
                                          lambda x: x.kind == 'configuration/' + feature_key and x.name == config_selector)
                if config is None:
                    config = merge_with_defaults('common', 'configuration/' + feature_key, config_selector)
                    self.logger.info("Adding: " + config.kind)
                    configuration_docs.append(config)

        return configuration_docs
Example #17
    def get_ips_for_feature(self, component_key):
        component_config = self.cluster_model.specification.components[component_key]
        result = []
        if hasattr(component_config, 'machines'):
            for machine in component_config.machines:
                machine_doc = select_first(self.config_docs,
                                           lambda x: x.kind == 'infrastructure/machine' and x.name == machine)
                result.append(AnsibleHostModel(machine_doc.specification.hostname,
                                               machine_doc.specification.ip))
        return result
Example #18
    def generate(self):
        self.logger.info('Generate Ansible vars')
        self.is_upgrade_run = self.inventory_creator is None
        if self.is_upgrade_run:
            ansible_dir = get_ansible_path_for_build(self.inventory_upgrade.build_dir)
        else:
            ansible_dir = get_ansible_path(self.cluster_model.specification.name)

        cluster_config_file_path = os.path.join(ansible_dir, 'roles', 'common', 'vars', 'main.yml')
        clean_cluster_model = self.get_clean_cluster_model()
        with open(cluster_config_file_path, 'w') as stream:
            dump(clean_cluster_model, stream)

        if self.is_upgrade_run:
            # For an upgrade, at this point we don't need any roles other than common, repository, image_registry and node_exporter:
            # - common is already provisioned from the cluster model constructed from the inventory,
            # - (if possible) upgrade should not require any additional config.
            # Roles in the list below are provisioned for upgrade from defaults.
            roles_with_defaults = ['repository', 'image_registry', 'node_exporter']
            # In special cases (like haproxy), where the user specifies the majority of the config, it's easier (and less awkward)
            # to re-render config templates than to patch (for example with regular expressions) no-longer-compatible config files.
            roles_with_manifest = ['haproxy']
        else:
            roles_with_defaults = self.inventory_creator.get_enabled_roles()
            roles_with_manifest = []  # applies only to upgrades

        for role in roles_with_defaults:
            kind = 'configuration/' + to_feature_name(role)

            document = select_first(self.config_docs, lambda x: x.kind == kind)
            if document is None:
                self.logger.warn('No config document for enabled role: ' + role)
                continue
            document.specification['provider'] = self.cluster_model.provider

            self.write_role_vars(ansible_dir, role, document)

        for role in roles_with_manifest:
            kind = 'configuration/' + to_feature_name(role)

            self.write_role_manifest_vars(ansible_dir, role, kind)

        self.populate_group_vars(ansible_dir)
Example #19
    def populate_group_vars(self, ansible_dir):
        main_vars = ObjDict()
        main_vars['admin_user'] = self.cluster_model.specification.admin_user
        main_vars['validate_certs'] = Config().validate_certs
        main_vars['offline_requirements'] = Config().offline_requirements
        main_vars['wait_for_pods'] = Config().wait_for_pods

        shared_config_doc = select_first(self.config_docs, lambda x: x.kind == 'configuration/shared-config')
        if shared_config_doc is None:
            shared_config_doc = load_yaml_obj(types.DEFAULT, 'common', 'configuration/shared-config')
        main_vars.update(shared_config_doc.specification)

        vars_dir = os.path.join(ansible_dir, 'group_vars')
        if not os.path.exists(vars_dir):
            os.makedirs(vars_dir)

        vars_file_name = 'all.yml'
        vars_file_path = os.path.join(vars_dir, vars_file_name)

        with open(vars_file_path, 'a') as stream:
            dump(main_vars, stream)
Example #20
    def run(self):
        infrastructure = []

        public_key_config = self.get_public_key()
        infrastructure.append(public_key_config)

        vpc_config = self.get_vpc_config()

        infrastructure.append(vpc_config)
        default_security_group = self.get_default_security_group_config(
            vpc_config)
        infrastructure.append(default_security_group)

        vpc_name = vpc_config.specification.name

        resource_group = self.get_resource_group()
        infrastructure.append(resource_group)

        internet_gateway = self.get_internet_gateway(
            vpc_config.specification.name)
        infrastructure.append(internet_gateway)
        route_table = self.get_routing_table(
            vpc_name, internet_gateway.specification.name)
        infrastructure.append(route_table)

        efs_config = self.get_efs_config()

        if not self.use_network_security_groups:
            self.logger.warning('The "use_network_security_groups" flag is currently ignored on AWS')

        for component_key, component_value in self.cluster_model.specification.components.items():
            if component_value['count'] < 1:
                continue

            subnets_to_create = []
            security_groups_to_create = []
            subnet_index = 0
            asg_index = 0
            for subnet_definition in component_value.subnets:  # todo extract to another method or class
                subnet = select_first(
                    infrastructure,
                    lambda item: item.kind == 'infrastructure/subnet'
                    and item.specification.cidr_block == subnet_definition['address_pool'])
                security_group = select_first(
                    infrastructure,
                    lambda item: item.kind == 'infrastructure/security-group'
                    and item.specification.cidr_block == subnet_definition['address_pool'])

                if subnet is None:
                    subnet = self.get_subnet(subnet_definition, component_key,
                                             vpc_name, subnet_index)
                    infrastructure.append(subnet)

                    security_group = self.get_security_group(
                        subnet, component_key, vpc_name, subnet_index)
                    infrastructure.append(security_group)

                    route_table_association = self.get_route_table_association(
                        route_table.specification.name, component_key,
                        subnet.specification.name, subnet_index)
                    infrastructure.append(route_table_association)
                    subnet_index += 1

                subnets_to_create.append(subnet)
                security_groups_to_create.append(security_group)

            autoscaling_group = self.get_autoscaling_group(
                component_key, component_value, subnets_to_create, asg_index)

            for security_group in security_groups_to_create:
                security_group.specification.rules += autoscaling_group.specification.security.rules

            launch_configuration = self.get_launch_configuration(
                autoscaling_group, component_key, security_groups_to_create)

            launch_configuration.specification.key_name = public_key_config.specification.key_name

            self.set_image_id_for_launch_configuration(self.cluster_model,
                                                       self.docs,
                                                       launch_configuration,
                                                       autoscaling_group)
            autoscaling_group.specification.launch_configuration = launch_configuration.specification.name

            if autoscaling_group.specification.mount_efs:
                for subnet in subnets_to_create:
                    self.efs_add_mount_target_config(efs_config, subnet)

            infrastructure.append(autoscaling_group)
            infrastructure.append(launch_configuration)
            asg_index += 1

        if self.has_efs_any_mounts(efs_config):
            infrastructure.append(efs_config)
            self.add_security_rules_inbound_efs(infrastructure,
                                                default_security_group)

        return infrastructure
Example #21
    def run(self):
        infrastructure = []

        resource_group = self.get_resource_group()
        infrastructure.append(resource_group)

        vnet = self.get_virtual_network()
        infrastructure.append(vnet)

        shared_storage = self.get_storage_share_config()
        infrastructure.append(shared_storage)

        for component_key, component_value in self.cluster_model.specification.components.items():
            vm_count = component_value['count']
            if vm_count < 1:
                continue

            # The vm config also contains some other stuff we use for network and security config,
            # so get it here and pass it along.
            vm_config = self.get_virtual_machine(component_value, self.cluster_model, self.docs)

            # For now only one subnet per component.
            if len(component_value.subnets) > 1:
                self.logger.warning('On Azure only one subnet per component is supported for now. Taking the first and ignoring the others.')

            # Add message for ignoring availability zones if present.
            if 'availability_zone' in component_value.subnets[0]:
                self.logger.warning('On Azure availability_zones are not supported yet. Ignoring definition.')

            subnet_definition = component_value.subnets[0]
            subnet = select_first(
                infrastructure,
                lambda item: item.kind == 'infrastructure/subnet'
                and item.specification.address_prefix == subnet_definition['address_pool'])

            if subnet is None:
                nsg = self.get_network_security_group(
                    component_key, vm_config.specification.security.rules, 0)
                infrastructure.append(nsg)

                subnet = self.get_subnet(subnet_definition, component_key,
                                         nsg.specification.name, 0)
                infrastructure.append(subnet)

                #TODO: This gives issues for now when creating more than 3 subnets. Re-test when
                #      upgrading from azurerm 1.27 to 2.0 and for now stick to azurerm_subnet.network_security_group_id
                #ssga = self.get_subnet_network_security_group_association(component_key,
                #                                                                     subnet.specification.name,
                #                                                                     nsg.specification.name,
                #                                                                     0)
                #infrastructure.append(ssga)

            #TODO: For now we create the VM infrastructure compatible with the Epiphany 2.x
            #      code line but later we might want to look at scale sets to achieve the same result:
            #      https://www.terraform.io/docs/providers/azurerm/r/virtual_machine_scale_set.html
            for index in range(vm_count):
                public_ip_name = ''
                if self.cluster_model.specification.cloud.use_public_ips:
                    public_ip = self.get_public_ip(component_key,
                                                   component_value, vm_config,
                                                   index)
                    infrastructure.append(public_ip)
                    public_ip_name = public_ip.specification.name

                network_interface = self.get_network_interface(
                    component_key, component_value, vm_config,
                    subnet.specification.name, nsg.specification.name,
                    public_ip_name, index)
                infrastructure.append(network_interface)

                vm = self.get_vm(component_key, component_value, vm_config,
                                 network_interface.specification.name, index)
                infrastructure.append(vm)

        return infrastructure
Example #22
    def get_config_or_default(docs, kind):
        config = select_first(docs, lambda x: x.kind == kind)
        if config is None:
            config = load_yaml_obj(types.DEFAULT, 'azure', kind)
            config['version'] = VERSION
        return config
Example #23
def test_select_first_should_return_first_matching_element():
    actual = select_first(DATA, lambda item: item.name == 'test-name-1')

    assert actual.index == 1
Example #24
def test_select_first_should_return_first_matching_element_when_many_elements_matching():
    actual = select_first(DATA, lambda item: item.name == 'test-name23')

    assert actual.index == 2
Example #25
def test_select_first_should_return_none_if_there_is_no_matching_elements():
    actual = select_first(DATA, lambda item: item.name == 'name-that-does-not-exist')

    assert actual is None
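
A DATA fixture consistent with Examples #23-#25 (hypothetical values; the only hard constraints are that 'test-name-1' carries index 1 and the first of several 'test-name23' entries carries index 2):

from types import SimpleNamespace

# Hypothetical fixture; any objects with .name/.index attributes would do.
DATA = [
    SimpleNamespace(index=1, name='test-name-1'),
    SimpleNamespace(index=2, name='test-name23'),
    SimpleNamespace(index=3, name='test-name23'),
]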
Example #26
    def run(self):
        infrastructure = []

        resource_group = self.get_resource_group()
        infrastructure.append(resource_group)

        vnet = self.get_virtual_network()
        infrastructure.append(vnet)

        shared_storage = self.get_storage_share_config()
        infrastructure.append(shared_storage)

        for component_key, component_value in self.cluster_model.specification.components.items():
            vm_count = component_value['count']
            if vm_count < 1:
                continue

            # The vm config also contains some other stuff we use for network and security config,
            # so get it here and pass it along.
            vm_config = self.get_virtual_machine(component_value, self.cluster_model, self.docs)

            # If there are no security groups, Ansible provisioning will fail because
            # SSH is then not allowed with public IPs on Azure.
            if not self.use_network_security_groups and self.use_public_ips:
                self.logger.warning('Use of security groups has been disabled and public IPs are used. Ansible run will fail because SSH will not be allowed.')

            # For now only one subnet per component.
            if len(component_value.subnets) > 1:
                self.logger.warning('On Azure only one subnet per component is supported for now. Taking the first and ignoring the others.')

            # Add message for ignoring availability zones if present.
            if 'availability_zone' in component_value.subnets[0]:
                self.logger.warning('On Azure availability_zones are not supported yet. Ignoring definition.')

            subnet_definition = component_value.subnets[0]
            subnet = select_first(infrastructure, lambda item: item.kind == 'infrastructure/subnet' and
                                    item.specification.address_prefix == subnet_definition['address_pool'])

            if subnet is None:
                subnet = self.get_subnet(subnet_definition, component_key, 0)
                infrastructure.append(subnet)

                if self.use_network_security_groups:
                    nsg = self.get_network_security_group(component_key,
                                                            vm_config.specification.security.rules,
                                                            0)
                    infrastructure.append(nsg)

                    subnet_nsg_association = self.get_subnet_network_security_group_association(component_key,
                                                                                        subnet.specification.name,
                                                                                        nsg.specification.name,
                                                                                        0)
                    infrastructure.append(subnet_nsg_association)

            availability_set = None
            if 'availability_set' in component_value:
                availability_set = select_first(
                    infrastructure,
                    lambda item: item.kind == 'infrastructure/availability-set' and item.name == component_value.availability_set,
                )
                if availability_set is None:
                    availability_set = self.get_availability_set(component_value.availability_set)
                    if availability_set is not None:
                        infrastructure.append(availability_set)

            #TODO: For now we create the VM infrastructure compatible with the Epiphany 2.x
            #      code line but later we might want to look at scale sets to achieve the same result:
            #      https://www.terraform.io/docs/providers/azurerm/r/virtual_machine_scale_set.html
            for index in range(vm_count):
                public_ip_name = ''
                if self.cluster_model.specification.cloud.use_public_ips:
                    public_ip = self.get_public_ip(component_key,
                                                   component_value,
                                                   vm_config,
                                                   index)
                    infrastructure.append(public_ip)
                    public_ip_name = public_ip.specification.name

                if self.use_network_security_groups:
                    nsg_name = nsg.specification.name
                else:
                    nsg_name = ''

                network_interface = self.get_network_interface(component_key,
                                                               component_value,
                                                               vm_config,
                                                               subnet.specification.name,
                                                               nsg_name,
                                                               public_ip_name,
                                                               index)
                infrastructure.append(network_interface)

                vm = self.get_vm(component_key, component_value, vm_config, availability_set,
                                 network_interface.specification.name, index)
                infrastructure.append(vm)

        return infrastructure
Example #27
    def get_config_or_default(docs, kind):
        config = select_first(docs, lambda x: x.kind == kind)
        if config is None:
            return load_yaml_obj(types.DEFAULT, 'aws', kind)
        return config
Example #28
def test_select_first_should_return_none_if_data_is_none():
    actual = select_first(None, lambda item: item.name == 'name-that-does-not-exist')

    assert actual is None
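
Together, Examples #23-#25 and #28 pin down select_first's contract; a minimal sketch satisfying all four tests (the project's real helper may differ in details):

def select_first(data, predicate):
    # None input yields None; otherwise return the first matching element,
    # or None when nothing matches.
    if data is None:
        return None
    for item in data:
        if predicate(item):
            return item
    return None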