Example #1
    def test(self):
        # get manifest documents
        docs = load_manifest_docs(self.build_directory)
        cluster_model = select_single(docs,
                                      lambda x: x.kind == 'epiphany-cluster')

        # get inventory
        path_to_inventory = os.path.join(self.build_directory,
                                         INVENTORY_FILE_NAME)
        if not os.path.isfile(path_to_inventory):
            raise Exception(
                f'No "{INVENTORY_FILE_NAME}" inside the build directory: "{self.build_directory}"'
            )

        # get admin user
        admin_user = cluster_model.specification.admin_user
        if not os.path.isfile(admin_user.key_path):
            raise Exception(
                f'No SSH key file in directory: "{admin_user.key_path}"')

        # create the spec output dir if it does not exist
        spec_output = os.path.join(self.build_directory, SPEC_OUTPUT_DIR)
        os.makedirs(spec_output, exist_ok=True)

        # run the spec tests
        spec_command = SpecCommand()
        spec_command.run(spec_output, path_to_inventory, admin_user.name,
                         admin_user.key_path, self.group)

        return 0
Example #2
    def __init__(self, docs):
        super().__init__(__name__)
        self.cluster_model = select_single(
            docs, lambda x: x.kind == 'epiphany-cluster')
        self.cluster_name = self.cluster_model.specification.name.lower()
        self.cluster_prefix = self.cluster_model.specification.prefix.lower()
        self.docs = docs
Example #3
    def write_role_manifest_vars(self, ansible_dir, role, kind):
        enabled_kinds = {
            "configuration/haproxy", "configuration/node-exporter"
        }

        if kind not in enabled_kinds:
            return  # skip

        try:
            cluster_model = select_single(
                self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')
        except ExpectedSingleResultException:
            return  # skip

        document = select_first(self.manifest_docs, lambda x: x.kind == kind)
        if document is None:
            # If there is no document provided by the user, then fall back to defaults
            document = load_yaml_obj(types.DEFAULT, 'common', kind)
            # Inject the required "version" attribute
            document['version'] = VERSION

        # Copy the "provider" value from the cluster model
        document['provider'] = cluster_model['provider']

        # Merge the document with defaults
        with DefaultMerger([document]) as doc_merger:
            document = doc_merger.run()[0]

        self.write_role_vars(ansible_dir,
                             role,
                             document,
                             vars_file_name='manifest.yml')
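
Note: Example #3 uses select_first alongside select_single. Judging from the `document is None` check above, select_first returns None when nothing matches instead of raising. A minimal sketch of that behavior, assuming the helper simply scans the collection (the actual Epiphany implementation may differ):

def select_first(data, predicate):
    # Hedged sketch inferred from usage in Example #3: return the first element
    # that satisfies the predicate, or None when data is None or nothing matches.
    if data is None:
        return None
    for item in data:
        if predicate(item):
            return item
    return None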
Example #4
    def add_security_rules_inbound_efs(self, infrastructure, security_group):
        asgs_allowed_to_efs = select_all(
            infrastructure,
            lambda item: item.kind == 'infrastructure/virtual-machine'
                         and item.specification.authorized_to_efs)

        for asg in asgs_allowed_to_efs:
            for subnet_in_asg in asg.specification.subnet_names:
                subnet = select_single(
                    infrastructure,
                    lambda item: item.kind == 'infrastructure/subnet'
                                 and item.specification.name == subnet_in_asg)

                rule_defined = select_first(
                    security_group.specification.rules,
                    lambda item: item.source_address_prefix == subnet.specification.cidr_block
                                 and item.destination_port_range == 2049)
                if rule_defined is None:
                    rule = self.get_config_or_default(
                        self.docs, 'infrastructure/security-group-rule')
                    rule.specification.name = 'sg-rule-nfs-default-from-' + subnet.specification.name
                    rule.specification.description = 'NFS inbound for ' + subnet.specification.name
                    rule.specification.direction = 'ingress'
                    rule.specification.protocol = 'tcp'
                    rule.specification.destination_port_range = 2049
                    rule.specification.source_address_prefix = subnet.specification.cidr_block
                    rule.specification.destination_address_prefix = '*'
                    security_group.specification.rules.append(
                        rule.specification)

        security_group.specification.rules = [
            objdict_to_dict(rule) for rule in security_group.specification.rules
        ]
Example #5
    def __init__(self, docs):
        super().__init__(__name__)
        self.cluster_model = select_single(
            docs, lambda x: x.kind == 'epiphany-cluster')
        self.cluster_name = self.cluster_model.specification.name.lower()
        self.cluster_prefix = self.cluster_model.specification.prefix.lower()
        self.use_network_security_groups = self.cluster_model.specification.cloud.network.use_network_security_groups
        self.docs = docs
Example #6
    def __init__(self, docs):
        super().__init__(__name__)
        self.cluster_model = select_single(
            docs, lambda x: x.kind == 'epiphany-cluster')
        self.cluster_name = self.cluster_model.specification.name.lower()
        self.cluster_prefix = self.cluster_model.specification.prefix.lower()
        self.resource_group_name = resource_name(self.cluster_prefix,
                                                 self.cluster_name, 'rg')
        self.region = self.cluster_model.specification.cloud.region
        self.docs = docs
Example #7
    def get_shared_config_from_manifest(self):
        # Reuse shared config from existing manifest
        # Shared config contains the use_ha_control_plane flag, which is required during upgrades

        cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        try:
            shared_config_doc = select_single(self.manifest_docs, lambda x: x.kind == 'configuration/shared-config')
            shared_config_doc['provider'] = cluster_model['provider']
        except ExpectedSingleResultException:
            # If there is no shared-config doc inside the manifest file, this is probably a v0.3 cluster
            # Returning None here (there is nothing to merge at this point) and
            # hoping that the shared-config doc from defaults will be enough
            return None

        # Merge the shared config doc with defaults
        with DefaultMerger([shared_config_doc]) as doc_merger:
            shared_config_doc = doc_merger.run()[0]
            del shared_config_doc['provider']

        return shared_config_doc
Example #8
    def delete(self):
        docs = load_manifest_docs(self.build_directory)
        cluster_model = select_single(docs, lambda x: x.kind == 'epiphany-cluster')

        if cluster_model.provider == 'any':
            raise Exception('Delete works only for cloud providers')

        with TerraformRunner(cluster_model, docs) as tf_runner:
            tf_runner.delete()

        shutil.rmtree(self.build_directory, ignore_errors=True)

        return 0
Example #9
    def init(self):
        input_docs = load_all_yaml_objs(types.DEFAULT, self.provider,
                                        'configuration/minimal-cluster-config')
        input_docs[0].specification.name = self.name

        if self.is_full_config:
            config = self.get_config_docs(input_docs)
            config_only = select_all(
                config, lambda x: not x.kind.startswith('epiphany-cluster'))
            if self.provider == 'any':
                # for the 'any' provider we want to use the default config from minimal-cluster-config
                cluster_model = select_single(
                    input_docs, lambda x: x.kind == 'epiphany-cluster')
            else:
                # for the azure|aws providers we want to use the extended defaults cluster-config after the dry run.
                # TODO: We probably want this coming from separate documents since Azure and AWS overlap now...
                cluster_model = select_single(
                    config, lambda x: x.kind == 'epiphany-cluster')
            infra = self.get_infra_docs(input_docs)
            docs = [cluster_model, *config_only, *infra]
        else:
            docs = [*input_docs]

        # set the provider and version for all docs
        for doc in docs:
            doc['provider'] = self.provider
            doc['version'] = VERSION

        # remove SET_BY_AUTOMATION fields
        remove_value(docs, 'SET_BY_AUTOMATION')

        # save document
        save_manifest(docs, self.name, self.name + '.yml')

        self.logger.info(f'Initialized new configuration and saved it to '
                         f'"{os.path.join(get_build_path(self.name), self.name + ".yml")}"')
        return 0
Example #10
    def get_shared_config_from_manifest(self):
        # Reuse shared config from existing manifest
        # Shared config contains the use_ha_control_plane flag, which is required during upgrades

        path_to_manifest = os.path.join(self.inventory_upgrade.build_dir,
                                        MANIFEST_FILE_NAME)
        if not os.path.isfile(path_to_manifest):
            raise Exception(
                f'No "{MANIFEST_FILE_NAME}" inside the build folder: "{self.inventory_upgrade.build_dir}"')

        manifest_docs = load_yamls_file(path_to_manifest)

        cluster_model = select_single(manifest_docs,
                                      lambda x: x.kind == 'epiphany-cluster')

        shared_config_doc = select_single(
            manifest_docs, lambda x: x.kind == 'configuration/shared-config')
        shared_config_doc['provider'] = cluster_model['provider']

        # Merge the shared config doc with defaults
        with DefaultMerger([shared_config_doc]) as doc_merger:
            shared_config_doc = doc_merger.run()[0]
            del shared_config_doc['provider']

        return shared_config_doc
Example #11
    def _process_input_docs(self):
        """Load, validate and merge (with defaults) input yaml documents."""

        # Get existing manifest config documents
        self.manifest_docs = load_manifest_docs(self.build_directory)
        self.cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Load backup / recovery configuration documents
        self.input_docs = load_yamls_file(self.file)

        # Validate input documents
        with SchemaValidator(self.cluster_model, self.input_docs) as schema_validator:
            schema_validator.run_for_individual_documents()

        # Merge the input docs with defaults
        with DefaultMerger(self.input_docs) as doc_merger:
            self.input_docs = doc_merger.run()
Example #12
    def recovery(self):
        """Recover all enabled components."""

        self._process_input_docs()
        self._process_configuration_docs()

        # Get recovery config document
        recovery_doc = select_single(self.configuration_docs, lambda x: x.kind == 'configuration/recovery')

        self._update_role_files_and_vars('recovery', recovery_doc)

        # Execute all enabled component playbooks sequentially
        for component_name, component_config in sorted(recovery_doc.specification.components.items()):
            if component_config.enabled:
                self._update_playbook_files_and_run('recovery', component_name)

        return 0
Example #13
    def get_vpc_id(self):
        vpc_config = dict_to_objdict(
            select_single(self.config_docs,
                          lambda x: x.kind == 'infrastructure/vpc'))
        ec2 = self.session.resource('ec2')
        filters = [{
            'Name': 'tag:Name',
            'Values': [vpc_config.specification.name]
        }]
        vpcs = list(ec2.vpcs.filter(Filters=filters))

        if len(vpcs) == 1:
            return vpcs[0].id

        raise Exception("Expected 1 VPC matching tag Name: " +
                        vpc_config.specification.name + " but received: " +
                        str(len(vpcs)))
Example #14
    def delete(self):
        path_to_manifest = os.path.join(self.build_directory, MANIFEST_FILE_NAME)
        if not os.path.isfile(path_to_manifest):
            raise Exception(
                f'No "{MANIFEST_FILE_NAME}" inside the build folder: "{self.build_directory}"')

        docs = load_yamls_file(path_to_manifest)
        cluster_model = select_single(docs, lambda x: x.kind == 'epiphany-cluster')

        if cluster_model.provider == 'any':
            raise Exception('Delete works only for cloud providers')

        with TerraformRunner(cluster_model, docs) as tf_runner:
            tf_runner.delete()

        shutil.rmtree(self.build_directory, ignore_errors=True)

        return 0
Example #15
    def backup(self):
        """Backup all enabled components."""

        self._process_input_docs()
        self._process_configuration_docs()

        # Get backup config document
        backup_doc = select_single(self.configuration_docs,
                                   lambda x: x.kind == 'configuration/backup')

        self._update_role_files_and_vars('backup', backup_doc)

        # Execute all enabled component playbooks sequentially
        for component_name, component_config in sorted(
                backup_doc.specification.components.items()):
            if component_config.enabled:
                self._update_playbook_files_and_run('backup', component_name)

        return 0
Example #16
    def _process_input_docs(self):
        """Load, validate and merge (with defaults) input yaml documents."""

        path_to_manifest = os.path.join(self.build_directory, MANIFEST_FILE_NAME)
        if not os.path.isfile(path_to_manifest):
            raise Exception(
                f'No "{MANIFEST_FILE_NAME}" inside the build folder: "{self.build_directory}"')

        # Get existing manifest config documents
        self.manifest_docs = load_yamls_file(path_to_manifest)
        self.cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Load backup / recovery configuration documents
        self.input_docs = load_yamls_file(self.file)

        # Validate input documents
        with SchemaValidator(self.cluster_model, self.input_docs) as schema_validator:
            schema_validator.run_for_individual_documents()

        # Merge the input docs with defaults
        with DefaultMerger(self.input_docs) as doc_merger:
            self.input_docs = doc_merger.run()
Example #17
    def process_input_docs(self):
        # Load the user input YAML docs from the input file.
        if os.path.isabs(self.file):
            path_to_load = self.file
        else:
            path_to_load = os.path.join(os.getcwd(), self.file)

        # Use a context manager so the file handle is closed after loading;
        # materialize the result in case safe_load_all returns a lazy generator.
        with open(path_to_load, 'r') as user_file_stream:
            self.input_docs = list(safe_load_all(user_file_stream))

        # Merge the input docs with defaults
        with DefaultMerger(self.input_docs) as doc_merger:
            self.input_docs = doc_merger.run()

        # Get the cluster model.
        self.cluster_model = select_single(
            self.input_docs, lambda x: x.kind == 'epiphany-cluster')
        if self.cluster_model is None:
            raise Exception('No cluster model defined in input YAML file')

        # Validate input documents
        with SchemaValidator(self.cluster_model,
                             self.input_docs) as schema_validator:
            schema_validator.run()
Example #18
    def _process_configuration_docs(self):
        """Populate input yaml documents with additional required ad-hoc data."""

        # Seed the self.configuration_docs
        self.configuration_docs = copy.deepcopy(self.input_docs)

        # Note that running DefaultMerger is not needed here, since the input docs have already been merged at this point.
        # We just check if documents are missing and insert default ones without the unneeded merge operation.
        for kind in {'configuration/backup', 'configuration/recovery'}:
            try:
                # Check if the required document is in user inputs
                document = select_single(self.configuration_docs, lambda x: x.kind == kind)
            except ExpectedSingleResultException:
                # If there is no document provided by the user, then fall back to defaults
                document = load_yaml_obj(data_types.DEFAULT, 'common', kind)
                # Inject the required "version" attribute
                document['version'] = VERSION
                # Copy the "provider" value from the cluster model
                document['provider'] = self.cluster_model.provider
                # Save the document for later use
                self.configuration_docs.append(document)
            finally:
                # Copy the "provider" value to the specification as well
                document.specification['provider'] = document['provider']
Example #19
    def upgrade(self):
        inventory_path = get_inventory_path_for_build(self.backup_build_dir)
        build_version = check_build_output_version(self.backup_build_dir)

        self.logger.info(f'Loading backup Ansible inventory: {inventory_path}')
        loaded_inventory = InventoryManager(loader=DataLoader(),
                                            sources=inventory_path)

        # move loaded inventory to templating structure
        new_inventory = []
        for key in loaded_inventory.groups:
            if key not in ('all', 'ungrouped'):
                group_hosts = loaded_inventory.groups[key].hosts
                new_hosts = [
                    AnsibleHostModel(host.address, host.vars['ansible_host'])
                    for host in group_hosts
                ]
                new_inventory.append(AnsibleInventoryItem(key, new_hosts))

        if build_version == BUILD_LEGACY:
            self.logger.info('Upgrading Ansible inventory from Epiphany < 0.3.0')

            # Epiphany < 0.3.0 did not have a manifest file in the build folder,
            # so let's create a bare-minimum cluster model from the inventory
            self.cluster_model = dict_to_objdict({
                'provider': 'any',
                'specification': {
                    'admin_user': {
                        'name': loaded_inventory.groups['all'].vars['ansible_user'],
                        'key_path': loaded_inventory.groups['all'].vars['ansible_ssh_private_key_file']
                    }
                }
            })

            # Remap roles
            self.rename_role(new_inventory, 'master', 'kubernetes_master')
            self.rename_role(new_inventory, 'worker', 'kubernetes_node')
            self.rename_role(new_inventory, 'deployments', 'applications')
            self.rename_role(new_inventory, 'elasticsearch-curator',
                             'elasticsearch_curator')
            self.rename_role(new_inventory, 'jmx-exporter', 'jmx_exporter')
            self.rename_role(new_inventory, 'kafka-exporter', 'kafka_exporter')
            self.rename_role(new_inventory, 'haproxy_tls_termination',
                             'haproxy')

            # remove linux and reboot roles if present
            self.delete_role(new_inventory, 'linux')
            self.delete_role(new_inventory, 'reboot')
        else:
            self.logger.info('Upgrading Ansible inventory from Epiphany >= 0.3.0')

            # load cluster model from manifest
            self.manifest_docs = load_manifest_docs(self.backup_build_dir)
            self.cluster_model = select_single(
                self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Merge manifest cluster config with newer defaults
        default_cluster_model = load_yaml_obj(data_types.DEFAULT, 'common',
                                              'epiphany-cluster')
        merge_objdict(default_cluster_model, self.cluster_model)
        self.cluster_model = default_cluster_model

        # Check if repo roles are present and if not add them
        master = self.get_role(new_inventory, 'kubernetes_master')
        if master is None:
            raise Exception('No kubernetes_master to use as repository')
        master_node = master.hosts[0]

        # add image_registry
        image_registry = self.get_role(new_inventory, 'image_registry')
        if image_registry is None:
            hosts = [AnsibleHostModel(master_node.name, master_node.ip)]
            new_inventory.append(AnsibleInventoryItem('image_registry', hosts))

        # add repository
        repository = self.get_role(new_inventory, 'repository')
        if repository is None:
            hosts = [AnsibleHostModel(master_node.name, master_node.ip)]
            new_inventory.append(AnsibleInventoryItem('repository', hosts))

        # save new inventory
        save_inventory(new_inventory, self.cluster_model, self.build_dir)

        return 0
Example #20
    def get_available_roles(self):
        features_map = select_single(
            self.config_docs,
            lambda x: x.kind == 'configuration/feature-mapping')
        return features_map.specification.available_roles
Example #21
    def get_roles_for_feature(self, component_key):
        features_map = select_single(
            self.config_docs,
            lambda x: x.kind == 'configuration/feature-mapping')
        return features_map.specification.roles_mapping[component_key]
Example #22
def test_select_single_should_return_none_if_data_is_none():

    actual = select_single(
        None, lambda item: item.name == 'name-that-does-not-exist')

    assert actual is None
Example #23
    def __init__(self, docs):
        super().__init__(__name__)
        self.cluster_model = select_single(
            docs, lambda x: x.kind == 'epiphany-cluster')
        self.docs = docs
Example #24
def test_select_single_should_return_single_matching_element():

    actual = select_single(DATA, lambda item: item.index == 2)

    assert isinstance(actual, ObjDict)
    assert actual.index == 2 and actual.name == DATA[actual.index].name
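
The tests above and below reference a module-level DATA fixture that is not shown in this listing. A hypothetical fixture consistent with the assertions (the element whose index attribute is 2 sits at list position 2, and a name is duplicated to trigger the too-many-matches case in Example #25):

# Hypothetical test fixture; the real one is not part of this listing.
DATA = [
    ObjDict({'index': 0, 'name': 'test-name0'}),
    ObjDict({'index': 1, 'name': 'test-name1'}),
    ObjDict({'index': 2, 'name': 'test-name2'}),
    ObjDict({'index': 3, 'name': 'test-name23'}),
    ObjDict({'index': 4, 'name': 'test-name23'}),  # duplicate name on purpose
]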
Example #25
def test_select_single_should_raise_if_there_are_too_many_matching_elements():
    with pytest.raises(ExpectedSingleResultException):
        select_single(DATA, lambda item: item.name == 'test-name23')
Example #26
def test_select_single_should_raise_if_there_is_no_matching_element():
    with pytest.raises(ExpectedSingleResultException):
        select_single(DATA,
                      lambda item: item.name == 'name-that-does-not-exist')
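
Taken together, the tests in Examples #22 and #24 through #26 pin down the contract of select_single: a None collection yields None, exactly one match is returned as-is, and zero or multiple matches raise ExpectedSingleResultException. A minimal sketch satisfying that contract, assuming ExpectedSingleResultException is a plain Exception subclass (not the actual Epiphany implementation):

class ExpectedSingleResultException(Exception):
    pass


def select_single(data, predicate):
    # None input yields None (Example #22)
    if data is None:
        return None
    matches = [item for item in data if predicate(item)]
    # Zero or multiple matches raise (Examples #25 and #26)
    if len(matches) != 1:
        raise ExpectedSingleResultException(
            f'Expected a single matching element, found {len(matches)}')
    # Exactly one match: return it unchanged (Example #24)
    return matches[0]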