Example #1
0
def __init__(self,
             cluster_model=None,
             config_docs=None,
             build_dir=None,
             backup_build_dir=None):
    super().__init__(__name__)
    self.cluster_model = cluster_model
    self.config_docs = config_docs
    self.build_dir = build_dir
    self.backup_build_dir = backup_build_dir
    self.ansible_command = AnsibleCommand()
Example #2
0
def __init__(self, input_data):
    # super(BackupRecoveryEngineBase, self).__init__(__name__) needs to be called in any subclass
    self.file = input_data.file
    self.build_directory = input_data.build_directory
    self.manifest_docs = list()
    self.input_docs = list()
    self.configuration_docs = list()
    self.cluster_model = None
    self.backup_doc = None
    self.recovery_doc = None
    self.ansible_command = AnsibleCommand()
Example #3
0
def __init__(self, input_data):
    super().__init__(__name__)
    self.build_dir = input_data.build_directory
    self.ansible_options = {
        'profile_tasks': getattr(input_data, 'profile_ansible_tasks',
                                 False)
    }
    self.backup_build_dir = ''
    self.ansible_command = AnsibleCommand()
Example #4
0
class PatchEngine(Step):
    def __init__(self, input_data):
        super().__init__(__name__)
        self.build_directory = input_data.build_directory
        self.ansible_command = AnsibleCommand()

    def __enter__(self):
        super().__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        super().__exit__(exc_type, exc_value, traceback)

    def backup(self):
        self.upgrade_patch_files_and_run('backup')
        return 0

    def recovery(self):
        self.upgrade_patch_files_and_run('recovery')
        return 0

    def upgrade_patch_files_and_run(self, action):
        self.logger.info(f'Running {action}...')

        # copy role files
        roles_build_path = os.path.join(self.build_directory, 'ansible/roles',
                                        action)
        roles_source_path = os.path.join(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH,
                                         'roles', action)
        copy_files_recursively(roles_source_path, roles_build_path)

        # copy playbook file
        playbook_build_path = os.path.join(self.build_directory, 'ansible',
                                           f'{action}.yml')
        playbook_source_path = os.path.join(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH,
                                            f'{action}.yml')
        copy_file(playbook_source_path, playbook_build_path)

        # run the playbook
        inventory_path = get_inventory_path_for_build(self.build_directory)
        self.ansible_command.run_playbook(inventory=inventory_path,
                                          playbook_path=playbook_build_path)
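
For illustration, a minimal usage sketch of PatchEngine as a context manager; the FakeInputData stand-in below is an assumption (in practice the parsed CLI arguments supply build_directory), not part of the source:

from collections import namedtuple

# hypothetical stand-in for the parsed CLI arguments; only build_directory is needed here
FakeInputData = namedtuple('FakeInputData', ['build_directory'])

with PatchEngine(FakeInputData(build_directory='/tmp/build/mycluster')) as engine:
    exit_code = engine.backup()  # or engine.recovery()
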
Example #5
0
class AnsibleRunner(Step):
    ANSIBLE_PLAYBOOKS_PATH = DATA_FOLDER_PATH + '/common/ansible/playbooks/'

    def __init__(self,
                 cluster_model=None,
                 config_docs=None,
                 build_dir=None,
                 backup_build_dir=None,
                 ansible_options=None):
        super().__init__(__name__)
        self.cluster_model = cluster_model
        self.config_docs = config_docs
        self.build_dir = build_dir
        self.backup_build_dir = backup_build_dir
        self.ansible_options = ansible_options
        self.ansible_command = AnsibleCommand()

    def __enter__(self):
        super().__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        super().__exit__(exc_type, exc_value, traceback)

    def playbook_path(self, name):
        if self.cluster_model is not None:
            return os.path.join(
                get_ansible_path(self.cluster_model.specification.name),
                f'{name}.yml')
        else:
            return os.path.join(get_ansible_path_for_build(self.build_dir),
                                f'{name}.yml')

    def copy_resources(self):
        self.logger.info('Copying Ansible resources')
        if self.cluster_model is not None:
            ansible_dir = get_ansible_path(
                self.cluster_model.specification.name)
        else:
            ansible_dir = get_ansible_path_for_build(self.build_dir)

        shutil.rmtree(ansible_dir, ignore_errors=True)
        copy_files_recursively(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH,
                               ansible_dir)

        # copy skopeo so Ansible can move it to the repository machine
        if not Config().offline_requirements:
            shutil.copy(
                os.path.join(dirname(dirname(inspect.getfile(os))),
                             'skopeo_linux'), '/tmp')

    def pre_flight(self, inventory_path):
        self.logger.info('Checking connection to each machine')
        self.ansible_command.run_task_with_retries(inventory=inventory_path,
                                                   module="ping",
                                                   hosts="all",
                                                   retries=5)

        self.logger.info('Checking preflight conditions on each machine')
        self.ansible_command.run_playbook_with_retries(
            inventory=inventory_path,
            playbook_path=self.playbook_path('preflight'),
            retries=1)

        self.logger.info(
            'Setting up repository for cluster provisioning. This will take a while...'
        )
        self.ansible_command.run_playbook_with_retries(
            inventory=inventory_path,
            playbook_path=self.playbook_path('repository_setup'),
            retries=1)

        self.ansible_command.run_playbook(
            inventory=inventory_path,
            playbook_path=self.playbook_path('common'))

    def post_flight(self, inventory_path):
        self.ansible_command.run_playbook(
            inventory=inventory_path,
            playbook_path=self.playbook_path('repository_teardown'))

    def apply(self):
        inventory_path = get_inventory_path(
            self.cluster_model.specification.name)

        # copy resources
        self.copy_resources()

        # create inventory
        inventory_creator = AnsibleInventoryCreator(self.cluster_model,
                                                    self.config_docs)
        inventory_creator.create()
        time.sleep(10)

        # create ansible.cfg
        ansible_config_file_path = get_ansible_config_file_path(
            self.cluster_model.specification.name)
        ansible_cfg_creator = AnsibleConfigFileCreator(
            self.ansible_options, ansible_config_file_path)
        ansible_cfg_creator.create()

        # generate vars
        ansible_vars_generator = AnsibleVarsGenerator(
            inventory_creator=inventory_creator)
        ansible_vars_generator.generate()

        # pre-flight to prepare machines
        self.pre_flight(inventory_path)

        # run roles
        enabled_roles = inventory_creator.get_enabled_roles()
        for role in enabled_roles:
            self.ansible_command.run_playbook(
                inventory=inventory_path,
                playbook_path=self.playbook_path(to_role_name(role)),
                vault_file=Config().vault_password_location)

        # post-flight after we are done
        self.post_flight(inventory_path)

    def upgrade(self):
        inventory_path = get_inventory_path_for_build(self.build_dir)

        # copy resources
        self.copy_resources()

        # upgrade inventory
        inventory_upgrade = AnsibleInventoryUpgrade(self.build_dir,
                                                    self.backup_build_dir)
        inventory_upgrade.upgrade()

        # create ansible.cfg
        ansible_config_file_path = get_ansible_config_file_path_for_build(
            self.build_dir)
        ansible_cfg_creator = AnsibleConfigFileCreator(
            self.ansible_options, ansible_config_file_path)
        ansible_cfg_creator.create()

        # generate vars
        ansible_vars_generator = AnsibleVarsGenerator(
            inventory_upgrade=inventory_upgrade)
        ansible_vars_generator.generate()

        # pre-flight to prepare machines
        self.pre_flight(inventory_path)

        # run image_registry playbook
        self.ansible_command.run_playbook(
            inventory=inventory_path,
            playbook_path=self.playbook_path('image_registry'))

        # run upgrade playbook
        self.ansible_command.run_playbook(
            inventory=inventory_path,
            playbook_path=self.playbook_path('upgrade'))

        # post-flight after we are done
        self.post_flight(inventory_path)
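
A minimal sketch of the upgrade flow shown above, assuming an existing build directory; the paths and the options dictionary are placeholders, not taken from the source (the apply flow is analogous but takes cluster_model and config_docs instead of build directories):

# hypothetical invocation of the upgrade flow; paths below are placeholders
with AnsibleRunner(build_dir='/tmp/build/mycluster',
                   backup_build_dir='/tmp/build/mycluster_backup',
                   ansible_options={'profile_tasks': False}) as runner:
    runner.upgrade()
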
Example #6
0
def __init__(self, input_data):
    super().__init__(__name__)
    self.build_dir = input_data.build_directory
    self.backup_build_dir = ''
    self.ansible_command = AnsibleCommand()
class BackupRecoveryEngineBase(Step):
    """Perform backup and recovery operations (abstract base class)."""

    def __init__(self, input_data):
        # super(BackupRecoveryEngineBase, self).__init__(__name__) needs to be called in any subclass
        self.file = input_data.file
        self.build_directory = input_data.build_directory
        self.manifest_docs = list()
        self.input_docs = list()
        self.configuration_docs = list()
        self.cluster_model = None
        self.backup_doc = None
        self.recovery_doc = None
        self.ansible_command = AnsibleCommand()

    def __enter__(self):
        super().__enter__()
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        super().__exit__(exc_type, exc_value, traceback)

    def _process_input_docs(self):
        """Load, validate and merge (with defaults) input yaml documents."""

        path_to_manifest = os.path.join(self.build_directory, MANIFEST_FILE_NAME)
        if not os.path.isfile(path_to_manifest):
            raise Exception('No manifest.yml inside the build folder')

        # Get existing manifest config documents
        self.manifest_docs = load_yamls_file(path_to_manifest)
        self.cluster_model = select_single(self.manifest_docs, lambda x: x.kind == 'epiphany-cluster')

        # Load backup / recovery configuration documents
        self.input_docs = load_yamls_file(self.file)

        # Validate input documents
        with SchemaValidator(self.cluster_model, self.input_docs) as schema_validator:
            schema_validator.run_for_individual_documents()

        # Merge the input docs with defaults
        with DefaultMerger(self.input_docs) as doc_merger:
            self.input_docs = doc_merger.run()

    def _process_configuration_docs(self):
        """Populate input yaml documents with additional required ad-hoc data."""

        # Seed the self.configuration_docs
        self.configuration_docs = copy.deepcopy(self.input_docs)

        # Note that running DefaultMerger again is not needed here, since merging was already done above.
        # We only check whether documents are missing and insert default ones, skipping the redundant merge.
        for kind in {'configuration/backup', 'configuration/recovery'}:
            try:
                # Check if the required document is in user inputs
                document = select_single(self.configuration_docs, lambda x: x.kind == kind)
            except ExpectedSingleResultException:
                # If there is no document provided by the user, then fallback to defaults
                document = load_yaml_obj(data_types.DEFAULT, 'common', kind)
                # Inject the required "version" attribute
                document['version'] = VERSION
                # Copy the "provider" value from the cluster model
                document['provider'] = self.cluster_model.provider
                # Save the document for later use
                self.configuration_docs.append(document)
            finally:
                # Copy the "provider" value to the specification as well
                document.specification['provider'] = document['provider']

    def _update_role_files_and_vars(self, action, document):
        """Render mandatory vars files for backup/recovery ansible roles inside the existing build directory."""

        self.logger.info(f'Updating {action} role files...')

        # Copy role files
        roles_build_path = os.path.join(self.build_directory, 'ansible/roles', action)
        roles_source_path = os.path.join(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH, 'roles', action)
        copy_files_recursively(roles_source_path, roles_build_path)

        # Render role vars
        vars_dir = os.path.join(roles_build_path, 'vars')
        os.makedirs(vars_dir, exist_ok=True)
        vars_file_path = os.path.join(vars_dir, 'main.yml')
        with open(vars_file_path, 'w') as stream:
            dump(document, stream)

    def _update_playbook_files_and_run(self, action, component):
        """Update backup/recovery ansible playbooks inside the existing build directory and run the provisioning."""

        self.logger.info(f'Running {action} on {component}...')

        # Copy playbook file
        playbook_build_path = os.path.join(self.build_directory, 'ansible', f'{action}_{component}.yml')
        playbook_source_path = os.path.join(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH, f'{action}_{component}.yml')
        copy_file(playbook_source_path, playbook_build_path)

        # Run the playbook
        inventory_path = get_inventory_path_for_build(self.build_directory)
        self.ansible_command.run_playbook(inventory=inventory_path, playbook_path=playbook_build_path)
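
The base class leaves Step initialization to its subclasses (see the comment in its __init__). A hypothetical subclass sketch showing how the protected helpers might be chained; the 'kubernetes' component name and the overall flow are illustrative assumptions, not taken from the source:

class BackupEngine(BackupRecoveryEngineBase):
    """Hypothetical subclass sketch; not part of the source."""

    def __init__(self, input_data):
        # initialize Step first, as required by the comment in the base class __init__
        super(BackupRecoveryEngineBase, self).__init__(__name__)
        BackupRecoveryEngineBase.__init__(self, input_data)

    def backup(self):
        self._process_input_docs()
        self._process_configuration_docs()
        self.backup_doc = select_single(self.configuration_docs,
                                        lambda x: x.kind == 'configuration/backup')
        # 'kubernetes' is only an example component name
        self._update_role_files_and_vars('backup', self.backup_doc)
        self._update_playbook_files_and_run('backup', 'kubernetes')
        return 0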