    def run(self, command, env, auto_approve=False):
        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        if command == self.APPLY_COMMAND or command == self.DESTROY_COMMAND:
            cmd.append(f'-state={self.working_directory}/terraform.tfstate')

        cmd.append(self.working_directory)

        cmd = ' '.join(cmd)
        self.logger.info(f'Running: "{cmd}"')

        if Config().debug > 0:
            env['TF_LOG'] = terraform_verbosity[Config().debug]

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe, env=env, shell=True) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running "{cmd}"')
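LogPipe itself is not shown on this page. Below is a minimal sketch of how such a helper could forward subprocess output to the logging module; the thread-plus-os.pipe approach and the stderrstrings attribute (scanned by a later example for "RetryableError") are assumptions for illustration, not the project's actual implementation.

import logging
import os
import threading

class LogPipe(threading.Thread):
    """Hypothetical sketch: exposes a file descriptor that subprocess.Popen
    can write to, and logs each received line on a background thread."""

    def __init__(self, logger_name):
        super().__init__(daemon=True)
        self.logger = logging.getLogger(logger_name)
        self.stderrstrings = []  # assumed collector scanned for 'RetryableError'
        self.read_fd, self.write_fd = os.pipe()
        self.start()

    def fileno(self):
        # Popen accepts any object with a fileno() for stdout/stderr.
        return self.write_fd

    def run(self):
        with os.fdopen(self.read_fd) as reader:
            for line in reader:
                line = line.rstrip('\n')
                self.stderrstrings.append(line)
                self.logger.info(line)

    def close(self):
        # Closing the write end lets the reader thread reach EOF and exit.
        os.close(self.write_fd)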
Example #2
    def populate_group_vars(self, ansible_dir):
        main_vars = ObjDict()
        main_vars['admin_user'] = self.cluster_model.specification.admin_user
        main_vars['validate_certs'] = Config().validate_certs
        main_vars['offline_requirements'] = Config().offline_requirements
        main_vars['wait_for_pods'] = Config().wait_for_pods

        shared_config_doc = select_first(
            self.config_docs,
            lambda x: x.kind == 'configuration/shared-config')
        if shared_config_doc is None:
            shared_config_doc = load_yaml_obj(types.DEFAULT, 'common',
                                              'configuration/shared-config')

        self.set_vault_path(shared_config_doc)
        main_vars.update(shared_config_doc.specification)

        vars_dir = os.path.join(ansible_dir, 'group_vars')
        if not os.path.exists(vars_dir):
            os.makedirs(vars_dir)

        vars_file_name = 'all.yml'
        vars_file_path = os.path.join(vars_dir, vars_file_name)

        with open(vars_file_path, 'a') as stream:
            dump(main_vars, stream)
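ObjDict is the project's attribute-access dictionary. A hypothetical minimal equivalent, shown only so the snippet above is self-explanatory:

class ObjDict(dict):
    """Hypothetical sketch: a dict whose keys are also readable and writable
    as attributes, so d['admin_user'] and d.admin_user are interchangeable."""

    def __getattr__(self, name):
        try:
            return self[name]
        except KeyError as err:
            raise AttributeError(name) from err

    def __setattr__(self, name, value):
        self[name] = value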
Example #3
    def run_task(self, hosts, inventory, module, args=None):
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        self.logger.info('Running: "' + ' '.join(module) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')
Example #4
def adjust_paths_from_file(args):
    if not os.path.isabs(args.file):
        args.file = os.path.join(os.getcwd(), args.file)
    if not os.path.isfile(args.file):
        Config().output_dir = os.getcwd()  # Default to working dir so we can at least write logs.
        raise Exception(f'File "{args.file}" does not exist')
    if Config().output_dir is None:
        Config().output_dir = os.path.join(os.path.dirname(args.file), 'build')
    dump_config(Config())
Example #5
def ensure_vault_password_is_set(args):
    vault_password = args.vault_password
    if vault_password is None:
        vault_password = prompt_for_password(
            "Provide password to encrypt vault: ")

    directory_path = os.path.dirname(Config().vault_password_location)
    os.makedirs(directory_path, exist_ok=True)
    save_to_file(Config().vault_password_location, vault_password)
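prompt_for_password and save_to_file are simple helpers defined elsewhere in the codebase. Plausible one-liners (assumptions, not the actual implementations) would be:

from getpass import getpass

def prompt_for_password(prompt):
    # Hypothetical: read the password without echoing it to the terminal.
    return getpass(prompt)

def save_to_file(file_path, content):
    # Hypothetical: write the secret to disk; the real helper may also chmod it.
    with open(file_path, 'w') as stream:
        stream.write(content)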
Example #6
def adjust_paths_from_build(args):
    if not os.path.isabs(args.build_directory):
        args.build_directory = os.path.join(os.getcwd(), args.build_directory)
    if not os.path.exists(args.build_directory):
        Config().output_dir = os.getcwd()  # Default to working dir so we can at least write logs.
        raise Exception(f'Build directory "{args.build_directory}" does not exist')
    args.build_directory = args.build_directory.rstrip('/')
    if Config().output_dir is None:
        Config().output_dir = os.path.split(args.build_directory)[0]
    dump_config(Config())
Example #7
    def add_validate_certs(self, document):
        if document is None:
            raise Exception('Config is empty for: group_vars/all.yml')

        document['validate_certs'] = Config().validate_certs

        return document
Example #8
    def set_vault_path(self, shared_config):
        if shared_config.specification.vault_location == '':
            shared_config.specification.vault_tmp_file_location = Config().vault_password_location
            cluster_name = self.get_cluster_name()
            shared_config.specification.vault_location = get_ansible_vault_path(cluster_name)
Example #9
    def apply(self):
        inventory_path = get_inventory_path(
            self.cluster_model.specification.name)

        # copy resources
        self.copy_resources()

        # create inventory
        inventory_creator = AnsibleInventoryCreator(self.cluster_model,
                                                    self.config_docs)
        inventory_creator.create()
        time.sleep(10)

        # generate vars
        ansible_vars_generator = AnsibleVarsGenerator(
            inventory_creator=inventory_creator)
        ansible_vars_generator.generate()

        # pre-flight to prepare machines
        self.pre_flight(inventory_path)

        # run roles
        enabled_roles = inventory_creator.get_enabled_roles()
        for role in enabled_roles:
            self.ansible_command.run_playbook(
                inventory=inventory_path,
                playbook_path=self.playbook_path(to_role_name(role)),
                vault_file=Config().vault_password_location)

        # post-flight after we are done
        self.post_flight(inventory_path)
Example #10
    def run_upgrade(self):
        try:
            build_directory = Config().output_dir
            build_roles_directory = os.path.join(build_directory, 'ansible/roles')

            upgrade_playbook_path = os.path.join(build_roles_directory, 'upgrade')
            backup_playbook_path = os.path.join(build_roles_directory, 'backup')
            recovery_playbook_path = os.path.join(build_roles_directory, 'recovery')

            upgrade_role_path = os.path.join(build_directory, 'ansible', 'upgrade.yml')

            epiphany_playbooks_path = os.path.dirname(__file__) + AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH
            epiphany_roles_path = os.path.join(epiphany_playbooks_path, 'roles')

            upgrade_role_source_path = os.path.join(epiphany_roles_path, 'upgrade')
            backup_role_source_path = os.path.join(epiphany_roles_path, 'backup')
            restore_role_source_path = os.path.join(epiphany_roles_path, 'recovery')
            playbook_source_path = os.path.join(epiphany_playbooks_path, 'upgrade.yml')

            copy_files_recursively(upgrade_role_source_path, upgrade_playbook_path)
            copy_files_recursively(backup_role_source_path, backup_playbook_path)
            copy_files_recursively(restore_role_source_path, recovery_playbook_path)
            copy_file(playbook_source_path, upgrade_role_path)

            inventory_path = get_inventory_path_for_build(build_directory)
            self.ansible_command.run_playbook(inventory=inventory_path, playbook_path=upgrade_role_path)
            return 0
        except Exception as e:
            self.logger.error(e, exc_info=True)  # TODO extensive debug output might not always be wanted. Make this configurable with input flag?
            return 1
Example #11
    def prepare(self):
        prepare_src = os.path.join(self.PREPARE_PATH, self.os)
        charts_src = self.CHARTS_PATH
        skopeo_src = os.path.join(dirname(dirname(inspect.getfile(os))),
                                  'skopeo_linux')

        prepare_dst = os.path.join(Config().output_dir, 'prepare_scripts')
        charts_dst = os.path.join(prepare_dst, 'charts', 'system')

        if not os.path.exists(prepare_src):
            supported_os = os.listdir(self.PREPARE_PATH)
            raise Exception(
                f'Unsupported OS: {self.os}. Currently supported: {supported_os}'
            )

        if not os.path.exists(skopeo_src):
            raise Exception('Skopeo dependency not found')

        # copy files to output dir
        copy_files_recursively(prepare_src, prepare_dst)
        copy_files_recursively(charts_src, charts_dst)
        shutil.copy(skopeo_src, prepare_dst)

        # make sure the scripts and skopeo are executable
        self.make_file_executable(os.path.join(prepare_dst, 'skopeo_linux'))
        self.make_file_executable(
            os.path.join(prepare_dst, 'download-requirements.sh'))

        self.logger.info(
            f'Prepared files for downloading the offline requirements in: {prepare_dst}'
        )
        return 0
Example #12
def pytest_configure(config):
    """
    Allows plugins and conftest files to perform initial configuration.
    This hook is called for every plugin and initial conftest
    file after command line options have been parsed.
    """
    Config().output_dir = os.path.join(
        os.path.dirname(os.path.realpath(__file__)), 'results/')
Example #13
def run_recovery(args):
    if not query_yes_no(
            'This is an experimental feature and could change at any time. Do you want to continue?'
    ):
        return 0
    Config().output_dir = args.build_directory
    with PatchEngine() as engine:
        return engine.run_recovery()
Example #14
    def __new__(cls, logger_name):
        if Log.instance is None:
            Log.instance = Log.__LogBase()
        config = Config()
        logger = logging.getLogger(logger_name)
        if config.log_type == 'json':
            logger.addHandler(Log.instance.json_stream_handler)
        logger.addHandler(Log.instance.json_file_handler)
        return logger
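Because of the __new__ override, constructing Log returns a plain logging.Logger already wired to the shared handlers, not a Log instance. A hypothetical call site:

logger = Log('epicli')  # actually a logging.Logger, per __new__ above
logger.info('Cluster build started')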
Example #15
    def run(self, command, env, auto_approve=False, auto_retries=1):
        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        if command == self.APPLY_COMMAND or command == self.DESTROY_COMMAND:
            cmd.append(f'-state={self.working_directory}/terraform.tfstate')

        cmd.append('-no-color')

        cmd.append(self.working_directory)

        cmd = ' '.join(cmd)
        self.logger.info(f'Running: "{cmd}"')

        if Config().debug > 0:
            env['TF_LOG'] = terraform_verbosity[Config().debug]

        retries = 1
        do_retry = True
        while retries <= auto_retries and do_retry:
            logpipe = LogPipe(__name__)
            with subprocess.Popen(cmd,
                                  stdout=logpipe,
                                  stderr=logpipe,
                                  env=env,
                                  shell=True) as sp:
                logpipe.close()
            retries += 1
            do_retry = any('RetryableError' in s for s in logpipe.stderrstrings)
            if do_retry and retries <= auto_retries:
                self.logger.warning(
                    'Terraform failed with "RetryableError" error. '
                    f'Retry: {retries}/{auto_retries}')

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running "{cmd}"')
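A hypothetical call site for the retrying variant, assuming a Terraform command instance named terraform:

import os

# Hypothetical usage: retry `terraform apply` up to 3 times when stderr
# contains "RetryableError" (e.g. transient cloud API failures).
terraform.run(terraform.APPLY_COMMAND,
              env=os.environ.copy(),
              auto_approve=True,
              auto_retries=3)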
Example #16
    def run_recovery(self):
        try:
            build_directory = Config().output_dir
            backup_role_path = os.path.join(build_directory, 'ansible', 'recovery.yml')
            inventory_path = get_inventory_path_for_build(build_directory)
            self.ansible_command.run_playbook(inventory=inventory_path, playbook_path=backup_role_path)

            return 0
        except Exception as e:
            self.logger.error(e, exc_info=True)  # TODO extensive debug output might not always be wanted. Make this configurable with input flag?
            return 1
Example #17
    def __init__(self):
        config = Config()
        log_path = os.path.join(get_output_path(), config.log_file)
        logging.basicConfig(level=logging.INFO, format=config.log_format, datefmt=config.log_date_format)
        formatter = jsonlogger.JsonFormatter(config.log_format, datefmt=config.log_date_format)
        should_roll_over = os.path.isfile(log_path)
        handler = logging.handlers.RotatingFileHandler(log_path, backupCount=config.log_count)
        if should_roll_over:
            handler.doRollover()
        self.json_file_handler = handler
        self.json_file_handler.setFormatter(formatter)
        self.json_stream_handler = logging.StreamHandler()
        self.json_stream_handler.setFormatter(formatter)
Example #18
    def populate_group_vars(self, ansible_dir):
        main_vars = ObjDict()
        main_vars['admin_user'] = self.cluster_model.specification.admin_user
        main_vars['k8s_as_cloud_service'] = self.cluster_model.specification.cloud.k8s_as_cloud_service
        main_vars['validate_certs'] = Config().validate_certs
        main_vars['offline_requirements'] = Config().offline_requirements
        main_vars['wait_for_pods'] = Config().wait_for_pods
        main_vars['is_upgrade_run'] = self.is_upgrade_run
        main_vars['roles_with_generated_vars'] = sorted(
            self.roles_with_generated_vars)

        if self.is_upgrade_run:
            shared_config_doc = self.get_shared_config_from_manifest()
        else:
            shared_config_doc = select_first(
                self.config_docs,
                lambda x: x.kind == 'configuration/shared-config')

        # Fallback if there is completely no trace of the shared-config doc
        if shared_config_doc is None:
            shared_config_doc = load_yaml_obj(types.DEFAULT, 'common',
                                              'configuration/shared-config')

        self.set_vault_path(shared_config_doc)
        main_vars.update(shared_config_doc.specification)

        vars_dir = os.path.join(ansible_dir, 'group_vars')
        if not os.path.exists(vars_dir):
            os.makedirs(vars_dir)

        vars_file_name = 'all.yml'
        vars_file_path = os.path.join(vars_dir, vars_file_name)

        with open(vars_file_path, 'a') as stream:
            dump(main_vars, stream)
Example #19
    def run_playbook(self, inventory, playbook_path, vault_file=None):
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        if vault_file is not None:
            cmd.extend(["--vault-password-file", vault_file])

        cmd.append(playbook_path)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        self.logger.info('Running: "' + ' '.join(playbook_path) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')
Example #20
    def copy_resources(self):
        self.logger.info('Copying Ansible resources')
        if self.cluster_model is not None:
            ansible_dir = get_ansible_path(
                self.cluster_model.specification.name)
        else:
            ansible_dir = get_ansible_path_for_build(self.build_dir)

        shutil.rmtree(ansible_dir, ignore_errors=True)
        copy_files_recursively(AnsibleRunner.ANSIBLE_PLAYBOOKS_PATH,
                               ansible_dir)

        # copy skopeo so Ansible can move it to the repository machine
        if not Config().offline_requirements:
            shutil.copy(
                os.path.join(dirname(dirname(inspect.getfile(os))),
                             'skopeo_linux'), '/tmp')
Example #21
class ColorFormatter(logging.Formatter):
    grey = '\x1b[38;21m'
    yellow = '\x1b[33;21m'
    red = '\x1b[31;21m'
    bold_red = '\x1b[31;1m'
    reset = '\x1b[0m'
    lineformat = Config().log_format

    FORMATS = {
        logging.DEBUG: grey + lineformat + reset,
        logging.INFO: grey + lineformat + reset,
        logging.WARNING: yellow + lineformat + reset,
        logging.ERROR: red + lineformat + reset,
        logging.CRITICAL: bold_red + lineformat + reset
    }

    def format(self, record):
        log_fmt = self.FORMATS.get(record.levelno)
        formatter = logging.Formatter(log_fmt,
                                      datefmt=Config().log_date_format)
        return formatter.format(record)
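A hypothetical way to attach the formatter; example #23 below does essentially the same inside its __init__:

# Hypothetical usage: colorized console output, per-level colors from FORMATS.
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(ColorFormatter())
logging.getLogger('epicli').addHandler(stream_handler)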
Example #22
    def run_playbook(self, inventory, playbook_path):
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(playbook_path)

        if Config().debug:
            cmd.append('-vvv')

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')
Example #23
        def __init__(self):
            config = Config()

            json_formatter = jsonlogger.JsonFormatter(
                config.log_format, datefmt=config.log_date_format)
            color_formatter = ColorFormatter()

            log_path = os.path.join(get_output_path(), config.log_file)
            should_roll_over = os.path.isfile(log_path)
            self.json_file_handler = logging.handlers.RotatingFileHandler(
                log_path, backupCount=config.log_count)
            self.json_file_handler.setLevel(level=logging.INFO)
            if should_roll_over:
                self.json_file_handler.doRollover()
            self.json_file_handler.setFormatter(json_formatter)

            self.stream_handler = logging.StreamHandler()
            if config.log_type == 'json':
                self.stream_handler.setFormatter(json_formatter)
            else:
                self.stream_handler.setFormatter(color_formatter)
Example #24
def query_yes_no(question, default="yes"):
    if Config().auto_approve:
        return True

    valid = {"yes": True, "y": True, "ye": True, "no": False, "n": False}
    if default is None:
        prompt = " [y/n] "
    elif default == "yes":
        prompt = " [Y/n] "
    elif default == "no":
        prompt = " [y/N] "
    else:
        raise ValueError("invalid default answer: '%s'" % default)

    while True:
        sys.stdout.write(question + prompt)
        choice = input().lower()
        if default is not None and choice == '':
            return valid[default]
        elif choice in valid:
            return valid[choice]
        else:
            sys.stdout.write("Please respond with 'yes' or 'no' "
                             "(or 'y' or 'n').\n")
Example #25
def adjust_paths(args):
    args.file = get_config_file_path(args.file)
    adjust_output_dir(args.file)
    dump_config(Config())
Example #26
def ensure_vault_password_is_cleaned():
    if os.path.exists(Config().vault_password_location):
        os.remove(Config().vault_password_location)
Example #27
def adjust_paths_from_output_dir():
    if not Config().output_dir:
        Config().output_dir = os.getcwd()  # Default to working dir so we can at least write logs.
    dump_config(Config())
Example #28
def main():
    config = Config()
    parser = argparse.ArgumentParser(
        description=__doc__,
        usage='''epicli <command> [<args>]''',
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # setup some root arguments
    parser.add_argument('--version',
                        action='version',
                        help='Shows the CLI version',
                        version=VERSION)
    parser.add_argument(
        '--licenses',
        action='version',
        help=
        'Shows the third-party packages the CLI uses and their licenses.',
        version=json.dumps(LICENSES, indent=4))
    parser.add_argument(
        '-l',
        '--log-file',
        dest='log_name',
        type=str,
        help='The name of the log file written to the output directory')
    parser.add_argument('--log-format',
                        dest='log_format',
                        type=str,
                        help='Format for the logging string.')
    parser.add_argument('--log-date-format',
                        dest='log_date_format',
                        type=str,
                        help='Format for the logging date.')
    parser.add_argument(
        '--log-count',
        dest='log_count',
        type=str,
        help='Rollover count; each CLI run generates a new log file.')
    parser.add_argument('--log-type',
                        choices=['plain', 'json'],
                        default='plain',
                        dest='log_type',
                        action='store',
                        help='Type of logs.')
    parser.add_argument(
        '--validate-certs',
        choices=['true', 'false'],
        default='true',
        action='store',
        dest='validate_certs',
        help=
        '''[Experimental]: Disables certificate checks for certain Ansible operations
                         which might have issues behind proxies (https://github.com/ansible/ansible/issues/32750). 
                         Should NOT be used in production for security reasons.'''
    )
    parser.add_argument(
        '--debug',
        dest='debug',
        action="store_true",
        help=
        'Set this to output extensive debug information. Carries over to Ansible and Terraform.'
    )
    parser.add_argument(
        '--auto-approve',
        dest='auto_approve',
        action="store_true",
        help='Auto approve any user input queries asked by Epicli')
    # some arguments we don't want available when running from the docker image.
    if not config.docker_cli:
        parser.add_argument(
            '-o',
            '--output',
            dest='output_dir',
            type=str,
            help='Directory where the CLI should write its output.')

    # setup subparsers
    subparsers = parser.add_subparsers()
    apply_parser(subparsers)
    validate_parser(subparsers)
    init_parser(subparsers)
    upgrade_parser(subparsers)
    backup_parser(subparsers)
    recovery_parser(subparsers)
    delete_parser(subparsers)
    prepare_parser(subparsers)

    # check if there were any variables and display full help
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    arguments = sys.argv[1:]

    # add some arguments to the general config so we can easily use them throughout the CLI
    args = parser.parse_args(arguments)

    config.output_dir = args.output_dir if hasattr(args, 'output_dir') else None
    config.log_file = args.log_name
    config.log_format = args.log_format
    config.log_date_format = args.log_date_format
    config.log_type = args.log_type
    config.log_count = args.log_count
    config.validate_certs = args.validate_certs == 'true'
    if 'offline_requirements' in args and args.offline_requirements is not None:
        config.offline_requirements = args.offline_requirements
    if 'wait_for_pods' in args and args.wait_for_pods is not None:
        config.wait_for_pods = args.wait_for_pods
    config.debug = args.debug
    config.auto_approve = args.auto_approve

    try:
        return args.func(args)
    except Exception as e:
        logger = Log('epicli')
        logger.error(e, exc_info=config.debug)
        return 1
Example #29
def run_init(args):
    Config().output_dir = os.getcwd()
    dump_config(Config())
    with InitEngine(args) as engine:
        return engine.init()
Example #30
def adjust_output_dir(config_file_path):
    if Config().output_dir is None:
        config_directory = os.path.dirname(config_file_path)
        Config().output_dir = os.path.join(config_directory, 'build')