Example #1
class AnsibleCommand:
    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.working_directory = working_directory

    def run_task(self, hosts, inventory, module, args=None):
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_task_with_retries(self,
                              inventory,
                              module,
                              hosts,
                              retries,
                              timeout=10,
                              args=None):
        for i in range(retries):
            try:
                self.run_task(hosts=hosts,
                              inventory=inventory,
                              module=module,
                              args=args)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info('Retry running task: ' + str(i + 1) + '/' +
                                 str(retries))
                time.sleep(timeout)
        else:
            raise Exception(f'Failed running task after {retries} retries')

    def run_playbook(self, inventory, playbook_path, vault_file=None):
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        if vault_file is not None:
            cmd.extend(["--vault-password-file", vault_file])

        cmd.append(playbook_path)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_playbook_with_retries(self,
                                  inventory,
                                  playbook_path,
                                  retries,
                                  timeout=10):
        for i in range(retries):
            try:
                self.run_playbook(inventory=inventory,
                                  playbook_path=playbook_path)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info('Retry running playbook: ' + str(i + 1) +
                                 '/' + str(retries))
                time.sleep(timeout)
        else:
            raise Exception(f'Failed running playbook after {retries} retries')
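Both retry helpers above hinge on Python's for/else: the else clause runs only when the loop finishes without hitting break. A minimal self-contained sketch of the idiom (generic names, not code from the project):

import time

def call_with_retries(fn, retries, timeout=10):
    # Generic illustration of the for/else retry pattern used above.
    for i in range(retries):
        try:
            fn()
            break              # success: the else clause below is skipped
        except Exception as e:
            print(f'Attempt {i + 1}/{retries} failed: {e}')
            time.sleep(timeout)
    else:
        # Reached only if no iteration executed break, i.e. every attempt failed.
        raise Exception(f'Failed after {retries} retries')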
Example #2
def main():
    config = Config()
    parser = argparse.ArgumentParser(
        description=__doc__,
        usage='''epicli <command> [<args>]''',
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # setup some root arguments
    parser.add_argument('--version',
                        action='version',
                        help='Shows the CLI version',
                        version=VERSION)
    parser.add_argument(
        '--licenses',
        action='version',
        help=
        'Shows the third party packages and their licenses the CLI is using.',
        version=json.dumps(LICENSES, indent=4))
    parser.add_argument(
        '-l',
        '--log-file',
        dest='log_name',
        type=str,
        help='The name of the log file written to the output directory')
    parser.add_argument('--log-format',
                        dest='log_format',
                        type=str,
                        help='Format for the logging string.')
    parser.add_argument('--log-date-format',
                        dest='log_date_format',
                        type=str,
                        help='Format for the logging date.')
    parser.add_argument(
        '--log-count',
        dest='log_count',
        type=str,
        help='Rollover count; each CLI run will generate a new log.')
    parser.add_argument('--log-type',
                        choices=['plain', 'json'],
                        default='plain',
                        dest='log_type',
                        action='store',
                        help='Type of logs.')
    parser.add_argument(
        '--validate-certs',
        choices=['true', 'false'],
        default='true',
        action='store',
        dest='validate_certs',
        help=
        '''[Experimental]: Disables certificate checks for certain Ansible operations
                         which might have issues behind proxies (https://github.com/ansible/ansible/issues/32750). 
                         Should NOT be used in production for security reasons.'''
    )
    parser.add_argument(
        '--debug',
        dest='debug',
        action="store_true",
        help=
        'Set this to output extensive debug information. Carries over to Ansible and Terraform.'
    )
    parser.add_argument(
        '--auto-approve',
        dest='auto_approve',
        action="store_true",
        help='Auto approve any user input queries asked by Epicli')
    # some arguments we don't want available when running from the docker image.
    if not config.docker_cli:
        parser.add_argument(
            '-o',
            '--output',
            dest='output_dir',
            type=str,
            help='Directory where the CLI should write its output.')

    # setup subparsers
    subparsers = parser.add_subparsers()
    apply_parser(subparsers)
    validate_parser(subparsers)
    init_parser(subparsers)
    upgrade_parser(subparsers)
    backup_parser(subparsers)
    recovery_parser(subparsers)
    delete_parser(subparsers)
    prepare_parser(subparsers)

    # check if there were any variables and display full help
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    arguments = sys.argv[1:]

    # add some arguments to the general config so we can easily use them throughout the CLI
    args = parser.parse_args(arguments)

    config.output_dir = getattr(args, 'output_dir', None)
    config.log_file = args.log_name
    config.log_format = args.log_format
    config.log_date_format = args.log_date_format
    config.log_type = args.log_type
    config.log_count = args.log_count
    config.validate_certs = (args.validate_certs == 'true')
    if 'offline_requirements' in args and args.offline_requirements is not None:
        config.offline_requirements = args.offline_requirements
    if 'wait_for_pods' in args and args.wait_for_pods is not None:
        config.wait_for_pods = args.wait_for_pods
    config.debug = args.debug
    config.auto_approve = args.auto_approve

    try:
        return args.func(args)
    except Exception as e:
        logger = Log('epicli')
        logger.error(e, exc_info=config.debug)
        return 1
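The final args.func(args) call works because argparse subparsers can attach a handler via set_defaults; a sketch of what one of the *_parser helpers presumably looks like (the handler name, arguments, and body are illustrative assumptions, not the project's actual code):

def apply_parser(subparsers):
    # Hypothetical shape of a subcommand registration; only set_defaults(func=...)
    # is essential for the args.func(args) dispatch in main().
    sub_parser = subparsers.add_parser('apply', description='Applies the configuration.')
    sub_parser.add_argument('-f', '--file', dest='file', type=str, help='Input YAML file.')
    sub_parser.set_defaults(func=run_apply)

def run_apply(args):
    # Placeholder handler: the real one would drive the engine and return an exit code.
    return 0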
Example #3
class EpiphanyEngine:
    def __init__(self, input_data):
        self.file = input_data.file
        self.skip_infrastructure = getattr(input_data, 'no_infra', False)
        self.logger = Log(__name__)

        self.cluster_model = None
        self.input_docs = []
        self.configuration_docs = []
        self.infrastructure_docs = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def process_input_docs(self):
        # Load the user input YAML docs from the input file.
        if os.path.isabs(self.file):
            path_to_load = self.file
        else:
            path_to_load = os.path.join(os.getcwd(), self.file)
        # Close the input file after parsing; safe_load_all is expected to
        # materialize the docs before the stream is closed.
        with open(path_to_load, 'r') as user_file_stream:
            self.input_docs = safe_load_all(user_file_stream)

        # Merge the input docs with defaults
        with DefaultMerger(self.input_docs) as doc_merger:
            self.input_docs = doc_merger.run()

        # Get the cluster model.
        self.cluster_model = select_single(
            self.input_docs, lambda x: x.kind == 'epiphany-cluster')
        if self.cluster_model is None:
            raise Exception('No cluster model defined in input YAML file')

        # Validate input documents
        with SchemaValidator(self.cluster_model,
                             self.input_docs) as schema_validator:
            schema_validator.run()

    def process_infrastructure_docs(self):
        # Build the infrastructure docs
        with provider_class_loader(
                self.cluster_model.provider, 'InfrastructureBuilder')(
                    self.input_docs) as infrastructure_builder:
            self.infrastructure_docs = infrastructure_builder.run()

        # Validate infrastructure documents
        with SchemaValidator(self.cluster_model,
                             self.infrastructure_docs) as schema_validator:
            schema_validator.run()

    def process_configuration_docs(self):
        # Append with components and configuration docs
        with ConfigurationAppender(self.input_docs) as config_appender:
            self.configuration_docs = config_appender.run()

        # Validate configuration documents
        with SchemaValidator(self.cluster_model,
                             self.configuration_docs) as schema_validator:
            schema_validator.run()

    def collect_infrastructure_config(self):
        with provider_class_loader(self.cluster_model.provider,
                                   'InfrastructureConfigCollector')([
                                       *self.input_docs,
                                       *self.configuration_docs,
                                       *self.infrastructure_docs
                                   ]) as config_collector:
            config_collector.run()

    def verify(self):
        try:
            self.process_input_docs()

            self.process_configuration_docs()

            self.process_infrastructure_docs()

            save_manifest([
                *self.input_docs, *self.configuration_docs,
                *self.infrastructure_docs
            ], self.cluster_model.specification.name)

            return 0
        except Exception as e:
            # TODO: extensive debug output might not always be wanted.
            # Make this configurable with an input flag?
            self.logger.error(e, exc_info=True)
            return 1

    def apply(self):
        try:
            self.process_input_docs()

            self.process_infrastructure_docs()

            if not self.skip_infrastructure:
                # Generate terraform templates
                with TerraformTemplateGenerator(
                        self.cluster_model,
                        self.infrastructure_docs) as template_generator:
                    template_generator.run()

                # Run Terraform to create infrastructure
                with TerraformRunner(
                        self.cluster_model.specification.name) as tf_runner:
                    tf_runner.run()

            self.process_configuration_docs()

            self.collect_infrastructure_config()

            # Run Ansible to provision infrastructure
            docs = [
                *self.input_docs, *self.configuration_docs,
                *self.infrastructure_docs
            ]
            with AnsibleRunner(self.cluster_model, docs) as ansible_runner:
                ansible_runner.run()

            # Save docs to manifest file
            save_manifest(docs, self.cluster_model.specification.name)

            return 0
        except Exception as e:
            # TODO: extensive debug output might not always be wanted.
            # Make this configurable with an input flag?
            self.logger.error(e, exc_info=True)
            return 1

    def dry_run(self):
        self.process_input_docs()

        self.process_configuration_docs()

        return [*self.input_docs, *self.configuration_docs]
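Since EpiphanyEngine implements __enter__/__exit__, the expected call site is a with block; a minimal usage sketch (the Namespace contents are placeholders assumed from __init__ above, not values from the source):

from argparse import Namespace

# Hypothetical driver: Namespace mimics the parsed CLI arguments; the engine
# needs at least .file and optionally .no_infra.
input_data = Namespace(file='cluster.yml', no_infra=False)
with EpiphanyEngine(input_data) as engine:
    exit_code = engine.apply()  # verify() and dry_run() follow the same shape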
Example #4
class AnsibleCommand:
    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.working_directory = working_directory

    def run_task(self, hosts, inventory, module, args):
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_task_with_retries(self,
                              inventory,
                              module,
                              args,
                              hosts,
                              retries,
                              timeout=10):
        for i in range(retries):
            try:
                self.run_task(hosts=hosts,
                              inventory=inventory,
                              module=module,
                              args=args)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info("Retry running task: " + str(i + 1) + "/" +
                                 str(retries))
                time.sleep(timeout)
        else:
            raise Exception(f'Failed running task after {retries} retries')

    def run_playbook(self, inventory, playbook_path):
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(playbook_path)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_playbook_with_retries(self,
                                  inventory,
                                  playbook_path,
                                  retries,
                                  timeout=10):
        for i in range(retries):
            try:
                self.run_playbook(inventory=inventory,
                                  playbook_path=playbook_path)
                return 0
            except Exception as e:
                self.logger.error(e)
                self.logger.info("Retry running playbook: " + str(i + 1) +
                                 "/" + str(retries))
                time.sleep(timeout)
        return 1
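Popen accepts the LogPipe object for stdout/stderr because subprocess only needs an object exposing a fileno(); a minimal sketch of how such a logging pipe can be built (an illustration of the pattern, not the project's actual LogPipe):

import logging
import os
import threading

class LogPipe(threading.Thread):
    # Illustrative stand-in: a writable fd whose contents are forwarded to a logger.
    def __init__(self, name):
        super().__init__(daemon=True)
        self.logger = logging.getLogger(name)
        self.read_fd, self.write_fd = os.pipe()
        self.start()

    def fileno(self):
        # subprocess.Popen calls this to get the fd for the child's stdout/stderr.
        return self.write_fd

    def run(self):
        with os.fdopen(self.read_fd) as reader:
            for line in reader:
                self.logger.info(line.rstrip('\n'))

    def close(self):
        # Closing the write end lets the reader thread see EOF and exit.
        os.close(self.write_fd)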
Example #5
def main():
    config = Config()
    parser = argparse.ArgumentParser(
        description=__doc__,
        usage='''epicli <command> [<args>]''',
        formatter_class=argparse.RawTextHelpFormatter)

    # setup some root arguments
    parser.add_argument('--version',
                        action='version',
                        help='Shows the CLI version',
                        version=VERSION)
    parser.add_argument(
        '--licenses',
        action='version',
        help=
        'Shows the third party packages and their licenses the CLI is using.',
        version=json.dumps(LICENSES, indent=4))
    parser.add_argument(
        '-l',
        '--log-file',
        dest='log_name',
        type=str,
        help='The name of the log file written to the output directory')
    parser.add_argument('--log-format',
                        dest='log_format',
                        type=str,
                        help='Format for the logging string.')
    parser.add_argument('--log-date-format',
                        dest='log_date_format',
                        type=str,
                        help='Format for the logging date.')
    parser.add_argument(
        '--log-count',
        dest='log_count',
        type=str,
        help='Rollover count; each CLI run will generate a new log.')
    parser.add_argument('--log-type',
                        choices=['plain', 'json'],
                        default='plain',
                        dest='log_type',
                        action='store',
                        help='Type of logs.')
    parser.add_argument(
        '--validate-certs',
        choices=['true', 'false'],
        default='true',
        action='store',
        dest='validate_certs',
        help=
        '''[Experimental]: Disables certificate checks for certain Ansible operations
which might have issues behind proxies (https://github.com/ansible/ansible/issues/32750). 
Should NOT be used in production for security reasons.''')
    parser.add_argument(
        '--auto-approve',
        dest='auto_approve',
        action="store_true",
        help='Auto approve any user input queries asked by Epicli')

    # set debug verbosity level.
    def debug_level(x):
        x = int(x)
        if x < 0 or x > 4:
            raise argparse.ArgumentTypeError(
                "--debug value should be between 0 and 4")
        return x

    parser.add_argument(
        '--debug',
        dest='debug',
        type=debug_level,
        help='''Set this flag (0..4) to enable debug output where 0 is no
debug output and 1..4 is debug output with different verbosity levels:
Python    : Anything higher than 0 enables printing of Python stack traces
Ansible   : 1..4 map to following Ansible verbosity levels:
            1: -v
            2: -vv
            3: -vvv
            4: -vvvv
Terraform : 1..4 map to the following Terraform verbosity levels:
            1: WARN
            2: INFO
            3: DEBUG
            4: TRACE''')

    # some arguments we don't want available when running from the docker image.
    if not config.docker_cli:
        parser.add_argument(
            '-o',
            '--output',
            dest='output_dir',
            type=str,
            help='Directory where the CLI should write its output.')

    # setup subparsers
    subparsers = parser.add_subparsers()
    prepare_parser(subparsers)
    init_parser(subparsers)
    apply_parser(subparsers)
    upgrade_parser(subparsers)
    delete_parser(subparsers)
    test_parser(subparsers)
    # validate_parser(subparsers)
    backup_parser(subparsers)
    recovery_parser(subparsers)

    # check if there were any variables and display full help
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    arguments = sys.argv[1:]

    # add some arguments to the general config so we can easily use them throughout the CLI
    args = parser.parse_args(arguments)

    config.output_dir = getattr(args, 'output_dir', None)
    config.log_file = args.log_name
    config.log_format = args.log_format
    config.log_date_format = args.log_date_format
    config.log_type = args.log_type
    config.log_count = args.log_count
    config.validate_certs = (args.validate_certs == 'true')
    if 'offline_requirements' in args and args.offline_requirements is not None:
        config.offline_requirements = args.offline_requirements
    if 'wait_for_pods' in args and args.wait_for_pods is not None:
        config.wait_for_pods = args.wait_for_pods
    config.debug = args.debug
    config.auto_approve = args.auto_approve

    try:
        return args.func(args)
    except Exception as e:
        logger = Log('epicli')
        logger.error(e, exc_info=(config.debug > 0))
        dump_debug_info()
        return 1
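Example #1 indexes an ansible_verbosity table with Config().debug; given the 1..4 to -v..-vvvv mapping documented in the --debug help above, that table presumably looks like the following (an assumption, since its definition is not shown in the source):

# Presumed lookup for Example #1; index 0 is never used because the flag is
# only appended when Config().debug > 0.
ansible_verbosity = ['', '-v', '-vv', '-vvv', '-vvvv']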