示例#1
0
 def __init__(self, working_directory=os.path.dirname(__file__)):
     """Set up the logger, the Terraform sub-command name constants and
     the directory in which ``terraform`` will be executed.

     working_directory defaults to this module's directory; the default
     is evaluated once at definition time, but it is an immutable string,
     so sharing it across instances is harmless.
     """
     self.logger = Log(__name__)
     # Names of the Terraform sub-commands this wrapper can invoke.
     self.APPLY_COMMAND = "apply"
     self.DESTROY_COMMAND = "destroy"
     self.PLAN_COMMAND = "plan"
     self.INIT_COMMAND = "init"
     self.working_directory = working_directory
示例#2
0
 def __init__(self, cluster_model, config_docs):
     """Capture the cluster model and configuration documents.

     Derives the lower-cased cluster name and prefix from the model's
     specification and builds the resource-group name from them.
     """
     self.cluster_model = cluster_model
     self.cluster_name = self.cluster_model.specification.name.lower()
     self.cluster_prefix = self.cluster_model.specification.prefix.lower()
     # resource_name() presumably composes a "<prefix>-<name>-rg" style
     # identifier -- TODO confirm the exact format against its definition.
     self.resource_group_name = resource_name(self.cluster_prefix, self.cluster_name, 'rg')
     self.config_docs = config_docs
     self.logger = Log(__name__)
示例#3
0
    def __init__(self, input_data):
        """Store run parameters and prepare empty document collections.

        input_data: parsed CLI arguments; the ``no_infra`` attribute is
        optional and treated as False when absent.
        """
        self.file = input_data.file
        # getattr with a default replaces the hasattr/ternary construct
        # and matches the other engine constructors in this codebase.
        self.skip_infrastructure = getattr(input_data, 'no_infra', False)
        self.logger = Log(__name__)

        self.cluster_model = None
        self.input_docs = []
        self.configuration_docs = []
        self.infrastructure_docs = []
示例#4
0
class SpecCommand:
    """Runs the Ruby/serverspec based cluster tests via rake."""

    def __init__(self):
        # BUG FIX: the original defined an identical __init__ twice; the
        # duplicate has been removed.
        self.logger = Log(__name__)

    def check_dependencies(self):
        """Raise when Ruby, gem or one of the required gems is missing."""
        required_gems = ['serverspec', 'rake', 'rspec_junit_formatter']

        error_str = f'''Missing Ruby or one of the following Ruby gems: {', '.join(required_gems)}
These need to be installed to run the cluster spec tests from epicli'''

        # shutil.which returns None when an executable is not on PATH;
        # compare with ``is None`` rather than ``== None``.
        if shutil.which('ruby') is None or shutil.which('gem') is None:
            raise Exception(error_str)

        # Context manager ensures the child's pipes are cleaned up.
        with subprocess.Popen(['gem', 'query', '--local'], stdout=PIPE) as p:
            out, _ = p.communicate()
        if not all(n in out.decode('utf-8') for n in required_gems):
            raise Exception(error_str)

    def run(self, spec_output, inventory, user, key, group):
        """Run the ``spec:<group>`` rake task with the given connection data.

        Raises Exception on a non-zero rake exit code.
        """
        self.check_dependencies()

        # Pass the parameters through the environment as well as on the
        # command line (the rake tasks read them from either place).
        env = os.environ.copy()
        env['spec_output'] = spec_output
        env['inventory'] = inventory
        env['user'] = user
        env['keypath'] = key

        cmd = f'rake inventory={inventory} user={user} keypath={key} spec_output={spec_output} spec:{group}'

        self.logger.info(f'Running: "{cmd}"')

        logpipe = LogPipe(__name__)
        with Popen(cmd.split(' '),
                   cwd=SPEC_TEST_PATH,
                   env=env,
                   stdout=logpipe,
                   stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running: "{cmd}"')

    @staticmethod
    def get_spec_groups():
        """Return 'all' plus the spec group names, sorted case-insensitively."""
        groups = os.listdir(SPEC_TEST_PATH + '/spec')
        groups.remove('spec_helper.rb')
        groups = ['all'] + groups
        # BUG FIX: the original called sorted() and discarded its return
        # value; sort in place so the returned list really is ordered.
        groups.sort(key=str.lower)
        return groups
示例#5
0
class TerraformCommand:
    """Wrapper around the ``terraform`` binary used by epicli."""

    def __init__(self, working_directory=os.path.dirname(__file__)):
        """working_directory: where the terraform templates/state live.
        The default (this module's directory) is an immutable string, so
        the shared default value is safe."""
        self.logger = Log(__name__)
        self.APPLY_COMMAND = "apply"
        self.DESTROY_COMMAND = "destroy"
        self.PLAN_COMMAND = "plan"
        self.INIT_COMMAND = "init"
        self.working_directory = working_directory

    # BUG FIX: the original methods used ``env=os.environ.copy()`` as the
    # default argument. Defaults are evaluated once at definition time and
    # ``run`` mutates the dict (sets TF_LOG), so every later default-env
    # call shared the polluted snapshot. ``env=None`` defers the copy to
    # call time; passing an explicit env behaves exactly as before.
    def apply(self, auto_approve=False, env=None):
        self.run(self, self.APPLY_COMMAND, auto_approve=auto_approve, env=env)

    def destroy(self, auto_approve=False, env=None):
        self.run(self,
                 self.DESTROY_COMMAND,
                 auto_approve=auto_approve,
                 env=env)

    def plan(self, env=None):
        self.run(self, self.PLAN_COMMAND, env=env)

    def init(self, env=None):
        self.run(self, self.INIT_COMMAND, env=env)

    @staticmethod
    def run(self, command, env=None, auto_approve=False):
        """Execute ``terraform <command>`` in the working directory,
        streaming output to the logger.

        Raises Exception on a non-zero exit code.
        """
        if env is None:
            env = os.environ.copy()

        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        # apply/destroy need the explicit state-file location.
        if command == self.APPLY_COMMAND or command == self.DESTROY_COMMAND:
            cmd.append(f'-state={self.working_directory}/terraform.tfstate')

        cmd.append(self.working_directory)

        cmd = ' '.join(cmd)
        self.logger.info(f'Running: "{cmd}"')

        if Config().debug:
            env['TF_LOG'] = 'TRACE'

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd,
                              stdout=logpipe,
                              stderr=logpipe,
                              env=env,
                              shell=True) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running "{cmd}"')
示例#6
0
class Step(metaclass=ABCMeta):
    """Context-manager base class that logs how long a run took."""

    def __init__(self, step_name):
        # One logger per step, named after the step.
        self.logger = Log(step_name)

    def __enter__(self):
        # Record the start timestamp before announcing the run.
        self.start = time.time()
        self.logger.info('Starting run')
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        # Report elapsed wall-clock time in whole milliseconds.
        elapsed_ms = int(round((time.time() - self.start) * 1000))
        self.logger.info(f'Run done in {elapsed_ms}ms')
示例#7
0
class TerraformCommand:
    """Thin wrapper that shells out to the ``terraform`` binary."""

    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.APPLY_COMMAND = "apply"
        self.DESTROY_COMMAND = "destroy"
        self.PLAN_COMMAND = "plan"
        self.INIT_COMMAND = "init"
        self.working_directory = working_directory

    def apply(self, auto_approve=False):
        self.run(self, self.APPLY_COMMAND, auto_approve=auto_approve)

    def destroy(self, auto_approve=False):
        self.run(self, self.DESTROY_COMMAND, auto_approve=auto_approve)

    def plan(self):
        self.run(self, self.PLAN_COMMAND)

    def init(self):
        self.run(self, self.INIT_COMMAND)

    @staticmethod
    def run(self, command, auto_approve=False):
        """Build and execute a terraform command line, streaming its
        output to the logger. Raises Exception on a non-zero exit code.
        """
        argv = ['terraform', command]
        if auto_approve:
            argv.append('--auto-approve')
        # Only apply gets an explicit state-file location here.
        if command == self.APPLY_COMMAND:
            argv.append('-state=' + self.working_directory + '/terraform.tfstate')
        argv.append(self.working_directory)

        printable = ' '.join(argv)
        self.logger.info(f'Running: "{printable}"')

        pipe = LogPipe(__name__)
        with subprocess.Popen(argv, stdout=pipe, stderr=pipe) as proc:
            pipe.close()

        if proc.returncode != 0:
            raise Exception(f'Error running: "{printable}"')
        self.logger.info(f'Done running "{printable}"')
示例#8
0
    def __init__(self, input_data):
        """Record CLI options and initialise empty document holders."""
        self.file = input_data.file
        # Optional flags default to False when the argparse namespace
        # does not define them.
        self.skip_infrastructure = getattr(input_data, 'no_infra', False)
        self.skip_config = getattr(input_data, 'skip_config', False)
        profile_tasks = getattr(input_data, 'profile_ansible_tasks', False)
        self.ansible_options = {'profile_tasks': profile_tasks}
        self.logger = Log(__name__)

        # Filled in later by the processing steps.
        self.cluster_model = None
        self.input_docs = []
        self.configuration_docs = []
        self.infrastructure_docs = []
示例#9
0
 def __init__(self):
     """Create a logger named after this module."""
     self.logger = Log(__name__)
示例#10
0
 def __init__(self, working_directory=os.path.dirname(__file__)):
     """Remember the working directory (defaulting to this module's
     directory -- an immutable string, so the shared default is safe)
     and create a module-level logger.
     """
     self.logger = Log(__name__)
     self.working_directory = working_directory
示例#11
0
def dump_config(config):
    """Log every '_'-prefixed attribute of *config* as ``name = value``,
    stripping the leading underscore from the reported name."""
    logger = Log('config')
    for name, value in config.__dict__.items():
        if name.startswith('_'):
            logger.info('%s = %r' % (name[1:], value))
示例#12
0
def main():
    """Entry point for the epicli CLI.

    Builds the argument parser, registers the sub-command parsers, copies
    shared options into the global Config, then dispatches to the chosen
    sub-command handler. Returns the handler's exit code, or 1 when the
    handler raises.
    """
    config = Config()
    parser = argparse.ArgumentParser(
        description=__doc__,
        usage='''epicli <command> [<args>]''',
        formatter_class=argparse.RawDescriptionHelpFormatter)

    # setup some root arguments
    parser.add_argument('--version',
                        action='version',
                        help='Shows the CLI version',
                        version=VERSION)
    parser.add_argument(
        '--licenses',
        action='version',
        help=
        'Shows the third party packages and their licenses the CLI is using.',
        version=json.dumps(LICENSES, indent=4))
    parser.add_argument(
        '-l',
        '--log-file',
        dest='log_name',
        type=str,
        help='The name of the log file written to the output directory')
    parser.add_argument('--log-format',
                        dest='log_format',
                        type=str,
                        help='Format for the logging string.')
    parser.add_argument('--log-date-format',
                        dest='log_date_format',
                        type=str,
                        help='Format for the logging date.')
    parser.add_argument(
        '--log-count',
        dest='log_count',
        type=str,
        help='Rollover count where each CLI run will generate a new log.')
    parser.add_argument('--log-type',
                        choices=['plain', 'json'],
                        default='plain',
                        dest='log_type',
                        action='store',
                        help='Type of logs.')
    parser.add_argument(
        '--validate-certs',
        choices=['true', 'false'],
        default='true',
        action='store',
        dest='validate_certs',
        help=
        '''[Experimental]: Disables certificate checks for certain Ansible operations
                         which might have issues behind proxies (https://github.com/ansible/ansible/issues/32750).
                         Should NOT be used in production for security reasons.'''
    )
    parser.add_argument(
        '--debug',
        dest='debug',
        action="store_true",
        help=
        'Set this to output extensive debug information. Carries over to Ansible and Terraform.'
    )
    parser.add_argument(
        '--auto-approve',
        dest='auto_approve',
        action="store_true",
        help='Auto approve any user input queries asked by Epicli')
    # some arguments we don't want available when running from the docker image.
    if not config.docker_cli:
        parser.add_argument(
            '-o',
            '--output',
            dest='output_dir',
            type=str,
            help='Directory where the CLI should write its output.')

    # setup subparsers
    subparsers = parser.add_subparsers()
    apply_parser(subparsers)
    validate_parser(subparsers)
    init_parser(subparsers)
    upgrade_parser(subparsers)
    backup_parser(subparsers)
    recovery_parser(subparsers)
    delete_parser(subparsers)
    prepare_parser(subparsers)

    # display the full help when no command was given
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    arguments = sys.argv[1:]

    # add some arguments to the general config so we can easily use them throughout the CLI
    args = parser.parse_args(arguments)

    # getattr with a default replaces the hasattr ternary; --output is not
    # registered at all when running from the docker image.
    config.output_dir = getattr(args, 'output_dir', None)
    config.log_file = args.log_name
    config.log_format = args.log_format
    config.log_date_format = args.log_date_format
    config.log_type = args.log_type
    config.log_count = args.log_count
    # The choice is constrained to 'true'/'false'; compare directly.
    config.validate_certs = args.validate_certs == 'true'
    if 'offline_requirements' in args and args.offline_requirements is not None:
        config.offline_requirements = args.offline_requirements
    if 'wait_for_pods' in args and args.wait_for_pods is not None:
        config.wait_for_pods = args.wait_for_pods
    config.debug = args.debug
    config.auto_approve = args.auto_approve

    try:
        return args.func(args)
    except Exception as e:
        logger = Log('epicli')
        logger.error(e, exc_info=config.debug)
        return 1
示例#13
0
class AnsibleCommand:
    """Wrapper that shells out to ``ansible`` / ``ansible-playbook``."""

    def __init__(self, working_directory=os.path.dirname(__file__)):
        # BUG FIX: the original declared __init__ twice; the second
        # definition silently replaced this one and dropped
        # working_directory. Only the complete variant is kept; calling
        # with no arguments still works via the default.
        self.logger = Log(__name__)
        self.working_directory = working_directory

    def run_task(self, hosts, inventory, module, args):
        """Run a single ansible module against *hosts*.

        Raises Exception on a non-zero ansible exit code.
        """
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_task_with_retries(self,
                              inventory,
                              module,
                              args,
                              hosts,
                              retries,
                              timeout=10):
        """Retry run_task up to *retries* times, sleeping *timeout*
        seconds between attempts.

        Raises Exception once every attempt has failed.
        """
        for i in range(retries):
            try:
                self.run_task(hosts=hosts,
                              inventory=inventory,
                              module=module,
                              args=args)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info("Retry running task: " + str(i + 1) + "/" +
                                 str(retries))
                time.sleep(timeout)
        else:
            # ROBUSTNESS FIX: surface exhausted retries instead of
            # silently returning as if the task had succeeded (matches
            # the newer retry helper in this codebase).
            raise Exception(f'Failed running task after {retries} retries')

    def run_playbook(self, inventory, playbook_path):
        """Run a playbook with the given inventory.

        Raises Exception on a non-zero ansible-playbook exit code.
        """
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(playbook_path)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_playbook_with_retries(self,
                                  inventory,
                                  playbook_path,
                                  retries,
                                  timeout=10):
        """Retry run_playbook; return 0 on success, 1 when all retries fail."""
        for i in range(retries):

            try:
                self.run_playbook(inventory=inventory,
                                  playbook_path=playbook_path)
                return 0
            except Exception as e:
                self.logger.error(e)
                self.logger.info("Retry running playbook: " + str(i + 1) +
                                 "/" + str(retries))
                time.sleep(timeout)
        return 1
示例#14
0
def main():
    """Entry point for the epicli CLI (newer variant with numeric --debug).

    Builds the parser, registers sub-commands, propagates shared options
    into the global Config and dispatches. Returns the sub-command
    handler's exit code, or 1 after writing a debug dump when the handler
    raises.
    """
    config = Config()
    parser = argparse.ArgumentParser(
        description=__doc__,
        usage='''epicli <command> [<args>]''',
        formatter_class=argparse.RawTextHelpFormatter)

    # setup some root arguments
    parser.add_argument('--version',
                        action='version',
                        help='Shows the CLI version',
                        version=VERSION)
    parser.add_argument(
        '--licenses',
        action='version',
        help=
        'Shows the third party packages and their licenses the CLI is using.',
        version=json.dumps(LICENSES, indent=4))
    parser.add_argument(
        '-l',
        '--log-file',
        dest='log_name',
        type=str,
        help='The name of the log file written to the output directory')
    parser.add_argument('--log-format',
                        dest='log_format',
                        type=str,
                        help='Format for the logging string.')
    parser.add_argument('--log-date-format',
                        dest='log_date_format',
                        type=str,
                        help='Format for the logging date.')
    parser.add_argument(
        '--log-count',
        dest='log_count',
        type=str,
        help='Rollover count where each CLI run will generate a new log.')
    parser.add_argument('--log-type',
                        choices=['plain', 'json'],
                        default='plain',
                        dest='log_type',
                        action='store',
                        help='Type of logs.')
    parser.add_argument(
        '--validate-certs',
        choices=['true', 'false'],
        default='true',
        action='store',
        dest='validate_certs',
        help=
        '''[Experimental]: Disables certificate checks for certain Ansible operations
which might have issues behind proxies (https://github.com/ansible/ansible/issues/32750).
Should NOT be used in production for security reasons.''')
    parser.add_argument(
        '--auto-approve',
        dest='auto_approve',
        action="store_true",
        help='Auto approve any user input queries asked by Epicli')

    # set debug verbosity level.
    def debug_level(x):
        # argparse type callback: validate the 0..4 range.
        x = int(x)
        if x < 0 or x > 4:
            raise argparse.ArgumentTypeError(
                "--debug value should be between 0 and 4")
        return x

    parser.add_argument(
        '--debug',
        dest='debug',
        type=debug_level,
        help='''Set this flag (0..4) to enable debug output where 0 is no
debug output and 1..4 is debug output with different verbosity levels:
Python    : Anything higher than 0 enables printing of Python stacktraces
Ansible   : 1..4 map to following Ansible verbosity levels:
            1: -v
            2: -vv
            3: -vvv
            4: -vvvv
Terraform : 1..4 map to the following Terraform verbosity levels:
            1: WARN
            2: INFO
            3: DEBUG
            4: TRACE''')

    # some arguments we don't want available when running from the docker image.
    if not config.docker_cli:
        parser.add_argument(
            '-o',
            '--output',
            dest='output_dir',
            type=str,
            help='Directory where the CLI should write its output.')

    # setup subparsers
    subparsers = parser.add_subparsers()
    prepare_parser(subparsers)
    init_parser(subparsers)
    apply_parser(subparsers)
    upgrade_parser(subparsers)
    delete_parser(subparsers)
    test_parser(subparsers)
    # validate_parser(subparsers) is currently disabled (was a no-op
    # string literal in the original).
    backup_parser(subparsers)
    recovery_parser(subparsers)

    # display the full help when no command was given
    if len(sys.argv) < 2:
        parser.print_help()
        sys.exit(1)

    arguments = sys.argv[1:]

    # add some arguments to the general config so we can easily use them throughout the CLI
    args = parser.parse_args(arguments)

    config.output_dir = getattr(args, 'output_dir', None)
    config.log_file = args.log_name
    config.log_format = args.log_format
    config.log_date_format = args.log_date_format
    config.log_type = args.log_type
    config.log_count = args.log_count
    # The choice is constrained to 'true'/'false'; compare directly.
    config.validate_certs = args.validate_certs == 'true'
    if 'offline_requirements' in args and args.offline_requirements is not None:
        config.offline_requirements = args.offline_requirements
    if 'wait_for_pods' in args and args.wait_for_pods is not None:
        config.wait_for_pods = args.wait_for_pods
    config.debug = args.debug
    config.auto_approve = args.auto_approve

    try:
        return args.func(args)
    except Exception as e:
        logger = Log('epicli')
        logger.error(e, exc_info=(config.debug > 0))
        dump_debug_info()
        return 1
示例#15
0
class APIProxy:
    """Proxy around the ``az`` CLI used to query and manage Azure resources
    for a cluster."""

    def __init__(self, cluster_model, config_docs):
        self.cluster_model = cluster_model
        self.cluster_name = self.cluster_model.specification.name.lower()
        self.cluster_prefix = self.cluster_model.specification.prefix.lower()
        self.resource_group_name = resource_name(self.cluster_prefix,
                                                 self.cluster_name, 'rg')
        self.config_docs = config_docs
        self.logger = Log(__name__)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def login_account(self):
        """Interactively log in and return the subscription configured in
        the cluster model. Raises when the user has no access to it."""
        subscription_name = self.cluster_model.specification.cloud.subscription_name
        all_subscription = self.run(self, 'az login')
        subscription = select_first(all_subscription,
                                    lambda x: x['name'] == subscription_name)
        if subscription is None:
            raise Exception(
                f'User does not have access to subscription: "{subscription_name}"'
            )
        return subscription

    def login_sp(self, sp_data):
        """Log in with a service principal. The command line is not logged
        because it contains the principal's password."""
        name = sp_data['name']
        password = sp_data['password']
        tenant = sp_data['tenant']
        return self.run(
            self,
            f'az login --service-principal -u \'{name}\' -p "{password}" --tenant \'{tenant}\'',
            False)

    def set_active_subscribtion(self, subscription_id):
        """Make *subscription_id* the active subscription for later calls."""
        self.run(self, f'az account set --subscription {subscription_id}')

    def get_active_subscribtion(self):
        """Return the currently active subscription."""
        # Dropped a pointless f-string prefix (no placeholders).
        subscription = self.run(self, 'az account show')
        return subscription

    def create_sp(self, app_name, subscription_id):
        """Create a Contributor service principal scoped to the subscription."""
        #TODO: make role configurable?
        sp = self.run(
            self,
            f'az ad sp create-for-rbac -n \'{app_name}\' --role=\'Contributor\' --scopes=\'/subscriptions/{subscription_id}\''
        )
        # Sleep for a while. Sometimes the call returns before the rights of the SP are finished creating.
        self.wait(self, 60)
        return sp

    def get_ips_for_feature(self, component_key):
        """Return AnsibleHostModel entries (name, ip) for every running VM
        tagged with *component_key* in this cluster; public or private IPs
        depending on the cluster's use_public_ips setting."""
        look_for_public_ip = self.cluster_model.specification.cloud.use_public_ips
        cluster = cluster_tag(self.cluster_prefix, self.cluster_name)
        running_instances = self.run(
            self,
            f'az vm list-ip-addresses --ids $(az resource list --query "[?type==\'Microsoft.Compute/virtualMachines\' && tags.{component_key} == \'\' && tags.cluster == \'{cluster}\'].id" --output tsv)'
        )
        result = []
        for instance in running_instances:
            if isinstance(instance, list):
                instance = instance[0]
            name = instance['virtualMachine']['name']
            if look_for_public_ip:
                ip = instance['virtualMachine']['network'][
                    'publicIpAddresses'][0]['ipAddress']
            else:
                ip = instance['virtualMachine']['network'][
                    'privateIpAddresses'][0]
            result.append(AnsibleHostModel(name, ip))
        return result

    def get_storage_account_primary_key(self, storage_account_name):
        """Return the first access key of the given storage account."""
        keys = self.run(
            self,
            f'az storage account keys list -g \'{self.resource_group_name}\' -n \'{storage_account_name}\''
        )
        return keys[0]['value']

    @staticmethod
    def wait(self, seconds):
        """Sleep *seconds* seconds, logging progress once per second."""
        for x in range(0, seconds):
            self.logger.info(f'Waiting {seconds} seconds...{x}')
            time.sleep(1)

    @staticmethod
    def run(self, cmd, log_cmd=True):
        """Run *cmd* in a shell and parse its stdout as JSON.

        log_cmd=False suppresses logging of the command line (used for
        commands containing secrets). Returns the parsed output, or {}
        when stdout is not valid JSON. Raises on a non-zero exit code.
        """
        if log_cmd:
            self.logger.info('Running: "' + cmd + '"')

        logpipe = LogPipe(__name__)
        with Popen(cmd, stdout=PIPE, stderr=logpipe, shell=True) as sp:
            logpipe.close()
            try:
                data = sp.stdout.read().decode('utf-8')
                # Strip whitespace and ANSI escape sequences before parsing.
                data = re.sub(r'\s+', '', data)
                data = re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', data)
                output = json.loads(data)
            # BUG FIX: narrowed the bare ``except:`` (which also swallowed
            # KeyboardInterrupt/SystemExit); deliberately broad otherwise,
            # since some az commands emit no JSON at all.
            except Exception:
                output = {}

        if sp.returncode != 0:
            if log_cmd:
                raise Exception(f'Error running: "{cmd}"')
            else:
                raise Exception('Error running Azure APIProxy cmd')
        else:
            if log_cmd:
                self.logger.info(f'Done running "{cmd}"')
            return output
示例#16
0
def dump_debug_info():
    """Write an epicli error-dump file (CLI version/args/config, system
    info, environment, tool versions and the CLI log) to the current
    working directory.

    The dump file is always closed and its location logged, even when one
    of the collection steps fails.
    """
    # ROBUSTNESS FIX: bind logger/config/dump_path/dump_file before the
    # try block. The original created them inside ``try`` and referenced
    # them in ``finally``, so any early failure raised NameError and
    # masked the real error.
    logger = Log('dump_debug_info')
    config = Config()

    timestr = time.strftime("%Y%m%d-%H%M%S")
    dump_path = os.getcwd() + f'/epicli_error_{timestr}.dump'
    dump_file = open(dump_path, 'w')

    def dump_external_debug_info(title, args):
        # Append the output of an external command under a banner,
        # dropping blank lines.
        dump_file.write(f'\n\n*****{title}******\n')
        p = subprocess.Popen(args, stdout=subprocess.PIPE)
        out, err = p.communicate()
        lines = filter(lambda x: x.strip(),
                       out.decode("utf-8").splitlines(keepends=True))
        dump_file.writelines(lines)

    try:
        dump_file.write('*****EPICLI VERSION******\n')
        dump_file.write(f'{VERSION}')

        dump_file.write('\n\n*****EPICLI ARGS******\n')
        dump_file.write(' '.join([*['epicli'], *sys.argv[1:]]))

        dump_file.write('\n\n*****EPICLI CONFIG******\n')
        for attr in config.__dict__:
            if attr.startswith('_'):
                dump_file.write('%s = %r\n' %
                                (attr[1:], getattr(config, attr)))

        dump_file.write('\n\n*****SYSTEM******\n')
        system_data = {
            'platform': platform.system(),
            'release': platform.release(),
            'type': platform.uname().system,
            'arch': platform.uname().machine,
            'cpus': json.dumps(os.cpu_count()),
            'hostname': socket.gethostname()
        }
        dump_file.write(json.dumps(dict(system_data), indent=2))

        dump_file.write('\n\n*****ENVIROMENT VARS******\n')
        dump_file.write(json.dumps(dict(os.environ), indent=2))

        dump_file.write('\n\n*****PYTHON******\n')
        dump_file.write(f'python_version: {platform.python_version()}\n')
        dump_file.write(f'python_build: {platform.python_build()}\n')
        dump_file.write(f'python_revision: {platform.python_revision()}\n')
        dump_file.write(f'python_compiler: {platform.python_compiler()}\n')
        dump_file.write(f'python_branch: {platform.python_branch()}\n')
        dump_file.write(
            f'python_implementation: {platform.python_implementation()}\n')

        dump_external_debug_info('ANSIBLE VERSION', ['ansible', '--version'])
        dump_external_debug_info('ANSIBLE CONFIG', ['ansible-config', 'dump'])
        dump_external_debug_info('ANSIBLE-VAULT VERSION',
                                 ['ansible-vault', '--version'])
        dump_external_debug_info('TERRAFORM VERSION',
                                 ['terraform', '--version'])
        dump_external_debug_info('SKOPEO VERSION', ['skopeo', '--version'])
        dump_external_debug_info('RUBY VERSION', ['ruby', '--version'])
        dump_external_debug_info('RUBY GEM VERSION', ['gem', '--version'])
        dump_external_debug_info('RUBY INSTALLED GEMS',
                                 ['gem', 'query', '--local'])

        dump_file.write('\n\n*****LOG******\n')
        log_path = os.path.join(get_output_path(), config.log_file)
        # RESOURCE FIX: close the log file after reading (the original
        # leaked the handle from a throwaway open() in a comprehension).
        with open(log_path) as log_file:
            dump_file.writelines(log_file.readlines())
    finally:
        dump_file.close()
        logger.info(f'Error dump has been written to: {dump_path}')
        logger.warning(
            'This dump might contain sensitive information. Check before sharing.'
        )
示例#17
0
class EpiphanyEngine:
    """Orchestrates the epicli flow: load/merge/validate the input
    documents, build infrastructure with Terraform and provision it with
    Ansible."""

    def __init__(self, input_data):
        self.file = input_data.file
        # getattr with a default replaces the hasattr/ternary construct.
        self.skip_infrastructure = getattr(input_data, 'no_infra', False)
        self.logger = Log(__name__)

        # Filled in by the processing steps below.
        self.cluster_model = None
        self.input_docs = []
        self.configuration_docs = []
        self.infrastructure_docs = []

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def process_input_docs(self):
        """Load the user's YAML docs, merge them with defaults, pick the
        cluster model and schema-validate everything."""
        # Load the user input YAML docs from the input file.
        if os.path.isabs(self.file):
            path_to_load = self.file
        else:
            path_to_load = os.path.join(os.getcwd(), self.file)
        # RESOURCE FIX: close the input file when done (the original
        # leaked the handle). The docs are materialized inside the with
        # block in case safe_load_all yields lazily from the stream.
        with open(path_to_load, 'r') as user_file_stream:
            self.input_docs = list(safe_load_all(user_file_stream))

        # Merge the input docs with defaults
        with DefaultMerger(self.input_docs) as doc_merger:
            self.input_docs = doc_merger.run()

        # Get the cluster model.
        self.cluster_model = select_single(
            self.input_docs, lambda x: x.kind == 'epiphany-cluster')
        if self.cluster_model is None:
            raise Exception('No cluster model defined in input YAML file')

        # Validate input documents
        with SchemaValidator(self.cluster_model,
                             self.input_docs) as schema_validator:
            schema_validator.run()

    def process_infrastructure_docs(self):
        """Build provider-specific infrastructure docs and validate them."""
        with provider_class_loader(
                self.cluster_model.provider, 'InfrastructureBuilder')(
                    self.input_docs) as infrastructure_builder:
            self.infrastructure_docs = infrastructure_builder.run()

        # Validate infrastructure documents
        with SchemaValidator(self.cluster_model,
                             self.infrastructure_docs) as schema_validator:
            schema_validator.run()

    def process_configuration_docs(self):
        """Append component/configuration docs and validate them."""
        with ConfigurationAppender(self.input_docs) as config_appender:
            self.configuration_docs = config_appender.run()

        # Validate configuration documents
        with SchemaValidator(self.cluster_model,
                             self.configuration_docs) as schema_validator:
            schema_validator.run()

    def collect_infrastructure_config(self):
        """Run the provider's config collector over every document set."""
        with provider_class_loader(self.cluster_model.provider,
                                   'InfrastructureConfigCollector')([
                                       *self.input_docs,
                                       *self.configuration_docs,
                                       *self.infrastructure_docs
                                   ]) as config_collector:
            config_collector.run()

    def verify(self):
        """Validate all documents and save the manifest. Returns 0/1."""
        try:
            self.process_input_docs()

            self.process_configuration_docs()

            self.process_infrastructure_docs()

            save_manifest([
                *self.input_docs, *self.configuration_docs,
                *self.infrastructure_docs
            ], self.cluster_model.specification.name)

            return 0
        except Exception as e:
            # TODO extensive debug output might not always be wanted.
            # Make this configurable with an input flag?
            self.logger.error(e, exc_info=True)
            return 1

    def apply(self):
        """Run the full flow: Terraform (unless skipped) then Ansible.
        Returns 0 on success, 1 on failure."""
        try:
            self.process_input_docs()

            self.process_infrastructure_docs()

            if not self.skip_infrastructure:
                # Generate terraform templates
                with TerraformTemplateGenerator(
                        self.cluster_model,
                        self.infrastructure_docs) as template_generator:
                    template_generator.run()

                # Run Terraform to create infrastructure
                with TerraformRunner(
                        self.cluster_model.specification.name) as tf_runner:
                    tf_runner.run()

            self.process_configuration_docs()

            self.collect_infrastructure_config()

            # Run Ansible to provision infrastructure
            docs = [
                *self.input_docs, *self.configuration_docs,
                *self.infrastructure_docs
            ]
            with AnsibleRunner(self.cluster_model, docs) as ansible_runner:
                ansible_runner.run()

            # Save docs to manifest file
            save_manifest(docs, self.cluster_model.specification.name)

            return 0
        except Exception as e:
            # TODO extensive debug output might not always be wanted.
            # Make this configurable with an input flag?
            self.logger.error(e, exc_info=True)
            return 1

    def dry_run(self):
        """Process only the input and configuration docs; return them."""
        self.process_input_docs()

        self.process_configuration_docs()

        return [*self.input_docs, *self.configuration_docs]
示例#18
0
class AnsibleCommand:
    """Thin wrapper around the ``ansible`` and ``ansible-playbook`` CLIs.

    Output of the spawned processes is streamed through a LogPipe into this
    module's logger.
    """

    def __init__(self, working_directory=os.path.dirname(__file__)):
        # NOTE: the original class defined __init__ twice; the second
        # definition shadowed the first and silently dropped
        # working_directory. Keep the single, parameterized constructor.
        self.logger = Log(__name__)
        self.working_directory = working_directory

    def run_task(self, hosts, inventory, module, args=None):
        """Run a single ad-hoc Ansible task (``ansible -m <module>``).

        :param hosts: host pattern passed as the positional argument
        :param inventory: inventory path; skipped when empty/None
        :param module: Ansible module name (``-m``)
        :param args: optional module arguments (``-a``); skipped when empty
        :raises Exception: when the ansible process exits non-zero
        """
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        # Bug fix: the original joined the characters of the module name
        # (' '.join(module) -> "s h e l l"); log the full command instead.
        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_task_with_retries(self,
                              inventory,
                              module,
                              hosts,
                              retries,
                              timeout=10,
                              args=None):
        """Run :meth:`run_task`, retrying up to ``retries`` times.

        Sleeps ``timeout`` seconds between attempts; raises when every
        attempt fails.
        """
        for attempt in range(retries):
            try:
                self.run_task(hosts=hosts,
                              inventory=inventory,
                              module=module,
                              args=args)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info('Retry running task: ' + str(attempt + 1) + '/' +
                                 str(retries))
                time.sleep(timeout)
        else:
            # for/else: only reached when no attempt succeeded (no break)
            raise Exception(
                f'Failed running task after {str(retries)} retries')

    def run_playbook(self, inventory, playbook_path, vault_file=None):
        """Run ``ansible-playbook`` on the given playbook.

        :param inventory: inventory path; skipped when empty/None
        :param playbook_path: path to the playbook to execute
        :param vault_file: optional vault password file (``--vault-password-file``)
        :raises Exception: when the ansible-playbook process exits non-zero
        """
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        if vault_file is not None:
            cmd.extend(["--vault-password-file", vault_file])

        cmd.append(playbook_path)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        # Bug fix: the original joined the characters of playbook_path;
        # log the full command instead.
        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_playbook_with_retries(self,
                                  inventory,
                                  playbook_path,
                                  retries,
                                  timeout=10):
        """Run :meth:`run_playbook`, retrying up to ``retries`` times.

        Sleeps ``timeout`` seconds between attempts; raises when every
        attempt fails.
        """
        for attempt in range(retries):
            try:
                self.run_playbook(inventory=inventory,
                                  playbook_path=playbook_path)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info('Retry running playbook: ' + str(attempt + 1) +
                                 '/' + str(retries))
                time.sleep(timeout)
        else:
            # for/else: only reached when no attempt succeeded (no break)
            raise Exception(
                f'Failed running playbook after {str(retries)} retries')
# --- Example 19 ---
class TerraformCommand:
    """Wrapper around the ``terraform`` CLI (init/plan/apply/destroy).

    Output is streamed through a LogPipe; ``apply`` retries automatically on
    Terraform "RetryableError" failures.
    """

    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.APPLY_COMMAND = "apply"
        self.DESTROY_COMMAND = "destroy"
        self.PLAN_COMMAND = "plan"
        self.INIT_COMMAND = "init"
        self.working_directory = working_directory

    # Bug fix in the four wrappers below: ``env=os.environ.copy()`` as a
    # default was evaluated once at definition time, so the same dict was
    # shared across calls and mutations (TF_LOG) leaked between runs.
    # ``env=None`` defers the copy to call time.

    def apply(self, auto_approve=False, env=None):
        """Run ``terraform apply`` with up to 3 retries on retryable errors."""
        self.run(self.APPLY_COMMAND,
                 auto_approve=auto_approve,
                 env=env,
                 auto_retries=3)

    def destroy(self, auto_approve=False, env=None):
        """Run ``terraform destroy``."""
        self.run(self.DESTROY_COMMAND,
                 auto_approve=auto_approve,
                 env=env)

    def plan(self, env=None):
        """Run ``terraform plan``."""
        self.run(self.PLAN_COMMAND, env=env)

    def init(self, env=None):
        """Run ``terraform init``."""
        self.run(self.INIT_COMMAND, env=env)

    # Fixed: the original declared this a @staticmethod yet took ``self``
    # and was invoked as ``self.run(self, ...)``; it is a regular method.
    def run(self, command, env=None, auto_approve=False, auto_retries=1):
        """Execute a terraform subcommand in the working directory.

        :param command: terraform subcommand (apply/destroy/plan/init)
        :param env: environment for the subprocess; defaults to a fresh
            copy of ``os.environ`` taken at call time
        :param auto_approve: pass ``--auto-approve`` to terraform
        :param auto_retries: attempts allowed when terraform reports a
            "RetryableError" on stderr
        :raises Exception: when the final attempt exits non-zero
        """
        if env is None:
            env = os.environ.copy()

        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        if command == self.APPLY_COMMAND or command == self.DESTROY_COMMAND:
            cmd.append(f'-state={self.working_directory}/terraform.tfstate')

        cmd.append('-no-color')

        cmd.append(self.working_directory)

        cmd = ' '.join(cmd)
        self.logger.info(f'Running: "{cmd}"')

        if Config().debug > 0:
            env['TF_LOG'] = terraform_verbosity[Config().debug]

        retries = 1
        do_retry = True
        while ((retries <= auto_retries) and do_retry):
            logpipe = LogPipe(__name__)
            with subprocess.Popen(cmd,
                                  stdout=logpipe,
                                  stderr=logpipe,
                                  env=env,
                                  shell=True) as sp:
                logpipe.close()
            retries = retries + 1
            # Retry only when terraform reported a retryable error on stderr.
            do_retry = next(
                (True for s in logpipe.stderrstrings if 'RetryableError' in s),
                False)
            if do_retry and retries <= auto_retries:
                self.logger.warning(
                    f'Terraform failed with "RetryableError" error. Retry: ' +
                    str(retries) + '/' + str(auto_retries))

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running "{cmd}"')
# --- Example 20 (file: Step.py, project: branlav/epiphany) ---
 def __init__(self, step_name):
     # Create a logger scoped to this step's name.
     self.logger = Log(step_name)
# --- Example 21 ---
 def __init__(self, cluster_model, config_docs):
     """Store the cluster model and configuration docs; create a module logger."""
     self.cluster_model = cluster_model
     self.config_docs = config_docs
     self.logger = Log(__name__)