Code example #1
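# Note: these snippets are excerpted from the epicli/epiphany code base. They assume the
# usual standard-library imports (os, sys, time, shutil, re, json, subprocess with
# Popen/PIPE, ...) plus the project's own Log, LogPipe and Config helpers, none of which
# are shown here.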
class TerraformCommand:
    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.APPLY_COMMAND = "apply"
        self.DESTROY_COMMAND = "destroy"
        self.PLAN_COMMAND = "plan"
        self.INIT_COMMAND = "init"
        self.working_directory = working_directory

    def apply(self, auto_approve=False, env=os.environ.copy()):
        self.run(self, self.APPLY_COMMAND, auto_approve=auto_approve, env=env)

    def destroy(self, auto_approve=False, env=os.environ.copy()):
        self.run(self,
                 self.DESTROY_COMMAND,
                 auto_approve=auto_approve,
                 env=env)

    def plan(self, env=os.environ.copy()):
        self.run(self, self.PLAN_COMMAND, env=env)

    def init(self, env=os.environ.copy()):
        self.run(self, self.INIT_COMMAND, env=env)

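    # Note: run() is declared as a @staticmethod yet written with an explicit `self`
    # parameter; the instance methods above pass the instance in themselves via
    # self.run(self, ...).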
    @staticmethod
    def run(self, command, env, auto_approve=False):
        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        if command == self.APPLY_COMMAND or command == self.DESTROY_COMMAND:
            cmd.append(f'-state={self.working_directory}/terraform.tfstate')

        cmd.append(self.working_directory)

        cmd = ' '.join(cmd)
        self.logger.info(f'Running: "{cmd}"')

        if Config().debug:
            env['TF_LOG'] = 'TRACE'

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd,
                              stdout=logpipe,
                              stderr=logpipe,
                              env=env,
                              shell=True) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running "{cmd}"')
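
The LogPipe object handed to subprocess.Popen above is a project-internal helper that is not
shown in these listings. Below is a minimal sketch of what it could look like (names and
details are assumptions): it exposes a fileno() for the child process to write to and logs
each line on a background thread.

import logging
import os
import threading

class LogPipe(threading.Thread):
    """File-like helper whose fileno() can be passed to subprocess.Popen as stdout/stderr;
    everything the child writes is logged line by line on a background thread."""

    def __init__(self, name):
        super().__init__(daemon=True)
        self.logger = logging.getLogger(name)
        self.read_fd, self.write_fd = os.pipe()
        self.reader = os.fdopen(self.read_fd)
        self.stderrstrings = []  # collected lines, used e.g. for "RetryableError" detection
        self.start()

    def fileno(self):
        # Popen duplicates this descriptor for the child's stdout/stderr.
        return self.write_fd

    def run(self):
        for line in iter(self.reader.readline, ''):
            line = line.rstrip('\n')
            self.stderrstrings.append(line)
            self.logger.info(line)
        self.reader.close()

    def close(self):
        # Closing the write end lets the reader thread hit EOF and terminate.
        os.close(self.write_fd)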
Code example #2
class SpecCommand:
    def __init__(self):
        self.logger = Log(__name__)

    def check_dependencies(self):
        required_gems = ['serverspec', 'rake', 'rspec_junit_formatter']

        error_str = f'''Missing Ruby or one of the following Ruby gems: {', '.join(required_gems)}
These need to be installed to run the cluster spec tests from epicli'''

        if shutil.which('ruby') is None or shutil.which('gem') is None:
            raise Exception(error_str)

        p = subprocess.Popen(['gem', 'query', '--local'], stdout=PIPE)
        out, err = p.communicate()
        if not all(n in out.decode('utf-8') for n in required_gems):
            raise Exception(error_str)

    def run(self, spec_output, inventory, user, key, group):
        self.check_dependencies()

        env = os.environ.copy()
        env['spec_output'] = spec_output
        env['inventory'] = inventory
        env['user'] = user
        env['keypath'] = key

        cmd = f'rake inventory={inventory} user={user} keypath={key} spec_output={spec_output} spec:{group}'

        self.logger.info(f'Running: "{cmd}"')

        logpipe = LogPipe(__name__)
        with Popen(cmd.split(' '),
                   cwd=SPEC_TEST_PATH,
                   env=env,
                   stdout=logpipe,
                   stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running: "{cmd}"')

    @staticmethod
    def get_spec_groups():
        groups = os.listdir(SPEC_TEST_PATH + '/spec')
        groups.remove('spec_helper.rb')
        # sorted() returns a new list, so assign the result (the original call discarded it)
        groups = sorted(['all'] + groups, key=str.lower)
        return groups
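
A hedged usage sketch (the paths and values below are made up for illustration; SPEC_TEST_PATH
and the Ruby gem checks come from the project itself):

spec = SpecCommand()
print(spec.get_spec_groups())     # e.g. ['all', 'kafka', 'postgresql', ...]
spec.run(spec_output='/tmp/spec-output',
         inventory='/tmp/build/inventory',
         user='operations',
         key='/root/.ssh/id_rsa',
         group='all')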
Code example #3
class Step(metaclass=ABCMeta):
    def __init__(self, step_name):
        self.logger = Log(step_name)

    def __enter__(self):
        self.start = time.time()
        self.logger.info('Starting run')
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        passed = int(round((time.time() - self.start) * 1000))
        self.logger.info('Run done in ' + str(passed) + 'ms')
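
As shown here Step declares no abstract methods, so despite the ABCMeta metaclass it can be
used directly as a timing context manager; a minimal sketch (the working directory path is
illustrative):

with Step('terraform apply'):
    TerraformCommand('/workdir/terraform').apply(auto_approve=True)
# On exit the step logger reports something like "Run done in 1234ms".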
Code example #4
class TerraformCommand:

    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.APPLY_COMMAND = "apply"
        self.DESTROY_COMMAND = "destroy"
        self.PLAN_COMMAND = "plan"
        self.INIT_COMMAND = "init"
        self.working_directory = working_directory

    def apply(self, auto_approve=False):
        self.run(self, self.APPLY_COMMAND, auto_approve=auto_approve)

    def destroy(self, auto_approve=False):
        self.run(self, self.DESTROY_COMMAND, auto_approve=auto_approve)

    def plan(self):
        self.run(self, self.PLAN_COMMAND)

    def init(self):
        self.run(self, self.INIT_COMMAND)

    @staticmethod
    def run(self, command, auto_approve=False):
        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        if command == self.APPLY_COMMAND:
            cmd.append('-state=' + self.working_directory + '/terraform.tfstate')

        cmd.append(self.working_directory)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')
Code example #5
class AnsibleCommand:
    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.working_directory = working_directory

    def run_task(self, hosts, inventory, module, args=None):
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_task_with_retries(self,
                              inventory,
                              module,
                              hosts,
                              retries,
                              timeout=10,
                              args=None):
        for i in range(retries):
            try:
                self.run_task(hosts=hosts,
                              inventory=inventory,
                              module=module,
                              args=args)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info('Retry running task: ' + str(i + 1) + '/' +
                                 str(retries))
                time.sleep(timeout)
        else:
            raise Exception(
                f'Failed running task after {str(retries)} retries')

    def run_playbook(self, inventory, playbook_path, vault_file=None):
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        if vault_file is not None:
            cmd.extend(["--vault-password-file", vault_file])

        cmd.append(playbook_path)

        if Config().debug > 0:
            cmd.append(ansible_verbosity[Config().debug])

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_playbook_with_retries(self,
                                  inventory,
                                  playbook_path,
                                  retries,
                                  timeout=10):
        for i in range(retries):
            try:
                self.run_playbook(inventory=inventory,
                                  playbook_path=playbook_path)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info('Retry running playbook: ' + str(i + 1) +
                                 '/' + str(retries))
                time.sleep(timeout)
        else:
            raise Exception(
                f'Failed running playbook after {str(retries)} retries')
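
A hedged usage sketch (inventory and playbook paths are illustrative):

ansible = AnsibleCommand()
ansible.run_task_with_retries(inventory='/tmp/build/inventory',
                              module='ping',
                              hosts='all',
                              retries=5)
ansible.run_playbook_with_retries(inventory='/tmp/build/inventory',
                                  playbook_path='/tmp/build/ansible/site.yml',
                                  retries=3)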
Code example #6
def dump_config(config):
    logger = Log('config')
    for attr in config.__dict__:
        if attr.startswith('_'):
            logger.info('%s = %r' % (attr[1:], getattr(config, attr)))
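
dump_config only logs attributes whose names start with an underscore, stripping the prefix
in the output; a minimal sketch with an assumed stand-in config object:

class FakeConfig:
    def __init__(self):
        self._debug = 1
        self._log_file = 'epicli.log'

dump_config(FakeConfig())
# logs something like:  debug = 1  and  log_file = 'epicli.log'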
Code example #7
File: AnsibleCommand.py  Project: branlav/epiphany
class AnsibleCommand:
    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.working_directory = working_directory

    def run_task(self, hosts, inventory, module, args):
        cmd = ['ansible']

        cmd.extend(["-m", module])

        if args is not None and len(args) > 0:
            cmd.extend(["-a", args])

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(hosts)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_task_with_retries(self,
                              inventory,
                              module,
                              args,
                              hosts,
                              retries,
                              timeout=10):
        for i in range(retries):
            try:
                self.run_task(hosts=hosts,
                              inventory=inventory,
                              module=module,
                              args=args)
                break
            except Exception as e:
                self.logger.error(e)
                self.logger.info("Retry running task: " + str(i + 1) + "/" +
                                 str(retries))
                time.sleep(timeout)

    def run_playbook(self, inventory, playbook_path):
        cmd = ['ansible-playbook']

        if inventory is not None and len(inventory) > 0:
            cmd.extend(["-i", inventory])

        cmd.append(playbook_path)

        self.logger.info('Running: "' + ' '.join(cmd) + '"')

        logpipe = LogPipe(__name__)
        with subprocess.Popen(cmd, stdout=logpipe, stderr=logpipe) as sp:
            logpipe.close()

        if sp.returncode != 0:
            raise Exception('Error running: "' + ' '.join(cmd) + '"')
        else:
            self.logger.info('Done running "' + ' '.join(cmd) + '"')

    def run_playbook_with_retries(self,
                                  inventory,
                                  playbook_path,
                                  retries,
                                  timeout=10):
        for i in range(retries):
            try:
                self.run_playbook(inventory=inventory,
                                  playbook_path=playbook_path)
                return 0
            except Exception as e:
                self.logger.error(e)
                self.logger.info("Retry running playbook: " + str(i + 1) +
                                 "/" + str(retries))
                time.sleep(timeout)
        return 1
Code example #8
File: APIProxy.py  Project: sunshine69/epiphany
class APIProxy:
    def __init__(self, cluster_model, config_docs):
        self.cluster_model = cluster_model
        self.cluster_name = self.cluster_model.specification.name.lower()
        self.cluster_prefix = self.cluster_model.specification.prefix.lower()
        self.resource_group_name = resource_name(self.cluster_prefix,
                                                 self.cluster_name, 'rg')
        self.config_docs = config_docs
        self.logger = Log(__name__)

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def login_account(self):
        subscription_name = self.cluster_model.specification.cloud.subscription_name
        all_subscription = self.run(self, 'az login')
        subscription = select_first(all_subscription,
                                    lambda x: x['name'] == subscription_name)
        if subscription is None:
            raise Exception(
                f'User does not have access to subscription: "{subscription_name}"'
            )
        return subscription

    def login_sp(self, sp_data):
        name = sp_data['name']
        password = sp_data['password']
        tenant = sp_data['tenant']
        return self.run(
            self,
            f'az login --service-principal -u \'{name}\' -p "{password}" --tenant \'{tenant}\'',
            False)

    def set_active_subscribtion(self, subscription_id):
        self.run(self, f'az account set --subscription {subscription_id}')

    def get_active_subscribtion(self):
        subscription = self.run(self, 'az account show')
        return subscription

    def create_sp(self, app_name, subscription_id):
        #TODO: make role configurable?
        sp = self.run(
            self,
            f'az ad sp create-for-rbac -n \'{app_name}\' --role=\'Contributor\' --scopes=\'/subscriptions/{subscription_id}\''
        )
        # Sleep for a while. Sometimes the call returns before the rights of the SP are finished creating.
        self.wait(self, 60)
        return sp

    def get_ips_for_feature(self, component_key):
        look_for_public_ip = self.cluster_model.specification.cloud.use_public_ips
        cluster = cluster_tag(self.cluster_prefix, self.cluster_name)
        running_instances = self.run(
            self,
            f'az vm list-ip-addresses --ids $(az resource list --query "[?type==\'Microsoft.Compute/virtualMachines\' && tags.{component_key} == \'\' && tags.cluster == \'{cluster}\'].id" --output tsv)'
        )
        result = []
        for instance in running_instances:
            if isinstance(instance, list):
                instance = instance[0]
            name = instance['virtualMachine']['name']
            if look_for_public_ip:
                ip = instance['virtualMachine']['network'][
                    'publicIpAddresses'][0]['ipAddress']
            else:
                ip = instance['virtualMachine']['network'][
                    'privateIpAddresses'][0]
            result.append(AnsibleHostModel(name, ip))
        return result

    def get_storage_account_primary_key(self, storage_account_name):
        keys = self.run(
            self,
            f'az storage account keys list -g \'{self.resource_group_name}\' -n \'{storage_account_name}\''
        )
        return keys[0]['value']

    @staticmethod
    def wait(self, seconds):
        for x in range(0, seconds):
            self.logger.info(f'Waiting {seconds} seconds...{x}')
            time.sleep(1)

    @staticmethod
    def run(self, cmd, log_cmd=True):
        if log_cmd:
            self.logger.info('Running: "' + cmd + '"')

        logpipe = LogPipe(__name__)
        with Popen(cmd, stdout=PIPE, stderr=logpipe, shell=True) as sp:
            logpipe.close()
            try:
                data = sp.stdout.read().decode('utf-8')
                data = re.sub(r'\s+', '', data)
                data = re.sub(r'(\x9B|\x1B\[)[0-?]*[ -\/]*[@-~]', '', data)
                output = json.loads(data)
            except:
                output = {}

        if sp.returncode != 0:
            if log_cmd:
                raise Exception(f'Error running: "{cmd}"')
            else:
                raise Exception(f'Error running Azure APIProxy cmd')
        else:
            if log_cmd:
                self.logger.info(f'Done running "{cmd}"')
            return output
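
A hedged usage sketch (cluster_model and config_docs come from the calling code and are not
shown here; the component key is illustrative, and the 'id' field is the one returned by
`az login` for each subscription):

with APIProxy(cluster_model, config_docs) as proxy:
    subscription = proxy.login_account()
    proxy.set_active_subscribtion(subscription['id'])
    masters = proxy.get_ips_for_feature('kubernetes_master')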
Code example #9
class TerraformCommand:
    def __init__(self, working_directory=os.path.dirname(__file__)):
        self.logger = Log(__name__)
        self.APPLY_COMMAND = "apply"
        self.DESTROY_COMMAND = "destroy"
        self.PLAN_COMMAND = "plan"
        self.INIT_COMMAND = "init"
        self.working_directory = working_directory

    def apply(self, auto_approve=False, env=os.environ.copy()):
        self.run(self,
                 self.APPLY_COMMAND,
                 auto_approve=auto_approve,
                 env=env,
                 auto_retries=3)

    def destroy(self, auto_approve=False, env=os.environ.copy()):
        self.run(self,
                 self.DESTROY_COMMAND,
                 auto_approve=auto_approve,
                 env=env)

    def plan(self, env=os.environ.copy()):
        self.run(self, self.PLAN_COMMAND, env=env)

    def init(self, env=os.environ.copy()):
        self.run(self, self.INIT_COMMAND, env=env)

    @staticmethod
    def run(self, command, env, auto_approve=False, auto_retries=1):
        cmd = ['terraform', command]

        if auto_approve:
            cmd.append('--auto-approve')

        if command == self.APPLY_COMMAND or command == self.DESTROY_COMMAND:
            cmd.append(f'-state={self.working_directory}/terraform.tfstate')

        cmd.append('-no-color')

        cmd.append(self.working_directory)

        cmd = ' '.join(cmd)
        self.logger.info(f'Running: "{cmd}"')

        if Config().debug > 0:
            env['TF_LOG'] = terraform_verbosity[Config().debug]

        retries = 1
        do_retry = True
        while retries <= auto_retries and do_retry:
            logpipe = LogPipe(__name__)
            with subprocess.Popen(cmd,
                                  stdout=logpipe,
                                  stderr=logpipe,
                                  env=env,
                                  shell=True) as sp:
                logpipe.close()
            retries = retries + 1
            do_retry = next(
                (True for s in logpipe.stderrstrings if 'RetryableError' in s),
                False)
            if do_retry and retries <= auto_retries:
                self.logger.warning(
                    'Terraform failed with "RetryableError" error. '
                    f'Retry: {retries}/{auto_retries}')

        if sp.returncode != 0:
            raise Exception(f'Error running: "{cmd}"')
        else:
            self.logger.info(f'Done running "{cmd}"')
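
In this variant only apply() opts into the retry loop (auto_retries=3); a hedged usage sketch
with an illustrative working directory:

tf = TerraformCommand('/workdir/terraform')
tf.init()
tf.plan()
tf.apply(auto_approve=True)   # re-run (up to 3 attempts in total) while stderr contains "RetryableError"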
Code example #10
def dump_debug_info():
    def dump_external_debug_info(title, args):
        dump_file.write(f'\n\n*****{title}******\n')
        p = subprocess.Popen(args, stdout=subprocess.PIPE)
        out, err = p.communicate()
        lines = filter(lambda x: x.strip(),
                       out.decode("utf-8").splitlines(keepends=True))
        dump_file.writelines(lines)

    try:
        logger = Log('dump_debug_info')
        config = Config()

        timestr = time.strftime("%Y%m%d-%H%M%S")
        dump_path = os.getcwd() + f'/epicli_error_{timestr}.dump'
        dump_file = open(dump_path, 'w')

        dump_file.write('*****EPICLI VERSION******\n')
        dump_file.write(f'{VERSION}')

        dump_file.write('\n\n*****EPICLI ARGS******\n')
        dump_file.write(' '.join([*['epicli'], *sys.argv[1:]]))

        dump_file.write('\n\n*****EPICLI CONFIG******\n')
        for attr in config.__dict__:
            if attr.startswith('_'):
                dump_file.write('%s = %r\n' %
                                (attr[1:], getattr(config, attr)))

        dump_file.write('\n\n*****SYSTEM******\n')
        system_data = {
            'platform': platform.system(),
            'release': platform.release(),
            'type': platform.uname().system,
            'arch': platform.uname().machine,
            'cpus': json.dumps(os.cpu_count()),
            'hostname': socket.gethostname()
        }
        dump_file.write(json.dumps(dict(system_data), indent=2))

        dump_file.write('\n\n*****ENVIRONMENT VARS******\n')
        dump_file.write(json.dumps(dict(os.environ), indent=2))

        dump_file.write('\n\n*****PYTHON******\n')
        dump_file.write(f'python_version: {platform.python_version()}\n')
        dump_file.write(f'python_build: {platform.python_build()}\n')
        dump_file.write(f'python_revision: {platform.python_revision()}\n')
        dump_file.write(f'python_compiler: {platform.python_compiler()}\n')
        dump_file.write(f'python_branch: {platform.python_branch()}\n')
        dump_file.write(
            f'python_implementation: {platform.python_implementation()}\n')

        dump_external_debug_info('ANSIBLE VERSION', ['ansible', '--version'])
        dump_external_debug_info('ANSIBLE CONFIG', ['ansible-config', 'dump'])
        dump_external_debug_info('ANSIBLE-VAULT VERSION',
                                 ['ansible-vault', '--version'])
        dump_external_debug_info('TERRAFORM VERSION',
                                 ['terraform', '--version'])
        dump_external_debug_info('SKOPEO VERSION', ['skopeo', '--version'])
        dump_external_debug_info('RUBY VERSION', ['ruby', '--version'])
        dump_external_debug_info('RUBY GEM VERSION', ['gem', '--version'])
        dump_external_debug_info('RUBY INSTALLED GEMS',
                                 ['gem', 'query', '--local'])

        dump_file.write('\n\n*****LOG******\n')
        log_path = os.path.join(get_output_path(), config.log_file)
        with open(log_path) as log_file:
            dump_file.writelines(log_file.readlines())
    finally:
        dump_file.close()
        logger.info(f'Error dump has been written to: {dump_path}')
        logger.warning(
            'This dump might contain sensitive information. Check before sharing.'
        )
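
A hedged sketch of how dump_debug_info is typically wired in: called from a top-level error
handler so a failed run leaves an epicli_error_<timestamp>.dump file in the current working
directory (run_cli below is an assumed entry point, not shown here):

try:
    run_cli()
except Exception:
    dump_debug_info()
    raise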