def load_all_assistants(cls, superassistants):
    """Load YamlAssistant instances for the given top-level assistants.

    Tries to use the on-disk cache (updated/created if needed). If the
    cache is unusable, it falls back to loading the assistant files
    directly.

    Args:
        superassistants: list of top-level assistant instances
    Returns:
        dict mapping assistant role name to the loaded assistants
    """
    # mapping of assistant roles to lists of top-level assistant instances
    _assistants = {}
    # {'crt': CreatorAssistant, ...}
    superas_dict = dict(map(lambda a: (a.name, a), superassistants))
    to_load = set(superas_dict.keys())
    for tl in to_load:
        dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
        file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
        # load all if we're not using cache or if we fail to load it
        load_all = not settings.USE_CACHE
        if settings.USE_CACHE:
            try:
                cch = cache.Cache()
                cch.refresh_role(tl, file_hierarchy)
                _assistants[tl] = cls.get_assistants_from_cache_hierarchy(
                    cch.cache[tl], superas_dict[tl], role=tl)
            except Exception as e:
                # was "except BaseException", which also swallowed
                # KeyboardInterrupt/SystemExit; Exception is broad enough
                # to cover any cache corruption while keeping Ctrl-C working
                logger.debug('Failed to use DevAssistant cachefile {0}: {1}'.format(
                    settings.CACHE_FILE, e))
                load_all = True
        if load_all:
            _assistants[tl] = cls.get_assistants_from_file_hierarchy(
                file_hierarchy, superas_dict[tl], role=tl)
    return _assistants
def resolve(cls, *args):
    """Resolve RPM dependencies via yum; return envra strings of every
    package the transaction would install or update.

    Args:
        *args: package specs; a spec starting with '@' names a yum group
    Raises:
        exceptions.DependencyException: if a package is not found or
            dependency resolution fails
    """
    # TODO: we may need to rewrite this for e.g. suse, which
    # is rpm based, but doesn't use yum; same for install()/is_available()/can_operate()
    logger.info('Resolving RPM dependencies ...')
    import yum
    y = yum.YumBase()
    # private cache dir so no root privileges are needed
    y.setCacheDir(tempfile.mkdtemp())
    for pkg in args:
        if pkg.startswith('@'):
            y.selectGroup(pkg[1:])
        else:
            try:
                y.install(y.returnPackageByDep(pkg))
            except yum.Errors.YumBaseError:
                msg = 'Package not found: {pkg}'.format(pkg=pkg)
                logger.error(msg)
                raise exceptions.DependencyException(msg)
    try:
        y.resolveDeps()
    except yum.Errors.PackageSackError as e:
        # resolveDeps() can itself raise (see the sibling implementation's
        # "Resolution of Issue 154"); report it as a dependency problem
        # instead of crashing with a raw traceback
        raise exceptions.DependencyException('Error resolving RPM dependencies: {0}'.\
            format(str(e)))
    logger.debug('Installing/Updating:')
    to_install = []
    for pkg in y.tsInfo.getMembers():
        to_install.append(pkg.po.ui_envra)
        logger.debug(pkg.po.ui_envra)
    return to_install
def run_command(cls, cmd_str, fg=False, log_as_info=False):
    """Run a command given as a whitespace-separated string, e.g. "cp foo bar".

    Args:
        cmd_str: the command line to execute
        fg: when True, run the command in the foreground (inherit stdio)
        log_as_info: log the invocation at INFO instead of DEBUG level
    Returns:
        the command output (None for the special-cased "cd")
    """
    result = None
    # expand ~ in every word that contains it
    words = [os.path.expanduser(w) if '~' in w else w for w in cmd_str.split()]

    if words[0] == 'cd':
        # hack for cd to behave like shell cd and stay in the directory
        plumbum.local.cwd.chdir(words[1])
    else:
        cmd = plumbum.local[words[0]]
        for arg in cls._strip_trailing_quotes(cls._connect_quoted(words[1:])):
            cmd = cmd[arg]

        # log the invocation
        log_string = settings.COMMAND_LOG_STRING.format(cmd=cmd)
        (logger.info if log_as_info else logger.debug)(log_string)

        # actually invoke the command
        if fg:
            result = cmd & plumbum.FG
        else:
            result = cmd()
    return result
def resolve(cls, *deps):
    """
    Return all dependencies which will be installed.

    NOTE Simplified (naive) implementation will show the list of correctly
    spelled packages to be installed. For example 'firefox' will be
    resolved to 'www-client/firefox-25.0.1'...

    TODO ... or maybe version part must be stripped?
    """
    import portage

    logger.info('[portage] Resolving dependencies ...')
    porttree = portage.db[portage.root]['porttree']

    resolved = set()
    for spec in deps:
        best = porttree.dep_bestmatch(spec)
        logger.debug('{0} resolved to {1}'.format(repr(spec), repr(best)))
        if not best:
            msg = 'Package not found or spec is invalid: {pkg}'.format(pkg=spec)
            raise exceptions.DependencyException(msg)
        resolved.add(best)

    cls.throw_package_list(list(resolved))
def load_all_assistants(cls, superassistants):
    """Fill cls._assistants with loaded YamlAssistant instances for the
    given top-level assistants.

    Tries to use cache (updated/created if needed). If cache is unusable,
    it falls back to loading all assistants. Roles already present in
    cls._assistants are skipped.

    Args:
        superassistants: list of top-level assistant instances
    """
    # {'crt': CreatorAssistant, ...}
    superas_dict = dict(map(lambda a: (a.name, a), superassistants))
    to_load = set(superas_dict.keys()) - set(cls._assistants.keys())
    for tl in to_load:
        dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
        file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
        # load all if we're not using cache or if we fail to load it
        load_all = not current_run.USE_CACHE
        if current_run.USE_CACHE:
            try:
                cch = cache.Cache()
                cch.refresh_role(tl, file_hierarchy)
                cls._assistants[tl] = cls.get_assistants_from_cache_hierarchy(
                    cch.cache[tl], superas_dict[tl], role=tl)
            except Exception as e:
                # was "except BaseException", which also swallowed
                # KeyboardInterrupt/SystemExit during cache refresh
                logger.debug(e)
                load_all = True
        # the original condition "load_all or not current_run.USE_CACHE"
        # was redundant: load_all is already initialized to that value
        if load_all:
            cls._assistants[tl] = cls.get_assistants_from_file_hierarchy(
                file_hierarchy, superas_dict[tl], role=tl)
def _render_one_template(cls, env, template, result_filename, data, overwrite):
    """Render a single Jinja2 template into result_filename.

    Args:
        env: jinja2.Environment to load the template from
        template: template name within the environment's search path
        result_filename: destination file path
        data: dict used as the template rendering context
        overwrite: when True, an existing destination file is replaced
    Returns:
        (True, 'success') on success
    Raises:
        exceptions.CommandException: when the template is missing/broken,
            or the destination exists and overwrite is False
    """
    # Get a template instance
    tpl = None
    try:
        logger.debug('Using template file: {0}'.format(template))
        tpl = env.get_template(template)
    except jinja2.TemplateNotFound:
        raise exceptions.CommandException('Template {t} not found in path {p}.'.\
            format(t=template, p=env.loader.searchpath))
    except jinja2.TemplateError as e:
        # str(e), not e.message: the "message" attribute was removed from
        # exceptions in Python 3, so the old code raised AttributeError here
        raise exceptions.CommandException('Template file failure: {0}'.format(str(e)))

    # Check if destination file exists, overwrite if needed
    if os.path.exists(result_filename):
        if overwrite:
            logger.info('Overwriting the destination file {0}'.format(result_filename))
            os.remove(result_filename)
        else:
            raise exceptions.CommandException('The destination file already exists: {0}'.\
                format(result_filename))

    # Generate an output file finally...
    with open(result_filename, 'w') as out:
        result = tpl.render(**data)
        out.write(result)

    return (True, 'success')
def _get_section_to_run(self, section, kwargs_override=False, **kwargs):
    """Return the proper section to run.

    Args:
        section: name of section to run
        kwargs_override: whether or not first of [_run_{arg} for arg in kwargs]
                         is preferred over specified section
        **kwargs: devassistant arguments
    Returns:
        section to run - dict (if not found, returns empty dict)
    """
    to_run = {}

    # prefer the explicitly requested section, if such an attribute exists
    if section and ('_' + section) in dir(self):
        to_run = getattr(self, '_' + section)

    # optionally let _run_<arg> attributes matching passed kwargs win
    if kwargs_override:
        for attr in (m for m in dir(self) if m.startswith('_run_')):
            if attr[len('_run_'):] in kwargs:
                to_run = getattr(self, attr)

    if not to_run:
        logger.debug(
            'Couldn\'t find section {0} or any other appropriate.'.format(
                section))
    return to_run
def _get_section_to_run(self, section, kwargs_override=False, **kwargs):
    """Return the proper section to run.

    Args:
        section: name of section to run
        kwargs_override: whether or not first of [_run_{arg} for arg in kwargs]
                         is preferred over specified section
        **kwargs: devassistant arguments
    Returns:
        section to run - dict (if not found, returns empty dict)
    """
    chosen = {}
    attributes = dir(self)

    if section:
        underscored = '_' + section
        if underscored in attributes:
            chosen = getattr(self, underscored)

    if kwargs_override:
        prefix = '_run_'
        for name in attributes:
            if name.startswith(prefix) and name[len(prefix):] in kwargs:
                chosen = getattr(self, name)

    if not chosen:
        logger.debug('Couldn\'t find section {0} or any other appropriate.'.format(section))
    return chosen
def resolve(cls, *args):
    """Resolve the given RPM dependency specs via yum.

    Returns a list of envra strings for every package the transaction
    would install or update. Specs starting with '@' select a yum group.

    Raises:
        exceptions.DependencyException: unknown package or resolver error
    """
    logger.info('Resolving RPM dependencies ...')
    import yum
    yb = yum.YumBase()
    # private cache dir so no root privileges are needed
    yb.setCacheDir(tempfile.mkdtemp())
    for spec in args:
        if spec.startswith('@'):
            yb.selectGroup(spec[1:])
            continue
        try:
            yb.install(yb.returnPackageByDep(spec))
        except yum.Errors.YumBaseError:
            raise exceptions.DependencyException(
                'Package not found: {pkg}'.format(pkg=spec))
    try:
        yb.resolveDeps()
    except yum.Errors.PackageSackError as e:
        # Resolution of Issue 154
        raise exceptions.DependencyException('Error resolving RPM dependencies: {0}'.\
            format(str(e)))
    logger.debug('Installing/Updating:')
    to_install = []
    for member in yb.tsInfo.getMembers():
        to_install.append(member.po.ui_envra)
        logger.debug(member.po.ui_envra)
    return to_install
def _render_one_template(cls, env, template, result_filename, data, overwrite):
    """Render one Jinja2 template to the given destination file.

    Args:
        env: jinja2.Environment to load the template from
        template: template name within the environment's search path
        result_filename: destination file path
        data: dict used as the template rendering context
        overwrite: when True, an existing destination file is replaced
    Returns:
        (True, 'success') on success
    Raises:
        exceptions.CommandException: when the template is missing/broken,
            or the destination exists and overwrite is False
    """
    # Get a template instance
    tpl = None
    try:
        logger.debug('Using template file: {0}'.format(template))
        tpl = env.get_template(template)
    except jinja2.TemplateNotFound:
        raise exceptions.CommandException('Template {t} not found in path {p}.'.\
            format(t=template, p=env.loader.searchpath))
    except jinja2.TemplateError as e:
        # str(e) instead of e.message: exceptions have no "message"
        # attribute on Python 3, so the old code raised AttributeError here
        raise exceptions.CommandException('Template file failure: {0}'.format(str(e)))

    # Check if destination file exists, overwrite if needed
    if os.path.exists(result_filename):
        if overwrite:
            logger.info('Overwriting the destination file {0}'.format(result_filename))
            os.remove(result_filename)
        else:
            raise exceptions.CommandException('The destination file already exists: {0}'.\
                format(result_filename))

    # Generate an output file finally...
    with open(result_filename, 'w') as out:
        result = tpl.render(**data)
        out.write(result)

    return (True, 'success')
def resolve(cls, *args):
    """Resolve the given package specs with DNF; return the packages the
    transaction would install, as strings.

    A spec starting with '@' is treated as a group name.

    Raises:
        exceptions.DependencyException: if a package is unknown or the
            transaction cannot be resolved
    """
    logger.info('Resolving RPM dependencies with DNF...')
    import dnf
    import hawkey
    base = dnf.Base()
    # throwaway cache dir: avoids needing root to write the system cache
    base.conf.cachedir = tempfile.mkdtemp()
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # this line will crash there -- confirm the supported Python versions
    base.conf.substitutions['releasever'] = platform.linux_distribution()[1]
    base.read_all_repos()
    base.fill_sack(load_system_repo=True, load_available_repos=True)
    for pkg in (str(arg) for arg in args):
        if pkg.startswith('@'):
            base.group_install(pkg[1:])
        else:
            try:
                # pick the first available package that provides the spec
                res = base.sack.query().available().filter(provides=pkg).run()
                base.install(str(res[0]))
            except (hawkey.QueryException, IndexError):
                # IndexError: empty query result, i.e. nothing provides pkg
                msg = 'Package not found: {pkg}'.format(pkg=pkg)
                raise exceptions.DependencyException(msg)
    try:
        base.resolve()
    except dnf.exceptions.Error as e:
        raise exceptions.DependencyException('Error resolving RPM dependencies with DNF: {0}'.
                                             format(utils.exc_as_decoded_string(e)))
    logger.debug('Installing/Updating:')
    to_install = []
    for pkg in base.transaction.install_set:
        to_install.append(str(pkg))
        logger.debug(str(pkg))
    return to_install
def resolve(cls, *deps):
    """
    Return all dependencies which will be installed.

    NOTE Simplified (naive) implementation will show the list of correctly
    spelled packages to be installed. For example 'firefox' will be
    resolved to 'www-client/firefox-25.0.1'...

    TODO ... or maybe version part must be stripped?
    """
    import portage

    logger.info('[portage] Resolving dependencies ...')
    tree = portage.db[portage.root]['porttree']

    matches = set()
    for spec in deps:
        match = tree.dep_bestmatch(spec)
        logger.debug('{0} resolved to {1}'.format(repr(spec), repr(match)))
        if match:
            matches.add(match)
        else:
            raise exceptions.DependencyException(
                'Package not found or spec is invalid: {pkg}'.format(pkg=spec))

    cls.throw_package_list(list(matches))
def resolve(cls, *args):
    """Resolve package specs with DNF and return the install set as strings.

    A spec starting with '@' is treated as a group name.

    Raises:
        exceptions.DependencyException: unknown package / unresolvable
            transaction
    """
    logger.info('Resolving RPM dependencies with DNF...')
    import dnf
    import hawkey
    base = dnf.Base()
    # throwaway cache dir: avoids needing root to write the system cache
    base.conf.cachedir = tempfile.mkdtemp()
    # NOTE(review): platform.linux_distribution() was removed in Python 3.8;
    # this line will crash there -- confirm the supported Python versions
    base.conf.substitutions['releasever'] = platform.linux_distribution(
    )[1]
    base.read_all_repos()
    base.fill_sack(load_system_repo=True, load_available_repos=True)
    for pkg in (str(arg) for arg in args):
        if pkg.startswith('@'):
            base.group_install(pkg[1:])
        else:
            try:
                # pick the first available package that provides the spec
                res = base.sack.query().available().filter(
                    provides=pkg).run()
                base.install(str(res[0]))
            except (hawkey.QueryException, IndexError):
                # IndexError: empty query result, i.e. nothing provides pkg
                msg = 'Package not found: {pkg}'.format(pkg=pkg)
                raise exceptions.DependencyException(msg)
    try:
        base.resolve()
    except dnf.exceptions.Error as e:
        raise exceptions.DependencyException(
            'Error resolving RPM dependencies with DNF: {0}'.format(
                utils.exc_as_decoded_string(e)))
    logger.debug('Installing/Updating:')
    to_install = []
    for pkg in base.transaction.install_set:
        to_install.append(str(pkg))
        logger.debug(str(pkg))
    return to_install
def _try_obtain_mandatory_params(cls, args):
    """Retrieve and validate required parameters from the `args` dict.

    - 'template'    template descriptor from `files' section; it consists
                    of the only `source' key -- a name of template to use
    - 'data'        dict of parameters to use when rendering (optional)
    - 'destination' path for output files

    Returns:
        (template source name, output file name, context data dict)
    Raises:
        exceptions.CommandException: on a missing or mistyped parameter
    """
    if 'template' not in args or not isinstance(args['template'], dict):
        raise exceptions.CommandException('Missed template parameter or wrong type')
    template = args['template']

    # previously this reused the "template" message, making a missing
    # "source" key indistinguishable from a missing "template" dict
    if 'source' not in template or not isinstance(template['source'], str):
        raise exceptions.CommandException('Missed template source parameter or wrong type')
    template = template['source']

    if 'destination' not in args or not isinstance(args['destination'], str):
        raise exceptions.CommandException('Missed destination parameter or wrong type')

    if not os.path.isdir(args['destination']):
        # grammar fix in the user-facing message: was "doesn't exists"
        raise exceptions.CommandException("Destination directory doesn't exist")

    data = {}
    if 'data' in args and isinstance(args['data'], dict):
        data = args['data']
    logger.debug('Template context data: {0}'.format(data))

    return (template, cls._make_output_file_name(args, template), data)
def load_all_assistants(cls, superassistants):
    """Fill cls._assistants with loaded YamlAssistant instances for the
    given top-level assistants.

    Tries to use cache (updated/created if needed). If cache is unusable,
    it falls back to loading all assistants. Roles already present in
    cls._assistants are skipped.

    Args:
        superassistants: list of top-level assistant instances
    """
    # {'crt': CreatorAssistant, ...}
    superas_dict = dict(map(lambda a: (a.name, a), superassistants))
    to_load = set(superas_dict.keys()) - set(cls._assistants.keys())
    for tl in to_load:
        dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
        file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
        # load all if we're not using cache or if we fail to load it
        load_all = not current_run.USE_CACHE
        if current_run.USE_CACHE:
            try:
                cch = cache.Cache()
                cch.refresh_role(tl, file_hierarchy)
                cls._assistants[tl] = cls.get_assistants_from_cache_hierarchy(
                    cch.cache[tl], superas_dict[tl], role=tl)
            except Exception as e:
                # was "except BaseException", which also swallowed
                # KeyboardInterrupt/SystemExit during cache refresh
                logger.debug(e)
                load_all = True
        if load_all:
            cls._assistants[tl] = cls.get_assistants_from_file_hierarchy(
                file_hierarchy, superas_dict[tl], role=tl)
def _github_create_twofactor_authorization(cls, ui):
    """Create an authorization for a GitHub user using two-factor
    authentication. Unlike its non-two-factor counterpart, this method
    does not traverse the available authentications as they are not
    visible until the user logs in.

    Please note: cls._user's attributes are not accessible until the
    authorization is created due to the way (py)github works.
    """
    try:
        try:
            # This is necessary to trigger sending a 2FA key to the user
            auth = cls._user.create_authorization()
        except cls._gh_exceptions.GithubException:
            # the source line here was mangled by a credential-redaction
            # artifact ("...password:'******'repo'..."); reconstructed:
            # ask for the one-time password, then create the authorization
            onetime_pw = DialogHelper.ask_for_password(
                ui, prompt='Your one time password:')
            auth = cls._user.create_authorization(
                scopes=['repo', 'user', 'admin:public_key'],
                note="DevAssistant",
                onetime_password=onetime_pw)
        cls._user = cls._gh_module.Github(
            login_or_token=auth.token).get_user()
        logger.debug(
            'Two-factor authorization for user "{0}" created'.format(
                cls._user.login))
        cls._github_store_authorization(cls._user, auth)
        logger.debug('Two-factor authorization token stored')
    except cls._gh_exceptions.GithubException as e:
        logger.warning(
            'Creating two-factor authorization failed: {0}'.format(e))
def resolve(cls, *args):
    """Resolve RPM dependencies via yum; return envra strings of every
    package the transaction would install or update.

    Args:
        *args: package specs; a spec starting with '@' names a yum group
    Raises:
        exceptions.DependencyException: if a package is not found or
            dependency resolution fails
    """
    # TODO: we may need to rewrite this for e.g. suse, which
    # is rpm based, but doesn't use yum; same for install()/is_available()/can_operate()
    logger.info('Resolving RPM dependencies ...')
    import yum
    y = yum.YumBase()
    # private cache dir so no root privileges are needed
    y.setCacheDir(tempfile.mkdtemp())
    for pkg in args:
        if pkg.startswith('@'):
            y.selectGroup(pkg[1:])
        else:
            try:
                y.install(y.returnPackageByDep(pkg))
            except yum.Errors.YumBaseError:
                msg = 'Package not found: {pkg}'.format(pkg=pkg)
                logger.error(msg)
                raise exceptions.DependencyException(msg)
    try:
        y.resolveDeps()
    except yum.Errors.PackageSackError as e:
        # resolveDeps() can itself raise (see the sibling implementation's
        # "Resolution of Issue 154"); report a dependency problem instead
        # of crashing with a raw traceback
        raise exceptions.DependencyException('Error resolving RPM dependencies: {0}'.\
            format(str(e)))
    logger.debug('Installing/Updating:')
    to_install = []
    for pkg in y.tsInfo.getMembers():
        to_install.append(pkg.po.ui_envra)
        logger.debug(pkg.po.ui_envra)
    return to_install
def resolve(cls, *args):
    """Resolve RPM dependency specs via yum and return the envra strings
    of all packages the transaction would install or update.

    Specs beginning with '@' are treated as yum group names.

    Raises:
        exceptions.DependencyException: unknown package or resolver error
    """
    logger.info('Resolving RPM dependencies ...')
    import yum
    base = yum.YumBase()
    # separate cache dir -> works without root privileges
    base.setCacheDir(tempfile.mkdtemp())
    for dep in args:
        if dep.startswith('@'):
            base.selectGroup(dep[1:])
        else:
            try:
                base.install(base.returnPackageByDep(dep))
            except yum.Errors.YumBaseError:
                raise exceptions.DependencyException(
                    'Package not found: {pkg}'.format(pkg=dep))
    try:
        base.resolveDeps()
    except yum.Errors.PackageSackError as e:
        # Resolution of Issue 154
        raise exceptions.DependencyException('Error resolving RPM dependencies: {0}'.\
            format(str(e)))
    logger.debug('Installing/Updating:')
    to_install = []
    for member in base.tsInfo.getMembers():
        envra = member.po.ui_envra
        to_install.append(envra)
        logger.debug(envra)
    return to_install
def load_all_assistants(cls, superassistants):
    """Load YamlAssistant instances for the given top-level assistants.

    Tries to use the on-disk cache (updated/created if needed). If the
    cache is unusable, it falls back to loading the assistant files
    directly.

    Args:
        superassistants: list of top-level assistant instances
    Returns:
        dict mapping assistant role name to the loaded assistants
    """
    # mapping of assistant roles to lists of top-level assistant instances
    _assistants = {}
    # {'crt': CreatorAssistant, ...}
    superas_dict = dict(map(lambda a: (a.name, a), superassistants))
    to_load = set(superas_dict.keys())
    for tl in to_load:
        dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
        file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
        # load all if we're not using cache or if we fail to load it
        load_all = not settings.USE_CACHE
        if settings.USE_CACHE:
            try:
                cch = cache.Cache()
                cch.refresh_role(tl, file_hierarchy)
                _assistants[tl] = cls.get_assistants_from_cache_hierarchy(
                    cch.cache[tl], superas_dict[tl], role=tl)
            except Exception as e:
                # was "except BaseException", which also swallowed
                # KeyboardInterrupt/SystemExit during cache refresh
                logger.debug(
                    'Failed to use DevAssistant cachefile {0}: {1}'.format(
                        settings.CACHE_FILE, e))
                load_all = True
        if load_all:
            _assistants[tl] = cls.get_assistants_from_file_hierarchy(
                file_hierarchy, superas_dict[tl], role=tl)
    return _assistants
def run_command(cls, cmd_str, fg=False, log_as_info=False):
    """Execute a command given as a string, e.g. "cp foo bar".

    Args:
        cmd_str: whitespace-separated command line
        fg: run in the foreground (inherited stdio) when True
        log_as_info: log the command at INFO instead of DEBUG
    Returns:
        command output, or None for the special-cased "cd"
    """
    result = None
    parts = cmd_str.split()
    # expand ~ in every argument that contains it
    for idx, part in enumerate(parts):
        if '~' in part:
            parts[idx] = os.path.expanduser(part)

    if parts[0] == 'cd':
        # hack for cd to behave like shell cd and stay in the directory
        plumbum.local.cwd.chdir(parts[1])
        return result

    cmd = plumbum.local[parts[0]]
    for arg in cls._strip_trailing_quotes(cls._connect_quoted(parts[1:])):
        cmd = cmd[arg]

    # log the invocation
    message = settings.COMMAND_LOG_STRING.format(cmd=cmd)
    if log_as_info:
        logger.info(message)
    else:
        logger.debug(message)

    # actually invoke the command
    if fg:
        result = cmd & plumbum.FG
    else:
        result = cmd()
    return result
def is_pkg_installed(cls, pkg):
    """Is a package managed by this manager installed?"""
    import portage

    # Get access to installed packages DB
    vartree = portage.db[portage.root]['vartree']
    try:
        matches = vartree.dbapi.match(pkg)
        logger.debug('Checking is installed: {0} -> {1}'.format(pkg, repr(matches)))
    except portage.exception.InvalidAtom:
        raise exceptions.DependencyException('Invalid dependency specification: {0}'.format(pkg))
    # TODO Compare package version!
    return bool(matches)
def resolve(cls, *args):
    """Return the list of Homebrew packages the given formulae depend on.

    Args:
        *args: formula names to resolve
    Returns:
        deduplicated list of dependency package names
    """
    logger.info('Resolving Homebrew dependencies ...')
    for pkg in args:
        logger.debug('Looking at {0}'.format(pkg))
    logger.debug('Installing/Updating:')
    to_install = set()
    for pkg in args:
        query = ClHelper.run_command(' '.join([cls.c_homebrew, 'deps -n', pkg]))
        # "brew deps" prints one package per line; drop empty lines so a
        # formula with no dependencies doesn't contribute an '' entry
        to_install.update(line for line in query.split('\n') if line)
    return list(to_install)
def resolve(cls, *args):
    """Compute the Homebrew dependency closure of the given formulae.

    Args:
        *args: formula names to resolve
    Returns:
        deduplicated list of dependency package names
    """
    logger.info('Resolving Homebrew dependencies ...')
    for pkg in args:
        logger.debug('Looking at {0}'.format(pkg))
    logger.debug('Installing/Updating:')
    to_install = set()
    for pkg in args:
        query = ClHelper.run_command(' '.join([cls.c_homebrew, 'deps -n', pkg]))
        # "brew deps" prints one package per line; filter empty lines so a
        # formula with no dependencies doesn't contribute an '' entry
        to_install.update(line for line in query.split('\n') if line)
    return list(to_install)
def is_pkg_installed(cls, pkg):
    """Check whether *pkg* is installed according to portage's vartree.

    Raises:
        exceptions.DependencyException: if *pkg* is not a valid atom
    """
    import portage

    # Get access to installed packages DB
    installed_db = portage.db[portage.root]['vartree']
    try:
        found = installed_db.dbapi.match(pkg)
        logger.debug('Checking is installed: {0} -> {1}'.format(pkg, repr(found)))
    except portage.exception.InvalidAtom:
        raise exceptions.DependencyException('Invalid dependency specification: {0}'.\
            format(pkg))
    # TODO Compare package version!
    return bool(found)
def is_pkg_installed(cls, dep):
    """Is a package managed by this manager installed?"""
    import paludis

    env = paludis.EnvironmentFactory.instance.create('')
    installed = env.fetch_repository('installed')
    try:
        pkg = paludis.parse_user_package_dep_spec(dep, env,
                                                  paludis.UserPackageDepSpecOptions())
        # TODO Compare package version!
        r = [str(pkg_id) for pkg_id in installed.package_ids(str(pkg.package), [])]
        logger.debug('Checking is installed: {0} -> {1}'.format(pkg, repr(r)))
        return r
    except paludis.BaseException as e:
        msg = 'Dependency specification is invalid [{0}]: {1}'.format(dep, str(e))
        raise exceptions.DependencyException(msg)
def is_pkg_installed(cls, dep):
    """Check whether *dep* is installed according to paludis.

    Returns a (possibly empty) list of installed package-id strings;
    raises exceptions.DependencyException for an invalid spec.
    """
    import paludis

    env = paludis.EnvironmentFactory.instance.create('')
    installed = env.fetch_repository('installed')
    try:
        pkg = paludis.parse_user_package_dep_spec(
            dep, env, paludis.UserPackageDepSpecOptions())
        # TODO Compare package version!
        found = []
        for pkg_id in installed.package_ids(str(pkg.package), []):
            found.append(str(pkg_id))
        logger.debug('Checking is installed: {0} -> {1}'.format(pkg, repr(found)))
        return found
    except paludis.BaseException as e:
        msg = 'Dependency specification is invalid [{0}]: {1}'.format(dep, str(e))
        raise exceptions.DependencyException(msg)
def _try_obtain_common_params(cls, comm):
    """Extract the parameters shared by all jinja_render* actions from a
    Command instance.

    Mandatory:
    - 'template'    template descriptor from `files' section; consists of
                    the only `source' key -- a name of template to use
    - 'destination' path for output files
    Optional:
    - 'data'        dict of parameters to use when rendering
    - 'overwrite'   overwrite file(s) if it (they) exist(s)

    Returns:
        (template source, destination dir, context data, overwrite flag)
    Raises:
        exceptions.CommandException: on missing or mistyped parameters
    """
    args = comm.input_res
    ct = comm.comm_type

    wrong_tpl_msg = '{0} requires a "template" argument which must point to a file'.format(ct)
    wrong_tpl_msg += ' in "files" section. Got: {0}'.format(args.get('template', None))

    template = args.get('template')
    if not isinstance(template, dict):
        raise exceptions.CommandException(wrong_tpl_msg)

    source = template.get('source')
    if not isinstance(source, six.string_types):
        raise exceptions.CommandException(wrong_tpl_msg)
    template = source

    destination = args.get('destination')
    if not isinstance(destination, six.string_types):
        msg = '{0} requires a string "destination" argument. Got: {1}'.\
            format(ct, args.get('destination'))
        raise exceptions.CommandException(msg)
    if not os.path.isdir(destination):
        msg = '{0}: Specified "destination" directory "{1}" doesn\'t exist!'.\
            format(ct, destination)
        raise exceptions.CommandException(msg)

    # a missing or non-dict "data" silently becomes an empty context
    data = args.get('data') if isinstance(args.get('data'), dict) else {}
    logger.debug('Template context data: {0}'.format(data))

    # only the strings "true"/"yes" (any case) count as a truthy overwrite
    overwrite = str(args.get('overwrite', False)).lower() in ['true', 'yes']

    return (template, destination, data, overwrite)
def _try_obtain_common_params(cls, comm):
    """
    Retrieve parameters common for all jinja_render* actions from Command instance.

    These are mandatory:
    - 'template'    template descriptor from `files' section. it consist of
                    the only `source' key -- a name of template to use
    - 'data'        dict of parameters to use when rendering
    - 'destination' path for output files

    These are optional:
    - 'overwrite'   overwrite file(s) if it (they) exist(s)
    """
    args = comm.input_res
    ct = comm.comm_type

    # the same message serves both a missing/mistyped "template" dict and
    # a missing/mistyped "source" key inside it
    wrong_tpl_msg = '{0} requires a "template" argument which must point to a file'.format(ct)
    wrong_tpl_msg += ' in "files" section. Got: {0}'.format(args.get('template', None))
    if 'template' not in args or not isinstance(args['template'], dict):
        raise exceptions.CommandException(wrong_tpl_msg)
    template = args['template']

    if 'source' not in template or not isinstance(template['source'], six.string_types):
        raise exceptions.CommandException(wrong_tpl_msg)
    # from here on, "template" holds the template's source name (a string)
    template = template['source']

    if 'destination' not in args or not isinstance(args['destination'], six.string_types):
        msg = '{0} requires a string "destination" argument. Got: {1}'.\
            format(ct, args.get('destination'))
        raise exceptions.CommandException(msg)
    destination = args['destination']

    if not os.path.isdir(destination):
        msg = '{0}: Specified "destination" directory "{1}" doesn\'t exist!'.\
            format(ct, destination)
        raise exceptions.CommandException(msg)

    # rendering context is optional; a non-dict "data" is silently ignored
    data = {}
    if 'data' in args and isinstance(args['data'], dict):
        data = args['data']
    logger.debug('Template context data: {0}'.format(data))

    # only the strings "true"/"yes" (any case) count as a truthy overwrite
    overwrite = args.get('overwrite', False)
    overwrite = True if str(overwrite).lower() in ['true', 'yes'] else False

    return (template, destination, data, overwrite)
def load_all_assistants(cls, roles):
    """Fills cls._assistants with loaded YamlAssistant instances of
    requested roles.

    Tries to use cache (updated/created if needed). If cache is unusable,
    it falls back to loading all assistants. Roles already loaded are
    skipped.

    Args:
        roles: list of required assistant roles
    """
    to_load = set(roles) - set(cls._assistants.keys())
    for tl in to_load:
        dirs = [os.path.join(d, tl) for d in cls.assistants_dirs]
        file_hierarchy = cls.get_assistants_file_hierarchy(dirs)
        try:
            cch = cache.Cache()
            cch.refresh_role(tl, file_hierarchy)
            cls._assistants[tl] = cls.get_assistants_from_cache_hierarchy(cch.cache[tl],
                                                                          role=tl)
        except Exception as e:
            # was "except BaseException", which also swallowed
            # KeyboardInterrupt/SystemExit during cache refresh; on any
            # cache failure fall back to loading the files directly
            logger.debug(e)
            cls._assistants[tl] = cls.get_assistants_from_file_hierarchy(file_hierarchy,
                                                                         role=tl)
def run(cls, c):
    """Entry point: render template(s) according to the command type.

    'jinja_render' renders a single file, 'jinja_render_dir' renders a
    whole directory tree. Returns (True, 'success') on success.
    """
    # Transform list of dicts (where keys are unique) into a single dict
    args = c.input_res
    logger.debug("Jinja2Runner args={0}".format(repr(args)))

    # Create a jinja environment
    logger.debug("Using templates dir: {0}".format(c.files_dir))
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(c.files_dir))
    env.trim_blocks = True
    env.lstrip_blocks = True

    template, destination, data, overwrite = cls._try_obtain_common_params(c)

    if c.comm_type == "jinja_render":
        given_output = args.get("output", "")
        if not isinstance(given_output, six.string_types):
            msg = "Jinja2Runner: output must be string, got {0}".format(given_output)
            raise exceptions.CommandException(msg)
        result_fn = cls._make_output_file_name(destination, template, given_output)
        cls._render_one_template(env, template, result_fn, data, overwrite)
    elif c.comm_type == "jinja_render_dir":
        cls._render_dir(env, template, destination, data, overwrite)

    return (True, "success")
def run(cls, c):
    """Dispatch a jinja rendering command.

    'jinja_render' renders one file, 'jinja_render_dir' renders a
    directory tree. Returns (True, 'success') on success.
    """
    # Transform list of dicts (where keys are unique) into a single dict
    params = c.input_res
    logger.debug('Jinja2Runner args={0}'.format(repr(params)))

    # Create a jinja environment
    logger.debug('Using templates dir: {0}'.format(c.files_dir))
    jinja_env = jinja2.Environment(loader=jinja2.FileSystemLoader(c.files_dir))
    jinja_env.trim_blocks = True
    jinja_env.lstrip_blocks = True

    template, destination, data, overwrite = cls._try_obtain_common_params(c)

    if c.comm_type == 'jinja_render':
        requested_output = params.get('output', '')
        if not isinstance(requested_output, six.string_types):
            raise exceptions.CommandException('Jinja2Runner: output must be string, got {0}'.\
                format(requested_output))
        output_path = cls._make_output_file_name(destination, template, requested_output)
        cls._render_one_template(jinja_env, template, output_path, data, overwrite)
    elif c.comm_type == 'jinja_render_dir':
        cls._render_dir(jinja_env, template, destination, data, overwrite)

    return (True, 'success')
def _github_create_twofactor_authorization(cls, ui):
    """Create an authorization for a GitHub user using two-factor
    authentication. Unlike its non-two-factor counterpart, this method
    does not traverse the available authentications as they are not
    visible until the user logs in.

    Please note: cls._user's attributes are not accessible until the
    authorization is created due to the way (py)github works.
    """
    try:
        try:
            # This is necessary to trigger sending a 2FA key to the user
            auth = cls._user.create_authorization()
        except cls._gh_exceptions.GithubException:
            # the source line here was mangled by a credential-redaction
            # artifact ("...password:'******'repo'..."); reconstructed:
            # ask for the one-time password, then create the authorization
            onetime_pw = DialogHelper.ask_for_password(ui, prompt='Your one time password:')
            auth = cls._user.create_authorization(
                scopes=['repo', 'user', 'admin:public_key'],
                note="DevAssistant",
                onetime_password=onetime_pw)
        cls._user = cls._gh_module.Github(login_or_token=auth.token).get_user()
        logger.debug('Two-factor authorization for user "{0}" created'.format(cls._user.login))
        cls._github_store_authorization(cls._user, auth)
        logger.debug('Two-factor authorization token stored')
    except cls._gh_exceptions.GithubException as e:
        logger.warning('Creating two-factor authorization failed: {0}'.format(e))
def docker_group_active(cls):
    """Return True if the current user's *active* groups include "docker"."""
    logger.debug('Determining if current user has active "docker" group ...')
    # we have to run cl command, too see if the user has already re-logged
    # after being added to docker group, so that he can effectively use it
    active = 'docker' in ClHelper.run_command('groups').split()
    if active:
        logger.debug('Current user is in "docker" group.')
    else:
        logger.debug('Current user is not in "docker" group.')
    return active
def docker_group_active(cls):
    """Check whether "docker" is among the current user's active groups."""
    logger.debug(
        'Determining if current user has active "docker" group ...')
    # we have to run cl command, too see if the user has already re-logged
    # after being added to docker group, so that he can effectively use it
    groups = ClHelper.run_command('groups').split()
    if 'docker' not in groups:
        logger.debug('Current user is not in "docker" group.')
        return False
    logger.debug('Current user is in "docker" group.')
    return True
def _docker_group_active(cls):
    """Return (and memoize on the class) whether the current user's active
    groups include "docker".

    The result is computed once and cached in cls._has_docker_group.
    """
    if cls._has_docker_group is None:
        logger.debug('Determining if current user has active "docker" group ...')
        # we have to run cl command, too see if the user has already re-logged
        # after being added to docker group, so that he can effectively use it
        in_group = "docker" in ClHelper.run_command("groups").split()
        if in_group:
            logger.debug('Current user is in "docker" group.')
        else:
            logger.debug('Current user is not in "docker" group.')
        cls._has_docker_group = in_group
    return cls._has_docker_group
def run(cls, c):
    """Render a single Jinja2 template according to the command args.

    Returns (True, 'success') on success.
    Raises:
        exceptions.CommandException: on template failure, bad parameters,
            or an existing destination file without overwrite
    """
    # Transform list of dicts (where keys are unique) into a single dict
    args = c.format_deep(True)
    logger.debug('args={0}'.format(repr(args)))

    # Get parameters
    template, result_filename, data = cls._try_obtain_mandatory_params(args)

    # Create an environment! ("templates" typo fixed in the log message)
    logger.debug('Using templates dir: {0}'.format(c.files_dir))
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(c.files_dir))
    env.trim_blocks = True
    env.lstrip_blocks = True

    # Get a template instance
    tpl = None
    try:
        logger.debug('Using template file: {0}'.format(template))
        tpl = env.get_template(template)
    except jinja2.TemplateError as e:
        # str(e), not e.message: exceptions have no "message" attribute on
        # Python 3, so the old code raised AttributeError instead
        raise exceptions.CommandException('Template file failure: {0}'.format(str(e)))

    # Check if destination file exists, overwrite if needed
    if os.path.exists(result_filename):
        overwrite = args['overwrite'] if 'overwrite' in args else False
        overwrite = True if overwrite in ('True', 'true', 'yes') else False
        if overwrite:
            logger.info('Overwriting the destination file {0}'.format(result_filename))
            os.remove(result_filename)
        else:
            raise exceptions.CommandException('The destination file already exists: {0}'.format(result_filename))

    # Generate an output file finally...
    with open(result_filename, 'w') as out:
        result = tpl.render(**data)
        out.write(result)

    return (True, 'success')
def _debug_doesnt_work(cls, msg, name=None):
    """Log at DEBUG level that a manager is not operational.

    Args:
        msg: reason the manager cannot operate
        name: display name to report; falls back to the class name
    """
    who = name or cls.__name__
    logger.debug('{0} not operational - {1}'.format(who, msg))
def _debug_doesnt_work(cls, msg, name=None):
    """Emit a DEBUG message saying the (named) manager is not operational.

    Args:
        msg: reason the manager cannot operate
        name: display name to report; falls back to the class name
    """
    label = name or cls.__name__
    logger.debug('{0} not operational - {1}'.format(label, msg))