def render(value, **kwargs):
    """
    Render the given text as a Jinja2 template using the provided variables.

    Args:
        value (str): the template to be rendered.
        kwargs (dict): named parameters representing available variables
                       inside the template.

    >>> model = {"message": "hello world 1!"}
    >>> rendered_text = render("{{model.message}}", model=model)
    >>> rendered_text == 'hello world 1!'
    True
    >>> model = {"message": "hello world 2!", "template": "{{ model.message }}"}
    >>> rendered_text = render("{{ model.template|render(model=model) }}", model=model)
    >>> rendered_text == 'hello world 2!'
    True

    The pipeline process is all about Bash code (inside and outside Docker)
    and autoescaping wouldn't help. Usually the pipeline runs in an isolated
    environment and there should not be any injection from outside; that's
    why: nosec.
    """
    try:
        environment = Environment(autoescape=False)  # nosec
        # expose spline specific filters to all templates
        environment.filters.update({
            'render': render,
            'docker_environment': docker_environment,
            'find_matrix': find_matrix,
            'find_stages': find_stages,
        })
        return environment.from_string(value).render(**kwargs)
    except UndefinedError as exception:
        Logger.get_logger(__name__).error("render(undefined): %s", exception)
    except TemplateSyntaxError as exception:
        Logger.get_logger(__name__).error("render(syntax error): %s", exception)
    # any rendering failure yields None (errors have been logged already)
    return None
def generate(store, report_format, path):
    """
    Generate file in defined format representing the report of pipeline(s).

    Args:
        store (Store): report data.
        report_format (str): currently "html" is supported only.
        path (str): path where to write the report to.
                    Missing sub folders will be created.

    Returns:
        bool: True when the report has been written successfully.
    """
    # dispatch table: report format -> renderer function
    renderers = {'html': generate_html}

    if report_format not in renderers:
        Logger.get_logger(__name__).error("Unknown report format %s", report_format)
        return False

    rendered_content = renderers[report_format](store)

    if not os.path.isdir(path):
        os.makedirs(path)

    if rendered_content is None:
        return False

    # writing report file
    with open(os.path.join(path, 'pipeline.' + report_format), 'w') as handle:
        handle.write(rendered_content)
    return True
def configure(**kwargs):
    """Global configuration for event handling."""
    for key, value in kwargs.items():
        if key == 'is_logging_enabled':
            Event.is_logging_enabled = value
        elif key == 'collector_queue':
            Event.collector_queue = value
        else:
            Logger.get_logger(__name__).error(
                "Unknown key %s in configure or bad type %s", key, type(value))
def run(self):
    """Collector main loop."""
    # consume queue entries until the None sentinel signals shutdown
    for data in iter(self.queue.get, None):
        # updating the report data
        self.store.update(data)
        # writing the report
        generate(self.store, 'html', os.getcwd())
    Logger.get_logger(__name__).info(
        "Stopping collector process ...")
def get_tokens(condition):
    """
    Get AST tokens for Python condition.

    Args:
        condition (str): Python expression/condition as string.

    Returns:
        list: list of AST tokens (empty on syntax error).
    """
    try:
        tree = ast.parse(condition.strip())
    except SyntaxError as exception:
        Logger.get_logger(__name__).error("Syntax error: %s", exception)
        return []
    return list(ast.walk(tree))
def __init__(self, context, timestamp, **kwargs):
    """Initialize event with optional additional information."""
    self.context = context
    self.created = timestamp
    self.finished = timestamp
    self.status = 'started'
    # arbitrary extra details attached to this event
    self.information = dict(kwargs)
    self.update_report_collector(int(time.mktime(self.created.timetuple())))
    # a real logger only when event logging is globally enabled
    logger_name = context + ".event" if Event.is_logging_enabled else None
    self.logger = Logger.get_logger(logger_name)
def __init__(self, pipeline, parallel):
    """Initialize with reference to the pipeline main object."""
    self.event = Event.create(__name__)
    self.pipeline = pipeline
    # parallel execution is never used when doing a dry run
    self.parallel = False if pipeline.options.dry_run else parallel
    self.logger = Logger.get_logger(__name__)
    self.next_task_id = 1
class VersionsCheck(object):
    """Evaluating versions of required tools."""

    LOGGER = Logger.get_logger(__name__)
    """Logger instance for this class."""

    BASH_VERSION = r'''bash --version|head -1|grep -Po "\d+(\.\d+)+"'''
    """Find Bash version."""
    DOCKER_VERSION = r'''docker version|grep "Version:"|head -1|grep -Po "\d+(\.\d+)+"'''
    """Find Docker version."""
    PACKER_VERSION = r'''packer -version'''
    """Find Packer version."""
    ANSIBLE_VERSION = r'''ansible --version|grep -Po "\d+(\.\d+)+"'''
    """Find Ansible version."""

    def __init__(self):
        """Do nothing for the moment."""
        pass

    def process(self, document):
        """
        Logging versions of required tools.

        Args:
            document (dict): pipeline definition to scan for tool usage.

        Returns:
            dict: mapping of tool name to detected Version.
        """
        content = json.dumps(document)
        versions = {'Spline': Version(VERSION)}
        # Bash is always required by the pipeline
        versions.update(VersionsCheck.get_version("Bash", self.BASH_VERSION))
        # remaining tools are only checked when the document references them
        if content.find('"docker(container)":') >= 0 or content.find('"docker(image)":') >= 0:
            versions.update(VersionsCheck.get_version("Docker", self.DOCKER_VERSION))
        if content.find('"packer":') >= 0:
            versions.update(VersionsCheck.get_version("Packer", self.PACKER_VERSION))
        if content.find('"ansible(simple)":') >= 0:
            versions.update(VersionsCheck.get_version('Ansible', self.ANSIBLE_VERSION))
        return versions

    @staticmethod
    def get_version(tool_name, tool_command):
        """
        Get name and version of a tool defined by given command.

        Args:
            tool_name (str): name of the tool.
            tool_command (str): Bash one line command to get the version of the tool.

        Returns:
            dict: tool name and version or empty when no line has been found

        Note: exits the whole process when the required tool is not installed.
        """
        result = {}
        for line in Bash(ShellConfig(script=tool_command, internal=True)).process():
            if line.find("command not found") >= 0:
                VersionsCheck.LOGGER.error(
                    "Required tool '%s' not found (stopping pipeline)!", tool_name)
                sys.exit(1)
            # only the first line of output is relevant
            result = {tool_name: Version(line)}
            break
        return result
def test_get_logger_with_external_configuration(self):
    """Testing function Logger.get_logger for real logger."""
    Logger.use_external_configuration = True
    # ensure the class level flag is reset even when an assertion fails,
    # otherwise the state would leak into other tests
    try:
        with patch('logging.getLogger') as mocked_logging_get_logger:
            logger = Logger.get_logger('test')
            mocked_logging_get_logger.assert_called_with('test')
            assert_that(logger.propagate, equal_to(False))
    finally:
        Logger.use_external_configuration = False
def __init__(self, model=None, env=None, options=None):
    """Initializing pipeline with definition (loaded from a yaml file)."""
    self.event = Event.create(__name__)
    self.options = options
    # fall back to an empty model when none (or a non-dict) is given
    self.model = model if isinstance(model, dict) else {}
    self.data = PipelineData()
    self.data.env_list[0].update(env if env is not None else [])
    self.logger = Logger.get_logger(__name__)
    self.variables = {}
def matrix_worker(data):
    """
    Run pipelines in parallel.

    Args:
        data(dict): parameters for the pipeline (model, options, ...).

    Returns:
        dict: with two fields: success True/False and captured output (list of str).
    """
    matrix = data['matrix']
    worker_logger = Logger.get_logger(__name__ + '.worker')
    worker_logger.info("Processing pipeline for matrix entry '%s'", matrix['name'])

    # copy of the matrix environment, tagged with the matrix name
    env = dict(matrix['env'], PIPELINE_MATRIX=matrix['name'])

    pipeline = Pipeline(model=data['model'], env=env, options=data['options'])
    pipeline.hooks = data['hooks']
    return pipeline.process(data['pipeline'])
def __init__(self, **kwargs):
    """
    Initializing and validating fields.

    Args:
        kwargs (dict): application command line options.

    Raises:
        RuntimeError: when validation of parameters has failed.
    """
    try:
        validated = CollectorStage.schema_complete().validate(kwargs)
        arguments = Adapter(validated)
        self.stage = arguments.stage
        self.status = arguments.status
        self.events = arguments.events
    except SchemaError as exception:
        Logger.get_logger(__name__).error(exception)
        raise RuntimeError(str(exception))
def test_no_logger(self):
    """Testing retrieval of NoLogger instance."""
    logger = Logger.get_logger(None)
    assert_that(isinstance(logger, NoLogger), equal_to(True))
    with stdout_redirector() as stream:
        # none of these calls may produce any output
        for log_method in (logger.info, logger.warning, logger.severe):
            log_method("hello")
    assert_that(len(stream.getvalue()), equal_to(0))
def __init__(self, **options):
    """
    Initialize application with command line options.

    Args:
        options (ApplicationOptions): given command line options.
    """
    self.options = options
    self.results = []
    self.logging_level = logging.DEBUG
    # configure logging before the first logger is requested
    self.setup_logging()
    self.logger = Logger.get_logger(__name__)
def worker(data):
    """
    Running on shell via multiprocessing.

    Args:
        data (dict): parameters to build and run the shell: creator name,
                     entry, model, env, item, flags and scripts path.

    Returns:
        dict: id of the task, success flag and captured output (list of str).
    """
    creator = get_creator_by_name(data['creator'])
    entry = data['entry']
    shell = creator(entry, ShellConfig(
        script=entry['script'],
        # dict.get instead of the 'in' check: same result, one lookup
        title=entry.get('title', ''),
        model=data['model'],
        env=data['env'],
        item=data['item'],
        dry_run=data['dry_run'],
        debug=data['debug'],
        strict=data['strict'],
        variables=data['variables'],
        temporary_scripts_path=data['temporary_scripts_path']))

    output = []
    # hoisted out of the loop: one logger lookup instead of one per line
    logger = Logger.get_logger(__name__ + '.worker')
    for line in shell.process():
        output.append(line)
        logger.info(" | %s", line)
    return {'id': data['id'], 'success': shell.success, 'output': output}
def add(self, timestamp, information):
    """
    Add event information.

    Args:
        timestamp (int): event timestamp.
        information (dict): event information.

    Raises:
        RuntimeError: when validation of parameters has failed.
    """
    event_data = {'timestamp': timestamp, 'information': information}
    try:
        validated = Schema(CollectorStage.schema_event_items()).validate(event_data)
        self.events.append(validated)
    except SchemaError as exception:
        Logger.get_logger(__name__).error(exception)
        raise RuntimeError(str(exception))
def evaluate(condition):
    """
    Evaluate simple condition.

    >>> Condition.evaluate(' 2 == 2 ')
    True
    >>> Condition.evaluate(' not 2 == 2 ')
    False
    >>> Condition.evaluate(' not "abc" == "xyz" ')
    True
    >>> Condition.evaluate('2 in [2, 4, 6, 8, 10]')
    True
    >>> Condition.evaluate('5 in [2, 4, 6, 8, 10]')
    False
    >>> Condition.evaluate('"apple" in ["apple", "kiwi", "orange"]')
    True
    >>> Condition.evaluate('5 not in [2, 4, 6, 8, 10]')
    True
    >>> Condition.evaluate('"apple" not in ["kiwi", "orange"]')
    True

    Args:
        condition (str): Python condition as string.

    Returns:
        bool: True when condition evaluates to True.
    """
    # an empty condition always evaluates successfully
    if not condition:
        return True

    success = False
    try:
        rule_name, ast_tokens, evaluate_function = Condition.find_rule(condition)
        if rule_name != 'undefined':
            success = evaluate_function(ast_tokens)
    except AttributeError as exception:
        Logger.get_logger(__name__).error("Attribute error: %s", exception)
    return success
class VersionsReport(object):
    """Logging versions."""

    LOGGER = Logger.get_logger(__name__)
    """Logger instance for this class."""

    def process(self, versions):
        """Logging version sorted ascending by tool name."""
        # sorted on (tool name, version) pairs; names are unique so the
        # ordering is effectively by tool name
        for tool_name, version in sorted(versions.items()):
            self._log("Using tool '%s', %s" % (tool_name, version))

    def _log(self, message):
        """Logging a message."""
        self.LOGGER.info(message)
def __init__(self, model=None, env=None, options=None):
    """
    Initializing pipeline with definition (loaded from a yaml file).

    Args:
        model (dict): if you have a model defined in your pipeline definition (yaml)
        env (dict): the env as defined (if) per matrix
        options (dict): command line options for spline
    """
    self.event = Event.create(__name__)
    self.options = options
    # use an empty model when none (or a non-dict) has been provided
    self.model = model if isinstance(model, dict) else {}
    self.data = PipelineData()
    if env is not None:
        self.data.env_list[0].update(env)
    self.logger = Logger.get_logger(__name__)
    self.variables = {}
def __init__(self, config):
    """
    Initialize with Bash code and optional environment variables.

    Args:
        config(ShellConfig): options for configuring Bash environment and behavior
    """
    self.event = Event.create(__name__)
    self.logger = Logger.get_logger(__name__)
    self.config = config
    self.success = True
    # private copy of the environment variables given via configuration
    self.env = dict(config.env)
    # capture stdout via pipe and merge stderr into it
    self.stdout = subprocess.PIPE
    self.stderr = subprocess.STDOUT
    self.shell = False
    self.exit_code = 0
def __init__(self, matrix, parallel=False):
    """Initialize pipeline with matrix data."""
    # matrix entries to process and whether to run them in parallel
    self.matrix = matrix
    self.parallel = parallel
    self.event = Event.create(__name__)
    self.logger = Logger.get_logger(__name__)
def test_get_logger(self):
    """Testing function Logger.get_logger for real logger."""
    with patch('logging.getLogger') as get_logger_mock:
        Logger.get_logger('test')
        get_logger_mock.assert_called_with('test')