Example no. 1
0
def test_C7N_interpreted_runner():
    """
    This test does not use many mocks. In a way, it's an integration test.
    It also serves to demonstrate the C7N interface sequence.

    1. Create Environment.
    2. Compile.
    3. Build Program.
    4. Build Activation.
    5. Evaluate.
    """
    # DECLARATIONS is merged last so library declarations take precedence,
    # matching the original dict.update() ordering.
    annotations = {
        "Resource": celpy.celtypes.MapType,
        "Now": celpy.celtypes.TimestampType,
        "C7N": celpy.celtypes.Value,  # Generally, this is opaque to CEL
        **celpy.c7nlib.DECLARATIONS,
    }
    env = celpy.Environment(
        annotations=annotations,
        runner_class=celpy.c7nlib.C7N_Interpreted_Runner,
    )
    program = env.program(env.compile("1+1==2"), functions=celpy.c7nlib.FUNCTIONS)
    activation = {
        "Resource": celpy.celtypes.MapType({}),
        "Now": celpy.celtypes.TimestampType("2020-09-10T11:12:13Z"),
        "C7N": SimpleNamespace(filter=sentinel.the_filter, policy=sentinel.the_policy),
    }
    # Evaluation must succeed and must publish filter/policy into the
    # module-level C7N namespace as a side effect.
    assert program.evaluate(activation)
    assert celpy.c7nlib.C7N.policy == sentinel.the_policy
    assert celpy.c7nlib.C7N.filter == sentinel.the_filter
Example no. 2
0
def evaluate(context):
    """
    This does not use the :py:class:`celpy.c7nlib.C7NContext`.
    Instead, it provides the context and filter as arguments to :meth:`evaluate`.
    """
    cel = context.cel  # shorthand for the per-scenario working state
    # Library DECLARATIONS are merged last so they take precedence,
    # matching the original dict.update() ordering.
    annotations = {
        "Resource": celpy.celtypes.MapType,
        "Now": celpy.celtypes.TimestampType,
        **celpy.c7nlib.DECLARATIONS,
    }
    cel['env'] = celpy.Environment(
        annotations=annotations,
        runner_class=celpy.c7nlib.C7N_Interpreted_Runner,
    )
    cel['ast'] = cel['env'].compile(cel['source'])
    cel['prgm'] = cel['env'].program(cel['ast'], functions=celpy.c7nlib.FUNCTIONS)
    build_mock_resources(context)
    if "TZ_ALIASES" in cel:
        # Scenario-provided timezone aliases extend the global alias table.
        celpy.celtypes.TimestampType.TZ_ALIASES.update(cel["TZ_ALIASES"])
    try:
        cel['result'] = cel['prgm'].evaluate(
            context=cel['activation'],
            filter=cel['filter'],
        )
    except celpy.CELEvalError as ex:
        # Capture the evaluation error so a later step can assert on it.
        cel['result'] = ex
Example no. 3
0
def cel_compile(text: str) -> celpy.Runner:
    """Compile CEL *text* into a runnable program with Resource and Now declared."""
    annotations: Dict[str, celpy.Annotation] = {
        "Resource": celpy.celtypes.MapType,
        "Now": celpy.celtypes.TimestampType,
    }
    environment = celpy.Environment(annotations=annotations)
    return environment.program(environment.compile(text))
Example no. 4
0
def test_environment(mock_parser, mock_runner, mock_activation):
    """Exercise Environment compile/program/activation against mocked internals."""
    env = celpy.Environment(sentinel.package, {sentinel.variable, celtypes.UintType})

    # compile() delegates to the parser and returns its AST.
    tree = env.compile(sentinel.Source)
    assert tree == sentinel.AST
    assert mock_parser.return_value.parse.mock_calls == [call(sentinel.Source)]

    # program() wraps the AST and functions in the runner class.
    program = env.program(tree, functions=[sentinel.Function])
    assert program == mock_runner.return_value
    assert mock_runner.mock_calls == [call(env, sentinel.AST, [sentinel.Function])]

    # activation() forwards the annotations and package to the Activation class.
    act = env.activation()
    assert act == mock_activation.return_value
    assert mock_activation.mock_calls == [
        call(
            annotations={sentinel.variable, celtypes.UintType}, package=sentinel.package
        )
    ]
Example no. 5
0
    def run(self, error_limit: Optional[int] = None) -> None:
        """
        Compile this example's filter expression and evaluate it against every
        resource, accumulating per-resource timings, result counts, and errors.

        :param error_limit: if given, re-raise the ``CELEvalError`` after this
            many evaluation errors have occurred; ``None`` means never re-raise.
        """
        self.run_times: List[float] = []        # successful evaluations, ms each
        self.exception_times: List[float] = []  # failed evaluations, ms each
        # Keys are repr() strings of the raised CELEvalError (see the except
        # clause below), not exception objects.
        self.errors: Counter[str] = collections.Counter()
        self.results: Counter[celpy.celtypes.Value] = collections.Counter()

        decls = {"resource": celpy.celtypes.MapType}
        decls.update(celpy.c7nlib.DECLARATIONS)
        cel_env = celpy.Environment(annotations=decls)
        ast = cel_env.compile(self.example.filter_expr)
        program = cel_env.program(ast, functions=celpy.c7nlib.FUNCTIONS)

        if self.text_from:
            # Monkey-patch the module-level text_from hook used by c7nlib.
            celpy.c7nlib.__dict__['text_from'] = self.text_from

        overall_start = time.perf_counter()
        for resource in self.resources:
            start = time.perf_counter()
            activation = {
                "resource": celpy.json_to_cel(resource)
            }
            try:
                result = program.evaluate(activation)
                end = time.perf_counter()
                self.run_times.append((end-start)*1000)  # seconds -> ms
                self.results[result] += 1
            except celpy.CELEvalError as ex:
                end = time.perf_counter()
                self.exception_times.append((end-start)*1000)
                self.errors[repr(ex)] += 1
                logger.debug(repr(ex))
                logger.debug(resource)
                if error_limit:
                    # Count down; re-raise once the allowed error budget is spent.
                    error_limit -= 1
                    if error_limit == 0:
                        raise
        overall_end = time.perf_counter()
        self.overall_run = (overall_end-overall_start)*1000  # total wall time, ms
        self.volume = len(self.run_times) + len(self.exception_times)
Example no. 6
0
def step_impl(context):
    """Rewrite the C7N policy text to CEL and build the evaluation machinery."""
    context.cel['source'] = C7N_Rewriter.c7n_rewrite(context.text)
    # DECLARATIONS is merged last so library declarations take precedence,
    # matching the original dict.update() ordering.
    annotations = {
        "Resource": celpy.celtypes.MapType,
        "Now": celpy.celtypes.TimestampType,
        "C7N": celpy.celtypes.Value,  # Generally, this is opaque to CEL
        **celpy.c7nlib.DECLARATIONS,
    }
    context.cel['env'] = celpy.Environment(
        annotations=annotations,
        runner_class=celpy.c7nlib.C7N_Interpreted_Runner,
    )
    context.cel['ast'] = context.cel['env'].compile(context.cel['source'])
    context.cel['prgm'] = context.cel['env'].program(
        context.cel['ast'], functions=celpy.c7nlib.FUNCTIONS
    )
    # C7N namespace has active Policy, resource_manager, and filter_registry
    context.cel['activation'] = {
        "C7N": SimpleNamespace(
            filter=Mock(name="mock filter"),
            policy=Mock(name="mock policy"),
        ),
        "Resource": None,
        "Now": None,
    }
    print(f"\nCEL: {context.cel['source']}\n")