Code example #1
async def test_stream_processor_pull_unexpected_error(run_engine, _,
                                                      logger_class,
                                                      redis_stream,
                                                      redis_cache):
    logs.setup_logging()
    logger = logger_class.return_value

    run_engine.side_effect = Exception

    await worker.push(
        redis_stream,
        123,
        "owner",
        "repo",
        123,
        "pull_request",
        {"payload": "whatever"},
    )

    p = worker.StreamProcessor(redis_stream, redis_cache)
    await p.consume("stream~owner~123")
    await p.consume("stream~owner~123")

    # Exceptions have been logged, and Redis must be clean
    assert len(run_engine.mock_calls) == 2
    assert len(logger.error.mock_calls) == 2
    assert logger.error.mock_calls[0].args == (
        "failed to process pull request", )
    assert logger.error.mock_calls[1].args == (
        "failed to process pull request", )
    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert 0 == len(await redis_stream.hgetall("attempts"))
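
All of these examples initialize logging through logs.setup_logging(). The helper itself is not included on this page; judging by the daiquiri references in the conftest.py examples below, a minimal sketch could look like the following (the level and outputs are assumptions, not the project's actual configuration):

import logging

import daiquiri


def setup_logging() -> None:
    # Assumed minimal setup: daiquiri configures stdlib logging with
    # structured formatters; the real helper likely derives its level
    # and outputs from the service configuration.
    daiquiri.setup(level=logging.INFO, outputs=(daiquiri.output.STDOUT,))
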
Code example #2
File: bridge.py Project: eladb/mergify-engine
def run():
    parser = argparse.ArgumentParser()
    parser.add_argument("--clean", action="store_true")
    parser.add_argument("--dest", default="http://localhost:8802/event")

    args = parser.parse_args()

    logs.setup_logging()

    session = httpx.Client(trust_env=False)

    payload_data = os.urandom(250)
    payload_hmac = utils.compute_hmac(payload_data)

    if args.clean:
        # httpx doesn't allow data= here yet: https://github.com/encode/httpx/pull/900
        r = session.request(
            "DELETE",
            "https://gh.mergify.io/events-testing",
            data=payload_data,
            headers={"X-Hub-Signature": "sha1=" + payload_hmac},
        )
        r.raise_for_status()

    while True:
        try:
            # httpx doesn't allow data= here yet: https://github.com/encode/httpx/pull/900
            resp = session.request(
                "GET",
                "https://gh.mergify.io/events-testing",
                data=payload_data,
                headers={"X-Hub-Signature": "sha1=" + payload_hmac},
            )
            events = resp.json()
            for event in reversed(events):
                LOG.info("")
                LOG.info("==================================================")
                LOG.info(
                    ">>> GOT EVENT: %s %s/%s",
                    event["id"],
                    event["type"],
                    event["payload"].get("state", event["payload"].get("action")),
                )
                data = json.dumps(event["payload"])
                hmac = utils.compute_hmac(data.encode("utf8"))
                session.post(
                    args.dest,
                    headers={
                        "X-GitHub-Event": event["type"],
                        "X-GitHub-Delivery": event["id"],
                        "X-Hub-Signature": "sha1=%s" % hmac,
                        "Content-type": "application/json",
                    },
                    data=data,
                    verify=False,
                )
        except Exception:
            LOG.error("event handling failure", exc_info=True)
        time.sleep(1)
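
Both bridge scripts sign their requests with utils.compute_hmac, which is not shown here. GitHub's X-Hub-Signature header is an HMAC-SHA1 hex digest of the request body keyed with the webhook secret, so a plausible sketch is (the secret lookup is an assumption; the project presumably reads it from its configuration):

import hashlib
import hmac

WEBHOOK_SECRET = b"change-me"  # hypothetical placeholder for the configured secret


def compute_hmac(data: bytes) -> str:
    # HMAC-SHA1 hex digest, the value GitHub expects after the "sha1=" prefix
    return hmac.new(WEBHOOK_SECRET, data, hashlib.sha1).hexdigest()
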
Code example #3
File: import_check.py Project: kerick91/Cloud-Init
def main() -> int:
    logs.setup_logging()
    signals.setup()

    from mergify_engine.web.root import app  # noqa isort:skip
    from mergify_engine import worker  # noqa isort:skip
    from mergify_engine import actions  # noqa isort:skip

    return 0
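
This main() exists only to import the heaviest modules and surface import-time failures (syntax errors, circular imports) as a non-zero exit code. A typical way to expose it as a script would be (an assumption; the project may instead register a console entry point in its packaging metadata):

if __name__ == "__main__":
    import sys

    sys.exit(main())
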
Code example #4
File: bridge.py Project: v1v/mergify-engine
async def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--clean", action="store_true")
    parser.add_argument("--dest", default="http://localhost:8802/event")

    args = parser.parse_args()

    logs.setup_logging()

    payload_data = os.urandom(250)
    payload_hmac = utils.compute_hmac(payload_data)

    async with http.AsyncClient(
            base_url="https://test-forwarder.mergify.io",
            headers={"X-Hub-Signature": "sha1=" + payload_hmac},
    ) as session:

        if args.clean:
            r = await session.request("DELETE",
                                      "/events-testing",
                                      data=payload_data)
            r.raise_for_status()

        while True:
            try:
                resp = await session.request("GET",
                                             "/events-testing",
                                             data=payload_data)
                events = resp.json()
                for event in reversed(events):
                    LOG.info("")
                    LOG.info(
                        "==================================================")
                    LOG.info(
                        ">>> GOT EVENT: %s %s/%s",
                        event["id"],
                        event["type"],
                        event["payload"].get("state",
                                             event["payload"].get("action")),
                    )
                    data = json.dumps(event["payload"])
                    hmac = utils.compute_hmac(data.encode("utf8"))
                    await session.post(
                        args.dest,
                        headers={
                            "X-GitHub-Event": event["type"],
                            "X-GitHub-Delivery": event["id"],
                            "X-Hub-Signature": f"sha1={hmac}",
                            "Content-type": "application/json",
                        },
                        data=data,
                    )
            except Exception:
                LOG.error("event handling failure", exc_info=True)
            await asyncio.sleep(1)  # non-blocking sleep; time.sleep() here would block the event loop
Code example #5
File: conftest.py Project: leodica/mergify-engine
def logger_checker(request, caplog):
    # daiquiri removes all handlers during setup; since we want nice output and the
    # pytest capture capability at the same time, we must add back the pytest handler
    logs.setup_logging()
    logging.getLogger(None).addHandler(caplog.handler)
    yield
    for when in ("setup", "call", "teardown"):
        assert [] == [
            rec.getMessage() for rec in caplog.get_records(when)
            if rec.levelname in ("CRITICAL", "ERROR")
        ]
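
For this check to apply to every test, logger_checker would normally be registered as an autouse fixture; the decorator has been stripped from the snippet above, but the hookup presumably looks like this:

import pytest


@pytest.fixture(autouse=True)
def logger_checker(request, caplog):
    ...  # body as above
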
Code example #6
File: conftest.py Project: napetrov/mergify-engine
def logger_checker(request, caplog):
    # daiquiri removes all handlers during setup; since we want nice output and the
    # pytest capture capability at the same time, we must add back the pytest handler
    logs.setup_logging()
    logging.getLogger(None).addHandler(caplog.handler)
    yield
    for when in ("setup", "call", "teardown"):
        messages = [
            rec.getMessage() for rec in caplog.get_records(when)
            if rec.levelname in ("CRITICAL", "ERROR")
        ]
        # NOTE(sileht): The asyncio task spawned to automatically close Redis
        # connections cleanly is not held by a variable, making it hard to track.
        # Since this is only annoying for testing, just ignore messages about it.
        messages = [
            m for m in messages if
            "coro=<ConnectionPool.disconnect_on_idle_time_exceeded()" not in m
        ]
        assert [] == messages
Code example #7
def setup(service_name: str, dump_config: bool = True) -> None:
    service_name = "engine-" + service_name

    _version = os.environ.get("HEROKU_RELEASE_VERSION")

    if config.SENTRY_URL:  # pragma: no cover
        sentry_sdk.init(  # type: ignore[abstract]
            config.SENTRY_URL,
            max_breadcrumbs=10,
            release=_version,
            environment=config.SENTRY_ENVIRONMENT,
            integrations=[
                httpx.HttpxIntegration(),
            ],
        )
        sentry_sdk.utils.MAX_STRING_LENGTH = 2048

    ddtrace.config.version = _version
    statsd.constant_tags.append(f"service:{service_name}")
    ddtrace.config.service = service_name

    ddtrace.config.httpx["split_by_domain"] = True

    logs.setup_logging(dump_config=dump_config)

    # NOTE(sileht): For security reasons, we don't expose the env after this point.
    # Reading the env is allowed during module loading and pre-service initialization;
    # after that, it's not.
    envs_to_preserve = ("PATH", "LANG", "VIRTUAL_ENV")

    saved_env = {
        env: os.environ[env]
        for env in os.environ
        if env in envs_to_preserve or env.startswith("DD_")
    }
    os.environ.clear()
    os.environ.update(saved_env)
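
A service would call this once at startup, before anything else reads the environment, for example:

setup("worker")  # hypothetical call site; tags metrics and traces as "engine-worker"
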
Code example #8
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sentry_sdk.integrations.asgi import SentryAsgiMiddleware

from mergify_engine import config
from mergify_engine import logs
from mergify_engine.web import app as application  # noqa

if config.SENTRY_URL:
    application = SentryAsgiMiddleware(application)

logs.setup_logging()
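
Since application is a plain ASGI callable, it can be served by any ASGI server. For local development something like uvicorn would work (the module path and port are assumptions; the port merely mirrors the bridge's default --dest above):

import uvicorn

uvicorn.run("mergify_engine.asgi:application", host="127.0.0.1", port=8802)
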
Code example #9
File: asgi.py Project: eladb/mergify-engine
# -*- encoding: utf-8 -*-
#
# Copyright © 2017 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

from sentry_sdk.integrations.asgi import SentryAsgiMiddleware

from mergify_engine import config
from mergify_engine import logs
from mergify_engine.web import app as application  # noqa

if config.SENTRY_URL:
    application = SentryAsgiMiddleware(application)

logs.setup_logging(worker="asgi")
Code example #10
async def test_stream_processor_retrying_stream_failure(
        run_engine, _, logger, redis_stream, redis_cache):
    logs.setup_logging()

    response = mock.Mock()
    response.json.return_value = {"message": "boom"}
    response.status_code = 401
    run_engine.side_effect = http.HTTPClientSideError(message="foobar",
                                                      request=response.request,
                                                      response=response)

    await worker.push(
        redis_stream,
        123,
        "owner",
        "repo",
        123,
        "pull_request",
        {"payload": "whatever"},
    )
    await worker.push(
        redis_stream,
        123,
        "owner",
        "repo",
        123,
        "comment",
        {"payload": "foobar"},
    )

    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert 2 == await redis_stream.xlen("stream~owner~123")
    assert 0 == len(await redis_stream.hgetall("attempts"))

    p = worker.StreamProcessor(redis_stream, redis_cache)
    await p.consume("stream~owner~123")

    assert len(run_engine.mock_calls) == 1
    assert run_engine.mock_calls[0] == mock.call(
        InstallationMatcher(owner="owner"),
        "repo",
        123,
        [
            {
                "event_type": "pull_request",
                "data": {
                    "payload": "whatever"
                },
                "timestamp": mock.ANY,
            },
            {
                "event_type": "comment",
                "data": {
                    "payload": "foobar"
                },
                "timestamp": mock.ANY,
            },
        ],
    )

    # Check stream still there and attempts recorded
    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert 1 == len(await redis_stream.hgetall("attempts"))

    assert {
        b"stream~owner~123": b"1"
    } == await redis_stream.hgetall("attempts")

    await p.consume("stream~owner~123")
    assert len(run_engine.mock_calls) == 2
    assert {
        b"stream~owner~123": b"2"
    } == await redis_stream.hgetall("attempts")

    await p.consume("stream~owner~123")
    assert len(run_engine.mock_calls) == 3

    # Still there
    assert 3 == len(logger.info.mock_calls)
    assert 0 == len(logger.error.mock_calls)
    assert logger.info.mock_calls[0].args == (
        "failed to process stream, retrying", )
    assert logger.info.mock_calls[1].args == (
        "failed to process stream, retrying", )
    assert logger.info.mock_calls[2].args == (
        "failed to process stream, retrying", )
    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert 1 == len(await redis_stream.hgetall("attempts"))
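
The assertions above pin down the retry bookkeeping: every failed consume() increments a per-stream counter in the "attempts" hash, and the counter survives until the stream either succeeds or is abandoned. A minimal sketch of that bookkeeping, assuming a redis-py-style async client and a hypothetical MAX_RETRIES constant:

MAX_RETRIES = 3  # hypothetical; the real limit lives in the worker configuration


async def record_failure(redis, stream_name: str) -> bool:
    # Increment the per-stream failure counter; report whether a retry
    # is still allowed or the stream should be abandoned and cleaned up.
    attempts = await redis.hincrby("attempts", stream_name, 1)
    if attempts >= MAX_RETRIES:
        await redis.hdel("attempts", stream_name)
        return False
    return True
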
Code example #11
async def test_stream_processor_retrying_pull(run_engine, _, logger_class,
                                              redis_stream, redis_cache):
    logs.setup_logging()
    logger = logger_class.return_value

    # One retries once, the other reaches max_retry
    run_engine.side_effect = [
        exceptions.MergeableStateUnknown(mock.Mock()),
        exceptions.MergeableStateUnknown(mock.Mock()),
        mock.Mock(),
        exceptions.MergeableStateUnknown(mock.Mock()),
        exceptions.MergeableStateUnknown(mock.Mock()),
    ]

    await worker.push(
        redis_stream,
        123,
        "owner",
        "repo",
        123,
        "pull_request",
        {"payload": "whatever"},
    )
    await worker.push(
        redis_stream,
        123,
        "owner",
        "repo",
        42,
        "comment",
        {"payload": "foobar"},
    )

    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert 2 == await redis_stream.xlen("stream~owner~123")
    assert 0 == len(await redis_stream.hgetall("attempts"))

    p = worker.StreamProcessor(redis_stream, redis_cache)
    await p.consume("stream~owner~123")

    assert len(run_engine.mock_calls) == 2
    assert run_engine.mock_calls == [
        mock.call(
            InstallationMatcher(owner="owner"),
            "repo",
            123,
            [
                {
                    "event_type": "pull_request",
                    "data": {
                        "payload": "whatever"
                    },
                    "timestamp": mock.ANY,
                },
            ],
        ),
        mock.call(
            InstallationMatcher(owner="owner"),
            "repo",
            42,
            [
                {
                    "event_type": "comment",
                    "data": {
                        "payload": "foobar"
                    },
                    "timestamp": mock.ANY,
                },
            ],
        ),
    ]

    # Check stream still there and attempts recorded
    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert {
        b"pull~owner~repo~42": b"1",
        b"pull~owner~repo~123": b"1",
    } == await redis_stream.hgetall("attempts")

    await p.consume("stream~owner~123")
    assert 1 == (await redis_stream.zcard("streams"))
    assert 1 == len(await redis_stream.keys("stream~*"))
    assert 1 == len(await redis_stream.hgetall("attempts"))
    assert len(run_engine.mock_calls) == 4
    assert {
        b"pull~owner~repo~42": b"2"
    } == await redis_stream.hgetall("attempts")

    await p.consume("stream~owner~123")
    assert len(run_engine.mock_calls) == 5

    # Too many retries, everything is gone
    assert 3 == len(logger.info.mock_calls)
    assert 1 == len(logger.error.mock_calls)
    assert logger.info.mock_calls[0].args == (
        "failed to process pull request, retrying", )
    assert logger.info.mock_calls[1].args == (
        "failed to process pull request, retrying", )
    assert logger.error.mock_calls[0].args == (
        "failed to process pull request, abandoning", )
    assert 0 == (await redis_stream.zcard("streams"))
    assert 0 == len(await redis_stream.keys("stream~*"))
    assert 0 == len(await redis_stream.hgetall("attempts"))
Code example #12
File: worker.py Project: imam121genk/mergify-engine
def main() -> None:
    logs.setup_logging()
    return asyncio.run(run_forever())
Code example #13
def main():
    logs.setup_logging()
    asyncio.run(run_forever())
Code example #14
File: test_worker.py Project: eladb/mergify-engine
async def test_stream_processor_retrying_stream_failure(
        run_engine, get_install_by_id, logger, redis):
    get_install_by_id.side_effect = fake_install_id
    logs.setup_logging(worker="streams")

    response = mock.Mock()
    response.json.return_value = {"message": "boom"}
    response.status_code = 401
    run_engine.side_effect = httpx.HTTPClientSideError(response=response)

    await worker.push(
        redis,
        12345,
        "owner",
        "repo",
        123,
        "pull_request",
        {"payload": "whatever"},
    )
    await worker.push(
        redis,
        12345,
        "owner",
        "repo",
        123,
        "comment",
        {"payload": "foobar"},
    )

    assert 1 == (await redis.zcard("streams"))
    assert 1 == len(await redis.keys("stream~*"))
    assert 2 == await redis.xlen("stream~12345")
    assert 0 == len(await redis.hgetall("attempts"))

    p = worker.StreamProcessor(redis)
    await p.consume("stream~12345")

    assert len(run_engine.mock_calls) == 1
    assert run_engine.mock_calls[0] == mock.call(
        fake_install_id(12345),
        "owner",
        "repo",
        123,
        [
            {
                "event_type": "pull_request",
                "data": {
                    "payload": "whatever"
                }
            },
            {
                "event_type": "comment",
                "data": {
                    "payload": "foobar"
                }
            },
        ],
    )

    # Check stream still there and attempts recorded
    assert 1 == (await redis.zcard("streams"))
    assert 1 == len(await redis.keys("stream~*"))
    assert 1 == len(await redis.hgetall("attempts"))

    assert {b"stream~12345": b"1"} == await redis.hgetall("attempts")

    await p.consume("stream~12345")
    assert len(run_engine.mock_calls) == 2
    assert {b"stream~12345": b"2"} == await redis.hgetall("attempts")

    await p.consume("stream~12345")
    assert len(run_engine.mock_calls) == 3

    # Still there
    assert 3 == len(logger.info.mock_calls)
    assert 0 == len(logger.error.mock_calls)
    assert logger.info.mock_calls[0].args == (
        "failed to process stream, retrying", )
    assert logger.info.mock_calls[1].args == (
        "failed to process stream, retrying", )
    assert logger.info.mock_calls[2].args == (
        "failed to process stream, retrying", )
    assert 1 == (await redis.zcard("streams"))
    assert 1 == len(await redis.keys("stream~*"))
    assert 1 == len(await redis.hgetall("attempts"))
Code example #15
File: worker.py Project: eladb/mergify-engine
def main():
    uvloop.install()
    logs.setup_logging(worker="streams")
    asyncio.run(run_forever())
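
Note that uvloop.install() swaps asyncio's default event loop policy for the libuv-based implementation, so it must run before asyncio.run() creates the loop, exactly as ordered above.
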
Code example #16
def celery_logging(**kwargs):  # pragma: no cover
    logs.setup_logging(worker="celery")
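
The **kwargs signature indicates this handler is meant for Celery's setup_logging signal, which, once connected, overrides Celery's own logging configuration. The hookup would look like this (only the import and decorator are added; they are implied by the signature but not shown in the snippet):

from celery.signals import setup_logging


@setup_logging.connect
def celery_logging(**kwargs):  # pragma: no cover
    logs.setup_logging(worker="celery")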