Example #1
0
def test_stream_commits_state_upgrade():
    """A legacy (repository-only) Commits state is upgraded to per-branch state."""
    repo = "organization/repository"

    stream = Commits(
        repositories=[repo],
        page_size_for_large_streams=100,
        start_date="2022-02-02T10:10:02Z",
        branches_to_pull={repo: ["master"]},
        default_branches={repo: "master"},
    )

    # Two commits on master; only the one strictly newer than the cursor
    # should come back from the incremental read.
    responses.add(
        "GET",
        f"https://api.github.com/repos/{repo}/commits",
        json=[
            {"sha": 1, "commit": {"author": {"date": "2022-02-02T10:10:02Z"}}},
            {"sha": 2, "commit": {"author": {"date": "2022-02-02T10:10:04Z"}}},
        ],
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:10:02Z", "sha": "master"}, strict_match=False)],
    )

    # Old-style state: cursor keyed by repository only, no branch level.
    state = {repo: {"created_at": "2022-02-02T10:10:02Z"}}
    records = read_incremental(stream, state)

    assert [record["sha"] for record in records] == [2]
    # After the read the state is nested under the branch name.
    assert state == {repo: {"master": {"created_at": "2022-02-02T10:10:04Z"}}}
Example #2
0
def test_existing_user_is_member(faker):
    """A person found by email whose taggings include the voting tag is a member."""
    person_uuid = faker.uuid4()
    tag_uuid = faker.uuid4()
    assert person_uuid != tag_uuid

    people_url = "https://actionnetwork.org/api/v2/people"

    # Email lookup resolves to a single person link.
    responses.add(
        responses.GET,
        url=people_url,
        match=[matchers.query_param_matcher({"filter": "email_address eq '*****@*****.**'"})],
        json={"_links": {"osdi:people": [{"href": f"{people_url}/{person_uuid}"}]}},
    )

    # That person's taggings carry a link to the voting tag.
    responses.add(
        responses.GET,
        url=f"{people_url}/{person_uuid}/taggings",
        json={
            "_embedded": {
                "osdi:taggings": [
                    {
                        "_links": {
                            "osdi:tag": {
                                "href": f"https://actionnetwork.org/api/v2/tags/{tag_uuid}"
                            }
                        }
                    }
                ]
            }
        },
    )

    assert tag_uuid in an.Taggings(person_uuid).tags
Example #3
0
 def run():
     """Non-string query param values (float/int) still match their serialized form."""
     query = {"float": 5.0, "int": 2}
     responses.add(
         responses.GET,
         "https://example.com/",
         match=[matchers.query_param_matcher(query)],
     )
     requests.get("https://example.com", params=query)
Example #4
0
 def run():
     """With strict_match=False, extra request params are ignored by the matcher."""
     registered = {"only_one_param": "test"}
     responses.add(
         responses.GET,
         "https://example.com/",
         match=[matchers.query_param_matcher(registered, strict_match=False)],
     )
     # The additional "second" param does not break the non-strict match.
     requests.get(
         "https://example.com", params={"only_one_param": "test", "second": "param"}
     )
Example #5
0
def setup_responses(responses: RequestsMock,
                    search_term: str,
                    body: str,
                    num_pages: int = 5):
    """Register one mocked recipe-search response per page (1..num_pages)."""
    for page_number in range(1, num_pages + 1):
        # Each page is matched on both the search term and its page number.
        responses.add(
            method=responses.GET,
            url=RECIPE_SEARCH_URI,
            match=[matchers.query_param_matcher({"search": search_term, "page": str(page_number)})],
            headers={"Cookie": "euConsent=true"},
            body=body,
        )
Example #6
0
def test_people_from_email(faker, people_endpoint):
    """Looking up People by email yields at least one id."""
    email_address = faker.email()

    # The people endpoint, filtered by email, returns one person link.
    responses.add(
        responses.GET,
        url=people_endpoint,
        match=[matchers.query_param_matcher({"filter": f"email_address eq '{email_address}'"})],
        json={"_links": {"osdi:people": [{"href": people_endpoint + faker.uuid4()}]}},
        status=200,
    )

    assert len(an.People(email=email_address).ids) > 0
Example #7
0
def test_get_uuid_when_doesnt_have_one(faker):
    """get_uuid() resolves the uuid via the people endpoint when none is cached."""
    expected_uuid = faker.uuid4()
    email = faker.email()
    base_url = "https://actionnetwork.org/api/v2/people"

    # Email lookup returns a person href ending in the uuid we expect back.
    responses.add(
        responses.GET,
        url=base_url,
        match=[matchers.query_param_matcher({"filter": f"email_address eq '{email}'"})],
        json={"_links": {"osdi:people": [{"href": f"{base_url}/{expected_uuid}"}]}},
    )

    assert User(email=email).get_uuid() == expected_uuid
Example #8
0
    def run():
        """A non-strict param mismatch produces a helpful aggregated error message."""
        registered = {"does_not_exist": "test"}
        responses.add(
            responses.GET,
            "https://example.com/",
            match=[matchers.query_param_matcher(registered, strict_match=False)],
        )

        with pytest.raises(ConnectionError) as exc:
            requests.get(
                "https://example.com",
                params={"only_one_param": "test", "second": "param"},
            )

        # None of the request's params overlap the registered ones, so the
        # matcher reports an empty intersection and hints at strict_match.
        expected_message = (
            "- GET https://example.com/ Parameters do not match. {} doesn't"
            " match {does_not_exist: test}\n"
            "You can use `strict_match=True` to do a strict parameters check."
        )
        assert expected_message in str(exc.value)
Example #9
0
    def run():
        """Parameter order must not affect matching, and params round-trip on the request."""
        url = "http://example.com/test"
        responses.add(
            method=responses.GET,
            url=url,
            body="test",
            match=[matchers.query_param_matcher({"hello": "world", "I am": "a big test"})],
            match_querystring=False,
        )

        # Same parameters as registered, just in the opposite order.
        reordered = {
            "I am": "a big test",
            "hello": "world",
        }
        resp = requests.get(url, params=reordered)

        expected_url = r"http://example.com/test?I+am=a+big+test&hello=world"
        assert resp.url == expected_url
        assert resp.request.url == expected_url
        assert resp.request.params == reordered
Example #10
0
def test_stream_workflow_runs_read_incremental(monkeypatch):
    """End-to-end incremental read for WorkflowRuns with the page size forced to 1.

    The first sync pages through all four runs and stores the newest
    ``updated_at`` as the cursor. The fixture data is then mutated (one new run
    added, one old run updated) and the second sync must emit only those two.
    """

    repository_args_with_start_date = {
        "repositories": ["org/repos"],
        "page_size_for_large_streams": 30,
        "start_date": "2022-01-01T00:00:00Z",
    }

    # Force one record per page so every run lands on its own mocked page.
    monkeypatch.setattr(streams, "DEFAULT_PAGE_SIZE", 1)
    stream = WorkflowRuns(**repository_args_with_start_date)

    # Runs are listed newest-first (by created_at), mirroring the API ordering.
    data = [
        {"id": 4, "created_at": "2022-02-05T00:00:00Z", "updated_at": "2022-02-05T00:00:00Z", "repository": {"full_name": "org/repos"}},
        {"id": 3, "created_at": "2022-01-15T00:00:00Z", "updated_at": "2022-01-15T00:00:00Z", "repository": {"full_name": "org/repos"}},
        {"id": 2, "created_at": "2022-01-03T00:00:00Z", "updated_at": "2022-01-03T00:00:00Z", "repository": {"full_name": "org/repos"}},
        {"id": 1, "created_at": "2022-01-02T00:00:00Z", "updated_at": "2022-01-02T00:00:00Z", "repository": {"full_name": "org/repos"}},
    ]

    # First sync: four pages of one run each; pages 1-3 advertise a next page
    # via the Link header, page 4 does not, so paging stops there.
    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[0:1]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=2>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1"}, strict_match=True)],
    )

    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[1:2]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=3>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1", "page": "2"}, strict_match=True)],
    )

    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[2:3]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=4>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1", "page": "3"}, strict_match=True)],
    )

    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[3:4]},
        match=[matchers.query_param_matcher({"per_page": "1", "page": "4"}, strict_match=True)],
    )

    state = {}
    records = read_incremental(stream, state)
    # Cursor advances to the newest run's updated_at.
    assert state == {"org/repos": {"updated_at": "2022-02-05T00:00:00Z"}}

    assert records == [
        {"id": 4, "repository": {"full_name": "org/repos"}, "created_at": "2022-02-05T00:00:00Z", "updated_at": "2022-02-05T00:00:00Z"},
        {"id": 3, "repository": {"full_name": "org/repos"}, "created_at": "2022-01-15T00:00:00Z", "updated_at": "2022-01-15T00:00:00Z"},
        {"id": 2, "repository": {"full_name": "org/repos"}, "created_at": "2022-01-03T00:00:00Z", "updated_at": "2022-01-03T00:00:00Z"},
        {"id": 1, "repository": {"full_name": "org/repos"}, "created_at": "2022-01-02T00:00:00Z", "updated_at": "2022-01-02T00:00:00Z"},
    ]

    assert len(responses.calls) == 4

    # Simulate activity between syncs: one brand-new run (id=5) appears...
    data.insert(
        0,
        {
            "id": 5,
            "created_at": "2022-02-07T00:00:00Z",
            "updated_at": "2022-02-07T00:00:00Z",
            "repository": {"full_name": "org/repos"},
        },
    )

    # ...and the existing run id=3 (now at data[2] after the insert) is updated.
    data[2]["updated_at"] = "2022-02-08T00:00:00Z"

    # Second sync: re-register four pages over the mutated data.
    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[0:1]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=2>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1"}, strict_match=True)],
    )

    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[1:2]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=3>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1", "page": "2"}, strict_match=True)],
    )

    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[2:3]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=4>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1", "page": "3"}, strict_match=True)],
    )

    # NOTE(review): this page still advertises a next page (page=5), yet only
    # four calls are made below — presumably the stream stops paging once
    # created_at falls behind the saved cursor; confirm against WorkflowRuns.
    responses.add(
        "GET",
        "https://api.github.com/repos/org/repos/actions/runs",
        json={"total_count": len(data), "workflow_runs": data[3:4]},
        headers={"Link": '<https://api.github.com/repositories/283046497/actions/runs?per_page=1&page=5>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": "1", "page": "4"}, strict_match=True)],
    )

    responses.calls.reset()
    records = read_incremental(stream, state)

    # Only the new run and the updated run are emitted; the cursor moves to
    # the latest updated_at seen (the update to id=3).
    assert state == {"org/repos": {"updated_at": "2022-02-08T00:00:00Z"}}
    assert records == [
        {"id": 5, "repository": {"full_name": "org/repos"}, "created_at": "2022-02-07T00:00:00Z", "updated_at": "2022-02-07T00:00:00Z"},
        {"id": 3, "repository": {"full_name": "org/repos"}, "created_at": "2022-01-15T00:00:00Z", "updated_at": "2022-02-08T00:00:00Z"},
    ]

    assert len(responses.calls) == 4
Example #11
0
def test_stream_comments():
    """Incremental Comments read across two repositories.

    The first sync reads the two comments per repo newer than the start date;
    the second sync resumes from the stored per-repo ``updated_at`` cursor and
    pages through the remaining, newer comments.
    """

    repository_args_with_start_date = {
        "repositories": ["organization/repository", "airbytehq/airbyte"],
        "page_size_for_large_streams": 2,
        "start_date": "2022-02-02T10:10:01Z",
    }

    stream = Comments(**repository_args_with_start_date)

    data = [
        {"id": 1, "updated_at": "2022-02-02T10:10:02Z"},
        {"id": 2, "updated_at": "2022-02-02T10:10:04Z"},
        {"id": 3, "updated_at": "2022-02-02T10:12:06Z"},
        {"id": 4, "updated_at": "2022-02-02T10:12:08Z"},
        {"id": 5, "updated_at": "2022-02-02T10:12:10Z"},
        {"id": 6, "updated_at": "2022-02-02T10:12:12Z"},
    ]

    api_url = "https://api.github.com/repos/organization/repository/issues/comments"

    # First repository, sync 1: matched on since == start_date.
    responses.add(
        "GET",
        api_url,
        json=data[0:2],
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:10:01Z", "per_page": "2"})],
    )

    # First repository, sync 2: three pages matched on since == the cursor
    # saved by sync 1 (10:10:04); the first two pages carry a Link next header.
    responses.add(
        "GET",
        api_url,
        json=data[1:3],
        headers={
            "Link": '<https://api.github.com/repos/organization/repository/issues/comments?per_page=2&since=2022-02-02T10%3A10%3A04Z&page=2>; rel="next"'
        },
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:10:04Z", "per_page": "2"})],
    )

    responses.add(
        "GET",
        api_url,
        json=data[3:5],
        headers={
            "Link": '<https://api.github.com/repos/organization/repository/issues/comments?per_page=2&since=2022-02-02T10%3A10%3A04Z&page=3>; rel="next"'
        },
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:10:04Z", "page": "2", "per_page": "2"})],
    )

    responses.add(
        "GET",
        api_url,
        json=data[5:],
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:10:04Z", "page": "3", "per_page": "2"})],
    )

    # Second repository uses the same ids but slightly later timestamps.
    data = [
        {"id": 1, "updated_at": "2022-02-02T10:11:02Z"},
        {"id": 2, "updated_at": "2022-02-02T10:11:04Z"},
        {"id": 3, "updated_at": "2022-02-02T10:13:06Z"},
        {"id": 4, "updated_at": "2022-02-02T10:13:08Z"},
        {"id": 5, "updated_at": "2022-02-02T10:13:10Z"},
        {"id": 6, "updated_at": "2022-02-02T10:13:12Z"},
    ]

    api_url = "https://api.github.com/repos/airbytehq/airbyte/issues/comments"

    # Second repository, sync 1.
    responses.add(
        "GET",
        api_url,
        json=data[0:2],
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:10:01Z", "per_page": "2"})],
    )

    # Second repository, sync 2: cursor is 10:11:04 for this repo.
    responses.add(
        "GET",
        api_url,
        json=data[1:3],
        headers={
            "Link": '<https://api.github.com/repos/airbytehq/airbyte/issues/comments?per_page=2&since=2022-02-02T10%3A11%3A04Z&page=2>; rel="next"'
        },
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:11:04Z", "per_page": "2"})],
    )

    responses.add(
        "GET",
        api_url,
        json=data[3:5],
        headers={
            "Link": '<https://api.github.com/repos/airbytehq/airbyte/issues/comments?per_page=2&since=2022-02-02T10%3A11%3A04Z&page=3>; rel="next"'
        },
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:11:04Z", "page": "2", "per_page": "2"})],
    )

    responses.add(
        "GET",
        api_url,
        json=data[5:],
        match=[matchers.query_param_matcher({"since": "2022-02-02T10:11:04Z", "page": "3", "per_page": "2"})],
    )

    # Sync 1: both repos emit their first two comments; state holds one
    # cursor per repository.
    stream_state = {}
    records = read_incremental(stream, stream_state)
    assert records == [
        {"id": 1, "repository": "organization/repository", "updated_at": "2022-02-02T10:10:02Z"},
        {"id": 2, "repository": "organization/repository", "updated_at": "2022-02-02T10:10:04Z"},
        {"id": 1, "repository": "airbytehq/airbyte", "updated_at": "2022-02-02T10:11:02Z"},
        {"id": 2, "repository": "airbytehq/airbyte", "updated_at": "2022-02-02T10:11:04Z"},
    ]

    assert stream_state == {
        "airbytehq/airbyte": {"updated_at": "2022-02-02T10:11:04Z"},
        "organization/repository": {"updated_at": "2022-02-02T10:10:04Z"},
    }

    # Sync 2: resumes from the cursors, emitting only records strictly newer
    # than each repo's stored updated_at.
    records = read_incremental(stream, stream_state)
    assert records == [
        {"id": 3, "repository": "organization/repository", "updated_at": "2022-02-02T10:12:06Z"},
        {"id": 4, "repository": "organization/repository", "updated_at": "2022-02-02T10:12:08Z"},
        {"id": 5, "repository": "organization/repository", "updated_at": "2022-02-02T10:12:10Z"},
        {"id": 6, "repository": "organization/repository", "updated_at": "2022-02-02T10:12:12Z"},
        {"id": 3, "repository": "airbytehq/airbyte", "updated_at": "2022-02-02T10:13:06Z"},
        {"id": 4, "repository": "airbytehq/airbyte", "updated_at": "2022-02-02T10:13:08Z"},
        {"id": 5, "repository": "airbytehq/airbyte", "updated_at": "2022-02-02T10:13:10Z"},
        {"id": 6, "repository": "airbytehq/airbyte", "updated_at": "2022-02-02T10:13:12Z"},
    ]
    assert stream_state == {
        "airbytehq/airbyte": {"updated_at": "2022-02-02T10:13:12Z"},
        "organization/repository": {"updated_at": "2022-02-02T10:12:12Z"},
    }
Example #12
0
def test_stream_pull_requests_incremental_read():
    """PullRequests switches sort direction between the first and later syncs.

    The first (full) sync requests ascending order; once state exists, the
    second sync requests descending order and only newer records are emitted.
    """

    page_size = 2
    repository_args_with_start_date = {
        "repositories": ["organization/repository"],
        "page_size_for_large_streams": page_size,
        "start_date": "2022-02-02T10:10:03Z",
    }

    stream = PullRequests(**repository_args_with_start_date)

    data = [
        {"id": 1, "updated_at": "2022-02-02T10:10:02Z"},
        {"id": 2, "updated_at": "2022-02-02T10:10:04Z"},
        {"id": 3, "updated_at": "2022-02-02T10:10:06Z"},
        {"id": 4, "updated_at": "2022-02-02T10:10:08Z"},
        {"id": 5, "updated_at": "2022-02-02T10:10:10Z"},
        {"id": 6, "updated_at": "2022-02-02T10:10:12Z"},
    ]

    api_url = "https://api.github.com/repos/organization/repository/pulls"

    # Sync 1 (direction=asc): two pages, ids [1, 2] then [3, 4].
    responses.add(
        "GET",
        api_url,
        json=data[0:2],
        headers={"Link": '<https://api.github.com/repositories/400052213/pulls?page=2>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": str(page_size), "direction": "asc"}, strict_match=False)],
    )

    responses.add(
        "GET",
        api_url,
        json=data[2:4],
        match=[matchers.query_param_matcher({"per_page": str(page_size), "direction": "asc", "page": "2"}, strict_match=False)],
    )

    # Sync 2 (direction=desc): newest first — ids [6, 5] then [4, 3].
    responses.add(
        "GET",
        api_url,
        json=data[5:3:-1],
        headers={"Link": '<https://api.github.com/repositories/400052213/pulls?page=2>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": str(page_size), "direction": "desc"}, strict_match=False)],
    )

    responses.add(
        "GET",
        api_url,
        json=data[3:1:-1],
        headers={"Link": '<https://api.github.com/repositories/400052213/pulls?page=3>; rel="next"'},
        match=[matchers.query_param_matcher({"per_page": str(page_size), "direction": "desc", "page": "2"}, strict_match=False)],
    )

    # Sync 1: id=1 is older than start_date so only [2, 3, 4] come through;
    # cursor lands on id=4's updated_at.
    stream_state = {}
    records = read_incremental(stream, stream_state)
    assert [r["id"] for r in records] == [2, 3, 4]
    assert stream_state == {"organization/repository": {"updated_at": "2022-02-02T10:10:08Z"}}

    # Sync 2: descending order; only records newer than the cursor ([6, 5])
    # are emitted even though page 2 advertises another next page.
    records = read_incremental(stream, stream_state)
    assert [r["id"] for r in records] == [6, 5]
    assert stream_state == {"organization/repository": {"updated_at": "2022-02-02T10:10:12Z"}}
Example #13
0
    def run():
        """Connection errors aggregate each candidate mock's mismatch reason."""
        # Case 1: a urlencoded-body matcher fails on different form data, and a
        # JSON matcher fails because the urlencoded body is not parseable JSON.
        with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
            rsps.add(
                "POST",
                "http://example.com",
                match=[matchers.urlencoded_params_matcher({"foo": "bar"})],
            )
            rsps.add(
                "POST",
                "http://example.com",
                match=[matchers.json_params_matcher({"fail": "json"})],
            )

            with pytest.raises(ConnectionError) as excinfo:
                requests.post("http://example.com", data={"id": "bad"})

            msg = str(excinfo.value)
            assert (
                "request.body doesn't match: {id: bad} doesn't match {foo: bar}" in msg
            )

            assert (
                "request.body doesn't match: JSONDecodeError: Cannot parse request.body"
                in msg
            )

        # Case 2: one mock fails on query params, the other on the JSON body —
        # both reasons show up in the same error message.
        with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
            rsps.add(
                "GET",
                "http://111.com",
                match=[matchers.query_param_matcher({"my": "params"})],
            )

            rsps.add(
                method=responses.GET,
                url="http://111.com/",
                body="two",
                match=[matchers.json_params_matcher({"page": "one"})],
            )

            with pytest.raises(ConnectionError) as excinfo:
                requests.get(
                    "http://111.com", params={"id": "bad"}, json={"page": "two"}
                )

            msg = str(excinfo.value)
            assert (
                "Parameters do not match. {id: bad} doesn't match {my: params}" in msg
            )
            assert (
                "request.body doesn't match: {page: two} doesn't match {page: one}"
                in msg
            )

        # Case 3: request_kwargs_matcher mismatch reports the differing kwargs
        # (verify defaults to True on the request, False was registered).
        with responses.RequestsMock(assert_all_requests_are_fired=False) as rsps:
            req_kwargs = {
                "stream": True,
                "verify": False,
            }
            rsps.add(
                "GET",
                "http://111.com",
                match=[matchers.request_kwargs_matcher(req_kwargs)],
            )

            with pytest.raises(ConnectionError) as excinfo:
                requests.get("http://111.com", stream=True)

            msg = str(excinfo.value)
            assert (
                "Arguments don't match: "
                "{stream: True, verify: True} doesn't match {stream: True, verify: False}"
            ) in msg