def test_openidconnect_invalid_scope(params_from_base_test_setup, sg_conf_name):
    """Try to discover the authenticate endpoint URL with a test provider that has an
    invalid scope, and expect an error"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_invalid_scope'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    try:
        discover_authenticate_endpoint(sg_url, sg_db, "testinvalidscope")
    except HTTPError:
        log_info("got expected HTTPError trying to get the authenticate endpoint")
        # ok we got an exception, which is expected since we are using an invalid scope
        return

    raise Exception("Expected HTTPError since we are using an invalid scope")
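
# Note: the discover_* helpers these tests call are not shown in this listing.
# Below is a minimal sketch of plausible implementations, assuming Sync Gateway
# answers GET /{db}/_oidc?provider=...&offline=true with a 302 redirect whose
# Location header carries the test provider's authenticate URL (an assumption
# about the mechanics, not the testkit's actual code).
import requests


def discover_authenticate_url(sg_url, sg_db, provider):
    # Start the OIDC flow without following the redirect; the Location header
    # points at the test provider's authenticate endpoint.
    response = requests.get(
        "{}/{}/_oidc?provider={}&offline=true".format(sg_url, sg_db, provider),
        allow_redirects=False)
    response.raise_for_status()  # raises HTTPError, e.g. for an invalid scope
    return response.headers["Location"]


def discover_authenticate_endpoint(sg_url, sg_db, provider):
    # Keep only the trailing "authenticate?client_id=..." portion so callers can
    # rebuild the full URL against {sg_url}/{sg_db}/_oidc_testing/.
    return discover_authenticate_url(sg_url, sg_db, provider).split("/")[-1]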
Example #2
def test_openidconnect_no_session(params_from_base_test_setup, sg_conf_name):
    """Authenticate with a test openid provider that is configured to NOT add a Set-Cookie header"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_no_session'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    authenticate_url = discover_authenticate_url(sg_url, sg_db, "testnosessions")

    # Make the request to _oidc_testing
    response = requests.post(authenticate_url, files=formdata)
    log_r(response)
    assert "Set-Cookie" not in response.headers
Example #3
def test_openidconnect_oidc_challenge_invalid_provider_name(params_from_base_test_setup, sg_conf_name):
    """
    If oidc_challenge is called with an invalid provider name, it should not return
    a Www-Authenticate header
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_oidc_challenge_invalid_provider_name'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # make a request to the _oidc_challenge endpoint
    oidc_challenge_url = "{}/{}/_oidc_challenge?provider={}".format(sg_url, sg_db, "bogusprovider")
    response = requests.get(oidc_challenge_url)
    log_info("response.headers: {}".format(response.headers))
    assert "Www-Authenticate" not in response.headers
    assert response.status_code == 400
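
# For contrast with the negative test above: a provider that IS configured
# should make _oidc_challenge advertise the login URL. A hedged sketch of the
# positive case; the provider name "test" is an assumption borrowed from the
# _oidc_refresh call in a later example.
oidc_challenge_url = "{}/{}/_oidc_challenge?provider={}".format(sg_url, sg_db, "test")
response = requests.get(oidc_challenge_url)
assert response.status_code == 401
assert "Www-Authenticate" in response.headers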
Example #7
def test_rolling_ttl_remove_expirary(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
    3. Update /db/doc1 with a revision with no expiry
    4. Get /db/doc1.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_remove_expirary'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, delay=1, auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=1, auth=sg_user_session)

    # If expiry was not removed in the last update, this would expire doc_exp_3
    time.sleep(5)

    # doc_exp_3 should still be around due to removal of expiry
    doc_exp_3 = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert doc_exp_3["_id"] == "exp_3"

    # doc_exp_10 should be expired due to the updates (10s) + sleep (5s)
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")
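
# The "_exp" property used throughout these expiry tests travels inline in the
# document body. A minimal raw-HTTP sketch of what the MobileRestClient calls
# presumably boil down to; the URL, doc id, and session value are illustrative.
import requests

resp = requests.put(
    "http://localhost:4984/db/exp_3",
    # _exp accepts a numeric TTL in seconds, a numeric epoch timestamp, or the
    # string form of either (see the string-expiry example further down).
    json={"channels": ["NBC", "ABC"], "_exp": 3},
    cookies={"SyncGatewaySession": "<session id>"})
resp.raise_for_status()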
Example #8
def test_rolling_ttl_expires(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 10 times with a new revision (also with "_exp":3)
    3. Wait 5 seconds
    4. Get /db/doc1.  Assert response is 404
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_expires'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, expiry=3, auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
Example #9
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 10 times with new revisions that carry no expiry value
    3. Wait 5 seconds
    4. Get /db/doc1.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    doc_exp_3_updated = client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, auth=sg_user_session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    doc_exp_3_updated_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3_updated["id"], auth=sg_user_session)
    assert doc_exp_3_updated_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name):
    """
    1. Start sync_gateway with sync function that rejects all writes:
    function(doc, oldDoc) {
      throw({forbidden:"No writes!"});
    }
    2. Create a doc with attachment
    3. Use CBS sdk to see if attachment doc exists.  Doc ID will look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (where the suffix is the digest)
    4. Assert att doc exists
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_writing_attachment_to_couchbase_server'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"

    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=sg_user_session)
    assert len(docs) == 100

    # Create doc with attachment and push to sync_gateway
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=sg_user_channels)

    client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=sg_user_session)
    server = CouchbaseServer(cbs_url)

    # Assert that the attachment doc gets written to couchbase server
    server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
    num_att_docs = len(server_att_docs)
    assert num_att_docs == 1
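
# The "_sync:att:" key mentioned in the docstring is derived from the
# attachment body itself. A small sketch for recomputing the digest suffix,
# following the CouchDB-style "sha1-<base64 digest>" convention that the
# docstring's example id uses.
import base64
import hashlib


def attachment_doc_id(attachment_bytes):
    # e.g. _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=
    digest = base64.b64encode(hashlib.sha1(attachment_bytes).digest()).decode("ascii")
    return "_sync:att:sha1-{}".format(digest)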
def test_openidconnect_large_scope(params_from_base_test_setup, sg_conf_name):
    """Authenticate against a test provider config that only has a larger scope than the default,
    and make sure things like the nickname are returned in the jwt token returned back"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_large_scope'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, "testlargescope")

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    response_json = response.json()
    id_token = response_json["id_token"]

    # {u'iss': u'http://localhost:4984/db/_oidc_testing', u'iat': 1466050188, u'aud': u'sync_gateway', u'exp': 1466053788, u'sub': u'testuser'}
    decoded_id_token = jwt.decode(id_token, verify=False)

    log_info("decoded_id_token: {}".format(decoded_id_token))

    assert "nickname" in decoded_id_token.keys()
def test_load_balance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_util = ClusterKeywords()
    cluster_util.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf_path
    )

    topology = cluster_util.get_cluster_topology(cluster_config)
    admin_sg_one = topology["sync_gateways"][0]["admin"]
    lb_url = "{}:4984".format(topology["load_balancers"][0])

    sg_db = "db"
    num_docs = 1000
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    log_info(user)
    log_info(session)

    log_info("Adding docs to the load balancer ...")

    ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        log_info("Starting ...")
        ct_task = executor.submit(ct.start)
        log_info("Adding docs ...")
        docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        log_info("Adding docs done")
        wait_for_changes = executor.submit(ct.wait_until, docs)

        if wait_for_changes.result():
            log_info("Stopping ...")
            log_info("Found all docs ...")
            executor.submit(ct.stop)
            ct_task.result()
        else:
            executor.submit(ct.stop)
            ct_task.result()
            raise Exception("Could not find all changes in feed before timeout!!")
def test_load_balance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_util = ClusterKeywords()
    cluster_util.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf_path
    )

    topology = cluster_util.get_cluster_topology(cluster_config)
    admin_sg_one = topology["sync_gateways"][0]["admin"]
    lb_url = topology["load_balancers"][0]

    sg_db = "db"
    num_docs = 1000
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    log_info(user)
    log_info(session)

    log_info("Adding docs to the load balancer ...")

    ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        log_info("Starting ...")
        ct_task = executor.submit(ct.start)
        log_info("Adding docs ...")
        docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        log_info("Adding docs done")
        wait_for_changes = executor.submit(ct.wait_until, docs)

        if wait_for_changes.result():
            log_info("Stopping ...")
            log_info("Found all docs ...")
            executor.submit(ct.stop)
            ct_task.result()
        else:
            executor.submit(ct.stop)
            ct_task.result()
            raise Exception("Could not find all changes in feed before timeout!!")
Example #15
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents.  Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents.  Validate that only the 5 documents that have not yet expired are returned
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)

    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies

    bulk_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=bulk_bodies, auth=sg_user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_get_docs = client.get_bulk_docs(url=sg_url, db=sg_db, docs=bulk_docs, auth=sg_user_session)

    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = ["exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"]

    client.verify_doc_ids_found_in_response(response=bulk_get_docs, expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(response=bulk_get_docs, expected_missing_doc_ids=expected_missing_ids)
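
# The document.create_doc / create_docs helpers are not shown in this listing.
# A minimal sketch consistent with how the tests use them: plain dict bodies,
# ids shaped like "exp_3_0" .. "exp_3_4", and the optional "_exp" TTL inline.
# Attachment handling (attachment_name) is omitted here.
def create_doc(doc_id, content=None, expiry=None, channels=None):
    doc = {"_id": doc_id, "channels": channels or []}
    if content is not None:
        doc["content"] = content
    if expiry is not None:
        doc["_exp"] = expiry
    return doc


def create_docs(doc_id_prefix, number, expiry=None, channels=None):
    # Ids follow the "<prefix>_<index>" shape the bulk assertions above check.
    return [create_doc("{}_{}".format(doc_id_prefix, i), expiry=expiry, channels=channels)
            for i in range(number)]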
def test_openidconnect_negative_token_expiry(params_from_base_test_setup, sg_conf_name):
    """Create a token with a negative expiry time and expect that authentication
    is not possible"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_negative_token_expiry'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    token_expiry_seconds = -5

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    response = requests.post(url, files=formdata)
    assert response.status_code == 500
Example #18
def test_openidconnect_notauthenticated(params_from_base_test_setup, sg_conf_name):
    """Simulate a failed authentication and make sure no session is created"""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_notauthenticated'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    authenticate_endpoint_url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    formdata = {
        'username': ('', 'testuser'),
        'notauthenticated': ('', 'Return an authorization error for this user')
    }
    response = requests.post(authenticate_endpoint_url, files=formdata)
    assert response.status_code == 401
Example #20
def test_string_expiry_as_unix_date(params_from_base_test_setup, sg_conf_name):
    """
    1. Calculate (server time + 3 seconds) as unix time (i.e. Epoch time, e.g. 1466465122)
    2. PUT /db/doc1 via SG with property "_exp":"[unix time]"
       PUT /db/doc2 via SG with property "_exp":"1767225600"  (Jan 1 2026) Note: the maximum epoch time supported by CBS is maxUint32, or Sun 07 Feb 2106, in case you want to move it out further than 2026.
    3. Wait 10 seconds
    4. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_string_expiry_as_unix_date'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    time_util = Time()
    unix_time_3s_ahead = time_util.get_unix_timestamp(delta=3)

    # Convert unix timestamp to string
    unix_time_3s_ahead_string = str(unix_time_3s_ahead)

    # Using string representation for unix time
    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=unix_time_3s_ahead_string, channels=sg_user_channels)
    doc_exp_years_body = document.create_doc(doc_id="exp_years", expiry="1767225600", channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_years = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_years_body, auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire
    time.sleep(10)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_years should be available still
    doc_exp_years_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_years["id"], auth=sg_user_session)
    assert doc_exp_years_result["_id"] == "exp_years"
Example #21
def test_rolling_ttl_remove_expirary(params_from_base_test_setup,
                                     sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
    3. Update /db/doc1 with a revision with no expiry
    4. Get /db/doc1.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_remove_expirary'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=3,
                                         channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10",
                                          expiry=10,
                                          channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url,
                                db=sg_db,
                                doc=doc_exp_10_body,
                                auth=sg_user_session)

    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_exp_3["id"],
                      number_updates=10,
                      expiry=3,
                      delay=1,
                      auth=sg_user_session)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_exp_3["id"],
                      number_updates=1,
                      auth=sg_user_session)

    # If expiry was not removed in the last update, this would expire doc_exp_3
    time.sleep(5)

    # doc_exp_3 should still be around due to removal of expiry
    doc_exp_3 = client.get_doc(url=sg_url,
                               db=sg_db,
                               doc_id=doc_exp_3["id"],
                               auth=sg_user_session)
    assert doc_exp_3["_id"] == "exp_3"

    # doc_exp_10 should be expired due to the updates (10s) + sleep (5s)
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_10["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_10["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)
Example #22
def test_numeric_expiry_as_ttl(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
       PUT /db/doc2 via SG with property "_exp":10
    2. Wait five seconds
    3. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_numeric_expiry_as_ttl'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')
    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=3,
                                         channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10",
                                          expiry=10,
                                          channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url,
                                db=sg_db,
                                doc=doc_exp_10_body,
                                auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_3["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_3["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url,
                                       db=sg_db,
                                       doc_id=doc_exp_10["id"],
                                       auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
def test_server_goes_down_rebuild_channels(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Starting adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_version = get_sg_version(cluster_config)
    if compare_versions(sg_version, '1.5') < 0:
        pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5")

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    admin_user_info = userinfo.UserInfo(
        name="admin",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    client = MobileRestClient()
    main_server = couchbaseserver.CouchbaseServer(cbs_one_url)
    flakey_server = couchbaseserver.CouchbaseServer(cbs_two_url)

    admin_auth = client.create_user(
        admin_sg,
        sg_db,
        admin_user_info.name,
        admin_user_info.password,
        channels=admin_user_info.channels
    )

    client.create_user(
        admin_sg,
        sg_db,
        seth_user_info.name,
        seth_user_info.password,
        channels=seth_user_info.channels
    )
    seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name)

    # allow any user docs to make it to changes
    initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)

    # push docs from admin
    docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=num_docs,
        id_prefix=None,
        channels=admin_user_info.channels,
        auth=admin_auth
    )

    assert len(docs) == num_docs

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
    changes_before_failover = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes_before_failover["results"]) == num_docs

    # Stop server via 'service stop'
    flakey_server.stop()

    start = time.time()
    while True:
        # Fail tests if all docs do not succeed before timeout
        if (time.time() - start) > 60:
            # Bring server back up before failing the test
            flakey_server.start()
            main_server.recover(flakey_server)
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutError("Failed to rebuild changes")

        try:
            # Poll until failover happens (~30 seconds)
            client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
            # changes requests succeeded, exit loop
            break
        except requests.exceptions.HTTPError:
            # Changes will fail until failover of the down server happens. Wait and try again.
            log_info("/db/_changes failed due to server down. Retrying ...")
            time.sleep(1)

    # Verify no new changes
    changes = client.get_changes(
        url=sg_url,
        db=sg_db,
        since=changes_before_failover["last_seq"],
        auth=seth_session,
        feed="normal"
    )
    assert len(changes["results"]) == 0

    # Check that all changes are intact from initial changes request
    changes = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes["results"]) == num_docs

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)
Example #24
def test_openidconnect_expired_token(params_from_base_test_setup, sg_conf_name):
    """Authenticate and create an ID token that only lasts for 5 seconds, wait 10 seconds
       and make sure the token is rejected
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_expired_token'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    token_expiry_seconds = 5

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    response_json = response.json()
    id_token = response_json["id_token"]

    # wait until token expires
    time.sleep(token_expiry_seconds + 1)

    # make a request using the ID token against the db and expect a 200 response
    headers = {"Authorization": "Bearer {}".format(id_token)}
    db_url = "{}/{}".format(sg_url, sg_db)
    resp = requests.get(db_url, headers=headers)
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"
def test_openidconnect_basic_test(params_from_base_test_setup, sg_conf_name, is_admin_port, expect_signed_id_token):
    """Tests the basic OpenIDConnect login flow against the non-admin port when is_admin_port=False
    Tests the basic OpenIDConnect login flow against the admin port when is_admin_port=True
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_basic_test'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using is_admin_port: {}".format(is_admin_port))
    log_info("Using expect_signed_id_token: {}".format(expect_signed_id_token))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # make a request against the db and expect a 401 response since we haven't authenticated yet.
    # (but there's no point in doing this on the admin port since we'll never get a 401)
    if not is_admin_port:
        db_url = "{}/{}".format(sg_url, sg_db)
        resp = requests.get(db_url)
        assert resp.status_code == 401, "Expected 401 response"

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    authenticate_endpoint_url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }
    authenticate_response = requests.post(authenticate_endpoint_url, files=formdata)
    set_cookie_response_header = authenticate_response.headers['Set-Cookie']
    log_r(authenticate_response)

    # extract the token from the response
    authenticate_response_json = authenticate_response.json()
    id_token = authenticate_response_json["id_token"]
    refresh_token = authenticate_response_json["refresh_token"]

    # make sure the id token has the email field in it
    decoded_id_token = jwt.decode(id_token, verify=False)
    assert "email" in decoded_id_token.keys()

    # make a request using the ID token against the db and expect a 200 response
    headers = {"Authorization": "Bearer {}".format(id_token)}
    db_url = "{}/{}".format(sg_url, sg_db)
    resp = requests.get(db_url, headers=headers)
    log_r(resp)
    if expect_signed_id_token:
        assert resp.status_code == 200, "Expected 200 response for bearer ID token"
    else:
        assert resp.status_code == 401, "Expected 401 response for bearer ID token"

    # make a request using the cookie against the db and expect a 200 response
    db_url = "{}/{}".format(sg_url, sg_db)
    resp = requests.get(db_url, cookies=extract_cookie(set_cookie_response_header))
    log_r(resp)
    assert resp.status_code == 200, "Expected 200 response when using session cookie"

    # make a request using the session_id that's sent in the body
    resp = requests.get(db_url, cookies={"SyncGatewaySession": authenticate_response_json["session_id"]})
    assert resp.status_code == 200, "Expected 200 response using session_id from body"

    # try to use the refresh token to get a few new id_tokens
    id_tokens = [id_token]
    for i in xrange(3):

        # This pause is required because according to @ajres:
        # The id_token will only be unique if the two calls are more than a second apart.
        # It would be easy to add an atomically incrementing nonce claim to each token to ensure that they are always unique
        time.sleep(2)

        refresh_token_url = "{}/{}/_oidc_refresh?refresh_token={}&provider={}".format(sg_url, sg_db, refresh_token, "test")
        authenticate_response = requests.get(refresh_token_url)
        authenticate_response_json = authenticate_response.json()
        id_token_refresh = authenticate_response_json["id_token"]
        # make sure we get a unique id token each time
        assert id_token_refresh not in id_tokens

        # make a request using the ID token against the db and expect a 200 response
        headers = {"Authorization": "Bearer {}".format(id_token_refresh)}
        resp = requests.get(db_url, headers=headers)
        log_r(resp)
        if expect_signed_id_token:
            assert resp.status_code == 200, "Expected 200 response for bearer ID token on refresh"
        else:
            assert resp.status_code == 401, "Expected 401 response for bearer ID token on refresh"

        id_tokens.append(id_token_refresh)
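
# A minimal sketch of the extract_cookie helper used above -- an assumption, since
# the helper's source is not part of this listing. It parses a Set-Cookie response
# header into the dict shape that requests accepts via the cookies= keyword.
# Named with a _sketch suffix so it does not shadow the real helper.
def extract_cookie_sketch(set_cookie_header):
    # "SyncGatewaySession=abc123; Path=/db; Expires=..." -> {"SyncGatewaySession": "abc123"}
    name_value = set_cookie_header.split(";")[0]
    name, _, value = name_value.partition("=")
    return {name.strip(): value.strip()}
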
Example #26
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup,
                                                sg_conf_name):
    """
    1. Start sync_gateway with sync function that rejects all writes:
    function(doc, oldDoc) {
      throw({forbidden:"No writes!"});
    }
    2. Create a doc with attachment
    3. Use CBS sdk to see if attachment doc exists.  Doc ID will look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (where the suffix is the digest)
    4. Assert att doc does not exist
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_writing_attachment_to_couchbase_server'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"

    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    docs = client.add_docs(url=sg_url,
                           db=sg_db,
                           number=100,
                           id_prefix=sg_db,
                           channels=sg_user_channels,
                           auth=sg_user_session)
    assert len(docs) == 100

    # Create doc with attachment and push to sync_gateway
    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc",
                                       content={"sample_key": "sample_val"},
                                       attachments=atts,
                                       channels=sg_user_channels)

    client.add_doc(url=sg_url,
                   db=sg_db,
                   doc=doc_with_att,
                   auth=sg_user_session)
    server = couchbaseserver.CouchbaseServer(cbs_url)

    # Assert that the attachment doc gets written to couchbase server
    server_att_docs = server.get_server_docs_with_prefix(bucket=bucket,
                                                         prefix="_sync:att:")
    num_att_docs = len(server_att_docs)
    assert num_att_docs == 1
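
# A hedged sketch of how attachment keys like
# "_sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=" relate to the attachment bytes,
# assumed from the key format described in the docstring above: the SHA-1 digest
# of the attachment, base64-encoded, behind the "_sync:att:sha1-" prefix.
import base64
import hashlib

def attachment_key_sketch(attachment_bytes):
    digest = base64.b64encode(hashlib.sha1(attachment_bytes).digest()).decode("ascii")
    return "_sync:att:sha1-{}".format(digest)
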
Example #27
def test_attachment_revpos_when_ancestor_unavailable(
        params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with attachment at rev-1
       2. Document is updated (strip digests and length, only put revpos & stub) multiple times on the server, goes to rev-10
       3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
       4. If the body of rev-1 is no longer available on the server (the temporary revision backup
          has expired and it is no longer stored in the in-memory rev cache), we previously returned
          an error to the client because the _attachments property in rev-1 could not be verified.
       5. In this scenario, before returning an error, we now check whether the active revision has a
          common ancestor with the incoming revision. If so, any revpos values equal to or earlier than
          the common ancestor can be validated against the active revision
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = couchbaseserver.CouchbaseServer(cbs_url)

    user1 = client.create_user(url=sg_url_admin,
                               db=sg_db,
                               name="user1",
                               password="******",
                               channels=channels_list)
    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc",
                                       content={"sample_key": "sample_val"},
                                       attachments=atts,
                                       channels=channels_list)

    doc_gen_1 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_with_att,
                               auth=user1)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_gen_1["id"],
                      number_updates=10,
                      auth=user1)

    # Clear cached revision bodies from the server and cycle sync_gateway
    sg_util.stop_sync_gateways(cluster_config=cluster_config, url=sg_url)

    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateways(cluster_config=cluster_config,
                                url=sg_url,
                                config=sg_conf)

    client.add_conflict(url=sg_url,
                        db=sg_db,
                        doc_id=doc_gen_1["id"],
                        parent_revisions=doc_gen_1["rev"],
                        new_revision="2-foo",
                        auth=user1)
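
# add_conflict is assumed here to wrap the standard CouchDB-style conflict push:
# PUT the doc with new_edits=false and an explicit _revisions history. A hedged,
# self-contained sketch (the helper name and exact payload are assumptions):
import requests

def put_conflict_sketch(sg_url, sg_db, doc_id, parent_rev, new_rev, session_cookies):
    # _revisions lists revision hashes newest-first, without the generation prefix
    new_gen, new_hash = new_rev.split("-", 1)
    parent_hash = parent_rev.split("-", 1)[1]
    body = {"_revisions": {"start": int(new_gen), "ids": [new_hash, parent_hash]}}
    resp = requests.put(
        "{}/{}/{}".format(sg_url, sg_db, doc_id),
        json=body,
        params={"new_edits": "false"},
        cookies=session_cookies,
    )
    resp.raise_for_status()
    return resp.json()
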
Example #28
def test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor(
        params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with no attachment at rev-1
       2. Server adds a revision with an attachment at rev-2 {"hello.txt", revpos=2}
       3. Document is updated multiple times on the server, goes to rev-4
       4. Client attempts to add a new (conflicting) revision 3a, with ancestors rev-2a (with its own attachment) and rev-1
       5. When the client attempts to push rev-3a with attachment stub {"hello.txt", revpos=2}, Sync Gateway should
          return an error, since the revpos of the attachment is later than the common ancestor (rev-1)
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info(
        "Running 'test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor'"
    )
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"

    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc = document.create_doc(doc_id="doc_1",
                              content={"sample_key": "sample_val"},
                              channels=sg_user_channels)
    doc_gen_1 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc,
                               auth=sg_user_session)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_gen_1["id"],
                      attachment_name="sample_text.txt",
                      auth=sg_user_session)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_gen_1["id"],
                      auth=sg_user_session)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_gen_1["id"],
                      auth=sg_user_session)

    parent_rev_list = ["2-foo2", doc_gen_1["rev"]]

    # Sync Gateway should return an error since there is no reference to the attachment in the revision's ancestors
    with pytest.raises(HTTPError) as he:
        client.add_conflict(url=sg_url,
                            db=sg_db,
                            doc_id=doc_gen_1["id"],
                            parent_revisions=parent_rev_list,
                            new_revision="3-foo3",
                            auth=sg_user_session)
    assert he.value[0].startswith("400 Client Error: Bad Request for url: ")
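
# For reference, a hedged sketch of the attachment stub shape the docstring above
# abbreviates as {"hello.txt", revpos=2} -- CouchDB-style _attachments metadata
# (the digest value here is illustrative, not taken from a real run):
attachment_stub_example = {
    "_attachments": {
        "hello.txt": {
            "stub": True,       # body not included; the server must already have it
            "revpos": 2,        # generation at which the attachment was added
            "digest": "sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0=",
        }
    }
}
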
def test_openidconnect_public_session_endpoint(params_from_base_test_setup, sg_conf_name):
    """Create a new session from the OpenID Connect token returned by hitting
    the public _session endpoint and make sure the response contains the Set-Cookie header."""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_public_session_endpoint'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    response_json = response.json()
    id_token = response_json["id_token"]

    headers = {
        "Authorization": "Bearer {}".format(id_token),
        "Content-Type": "application/json"
    }
    url = "{}/{}/_session".format(
        sg_url,
        sg_db
    )

    response = requests.post(url, headers=headers)
    assert "Set-Cookie" in response.headers.keys()
    set_cookie_response = response.headers['Set-Cookie']
    assert "SyncGatewaySession" in set_cookie_response

def test_server_goes_down_sanity(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_version = get_sg_version(cluster_config)
    if compare_versions(sg_version, '1.5') < 0:
        pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5")

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    main_server = couchbaseserver.CouchbaseServer(cbs_one_url)
    flakey_server = couchbaseserver.CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg, sg_db, sg_user_name)

    # Stop second server
    flakey_server.stop()

    # Try to add 100 docs in a loop until all succeed; if they never do, fail with a timeout

    # Wait 30 seconds for auto failover
    # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html)
    # + 15 seconds to add docs
    timeout = 45
    start = time.time()

    successful_add = False
    while not successful_add:

        # Fail the test if all docs do not succeed before the timeout
        if (time.time() - start) > timeout:
            # Bring the server back up before failing the test
            flakey_server.start()
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutException("Failed to successfully put docs before timeout")

        try:
            docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None, auth=session, channels=channels)

            # If the above add_docs call does not throw, it was a successful add.
            successful_add = True
        except requests.exceptions.HTTPError as he:
            log_info("Failed to add docs: {}".format(he))
            time.sleep(1)

    assert len(docs) == 100
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session)

    try:
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5)
    except keywords.exceptions.TimeoutException:
        # timeout verifying docs. Bring server back in to restore topology, then fail
        # Failing due to https://github.com/couchbase/sync_gateway/issues/2197
        flakey_server.start()
        main_server.recover(flakey_server)
        main_server.rebalance_in(couchbase_servers, flakey_server)
        raise keywords.exceptions.TimeoutException("Failed to get all changes")

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)

    log_info("test_server_goes_down_sanity complete!")
def test_openidconnect_expired_token(params_from_base_test_setup, sg_conf_name):
    """Authenticate and create an ID token that only lasts for 5 seconds, wait 10 seconds
       and make sure the token is rejected
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_expired_token'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    token_expiry_seconds = 5

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user'),
        'tokenttl': ('', "{}".format(token_expiry_seconds)),
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    response_json = response.json()
    id_token = response_json["id_token"]

    # wait until token expires
    time.sleep(token_expiry_seconds + 1)

    # make a request using the expired ID token against the db and expect a non-200 response
    headers = {"Authorization": "Bearer {}".format(id_token)}
    db_url = "{}/{}".format(sg_url, sg_db)
    resp = requests.get(db_url, headers=headers)
    log_r(resp)
    assert resp.status_code != 200, "Expected non-200 response"
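
# A hedged sketch of how the 5-second TTL could be confirmed before sleeping,
# reusing the unverified jwt.decode call this suite already uses (PyJWT 1.x API);
# the helper name is an assumption:
import time
import jwt

def token_seconds_remaining_sketch(id_token):
    claims = jwt.decode(id_token, verify=False)  # signature deliberately not checked
    return claims["exp"] - int(time.time())
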
def test_attachment_revpos_when_ancestor_unavailable(params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with attachment at rev-1
       2. Document is updated (strip digests and length, only put revpos & stub) multiple times on the server, goes to rev-4
       3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
       4. If the body of rev-1 is no longer available on the server (the temporary revision backup
          has expired and it is no longer stored in the in-memory rev cache), we previously returned
          an error to the client because the _attachments property in rev-1 could not be verified.
       5. In this scenario, before returning an error, we now check whether the active revision has a
          common ancestor with the incoming revision. If so, any revpos values equal to or earlier than
          the common ancestor can be validated against the active revision
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = CouchbaseServer(cbs_url)

    user1 = client.create_user(url=sg_url_admin, db=sg_db, name="user1", password="******", channels=channels_list)
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=channels_list)

    doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=user1)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], number_updates=10, auth=user1)

    # Clear cached revision bodies from the server and cycle sync_gateway
    sg_util.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)

    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_url, config=sg_conf)

    client.add_conflict(
        url=sg_url, db=sg_db,
        doc_id=doc_gen_1["id"],
        parent_revisions=doc_gen_1["rev"],
        new_revision="2-foo",
        auth=user1
    )
Example #33
def test_openidconnect_public_session_endpoint(params_from_base_test_setup, sg_conf_name):
    """Create a new session from the OpenID Connect token returned by hitting
    the public _session endpoint and make sure the response contains the Set-Cookie header."""

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_public_session_endpoint'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # multipart/form data content
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)

    # build the full url
    url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    response = requests.post(url, files=formdata)
    log_r(response)

    # extract the token from the response
    response_json = response.json()
    id_token = response_json["id_token"]

    headers = {
        "Authorization": "Bearer {}".format(id_token),
        "Content-Type": "application/json"
    }
    url = "{}/{}/_session".format(
        sg_url,
        sg_db
    )

    response = requests.post(url, headers=headers)
    assert "Set-Cookie" in response.headers.keys()
    set_cookie_response = response.headers['Set-Cookie']
    assert "SyncGatewaySession" in set_cookie_response
Example #34
def test_string_expiry_as_unix_date(params_from_base_test_setup, sg_conf_name):
    """
    1. Calculate (server time + 3 seconds) as unix time (i.e. Epoch time, e.g. 1466465122)
    2. PUT /db/doc1 via SG with property "_exp":"[unix time]"
       PUT /db/doc2 via SG with property "_exp":"1767225600"  (Jan 1 2026) Note: the maximum epoch time supported by CBS is maxUint32, or Sun 07 Feb 2106, in case you want to move it out further than 2026.
    3. Wait five seconds
    4. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_string_expiry_as_unix_date'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    time_util = Time()
    unix_time_3s_ahead = time_util.get_unix_timestamp(delta=3)

    # Convert unix timestamp to string
    unix_time_3s_ahead_string = str(unix_time_3s_ahead)

    # Using string representation for unix time
    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=unix_time_3s_ahead_string,
                                         channels=sg_user_channels)
    doc_exp_years_body = document.create_doc(doc_id="exp_years",
                                             expiry="1767225600",
                                             channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_years = client.add_doc(url=sg_url,
                                   db=sg_db,
                                   doc=doc_exp_years_body,
                                   auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire
    time.sleep(10)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_3["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_3["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)

    # doc_exp_years should be available still
    doc_exp_years_result = client.get_doc(url=sg_url,
                                          db=sg_db,
                                          doc_id=doc_exp_years["id"],
                                          auth=sg_user_session)
    assert doc_exp_years_result["_id"] == "exp_years"
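
# A hedged sketch of the timestamp helper used above (assumed behavior of
# Time.get_unix_timestamp, whose source is not in this listing): current epoch
# seconds plus an optional delta.
import time

def unix_timestamp_sketch(delta=0):
    return int(time.time()) + delta
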
def test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor(params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with no attachment at rev-1
       2. Server adds a revision with an attachment at rev-2 {"hello.txt", revpos=2}
       3. Document is updated multiple times on the server, goes to rev-4
       4. Client attempts to add a new (conflicting) revision 3a, with ancestors rev-2a (with its own attachment) and rev-1
       5. When the client attempts to push rev-3a with attachment stub {"hello.txt", revpos=2}, Sync Gateway should
          return an error, since the revpos of the attachment is later than the common ancestor (rev-1)
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable_active_revision_doesnt_share_ancestor'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"

    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc = document.create_doc(doc_id="doc_1", content={"sample_key": "sample_val"}, channels=sg_user_channels)
    doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc, auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], attachment_name="sample_text.txt", auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], auth=sg_user_session)

    parent_rev_list = ["2-foo2", doc_gen_1["rev"]]

    # Sync Gateway should return an error since there is no reference to the attachment in the revision's ancestors
    with pytest.raises(HTTPError) as he:
        client.add_conflict(
            url=sg_url,
            db=sg_db,
            doc_id=doc_gen_1["id"],
            parent_revisions=parent_rev_list,
            new_revision="3-foo3",
            auth=sg_user_session
        )
    assert he.value[0].startswith("400 Client Error: Bad Request for url: ")
Example #36
def test_string_expiry_as_iso_8601_date(params_from_base_test_setup, sg_conf_name):
    """
    1. Calculate (server time + 3 seconds) as ISO-8601 date (e.g. 2016-01-01T00:00:00.000+00:00)
    2. PUT /db/doc1 via SG with property "_exp":"[date]"
       PUT /db/doc2 via SG with property "_exp":"2026-01-01T00:00:00.000+00:00"
    3. Wait five seconds
    4. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_string_expiry_as_ISO_8601_Date'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    time_util = Time()
    iso_datetime = time_util.get_iso_datetime(delta=3)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=iso_datetime, channels=sg_user_channels)
    doc_exp_years_body = document.create_doc(doc_id="exp_years", expiry="2026-01-01T00:00:00.000+00:00", channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_years = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_years_body, auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire
    time.sleep(10)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_years should be available still
    doc_exp_years_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_years["id"], auth=sg_user_session)
    assert doc_exp_years_result["_id"] == "exp_years"
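
# A hedged sketch of the ISO-8601 helper used above (assumed behavior of
# Time.get_iso_datetime): now plus a delta, shaped like
# "2016-01-01T00:00:00.000000+00:00".
from datetime import datetime, timedelta

def iso_datetime_sketch(delta=0):
    return (datetime.utcnow() + timedelta(seconds=delta)).isoformat() + "+00:00"
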
def test_server_goes_down_rebuild_channels(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100

    admin_user_info = userinfo.UserInfo(
        name="admin",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    seth_user_info = userinfo.UserInfo(
        name="seth",
        password="******",
        channels=["ABC"],
        roles=[]
    )

    client = MobileRestClient()
    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    admin_auth = client.create_user(
        admin_sg,
        sg_db,
        admin_user_info.name,
        admin_user_info.password,
        channels=admin_user_info.channels
    )

    client.create_user(
        admin_sg,
        sg_db,
        seth_user_info.name,
        seth_user_info.password,
        channels=seth_user_info.channels
    )
    seth_session = client.create_session(admin_sg, sg_db, seth_user_info.name)

    # allow any user docs to make it to changes
    initial_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=seth_session)

    # push docs from admin
    docs = client.add_docs(
        url=sg_url,
        db=sg_db,
        number=num_docs,
        id_prefix=None,
        channels=admin_user_info.channels,
        auth=admin_auth
    )

    assert len(docs) == num_docs

    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
    changes_before_failover = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes_before_failover["results"]) == num_docs

    # Stop server via 'service stop'
    flakey_server.stop()

    start = time.time()
    while True:
        # Fail the test if the changes feed is not rebuilt before the timeout
        if (time.time() - start) > 60:
            # Bring the server back up before failing the test
            flakey_server.start()
            main_server.recover(flakey_server)
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutException("Failed to rebuild changes")

        try:
            # Poll until failover happens (~30 seconds)
            client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=seth_session)
            # changes requests succeeded, exit loop
            break
        except requests.exceptions.HTTPError:
            # Changes will fail until failover of the down server happens. Wait and try again.
            log_info("/db/_changes failed due to server down. Retrying ...")
            time.sleep(1)

    # Verify no new changes
    changes = client.get_changes(
        url=sg_url,
        db=sg_db,
        since=changes_before_failover["last_seq"],
        auth=seth_session,
        feed="normal"
    )
    assert len(changes["results"]) == 0

    # Check that all changes are intact from initial changes request
    changes = client.get_changes(url=sg_url, db=sg_db, since=initial_changes["last_seq"], auth=seth_session)
    assert len(changes["results"]) == num_docs

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)
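
# A hedged sketch of the changes request the client keyword wraps (assumed shape
# of MobileRestClient.get_changes; the URL and params follow Sync Gateway's
# /{db}/_changes REST endpoint):
import requests

def get_changes_sketch(sg_url, sg_db, since, session_cookies, feed="normal"):
    resp = requests.get(
        "{}/{}/_changes".format(sg_url, sg_db),
        params={"since": since, "feed": feed},
        cookies=session_cookies,
    )
    resp.raise_for_status()
    return resp.json()  # {"results": [...], "last_seq": ...}
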
def test_rebalance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg_one = topology["sync_gateways"][0]["admin"]
    sg_one_url = topology["sync_gateways"][0]["public"]

    cluster_servers = topology["couchbase_servers"]
    cbs_one_url = cluster_servers[0]
    cbs_two_url = cluster_servers[1]

    log_info("Running: 'test_distributed_index_rebalance_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg_one))
    log_info("sg_url: {}".format(sg_one_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    num_updates = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    cb_server = CouchbaseServer(cbs_one_url)
    server_to_remove = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    with concurrent.futures.ThreadPoolExecutor(5) as executor:

        # Add docs to sg
        log_info("Adding docs to sync_gateway")
        docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        # Start updating docs and rebalance out one CBS node
        log_info("Updating docs on sync_gateway")
        update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session)

        # Run the rebalance in the foreground while the update task runs in the background
        cb_server.rebalance_out(cluster_servers, server_to_remove)

        updated_docs = update_docs_task.result()
        log_info(updated_docs)

    # Verify docs / revisions present
    client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session)

    # Verify docs revisions in changes feed
    client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session)

    # Rebalance the server back into the pool
    cb_server.add_node(server_to_remove)
    cb_server.rebalance_in(cluster_servers, server_to_remove)
Example #39
def test_string_expiry_as_iso_8601_date(params_from_base_test_setup,
                                        sg_conf_name):
    """
    1. Calculate (server time + 3 seconds) as ISO-8601 date (e.g. 2016-01-01T00:00:00.000+00:00)
    2. PUT /db/doc1 via SG with property "_exp":"[date]"
       PUT /db/doc2 via SG with property "_exp":"2026-01-01T00:00:00.000+00:00"
    3. Wait five seconds
    4. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_string_expiry_as_ISO_8601_Date'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    time_util = Time()
    iso_datetime = time_util.get_iso_datetime(delta=3)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=iso_datetime,
                                         channels=sg_user_channels)
    doc_exp_years_body = document.create_doc(
        doc_id="exp_years",
        expiry="2026-01-01T00:00:00.000+00:00",
        channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_years = client.add_doc(url=sg_url,
                                   db=sg_db,
                                   doc=doc_exp_years_body,
                                   auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire
    time.sleep(10)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_3["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_3["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)

    # doc_exp_years should be available still
    doc_exp_years_result = client.get_doc(url=sg_url,
                                          db=sg_db,
                                          doc_id=doc_exp_years["id"],
                                          auth=sg_user_session)
    assert doc_exp_years_result["_id"] == "exp_years"

def test_server_goes_down_sanity(params_from_base_test_setup):
    """
    1. Start with a two node couchbase server cluster
    2. Start adding docs
    3. Kill one of the server nodes and signal completion
    4. Stop adding docs
    5. Verify that the expected docs are present and in the changes feed.
    6. Start server again and add to cluster
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg = topology["sync_gateways"][0]["admin"]
    sg_url = topology["sync_gateways"][0]["public"]
    couchbase_servers = topology["couchbase_servers"]

    cbs_one_url = couchbase_servers[0]
    cbs_two_url = couchbase_servers[1]

    log_info("Running: 'test_server_goes_down_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg))
    log_info("sg_url: {}".format(sg_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    main_server = CouchbaseServer(cbs_one_url)
    flakey_server = CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg, sg_db, sg_user_name)

    # Stop second server
    flakey_server.stop()

    # Try to add 100 docs in a loop until all succeed; if they never do, fail with a timeout

    # Wait 30 seconds for auto failover
    # (Minimum value suggested - http://docs.couchbase.com/admin/admin/Tasks/tasks-nodeFailover.html)
    # + 15 seconds to add docs
    timeout = 45
    start = time.time()

    successful_add = False
    while not successful_add:

        # Fail the test if all docs do not succeed before the timeout
        if (time.time() - start) > timeout:
            # Bring the server back up before failing the test
            flakey_server.start()
            main_server.rebalance_in(couchbase_servers, flakey_server)
            raise keywords.exceptions.TimeoutException("Failed to successfully put docs before timeout")

        try:
            docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs, id_prefix=None, auth=session, channels=channels)

            # If the above add_docs call does not throw, it was a successful add.
            successful_add = True
        except requests.exceptions.HTTPError as he:
            log_info("Failed to add docs: {}".format(he))
            time.sleep(1)

    assert len(docs) == 100
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=docs, auth=session)

    try:
        client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=docs, auth=session, polling_interval=5)
    except keywords.exceptions.TimeoutException:
        # timeout verifying docs. Bring server back in to restore topology, then fail
        # Failing due to https://github.com/couchbase/sync_gateway/issues/2197
        flakey_server.start()
        main_server.recover(flakey_server)
        main_server.rebalance_in(couchbase_servers, flakey_server)
        raise keywords.exceptions.TimeoutException("Failed to get all changes")

    # Test succeeded without timeout, bring server back into topology
    flakey_server.start()
    main_server.recover(flakey_server)
    main_server.rebalance_in(couchbase_servers, flakey_server)

    log_info("test_server_goes_down_sanity complete!")
Example #41
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 10 times with new revisions that carry no expiry value
    3. Wait past the original expiry and assert the doc is still available (the expiry was removed)
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=3,
                                         channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10",
                                          expiry=10,
                                          channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url,
                                db=sg_db,
                                doc=doc_exp_10_body,
                                auth=sg_user_session)

    doc_exp_3_updated = client.update_doc(url=sg_url,
                                          db=sg_db,
                                          doc_id=doc_exp_3["id"],
                                          number_updates=10,
                                          auth=sg_user_session)

    # Sleep long enough that doc_exp_3 would have expired had the expiry not been removed.
    # Expected behavior is that doc_exp_3 is still present because the updates removed the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    doc_exp_3_updated_result = client.get_doc(url=sg_url,
                                              db=sg_db,
                                              doc_id=doc_exp_3_updated["id"],
                                              auth=sg_user_session)
    assert doc_exp_3_updated_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    doc_exp_10_result = client.get_doc(url=sg_url,
                                       db=sg_db,
                                       doc_id=doc_exp_10["id"],
                                       auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
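
# Why the expiry disappears: a new revision whose body omits "_exp" is written
# with no TTL, which clears the expiration on the underlying document. A hedged
# raw-REST equivalent of such an update (helper name hypothetical):
import requests

def put_revision_without_expiry_sketch(sg_url, sg_db, doc_id, current_rev, session_cookies):
    resp = requests.put(
        "{}/{}/{}".format(sg_url, sg_db, doc_id),
        params={"rev": current_rev},
        json={"sample_key": "sample_val"},  # no "_exp" property -> expiry removed
        cookies=session_cookies,
    )
    resp.raise_for_status()
    return resp.json()
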
Example #42
def test_openidconnect_basic_test(params_from_base_test_setup, sg_conf_name, is_admin_port, expect_signed_id_token):
    """Tests the basic OpenIDConnect login flow against the non-admin port when is_admin_port=False
    Tests the basic OpenIDConnect login flow against the admin port when is_admin_port=True
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)
    sg_url = topology["sync_gateways"][0]["public"]
    sg_db = "db"

    log_info("Running 'test_openidconnect_basic_test'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using is_admin_port: {}".format(is_admin_port))
    log_info("Using expect_signed_id_token: {}".format(expect_signed_id_token))

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    # make a request against the db and expect a 401 response since we haven't authenticated yet.
    # (but there's no point in doing this on the admin port since we'll never get a 401)
    if not is_admin_port:
        db_url = "{}/{}".format(sg_url, sg_db)
        resp = requests.get(db_url)
        assert resp.status_code == 401, "Expected 401 response"

    # get the authenticate endpoint and query params, should look something like:
    #     authenticate?client_id=sync_gateway&redirect_uri= ...
    authenticate_endpoint = discover_authenticate_endpoint(sg_url, sg_db, DEFAULT_PROVIDER)
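
    # A hedged sketch (an assumption, not the testkit's actual helper) of how such
    # discovery can work: hit SG's _oidc endpoint without following redirects and
    # take the trailing path segment of the Location header, e.g.:
    #
    #     resp = requests.get("{}/{}/_oidc?provider={}&offline=true".format(sg_url, sg_db, DEFAULT_PROVIDER),
    #                         allow_redirects=False)
    #     authenticate_endpoint = resp.headers["Location"].split("/")[-1]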

    # build the full url
    authenticate_endpoint_url = "{}/{}/_oidc_testing/{}".format(
        sg_url,
        sg_db,
        authenticate_endpoint
    )

    # Make the request to _oidc_testing
    # multipart/form data content
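    # (each ('', value) tuple makes requests encode a multipart field with no
    # filename, matching what the test provider's HTML login form submits)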
    formdata = {
        'username': ('', 'testuser'),
        'authenticated': ('', 'Return a valid authorization code for this user')
    }
    authenticate_response = requests.post(authenticate_endpoint_url, files=formdata)
    set_cookie_response_header = authenticate_response.headers['Set-Cookie']
    log_r(authenticate_response)

    # extract the token from the response
    authenticate_response_json = authenticate_response.json()
    id_token = authenticate_response_json["id_token"]
    refresh_token = authenticate_response_json["refresh_token"]

    # make sure the id token has the email field in it
    decoded_id_token = jwt.decode(id_token, verify=False)
    assert "email" in decoded_id_token.keys()

    # make a request using the ID token against the db and expect a 200 response
    headers = {"Authorization": "Bearer {}".format(id_token)}
    db_url = "{}/{}".format(sg_url, sg_db)
    resp = requests.get(db_url, headers=headers)
    log_r(resp)
    if expect_signed_id_token:
        assert resp.status_code == 200, "Expected 200 response for bearer ID token"
    else:
        assert resp.status_code == 401, "Expected 401 response for bearer ID token"

    # make a request using the cookie against the db and expect a 200 response
    db_url = "{}/{}".format(sg_url, sg_db)
    resp = requests.get(db_url, cookies=extract_cookie(set_cookie_response_header))
    log_r(resp)
    assert resp.status_code == 200, "Expected 200 response when using session cookie"

    # make a request using the session_id that's sent in the body
    resp = requests.get(db_url, cookies={"SyncGatewaySession": authenticate_response_json["session_id"]})
    assert resp.status_code == 200, "Expected 200 response using session_id from body"

    # try to use the refresh token to get a few new id_tokens
    id_tokens = [id_token]
    for i in xrange(3):

        # This pause is required because, according to @ajres, the id_token will
        # only be unique if the two calls are more than a second apart. Adding an
        # atomically incrementing nonce claim to each token would make them
        # always unique.
        time.sleep(2)

        refresh_token_url = "{}/{}/_oidc_refresh?refresh_token={}&provider={}".format(sg_url, sg_db, refresh_token, "test")
        authenticate_response = requests.get(refresh_token_url)
        authenticate_response_json = authenticate_response.json()
        id_token_refresh = authenticate_response_json["id_token"]
        # make sure we get a unique id token each time
        assert id_token_refresh not in id_tokens

        # make a request using the ID token against the db and expect a 200 response
        headers = {"Authorization": "Bearer {}".format(id_token_refresh)}
        resp = requests.get(db_url, headers=headers)
        log_r(resp)
        if expect_signed_id_token:
            assert resp.status_code == 200, "Expected 200 response for bearer ID token on refresh"
        else:
            assert resp.status_code == 401, "Expected 401 response for bearer ID token on refresh"

        id_tokens.append(id_token_refresh)
Example #43
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup,
                                     sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents.  Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents.  Validate that only the 5 documents that have not expired are returned
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')
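    # (couchbase.bucket.Bucket with a connection string is the Python SDK 2.x
    # API; SDK 3.x connects through a Cluster object instead)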

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3",
                                            number=5,
                                            expiry=3,
                                            channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10",
                                             number=5,
                                             expiry=10,
                                             channels=sg_user_channels)

    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies

    bulk_docs = client.add_bulk_docs(url=sg_url,
                                     db=sg_db,
                                     docs=bulk_bodies,
                                     auth=sg_user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_docs_ids = [doc["id"] for doc in bulk_docs]

    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = [
        "exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"
    ]

    bulk_get_docs, errors = client.get_bulk_docs(url=sg_url,
                                                 db=sg_db,
                                                 doc_ids=bulk_docs_ids,
                                                 auth=sg_user_session,
                                                 validate=False)
    assert len(bulk_get_docs) == len(expected_ids)
    assert len(errors) == len(expected_missing_ids)

    bulk_get_doc_ids = [doc["_id"] for doc in bulk_get_docs]
    error_ids = [doc["id"] for doc in errors]

    assert bulk_get_doc_ids == expected_ids
    assert error_ids == expected_missing_ids

    client.verify_doc_ids_found_in_response(response=bulk_get_docs,
                                            expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(
        response=errors, expected_missing_doc_ids=expected_missing_ids)

    for expired_doc in error_ids:
        verify_doc_deletion_on_server(doc_id=expired_doc,
                                      sdk_client=sdk_client,
                                      sg_client=client,
                                      sg_admin_url=sg_url_admin,
                                      sg_db=sg_db,
                                      xattrs_enabled=xattrs_enabled)
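
# A hedged sketch (an assumption, not the testkit source) of the server-side
# check used above: once a doc has expired, a direct SDK get should fail.
#
#     from couchbase.exceptions import NotFoundError
#
#     def verify_doc_deletion_on_server(doc_id, sdk_client, **kwargs):
#         try:
#             sdk_client.get(doc_id)
#             raise AssertionError("{} should have expired".format(doc_id))
#         except NotFoundError:
#             pass
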
def test_rebalance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_version = get_sg_version(cluster_config)
    if compare_versions(sg_version, '1.5') < 0:
        pytest.skip("This test needs multiple URLs in the SG config, not supported by SG < 1.5")

    cluster_helper = ClusterKeywords()

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf_path)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    admin_sg_one = topology["sync_gateways"][0]["admin"]
    sg_one_url = topology["sync_gateways"][0]["public"]

    cluster_servers = topology["couchbase_servers"]
    cbs_one_url = cluster_servers[0]
    cbs_two_url = cluster_servers[1]

    log_info("Running: 'test_distributed_index_rebalance_sanity'")
    log_info("cluster_config: {}".format(cluster_config))
    log_info("admin_sg: {}".format(admin_sg_one))
    log_info("sg_url: {}".format(sg_one_url))
    log_info("cbs_one_url: {}".format(cbs_one_url))
    log_info("cbs_two_url: {}".format(cbs_two_url))

    sg_db = "db"
    num_docs = 100
    num_updates = 100
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()
    cb_server = couchbaseserver.CouchbaseServer(cbs_one_url)
    server_to_remove = couchbaseserver.CouchbaseServer(cbs_two_url)

    client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    with concurrent.futures.ThreadPoolExecutor(5) as executor:

        # Add docs to sg
        log_info("Adding docs to sync_gateway")
        docs = client.add_docs(sg_one_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        # Start updating docs and rebalance out one CBS node
        log_info("Updating docs on sync_gateway")
        update_docs_task = executor.submit(client.update_docs, sg_one_url, sg_db, docs, num_updates, auth=session)

        # Rebalance out one CBS node while the doc updates run in the background
        cb_server.rebalance_out(cluster_servers, server_to_remove)

        updated_docs = update_docs_task.result()
        log_info(updated_docs)

    # Verify docs / revisions present
    client.verify_docs_present(sg_one_url, sg_db, updated_docs, auth=session)

    # Verify docs revisions in changes feed
    client.verify_docs_in_changes(sg_one_url, sg_db, updated_docs, auth=session)

    # Rebalance the server back into the pool
    cb_server.add_node(server_to_remove)
    cb_server.rebalance_in(cluster_servers, server_to_remove)