Example #1
def setup_p2p_test(request, setup_p2p_suite):

    """Test setup fixture for p2p client tests"""

    log_info("Setting up P2P test ...")

    liteserv_one = setup_p2p_suite["liteserv_one"]
    liteserv_two = setup_p2p_suite["liteserv_two"]

    test_name = request.node.name

    print("Starting LiteServ One ...")
    ls_logging_one = "{}/logs/{}-ls1-{}-{}.txt".format(RESULTS_DIR, type(liteserv_one).__name__, test_name, datetime.datetime.now())
    ls_url_one = liteserv_one.start(ls_logging_one)

    print("Starting LiteServ Two ...")
    ls_logging_two = "{}/logs/{}-ls2-{}-{}.txt".format(RESULTS_DIR, type(liteserv_two).__name__, test_name, datetime.datetime.now())
    ls_url_two = liteserv_two.start(ls_logging_two)

    # Yield values to test case via fixture argument
    yield {"ls_url_one": ls_url_one, "ls_url_two": ls_url_two}

    log_info("Tearing down test")

    # Teardown test
    client = MobileRestClient()
    client.delete_databases(ls_url_one)
    client.delete_databases(ls_url_two)

    liteserv_one.stop()
    liteserv_two.stop()
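
Because setup_p2p_test is a pytest yield fixture, everything after the yield runs as teardown once the test finishes. A minimal sketch of a test consuming the yielded dict (test body assumed for illustration):

def test_p2p_example(setup_p2p_test):
    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]
    # ... exercise the two LiteServ instances via MobileRestClient ...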
def test_raw_attachment(setup_client_syncgateway_test):
    """
    1.  Add Text attachment to sync_gateway
    2.  Try to get the raw attachment
    Pass: It is possible to get the raw attachment
    """

    log_info("Running 'test_raw_attachment'")

    ls_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()

    ls_db = client.create_database(ls_url, name="ls_db")

    ls_user_channels = ["NBC"]

    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc",
                                       content={"sample_key": "sample_val"},
                                       attachments=atts,
                                       channels=ls_user_channels)

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_with_att)

    att = client.get_attachment(url=ls_url,
                                db=ls_db,
                                doc_id=doc["id"],
                                attachment_name="sample_text.txt")

    expected_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."

    assert expected_text == att
def test_raw_attachment(setup_client_syncgateway_test):
    """
    1.  Add Text attachment to sync_gateway
    2.  Try to get the raw attachment
    Pass: It is possible to get the raw attachment
    """

    log_info("Running 'test_raw_attachment'")

    ls_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()

    ls_db = client.create_database(ls_url, name="ls_db")

    ls_user_channels = ["NBC"]

    doc_with_att = document.create_doc(
        doc_id="att_doc",
        content={"sample_key": "sample_val"},
        attachment_name="sample_text.txt",
        channels=ls_user_channels,
    )

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_with_att)

    att = client.get_attachment(url=ls_url, db=ls_db, doc_id=doc["id"], attachment_name="sample_text.txt")

    expected_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."

    assert expected_text == att
Example #5
def grant_users_access(users, channels, sg_admin_url, sg_db):
    sg_client = MobileRestClient()
    for username in users:
        sg_client.update_user(url=sg_admin_url,
                              db=sg_db,
                              name=username,
                              channels=channels)
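
A minimal usage sketch of this helper (the URLs, user names, and db name here are assumed for illustration):

grant_users_access(users=["seth", "adam"],
                   channels=["NBC"],
                   sg_admin_url="http://localhost:4985",
                   sg_db="db")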
Example #6
def send_changes_termination_doc(auth, terminator_doc_id, terminator_channel,
                                 ls_url, ls_db):
    ls_client = MobileRestClient()
    doc_body = {}
    doc_body["channels"] = terminator_channel
    doc_body["_id"] = terminator_doc_id
    doc_body["foo"] = "bar"
    ls_client.add_doc(ls_url, ls_db, doc_body, auth=auth, use_post=False)
Example #7
def send_changes_termination_doc(sg_url, sg_db, users, terminator_doc_id,
                                 terminator_channel):
    sg_client = MobileRestClient()

    random_user_id = random.choice(list(users.keys()))
    random_user = users[random_user_id]
    log_info('Sending changes termination doc for all users')
    doc = {'_id': terminator_doc_id, 'channels': [terminator_channel]}
    sg_client.add_doc(url=sg_url, db=sg_db, doc=doc, auth=random_user['auth'])
def test_load_balance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_util = ClusterKeywords()
    cluster_util.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf_path
    )

    topology = cluster_util.get_cluster_topology(cluster_config)
    admin_sg_one = topology["sync_gateways"][0]["admin"]
    lb_url = "{}:4984".format(topology["load_balancers"][0])

    sg_db = "db"
    num_docs = 1000
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    log_info(user)
    log_info(session)

    log_info("Adding docs to the load balancer ...")

    ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        log_info("Starting ...")
        ct_task = executor.submit(ct.start)
        log_info("Adding docs ...")
        docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        log_info("Adding docs done")
        wait_for_changes = executor.submit(ct.wait_until, docs)

        if wait_for_changes.result():
            log_info("Stopping ...")
            log_info("Found all docs ...")
            executor.submit(ct.stop)
            ct_task.result()
        else:
            executor.submit(ct.stop)
            ct_task.result()
            raise Exception("Could not find all changes in feed before timeout!!")
def test_load_balance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_util = ClusterKeywords()
    cluster_util.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf_path
    )

    topology = cluster_util.get_cluster_topology(cluster_config)
    admin_sg_one = topology["sync_gateways"][0]["admin"]
    lb_url = topology["load_balancers"][0]

    sg_db = "db"
    num_docs = 1000
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    log_info(user)
    log_info(session)

    log_info("Adding docs to the load balancer ...")

    ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        log_info("Starting ...")
        ct_task = executor.submit(ct.start)
        log_info("Adding docs ...")
        docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        log_info("Adding docs done")
        wait_for_changes = executor.submit(ct.wait_until, docs)

        if wait_for_changes.result():
            log_info("Stopping ...")
            log_info("Found all docs ...")
            executor.submit(ct.stop)
            ct_task.result()
        else:
            executor.submit(ct.stop)
            ct_task.result()
            raise Exception("Could not find all changes in feed before timeout!!")
def test_net_msft_storage_engine(request, liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    liteserv.stop()

    storage_engine = liteserv.storage_engine
    log_info("Testing storage_engine: {}".format(storage_engine))

    with open(logfile, "r") as f:
        contents = f.read()

        if storage_engine == "SQLite":

            # Note: SQLite mode uses SQLCipher by default
            assert (
                "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2"
                in contents
            )
            assert "encryption key given" not in contents

        elif storage_engine == "SQLCipher":

            assert (
                "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2"
                in contents
            )
            assert "Open C:\Users\user\Desktop\LiteServ\ls_db.cblite2\db.sqlite3" in contents
            assert "encryption key given"

        elif storage_engine == "ForestDB":

            assert (
                "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2"
                in contents
            )
            assert "Database is encrypted; setting CBForest encryption key" not in contents

        elif storage_engine == "ForestDB+Encryption":

            assert (
                "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2"
                in contents
            )
            assert "Database is encrypted; setting CBForest encryption key" in contents

        else:
            pytest.xfail("Invalid Storage Engine")
Example #11
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents.  Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents.  Validate that only the 5 documents that have not yet expired are returned
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3", number=5, expiry=3, channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10", number=5, expiry=10, channels=sg_user_channels)

    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies

    bulk_docs = client.add_bulk_docs(url=sg_url, db=sg_db, docs=bulk_bodies, auth=sg_user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_get_docs = client.get_bulk_docs(url=sg_url, db=sg_db, docs=bulk_docs, auth=sg_user_session)

    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = ["exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"]

    client.verify_doc_ids_found_in_response(response=bulk_get_docs, expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(response=bulk_get_docs, expected_missing_doc_ids=expected_missing_ids)
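
For reference, each body produced by document.create_docs(..., expiry=3) is expected to carry Sync Gateway's "_exp" property; a sketch of the shape (helper internals assumed):

doc_body = {
    "_id": "exp_3_0",
    "_exp": 3,  # numeric TTL in seconds
    "channels": ["NBC", "ABC"],
}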
Example #12
def verify_sg_docs_revision_history(url, db, added_docs):
    sg_client = MobileRestClient()
    expected_doc_map = {
        added_doc["id"]: added_doc["rev"]
        for added_doc in added_docs
    }
    doc_ids = expected_doc_map.keys()

    log_info("Bulk getting docs from sync gateway")
    docs = sg_client.get_bulk_docs(url, db, doc_ids, rev_history="true")
    assert len(docs[0]) == len(doc_ids)

    for doc in docs:
        for doc_dict in doc:
            rev = doc_dict["_rev"]
            rev_gen = int(rev.split("-")[0])
            doc_id = doc_dict["_id"]
            # Verify meta data
            log_info("Verifying that doc {} has rev {}".format(
                doc_id, expected_doc_map[doc_id]))
            assert rev == expected_doc_map[doc_id]
            log_info(
                "Doc {}: Expected number of revs: {}, Actual revs: {}".format(
                    doc_id, rev_gen, len(doc_dict["_revisions"]["ids"])))
            assert len(doc_dict["_revisions"]["ids"]) == rev_gen
            log_info(
                "Verifying that doc {} is associated with sg_user_channel channel"
                .format(doc_id))
            assert doc_dict["channels"][0] == "sg_user_channel"
            # Verify doc body
            log_info("Verifying doc body for {}".format(doc_id))
            assert "guid" in doc_dict
            assert "index" in doc_dict
            assert "latitude" in doc_dict
            assert "email" in doc_dict
            assert "picture" in doc_dict
            assert len(doc_dict["tags"]) == 3
            assert "date_time_added" in doc_dict
            assert "company" in doc_dict
            assert "eyeColor" in doc_dict
            assert "phone" in doc_dict
            assert "updates" in doc_dict
            assert "address" in doc_dict
            assert len(doc_dict["friends"]) == 2
            assert "isActive" in doc_dict
            assert "about" in doc_dict
            assert "name" in doc_dict
            assert "age" in doc_dict
            assert "registered" in doc_dict
            assert "longitude" in doc_dict
            assert "_attachments" in doc_dict
            assert "range" in doc_dict
            assert "balance" in doc_dict
            log_info("Verified doc body for {}".format(doc_id))
Example #13
def start_polling_changes_worker(sg_url, sg_db, user_name, user_auth,
                                 changes_delay, changes_limit,
                                 terminator_doc_id, feed, channels_filtered,
                                 doc_ids_filtered):
    sg_client = MobileRestClient()
    since = 0
    latest_changes = {}
    found_terminator = False

    # Pass a channel filter to changes request if filtered is true
    filter_type = None
    filter_channels = None
    filter_doc_ids = None

    if channels_filtered:
        filter_type = 'sync_gateway/bychannel'
        filter_channels = ['even', 'terminator']

    elif doc_ids_filtered:
        filter_type = '_doc_ids'
        filter_doc_ids = ['terminator']

    while True:

        # If terminator doc is found, terminate the polling loop
        if found_terminator:
            log_info('Found terminator ({}, {})'.format(user_name, feed))
            return user_name, latest_changes

        log_info('_changes ({}) for ({}) since: {}'.format(
            feed, user_name, since))
        changes = sg_client.get_changes(url=sg_url,
                                        db=sg_db,
                                        since=since,
                                        auth=user_auth,
                                        feed=feed,
                                        limit=changes_limit,
                                        filter_type=filter_type,
                                        filter_channels=filter_channels,
                                        filter_doc_ids=filter_doc_ids)

        # A termination doc was processed, exit on the next loop
        for change in changes['results']:
            if change['id'] == terminator_doc_id:
                found_terminator = True
            else:
                # Add the latest rev to the latest_changes map
                if len(change['changes']) >= 1:
                    latest_changes[change['id']] = change['changes'][0]['rev']
                else:
                    latest_changes[change['id']] = ''

        since = changes['last_seq']
        time.sleep(changes_delay)
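
This worker is designed to be driven from a thread pool and stopped by the termination-doc helpers shown in Examples #6 and #7. A wiring sketch (assumes concurrent.futures is imported and that sg_url, sg_db, user_auth, and users already exist):

with concurrent.futures.ThreadPoolExecutor(max_workers=2) as ex:
    task = ex.submit(start_polling_changes_worker, sg_url, sg_db,
                     'seth', user_auth, changes_delay=1, changes_limit=50,
                     terminator_doc_id='terminator', feed='longpoll',
                     channels_filtered=False, doc_ids_filtered=False)
    send_changes_termination_doc(sg_url, sg_db, users,
                                 terminator_doc_id='terminator',
                                 terminator_channel='terminator')
    user_name, latest_changes = task.result()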
Example #14
def test_numeric_expiry_as_ttl(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
       PUT /db/doc2 via SG with property "_exp":10
    2. Wait five seconds
    3. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_numeric_expiry_as_ttl'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], auth=sg_user_session)
    assert he.value[0].startswith("404 Client Error: Not Found for url:")

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
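
Note that Sync Gateway accepts more than one "_exp" encoding; a sketch of the commonly documented forms (values illustrative, not taken from this test):

doc_exp_offset = {"_id": "exp_offset", "_exp": 3}            # offset in seconds from now
doc_exp_epoch = {"_id": "exp_epoch", "_exp": 1767225600}     # absolute Unix timestamp
doc_exp_iso = {"_id": "exp_iso", "_exp": "2026-01-01T00:00:00+00:00"}  # ISO-8601 datetime string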
Example #15
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 with a new revision with no expiry value
    3. After 10 updates, update /db/doc1 with a revision with no expiry
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf
    )

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3", expiry=3, channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10", expiry=10, channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_3_body, auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url, db=sg_db, doc=doc_exp_10_body, auth=sg_user_session)

    doc_exp_3_updated = client.update_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3["id"], number_updates=10, auth=sg_user_session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    doc_exp_3_updated_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_3_updated["id"], auth=sg_user_session)
    assert doc_exp_3_updated_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    doc_exp_10_result = client.get_doc(url=sg_url, db=sg_db, doc_id=doc_exp_10["id"], auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
Example #16
def test_auto_prune_listener_sanity(setup_client_syncgateway_test):
    """Sanity test for the autoprune feature

    1. Create a db and put a doc
    2. Update the docs past the default revs_limit (20)
    3. Assert that the docs only retain 20 revs
    """

    ls_url = setup_client_syncgateway_test["ls_url"]
    client = MobileRestClient()

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))

    num_docs = 1
    num_revs = 100

    ls_db = client.create_database(url=ls_url, name="ls_db")
    docs = client.add_docs(url=ls_url,
                           db=ls_db,
                           number=num_docs,
                           id_prefix="ls_db_doc")
    assert len(docs) == num_docs

    client.update_docs(url=ls_url,
                       db=ls_db,
                       docs=docs,
                       number_updates=num_revs)

    client.verify_max_revs_num_for_docs(url=ls_url,
                                        db=ls_db,
                                        docs=docs,
                                        expected_max_number_revs_per_doc=20)
def test_revs_cache_size(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary Test for no-conflicts with rev_cache size
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #18
    Steps:
    Note: the sg config sets rev_cache_size to 1000; keep the number of docs below 1000 so the test
    behaves as expected.
    1. Add docs to SG.
    2. Get the docs.
    3. Verify that the number of rev_cache hits equals the number of docs retrieved, since rev_cache_size is larger than the number of docs.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    retrieved_docs = num_of_docs // 2

    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]

    if sync_gateway_version < "2.0":
        pytest.skip('It does not work with sg < 2.0, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 2. Get the docs
    for i in range(retrieved_docs):
        doc = sg_docs[i]
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)

    # 3. Verify the number of cache hits matches the number of retrieved docs
    exp_vars = sg_client.get_expvars(url=sg_admin_url)
    revision_cache_hits = exp_vars["syncGateway_stats"]["revisionCache_hits"]
    revision_cache_misses = exp_vars["syncGateway_stats"]["revisionCache_misses"]
    assert revision_cache_hits == retrieved_docs, "Revision Cache hits did not match expected number {}".format(retrieved_docs)
    assert revision_cache_misses == 0, "Revision Cache misses is not 0"
def test_android_storage_engine(request,
                                liteserv_with_storage_engine_from_fixture):
    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name

    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR,
                                            type(liteserv).__name__, test_name,
                                            datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    liteserv.stop()

    # Look in adb logcat to see if the output matches the platform / storage engine expectation
    # To my knowledge, we can't look at the database files directly without a rooted device
    liteserv_output = []
    with open(logfile, "r") as f:
        lines = f.readlines()
        for line in lines:
            if "LiteServ" in line:
                line = line.strip()
                liteserv_output.append(line)

    log_info(liteserv_output)
    if liteserv.storage_engine == "SQLite":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=SQLite")
        assert liteserv_output[1].endswith("dbpassword="******"SQLCipher":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=SQLite")
        assert liteserv_output[1].endswith(
            "dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass")
    elif liteserv.storage_engine == "ForestDB":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=ForestDB")
        assert liteserv_output[1].endswith("dbpassword="******"ForestDB+Encryption":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=ForestDB")
        assert liteserv_output[1].endswith(
            "dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass")
    else:
        pytest.xfail("Invalid storage engine")
Example #19
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite
    test_name = request.node.name

    ls_url = liteserv.start(
        "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    )

    cluster_helper = ClusterKeywords()
    sg_helper = SyncGateway()

    cluster_hosts = cluster_helper.get_cluster_topology(os.environ["CLUSTER_CONFIG"])

    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]
    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_url)

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": os.environ["CLUSTER_CONFIG"],
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
    }

    log_info("Tearing down test")

    # Teardown test
    client = MobileRestClient()
    client.delete_databases(ls_url)

    liteserv.stop()

    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_url)

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=os.environ["CLUSTER_CONFIG"], test_name=test_name)
def test_net_msft_storage_engine(request, liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    liteserv.stop()

    storage_engine = liteserv.storage_engine
    log_info("Testing storage_engine: {}".format(storage_engine))

    with open(logfile, "r") as f:
        contents = f.read()

        if storage_engine == "SQLite":

            # Note: SQLite mode uses SQLCipher by default
            assert "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "encryption key given" not in contents

        elif storage_engine == "SQLCipher":

            assert "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "Open C:\Users\user\Desktop\LiteServ\ls_db.cblite2\db.sqlite3" in contents
            assert "encryption key given"

        elif storage_engine == "ForestDB":

            assert "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "Database is encrypted; setting CBForest encryption key" not in contents

        elif storage_engine == "ForestDB+Encryption":

            assert "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "Database is encrypted; setting CBForest encryption key" in contents

        else:
            pytest.xfail("Invalid Storage Engine")
Example #21
def create_users_add_docs_task(user_name, sg_admin_url, sg_url, sg_db,
                               number_docs_per_user, batch_size, create_delay):

    sg_client = MobileRestClient()

    # Create user
    if user_name.startswith('unique'):
        # Doc channel should be unique for each users
        channels = [user_name]
    elif user_name.startswith('shared'):
        # Doc channel should be shared for each doc with this user type
        channels = ['shared']
    elif user_name.startswith('filtered_channel'):
        channels = ['even', 'odd']
    elif user_name.startswith('filtered_doc_ids'):
        channels = ['terminator']
    else:
        raise ValueError('Unexpected user type: {}'.format(user_name))

    sg_client.create_user(url=sg_admin_url,
                          db=sg_db,
                          name=user_name,
                          password=USER_PASSWORD,
                          channels=channels)

    # Create session
    user_auth = sg_client.create_session(url=sg_admin_url,
                                         db=sg_db,
                                         name=user_name,
                                         password=USER_PASSWORD)

    # Start bulk doc creation
    doc_ids = add_user_docs(client=sg_client,
                            sg_url=sg_url,
                            sg_db=sg_db,
                            user_name=user_name,
                            user_auth=user_auth,
                            channels=channels,
                            number_docs_per_user=number_docs_per_user,
                            batch_size=batch_size,
                            create_delay=create_delay)

    return user_name, user_auth, doc_ids
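
A plausible way to fan this task out across users (a sketch; the executor wiring and parameter values are assumed):

with concurrent.futures.ThreadPoolExecutor(max_workers=4) as ex:
    tasks = [ex.submit(create_users_add_docs_task, 'unique_user_{}'.format(i),
                       sg_admin_url, sg_url, sg_db,
                       number_docs_per_user=100, batch_size=10, create_delay=0.1)
             for i in range(4)]
    # Each result is a (user_name, user_auth, doc_ids) tuple
    results = [task.result() for task in tasks]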
def test_writing_attachment_to_couchbase_server(params_from_base_test_setup, sg_conf_name):
    """
    1. Start sync_gateway with sync function that rejects all writes:
    function(doc, oldDoc) {
      throw({forbidden:"No writes!"});
    }
    2. Create a doc with attachment
    3. Use CBS sdk to see if attachment doc exists.  Doc ID will look like _sync:att:sha1-Kq5sNclPz7QV2+lfQIuc6R7oRu0= (where the suffix is the digest)
    4. Assert att doc exists
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_writing_attachment_to_couchbase_server'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    sg_user_name = "sg_user"
    sg_user_password = "******"

    sg_user_channels = ["NBC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin, db=sg_db, name=sg_user_name, password=sg_user_password, channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin, db=sg_db, name=sg_user_name)

    docs = client.add_docs(url=sg_url, db=sg_db, number=100, id_prefix=sg_db, channels=sg_user_channels, auth=sg_user_session)
    assert len(docs) == 100

    # Create doc with attachment and push to sync_gateway
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=sg_user_channels)

    client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=sg_user_session)
    server = CouchbaseServer(cbs_url)

    # Assert that the attachment doc gets written to couchbase server
    server_att_docs = server.get_server_docs_with_prefix(bucket=bucket, prefix="_sync:att:")
    num_att_docs = len(server_att_docs)
    assert num_att_docs == 1
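
The "_sync:att:" key mentioned in the docstring is derived from the attachment's digest; a sketch of the likely derivation (naming scheme inferred from the docstring's example, not confirmed against Sync Gateway internals):

import base64
import hashlib

att_data = open("sample_text.txt", "rb").read()
digest = base64.b64encode(hashlib.sha1(att_data).digest()).decode("ascii")
att_doc_key = "_sync:att:sha1-{}".format(digest)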
def test_android_storage_engine(request, liteserv_with_storage_engine_from_fixture):
    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name

    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    liteserv.stop()

    # Look in adb logcat to see if the output matches the platform / storage engine expectation
    # To my knowledge, we can't look at the database files directly without a rooted device
    liteserv_output = []
    with open(logfile, "r") as f:
        lines = f.readlines()
        for line in lines:
            if "LiteServ" in line:
                line = line.strip()
                liteserv_output.append(line)

    log_info(liteserv_output)
    if liteserv.storage_engine == "SQLite":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=SQLite")
        assert liteserv_output[1].endswith("dbpassword="******"SQLCipher":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=SQLite")
        assert liteserv_output[1].endswith("dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass")
    elif liteserv.storage_engine == "ForestDB":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=ForestDB")
        assert liteserv_output[1].endswith("dbpassword="******"ForestDB+Encryption":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=ForestDB")
        assert liteserv_output[1].endswith("dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass")
    else:
        pytest.xfail("Invalid storage engine")
Example #24
def start_continuous_changes_worker(sg_url, sg_db, user_name, user_auth,
                                    terminator_doc_id, channels_filtered):

    sg_client = MobileRestClient()

    latest_changes = {}

    # Pass a channel filter to changes request if filtered is true
    filter_type = None
    filter_channels = None

    if channels_filtered:
        filter_type = 'sync_gateway/bychannel'
        filter_channels = ['even', 'terminator']

    log_info('_changes (continuous) for ({}) since: 0'.format(user_name))
    stream = sg_client.stream_continuous_changes(
        sg_url,
        sg_db,
        since=0,
        auth=user_auth,
        filter_type=filter_type,
        filter_channels=filter_channels)

    for line in stream.iter_lines():

        # filter out keep-alive new lines
        if line:
            decoded_line = line.decode('utf-8')
            change = json.loads(decoded_line)

            if change['id'] == terminator_doc_id:
                log_info('Found terminator ({}, continuous)'.format(user_name))
                return user_name, latest_changes

            else:
                if len(change['changes']) >= 1:
                    latest_changes[change['id']] = change['changes'][0]['rev']
                else:
                    latest_changes[change['id']] = ''
def test_peer_2_peer_sanity_pull(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 5000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1

    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2")
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
Example #26
def test_longpoll_changes_termination_timeout(setup_client_syncgateway_test):
    """https://github.com/couchbase/couchbase-lite-java-core/issues/1296
    1. Create 30 longpoll _changes in a loop (with timeout parameter = 5s)
    2. Cancel the request after 2s
    3. Wait 5.1s
    4. Create another request GET /db/ on listener and make sure the listener responds
    """
    ls_db = "ls_db"
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("Running 'test_longpoll_changes_termination' ...")
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()
    client.create_database(ls_url, ls_db)

    ct = ChangesTracker(ls_url, ls_db)

    with ThreadPoolExecutor(max_workers=35) as executor:

        futures = [
            executor.submit(ct.start, timeout=5000, request_timeout=2000)
            for _ in range(30)
        ]

        for future in as_completed(futures):
            log_info("Future _changes loop complete")

    log_info("Futures exited")

    # make sure client can still take connections
    dbs = client.get_databases(url=ls_url)
    log_info(dbs)
    database = client.get_database(url=ls_url, db_name=ls_db)
    log_info(database)
Example #27
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite["liteserv"]
    cluster_config = setup_client_syncgateway_suite["cluster_config"]
    xattrs_enabled = setup_client_syncgateway_suite["xattrs_enabled"]
    device_enabled = setup_client_syncgateway_suite["device_enabled"]
    liteserv_platform = setup_client_syncgateway_suite["liteserv_platform"]
    liteserv_version = setup_client_syncgateway_suite["liteserv_version"]
    test_name = request.node.name

    if request.config.getoption("--liteserv-platform") == "macosx" and \
            str(request.config.getoption("--liteserv-version")).startswith("1.3.1") and \
            str(test_name).startswith("test_longpoll_changes_termination"):
        pytest.skip("test_longpoll_changes_termination tests are known to fail on macosx with 1.3.1 CBL")

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    log_info("Starting LiteServ...")
    if device_enabled and liteserv_platform == "ios":
        ls_url = liteserv.start_device("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()))
    else:
        ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_config=cluster_config)

    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]
    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "sg_mode": setup_client_syncgateway_suite["sg_mode"],
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "xattrs_enabled": xattrs_enabled,
        "liteserv": liteserv,
        "liteserv_platform": liteserv_platform,
        "device_enabled": device_enabled,
        "liteserv_version": liteserv_version
    }

    log_info("Tearing down test")

    client.delete_databases(ls_url)
    liteserv.stop()

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)
Example #28
def test_peer_2_peer_sanity_push_one_shot(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 1 ls_db1
    4. Create one shot push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 2 ls_db2
    6. Verify all docs show up in changes for LiteServ 2 ls_db2
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 10000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    bulk_docs = create_docs("test_ls_db1", num_docs_per_db)
    ls_db1_docs = client.add_bulk_docs(ls_url_one, ls_db1, bulk_docs)
    assert len(ls_db1_docs) == num_docs_per_db

    # Setup one shot push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a one-shot push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one,
        continuous=False,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))

    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs)
def test_net_msft_full_life_cycle(request, liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")
    docs = client.add_docs(ls_url, db="ls_db", number=10, id_prefix="test_doc")
    assert len(docs) == 10

    client.delete_databases(ls_url)

    liteserv.stop()
Example #30
def test_longpoll_changes_termination_heartbeat(setup_client_syncgateway_test):
    """https://github.com/couchbase/couchbase-lite-java-core/issues/1296
    Create 30 longpoll _changes in a loop (with heartbeat parameter = 5s)
    Cancel the request after 2s
    Wait 5.1s
    Create another request GET /db/ on listener and make sure the listener responds
    """

    log_info("Running 'longpoll_changes_termination' ...")

    ls_db = "ls_db"
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("Running 'test_longpoll_changes_termination' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    client = MobileRestClient()
    if client.get_server_platform(ls_url) == Platform.macosx:
        pytest.skip("https://github.com/couchbase/couchbase-lite-ios/issues/1236")

    client.create_database(ls_url, ls_db)

    ct = ChangesTracker(ls_url, ls_db)

    with ThreadPoolExecutor(max_workers=35) as executor:
        futures = [executor.submit(
            ct.start,
            timeout=5000,
            heartbeat=5000,
            request_timeout=2000
        ) for _ in range(30)]

        for future in as_completed(futures):
            log_info("Future _changes loop complete")

    log_info("Futures exited")

    # make sure client can still take connections
    dbs = client.get_databases(url=ls_url)
    log_info(dbs)
    database = client.get_database(url=ls_url, db_name=ls_db)
    log_info(database)
def test_ios_full_life_cycle(request, liteserv_with_storage_engine_from_fixture):
    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")
    docs = client.add_docs(ls_url, db="ls_db", number=10, id_prefix="test_doc")
    assert len(docs) == 10

    client.delete_databases(ls_url)
    liteserv.stop()
Example #32
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite["liteserv"]
    cluster_config = setup_client_syncgateway_suite["cluster_config"]
    xattrs_enabled = setup_client_syncgateway_suite["xattrs_enabled"]
    test_name = request.node.name

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR,
        type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(
        cluster_config=cluster_config)

    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "sg_mode": setup_client_syncgateway_suite["sg_mode"],
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "xattrs_enabled": xattrs_enabled
    }

    log_info("Tearing down test")
    client.delete_databases(ls_url)
    liteserv.stop()

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)
Example #33
def test_auto_prune_listener_sanity(setup_client_syncgateway_test):
    """Sanity test for the autoprune feature

    1. Create a db and put a doc
    2. Update the docs past the default revs_limit (20)
    3. Assert that the docs only retain 20 revs
    """

    ls_url = setup_client_syncgateway_test["ls_url"]
    client = MobileRestClient()

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))

    num_docs = 1
    num_revs = 100

    ls_db = client.create_database(url=ls_url, name="ls_db")
    docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="ls_db_doc")
    assert len(docs) == num_docs

    client.update_docs(url=ls_url, db=ls_db, docs=docs, number_updates=num_revs)

    client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=docs, expected_max_number_revs_per_doc=20)
def test_replication_with_multiple_client_dbs_and_single_sync_gateway_db(setup_client_syncgateway_test):
    """Test replication from multiple client dbs to one sync_gateway db"""

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 1000

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_multiple_client_dbs_and_single_sync_gateway_db'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()

    ls_db1 = client.create_database(url=ls_url, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url, name="ls_db2")
    sg_db = client.create_database(url=sg_admin_url, name="sg_db", server="walrus:")

    # Setup continuous push / pull replication from ls_db1 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db1,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from ls_db2 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db2,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db2
    )

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db1, number=num_docs, id_prefix=ls_db1)
    assert len(ls_db_one_docs) == 1000

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db2, number=num_docs, id_prefix=ls_db2)
    assert len(ls_db_two_docs) == 1000

    ls_db1_db2_docs = ls_db_one_docs + ls_db_two_docs

    client.verify_docs_present(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs)
    client.verify_docs_present(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs)

    client.verify_docs_in_changes(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
def test_client_to_sync_gateway_complex_replication_with_revs_limit(setup_client_syncgateway_test):
    """ Ported from sync_gateway tests repo
    ...  1.  Clear server buckets
    ...  2.  Restart liteserv with _session
    ...  3.  Restart sync_gateway with that config
    ...  4.  Create db on LiteServ
    ...  5.  Add numDocs to LiteServ db
    ...  6.  Setup push replication from LiteServ db to sync_gateway
    ...  7.  Verify doc present on sync_gateway (number of docs)
    ...  8.  Update sg docs numRevs * 4 = 480
    ...  9.  Update docs on LiteServ db numRevs * 4 = 480
    ...  10. Setup pull replication from sg -> liteserv db
    ...  11. Verify all docs are replicated
    ...  12. compact LiteServ db (POST _compact)
    ...  13. Verify number of revs in LiteServ db (?revs_info=true) check rev status == available fail if revs available > revs limit
    ...  14. Delete LiteServ db conflicts (?conflicts=true) DELETE _conflicts
    ...  15. Create numDoc number of docs in LiteServ db
    ...  16. Update LiteServ db docs numRevs * 5 (600)
    ...  17. Verify LiteServ db revs is < 602
    ...  18. Verify LiteServ db docs revs prefix (9 * numRevs + 3)
    ...  19. Compact LiteServ db
    ...  20. Verify number of revs <= 10
    ...  21. Delete LiteServ docs
    ...  22. Delete Server bucket
    ...  23. Delete LiteServ db
    """

    ls_db_name = "ls_db"
    sg_db = "db"
    sg_user_name = "sg_user"
    num_docs = 10
    num_revs = 100

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-revs-limit.json".format(SYNC_GATEWAY_CONFIGS)
    )
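
    # The walrus-revs-limit.json config presumably pins a low revs_limit on the db,
    # which the 100-revision assertion against Sync Gateway below relies on. A rough
    # sketch of the relevant portion (illustrative, not the actual file):
    #
    #   {
    #       "databases": {
    #           "db": {
    #               "server": "walrus:",
    #               "revs_limit": 100
    #           }
    #       }
    #   }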

    log_info("Running 'test_client_to_sync_gateway_complex_replication_with_revs_limit'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()

    # Test the _session endpoint; the listener does not support users but should return a default response
    client.get_session(url=ls_url)

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name=ls_db_name)
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)

    # The delay in the updates below works around couchbase/couchbase-lite-ios#1277.
    # If the revs depth is small and a doc is updated past the revs depth before a push
    # replication, the push replication has no common ancestor with sync_gateway, which
    # causes conflicts to be created. Adding a delay between updates avoids this. CBL Mac
    # and CBL .NET offer an alternative (changing the default client revs depth), but that
    # is not configurable on Android, so the delay keeps the replication behaving as
    # expected on all platforms.
    client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session)
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1)

    # Start replication ls_db <- sg_db
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two)

    client.compact_database(url=ls_url, db=ls_db)

    # LiteServ should only have 20 revisions due to the built-in client revs limit
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)

    # Sync Gateway should have 100 revisions due to the specified revs_limit in the sg config and possible conflict winners from the liteserv db
    client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session)

    client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
    expected_generation = num_revs + 1
    client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
    client.verify_docs_rev_generations(url=sg_url, db=sg_db, docs=ls_db_docs, expected_generation=expected_generation, auth=sg_session)

    client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)

    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    expected_revs = num_revs + 20 + 2
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, delay=0.1, number_updates=num_revs)

    client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_max_number_revs_per_doc=expected_revs)

    expected_generation = (num_revs * 2) + 3
    client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)

    client.compact_database(url=ls_url, db=ls_db)
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)

    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.wait_for_no_replications(url=ls_url)

    client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.delete_conflicts(url=sg_url, db=sg_db, docs=ls_db_docs, auth=sg_session)
    client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)

    # Start push / pull replications and verify that all docs are deleted
    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    # Start replication ls_db <- sg_db
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
def test_replication_with_session_cookie(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/817
    1. SyncGateway Config with guest disabled = true and One user added (e.g. user1 / 1234)
    2. Create a new session on SGW for the user1 by using POST /_session.
       Capture the SyncGatewaySession cookie from the set-cookie in the response header.
    3. Start continuous push and pull replicator on the LiteServ with SyncGatewaySession cookie.
       Make sure that both replicators start correctly
    4. Delete the session from SGW by sending DELETE /_sessions/ to SGW
    5. Cancel both push and pull replicator on the LiteServ
    6. Repeat step 1 and 2
    """

    ls_db = "ls_db"
    sg_db = "db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-user.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_session_cookie'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    # Get session header for user_1
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))
    session = (session_parts[0], session_id)

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    # Wait for the 2 replications to be 'Idle'; on .NET they may not be immediately available via _active_tasks
    client.wait_for_replication_status_idle(ls_url, repl_one)
    client.wait_for_replication_status_idle(ls_url, repl_two)

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running"

    num_docs_pushed = 100

    # Sanity test docs
    ls_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs_pushed, id_prefix="ls_doc", channels=["ABC"])
    assert len(ls_docs) == num_docs_pushed

    sg_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_pushed, id_prefix="sg_doc", auth=session, channels=["ABC"])
    assert len(sg_docs) == num_docs_pushed

    all_docs = client.merge(ls_docs, sg_docs)
    log_info(all_docs)

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs)

    # GET from session endpoint /{db}/_session/{session-id}
    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and _user rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id)

    # Make sure the session is deleted; get_session should now fail with a 404
    error_code = None
    try:
        session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        error_code = he.response.status_code
        log_info(error_code)

    assert error_code == 404, "Expected 404 status, actual {}".format(error_code)

    # Cancel the replications
    # Stop repl_one
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Stop repl_two
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "All replications should be stopped"

    # Create new session and new push / pull replications
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running, found: {}".format(len(replications))

    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and db rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, session_id=session_id)

    # Make sure the session is deleted; get_session should now fail with a 404
    error_code = None
    try:
        session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        error_code = he.response.status_code
        log_info(error_code)

    assert error_code == 404, "Expected 404 status, actual {}".format(error_code)
def test_multiple_replications_created_with_unique_properties(setup_client_syncgateway_test):
    """Regression test for couchbase/couchbase-lite-java-core#1386
    1. Setup SGW with a remote database name db for an example
    2. Create a local database such as ls_db
    3. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true
    4. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, doc_ids=["doc1", "doc2"]
    5. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, filter="filter1"
    6. Make sure that the session_id from each POST /_replicate are different.
    7. Send GET /_active_tasks to make sure that there are 3 tasks created.
    8. Send 3 POST /_replicate with the same parameters as Steps 3-5 plus cancel=true to stop those replicators
    9. Repeat Steps 3 - 8 with the source and target swapped to test the pull replicator.
    """

    sg_db = "db"
    ls_db = "ls_db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_created_with_unique_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    ########
    # PUSH #
    ########
    # Start 3 unique push replication requests
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_one)

    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_two)

    # Create doc filter and add to the design doc
    filters = {
        "language": "javascript",
        "filters": {
            "sample_filter": "function(doc, req) { if (doc.type && doc.type === \"skip\") { return false; } return true; }"
        }
    }
    client.add_design_doc(url=ls_url, db=ls_db, name="by_type", doc=json.dumps(filters))

    repl_three = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )
    client.wait_for_replication_status_idle(ls_url, repl_three)

    # Verify 3 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 3, "Number of replications, Expected: {} Actual: {}".format(
        3,
        len(replications)
    )

    # Stop repl_one
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Stop repl_two
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )

    # Stop repl_three
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )

    # Verify no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual: {}".format(
        0,
        len(replications)
    )

    ########
    # PULL #
    ########
    # Start 2 unique pull replication requests
    repl_four = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_four)

    # Start filtered pull from sync gateway to LiteServ
    repl_five = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db,
        channels_filter=["ABC", "CBS"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_five)

    # Verify 2 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 2, "Number of replications, Expected: {} Actual: {}".format(
        2,
        len(replications)
    )

    # Stop repl_four
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    # Stop repl_five
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db,
        channels_filter=["ABC", "CBS"]
    )

    # Verify no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual: {}".format(
        0,
        len(replications)
    )
def test_backfill_channels_oneshot_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)

    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db,
                           name=user_b_user_info.name, channels=["A", "B"])

    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A sync function access()")
        # Grant via access() in the sync function; the doc id 'channel_access' triggers access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])

        # Grant via role() in the sync function; the doc id 'role_access' triggers role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

    else:
        pytest.fail("Unsupported grant_type!!!!")
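
    # The CHANNEL-SYNC and ROLE-SYNC branches above assume a sync function along
    # these lines (illustrative; the actual function lives in the sg config):
    #
    #   function (doc) {
    #       if (doc._id == "channel_access") { access(doc.users, doc.channels); }
    #       if (doc._id == "role_access") { role(doc.users, doc.roles); }
    #       channel(doc.channels);
    #   }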

    user_b_changes_after_grant = client.get_changes(url=sg_url, db=sg_db,
                                                    since=user_b_changes["last_seq"], auth=user_b_session, feed="normal")

    # User B should have received 51 docs (a_docs + 1 _user/USER_B doc) if the grant was via REST, or 50 changes if the
    # grant was via the sync function
    changes_results = user_b_changes_after_grant["results"]
    assert 50 <= len(changes_results) <= 51

    # Build a dictionary of id: rev pairs from the changes results, excluding "_user/" docs
    ids_and_revs_from_user_changes = {
        change["id"]: change["changes"][0]["rev"]
        for change in changes_results if not change["id"].startswith("_user/")
    }

    assert len(ids_and_revs_from_user_changes) == 50

    # Build a dictionary of id: rev pairs for all of the channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}

    assert len(ids_and_revs_from_a_docs) == 50

    # Check that the changes and the a_docs are identical in id and rev
    assert ids_and_revs_from_user_changes == ids_and_revs_from_a_docs

    # Get changes from last_seq of the changes request after the grant. There should be no new changes
    user_b_changes = client.get_changes(url=sg_url, db=sg_db,
                                        since=user_b_changes_after_grant["last_seq"], auth=user_b_session, feed="normal")
    assert len(user_b_changes["results"]) == 0
Example #39
def test_detect_stale_channel_index(params_from_base_test_setup, sg_conf):
    """
    1. Bring up a single Sync Gateway node, backed by Couchbase Server, with 3 sg_accels indexing
    2. Configure such that the primary bucket and the channel index bucket are different (which is the norm)
    3. Add 1000 documents
    4. Shutdown Sync Gateway
    5. Delete / create the primary bucket ('data-bucket'), but do not touch the channel index bucket
    6. Start Sync Gateway
    7. Assert that sync_gateway fails to start due to stale channel index
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_detect_stale_channel_index'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]
    sg_db = "db"
    num_docs = 1000

    cb_server = couchbaseserver.CouchbaseServer(url=cb_server_url)
    client = MobileRestClient()

    # Create doc pusher user
    doc_pusher_user_info = userinfo.UserInfo(name="doc_pusher",
                                             password="******",
                                             channels=["NASA"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    # Add some docs to Sync Gateway to cause indexing
    docs = document.create_docs(None,
                                number=num_docs,
                                channels=doc_pusher_user_info.channels)
    pushed_docs = client.add_bulk_docs(url=sg_url,
                                       db=sg_db,
                                       docs=docs,
                                       auth=doc_pusher_auth)
    assert len(pushed_docs) == num_docs

    # Shut down sync_gateway
    sg_util = SyncGateway()
    sg_util.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)

    # Delete server bucket
    cb_server.delete_bucket(name="data-bucket")

    # Create server bucket
    ram_per_bucket_mb = cb_server.get_ram_per_bucket(num_buckets=2)
    cb_server.create_bucket(name="data-bucket", ram_quota_mb=ram_per_bucket_mb)

    # Start sync_gateway and assert that a Provisioning error is raised due to detecting stale index
    with pytest.raises(exceptions.ProvisioningError):
        sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                    url=sg_url,
                                    config=sg_conf)

    # TODO: To make this check even more accurate, could
    # run remote ssh command "systemctl status sync_gateway.service" and look for
    # regex pattern: Main PID: 7185 (code=exited, status=2)

    # Delete index bucket and recreate it
    cb_server.delete_bucket(name="index-bucket")
    cb_server.create_bucket(name="index-bucket",
                            ram_quota_mb=ram_per_bucket_mb)

    # Start sync gateway, should succeed now
    sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                url=sg_url,
                                config=sg_conf)
Example #40
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup,
                                                   sg_conf):
    """
    Scenario 1

    Start with 3 sg_accels
    Take down 2 sg_accels (block until down -- poll port if needed)
    Doc adds with uuids (~30 sec for cbgt to reshard)
    polling loop: wait for all docs to come back over changes feed
    Call validate pindex with correct number of accels

    Scenario 2 (Continuation)

    When bringing nodes back up, poll the cbgt_cfg until you get the expected number of nodes,
    then validate the pindex distribution with 2 accels
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher",
                                             "pass",
                                             channels=["A"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    log_info("Shutting down sg_accels: [{}, {}]".format(
        cluster.sg_accels[1], cluster.sg_accels[2]))
    # Shutdown two accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0

    log_info("Finished taking nodes down!")

    # It should take some time (~30 sec) for cbgt to pick up the failed nodes and reshard the pindexes. During
    # this window, add 1000 docs and start a longpoll changes loop to see if those docs make it to the changes feed.
    # If the reshard is successful they will show up at some point afterwards; if not, the docs will fail to show up.
    doc_pusher_docs = client.add_docs(url=sg_url,
                                      db=sg_db,
                                      number=1000,
                                      id_prefix=None,
                                      auth=doc_pusher_auth,
                                      channels=doc_pusher_user_info.channels)
    assert len(doc_pusher_docs) == 1000
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=doc_pusher_docs,
                                  auth=doc_pusher_auth,
                                  polling_interval=5)

    # The pindexes should be resharded at this point since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)

    log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1],
                                                cluster.sg_accels[2]))

    # Bring the two accel nodes back up one at a time
    status = cluster.sg_accels[1].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the first accel node back (2 running)
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=2)

    status = cluster.sg_accels[2].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the second accel node back (3 running)
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=3)
Example #41
def test_longpoll_awaken_doc_add_update(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=["NBC"], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=["CBS"], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=["MTV"], roles=[])
    sg_db = "db"

    client = MobileRestClient()

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=adam_user_info.name, password=adam_user_info.password, channels=adam_user_info.channels)

    traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=traun_user_info.name, password=traun_user_info.password, channels=traun_user_info.channels)

    andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=andy_user_info.name, password=andy_user_info.password, channels=andy_user_info.channels)

    # Get starting sequence of docs, use the last seq to progress past any user docs
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=30, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=30, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=30, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be
        # caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Add 3 docs with adam's auth: one with adam's channels, one with traun's channels, one with andy's channels.
        # Tests that the changes feed wakes up for user-created docs and non-user-created docs
        adam_add_doc_task_1 = ex.submit(client.add_docs, url=sg_url, db=sg_db,
                                        number=1, id_prefix="adam_doc",
                                        auth=adam_auth, channels=adam_user_info.channels)

        adam_add_doc_task_2 = ex.submit(client.add_docs, url=sg_url, db=sg_db,
                                        number=1, id_prefix="traun_doc",
                                        auth=adam_auth, channels=traun_user_info.channels)

        adam_add_doc_task_3 = ex.submit(client.add_docs, url=sg_url, db=sg_db,
                                        number=1, id_prefix="andy_doc",
                                        auth=adam_auth, channels=andy_user_info.channels)

        # Wait for docs adds to complete
        adam_doc_1 = adam_add_doc_task_1.result()
        assert len(adam_doc_1) == 1

        adam_doc_2 = adam_add_doc_task_2.result()
        assert len(adam_doc_2) == 1

        adam_doc_3 = adam_add_doc_task_3.result()
        assert len(adam_doc_3) == 1

        # Assert that the changes feed woke up and that the doc change was propagated
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "adam_doc_0"

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "traun_doc_0"

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "andy_doc_0"

        # Start another longpoll changes request
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be
        # caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Check to see that doc updates to own user doc wake up changes feed
        adam_update_docs_task = ex.submit(client.update_docs, url=sg_url, db=sg_db, docs=adam_doc_1, number_updates=1, auth=adam_auth)
        traun_update_docs_task = ex.submit(client.update_docs, url=sg_url, db=sg_db, docs=adam_doc_2, number_updates=1, auth=traun_auth)
        andy_update_docs_task = ex.submit(client.update_docs, url=sg_url, db=sg_db, docs=adam_doc_3, number_updates=1, auth=andy_auth)

        # Wait for docs updates to complete
        adam_updated_docs = adam_update_docs_task.result()
        assert len(adam_updated_docs) == 1
        assert adam_updated_docs[0]["rev"].startswith("2-")

        traun_updated_docs = traun_update_docs_task.result()
        assert len(traun_updated_docs) == 1
        assert traun_updated_docs[0]["rev"].startswith("2-")

        andy_updated_docs = andy_update_docs_task.result()
        assert len(andy_updated_docs) == 1
        assert andy_updated_docs[0]["rev"].startswith("2-")

        # Assert that the changes feed woke up and that the doc updates were propagated
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "adam_doc_0"
        rev_from_change = int(adam_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 2

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "traun_doc_0"
        rev_from_change = int(traun_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 2

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "andy_doc_0"
        rev_from_change = int(andy_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 2

        # Start another longpoll changes request from the last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be
        # caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Add doc with access to all channels
        adam_add_doc_task_4 = ex.submit(client.add_docs, url=sg_url, db=sg_db,
                                        number=1, id_prefix="all_doc",
                                        auth=adam_auth,
                                        channels=adam_user_info.channels + traun_user_info.channels + andy_user_info.channels)

        all_doc = adam_add_doc_task_4.result()
        assert len(all_doc) == 1

        # Assert that the changes feed woke up and that the doc add was propagated to all users
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "all_doc_0"
        rev_from_change = int(adam_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 1

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "all_doc_0"
        rev_from_change = int(traun_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 1

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "all_doc_0"
        rev_from_change = int(andy_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 1

        # Start another longpoll changes request from the last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes; the feeds should be
        # caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update doc with access to all channels
        adam_update_docs_task_2 = ex.submit(client.update_docs, url=sg_url, db=sg_db, docs=all_doc, number_updates=1, auth=adam_auth)

        # Wait for docs updates to complete
        all_doc_updated = adam_update_docs_task_2.result()
        assert len(all_doc_updated) == 1
        assert all_doc_updated[0]["rev"].startswith("2-")

        # Assert that the changes feed woke up and that the doc update was propagated to all users
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "all_doc_0"
        rev_from_change = int(adam_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 2

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "all_doc_0"
        rev_from_change = int(traun_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 2

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "all_doc_0"
        rev_from_change = int(andy_changes["results"][0]["changes"][0]["rev"].split("-")[0])
        assert rev_from_change == 2
Example #42
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup,
                                     sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents.  Set the "_exp":3 on 5 of these documents
    2. Wait five seconds
    3. POST /db/_bulk_get for the 10 documents.  Validate that only the 5 documents that have not yet expired are returned
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3",
                                            number=5,
                                            expiry=3,
                                            channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10",
                                             number=5,
                                             expiry=10,
                                             channels=sg_user_channels)

    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies

    bulk_docs = client.add_bulk_docs(url=sg_url,
                                     db=sg_db,
                                     docs=bulk_bodies,
                                     auth=sg_user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_docs_ids = [doc["id"] for doc in bulk_docs]

    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = [
        "exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"
    ]

    bulk_get_docs, errors = client.get_bulk_docs(url=sg_url,
                                                 db=sg_db,
                                                 doc_ids=bulk_docs_ids,
                                                 auth=sg_user_session,
                                                 validate=False)
    assert len(bulk_get_docs) == len(expected_ids)
    assert len(errors) == len(expected_missing_ids)

    bulk_get_doc_ids = [doc["_id"] for doc in bulk_get_docs]
    error_ids = [doc["id"] for doc in errors]

    assert bulk_get_doc_ids == expected_ids
    assert error_ids == expected_missing_ids

    client.verify_doc_ids_found_in_response(response=bulk_get_docs,
                                            expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(
        response=errors, expected_missing_doc_ids=expected_missing_ids)

    for expired_doc in error_ids:
        verify_doc_deletion_on_server(doc_id=expired_doc,
                                      sdk_client=sdk_client,
                                      sg_client=client,
                                      sg_admin_url=sg_url_admin,
                                      sg_db=sg_db,
                                      xattrs_enabled=xattrs_enabled)
Example #43
def test_rolling_ttl_remove_expirary(params_from_base_test_setup,
                                     sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Once per second for 10 seconds, update /db/doc1 with a new revision (also with "_exp":3)
    3. Update /db/doc1 with a revision with no expiry
    4. Get /db/doc1.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_rolling_ttl_remove_expirary'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=3,
                                         channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10",
                                          expiry=10,
                                          channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url,
                                db=sg_db,
                                doc=doc_exp_10_body,
                                auth=sg_user_session)

    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_exp_3["id"],
                      number_updates=10,
                      expiry=3,
                      delay=1,
                      auth=sg_user_session)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_exp_3["id"],
                      number_updates=1,
                      auth=sg_user_session)

    # If expiry was not removed in the last update, this would expire doc_exp_3
    time.sleep(5)

    # doc_exp_3 should still be around due to removal of expiry
    doc_exp_3 = client.get_doc(url=sg_url,
                               db=sg_db,
                               doc_id=doc_exp_3["id"],
                               auth=sg_user_session)
    assert doc_exp_3["_id"] == "exp_3"

    # doc_exp_10 should be expired due to the updates (10s) + sleep (5s)
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_10["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_10["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)
Example #44
def test_removing_expiry(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
    2. Update /db/doc1 with 10 new revisions, none carrying an expiry value
    3. Wait past the original expiry and GET /db/doc1.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_removing_expiry'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=3,
                                         channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10",
                                          expiry=10,
                                          channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url,
                                db=sg_db,
                                doc=doc_exp_10_body,
                                auth=sg_user_session)

    doc_exp_3_updated = client.update_doc(url=sg_url,
                                          db=sg_db,
                                          doc_id=doc_exp_3["id"],
                                          number_updates=10,
                                          auth=sg_user_session)

    # Sleep should allow an expiry to happen on doc_exp_3 if it had not been removed.
    # Expected behavior is that the doc_exp_3 will still be around due to the removal of the expiry
    time.sleep(5)

    # doc_exp_3 should no longer have an expiry and should not raise an exception
    doc_exp_3_updated_result = client.get_doc(url=sg_url,
                                              db=sg_db,
                                              doc_id=doc_exp_3_updated["id"],
                                              auth=sg_user_session)
    assert doc_exp_3_updated_result["_id"] == "exp_3"

    # doc_exp_10 should be available still and should not raise an exception
    doc_exp_10_result = client.get_doc(url=sg_url,
                                       db=sg_db,
                                       doc_id=doc_exp_10["id"],
                                       auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
Example #45
def test_numeric_expiry_as_ttl(params_from_base_test_setup, sg_conf_name):
    """
    1. PUT /db/doc1 via SG with property "_exp":3
       PUT /db/doc2 via SG with property "_exp":10
    2. Wait five seconds
    3. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_numeric_expiry_as_ttl'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')
    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=3,
                                         channels=sg_user_channels)
    doc_exp_10_body = document.create_doc(doc_id="exp_10",
                                          expiry=10,
                                          channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_10 = client.add_doc(url=sg_url,
                                db=sg_db,
                                doc=doc_exp_10_body,
                                auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire, but still be in the window to get doc_exp_10
    time.sleep(5)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_3["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_3["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)

    # doc_exp_10 should be available still
    doc_exp_10_result = client.get_doc(url=sg_url,
                                       db=sg_db,
                                       doc_id=doc_exp_10["id"],
                                       auth=sg_user_session)
    assert doc_exp_10_result["_id"] == "exp_10"
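
The 403-vs-404 branch above recurs throughout these expiry tests, so it is worth distilling. A minimal sketch of the same check with raw requests (auth handling omitted and the helper name hypothetical):

import requests

def assert_doc_expired(sg_url, sg_db, doc_id, xattrs_enabled):
    # In XATTR mode the expiry leaves a tombstone (403 Forbidden for this user);
    # in Doc Meta mode the doc is purged outright (404 Not Found).
    resp = requests.get("{}/{}/{}".format(sg_url, sg_db, doc_id))
    expected = 403 if xattrs_enabled else 404
    assert resp.status_code == expected, "unexpected status: {}".format(resp.status_code)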
예제 #46
0
def test_string_expiry_as_iso_8601_date(params_from_base_test_setup,
                                        sg_conf_name):
    """
    1. Calculate (server time + 3 seconds) as ISO-8601 date (e.g. 2016-01-01T00:00:00.000+00:00)
    2. PUT /db/doc1 via SG with property "_exp":"[date]"
       PUT /db/doc2 via SG with property "_exp":"2026-01-01T00:00:00.000+00:00"
    3. Wait ten seconds
    4. Get /db/doc1.  Assert response is 404
       Get /db/doc2.  Assert response is 200
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_string_expiry_as_ISO_8601_Date'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    time_util = Time()
    iso_datetime = time_util.get_iso_datetime(delta=3)

    doc_exp_3_body = document.create_doc(doc_id="exp_3",
                                         expiry=iso_datetime,
                                         channels=sg_user_channels)
    doc_exp_years_body = document.create_doc(
        doc_id="exp_years",
        expiry="2026-01-01T00:00:00.000+00:00",
        channels=sg_user_channels)

    doc_exp_3 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_exp_3_body,
                               auth=sg_user_session)
    doc_exp_years = client.add_doc(url=sg_url,
                                   db=sg_db,
                                   doc=doc_exp_years_body,
                                   auth=sg_user_session)

    # Sleep should allow doc_exp_3 to expire
    time.sleep(10)

    # doc_exp_3 should be expired
    with pytest.raises(HTTPError) as he:
        client.get_doc(url=sg_url,
                       db=sg_db,
                       doc_id=doc_exp_3["id"],
                       auth=sg_user_session)

    # In XATTR mode, the expiry results in a tombstone
    # In Doc Meta mode, the expiry results in a purge
    if xattrs_enabled:
        assert he.value[0].startswith("403 Client Error: Forbidden for url:")
    else:
        assert he.value[0].startswith("404 Client Error: Not Found for url:")

    verify_doc_deletion_on_server(doc_id=doc_exp_3["id"],
                                  sdk_client=sdk_client,
                                  sg_client=client,
                                  sg_admin_url=sg_url_admin,
                                  sg_db=sg_db,
                                  xattrs_enabled=xattrs_enabled)

    # doc_exp_years should be available still
    doc_exp_years_result = client.get_doc(url=sg_url,
                                          db=sg_db,
                                          doc_id=doc_exp_years["id"],
                                          auth=sg_user_session)
    assert doc_exp_years_result["_id"] == "exp_years"
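
Time().get_iso_datetime(delta=3) is a test-framework helper. A standard-library equivalent, assuming the client and server clocks agree (an assumption this test already depends on), could be sketched as:

import datetime

def iso_8601_in(seconds):
    # Return a UTC timestamp `seconds` from now in the shape the test uses,
    # e.g. "2026-01-01T00:00:00.000+00:00" (sub-second precision dropped).
    expiry = datetime.datetime.utcnow() + datetime.timedelta(seconds=seconds)
    return expiry.strftime("%Y-%m-%dT%H:%M:%S.000+00:00")
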
def test_backfill_channels_looping_longpoll_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)

    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Create a dictionary keyed on doc id for all of channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start long poll changes feed.
        changes_task = ex.submit(client.get_changes,
                                 url=sg_url, db=sg_db, since=user_b_changes["last_seq"], auth=user_b_session, timeout=10, limit=20)

        # Grant access to channel "A"
        if grant_type == "CHANNEL-REST":
            log_info("Granting user access to channel A via Admin REST user update")
            # Grant via update to user in Admin API
            client.update_user(url=sg_admin_url, db=sg_db,
                               name=user_b_user_info.name, channels=["A", "B"])

        elif grant_type == "CHANNEL-SYNC":
            log_info("Granting user access to channel A sync function access()")
            # Grant via access() in the sync function; the doc id 'channel_access' triggers access(doc.users, doc.channels)
            access_doc = document.create_doc("channel_access", channels=["A"])
            access_doc["users"] = ["USER_B"]
            client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

        elif grant_type == "ROLE-REST":
            log_info("Granting user access to channel A via Admin REST role grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
            client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

        elif grant_type == "ROLE-SYNC":
            log_info("Granting user access to channel A via sync function role() grant")
            # Create role with channel A
            client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])

            # Grant via role() in the sync function; the doc id 'role_access' triggers role(doc.users, doc.roles)
            role_access_doc = document.create_doc("role_access")
            role_access_doc["users"] = ["USER_B"]
            role_access_doc["roles"] = ["role:channel-A-role"]
            client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

        else:
            pytest.fail("Unsupported grant_type!!!!")

        # Block on return of longpoll changes, feed should wake up and return 20 results
        changes = changes_task.result()

    assert len(changes["results"]) == 20
    num_requests = 1

    # Cross the results off from the 'a_docs' dictionary
    for doc in changes["results"]:
        del ids_and_revs_from_a_docs[doc["id"]]

    # Start looping longpoll changes with limit, cross off changes from dictionary each time one is found
    # Since 20 changes should be crossed off already, this should execute 2x.
    log_info("Starting looping longpoll changes with limit!")
    last_seq = changes["last_seq"]
    while True:

        if len(ids_and_revs_from_a_docs.keys()) == 0:
            log_info("All docs were found! Exiting polling loop")
            break

        changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=user_b_session, limit=20, timeout=10)
        num_requests += 1

        # If more than 3 requests are needed, the else branch below raises an exception.
        if num_requests == 2:
            assert len(changes["results"]) == 20
        elif num_requests == 3:
            # This will be 10 or 11 depending on whether the _user/ doc is returned
            assert 10 <= len(changes["results"]) <= 11
        else:
            raise exceptions.ChangesError("Looping longpoll should only have to perform 3 requests to get all the changes!!")

        # Cross the results off from the 'a_docs' dictionary.
        # This will blow up if duplicate docs are sent to the changes feed
        for doc in changes["results"]:
            if doc["id"] != "_user/USER_B":
                del ids_and_revs_from_a_docs[doc["id"]]

        last_seq = changes["last_seq"]

    # Changes after the longpoll loop should be caught up
    zero_results = client.get_changes(url=sg_url, db=sg_db,
                                      since=last_seq,
                                      auth=user_b_session, feed="normal")

    # Changes should be caught up and there should be no results
    assert len(zero_results["results"]) == 0
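
The looping pattern above (longpoll _changes with since and limit, cross ids off, repeat until none remain) distills into a small helper. A sketch, assuming a get_changes(since, limit) callable that returns a dict with "results" and "last_seq" like MobileRestClient.get_changes; it uses discard so _user/ docs and duplicates do not raise, a looser policy than the strict del above:

def drain_changes(get_changes, expected_ids, limit=20, max_requests=10):
    remaining = set(expected_ids)
    last_seq = 0
    requests_made = 0
    while remaining:
        changes = get_changes(since=last_seq, limit=limit)
        requests_made += 1
        if requests_made > max_requests:
            raise RuntimeError("changes feed did not drain in {} requests".format(max_requests))
        for row in changes["results"]:
            remaining.discard(row["id"])  # tolerate _user/ docs and duplicates
        last_seq = changes["last_seq"]
    return requests_made
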
def test_verify_open_revs_with_revs_limit_push_conflict(setup_client_syncgateway_test):
    """Test replication from multiple client dbs to one sync_gateway db

    https://github.com/couchbase/couchbase-lite-ios/issues/1277
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 100
    num_revs = 20

    sg_db = "db"
    sg_user_name = "sg_user"

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_verify_open_revs_with_revs_limit_push_conflict'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    # Test the endpoint; the listener does not support users but should return a default response
    client.get_session(url=ls_url)
    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="ls_db", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)

    client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, auth=sg_session)
    sg_current_doc = client.get_doc(url=sg_url, db=sg_db, doc_id="ls_db_2", auth=sg_session)

    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs)
    ls_current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id="ls_db_2")

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)

    client.verify_doc_rev_generation(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_generation=21)
    client.verify_doc_rev_generation(url=sg_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_generation=21, auth=sg_session)

    expected_ls_revs = [ls_current_doc["_rev"]]
    client.verify_open_revs(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_open_revs=expected_ls_revs)

    expected_sg_revs = [ls_current_doc["_rev"], sg_current_doc["_rev"]]
    client.verify_open_revs(url=sg_admin_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_open_revs=expected_sg_revs)
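
The expected_generation=21 above is just arithmetic: one initial revision plus num_revs (20) updates on each side gives generation 21, and a revision id's generation is the integer before the "-". A tiny sketch of that parsing:

def rev_generation(rev):
    # "21-f0e1d2..." -> 21
    return int(rev.split("-", 1)[0])

assert rev_generation("21-f0e1d2") == 21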
예제 #49
0
def test_deleted_docs_from_changes_active_only(params_from_base_test_setup,
                                               sg_conf_name):
    """
    https://github.com/couchbase/sync_gateway/issues/2955
    1. Create a document
    2. Delete the document
    3. Restart Sync Gateway (to force rebuild of cache from view)
    4. Issue an active_only=true changes request
    5. Issue an active_only=false changes request
    The deleted document was not being included in the result set in step 5.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 10
    client = MobileRestClient()

    # Add doc to SG
    added_doc = client.add_docs(url=sg_admin_url,
                                db=sg_db,
                                number=num_docs,
                                id_prefix="test_changes")

    # Delete 1 doc
    doc_id = added_doc[0]["id"]
    log_info("Deleting {}".format(doc_id))
    doc = client.get_doc(url=sg_admin_url, db=sg_db, doc_id=doc_id)
    doc_rev = doc['_rev']
    client.delete_doc(sg_admin_url, sg_db, doc_id, doc_rev)

    # Restart SG
    sg_obj = SyncGateway()
    sg_obj.restart_sync_gateways(cluster_config)

    # Changes request with active_only=true
    session = Session()
    request_url = "{}/{}/_changes?active_only=true".format(sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()
    log_info(
        "Checking that the deleted doc is not included in the active_only=true changes request"
    )
    for d in resp_obj["results"]:
        assert doc_id not in d

    # Changes request with active_only=false
    request_url = "{}/{}/_changes?active_only=false".format(
        sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()
    doc_found = False
    for d in resp_obj["results"]:
        if doc_id != d["id"]:
            continue
        else:
            assert doc_id == d["id"]
            assert d["deleted"]
            doc_found = True
            break

    log_info(
        "Checking that the deleted doc is included in the active_only=false changes request"
    )
    assert doc_found
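
The active_only=false scan above can also be phrased as a one-line helper over the parsed changes response; a sketch:

def deleted_doc_in_changes(resp_obj, doc_id):
    # True if doc_id appears in the changes results flagged as deleted.
    return any(row["id"] == doc_id and row.get("deleted") for row in resp_obj["results"])
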
def test_macosx_storage_engines(request,
                                liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR,
                                            type(liteserv).__name__, test_name,
                                            datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    storage_engine = liteserv.storage_engine
    log_info("Testing storage_engine: {}".format(storage_engine))

    if storage_engine == "SQLite":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.sqlite3" in db_files
        assert "db.sqlite3-shm" in db_files
        assert "db.sqlite3-wal" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == []

        client.delete_databases(ls_url)
        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    elif storage_engine == "SQLCipher":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.sqlite3" in db_files
        assert "db.sqlite3-shm" in db_files
        assert "db.sqlite3-wal" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == ["_encryption"]

        client.delete_databases(ls_url)
        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    elif storage_engine == "ForestDB":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.forest.0" in db_files
        assert "db.forest.meta" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == []

        client.delete_databases(ls_url)
        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    elif storage_engine == "ForestDB+Encryption":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.forest.0" in db_files
        assert "db.forest.meta" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == ["_encryption"]

        client.delete_databases(ls_url)

        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    else:
        pytest.xfail("Invalid storage engine")

    liteserv.stop()
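
The four storage-engine branches above differ only in the expected database files and the expected contents of the attachments directory, so they collapse naturally into a table-driven check. A sketch over the same paths (the helper name is illustrative):

import os

EXPECTED_FILES = {
    "SQLite":              (["db.sqlite3", "db.sqlite3-shm", "db.sqlite3-wal"], []),
    "SQLCipher":           (["db.sqlite3", "db.sqlite3-shm", "db.sqlite3-wal"], ["_encryption"]),
    "ForestDB":            (["db.forest.0", "db.forest.meta"], []),
    "ForestDB+Encryption": (["db.forest.0", "db.forest.meta"], ["_encryption"]),
}

def check_db_files(storage_engine, db_dir="results/dbs/macosx/ls_db.cblite2"):
    expected_db_files, expected_att_files = EXPECTED_FILES[storage_engine]
    db_files = os.listdir(db_dir)
    for expected in expected_db_files:
        assert expected in db_files
    assert os.listdir(os.path.join(db_dir, "attachments")) == expected_att_files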
예제 #51
0
def test_take_all_sgaccels_down(params_from_base_test_setup, sg_conf):
    """
    Scenario that takes all sync_gateway accel nodes offline during a doc load.
    After bringing the nodes back online during the load, the resharding of the DCP feed is verified.
    The changes feed is then verified to contain all docs.

    1. Start doc load (1000 docs)
    2. Take all sg_accel nodes down in parallel
    3. Verify nodes are down
    4. Wait for doc adds to complete, store "doc_push_result_1"
    5. Verify "doc_push_result_1" docs added
    6. Start doc load (1000 docs)
    7. Wait for 6. to complete, store "doc_push_result_2"
    8. Verify "doc_push_result_2" docs added
    9. Start another doc load (1000 docs)
    10. Bring up nodes in parallel
    11. Poll on p-index reshard
    12. Wait for 9. to complete, store "doc_push_result_3"
    13. Verify "doc_push_result_3" docs added
    14. Verify "doc_push_result_1" + "doc_push_result_2" + "doc_push_result_3" show up in _changes feed
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher",
                                             "pass",
                                             channels=["A"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=a_user_info.name,
                       password=a_user_info.password,
                       channels=a_user_info.channels)
    a_user_session = client.create_session(url=sg_admin_url,
                                           db=sg_db,
                                           name=a_user_info.name,
                                           password=a_user_info.password)

    # Shutdown all accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs
        docs_1 = document.create_docs(None,
                                      num_docs,
                                      channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs,
                                url=sg_url,
                                db=sg_db,
                                docs=docs_1,
                                auth=doc_pusher_auth)

        # Take down all sg_accel nodes
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until bulk_docs is complete
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=doc_push_result_1,
                                   auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes are offline
        docs_2_bodies = document.create_docs(
            None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url,
                                                  db=sg_db,
                                                  docs=docs_2_bodies,
                                                  auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=docs_push_result_2,
                                   auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the sg_accel nodes online
        docs_3 = document.create_docs(None,
                                      num_docs,
                                      channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs,
                                url=sg_url,
                                db=sg_db,
                                docs=docs_3,
                                auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for pindex to reshard correctly
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until second bulk_docs is complete
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=doc_push_result_3,
                                   auth=doc_pusher_auth)

    # Combine the 3 push results and make sure the changes propagate to a_user
    # a_user has access to the doc's channel.
    log_info("Verifying all the changes show up for 'a_user' ...")
    all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=all_docs,
                                  auth=a_user_session,
                                  polling_interval=2)
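
validate_cbgt_pindex_distribution_retry(3) above implies a poll-until-true shape: try a check, wait, try again, up to a bounded number of attempts. A generic sketch of that retry pattern (names illustrative, not the framework's API):

import time

def retry(check, attempts=3, delay_sec=5):
    # Call a zero-arg `check` until it returns truthy, at most `attempts` times.
    for _ in range(attempts):
        if check():
            return True
        time.sleep(delay_sec)
    return False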
예제 #52
0
def test_longpoll_awaken_roles(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin_role = "admin_role"
    admin_channel = "admin_channel"

    admin_user_info = userinfo.UserInfo(name="admin", password="******", channels=[], roles=[admin_role])
    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=[], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[])
    sg_db = "db"

    client = MobileRestClient()

    # Create a role on sync_gateway
    client.create_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel])

    # Create users with no channels or roles
    admin_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=admin_user_info.name, password=admin_user_info.password, roles=[admin_role])

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=adam_user_info.name, password=adam_user_info.password)

    traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=traun_user_info.name, password=traun_user_info.password)

    andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=andy_user_info.name, password=andy_user_info.password)

    ################################
    # change feed wakes for role add
    ################################

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    # Add doc with channel associated with the admin role
    admin_doc = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="admin_doc", auth=admin_auth, channels=[admin_channel])
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=admin_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        adam_auth = client.update_user(url=sg_admin_url, db=sg_db,
                                       name=adam_user_info.name, password=adam_user_info.password, roles=[admin_role])

        traun_auth = client.update_user(url=sg_admin_url, db=sg_db,
                                        name=traun_user_info.name, password=traun_user_info.password, roles=[admin_role])

        andy_auth = client.update_user(url=sg_admin_url, db=sg_db,
                                       name=andy_user_info.name, password=andy_user_info.password, roles=[admin_role])

        adam_changes = adam_changes_task.result()
        assert 1 <= len(adam_changes["results"]) <= 2
        assert adam_changes["results"][0]["id"] == "admin_doc_0" or adam_changes["results"][0]["id"] == "_user/adam"

        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "admin_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "admin_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Check that the user docs all show up in changes feed
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Check that the admin doc made it to all the changes feeds
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=adam_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=traun_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=admin_doc, auth=andy_auth)

    # At this point, each user should have a changes feed that is caught up for the next section

    ###########################################
    # change feed wakes for channel add to role
    ###########################################

    abc_channel = "ABC"
    abc_pusher_info = userinfo.UserInfo(name="abc_pusher", password="******", channels=[abc_channel], roles=[])

    abc_pusher_auth = client.create_user(url=sg_admin_url, db=sg_db, name=abc_pusher_info.name,
                                         password=abc_pusher_info.password, channels=abc_pusher_info.channels)

    # Add doc with ABC channel
    client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="abc_doc", auth=abc_pusher_auth, channels=[abc_channel])

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update admin role to include ABC channel
        # Since adam, traun, and andy are assigned to that role, they should wake up and get the 'abc_doc_0' doc
        client.update_role(url=sg_admin_url, db=sg_db, name=admin_role, channels=[admin_channel, abc_channel])

        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "abc_doc_0"

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "abc_doc_0"

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "abc_doc_0"
def test_initial_pull_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare sync-gateway to have 10000 documents.
    2. Create a single shot / continuous pull replicator and pull the docs into a database.
    3. Verify that all of the docs get pulled.
    Referenced issue: couchbase/couchbase-lite-android#955.
    """

    sg_db = "db"
    ls_db = "ls_db"

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=["ABC", "NBC"])
    session = client.create_session(sg_one_admin, sg_db, "seth")

    # Create 'num_docs' docs on sync_gateway
    docs = client.add_docs(
        url=sg_one_public,
        db=sg_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        auth=session
    )
    assert len(docs) == num_docs

    client.create_database(url=ls_url, name=ls_db)

    # Start pull replication (one-shot or continuous, depending on the 'continuous' parameter)
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    start = time.time()

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        # Android will report IDLE status, and drop into the 'verify_docs_present' below
        # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to client
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)

    all_docs_replicated_time = time.time() - start
    log_info("Replication took: {}s".format(all_docs_replicated_time))

    # Verify docs show up in client's changes feed
    client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
def test_backfill_channels_oneshot_limit_changes(params_from_base_test_setup, sg_conf_name, grant_type):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    log_info("grant_type: {}".format(grant_type))

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A"], roles=[])
    user_b_user_info = userinfo.UserInfo("USER_B", "pass", channels=["B"], roles=[])

    # Create users / sessions
    client.create_user(url=sg_admin_url, db=sg_db,
                       name=admin_user_info.name, password=admin_user_info.password, channels=admin_user_info.channels)

    client.create_user(url=sg_admin_url, db=sg_db,
                       name=user_b_user_info.name, password=user_b_user_info.password, channels=user_b_user_info.channels)

    admin_session = client.create_session(url=sg_admin_url, db=sg_db, name=admin_user_info.name, password=admin_user_info.password)
    user_b_session = client.create_session(url=sg_admin_url, db=sg_db, name=user_b_user_info.name, password=user_b_user_info.password)

    # Create 50 "A" channel docs
    a_docs = client.add_docs(url=sg_url, db=sg_db, number=50, id_prefix=None, auth=admin_session, channels=["A"])
    assert len(a_docs) == 50

    b_docs = client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="b_doc", auth=user_b_session, channels=["B"])
    assert len(b_docs) == 1

    user_doc = {"id": "_user/USER_B", "rev": None}
    b_docs.append(user_doc)

    # Loop until user_b sees b_doc_0 doc and _user/USER_B doc
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=b_docs, auth=user_b_session, strict=True)

    # Get last_seq for user_b
    user_b_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=user_b_session, feed="normal")

    # Grant access to channel "A"
    if grant_type == "CHANNEL-REST":
        log_info("Granting user access to channel A via Admin REST user update")
        # Grant via update to user in Admin API
        client.update_user(url=sg_admin_url, db=sg_db,
                           name=user_b_user_info.name, channels=["A", "B"])

    elif grant_type == "CHANNEL-SYNC":
        log_info("Granting user access to channel A sync function access()")
        # Grant via access() in the sync function; the doc id 'channel_access' triggers access(doc.users, doc.channels)
        access_doc = document.create_doc("channel_access", channels=["A"])
        access_doc["users"] = ["USER_B"]
        client.add_doc(url=sg_url, db=sg_db, doc=access_doc, auth=admin_session)

    elif grant_type == "ROLE-REST":
        log_info("Granting user access to channel A via Admin REST role grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])
        client.update_user(url=sg_admin_url, db=sg_db, name="USER_B", roles=["channel-A-role"])

    elif grant_type == "ROLE-SYNC":
        log_info("Granting user access to channel A via sync function role() grant")
        # Create role with channel A
        client.create_role(url=sg_admin_url, db=sg_db, name="channel-A-role", channels=["A"])

        # Grant via role() in the sync function; the doc id 'role_access' triggers role(doc.users, doc.roles)
        role_access_doc = document.create_doc("role_access")
        role_access_doc["users"] = ["USER_B"]
        role_access_doc["roles"] = ["role:channel-A-role"]
        client.add_doc(sg_url, db=sg_db, doc=role_access_doc, auth=admin_session)

    else:
        pytest.fail("Unsupported grant_type!!!!")

    # Create a dictionary keyed on doc id for all of channel A docs
    ids_and_revs_from_a_docs = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(ids_and_revs_from_a_docs.keys()) == 50

    log_info("Doing 3, 1 shot changes with limit and last seq!")
    # Issue 3 oneshot changes with a limit of 20

    #################
    # Changes Req #1
    #################
    user_b_changes_after_grant_one = client.get_changes(url=sg_url, db=sg_db,
                                                        since=user_b_changes["last_seq"], auth=user_b_session, feed="normal", limit=20)

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_one["results"]) == 20

    for doc in user_b_changes_after_grant_one["results"]:
        # cross off keys found from 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    #################
    # Changes Req #2
    #################
    user_b_changes_after_grant_two = client.get_changes(url=sg_url, db=sg_db,
                                                        since=user_b_changes_after_grant_one["last_seq"],
                                                        auth=user_b_session, feed="normal", limit=20)

    # User B should have received 20 docs due to the limit
    assert len(user_b_changes_after_grant_two["results"]) == 20

    for doc in user_b_changes_after_grant_two["results"]:
        # cross off keys found from 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    #################
    # Changes Req #3
    #################
    user_b_changes_after_grant_three = client.get_changes(url=sg_url, db=sg_db,
                                                          since=user_b_changes_after_grant_two["last_seq"],
                                                          auth=user_b_session, feed="normal", limit=20)

    # User B should have received the remaining 10 docs
    assert len(user_b_changes_after_grant_three["results"]) == 10

    for doc in user_b_changes_after_grant_three["results"]:
        # cross off keys found from 'a_docs' dictionary
        del ids_and_revs_from_a_docs[doc["id"]]

    # Make sure all the docs have been crossed out
    assert len(ids_and_revs_from_a_docs) == 0

    #################
    # Changes Req #4
    #################
    user_b_changes_after_grant_four = client.get_changes(url=sg_url, db=sg_db,
                                                         since=user_b_changes_after_grant_three["last_seq"],
                                                         auth=user_b_session, feed="normal", limit=20)

    # Changes should be caught up and there should be no results
    assert len(user_b_changes_after_grant_four["results"]) == 0
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare LiteServ to have 10000 documents.
    2. Create a single shot / continuous push replicator and push the docs into a sync_gateway database.
    3. Verify that all of the docs get pushed.
    """

    sg_db = "db"
    ls_db = "ls_db"
    seth_channels = ["ABC", "NBC"]

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=seth_channels)
    session = client.create_session(sg_one_admin, sg_db, "seth")

    client.create_database(url=ls_url, name=ls_db)

    # Create 'num_docs' docs on LiteServ
    docs = client.add_docs(
        url=ls_url,
        db=ls_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        channels=seth_channels
    )
    assert len(docs) == num_docs

    # Start push replication
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to sync_gateway
    client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    # Verify docs show up in sync_gateway's changes feed
    client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
def test_multiple_replications_not_created_with_same_properties(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/939
    1. Create LiteServ database and launch sync_gateway with database
    2. Start 50 concurrent continuous push replication requests with the same source and target
    3. Make sure the same replication id is returned
    4. Check that only 1 replication exists in 'active_tasks'
    5. Stop the replication with POST /_replicate cancel=true
    6. Start 50 concurrent continuous pull replication requests with the same source and target
    7. Make sure the same replication id is returned
    8. Check that only 1 replication exists in 'active_tasks'
    9. Stop the replication with POST /_replicate cancel=true
    """

    sg_db = "db"
    ls_db = "ls_db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_not_created_with_same_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    repl_id_num = 0
    response_one_id_num = 0
    response_two_id_num = 0

    # launch 50 concurrent push replication requests with the same source / target
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(
            client.start_replication,
            url=ls_url,
            continuous=True,
            from_db=ls_db,
            to_url=sg_one_admin,
            to_db=sg_db
        ) for _ in range(50)]

        for future in as_completed(futures):
            response_one_id = future.result()
            # Convert replication id from string "repl001" -> int 1
            response_one_id_num = int(response_one_id.replace("repl", ""))
            log_info(response_one_id_num)

    # Assert that concurrent replications have a greater session id than 0
    assert response_one_id_num > repl_id_num, "'response_one_id_num': {} should be greater than 'repl_id_num': {}".format(
        response_one_id_num,
        repl_id_num
    )

    # Check there is only one replication running
    replications = client.get_replications(ls_url)
    assert len(replications) == 1, "Number of replications, Expected: {} Actual {}".format(
        1,
        len(replications)
    )

    # Stop replication
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Check that no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual {}".format(
        0,
        len(replications)
    )

    # launch 50 concurrent pull replication requests with the same source / target
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(
            client.start_replication,
            url=ls_url,
            continuous=True,
            from_db=sg_db,
            from_url=sg_one_admin,
            to_db=ls_db
        ) for _ in range(50)]

        for future in as_completed(futures):
            response_two_id = future.result()
            # Convert replication id from string "repl001" -> int 1
            response_two_id_num = int(response_two_id.replace("repl", ""))
            log_info(response_two_id_num)

    # Assert that the second set of concurrent replication requests has a higher id than the first
    assert response_two_id_num > response_one_id_num, "'response_two_id_num': {} should be greater than 'response_one_id_num': {}".format(
        response_two_id_num,
        response_one_id_num
    )

    # Check there is only one replication running
    replications = client.get_replications(ls_url)
    assert len(replications) == 1, "Number of replications, Expected: {} Actual {}".format(
        1,
        len(replications)
    )

    # Stop replication
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=sg_db,
        from_url=sg_one_admin,
        to_db=ls_db
    )

    # Check that no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual {}".format(
        0,
        len(replications)
    )
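
Per the docstring, the replications here are stopped with POST /_replicate cancel=true. A sketch of that request with raw requests, assuming the CouchDB-style body that the Couchbase Lite listener mimics (treat the exact body shape as an assumption):

import requests

def cancel_replication(ls_url, source, target, continuous=True):
    # Cancel by POSTing the same properties used to start the replication,
    # plus "cancel": true.
    body = {"cancel": True, "source": source, "target": target, "continuous": continuous}
    resp = requests.post("{}/_replicate".format(ls_url), json=body)
    resp.raise_for_status()
    return resp.json()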
예제 #57
0
def test_longpoll_awaken_via_sync_role(params_from_base_test_setup, sg_conf_name):
    """
    Test that longpoll changes feed wakes up on role() in sync_function

    The contrived sync_function below is used:
        function(doc, oldDoc){
            if(doc._id == "role_doc_0") {
                console.log("granting_access!");
                role(["adam", "traun", "andy"], "role:techno");
            }
            channel(doc, doc.channels);
        }
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    sg_db = "db"
    techno_role = "techno"
    techno_channel = "aphex"
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    client = MobileRestClient()

    client.create_role(url=sg_admin_url, db=sg_db, name=techno_role, channels=[techno_channel])
    admin_user_info = userinfo.UserInfo(name="admin", password="******", channels=[], roles=[techno_role])

    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=[], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[])

    admin_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=admin_user_info.name, password=admin_user_info.password, roles=admin_user_info.roles)

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=adam_user_info.name, password=adam_user_info.password)

    traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=traun_user_info.name, password=traun_user_info.password)

    andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=andy_user_info.name, password=andy_user_info.password)

    client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="techno_doc", channels=[techno_channel], auth=admin_auth)

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Add 'role_doc_0' so the sync function grants adam, traun and andy the 'techno' role
        client.add_docs(url=sg_url, db=sg_db, number=1, id_prefix="role_doc", channels=[], auth=admin_auth)

        # Changes feeds should wake up with the 'techno_doc_0' doc, now visible through the granted role
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "techno_doc_0"
        assert adam_changes["results"][0]["changes"][0]["rev"].startswith("1-")

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "techno_doc_0"
        assert traun_changes["results"][0]["changes"][0]["rev"].startswith("1-")

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "techno_doc_0"
        assert andy_changes["results"][0]["changes"][0]["rev"].startswith("1-")

    # Assert that the changes are caught up and should receive no new changes from last_seq
    # Test for https://github.com/couchbase/sync_gateway/issues/2186
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=adam_changes["last_seq"], auth=adam_auth, timeout=1)
    assert len(adam_changes["results"]) == 0

    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=traun_changes["last_seq"], auth=traun_auth, timeout=1)
    assert len(traun_changes["results"]) == 0

    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=andy_changes["last_seq"], auth=andy_auth, timeout=1)
    assert len(andy_changes["results"]) == 0
Example #58
def test_longpoll_awaken_channels(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    cluster_topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology["sync_gateways"][0]["admin"]
    sg_url = cluster_topology["sync_gateways"][0]["public"]

    log_info("sg_conf: {}".format(sg_conf))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    adam_user_info = userinfo.UserInfo(name="adam", password="******", channels=["NBC", "ABC"], roles=[])
    traun_user_info = userinfo.UserInfo(name="traun", password="******", channels=[], roles=[])
    andy_user_info = userinfo.UserInfo(name="andy", password="******", channels=[], roles=[])
    sg_db = "db"
    doc_id = "adam_doc_0"

    client = MobileRestClient()

    adam_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=adam_user_info.name, password=adam_user_info.password, channels=adam_user_info.channels)

    traun_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                    name=traun_user_info.name, password=traun_user_info.password, channels=traun_user_info.channels)

    andy_auth = client.create_user(url=sg_admin_url, db=sg_db,
                                   name=andy_user_info.name, password=andy_user_info.password, channels=andy_user_info.channels)

    ############################################################
    # changes feed wakes with Channel Access via Admin API
    ############################################################

    # Get starting sequence of docs, use the last seq to progress past any _user docs.
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start changes feed for 3 users
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Add a doc for adam with the "NBC" and "ABC" channels
        # Adding one doc should wake up the changes feed
        adam_add_docs_task = ex.submit(client.add_docs, url=sg_url, db=sg_db,
                                       number=1, id_prefix="adam_doc",
                                       auth=adam_auth, channels=adam_user_info.channels)

        # Wait for docs adds to complete
        adam_docs = adam_add_docs_task.result()
        assert len(adam_docs) == 1

        # Assert that the changes feed woke up and that the doc change was propagated
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == doc_id

        # Verify that the changes feed is still listening for Traun and Andy
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update traun and andy so that each has one of adam's channels
        update_traun_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db,
                                           name=traun_user_info.name, password=traun_user_info.password, channels=["NBC"])
        traun_auth = update_traun_user_task.result()

        update_andy_user_task = ex.submit(client.update_user, url=sg_admin_url, db=sg_db,
                                          name=andy_user_info.name, password=andy_user_info.password, channels=["ABC"])
        andy_auth = update_andy_user_task.result()

        # Make sure the changes feed wakes up and contains at least one change; 2 are possible if the _user doc is included
        # Make sure the first change is 'adam_doc'
        traun_changes = traun_changes_task.result()
        assert 1 <= len(traun_changes["results"]) <= 2
        assert traun_changes["results"][0]["id"] == "adam_doc_0" or traun_changes["results"][0]["id"] == "_user/traun"

        andy_changes = andy_changes_task.result()
        assert 1 <= len(andy_changes["results"]) <= 2
        assert andy_changes["results"][0]["id"] == "adam_doc_0" or andy_changes["results"][0]["id"] == "_user/andy"

    # Block until user docs are seen
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/adam", auth=adam_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/traun", auth=traun_auth)
    client.verify_doc_id_in_changes(url=sg_url, db=sg_db, expected_doc_id="_user/andy", auth=andy_auth)

    # Make sure that adam's doc shows up in changes, since the changes feed may have been woken up by a _user doc above
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=adam_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=traun_auth)
    client.verify_docs_in_changes(url=sg_url, db=sg_db, expected_docs=adam_docs, auth=andy_auth)

    ############################################################
    # changes feed wakes with Channel Removal via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:

        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Remove the channels property from the doc
        client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=traun_auth, channels=[])

        # All three changes feeds should wake up and return one result
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["removed"] == ["ABC", "NBC"]

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["removed"] == ["NBC"]

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["removed"] == ["ABC"]

    # Verify that the users can no longer access the doc
    for user_auth in [adam_auth, traun_auth, andy_auth]:
        with pytest.raises(requests.exceptions.HTTPError) as excinfo:
            client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id, auth=user_auth)
        assert "403 Client Error: Forbidden for url:" in excinfo.value.message

    ############################################################
    # changes feed wakes with Channel Grant via Sync function
    ############################################################

    # Get latest last_seq for next test section
    adam_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=adam_auth)
    traun_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=traun_auth)
    andy_changes = client.get_changes(url=sg_url, db=sg_db, since=0, feed="normal", auth=andy_auth)

    admin_auth = client.create_user(url=sg_admin_url, db=sg_db, name="admin", password="******", channels=["admin"])

    channel_grant_doc_id = "channel_grant_with_doc_initially"

    # Add a doc that is initially visible only to the admin user (channel 'admin')
    channel_grant_doc_body = document.create_doc(doc_id=channel_grant_doc_id, channels=["admin"])
    client.add_doc(url=sg_url, db=sg_db, doc=channel_grant_doc_body, auth=admin_auth)

    with concurrent.futures.ProcessPoolExecutor() as ex:
        # Start changes feed for 3 users from latest last_seq
        adam_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=adam_changes["last_seq"], timeout=10, auth=adam_auth)
        traun_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=traun_changes["last_seq"], timeout=10, auth=traun_auth)
        andy_changes_task = ex.submit(client.get_changes, url=sg_url, db=sg_db, since=andy_changes["last_seq"], timeout=10, auth=andy_auth)

        # Wait for changes feed to notice there are no changes and enter wait. 2 seconds should be more than enough
        time.sleep(2)

        # Make sure the changes futures are still running and have not exited due to any new changes;
        # the feeds should be caught up and waiting
        assert not adam_changes_task.done()
        assert not traun_changes_task.done()
        assert not andy_changes_task.done()

        # Update the grant doc so its channels cover all three users
        update_task = ex.submit(client.update_doc, url=sg_url, db=sg_db, doc_id=channel_grant_doc_id, auth=admin_auth, channels=["admin", "ABC", "NBC"])
        updated_doc = update_task.result()
        assert updated_doc["rev"].startswith("2-")

        # Verify that the access grant wakes up the changes feed for adam, traun and andy
        adam_changes = adam_changes_task.result()
        assert len(adam_changes["results"]) == 1
        assert adam_changes["results"][0]["id"] == "channel_grant_with_doc_intially"
        assert adam_changes["results"][0]["changes"][0]["rev"].startswith("2-")

        traun_changes = traun_changes_task.result()
        assert len(traun_changes["results"]) == 1
        assert traun_changes["results"][0]["id"] == "channel_grant_with_doc_initially"
        assert traun_changes["results"][0]["changes"][0]["rev"].startswith("2-")

        andy_changes = andy_changes_task.result()
        assert len(andy_changes["results"]) == 1
        assert andy_changes["results"][0]["id"] == "channel_grant_with_doc_initially"
        assert andy_changes["results"][0]["changes"][0]["rev"].startswith("2-")
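
The test above repeats the same park-and-wake choreography three times: start a blocking longpoll in a worker process, confirm it is still waiting, then perform the action that should wake it. Distilled to a sketch (get_changes and trigger stand in for the client calls used in the test; with ProcessPoolExecutor they must be picklable, module-level callables):

import concurrent.futures
import time

def wait_then_wake(get_changes, trigger):
    # Park a blocking longpoll call, verify it is genuinely waiting,
    # then fire the event that should wake it and return its result
    with concurrent.futures.ProcessPoolExecutor() as ex:
        task = ex.submit(get_changes)
        time.sleep(2)            # let the feed catch up and enter wait
        assert not task.done()   # still blocked, so no spurious changes
        trigger()                # e.g. add a doc or grant a channel
        return task.result()     # longpoll returns with the new change(s)
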
Example #59
def test_listener_two_sync_gateways(setup_client_syncgateway_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js
    Scenario:
      1. Start 2 sync_gateways
      2. Create sg_db_one db on sync_gateway one
      3. Create sg_db_two db on sync_gateway two
      4. Create ls_db_one and ls_db_two on Liteserv
      5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
      6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
      7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
      8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
      9. Add num_docs / 2 to each liteserv database
      10. Verify each database has num_docs docs
      11. Verify all_docs in all dbs
      12. Verify changes feed for sg_db_one and sg_db_two
      13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_syncgateway_test["ls_url"]
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_config)

    sg_one_admin_url = topology["sync_gateways"][0]["admin"]
    sg_two_admin_url = topology["sync_gateways"][1]["admin"]
    cb_server_url = topology["couchbase_servers"][0]

    log_info("Sync Gateway 1 admin url: {}".format(sg_one_admin_url))
    log_info("Sync Gateway 2 admin url: {}".format(sg_two_admin_url))
    log_info("Couchbase Server url: {}".format(cb_server_url))

    c = cluster.Cluster(cluster_config)
    sg_config_path = sync_gateway_config_path_for_mode(
        "listener_tests/multiple_sync_gateways", sg_mode)
    c.reset(sg_config_path=sg_config_path)

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Delete sg_db2 on sync_gateway 1
    client.delete_database(url=sg_one_admin_url, name=sg_db_two)

    # Delete sg_db1 on sync_gateway 2
    client.delete_database(url=sg_two_admin_url, name=sg_db_one)

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push pull replication ls_db_one <-> sg_db_one
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_one,
                             to_url=sg_one_admin_url,
                             to_db=sg_db_one)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_one_admin_url,
                             from_db=sg_db_one,
                             to_db=ls_db_one)

    # Start continuous push pull replication ls_db_two <-> sg_db_two
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_two,
                             to_url=sg_two_admin_url,
                             to_db=sg_db_two)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_two_admin_url,
                             from_db=sg_db_two,
                             to_db=ls_db_two)

    # Start continuous push pull replication sg_db_one <-> ls_db_two
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_one_admin_url,
                             from_db=sg_db_one,
                             to_db=ls_db_two)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_two,
                             to_url=sg_one_admin_url,
                             to_db=sg_db_one)

    # Start continuous push pull replication sg_db_two <-> ls_db_one
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_two_admin_url,
                             from_db=sg_db_two,
                             to_db=ls_db_one)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_one,
                             to_url=sg_two_admin_url,
                             to_db=sg_db_two)

    ls_db_one_docs = client.add_docs(url=ls_url,
                                     db=ls_db_one,
                                     number=num_docs // 2,
                                     id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs // 2

    ls_db_two_docs = client.add_docs(url=ls_url,
                                     db=ls_db_two,
                                     number=num_docs // 2,
                                     id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs // 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == 500

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url,
                               db=ls_db_one,
                               expected_docs=all_docs)
    client.verify_docs_present(url=ls_url,
                               db=ls_db_two,
                               expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url,
                               db=sg_db_one,
                               expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url,
                               db=sg_db_two,
                               expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url,
                                  db=ls_db_one,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url,
                                  db=ls_db_two,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url,
                                  db=sg_db_one,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url,
                                  db=sg_db_two,
                                  expected_docs=all_docs)
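
Each "<->" pair in the scenario above is really two one-way continuous replications registered with the listener, which is why the test makes eight start_replication calls. They could be collapsed into a push/pull helper along these lines (a sketch against the same MobileRestClient interface the test already uses):

def start_push_pull(client, ls_url, local_db, remote_url, remote_db):
    # Push: local LiteServ db -> remote Sync Gateway db
    client.start_replication(url=ls_url, continuous=True,
                             from_db=local_db, to_url=remote_url, to_db=remote_db)
    # Pull: remote Sync Gateway db -> local LiteServ db
    client.start_replication(url=ls_url, continuous=True,
                             from_url=remote_url, from_db=remote_db, to_db=local_db)
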
Example #60
def test_remove_add_channels_to_doc(params_from_base_test_setup, sg_conf_name):

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster = Cluster(cluster_config)
    cluster.reset(sg_conf)

    client = MobileRestClient()

    admin_user_info = userinfo.UserInfo("admin", "pass", channels=["A", "B"], roles=[])
    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])

    admin_user_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=admin_user_info.name,
        password=admin_user_info.password,
        channels=admin_user_info.channels,
    )

    a_user_auth = client.create_user(
        url=sg_admin_url, db=sg_db, name=a_user_info.name, password=a_user_info.password, channels=a_user_info.channels
    )

    a_docs = client.add_docs(
        url=sg_url, db=sg_db, number=50, id_prefix="a_doc", auth=admin_user_auth, channels=admin_user_info.channels
    )

    # Build a dictionary of the a_docs
    a_docs_id_rev = {doc["id"]: doc["rev"] for doc in a_docs}
    assert len(a_docs_id_rev) == 50

    # Wait for all docs to show up in changes
    client.verify_doc_id_in_changes(sg_url, sg_db, expected_doc_id="_user/a_user", auth=a_user_auth)
    client.verify_docs_in_changes(sg_url, sg_db, expected_docs=a_docs, auth=a_user_auth)

    # Get changes for 'a_user'
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=0, auth=a_user_auth, feed="normal")

    # 'a_user' should get 50 'a_doc_*' docs and 1 '_user/a_user' doc
    assert len(a_user_changes["results"]) == 51

    ###########################
    # Remove Channels from doc
    ###########################

    # Copy a_docs_id_rev into a dictionary to scratch values off of
    remove_docs_scratch_off = a_docs_id_rev.copy()
    assert len(remove_docs_scratch_off) == 50

    # Use admin user to update the docs to remove 'A' from the channels property on the doc and add 'B'
    client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["B"])

    # A longpoll loop is required because changes take time to propagate to the client
    changes_timeout = 10
    start = time.time()
    last_seq = a_user_changes["last_seq"]
    while True:

        # If it takes longer than 10 seconds, fail the test
        if time.time() - start > changes_timeout:
            raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")

        # We found everything, exit loop!
        if remove_docs_scratch_off == {}:
            log_info("All expected docs found to be removed")
            break

        # Get changes for 'a_user' from last_seq
        a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10)
        assert len(a_user_changes["results"]) > 0

        # Loop over changes found and perform the following
        #   1. Check that the doc is flagged with 'removed'
        #   2. Cross the doc off the 'remove_docs_scratch_off' dictionary
        for change in a_user_changes["results"]:
            assert change["removed"] == ["A"]
            assert change["changes"][0]["rev"].startswith("2-")
            # This will blow up if any change is not found in that dictionary
            del remove_docs_scratch_off[change["id"]]

        # Update last_seq
        last_seq = a_user_changes["last_seq"]

    # Issue a changes request from 'last_seq' and verify that the feed is up to date and returns no results
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal")
    assert len(a_user_changes["results"]) == 0

    #########################
    # Add Channels to doc
    #########################

    # Copy the a_docs_id_rev dictionary to scratch values off of
    add_docs_scratch_off = a_docs_id_rev.copy()
    assert len(add_docs_scratch_off) == 50

    # Use admin user to update the docs to add ['A'] back to document channels
    client.update_docs(url=sg_url, db=sg_db, docs=a_docs, number_updates=1, auth=admin_user_auth, channels=["A"])

    # A longpoll loop is required because changes take time to propagate to the client
    changes_timeout = 10
    start = time.time()
    last_seq = a_user_changes["last_seq"]
    while True:

        # If it takes longer than 10 seconds, fail the test
        if time.time() - start > changes_timeout:
            raise keywords.exceptions.TimeoutException("Could not find all expected docs in changes feed")

        # We found everything, exit loop!
        if add_docs_scratch_off == {}:
            log_info("All expected docs found to be removed")
            break

        # Get changes for 'a_user' from last_seq
        a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, timeout=10)
        assert len(a_user_changes["results"]) > 0

        # Loop over changes found and perform the following
        #   1. Check that the doc has a 3rd-generation rev prefix
        #   2. Cross the doc off the 'add_docs_scratch_off' dictionary
        for change in a_user_changes["results"]:
            assert change["changes"][0]["rev"].startswith("3-")
            # This will blow up if any change is not found in that dictionary
            del add_docs_scratch_off[change["id"]]

        # Update last_seq
        last_seq = a_user_changes["last_seq"]

    # Issue a changes request from 'last_seq' and verify that the feed is up to date and returns no results
    a_user_changes = client.get_changes(url=sg_url, db=sg_db, since=last_seq, auth=a_user_auth, feed="normal")
    assert len(a_user_changes["results"]) == 0
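
Both halves of the test above use the same scratch-off idiom: poll the changes feed from a known last_seq, assert on each change, and cross doc ids off a dictionary until it is empty or a deadline passes. A generalized sketch of that loop (get_changes and the per-change check callable are caller-supplied stand-ins, not part of the test framework):

import time

def scratch_off_changes(get_changes, last_seq, expected_ids, check, deadline=10):
    # Poll the changes feed until every expected doc id has been seen
    remaining = set(expected_ids)
    start = time.time()
    while remaining:
        if time.time() - start > deadline:
            raise RuntimeError("Docs never appeared in _changes: {}".format(remaining))
        changes = get_changes(since=last_seq)
        for change in changes["results"]:
            check(change)
            remaining.discard(change["id"])
        last_seq = changes["last_seq"]
    return last_seq
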