Example #1
def grant_users_access(users, channels, sg_admin_url, sg_db):
    sg_client = MobileRestClient()
    for username in users:
        sg_client.update_user(url=sg_admin_url,
                              db=sg_db,
                              name=username,
                              channels=channels)
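
A minimal usage sketch for the helper above, assuming the users already exist and that the Sync Gateway admin API is reachable (the URL and user names here are illustrative assumptions):

# Hypothetical values; assumes "seth" and "raghu" were created earlier
grant_users_access(users=["seth", "raghu"],
                   channels=["NBC", "ABC"],
                   sg_admin_url="http://localhost:4985",
                   sg_db="db")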
def test_raw_attachment(setup_client_syncgateway_test):
    """
    1.  Add Text attachment to sync_gateway
    2.  Try to get the raw attachment
    Pass: It is possible to get the raw attachment
    """

    log_info("Running 'test_raw_attachment'")

    ls_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()

    ls_db = client.create_database(ls_url, name="ls_db")

    ls_user_channels = ["NBC"]

    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc",
                                       content={"sample_key": "sample_val"},
                                       attachments=atts,
                                       channels=ls_user_channels)

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_with_att)

    att = client.get_attachment(url=ls_url,
                                db=ls_db,
                                doc_id=doc["id"],
                                attachment_name="sample_text.txt")

    expected_text = "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua.\nUt enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat.\nDuis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur.\nExcepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum."

    assert expected_text == att
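
For reference, a hedged sketch of the raw-attachment fetch that client.get_attachment presumably wraps (the listener exposes the standard CouchDB-style attachment endpoint; the helper name is illustrative):

import requests

def get_raw_attachment(ls_url, db, doc_id, attachment_name):
    # GET /{db}/{doc_id}/{attachment_name} returns the raw attachment body
    resp = requests.get("{}/{}/{}/{}".format(ls_url, db, doc_id, attachment_name))
    resp.raise_for_status()
    return resp.text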
Example #3
def setup_p2p_test(request, setup_p2p_suite):

    """Test setup fixture for p2p client tests"""

    log_info("Setting up P2P test ...")

    liteserv_one = setup_p2p_suite["liteserv_one"]
    liteserv_two = setup_p2p_suite["liteserv_two"]

    test_name = request.node.name

    print("Starting LiteServ One ...")
    ls_logging_one = "{}/logs/{}-ls1-{}-{}.txt".format(RESULTS_DIR, type(liteserv_one).__name__, test_name, datetime.datetime.now())
    ls_url_one = liteserv_one.start(ls_logging_one)

    print("Starting LiteServ Two ...")
    ls_logging_two = "{}/logs/{}-ls2-{}-{}.txt".format(RESULTS_DIR, type(liteserv_two).__name__, test_name, datetime.datetime.now())
    ls_url_two = liteserv_two.start(ls_logging_two)

    # Yield values to test case via fixture argument
    yield {"ls_url_one": ls_url_one, "ls_url_two": ls_url_two}

    log_info("Tearing down test")

    # Teardown test
    client = MobileRestClient()
    client.delete_databases(ls_url_one)
    client.delete_databases(ls_url_two)

    liteserv_one.stop()
    liteserv_two.stop()
def test_longpoll_changes_termination_timeout(setup_client_syncgateway_test):
    """https://github.com/couchbase/couchbase-lite-java-core/issues/1296
    1. Create 30 longpoll _changes in a loop (with timeout parameter = 5s)
    2. Cancel the request after 2s
    3. Wait 5.1s
    4. Create another request GET /db/ on listener and make sure the listener responds
    """
    ls_db = "ls_db"
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("Running 'test_longpoll_changes_termination' ...")
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()
    client.create_database(ls_url, ls_db)

    ct = ChangesTracker(ls_url, ls_db)

    with ThreadPoolExecutor(max_workers=35) as executor:

        futures = [
            executor.submit(ct.start, timeout=5000, request_timeout=2000)
            for _ in range(30)
        ]

        for future in as_completed(futures):
            log_info("Future _changes loop complete")

    log_info("Futures exited")

    # make sure client can still take connections
    dbs = client.get_databases(url=ls_url)
    log_info(dbs)
    database = client.get_database(url=ls_url, db_name=ls_db)
    log_info(database)
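
For context, a minimal sketch of a single longpoll _changes request of the kind ChangesTracker.start presumably issues in a loop; the parameter mapping (server-side 'timeout' in ms, client-side socket timeout in seconds) is an assumption:

import requests

def longpoll_changes_once(ls_url, db, since=0, timeout=5000, request_timeout=2.0):
    # The server holds the longpoll open for up to 'timeout' ms; the client
    # socket gives up after 'request_timeout' seconds, cancelling the request early.
    resp = requests.get("{}/{}/_changes".format(ls_url, db),
                        params={"feed": "longpoll", "since": since,
                                "timeout": timeout},
                        timeout=request_timeout)
    resp.raise_for_status()
    return resp.json()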
def test_auto_prune_listener_sanity(setup_client_syncgateway_test):
    """Sanity test for the autoprune feature

    1. Create a db and put a doc
    2. Update the docs past the default revs_limit (20)
    3. Assert that the docs only retain 20 revs
    """

    ls_url = setup_client_syncgateway_test["ls_url"]
    client = MobileRestClient()

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))

    num_docs = 1
    num_revs = 100

    ls_db = client.create_database(url=ls_url, name="ls_db")
    docs = client.add_docs(url=ls_url,
                           db=ls_db,
                           number=num_docs,
                           id_prefix="ls_db_doc")
    assert len(docs) == num_docs

    client.update_docs(url=ls_url,
                       db=ls_db,
                       docs=docs,
                       number_updates=num_revs)

    client.verify_max_revs_num_for_docs(url=ls_url,
                                        db=ls_db,
                                        docs=docs,
                                        expected_max_number_revs_per_doc=20)
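
A hedged sketch of the kind of check verify_max_revs_num_for_docs presumably performs: fetch each doc with revs=true and count the revision ids (standard document API endpoint; the helper name is illustrative):

import requests

def assert_max_revs(ls_url, db, docs, expected_max_revs=20):
    for doc in docs:
        resp = requests.get("{}/{}/{}".format(ls_url, db, doc["id"]),
                            params={"revs": "true"})
        resp.raise_for_status()
        # After pruning, the revision history should be capped at revs_limit
        assert len(resp.json()["_revisions"]["ids"]) <= expected_max_revs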
Example #6
def test_peer_2_peer_sanity_push_one_shot_continuous(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 1 ls_db1
    4. Create one shot push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    5. sleep for 5 seconds
    6. Create continuous push replication LiteServ 1 ls_db1 -> LiteServ 2 ls_db2
    7. Add 10000 docs to LiteServ 1 ls_db1
    8. Verify all docs replicate to LiteServ 2 ls_db2
    9. Verify all docs show up in changes for LiteServ 2 ls_db2
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 10000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    bulk_docs = create_docs("test_ls_db1_oneshot", num_docs_per_db)
    ls_db1_docs_oneshot = client.add_bulk_docs(ls_url_one, ls_db1, bulk_docs)
    assert len(ls_db1_docs_oneshot) == num_docs_per_db

    # Setup one shot push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a one-shot push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one,
        continuous=False,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))

    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot)

    # Setup continuous push replication from LiteServ 1 ls_db1 to LiteServ 2 ls_db2
    log_info("Setting up a continuous push replication from ls_db1 to ls_db2")
    push_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_db=ls_db1,
        to_url=ls_url_two, to_db=ls_db2,
    )
    log_info("Replication ID: {}".format(push_repl))

    ls_db1_docs = client.add_docs(url=ls_url_one, db=ls_db1, number=num_docs_per_db, id_prefix="test_ls_db1")
    assert len(ls_db1_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
    client.verify_docs_in_changes(url=ls_url_two, db=ls_db2, expected_docs=ls_db1_docs_oneshot + ls_db1_docs)
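
For reference, a minimal sketch of the CouchDB-style replication call that client.start_replication presumably wraps on the LiteServ listener (the exact payload keys are assumptions based on the public _replicate API):

import requests

def start_push_replication(url, from_db, to_url, to_db, continuous=False):
    # POST /_replicate with a local source db and a remote target URL
    resp = requests.post("{}/_replicate".format(url),
                         json={"source": from_db,
                               "target": "{}/{}".format(to_url, to_db),
                               "continuous": continuous})
    resp.raise_for_status()
    return resp.json().get("session_id")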
def send_changes_termination_doc(auth, terminator_doc_id, terminator_channel,
                                 ls_url, ls_db):
    ls_client = MobileRestClient()
    doc_body = {
        "_id": terminator_doc_id,
        "channels": terminator_channel,
        "foo": "bar"
    }
    ls_client.add_doc(ls_url, ls_db, doc_body, auth=auth, use_post=False)
Example #8
def test_view_query_performance(setup_client_syncgateway_test):
    """
    @summary
     Run this test when a new iOS version arrives to make sure CBL query performance does not degrade
    1. Add 100000 docs to the client with content
    2. Create design doc version 1 to fetch doc._id, doc._rev for docs with content
    3. Update docs 3 times, which brings each doc to revision generation 4
    4. Run a query and check for 100000 expected docs with design doc version 1
    5. Verify the view query finishes in less than 5 seconds
    """

    log_info("Running 'test_design_doc_update'")

    ls_url = setup_client_syncgateway_test["ls_url"]
    log_info("ls_url: {}".format(ls_url))

    client = MobileRestClient()

    num_content_docs_per_db = 100000
    d_doc_name = "dd"
    ls_db = client.create_database(ls_url, name="ls_db")

    # Add 100000 docs to the client with content
    bulk_docs_content = create_docs("doc_content_",
                                    num_content_docs_per_db,
                                    content={"hi": "I should be in the view"})
    ls_db_docs1 = client.add_bulk_docs(url=ls_url,
                                       db=ls_db,
                                       docs=bulk_docs_content)
    assert len(ls_db_docs1) == num_content_docs_per_db

    client.update_docs(url=ls_url,
                       db=ls_db,
                       docs=ls_db_docs1,
                       number_updates=3,
                       delay=0.1)
    # Design doc to fetch doc._id, doc._rev for docs with content
    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        }
    }
}"""

    client.add_design_doc(url=ls_url, db=ls_db, name=d_doc_name, doc=view)
    start = time.time()
    content_view_rows_1 = client.get_view(url=ls_url,
                                          db=ls_db,
                                          design_doc_name=d_doc_name,
                                          view_name="content_view")
    finish = time.time()
    assert finish - start < 5
    client.verify_view_row_num(view_response=content_view_rows_1,
                               expected_num_rows=num_content_docs_per_db)
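
A hedged sketch of the view query that client.get_view presumably issues (standard design-document endpoint; the function name is illustrative):

import requests

def query_view(ls_url, db, design_doc_name, view_name):
    # GET /{db}/_design/{ddoc}/_view/{view} returns {"total_rows": ..., "rows": [...]}
    resp = requests.get("{}/{}/_design/{}/_view/{}".format(
        ls_url, db, design_doc_name, view_name))
    resp.raise_for_status()
    return resp.json()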
Example #9
def send_changes_termination_doc(sg_url, sg_db, users, terminator_doc_id,
                                 terminator_channel):
    sg_client = MobileRestClient()

    random_user_id = random.choice(list(users.keys()))
    random_user = users[random_user_id]
    log_info('Sending changes termination doc for all users')
    doc = {'_id': terminator_doc_id, 'channels': [terminator_channel]}
    sg_client.add_doc(url=sg_url, db=sg_db, doc=doc, auth=random_user['auth'])
Example #10
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite["liteserv"]
    cluster_config = setup_client_syncgateway_suite["cluster_config"]
    xattrs_enabled = setup_client_syncgateway_suite["xattrs_enabled"]
    device_enabled = setup_client_syncgateway_suite["device_enabled"]
    liteserv_platform = setup_client_syncgateway_suite["liteserv_platform"]
    liteserv_version = setup_client_syncgateway_suite["liteserv_version"]
    test_name = request.node.name

    if request.config.getoption("--liteserv-platform") == "macosx" and \
            str(request.config.getoption("--liteserv-version")).startswith("1.3.1") and \
            str(test_name).startswith("test_longpoll_changes_termination"):
        pytest.skip("test_longpoll_changes_termination tests are known to fail on macosx with 1.3.1 CBL")

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    log_info("Starting LiteServ...")
    if device_enabled and liteserv_platform == "ios":
        ls_url = liteserv.start_device("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()))
    else:
        ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_config=cluster_config)

    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]
    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "sg_mode": setup_client_syncgateway_suite["sg_mode"],
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "xattrs_enabled": xattrs_enabled,
        "liteserv": liteserv,
        "liteserv_platform": liteserv_platform,
        "device_enabled": device_enabled,
        "liteserv_version": liteserv_version
    }

    log_info("Tearing down test")

    client.delete_databases(ls_url)
    liteserv.stop()

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)
def test_no_conflicts_enabled(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary : Enable no conflicts and verify conflicts are not created
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #3
    Steps:
    1. Enable allow_conflicts = false in SG config
    2. Add docs to SG.
    3. Update the docs a few times.
    4. Try to create a conflict.
    5. Check the revision list for the doc
    6. Verify no conflicts can be created.
    7. Sync Gateway should respond with a 409 upon trying to add a conflict
    8. Should be possible to add more than 1 revision (Default revs_limit = 1000)
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    sg_db = "db"

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs few times
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=3, delay=None, auth=autouser_session, channels=channels)

    # 4. Try to create a conflict
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo",
                                   auth=autouser_session)
        assert str(he.value).startswith('409 Client Error: Conflict for url:')

    # 8. Update the docs 1 more time to verify additional revisions can still be added
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None, auth=autouser_session, channels=channels)
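
For context, a hedged sketch of how a conflicting revision is typically injected over REST; add_conflict presumably does something similar, PUTting the document with new_edits=false so the server stores the supplied revision id verbatim (the helper name and body fields are illustrative):

import requests

def try_add_conflict(sg_url, db, doc_id, parent_rev, new_rev, auth=None):
    new_gen, new_digest = new_rev.split("-", 1)
    parent_digest = parent_rev.split("-", 1)[1]
    body = {
        "_id": doc_id,
        "_revisions": {"start": int(new_gen),
                       "ids": [new_digest, parent_digest]},
        "conflict": True
    }
    resp = requests.put("{}/{}/{}".format(sg_url, db, doc_id),
                        params={"new_edits": "false"}, json=body, auth=auth)
    resp.raise_for_status()  # with allow_conflicts=false this raises a 409
    return resp.json()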
Example #12
def test_peer_2_peer_sanity_pull(setup_p2p_test, num_docs_per_db, seeded_db, attachments_generator):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Create continuous pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    4. Add 5000 docs to LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    attachments = False

    if attachments_generator:
        log_info("Running test_peer_2_peer_sanity_pull with attachment {}".format(attachments_generator))
        attachments = True

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    if seeded_db:
        bulk_docs = create_docs("test_ls_db2_seed", num_docs_per_db)
        ls_db2_docs_seed = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs)
        assert len(ls_db2_docs_seed) == num_docs_per_db

    # Setup continuous pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=True,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )

    client.wait_for_replication_status_idle(url=ls_url_one, replication_id=pull_repl)

    ls_db2_docs = client.add_docs(url=ls_url_two, db=ls_db2, number=num_docs_per_db, id_prefix="test_ls_db2", attachments_generator=attachments_generator)
    assert len(ls_db2_docs) == num_docs_per_db

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)

    total_ls_db2_docs = ls_db2_docs
    if seeded_db:
        total_ls_db2_docs += ls_db2_docs_seed

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs, attachments=attachments)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=total_ls_db2_docs)
def test_load_balance_sanity(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf_name = "sync_gateway_default_functional_tests"
    sg_conf_path = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_util = ClusterKeywords()
    cluster_util.reset_cluster(
        cluster_config=cluster_config,
        sync_gateway_config=sg_conf_path
    )

    topology = cluster_util.get_cluster_topology(cluster_config)
    admin_sg_one = topology["sync_gateways"][0]["admin"]
    lb_url = "{}:4984".format(topology["load_balancers"][0])

    sg_db = "db"
    num_docs = 1000
    sg_user_name = "seth"
    sg_user_password = "******"
    channels = ["ABC", "CBS"]

    client = MobileRestClient()

    user = client.create_user(admin_sg_one, sg_db, sg_user_name, sg_user_password, channels=channels)
    session = client.create_session(admin_sg_one, sg_db, sg_user_name)

    log_info(user)
    log_info(session)

    log_info("Adding docs to the load balancer ...")

    ct = ChangesTracker(url=lb_url, db=sg_db, auth=session)

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:
        log_info("Starting ...")
        ct_task = executor.submit(ct.start)
        log_info("Adding docs ...")
        docs = client.add_docs(lb_url, sg_db, num_docs, "test_doc", channels=channels, auth=session)
        assert len(docs) == num_docs

        log_info("Adding docs done")
        wait_for_changes = executor.submit(ct.wait_until, docs)

        if wait_for_changes.result():
            log_info("Stopping ...")
            log_info("Found all docs ...")
            executor.submit(ct.stop)
            ct_task.result()
        else:
            executor.submit(ct.stop)
            ct_task.result()
            raise Exception("Could not find all changes in feed before timeout!!")
Example #14
def verify_sg_docs_revision_history(url, db, added_docs):
    sg_client = MobileRestClient()
    expected_doc_map = {
        added_doc["id"]: added_doc["rev"]
        for added_doc in added_docs
    }
    doc_ids = list(expected_doc_map.keys())

    log_info("Bulk getting docs from sync gateway")
    docs = sg_client.get_bulk_docs(url, db, doc_ids, rev_history="true")
    assert len(docs[0]) == len(doc_ids)

    for doc in docs:
        for doc_dict in doc:
            rev = doc_dict["_rev"]
            rev_gen = int(rev.split("-")[0])
            doc_id = doc_dict["_id"]
            # Verify meta data
            log_info("Verifying that doc {} has rev {}".format(
                doc_id, expected_doc_map[doc_id]))
            assert rev == expected_doc_map[doc_id]
            log_info(
                "Doc {}: Expected number of revs: {}, Actual revs: {}".format(
                    doc_id, rev_gen, len(doc_dict["_revisions"]["ids"])))
            assert len(doc_dict["_revisions"]["ids"]) == rev_gen
            log_info(
                "Verifying that doc {} is associated with sg_user_channel channel"
                .format(doc_id))
            assert doc_dict["channels"][0] == "sg_user_channel"
            # Verify doc body
            log_info("Verifying doc body for {}".format(doc_id))
            assert "guid" in doc_dict
            assert "index" in doc_dict
            assert "latitude" in doc_dict
            assert "email" in doc_dict
            assert "picture" in doc_dict
            assert len(doc_dict["tags"]) == 3
            assert "date_time_added" in doc_dict
            assert "company" in doc_dict
            assert "eyeColor" in doc_dict
            assert "phone" in doc_dict
            assert "updates" in doc_dict
            assert "address" in doc_dict
            assert len(doc_dict["friends"]) == 2
            assert "isActive" in doc_dict
            assert "about" in doc_dict
            assert "name" in doc_dict
            assert "age" in doc_dict
            assert "registered" in doc_dict
            assert "longitude" in doc_dict
            assert "_attachments" in doc_dict
            assert "range" in doc_dict
            assert "balance" in doc_dict
            log_info("Verified doc body for {}".format(doc_id))
Example #15
def start_polling_changes_worker(sg_url, sg_db, user_name, user_auth,
                                 changes_delay, changes_limit,
                                 terminator_doc_id, feed, channels_filtered,
                                 doc_ids_filtered):
    sg_client = MobileRestClient()
    since = 0
    latest_changes = {}
    found_terminator = False

    # Pass a channel filter to changes request if filtered is true
    filter_type = None
    filter_channels = None
    filter_doc_ids = None

    if channels_filtered:
        filter_type = 'sync_gateway/bychannel'
        filter_channels = ['even', 'terminator']

    elif doc_ids_filtered:
        filter_type = '_doc_ids'
        filter_doc_ids = ['terminator']

    while True:

        # If terminator doc is found, terminate the polling loop
        if found_terminator:
            log_info('Found terminator ({}, {})'.format(user_name, feed))
            return user_name, latest_changes

        log_info('_changes ({}) for ({}) since: {}'.format(
            feed, user_name, since))
        changes = sg_client.get_changes(url=sg_url,
                                        db=sg_db,
                                        since=since,
                                        auth=user_auth,
                                        feed=feed,
                                        limit=changes_limit,
                                        filter_type=filter_type,
                                        filter_channels=filter_channels,
                                        filter_doc_ids=filter_doc_ids)

        # A termination doc was processed, exit on the next loop
        for change in changes['results']:
            if change['id'] == terminator_doc_id:
                found_terminator = True
            else:
                # Add latest rev to latest_changes map
                if len(change['changes']) >= 1:
                    latest_changes[change['id']] = change['changes'][0]['rev']
                else:
                    latest_changes[change['id']] = ''

        since = changes['last_seq']
        time.sleep(changes_delay)
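
A hedged usage sketch showing how the polling worker above is typically fanned out and then unblocked by the terminator document (the wiring and parameter values are illustrative assumptions):

from concurrent.futures import ThreadPoolExecutor, as_completed

def run_polling_workers(sg_url, sg_db, users,
                        terminator_doc_id='terminator',
                        terminator_channel='terminator'):
    results = {}
    with ThreadPoolExecutor(max_workers=len(users) + 1) as ex:
        tasks = [ex.submit(start_polling_changes_worker, sg_url, sg_db,
                           user_name, user['auth'],
                           changes_delay=1, changes_limit=200,
                           terminator_doc_id=terminator_doc_id,
                           feed='longpoll',
                           channels_filtered=False, doc_ids_filtered=False)
                 for user_name, user in users.items()]
        # Writing the terminator doc causes each worker to return
        send_changes_termination_doc(sg_url, sg_db, users,
                                     terminator_doc_id, terminator_channel)
        for task in as_completed(tasks):
            user_name, latest_changes = task.result()
            results[user_name] = latest_changes
    return results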
def test_revs_cache_size(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary Test for no-conflicts with rev_cache size
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #18
    Steps:
    Note: the sg config has rev_cache_size set to 1000; make sure the number of docs is less than 1000 so the
    test behaves as expected
    1. Add docs to SG.
    2. Get the docs
    3. Verify number of rev_cache_hits is same as number of docs if rev_cache_size is more than number of docs.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    retrieved_docs = num_of_docs // 2

    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]

    if sync_gateway_version < "2.0":
        pytest.skip('It does not work with sg < 2.0 , so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Get all docs
    for i in range(retrieved_docs):
        doc = sg_docs[i]
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)

    # 4. Verify the number of revision cache hits matches the number of retrieved docs
    exp_vars = sg_client.get_expvars(url=sg_admin_url)
    revision_cache_hits = exp_vars["syncGateway_stats"]["revisionCache_hits"]
    revision_cache_misses = exp_vars["syncGateway_stats"]["revisionCache_misses"]
    assert revision_cache_hits == retrieved_docs, "Revision cache hits {} did not match the expected number {}".format(revision_cache_hits, retrieved_docs)
    assert revision_cache_misses == 0, "Revision Cache misses is not 0"
def test_net_msft_full_life_cycle(request, liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")
    docs = client.add_docs(ls_url, db="ls_db", number=10, id_prefix="test_doc")
    assert len(docs) == 10

    client.delete_databases(ls_url)

    liteserv.stop()
def test_android_storage_engine(request,
                                liteserv_with_storage_engine_from_fixture):
    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name

    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR,
                                            type(liteserv).__name__, test_name,
                                            datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    liteserv.stop()

    # Look in adb logcat to see if output match platform / storage engine expectation
    # We can't look at the database files directly to my knowledge without a rooted device
    liteserv_output = []
    with open(logfile, "r") as f:
        lines = f.readlines()
        for line in lines:
            if "LiteServ" in line:
                line = line.strip()
                liteserv_output.append(line)

    log_info(liteserv_output)
    if liteserv.storage_engine == "SQLite":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=SQLite")
        assert liteserv_output[1].endswith("dbpassword="******"SQLCipher":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=SQLite")
        assert liteserv_output[1].endswith(
            "dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass")
    elif liteserv.storage_engine == "ForestDB":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=ForestDB")
        assert liteserv_output[1].endswith("dbpassword="******"ForestDB+Encryption":
        assert len(liteserv_output) == 4
        assert liteserv_output[0].endswith("storageType=ForestDB")
        assert liteserv_output[1].endswith(
            "dbpassword=ls_db:pass,ls_db1:pass,ls_db2:pass")
    else:
        pytest.xfail("Invalid storage engine")
Example #19
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite["liteserv"]
    cluster_config = setup_client_syncgateway_suite["cluster_config"]
    xattrs_enabled = setup_client_syncgateway_suite["xattrs_enabled"]
    test_name = request.node.name

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR,
        type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(
        cluster_config=cluster_config)

    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": cluster_config,
        "sg_mode": setup_client_syncgateway_suite["sg_mode"],
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "xattrs_enabled": xattrs_enabled
    }

    log_info("Tearing down test")
    client.delete_databases(ls_url)
    liteserv.stop()

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)
def test_net_msft_storage_engine(request, liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    liteserv.stop()

    storage_engine = liteserv.storage_engine
    log_info("Testing storage_engine: {}".format(storage_engine))

    with open(logfile, "r") as f:
        contents = f.read()

        if storage_engine == "SQLite":

            # Note: SQLite mode uses SQLCipher by default
            assert "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "encryption key given" not in contents

        elif storage_engine == "SQLCipher":

            assert "Using Couchbase.Lite.Storage.SQLCipher.SqliteCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "Open C:\Users\user\Desktop\LiteServ\ls_db.cblite2\db.sqlite3" in contents
            assert "encryption key given"

        elif storage_engine == "ForestDB":

            assert "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "Database is encrypted; setting CBForest encryption key" not in contents

        elif storage_engine == "ForestDB+Encryption":

            assert "Using Couchbase.Lite.Storage.ForestDB.ForestDBCouchStore for db at C:\Users\user\Desktop\LiteServ\ls_db.cblite2" in contents
            assert "Database is encrypted; setting CBForest encryption key" in contents

        else:
            pytest.xfail("Invalid Storage Engine")
Example #21
def create_users_add_docs_task(user_name, sg_admin_url, sg_url, sg_db,
                               number_docs_per_user, batch_size, create_delay):

    sg_client = MobileRestClient()

    # Create user
    if user_name.startswith('unique'):
        # Doc channel should be unique for each user
        channels = [user_name]
    elif user_name.startswith('shared'):
        # Doc channel should be shared for each doc with this user type
        channels = ['shared']
    elif user_name.startswith('filtered_channel'):
        channels = ['even', 'odd']
    elif user_name.startswith('filtered_doc_ids'):
        channels = ['terminator']
    else:
        raise ValueError('Unexpected user type: {}'.format(user_name))

    sg_client.create_user(url=sg_admin_url,
                          db=sg_db,
                          name=user_name,
                          password=USER_PASSWORD,
                          channels=channels)

    # Create session
    user_auth = sg_client.create_session(url=sg_admin_url,
                                         db=sg_db,
                                         name=user_name,
                                         password=USER_PASSWORD)

    # Start bulk doc creation
    doc_ids = add_user_docs(client=sg_client,
                            sg_url=sg_url,
                            sg_db=sg_db,
                            user_name=user_name,
                            user_auth=user_auth,
                            channels=channels,
                            number_docs_per_user=number_docs_per_user,
                            batch_size=batch_size,
                            create_delay=create_delay)

    return user_name, user_auth, doc_ids
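
A hedged usage sketch: the task above is designed to be fanned out with an executor, one future per user (the user names must match the prefixes the function dispatches on; all values are illustrative):

from concurrent.futures import ThreadPoolExecutor

def create_all_users(sg_admin_url, sg_url, sg_db, user_names,
                     number_docs_per_user=100, batch_size=10,
                     create_delay=0.1):
    users = {}
    with ThreadPoolExecutor(max_workers=len(user_names)) as ex:
        tasks = [ex.submit(create_users_add_docs_task, user_name,
                           sg_admin_url, sg_url, sg_db,
                           number_docs_per_user, batch_size, create_delay)
                 for user_name in user_names]
        for task in tasks:
            user_name, user_auth, doc_ids = task.result()
            users[user_name] = {'auth': user_auth, 'doc_ids': doc_ids}
    return users

# e.g. create_all_users(sg_admin_url, sg_url, 'db',
#                       ['unique_user_0', 'shared_user_0'])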
Example #22
def start_continuous_changes_worker(sg_url, sg_db, user_name, user_auth,
                                    terminator_doc_id, channels_filtered):

    sg_client = MobileRestClient()

    latest_changes = {}

    # Pass a channel filter to changes request if filtered is true
    filter_type = None
    filter_channels = None

    if channels_filtered:
        filter_type = 'sync_gateway/bychannel'
        filter_channels = ['even', 'terminator']

    log_info('_changes (continuous) for ({}) since: 0'.format(user_name))
    stream = sg_client.stream_continuous_changes(
        sg_url,
        sg_db,
        since=0,
        auth=user_auth,
        filter_type=filter_type,
        filter_channels=filter_channels)

    for line in stream.iter_lines():

        # filter out keep-alive newlines
        if line:
            decoded_line = line.decode('utf-8')
            change = json.loads(decoded_line)

            if change['id'] == terminator_doc_id:
                log_info('Found terminator ({}, continuous)'.format(user_name))
                return user_name, latest_changes

            else:
                if len(change['changes']) >= 1:
                    latest_changes[change['id']] = change['changes'][0]['rev']
                else:
                    latest_changes[change['id']] = ''
Example #23
def test_peer_2_peer_sanity_pull_one_shot(setup_p2p_test):
    """
    1. Create ls_db1 database on LiteServ One
    2. Create ls_db2 database on LiteServ Two
    3. Add 10000 docs to LiteServ 2 ls_db2
    4. Create one shot pull replication LiteServ 1 ls_db1 <- LiteServ 2 ls_db2
    5. Verify all docs replicate to LiteServ 1 ls_db1
    6. Verify all docs show up in changes for LiteServ 1 ls_db1
    """

    ls_url_one = setup_p2p_test["ls_url_one"]
    ls_url_two = setup_p2p_test["ls_url_two"]

    num_docs_per_db = 10000

    log_info("ls_url_one: {}".format(ls_url_one))
    log_info("ls_url_two: {}".format(ls_url_two))

    client = MobileRestClient()

    log_info("Creating databases")
    ls_db1 = client.create_database(url=ls_url_one, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url_two, name="ls_db2")

    bulk_docs = create_docs("test_ls_db2", num_docs_per_db)
    ls_db2_docs = client.add_bulk_docs(url=ls_url_two, db=ls_db2, docs=bulk_docs)
    assert len(ls_db2_docs) == num_docs_per_db

    # Setup one shot pull replication from LiteServ 2 ls_db2 to LiteServ 1 ls_db1
    log_info("Setting up a one-shot pull replication from ls_db2 to ls_db1")
    pull_repl = client.start_replication(
        url=ls_url_one,
        continuous=False,
        from_url=ls_url_two, from_db=ls_db2,
        to_db=ls_db1
    )
    log_info("Replication ID: {}".format(pull_repl))

    client.verify_docs_present(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
    client.verify_docs_in_changes(url=ls_url_one, db=ls_db1, expected_docs=ls_db2_docs)
def test_listener_two_sync_gateways(setup_client_syncgateway_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js
    Scenario:
      1. Start 2 sync_gateways
      2. Create sg_db_one db on sync_gateway one
      3. Create sg_db_two db on sync_gateway two
      4. Create ls_db_one and ls_db_two on Liteserv
      5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
      6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
      7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
      8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
      9. Add num_docs / 2 to each liteserv database
      10. Verify each database has num_docs docs
      11. Verify all_docs in all dbs
      12. Verify changes feed for sg_db_one and sg_db_two
      13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_syncgateway_test["ls_url"]
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_config)

    sg_one_admin_url = topology["sync_gateways"][0]["admin"]
    sg_two_admin_url = topology["sync_gateways"][1]["admin"]
    cb_server_url = topology["couchbase_servers"][0]

    log_info("Sync Gateway 1 admin url: {}".format(sg_one_admin_url))
    log_info("Sync Gateway 2 admin url: {}".format(sg_two_admin_url))
    log_info("Couchbase Server url: {}".format(cb_server_url))

    c = cluster.Cluster(cluster_config)
    sg_config_path = sync_gateway_config_path_for_mode(
        "listener_tests/multiple_sync_gateways", sg_mode)
    c.reset(sg_config_path=sg_config_path)

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Delete sg_db2 on sync_gateway 1
    client.delete_database(url=sg_one_admin_url, name=sg_db_two)

    # Delete sg_db1 on sync_gateway 2
    client.delete_database(url=sg_two_admin_url, name=sg_db_one)

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push pull replication ls_db_one <-> sg_db_one
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_one,
                             to_url=sg_one_admin_url,
                             to_db=sg_db_one)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_one_admin_url,
                             from_db=sg_db_one,
                             to_db=ls_db_one)

    # Start continuous push pull replication ls_db_two <-> sg_db_two
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_two,
                             to_url=sg_two_admin_url,
                             to_db=sg_db_two)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_two_admin_url,
                             from_db=sg_db_two,
                             to_db=ls_db_two)

    # Start continuous push pull replication sg_db_one <-> ls_db_two
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_one_admin_url,
                             from_db=sg_db_one,
                             to_db=ls_db_two)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_two,
                             to_url=sg_one_admin_url,
                             to_db=sg_db_one)

    # Start continuous push pull replication sg_db_two <-> ls_db_one
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_two_admin_url,
                             from_db=sg_db_two,
                             to_db=ls_db_one)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_one,
                             to_url=sg_two_admin_url,
                             to_db=sg_db_two)

    ls_db_one_docs = client.add_docs(url=ls_url,
                                     db=ls_db_one,
                                     number=num_docs // 2,
                                     id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs // 2

    ls_db_two_docs = client.add_docs(url=ls_url,
                                     db=ls_db_two,
                                     number=num_docs // 2,
                                     id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs // 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == num_docs

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url,
                               db=ls_db_one,
                               expected_docs=all_docs)
    client.verify_docs_present(url=ls_url,
                               db=ls_db_two,
                               expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url,
                               db=sg_db_one,
                               expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url,
                               db=sg_db_two,
                               expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url,
                                  db=ls_db_one,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url,
                                  db=ls_db_two,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url,
                                  db=sg_db_one,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url,
                                  db=sg_db_two,
                                  expected_docs=all_docs)
Example #25
def test_deleted_docs_from_changes_active_only(params_from_base_test_setup,
                                               sg_conf_name):
    """
    https://github.com/couchbase/sync_gateway/issues/2955
    1. Create a document
    2. Delete the document
    3. Restart Sync Gateway (to force rebuild of cache from view)
    4. Issue an active_only=true changes request
    5. Issue an active_only=false changes request
    The deleted document was not being included in the result set in step 5.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 10
    client = MobileRestClient()

    # Add docs to SG
    added_docs = client.add_docs(url=sg_admin_url,
                                 db=sg_db,
                                 number=num_docs,
                                 id_prefix="test_changes")

    # Delete 1 doc
    doc_id = added_docs[0]["id"]
    log_info("Deleting {}".format(doc_id))
    doc = client.get_doc(url=sg_admin_url, db=sg_db, doc_id=doc_id)
    doc_rev = doc['_rev']
    client.delete_doc(sg_admin_url, sg_db, doc_id, doc_rev)

    # Restart SG
    sg_obj = SyncGateway()
    sg_obj.restart_sync_gateways(cluster_config)

    # Changes request with active_only=true
    session = Session()
    request_url = "{}/{}/_changes?active_only=true".format(sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()
    log_info(
        "Checking that the deleted doc is not included in the active_only=true changes request"
    )
    for d in resp_obj["results"]:
        assert doc_id not in d

    # Changes request with active_only=false
    request_url = "{}/{}/_changes?active_only=false".format(
        sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()
    doc_found = False
    for d in resp_obj["results"]:
        if d["id"] == doc_id:
            assert d["deleted"]
            doc_found = True
            break

    log_info(
        "Checking that the deleted doc is included in the active_only=false changes request"
    )
    assert doc_found
def test_macosx_storage_engines(request,
                                liteserv_with_storage_engine_from_fixture):

    liteserv = liteserv_with_storage_engine_from_fixture

    test_name = request.node.name
    logfile = "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR,
                                            type(liteserv).__name__, test_name,
                                            datetime.datetime.now())
    ls_url = liteserv.start(logfile)

    client = MobileRestClient()
    client.create_database(ls_url, "ls_db")

    storage_engine = liteserv.storage_engine
    log_info("Testing storage_engine: {}".format(storage_engine))

    if storage_engine == "SQLite":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.sqlite3" in db_files
        assert "db.sqlite3-shm" in db_files
        assert "db.sqlite3-wal" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == []

        client.delete_databases(ls_url)
        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    elif storage_engine == "SQLCipher":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.sqlite3" in db_files
        assert "db.sqlite3-shm" in db_files
        assert "db.sqlite3-wal" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == ["_encryption"]

        client.delete_databases(ls_url)
        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    elif storage_engine == "ForestDB":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.forest.0" in db_files
        assert "db.forest.meta" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == []

        client.delete_databases(ls_url)
        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    elif storage_engine == "ForestDB+Encryption":

        db_files = os.listdir("results/dbs/macosx/ls_db.cblite2")
        assert "db.forest.0" in db_files
        assert "db.forest.meta" in db_files

        att_files = os.listdir("results/dbs/macosx/ls_db.cblite2/attachments")
        assert att_files == ["_encryption"]

        client.delete_databases(ls_url)

        assert not os.path.isdir("results/dbs/macosx/ls_db.cblite2/")

    else:
        pytest.xfail("Invalid storage engine")

    liteserv.stop()
Example #27
def test_detect_stale_channel_index(params_from_base_test_setup, sg_conf):
    """
    1. Bring up single Sync Gateway node, backed by Couchbase Server with 3 accels indexing
    2. Configure such that the primary bucket and the channel index bucket are different (which is the norm)
    3. Add 1000 documents
    4. Shutdown Sync Gateway
    5. Delete / create the primary bucket ('data-bucket'), but do not touch the channel index bucket
    6. Start Sync Gateway
    7. Assert that sync_gateway fails to start due to stale channel index
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_detect_stale_channel_index'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]
    sg_db = "db"
    num_docs = 1000

    cb_server = couchbaseserver.CouchbaseServer(url=cb_server_url)
    client = MobileRestClient()

    # Create doc pusher user
    doc_pusher_user_info = userinfo.UserInfo(name="doc_pusher",
                                             password="******",
                                             channels=["NASA"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    # Add some docs to Sync Gateway to cause indexing
    docs = document.create_docs(None,
                                number=num_docs,
                                channels=doc_pusher_user_info.channels)
    pushed_docs = client.add_bulk_docs(url=sg_url,
                                       db=sg_db,
                                       docs=docs,
                                       auth=doc_pusher_auth)
    assert len(pushed_docs) == num_docs

    # Shut down sync_gateway
    sg_util = SyncGateway()
    sg_util.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)

    # Delete server bucket
    cb_server.delete_bucket(name="data-bucket")

    # Create server bucket
    ram_per_bucket_mb = cb_server.get_ram_per_bucket(num_buckets=2)
    cb_server.create_bucket(name="data-bucket", ram_quota_mb=ram_per_bucket_mb)

    # Start sync_gateway and assert that a Provisioning error is raised due to detecting stale index
    with pytest.raises(exceptions.ProvisioningError):
        sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                    url=sg_url,
                                    config=sg_conf)

    # TODO: To make this check even more accurate, could
    # run remote ssh command "systemctl status sync_gateway.service" and look for
    # regex pattern: Main PID: 7185 (code=exited, status=2)

    # Delete index bucket and recreate it
    cb_server.delete_bucket(name="index-bucket")
    cb_server.create_bucket(name="index-bucket",
                            ram_quota_mb=ram_per_bucket_mb)

    # Start sync gateway, should succeed now
    sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                url=sg_url,
                                config=sg_conf)
Example #28
def test_take_all_sgaccels_down(params_from_base_test_setup, sg_conf):
    """
    Scenario that takes all sync_gateway accel nodes offline during doc load.
    After bringing the nodes back online during the load, the resharding of the DCP feed is verified.
    The changes feed is then verified to ensure that all docs show up.

    1. Start doc load (1000 docs)
    2. Take all sg_accel nodes down in parallel
    3. Verify the nodes are down
    4. Wait for doc adds to complete, store "doc_push_result_1"
    5. Verify "doc_push_result_1" docs added
    6. Start doc load (1000 docs)
    7. Wait for 6. to complete, store "doc_push_result_2"
    8. Verify "doc_push_result_2" docs added
    9. Start another doc load (1000 docs)
    10. Bring up nodes in parallel
    11. poll on p-index reshard
    12. Wait for 9. to complete, store "doc_push_result_3"
    13. Verify "doc_push_result_3" docs added
    14. Verify "doc_push_result_1" + "doc_push_result_2" + "doc_push_result_3" show up in _changes feed
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher",
                                             "pass",
                                             channels=["A"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=a_user_info.name,
                       password=a_user_info.password,
                       channels=a_user_info.channels)
    a_user_session = client.create_session(url=sg_admin_url,
                                           db=sg_db,
                                           name=a_user_info.name,
                                           password=a_user_info.password)

    # Shutdown all accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs
        docs_1 = document.create_docs(None,
                                      num_docs,
                                      channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs,
                                url=sg_url,
                                db=sg_db,
                                docs=docs_1,
                                auth=doc_pusher_auth)

        # Take down all access nodes
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until bulk_docs is complete
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=doc_push_result_1,
                                   auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes are offline
        docs_2_bodies = document.create_docs(
            None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url,
                                                  db=sg_db,
                                                  docs=docs_2_bodies,
                                                  auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=docs_push_result_2,
                                   auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the sg_accel nodes online
        docs_3 = document.create_docs(None,
                                      num_docs,
                                      channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs,
                                url=sg_url,
                                db=sg_db,
                                docs=docs_3,
                                auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for the pindexes to reshard correctly across the 3 running sg_accel nodes
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until second bulk_docs is complete
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=doc_push_result_3,
                                   auth=doc_pusher_auth)

    # Combine the 3 push results and make sure the changes propagate to a_user,
    # who has access to the docs' channel.
    log_info("Verifying all the changes show up for 'a_user' ...")
    all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=all_docs,
                                  auth=a_user_session,
                                  polling_interval=2)
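
The test above leans on the submit()/result() pattern from concurrent.futures: the doc
load and the node shutdowns run on one thread pool, and .result() both blocks and
re-raises anything that went wrong on the worker thread. A minimal, self-contained
sketch of that pattern, with add_docs and stop_node as hypothetical stand-ins for the
framework calls:

import concurrent.futures

def add_docs(count):
    # Hypothetical stand-in for client.add_bulk_docs
    return ["doc_{}".format(i) for i in range(count)]

def stop_node(name):
    # Hypothetical stand-in for sg_accel.stop(); 0 mimics a clean exit status
    return 0

with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
    docs_task = ex.submit(add_docs, 1000)    # the doc load runs concurrently ...
    stop_task = ex.submit(stop_node, "ac1")  # ... with the node shutdown
    assert stop_task.result() == 0           # block until the node reports it is down
    assert len(docs_task.result()) == 1000   # then block until the load completes
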
Example #29
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup,
                                                   sg_conf):
    """
    Scenario 1

    Start with 3 sg_accels
    Take down 2 sg_accels (block until they are down -- poll a port if needed)
    Add docs with uuids (~30 s for cbgt to reshard)
    Polling loop: wait for all docs to come back over the changes feed
    Call validate pindex with the correct number of running accels

    Scenario 2 (continuation)

    When bringing the nodes back up, poll the cbgt cfg until the expected number of
    nodes is present, then validate the pindex distribution with 2 accels
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher",
                                             "pass",
                                             channels=["A"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    log_info("Shutting down sg_accels: [{}, {}]".format(
        cluster.sg_accels[1], cluster.sg_accels[2]))
    # Shutdown two accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0

    log_info("Finished taking nodes down!")

    # It should take some time (~30 s) for cbgt to notice the failed nodes and reshard
    # the pindexes. During this window, add 1000 docs and start a longpoll changes loop
    # to see whether those docs make it to the changes feed. If the reshard succeeds,
    # they will show up at some point afterwards; if not, they will never appear.
    doc_pusher_docs = client.add_docs(url=sg_url,
                                      db=sg_db,
                                      number=1000,
                                      id_prefix=None,
                                      auth=doc_pusher_auth,
                                      channels=doc_pusher_user_info.channels)
    assert len(doc_pusher_docs) == 1000
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=doc_pusher_docs,
                                  auth=doc_pusher_auth,
                                  polling_interval=5)

    # The pindexes should be resharded at this point since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)

    log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1],
                                                cluster.sg_accels[2]))

    # Bring the two accel nodes back up one at a time, validating the pindex distribution after each start
    status = cluster.sg_accels[1].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the first accel node back (2 running)
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=2)

    status = cluster.sg_accels[2].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the second accel node back (3 running)
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=3)
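
validate_cbgt_pindex_distribution_retry is presumably a poll-and-retry wrapper around
the one-shot validate_cbgt_pindex_distribution used earlier; its exact implementation
lives in the cluster library, but the shape is roughly this generic helper (poll_until
and its parameters are illustrative, not framework API):

import time

def poll_until(predicate, timeout=60, interval=5):
    # Re-evaluate `predicate` until it returns True or `timeout` seconds elapse
    deadline = time.time() + timeout
    while time.time() < deadline:
        if predicate():
            return True
        time.sleep(interval)
    return predicate()  # one final check at the deadline

# Usage sketch against the cluster object from the test above:
# assert poll_until(lambda: cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=2))
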
Example #30
def test_setting_expiry_in_bulk_docs(params_from_base_test_setup,
                                     sg_conf_name):
    """
    1. PUT /db/_bulk_docs with 10 documents: set "_exp": 3 on 5 of them and "_exp": 10 on the other 5
    2. Wait 5 seconds so that the "_exp": 3 documents expire
    3. POST /db/_bulk_get for all 10 documents and validate that only the 5 not-yet-expired documents are returned
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cluster_helper.reset_cluster(cluster_config=cluster_config,
                                 sync_gateway_config=sg_conf)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]

    log_info("Running 'test_setting_expiry_in_bulk_docs'")
    log_info("cbs_url: {}".format(cbs_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    sg_user_channels = ["NBC", "ABC"]
    bucket_name = "data-bucket"
    cbs_ip = host_for_url(cbs_url)

    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******')

    client = MobileRestClient()

    client.create_user(url=sg_url_admin,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_user_session = client.create_session(url=sg_url_admin,
                                            db=sg_db,
                                            name=sg_user_name)

    doc_exp_3_bodies = document.create_docs(doc_id_prefix="exp_3",
                                            number=5,
                                            expiry=3,
                                            channels=sg_user_channels)
    doc_exp_10_bodies = document.create_docs(doc_id_prefix="exp_10",
                                             number=5,
                                             expiry=10,
                                             channels=sg_user_channels)

    bulk_bodies = doc_exp_3_bodies + doc_exp_10_bodies

    bulk_docs = client.add_bulk_docs(url=sg_url,
                                     db=sg_db,
                                     docs=bulk_bodies,
                                     auth=sg_user_session)

    # Allow exp_3 docs to expire
    time.sleep(5)

    bulk_docs_ids = [doc["id"] for doc in bulk_docs]

    expected_ids = ["exp_10_0", "exp_10_1", "exp_10_2", "exp_10_3", "exp_10_4"]
    expected_missing_ids = [
        "exp_3_0", "exp_3_1", "exp_3_2", "exp_3_3", "exp_3_4"
    ]

    bulk_get_docs, errors = client.get_bulk_docs(url=sg_url,
                                                 db=sg_db,
                                                 doc_ids=bulk_docs_ids,
                                                 auth=sg_user_session,
                                                 validate=False)
    assert len(bulk_get_docs) == len(expected_ids)
    assert len(errors) == len(expected_missing_ids)

    bulk_get_doc_ids = [doc["_id"] for doc in bulk_get_docs]
    error_ids = [doc["id"] for doc in errors]

    assert bulk_get_doc_ids == expected_ids
    assert error_ids == expected_missing_ids

    client.verify_doc_ids_found_in_response(response=bulk_get_docs,
                                            expected_doc_ids=expected_ids)
    client.verify_doc_ids_not_found_in_response(
        response=errors, expected_missing_doc_ids=expected_missing_ids)

    for expired_doc in error_ids:
        verify_doc_deletion_on_server(doc_id=expired_doc,
                                      sdk_client=sdk_client,
                                      sg_client=client,
                                      sg_admin_url=sg_url_admin,
                                      sg_db=sg_db,
                                      xattrs_enabled=xattrs_enabled)
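
For reference, the "_exp" property travels in the document body itself, so
document.create_docs(..., expiry=3) presumably emits bodies along these lines (the exact
field layout is an assumption based on this test, not the helper's guaranteed output).
Sync Gateway follows Couchbase Server TTL semantics, where small numeric values are
treated as an offset in seconds from the time of the write:

# Hypothetical shape of one expiring doc body pushed via _bulk_docs
doc_exp_3 = {
    "_id": "exp_3_0",
    "_exp": 3,                               # expires ~3 seconds after the write
    "channels": ["NBC", "ABC"],              # channels sg_user is granted
    "content": {"sample_key": "sample_val"},
}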