def test_offline_false_config_rest(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # all db endpoints should function as expected
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])

    assert len(errors) == 0

    # Scenario 4
    # Check the db has an Online state at each running sync_gateway
    for sg in cluster.sync_gateways:
        admin = Admin(sg)
        db_info = admin.get_db_info("db")
        assert db_info["state"] == "Online"
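
# A minimal sketch (hypothetical helper, not part of the suite above): if the
# "Online" state were racy right after a reset, the one-shot assertion in
# Scenario 4 could poll instead. Only Admin.get_db_info is assumed here;
# retry count and delay are illustrative.
def wait_for_db_state(admin, db, expected_state, retries=10, delay=1):
    for _ in range(retries):
        if admin.get_db_info(db)["state"] == expected_state:
            return True
        time.sleep(delay)
    return False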
def create_sg_users_channels(sg1, sg2, db1, db2):
    admin1 = Admin(sg1)
    admin2 = Admin(sg2)
    sg1a_user = admin1.register_user(
        target=sg1,
        db=db1,
        name="sg1A_user",
        password="******",
        channels=["A"],
    )
    sg1b_user = admin1.register_user(
        target=sg1,
        db=db1,
        name="sg1B_user",
        password="******",
        channels=["B"],
    )
    sg2_user = admin2.register_user(
        target=sg2,
        db=db2,
        name="sg2_user",
        password="******",
        channels=["*"],
    )

    return sg1a_user, sg1b_user, sg2_user
def test_offline_true_config_bring_online(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    # all db endpoints should fail with 503
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])

    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for error_tuple in errors:
        log_info("({},{})".format(error_tuple[0], error_tuple[1]))
        assert error_tuple[1] == 503

    # Scenario 9
    # POST /db/_online
    status = admin.bring_db_online(db="db")
    assert status == 200

    # all db endpoints should succeed
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0
def test_online_to_offline_check_503(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all db endpoints should function as expected
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0

    # Take the db offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    # all db endpoints should return 503
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])

    # We hit NUM_ENDPOINTS unique REST endpoints, plus two failures per doc
    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for error_tuple in errors:
        log_info("({},{})".format(error_tuple[0], error_tuple[1]))
        assert error_tuple[1] == 503
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_seth_docs = 7000
    num_cbs_docs = 3000

    admin = Admin(sgs[0])
    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC"])
    cbs_user = admin.register_user(target=sgs[0],
                                   db="db",
                                   name="cbs_user",
                                   password="******",
                                   channels=["CBS"])
    admin_user = admin.register_user(target=sgs[0],
                                     db="db",
                                     name="admin",
                                     password="******",
                                     channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)
    cbs_user.add_docs(num_cbs_docs)

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth],
                   expected_num_docs=num_seth_docs,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)
    verify_changes([cbs_user],
                   expected_num_docs=num_cbs_docs,
                   expected_num_revisions=0,
                   expected_docs=cbs_user.cache)

    all_doc_caches = [seth.cache, cbs_user.cache]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes([admin_user],
                   expected_num_docs=num_cbs_docs + num_seth_docs,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
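
# The dict comprehension above ({k: v for cache in ... for k, v in cache.items()})
# recurs throughout these tests. A hedged refactoring sketch (merge_caches is a
# hypothetical name, not an existing helper in the suite):
def merge_caches(caches):
    """Merge per-user doc caches ({doc_id: rev_info}) into a single dict."""
    merged = {}
    for cache in caches:
        merged.update(cache)
    return merged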
def test_missing_num_shards(params_from_base_test_setup, sg_conf):
    """
    1. Launch sg_accels missing the following property in the config.
        "num_shards":16
    2. Verify there are 16 shards
    3. Verify they are distributed evenly across the nodes
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_missing_num_shards'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # CBGT REST Admin API endpoint
    admin_api = Admin(cluster.sg_accels[1])
    cbgt_cfg = admin_api.get_cbgt_config()

    # Verify that default number of pindex shards is 16.
    # This may change in the future in which case this test will need to be updated.
    assert cbgt_cfg.num_shards == 16

    # Verify sharding is correct
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=3)
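
# Hedged sketch of the evenness check that validate_cbgt_pindex_distribution_retry
# presumably performs: count pindexes per node and require the per-node counts to
# differ by at most one. The input shape (one node UUID per pindex) is an
# assumption for illustration only.
def pindexes_evenly_distributed(pindex_node_uuids, num_nodes):
    counts = {}
    for node_uuid in pindex_node_uuids:
        counts[node_uuid] = counts.get(node_uuid, 0) + 1
    if len(counts) != num_nodes:
        return False  # at least one node has no pindexes
    return max(counts.values()) - min(counts.values()) <= 1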
def test_multiple_users_single_channel(params_from_base_test_setup,
                                       sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC"])
    adam = admin.register_user(target=sgs[0],
                               db="db",
                               name="adam",
                               password="******",
                               channels=["ABC"])
    traun = admin.register_user(target=sgs[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC"])

    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam, bulk=True)  # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_caches = [seth.cache, adam.cache, traun.cache]
    all_docs = {k: v for cache in all_caches for k, v in cache.items()}

    verify_changes([seth, adam, traun],
                   expected_num_docs=num_docs_seth + num_docs_adam +
                   num_docs_traun,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify the last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users, expected_num_docs=num_users * num_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
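
# Hedged sketch of the seq formats asserted above: in "di" mode a sequence may
# look like "12313-0" (plain hashed value) or "12313-0::1023.15" (with a
# low-sequence component), and splitting on "::" distinguishes the two. The
# parser below is hypothetical, for illustration only.
def split_di_seq(seq):
    parts = seq.split("::")
    base = parts[0]                                  # e.g. "12313-0"
    low = parts[1] if len(parts) == 2 else None      # e.g. "1023.15"
    return base, low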
def test_dcp_reshard_single_sg_accel_goes_down_and_up(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # take down the remaining sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=10000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=10000, expected_num_revisions=0, expected_docs=seth.cache)

    # Start second writer again
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
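
# Hedged sketch addressing the "TODO better way to do this" above: rather than a
# fixed time.sleep(120), poll until verification passes. This assumes
# verify_changes raises AssertionError while docs are still syncing; the helper
# name and timings are illustrative, not part of the suite.
def retry_until_verified(verify_fn, timeout=120, interval=5):
    deadline = time.time() + timeout
    while True:
        try:
            verify_fn()
            return
        except AssertionError:
            if time.time() > deadline:
                raise
            time.sleep(interval)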
def test_db_online_offline_webhooks_offline_two(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):

    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline_two'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"
    ws = WebServer()
    ws.start()

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    status = cluster.servers[0].delete_bucket("data-bucket")
    assert status == 0

    log_info("Sleeping for 120 seconds...")
    time.sleep(120)

    webhook_events = ws.get_data()
    time.sleep(5)
    log_info("webhook event {}".format(webhook_events))
    last_event = webhook_events[-1]
    assert last_event['state'] == 'offline'

    ws.stop()
def test_sg_replicate_push_async(params_from_base_test_setup, num_docs):

    assert num_docs > 0

    # If the async handling works, we should be able to kick off a large push
    # replication and observe that a doc is still missing on the target before
    # the replication finishes; later, that doc should show up.

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    log_info("Running 'test_sg_replicate_push_async'")
    log_info("Using cluster_config: {}".format(cluster_config))

    config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode)
    sg1, sg2 = create_sync_gateways(
        cluster_config=cluster_config,
        sg_config_path=config
    )

    admin = Admin(sg1)
    admin.admin_url = sg1.url

    sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2)

    # Add docs to sg1
    doc_ids_added = []
    last_doc_id_added = None
    for i in xrange(num_docs):
        doc_id = sg1_user.add_doc()
        doc_ids_added.append(doc_id)
        last_doc_id_added = doc_id

    # Wait until doc shows up on sg1's changes feed
    wait_until_doc_in_changes_feed(sg1, DB1, last_doc_id_added)

    # try to get the last doc added from the target -- assert that we get an exception
    assert_does_not_have_doc(sg2_user, last_doc_id_added)

    # kick off a one-off push replication with async=true
    sg1.start_push_replication(
        sg2.admin.admin_url,
        DB1,
        DB2,
        continuous=False,
        use_remote_source=True,
        async=True,
        use_admin_url=True
    )

    # wait until that doc shows up on the target
    wait_until_doc_sync(sg2_user, last_doc_id_added)

    # At this point, the active tasks should be empty
    wait_until_active_tasks_empty(sg1)
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []
    # Push some ABC docs
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(channel), password="******", channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Verify that none of the doc_pushers get docs; the sync function should have redirected them all
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber", password="******", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # subscriber should receive all docs
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel, expected_num_revisions=0, expected_docs=all_docs)

    # update subscribers cache so the user knows what docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Verify the docs are back in the respective ABC, NBC, CBS channels
    # HACK: Ignoring rev_id verification because the docs were updated by the subscriber user, not the
    # doc_pusher
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1, expected_docs=doc_pusher.cache, ignore_rev_ids=True)

    # Verify that all docs have been flagged with _removed = true in the changes feed for the subscriber
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
def test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_multiple_users(params_from_base_test_setup, sg_conf_name, num_docs, num_users):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_users: {}".format(num_users))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", password="******", number=num_users, channels=["ABC"])

    feed_close_results = list()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # start longpoll tracking with no timeout, will block until longpoll is closed by db going offline
        futures = {executor.submit(user.start_longpoll_changes_tracking, termination_doc_id=None, timeout=0, loop=False): user.name for user in users}

        time.sleep(5)
        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            if task_name.startswith("user"):
                # Long poll will exit with 503, return docs in the exception
                log_info("POLLING DONE")
                try:
                    docs_in_changes, last_seq_num = future.result()
                    feed_close_results.append((docs_in_changes, last_seq_num))
                except Exception as e:
                    log_info("Longpoll feed close error: {}".format(e))
                    # long poll should be closed so this exception should never happen
                    assert 0

    # Assert that the feed close results length is num_users
    assert len(feed_close_results) == num_users

    # Account for _user doc
    # last_seq may be of the form '1' for channel cache or '1-0' for distributed index
    for feed_result in feed_close_results:
        docs_in_changes = feed_result[0]
        seq_num_component = feed_result[1].split("-")
        assert len(docs_in_changes) == 0
        assert int(seq_num_component[0]) > 0
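
# Illustrative note on the parsing above: "1".split("-") yields ["1"] (channel
# cache) and "1-0".split("-") yields ["1", "0"] (distributed index); in both
# cases int(parts[0]) is the integer component being asserted.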
def test_single_user_single_channel_doc_updates(params_from_base_test_setup,
                                                sg_conf_name, num_docs,
                                                num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log.info("Running 'single_user_single_channel_doc_updates'")
    log.info("cluster_conf: {}".format(cluster_conf))
    log.info("sg_conf: {}".format(sg_conf))
    log.info("num_docs: {}".format(num_docs))
    log.info("num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    username = "******"
    password = "******"
    channels = ["channel-1"]

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    single_user = admin.register_user(target=sgs[0],
                                      db="db",
                                      name=username,
                                      password=password,
                                      channels=channels)

    # Not using bulk docs
    single_user.add_docs(num_docs, name_prefix="test-")

    assert len(single_user.cache) == num_docs

    # let SG catch up with all the changes
    time.sleep(5)

    single_user.update_docs(num_revisions)

    time.sleep(10)

    verify_changes([single_user],
                   expected_num_docs=num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=single_user.cache)

    end = time.time()
    log.info("TIME:{}s".format(end - start))
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    admin = Admin(cluster.sync_gateways[0])

    dj_0 = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="dj_0",
                               password="******")

    kdwb_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(radio_station),
            password="******",
            channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        if doc_pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(doc_pusher.cache)

    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                            db="db",
                                            name="access_doc_pusher",
                                            password="******")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build global doc_id, rev dict for all docs from all KDWB caches
    kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()}

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0,
                   expected_num_docs=number_of_docs_per_pusher,
                   expected_num_revisions=0,
                   expected_docs=kdwb_docs)
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["NBC", "CBS"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam)  # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Seth should get docs from seth + traun
    seth_subset = [seth.cache, traun.cache]
    seth_expected_docs = {k: v for cache in seth_subset for k, v in cache.items()}
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=seth_expected_docs)

    # Adam should get docs from adam + traun
    adam_subset = [adam.cache, traun.cache]
    adam_expected_docs = {k: v for cache in adam_subset for k, v in cache.items()}
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=adam_expected_docs)

    # Traun should get docs from seth + adam + traun
    traun_subset = [seth.cache, adam.cache, traun.cache]
    traun_expected_docs = {k: v for cache in traun_subset for k, v in cache.items()}
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=traun_expected_docs)
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # stop sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=2000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=8000, expected_num_revisions=0, expected_docs=seth.cache)

    # Verify that the stopped sg_accel ("ac1") is down but the other nodes are running
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failing node so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[0].start(sg_conf)
    assert start_status == 0
def test_webhooks(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):

    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_webhooks'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"
    ws = WebServer()
    ws.start()

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(30)
    ws.stop()
    expected_events = (num_users * num_docs * num_revisions) + (num_users * num_docs)
    received_events = len(ws.get_data())
    log_info("expected_events: {} received_events {}".format(expected_events, received_events))
    assert expected_events == received_events
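
# Worked example of the expected_events arithmetic above (illustrative numbers):
# with num_users=2, num_docs=10, num_revisions=1, the webhook should fire once
# per doc write plus once per revision, i.e. (2 * 10 * 1) + (2 * 10) = 40 events.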
def test_sg_replicate_basic_test_channels(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    log_info("Running 'test_sg_replicate_basic_test_channels'")
    log_info("Using cluster_config: {}".format(cluster_config))

    config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode)
    sg1, sg2 = create_sync_gateways(
        cluster_config=cluster_config,
        sg_config_path=config
    )

    admin = Admin(sg1)
    admin.admin_url = sg1.url

    sg1a_user, sg1b_user, sg2_user = create_sg_users_channels(sg1, sg2, DB1, DB2)

    # Add docs to sg1 in channel A and channel B
    doc_id_sg1a = sg1a_user.add_doc()
    doc_id_sg1b = sg1b_user.add_doc()

    # Wait until docs show up in changes feed
    wait_until_doc_in_changes_feed(sg1, DB1, doc_id_sg1a)
    wait_until_doc_in_changes_feed(sg1, DB1, doc_id_sg1b)

    # Make sure it doesn't appear on the target DB
    # even without starting a replication (which will
    # happen if the SG's are sharing a CB bucket)
    time.sleep(5)
    assert_does_not_have_doc(sg2_user, doc_id_sg1a)
    assert_does_not_have_doc(sg2_user, doc_id_sg1b)

    # Start a push replication sg1 -> sg2
    chans = sg1a_user.channels
    sg1.start_push_replication(
        sg2.admin.admin_url,
        DB1,
        DB2,
        continuous=False,
        use_remote_source=True,
        channels=chans,
        use_admin_url=True
    )

    # Verify that the doc added to sg1 made it to sg2
    assert_has_doc(sg2_user, doc_id_sg1a)
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = {executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name for user in users}
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to the continuous changes feed subscribers
            if task_name == "doc_pusher":

                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When a user's continuous _changes feed closes, collect the returned docs and verify the user got all the channel docs
                docs_in_changes = future.result()
                # Expect number of docs + the termination doc + _user doc
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:

        futures = dict()
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling")] = "polling"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth long poller
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Allow time for changes to reach subscribers
                time.sleep(5)

                doc_terminator.add_doc("killpolling")
            elif task_name == "polling":
                docs_in_changes, seq_num = future.result()

    # Verify abc_docs_pusher gets the correct docs in changes feed
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Verify docs from seth's longpoll changes feed are the same as abc_doc_pusher's docs
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
    def save_cbgt_diagnostics(self):

        # CBGT REST Admin API endpoint
        for sync_gateway_writer in self.sg_accels:

            adminApi = Admin(sync_gateway_writer)
            cbgt_diagnostics = adminApi.get_cbgt_diagnostics()
            adminApi.get_cbgt_config()

            # dump raw diagnostics
            pretty_print_json = json.dumps(cbgt_diagnostics,
                                           sort_keys=True,
                                           indent=4,
                                           separators=(',', ': '))
            log_info("SG {} CBGT diagnostic output: {}".format(
                sync_gateway_writer, pretty_print_json))
def init_shadow_cluster(cluster, config_path_shadower,
                        config_path_non_shadower):

    # initially, set up both sync gateways as shadowers -- this needs to be
    # the initial config so that both buckets (source and data) will be created
    mode = cluster.reset(sg_config_path=config_path_shadower)

    # pick one sync gateway to be the non-shadower and restart it with the non-shadower config
    non_shadower_sg = cluster.sync_gateways[1]
    non_shadower_sg.restart(config_path_non_shadower)

    # the other sync gateway will be the shadower
    shadower_sg = cluster.sync_gateways[0]

    admin = Admin(non_shadower_sg)

    alice_shadower = admin.register_user(
        target=shadower_sg,
        db="db",
        name="alice",
        password="******",
        channels=["ABC", "NBC", "CBS"],
    )

    bob_non_shadower = admin.register_user(
        target=non_shadower_sg,
        db="db",
        name="bob",
        password="******",
        channels=["ABC", "NBC", "CBS"],
    )

    source_bucket = cluster.servers[0].get_sdk_bucket(source_bucket_name)
    data_bucket = cluster.servers[0].get_sdk_bucket(data_bucket_name)

    sc = ShadowCluster(
        bob_non_shadower=bob_non_shadower,
        alice_shadower=alice_shadower,
        admin=admin,
        mode=mode,
        shadower_sg=shadower_sg,
        non_shadower_sg=non_shadower_sg,
        source_bucket=source_bucket,
        data_bucket=data_bucket,
    )

    return sc
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = dict()
        futures[executor.submit(seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(5)

                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc + _user doc
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
def init_shadow_cluster(cluster, config_path_shadower, config_path_non_shadower):

    # initially, set up both sync gateways as shadowers -- this needs to be
    # the initial config so that both buckets (source and data) will be created
    mode = cluster.reset(sg_config_path=config_path_shadower)

    # pick one sync gateway to be the non-shadower and restart it with the non-shadower config
    non_shadower_sg = cluster.sync_gateways[1]
    non_shadower_sg.restart(config_path_non_shadower)

    # the other sync gateway will be the shadower
    shadower_sg = cluster.sync_gateways[0]

    admin = Admin(non_shadower_sg)

    alice_shadower = admin.register_user(
        target=shadower_sg,
        db="db",
        name="alice",
        password="******",
        channels=["ABC", "NBC", "CBS"],
    )

    bob_non_shadower = admin.register_user(
        target=non_shadower_sg,
        db="db",
        name="bob",
        password="******",
        channels=["ABC", "NBC", "CBS"],
    )

    source_bucket = cluster.servers[0].get_bucket(source_bucket_name)
    data_bucket = cluster.servers[0].get_bucket(data_bucket_name)

    sc = ShadowCluster(
        bob_non_shadower=bob_non_shadower,
        alice_shadower=alice_shadower,
        admin=admin,
        mode=mode,
        shadower_sg=shadower_sg,
        non_shadower_sg=non_shadower_sg,
        source_bucket=source_bucket,
        data_bucket=data_bucket,
    )

    return sc
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    stop_status = cluster.sg_accels[0].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        time.sleep(5)

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 6000)] = "traun"

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 4000)] = "seth"

        # Bring the sg_accel back up
        up_status = cluster.sg_accels[0].start(sg_conf)
        assert up_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(60)

    verify_changes(traun, expected_num_docs=6000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=4000, expected_num_revisions=0, expected_docs=seth.cache)
def test_multiple_db_single_data_bucket_single_index_bucket(params_from_base_test_setup, sg_conf_name, num_users, num_docs_per_user):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_db_unique_data_bucket_unique_index_bucket'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs_per_user: {}".format(num_docs_per_user))

    # 2 dbs share the same data and index bucket
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_db_users = num_users
    num_db2_users = num_users

    admin = Admin(cluster.sync_gateways[0])

    db_one_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="bulk_db_user", number=num_db_users, password="******", channels=["ABC"])
    db_two_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db2", name_prefix="bulk_db2_user", number=num_db2_users, password="******", channels=["ABC"])

    all_users = list(db_one_users)
    all_users.extend(db_two_users)
    assert len(all_users) == num_db_users + num_db2_users

    # Round robin
    num_sgs = len(cluster.sync_gateways)
    count = 1

    for user in all_users:
        user.add_docs(num_docs_per_user, bulk=True)
        user.target = cluster.sync_gateways[(count + 1) % num_sgs]
        count += 1

    time.sleep(10)

    # Get list of all docs from users caches
    cached_docs_from_all_users = {k: v for user in all_users for k, v in user.cache.items()}

    # Verify each user has all of the docs
    verify_changes(all_users, expected_num_docs=(num_users * 2) * num_docs_per_user, expected_num_revisions=0, expected_docs=cached_docs_from_all_users)
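
# Worked example of the round-robin targeting above (illustrative): with
# num_sgs = 2 and count starting at 1, (count + 1) % num_sgs yields targets
# 0, 1, 0, 1, ... so consecutive users spread their writes across both
# sync_gateways.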
def test_single_user_multiple_channels(params_from_base_test_setup,
                                       sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    start = time.time()
    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])
    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC", "CBS", "NBC", "FOX"])

    # Round robin
    count = 1
    num_sgs = len(cluster.sync_gateways)
    while count <= 5:
        seth.add_docs(1000, bulk=True)
        seth.target = cluster.sync_gateways[count % num_sgs]
        count += 1

    log_info(seth)

    time.sleep(10)

    verify_changes(users=[seth],
                   expected_num_docs=5000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)

    end = time.time()
    log_info("TIME:{}s".format(end - start))
def test_sync_access_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs = 100

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_access_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******")

    # Push some ABC docs
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    abc_doc_pusher.add_docs(num_docs)

    # Create an access doc pusher and grant Seth access to the ABC channel
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******", channels=["access"])
    access_doc_pusher.add_doc(doc_id="access_doc", content={"grant_access": "true"})

    # Allow docs to backfill
    time.sleep(5)

    verify_changes(seth, expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=abc_doc_pusher.cache)

    # Remove seth from ABC
    access_doc_pusher.update_doc(doc_id="access_doc", content={"grant_access": "false"})

    # Push more ABC docs
    abc_doc_pusher.add_docs(num_docs)

    time.sleep(10)

    # Verify seth sees no abc_docs
    verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={})
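This test relies on the database's sync function reacting to the access_doc. A minimal sketch of such a sync function, as it might appear in the referenced Sync Gateway config (the actual sg_conf may differ):

ACCESS_SYNC_FUNCTION_SKETCH = """
function(doc) {
    if (doc._id == "access_doc") {
        if (doc.grant_access == "true") {
            access("seth", "ABC");  // grant seth the ABC channel
        }
    } else {
        channel(doc.channels);
    }
}
"""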
def test_multiple_users_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["ABC"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC"])

    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam, bulk=True)  # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_caches = [seth.cache, adam.cache, traun.cache]
    all_docs = {k: v for cache in all_caches for k, v in cache.items()}

    verify_changes([seth, adam, traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=all_docs)
def test_bulk_get_compression(params_from_base_test_setup, sg_conf_name, num_docs, accept_encoding, x_accept_part_encoding, user_agent):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_bulk_get_compression'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using user_agent: {}".format(user_agent))
    log_info("Using accept_encoding: {}".format(accept_encoding))
    log_info("Using x_accept_part_encoding: {}".format(x_accept_part_encoding))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    user = admin.register_user(cluster.sync_gateways[0], "db", "seth", "password", channels=["seth"])

    doc_body = Data.load("mock_users_20k.json")

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = [executor.submit(user.add_doc, doc_id="test-{}".format(i), content=doc_body) for i in range(num_docs)]
        for future in concurrent.futures.as_completed(futures):
            try:
                log_info(future.result())
            except Exception as e:
                log_info("Failed to push doc: {}".format(e))

    docs = [{"id": "test-{}".format(i)} for i in range(num_docs)]
    payload = {"docs": docs}

    # Issue the request and capture the size of the response
    response_size = issue_request(cluster.sync_gateways[0], user_agent, accept_encoding, x_accept_part_encoding, payload)
    log_info("Response size: {}".format(response_size))

    # Verify the response size matches the expected size
    verify_response_size(user_agent, accept_encoding, x_accept_part_encoding, response_size)
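issue_request is defined elsewhere in the suite. A hypothetical sketch of its likely shape, assuming it POSTs to the _bulk_get endpoint with the given headers and returns the raw response size:

def issue_request_sketch(sync_gateway, user_agent, accept_encoding, x_accept_part_encoding, payload):
    # Illustration only; assumes 'requests' and 'json' are imported as elsewhere in the suite.
    headers = {"Content-Type": "application/json"}
    if user_agent:
        headers["User-Agent"] = user_agent
    if accept_encoding:
        headers["Accept-Encoding"] = accept_encoding
    if x_accept_part_encoding:
        headers["X-Accept-Part-Encoding"] = x_accept_part_encoding
    resp = requests.post("{}/db/_bulk_get".format(sync_gateway.url), headers=headers, data=json.dumps(payload))
    resp.raise_for_status()
    return len(resp.content)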
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    admin = Admin(cluster.sync_gateways[0])

    dj_0 = admin.register_user(target=cluster.sync_gateways[0], db="db", name="dj_0", password="******")

    kdwb_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(radio_station), password="******", channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        if doc_pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(doc_pusher.cache)

    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build global doc_id, rev dict for all docs from all KDWB caches
    kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()}

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0, expected_num_docs=number_of_docs_per_pusher, expected_num_revisions=0, expected_docs=kdwb_docs)
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_seth_docs = 7000
    num_cbs_docs = 3000

    admin = Admin(sgs[0])
    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    cbs_user = admin.register_user(target=sgs[0], db="db", name="cbs_user", password="******", channels=["CBS"])
    admin_user = admin.register_user(target=sgs[0], db="db", name="admin", password="******", channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)
    cbs_user.add_docs(num_cbs_docs)

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth], expected_num_docs=num_seth_docs, expected_num_revisions=0, expected_docs=seth.cache)
    verify_changes([cbs_user], expected_num_docs=num_cbs_docs, expected_num_revisions=0, expected_docs=cbs_user.cache)

    all_doc_caches = [seth.cache, cbs_user.cache]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes([admin_user], expected_num_docs=num_cbs_docs + num_seth_docs, expected_num_revisions=0, expected_docs=all_docs)
def test_db_delayed_online(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    time.sleep(2)
    status = admin.take_db_offline("db")
    log_info("offline request response status: {}".format(status))
    time.sleep(10)

    pool = ThreadPool(processes=1)

    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Offline"

    async_result = pool.apply_async(admin.bring_db_online, ("db", 15,))
    status = async_result.get(timeout=15)
    log_info("offline request response status: {}".format(status))

    time.sleep(20)

    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Online"

    # all db REST endpoints should succeed
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0
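The fixed sleeps in this test are timing-sensitive. A polling helper (hypothetical, not part of the Admin client) could wait on the reported db state instead:

def wait_for_db_state(admin, db, expected_state, timeout=60, interval=1):
    # Poll the admin API until the db reports the expected state or the timeout elapses.
    start = time.time()
    while time.time() - start < timeout:
        if admin.get_db_info(db)["state"] == expected_state:
            return
        time.sleep(interval)
    raise AssertionError("db '{}' did not reach state '{}' within {}s".format(db, expected_state, timeout))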
def test_overloaded_channel_cache(params_from_base_test_setup, sg_conf_name, num_docs, user_channels, filter, limit):

    """
    The purpose of this test is to verify that channel cache backfill via view queries is working properly.
    It works by doing the following:

    - Set the channel cache size in the Sync Gateway config to a small number, e.g. 750. This means that only 750 docs fit in the channel cache
    - Add a larger number of docs, e.g. 1000.
    - Issue a _changes request that will return all 1000 docs

    Expected behavior / Verification:

    - Since 1000 docs are requested from the changes feed but only 750 docs fit in the channel cache, Sync Gateway will need to do a view query
      to get the remaining 250 changes
    - Verify that the changes feed returns all 1000 expected docs
    - Check the expvar statistics to verify that view queries were made
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    if mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_overloaded_channel_cache'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using user_channels: {}".format(user_channels))
    log_info("Using filter: {}".format(filter))
    log_info("Using limit: {}".format(limit))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    target_sg = cluster.sync_gateways[0]

    admin = Admin(target_sg)

    users = admin.register_bulk_users(target_sg, "db", "user", 1000, "password", [user_channels])
    assert len(users) == 1000

    doc_pusher = admin.register_user(target_sg, "db", "abc_doc_pusher", "password", ["ABC"])
    doc_pusher.add_docs(num_docs, bulk=True)

    # Give a few seconds to let changes register
    time.sleep(2)

    start = time.time()

    # This uses a ProcessPoolExecutor due to https://github.com/couchbaselabs/mobile-testkit/issues/1142
    with concurrent.futures.ProcessPoolExecutor(max_workers=100) as executor:

        changes_requests = []
        errors = []

        for user in users:
            if filter and limit is not None:
                changes_requests.append(executor.submit(user.get_changes, since=0, limit=limit, filter="sync_gateway/bychannel", channels=["ABC"]))
            elif filter and limit is None:
                changes_requests.append(executor.submit(user.get_changes, filter="sync_gateway/bychannel", channels=["ABC"]))
            elif not filter and limit is not None:
                changes_requests.append(executor.submit(user.get_changes, limit=limit))
            elif not filter and limit is None:
                changes_requests.append(executor.submit(user.get_changes))

        for future in concurrent.futures.as_completed(changes_requests):
            changes = future.result()
            if limit is not None:
                assert len(changes["results"]) == 50
            else:
                assert len(changes["results"]) == 5001

        # changes feed should all be successful
        log_info(len(errors))
        assert len(errors) == 0

        if limit is not None:
            # HACK: should complete well under this bound unless requests are blocking on view calls
            end = time.time()
            time_for_users_to_get_all_changes = end - start
            log_info("Time for users to get all changes: {}".format(time_for_users_to_get_all_changes))
            assert time_for_users_to_get_all_changes < 240, "Time to get all changes exceeded 240s: {}s".format(
                time_for_users_to_get_all_changes
            )

        # Sanity check that a subset of users have _changes feed intact
        for i in range(10):
            verify_changes(users[i], expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=doc_pusher.cache)

        # Get sync_gateway expvars
        resp = requests.get(url="http://{}:4985/_expvar".format(target_sg.ip))
        resp.raise_for_status()
        resp_obj = resp.json()

        # Since Sync Gateway will need to issue view queries to handle _changes requests that don't
        # fit in the channel cache, we expect there to be several view queries
        assert resp_obj["syncGateway_changeCache"]["view_queries"] > 0
def test_sync_require_roles(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_require_roles'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    tv_stations = ["ABC", "CBS", "NBC"]

    number_of_djs = 10
    number_of_vjs = 10

    number_of_docs_per_pusher = 100

    admin = Admin(cluster.sync_gateways[0])

    admin.create_role("db", name="radio_stations", channels=radio_stations)
    admin.create_role("db", name="tv_stations", channels=tv_stations)

    djs = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                    db="db",
                                    name_prefix="dj",
                                    number=number_of_djs,
                                    password="******",
                                    roles=["radio_stations"])
    vjs = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                    db="db",
                                    name_prefix="vj",
                                    number=number_of_vjs,
                                    password="******",
                                    roles=["tv_stations"])

    mogul = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="mogul",
                                password="******",
                                roles=["tv_stations", "radio_stations"])

    radio_doc_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(radio_station),
            password="******",
            channels=[radio_station],
            roles=["radio_stations"])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        radio_doc_caches.append(doc_pusher.cache)

    expected_num_radio_docs = len(radio_stations) * number_of_docs_per_pusher

    # All docs that have been pushed with the "radio_stations" role
    all_radio_docs = {
        k: v
        for cache in radio_doc_caches for k, v in cache.items()
    }

    tv_doc_caches = []
    for tv_station in tv_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(tv_station),
            password="******",
            channels=[tv_station],
            roles=["tv_stations"])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        tv_doc_caches.append(doc_pusher.cache)

    expected_num_tv_docs = len(tv_stations) * number_of_docs_per_pusher

    # All docs that have been pushed with the "tv_stations" role
    all_tv_docs = {k: v for cache in tv_doc_caches for k, v in cache.items()}

    # Read only users
    radio_channels_no_roles_user = admin.register_user(
        target=cluster.sync_gateways[0],
        db="db",
        name="bad_radio_user",
        password="******",
        channels=radio_stations)
    tv_channel_no_roles_user = admin.register_user(
        target=cluster.sync_gateways[0],
        db="db",
        name="bad_tv_user",
        password="******",
        channels=tv_stations)

    # Should not be allowed
    radio_channels_no_roles_user.add_docs(13, name_prefix="bad_doc")
    tv_channel_no_roles_user.add_docs(26, name_prefix="bad_doc")

    read_only_user_caches = [
        radio_channels_no_roles_user.cache, tv_channel_no_roles_user.cache
    ]
    read_only_user_docs = {
        k: v
        for cache in read_only_user_caches for k, v in cache.items()
    }

    # Dictionary should be empty if they were blocked from pushing docs
    assert len(read_only_user_docs.items()) == 0

    # This appears to be non-deterministic: occasionally the changes call returns only some of the documents.
    # There is currently no retry loop in verify_changes, and the bulk_docs requests are likely still processing.
    time.sleep(5)

    # Should receive docs from the radio station channels
    verify_changes(radio_channels_no_roles_user,
                   expected_num_docs=expected_num_radio_docs,
                   expected_num_revisions=0,
                   expected_docs=all_radio_docs)

    # Should receive docs from the tv station channels
    verify_changes(tv_channel_no_roles_user,
                   expected_num_docs=expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=all_tv_docs)

    # verify all djs with the 'radio_stations' role get the docs with radio station channels
    verify_changes(djs,
                   expected_num_docs=expected_num_radio_docs,
                   expected_num_revisions=0,
                   expected_docs=all_radio_docs)

    # verify all vjs with the 'tv_stations' role get the docs with tv station channels
    verify_changes(vjs,
                   expected_num_docs=expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=all_tv_docs)

    # Verify mogul gets docs for all the channels associated with the radio_stations + tv_stations roles
    all_doc_caches = list(radio_doc_caches)
    all_doc_caches.extend(tv_doc_caches)
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}

    for k, v in all_docs.items():
        assert not k.startswith("bad_doc")

    verify_changes(mogul,
                   expected_num_docs=expected_num_radio_docs +
                   expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
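For the role-less writers above to be rejected, the sync function in the referenced config presumably calls requireRole() before accepting a write. A sketch of that shape (the actual sg_conf may differ):

REQUIRE_ROLE_SYNC_SKETCH = """
function(doc) {
    // Writers must hold one of the station roles; role-less users are rejected.
    requireRole(["radio_stations", "tv_stations"]);
    channel(doc.channels);
}
"""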
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup,
                                            sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # stop sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun,
                   expected_num_docs=2000,
                   expected_num_revisions=0,
                   expected_docs=traun.cache)
    verify_changes(seth,
                   expected_num_docs=8000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)

    # Verify that the sg_accel node ("ac1") is down but the other nodes are running
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failing node so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[0].start(sg_conf)
    assert start_status == 0
def test_sync_role_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs_per_channel = 100
    tv_channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_role_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    admin.create_role(db="db", name="tv_stations", channels=tv_channels)

    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******")

    doc_pushers = []
    doc_pusher_caches = []
    # Push docs to each tv station channel
    for tv_channel in tv_channels:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(tv_channel),
            password="******",
            channels=[tv_channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Before role access grant
    verify_changes(seth,
                   expected_num_docs=0,
                   expected_num_revisions=0,
                   expected_docs={})

    # Create an access doc pusher and use the sync function to grant Seth the tv_stations role
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                            db="db",
                                            name="access_doc_pusher",
                                            password="******",
                                            channels=["access"])
    access_doc_pusher.add_doc(doc_id="access_doc",
                              content={"grant_access": "true"})

    # Allow docs to backfill
    time.sleep(5)

    all_tv_docs = {
        k: v
        for cache in doc_pusher_caches for k, v in cache.items()
    }
    verify_changes(seth,
                   expected_num_docs=num_docs_per_channel * len(tv_channels),
                   expected_num_revisions=0,
                   expected_docs=all_tv_docs)

    # Remove seth from tv_stations role
    access_doc_pusher.update_doc(doc_id="access_doc",
                                 content={"grant_access": "false"})

    # Allow docs to backfill
    time.sleep(5)

    # Verify seth sees no tv_stations channel docs
    verify_changes(seth,
                   expected_num_docs=0,
                   expected_num_revisions=0,
                   expected_docs={})

    # Push more docs to each tv station channel
    for doc_pusher in doc_pushers:
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

    # Allow docs to backfill
    time.sleep(5)

    # Verify seth sees no tv_stations channel docs
    verify_changes(seth,
                   expected_num_docs=0,
                   expected_num_revisions=0,
                   expected_docs={})
def test_continuous_changes_parametrized(params_from_base_test_setup,
                                         sg_conf_name, num_users, num_docs,
                                         num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                      db="db",
                                      name_prefix="user",
                                      number=num_users,
                                      password="******",
                                      channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="abc_doc_pusher",
                                         password="******",
                                         channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="doc_terminator",
                                         password="******",
                                         channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = {
            executor.submit(user.start_continuous_changes_tracking,
                            termination_doc_id="killcontinuous"): user.name
            for user in users
        }
        futures[executor.submit(abc_doc_pusher.add_docs,
                                num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to the users' continuous changes feed subscribers
            if task_name == "doc_pusher":

                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user's continuous _changes feed closes, collect the docs and verify the user got all the channel docs
                docs_in_changes = future.result()
                # The docs returned from the continuous changes feed should match the pushed channel docs
                verify_same_docs(expected_num_docs=num_docs,
                                 doc_dict_one=docs_in_changes,
                                 doc_dict_two=abc_doc_pusher.cache)

    # Verify the doc pusher sees all of its docs with the expected number of revisions
    verify_changes(abc_doc_pusher,
                   expected_num_docs=num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=abc_doc_pusher.cache)
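start_continuous_changes_tracking belongs to the user client. A simplified sketch of the idea, assuming a streaming _changes request that stops once the termination doc arrives:

def track_continuous_changes_sketch(session, sg_url, db, termination_doc_id):
    # Hypothetical illustration: stream the continuous changes feed and
    # collect doc ids until the termination doc shows up.
    docs_seen = {}
    resp = session.get("{}/{}/_changes?feed=continuous".format(sg_url, db), stream=True)
    for line in resp.iter_lines():
        if not line:
            continue  # continuous feeds emit blank heartbeat lines
        change = json.loads(line)
        if change.get("id") == termination_doc_id:
            break
        docs_seen[change["id"]] = change["changes"]
    return docs_seen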
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name,
                                   num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="abc_doc_pusher",
                                         password="******",
                                         channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="doc_terminator",
                                         password="******",
                                         channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = dict()
        futures[executor.submit(
            seth.start_continuous_changes_tracking,
            termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs,
                                num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth's continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(5)

                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Verify the doc pusher sees all of its docs with the expected number of revisions
    verify_changes(abc_doc_pusher,
                   expected_num_docs=num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=abc_doc_pusher.cache)

    # The docs collected from the continuous changes feed should match the pushed channel docs
    verify_same_docs(expected_num_docs=num_docs,
                     doc_dict_one=docs_in_changes,
                     doc_dict_two=abc_doc_pusher.cache)
def test_roles_sanity(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'roles_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    tv_stations = ["ABC", "CBS", "NBC"]

    number_of_djs = 10
    number_of_vjs = 10

    number_of_docs_per_pusher = 500

    admin = Admin(cluster.sync_gateways[0])

    admin.create_role("db", name="radio_stations", channels=radio_stations)
    admin.create_role("db", name="tv_stations", channels=tv_stations)

    djs = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                    db="db",
                                    name_prefix="dj",
                                    number=number_of_djs,
                                    password="******",
                                    roles=["radio_stations"])
    vjs = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                    db="db",
                                    name_prefix="vj",
                                    number=number_of_vjs,
                                    password="******",
                                    roles=["tv_stations"])

    mogul = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="mogul",
                                password="******",
                                roles=["tv_stations", "radio_stations"])

    radio_doc_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(radio_station),
            password="******",
            channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        radio_doc_caches.append(doc_pusher.cache)

    radio_docs = {k: v for cache in radio_doc_caches for k, v in cache.items()}

    tv_doc_caches = []
    for tv_station in tv_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(tv_station),
            password="******",
            channels=[tv_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        tv_doc_caches.append(doc_pusher.cache)

    tv_docs = {k: v for cache in tv_doc_caches for k, v in cache.items()}

    # Verify djs get docs for all the channels associated with the radio_stations role
    expected_num_radio_docs = len(radio_stations) * number_of_docs_per_pusher
    verify_changes(djs,
                   expected_num_docs=expected_num_radio_docs,
                   expected_num_revisions=0,
                   expected_docs=radio_docs)

    # Verify vjs get docs for all the channels associated with the tv_stations role
    expected_num_tv_docs = len(tv_stations) * number_of_docs_per_pusher
    verify_changes(vjs,
                   expected_num_docs=expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=tv_docs)

    # Verify mogul gets docs for all the channels associated with the radio_stations + tv_stations roles
    all_docs_caches = list(radio_doc_caches)
    all_docs_caches.extend(tv_doc_caches)
    all_docs = {k: v for cache in all_docs_caches for k, v in cache.items()}
    verify_changes(mogul,
                   expected_num_docs=expected_num_radio_docs +
                   expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
def test_issue_1524(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'issue_1524'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    user_no_channels = admin.register_user(target=cluster.sync_gateways[0],
                                           db="db",
                                           name="user_no_channels",
                                           password="******")
    a_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                       db="db",
                                       name="a_doc_pusher",
                                       password="******",
                                       channels=["A"])
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                            db="db",
                                            name="access_doc_pusher",
                                            password="******")
    terminator = admin.register_user(target=cluster.sync_gateways[0],
                                     db="db",
                                     name="terminator",
                                     password="******",
                                     channels=["A"])

    longpoll_docs = {}

    with concurrent.futures.ThreadPoolExecutor(
            max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS
    ) as executor:
        futures = dict()
        futures[executor.submit(
            user_no_channels.start_longpoll_changes_tracking,
            termination_doc_id="terminator")] = "polling"
        log_info("Starting longpoll feed")

        futures[executor.submit(a_doc_pusher.add_docs,
                                num_docs=num_docs,
                                bulk=True,
                                name_prefix="a-doc")] = "a_docs_pushed"
        log_info("'A' channel docs pushing")

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "a_docs_pushed":
                log_info("'A' channel docs pushed")
                time.sleep(5)

                log_info(
                    "Grant 'user_no_channels' access to channel 'A' via sync function"
                )
                access_doc_pusher.add_doc(doc_id="access_doc",
                                          content={
                                              "accessUser": "******",
                                              "accessChannels": ["A"]
                                          })

                time.sleep(5)
                log_info("'terminator' pushing termination doc")
                terminator.add_doc(doc_id="terminator")

            if task_name == "polling":
                log_info("Getting changes from longpoll")
                longpoll_docs, last_seq = future.result()
                log_info(
                    "Verify docs in longpoll changes are the expected docs")

    log_info(
        "Verifying 'user_no_channels' has same docs as 'a_doc_pusher' + access_doc"
    )

    # One off changes verification will include the termination doc
    expected_docs = {
        k: v
        for cache in [a_doc_pusher.cache, terminator.cache]
        for k, v in cache.items()
    }
    verify_changes(user_no_channels,
                   expected_num_docs=num_docs + 1,
                   expected_num_revisions=0,
                   expected_docs=expected_docs)

    # TODO: Fix this inconsistency suite wide
    # Longpoll docs do not save termination doc
    log_info("Verify docs in longpoll changes are the expected docs")
    verify_same_docs(num_docs, longpoll_docs, a_doc_pusher.cache)
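start_longpoll_changes_tracking returns both the collected docs and the last sequence, as unpacked above. A simplified sketch of that loop (hypothetical; the real client differs):

def track_longpoll_changes_sketch(session, sg_url, db, termination_doc_id):
    # Issue longpoll _changes requests in a loop, advancing 'since'
    # each round, until the termination doc appears.
    docs_seen = {}
    since = 0
    while True:
        resp = session.get("{}/{}/_changes?feed=longpoll&since={}".format(sg_url, db, since))
        resp.raise_for_status()
        body = resp.json()
        for change in body["results"]:
            if change["id"] == termination_doc_id:
                return docs_seen, body["last_seq"]
            docs_seen[change["id"]] = change["changes"]
        since = body["last_seq"]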
class SyncGateway:
    def __init__(self, cluster_config, target):
        self.ansible_runner = AnsibleRunner(cluster_config)
        self.ip = target["ip"]
        self.url = "http://{}:4984".format(target["ip"])
        self.hostname = target["name"]
        self._headers = {'Content-Type': 'application/json'}
        self.admin = Admin(self)

        self.cluster_config = cluster_config
        self.server_port = 8091
        self.server_scheme = "http"

        if is_cbs_ssl_enabled(self.cluster_config):
            self.server_port = 18091
            self.server_scheme = "https"

        self.couchbase_server_primary_node = add_cbs_to_sg_config_server_field(
            self.cluster_config)

    def info(self):
        r = requests.get(self.url)
        r.raise_for_status()
        return r.text

    def stop(self):
        status = self.ansible_runner.run_ansible_playbook(
            "stop-sync-gateway.yml", subset=self.hostname)
        return status

    def start(self, config):
        conf_path = os.path.abspath(config)
        log.info(">>> Starting sync_gateway with configuration: {}".format(
            conf_path))

        playbook_vars = {
            "sync_gateway_config_filepath": conf_path,
            "server_port": self.server_port,
            "server_scheme": self.server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            "revs_limit": "",
            "couchbase_server_primary_node": self.couchbase_server_primary_node
        }

        if is_xattrs_enabled(self.cluster_config):
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(self.cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(self.cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            log_info("KeyError in getting revs_limit: {}".format(ex.message))
        status = self.ansible_runner.run_ansible_playbook(
            "start-sync-gateway.yml",
            extra_vars=playbook_vars,
            subset=self.hostname)
        return status

    def restart(self, config, cluster_config=None):

        if cluster_config is not None:
            self.cluster_config = cluster_config
        conf_path = os.path.abspath(config)
        log.info(">>> Restarting sync_gateway with configuration: {}".format(
            conf_path))

        playbook_vars = {
            "sync_gateway_config_filepath": conf_path,
            "server_port": self.server_port,
            "server_scheme": self.server_scheme,
            "autoimport": "",
            "xattrs": "",
            "no_conflicts": "",
            "revs_limit": "",
            "couchbase_server_primary_node": self.couchbase_server_primary_node
        }

        if is_xattrs_enabled(self.cluster_config):
            playbook_vars["autoimport"] = '"import_docs": "continuous",'
            playbook_vars["xattrs"] = '"enable_shared_bucket_access": true,'

        if no_conflicts_enabled(self.cluster_config):
            playbook_vars["no_conflicts"] = '"allow_conflicts": false,'
        try:
            revs_limit = get_revs_limit(self.cluster_config)
            playbook_vars["revs_limit"] = '"revs_limit": {},'.format(
                revs_limit)
        except KeyError as ex:
            log_info("KeyError in getting revs_limit: {}".format(ex.message))

        status = self.ansible_runner.run_ansible_playbook(
            "reset-sync-gateway.yml",
            extra_vars=playbook_vars,
            subset=self.hostname)
        return status

    def verify_launched(self):
        r = requests.get(self.url)
        log.info("GET {} ".format(r.url))
        log.info("{}".format(r.text))
        r.raise_for_status()

    def create_db(self, name):
        return self.admin.create_db(name)

    def delete_db(self, name):
        return self.admin.delete_db(name)

    def reset(self):
        dbs = self.admin.get_dbs()
        for db in dbs:
            self.admin.delete_db(db)

    def start_push_replication(self,
                               target,
                               source_db,
                               target_db,
                               continuous=True,
                               use_remote_source=False,
                               channels=None,
                               async=False,
                               use_admin_url=False):
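        # The body of this method is not included in this listing. A sketch of
        # its likely shape, based on how the tests call it (hypothetical; the
        # real implementation may differ): build a payload for Sync Gateway's
        # _replicate endpoint and POST it to the admin or public port.
        # Assumes 'requests' and 'json' are imported as elsewhere in this module.
        payload = {
            "source": "{}/{}".format(self.url, source_db) if use_remote_source else source_db,
            "target": "{}/{}".format(target, target_db),
            "continuous": continuous,
        }
        if channels is not None:
            payload["filter"] = "sync_gateway/bychannel"
            payload["query_params"] = {"channels": ",".join(channels)}
        base_url = self.admin.admin_url if use_admin_url else self.url
        r = requests.post("{}/_replicate".format(base_url), headers=self._headers, data=json.dumps(payload))
        r.raise_for_status()
        return r.json()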
def test_multiple_users_multiple_channels(params_from_base_test_setup,
                                          sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC"])
    adam = admin.register_user(target=sgs[0],
                               db="db",
                               name="adam",
                               password="******",
                               channels=["NBC", "CBS"])
    traun = admin.register_user(target=sgs[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam)  # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Seth should get docs from seth + traun
    seth_subset = [seth.cache, traun.cache]
    seth_expected_docs = {
        k: v
        for cache in seth_subset for k, v in cache.items()
    }
    verify_changes([seth],
                   expected_num_docs=num_docs_seth + num_docs_traun,
                   expected_num_revisions=0,
                   expected_docs=seth_expected_docs)

    # Adam should get docs from adam + traun
    adam_subset = [adam.cache, traun.cache]
    adam_expected_docs = {
        k: v
        for cache in adam_subset for k, v in cache.items()
    }
    verify_changes([adam],
                   expected_num_docs=num_docs_adam + num_docs_traun,
                   expected_num_revisions=0,
                   expected_docs=adam_expected_docs)

    # Traun should get docs from seth + adam + traun
    traun_subset = [seth.cache, adam.cache, traun.cache]
    traun_expected_docs = {
        k: v
        for cache in traun_subset for k, v in cache.items()
    }
    verify_changes([traun],
                   expected_num_docs=num_docs_seth + num_docs_adam +
                   num_docs_traun,
                   expected_num_revisions=0,
                   expected_docs=traun_expected_docs)
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []
    # Push docs to each of the ABC, NBC and CBS channels
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="{}_doc_pusher".format(channel),
                                         password="******",
                                         channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Verify that none of the doc_pushers get docs. They should all be redirected by the sync function
    verify_changes(doc_pushers,
                   expected_num_docs=0,
                   expected_num_revisions=0,
                   expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0],
                                     db="db",
                                     name="subscriber",
                                     password="******",
                                     channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # subscriber should receive all docs
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber,
                   expected_num_docs=len(channels) * num_docs_per_channel,
                   expected_num_revisions=0,
                   expected_docs=all_docs)

    # update the subscriber's cache so the user knows which docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Verify the docs are back in the respective ABC, NBC, CBS channels
    # HACK: Ignoring rev_id verification because the docs were updated by the subscriber user and not the
    # doc_pusher
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher,
                       expected_num_docs=num_docs_per_channel,
                       expected_num_revisions=1,
                       expected_docs=doc_pusher.cache,
                       ignore_rev_ids=True)

    # Verify that all docs have been flagged with _removed = true in the changes feed for the subscriber
    verify_docs_removed(subscriber,
                        expected_num_docs=len(all_docs.items()),
                        expected_docs=all_docs)
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup,
                                           sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    stop_status = cluster.sg_accels[0].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        time.sleep(5)

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 6000)] = "traun"

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 4000)] = "seth"

        # Bring the sg_accel back up
        up_status = cluster.sg_accels[0].start(sg_conf)
        assert up_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO: replace this fixed sleep with polling; see the poll_for_doc_count
    # sketch after this test
    time.sleep(120)

    verify_changes(traun,
                   expected_num_docs=6000,
                   expected_num_revisions=0,
                   expected_docs=traun.cache)
    verify_changes(seth,
                   expected_num_docs=4000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)
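# A hypothetical polling helper that could replace the fixed sleeps flagged
# with TODOs in the resharding tests; it relies only on the
# get_changes(since=0) API already used elsewhere in this file:
def poll_for_doc_count(user, expected_num_docs, timeout=120, interval=5):
    # Poll the user's changes feed until at least the expected number of docs
    # appears, or fail once the timeout elapses
    start = time.time()
    while time.time() - start < timeout:
        changes = user.get_changes(since=0)
        if len(changes["results"]) >= expected_num_docs:
            return
        time.sleep(interval)
    raise AssertionError("Timed out waiting for {} docs".format(expected_num_docs))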
def test_sg_replicate_basic_test(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    log_info("Running 'test_sg_replicate_basic_test'")
    log_info("Using cluster_config: {}".format(cluster_config))

    config = sync_gateway_config_path_for_mode("sync_gateway_sg_replicate", mode)

    sg1, sg2 = create_sync_gateways(
        cluster_config=cluster_config,
        sg_config_path=config
    )

    admin = Admin(sg1)
    admin.admin_url = sg1.url

    sg1_user, sg2_user = create_sg_users(sg1, sg2, DB1, DB2)

    # Add docs to sg1 and sg2
    doc_id_sg1 = sg1_user.add_doc()
    doc_id_sg2 = sg2_user.add_doc()

    # Wait until docs show up in changes feed
    wait_until_doc_in_changes_feed(sg1, DB1, doc_id_sg1)
    wait_until_doc_in_changes_feed(sg2, DB2, doc_id_sg2)

    # Make sure the docs do not appear on the target DB before a replication
    # has been started (they would if the SGs were sharing a CB bucket)
    time.sleep(5)
    assert_does_not_have_doc(sg2_user, doc_id_sg1)
    assert_does_not_have_doc(sg1_user, doc_id_sg2)

    # Start a push replication sg1 -> sg2. This should block until the
    # replication completes, and the result should contain the stats of the
    # completed replication
    replication_result = sg1.start_push_replication(
        sg2.admin.admin_url,
        DB1,
        DB2,
        continuous=False,
        use_remote_source=True,
        use_admin_url=True
    )

    logging.debug("replication_result 1: {}".format(replication_result))

    assert replication_result["continuous"] is False, 'replication_result["continuous"] != False'
    assert replication_result["docs_written"] == 1, 'replication_result["docs_written"] != 1'
    assert replication_result["docs_read"] == 1, 'replication_result["docs_read"] != 1'
    assert replication_result["doc_write_failures"] == 0, 'replication_result["doc_write_failures"] != 0'

    # Start a pull replication sg1 <- sg2
    replication_result = sg1.start_pull_replication(
        sg2.admin.admin_url,
        DB2,
        DB1,
        continuous=False,
        use_remote_target=True,
        use_admin_url=True
    )

    logging.debug("replication_result 2: {}".format(replication_result))

    assert replication_result["continuous"] is False, 'replication_result["continuous"] != False'
    assert replication_result["docs_written"] == 1, 'replication_result["docs_written"] != 1'
    assert replication_result["docs_read"] == 1, 'replication_result["docs_read"] != 1'
    assert replication_result["doc_write_failures"] == 0, 'replication_result["doc_write_failures"] != 0'

    # Verify that the doc added to sg1 made it to sg2
    assert_has_doc(sg2_user, doc_id_sg1)

    # Verify that the doc added to sg2 made it to sg1
    assert_has_doc(sg1_user, doc_id_sg2)
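# start_push_replication / start_pull_replication above wrap Sync Gateway's
# sg-replicate admin API. A minimal sketch of the kind of one-shot request such
# a helper issues (hypothetical helper name and URL layout; the exact payload
# depends on the helper's implementation):
import requests

def one_shot_push(source_admin_url, source_db, target_admin_url, target_db):
    # POST to the admin _replicate endpoint; a one-shot (continuous=False)
    # replication blocks until it completes and returns stats such as
    # docs_written / docs_read / doc_write_failures
    body = {
        "source": "{}/{}".format(source_admin_url, source_db),
        "target": "{}/{}".format(target_admin_url, target_db),
        "continuous": False,
    }
    resp = requests.post("{}/_replicate".format(source_admin_url), json=body)
    resp.raise_for_status()
    return resp.json()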
def test_dcp_reshard_single_sg_accel_goes_down_and_up(
        params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # Take down the first sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO: replace this fixed sleep with polling; see the poll_for_doc_count
    # sketch above
    time.sleep(300)

    verify_changes(traun,
                   expected_num_docs=10000,
                   expected_num_revisions=0,
                   expected_docs=traun.cache)
    verify_changes(seth,
                   expected_num_docs=10000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)

    # Start second writer again
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs,
             num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                      db="db",
                                      name_prefix="user",
                                      number=num_users,
                                      password="******",
                                      channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    # Use integer division so the index stays an int under Python 3
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(
            second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify last "seq" follows the formate 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported mode: {}".format(mode))

    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users,
                   expected_num_docs=num_users * num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=all_docs)
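# A small illustration of the "di" mode sequence formats asserted above: a
# fully processed entry looks like "12313-0", while an entry still carrying a
# buffered suffix looks like "12313-0::1023.15" (hypothetical helper name):
def is_plain_di_seq(seq):
    # "12313-0" -> True, "12313-0::1023.15" -> False
    return len(seq.split("::")) == 1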
def test_db_online_offline_webhooks_offline(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):

    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    if mode == "di":
        pytest.skip("Offline tests not supported in Di mode -- see https://github.com/couchbase/sync_gateway/issues/2423#issuecomment-300841425")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"
    ws = WebServer()
    ws.start()

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register users
    log_info("Register users")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    # Take db offline
    sg_client = MobileRestClient()
    status = sg_client.take_db_offline(cluster_conf=cluster_conf, db="db")
    assert status == 0

    time.sleep(5)
    db_info = admin.get_db_info("db")
    log_info("Expecting db state {} found db state {}".format("Offline", db_info['state']))
    assert db_info["state"] == "Offline"

    # Give the webhook time to fire, then check that the last event reports
    # the offline state
    time.sleep(5)
    webhook_events = ws.get_data()
    log_info("webhook events: {}".format(webhook_events))

    try:
        last_event = webhook_events[-1]
        assert last_event['state'] == 'offline'

        # Bring db online
        status = sg_client.bring_db_online(cluster_conf=cluster_conf, db="db")
        assert status == 0

        time.sleep(5)
        db_info = admin.get_db_info("db")
        log_info("Expecting db state {} found db state {}".format("Online", db_info['state']))
        assert db_info["state"] == "Online"
        time.sleep(5)
        webhook_events = ws.get_data()
        last_event = webhook_events[-1]
        assert last_event['state'] == 'online'
        time.sleep(10)
        log_info("webhook event {}".format(webhook_events))
    except IndexError:
        log_info("Received index error")
        raise
    finally:
        ws.stop()
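# A hypothetical polling helper that could replace the fixed sleeps around
# ws.get_data() above; it assumes WebServer.get_data() returns the list of
# webhook events received so far:
def wait_for_state_event(ws, expected_state, timeout=30, interval=1):
    # Poll the webhook receiver until an event with the expected db state
    # arrives, failing after the timeout
    start = time.time()
    while time.time() - start < timeout:
        events = ws.get_data()
        if any(event.get("state") == expected_state for event in events):
            return
        time.sleep(interval)
    raise AssertionError("No '{}' webhook event within {}s".format(expected_state, timeout))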