Code Example #1
def test_online_to_offline_check_503(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all db endpoints should function as expected
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0

    # Take the database offline
    status = admin.take_db_offline(db="db")
    assert status == 200

    # all db endpoints should return 503
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])

    # Expect errors from NUM_ENDPOINTS unique REST endpoints plus two per-doc failures (num_docs * 2)
    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for error_tuple in errors:
        log_info("({},{})".format(error_tuple[0], error_tuple[1]))
        assert error_tuple[1] == 503
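The Admin.take_db_offline and Admin.bring_db_online helpers used in these examples wrap Sync Gateway's admin REST API (POST /{db}/_offline and POST /{db}/_online). A rough equivalent with plain requests, as a sketch only; the host, port, and db name below are placeholders rather than values taken from the test:

import requests

sg_admin_url = "http://localhost:4985"  # placeholder; the testkit derives the real admin URL from cluster_config

# Take the database offline; public requests against it should then return 503
resp = requests.post("{}/db/_offline".format(sg_admin_url))
assert resp.status_code == 200

# Bring it back online; public requests should succeed again
resp = requests.post("{}/db/_online".format(sg_admin_url))
assert resp.status_code == 200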
Code Example #2
def test_db_offline_tap_loss_sanity(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # all db REST endpoints should succeed
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0

    # Delete bucket to sever TAP feed
    status = cluster.servers[0].delete_bucket("data-bucket")
    assert status == 0

    # Check that the db is in the offline state
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for error_tuple in errors:
        log_info("({},{})".format(error_tuple[0], error_tuple[1]))
        assert error_tuple[1] == 503
Code Example #3
def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    cluster_config = params_from_base_suite_setup["cluster_config"]
    mode = params_from_base_suite_setup["mode"]

    test_name = request.node.name
    log_info("Setting up test '{}'".format(test_name))

    # This dictionary is passed to each test
    yield {"cluster_config": cluster_config, "mode": mode}

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    # Capture testkit socket usage
    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = Cluster(cluster_config)
    errors = c.verify_alive(mode)
    assert len(errors) == 0

    # If the test failed, pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)
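The request.node.rep_call attribute checked during teardown is not built into pytest; it is normally attached in conftest.py by a test-report hook. A minimal sketch of that standard pattern (assumed here, not copied from the testkit's conftest):

import pytest

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Let the other hooks build the report for this phase, then grab it
    outcome = yield
    rep = outcome.get_result()
    # Expose it as item.rep_setup / item.rep_call / item.rep_teardown so that
    # fixtures can later check request.node.rep_call.failed
    setattr(item, "rep_" + rep.when, rep)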
Code Example #4
def provision_or_reset_cluster(provision_or_reset, sg_deploy_type,
                               couchbase_server_version, sync_gateway_version,
                               sync_gateway_commit, sync_gateway_config_file,
                               cluster_config):

    server_config = CouchbaseServerConfig(version=couchbase_server_version)

    version_number, build_number = version_and_build(sync_gateway_version)

    sync_gateway_conf = SyncGatewayConfig(version_number=version_number,
                                          build_number=build_number,
                                          commit=sync_gateway_commit,
                                          build_flags="",
                                          config_path=sync_gateway_config_file,
                                          skip_bucketcreation=False)

    # Don't specify version number on a source build
    if sg_deploy_type == "Source":
        sync_gateway_conf.version_number = None
    else:
        # Likewise, don't specify commit on a package build
        sync_gateway_conf.commit = None

    if provision_or_reset == "Provision":
        print("provisioning cluster")
        provision_cluster(cluster_config=cluster_config,
                          couchbase_server_config=server_config,
                          sync_gateway_config=sync_gateway_conf)
    elif provision_or_reset == "Reset":
        print("resetting cluster")
        cluster = Cluster(config=cluster_config)
        cluster.reset(sync_gateway_config_file)
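A hypothetical call site, only to show how the two modes are selected; every concrete value below is a placeholder, not something taken from the original code:

# Reset an existing cluster instead of reprovisioning it from scratch
provision_or_reset_cluster(provision_or_reset="Reset",
                           sg_deploy_type="Package",                              # anything other than "Source"
                           couchbase_server_version="5.0.0",                      # placeholder
                           sync_gateway_version="1.5.0-477",                      # placeholder "version-build" string
                           sync_gateway_commit=None,
                           sync_gateway_config_file="sync_gateway_default.json",  # placeholder path
                           cluster_config="base_cc")                              # placeholder cluster config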
Code Example #5
def test_offline_false_config_rest(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # all db endpoints should function as expected
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])

    assert len(errors) == 0

    # Scenario 4
    # Check the db has an Online state at each running sync_gateway
    for sg in cluster.sync_gateways:
        admin = Admin(sg)
        db_info = admin.get_db_info("db")
        assert db_info["state"] == "Online"
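Admin.get_db_info corresponds to GET /{db}/ on the admin port, which reports the database state among other fields. A rough requests-based equivalent (a sketch; the URL is a placeholder, not taken from the test):

import requests

resp = requests.get("http://localhost:4985/db/")  # placeholder admin URL for the "db" database
resp.raise_for_status()
db_info = resp.json()
assert db_info["state"] == "Online"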
Code Example #6
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_seth_docs = 7000
    num_cbs_docs = 3000

    admin = Admin(sgs[0])
    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC"])
    cbs_user = admin.register_user(target=sgs[0],
                                   db="db",
                                   name="cbs_user",
                                   password="******",
                                   channels=["CBS"])
    admin_user = admin.register_user(target=sgs[0],
                                     db="db",
                                     name="admin",
                                     password="******",
                                     channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)
    cbs_user.add_docs(num_cbs_docs)

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth],
                   expected_num_docs=num_seth_docs,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)
    verify_changes([cbs_user],
                   expected_num_docs=num_cbs_docs,
                   expected_num_revisions=0,
                   expected_docs=cbs_user.cache)

    all_doc_caches = [seth.cache, cbs_user.cache]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes([admin_user],
                   expected_num_docs=num_cbs_docs + num_seth_docs,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
Code Example #7
def test_log_rotation_negative(params_from_base_test_setup, sg_conf_name):
    """Test log rotation with negative values for:
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2
    SG shouldn't start
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set negative values for rotation section
    data['logging']["default"]["rotation"] = {
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2,
        "localtime": True
    }
    # create a temp config file two levels up from sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
Code Example #8
def test_missing_num_shards(params_from_base_test_setup, sg_conf):
    """
    1. Launch sg_accels missing the following property in the config.
        "num_shards":16
    2. Verify there are 16 shards
    3. Verify they are distributed evenly across the nodes
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_missing_num_shards'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # CBGT REST Admin API endpoint
    admin_api = Admin(cluster.sg_accels[1])
    cbgt_cfg = admin_api.get_cbgt_config()

    # Verify that default number of pindex shards is 16.
    # This may change in the future in which case this test will need to be updated.
    assert cbgt_cfg.num_shards == 16

    # Verify sharding is correct
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=3)
Code Example #9
def test_offline_true_config_bring_online(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    # all db endpoints should fail with 503
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])

    assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
    for error_tuple in errors:
        log_info("({},{})".format(error_tuple[0], error_tuple[1]))
        assert error_tuple[1] == 503

    # Scenario 9
    # POST /db/_online
    status = admin.bring_db_online(db="db")
    assert status == 200

    # all db endpoints should succeed
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0
Code Example #10
def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    cluster_config = params_from_base_suite_setup["cluster_config"]
    mode = params_from_base_suite_setup["mode"]

    test_name = request.node.name
    log_info("Setting up test '{}'".format(test_name))

    # This dictionary is passed to each test
    yield {"cluster_config": cluster_config, "mode": mode}

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    # Capture testkit socket usage
    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # Pull logs if the test failed, --collect-logs was passed, or a node was unreachable
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)

    assert len(errors) == 0
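The --collect-logs flag read via request.config.getoption must be registered somewhere in conftest.py. A minimal sketch of how such an option is typically declared (an assumption, not the testkit's actual conftest):

def pytest_addoption(parser):
    # Allow log collection to be forced even for passing tests
    parser.addoption("--collect-logs", action="store_true",
                     help="Fetch and analyze Sync Gateway logs after every test")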
Code Example #11
def test_muliple_users_single_channel(params_from_base_test_setup,
                                      sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC"])
    adam = admin.register_user(target=sgs[0],
                               db="db",
                               name="adam",
                               password="******",
                               channels=["ABC"])
    traun = admin.register_user(target=sgs[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC"])

    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam, bulk=True)  # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_caches = [seth.cache, adam.cache, traun.cache]
    all_docs = {k: v for cache in all_caches for k, v in cache.items()}

    verify_changes([seth, adam, traun],
                   expected_num_docs=num_docs_seth + num_docs_adam +
                   num_docs_traun,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
Code Example #12
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    doc_seq = user_0_changes["results"][num_docs / 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users, expected_num_docs=num_users * num_docs, expected_num_revisions=num_revisions, expected_docs=all_docs)
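The mode-specific assertions above rely on the shape of the seq values: in distributed index ("di") mode an interleaved changes entry looks like "12313-0::1023.15", while the final entry is a plain "12313-0". A tiny worked example of the split("::") check, with made-up values:

second_to_last_seq = "12313-0::1023.15"  # hypothetical di-mode interleaved sequence
last_seq = "12313-0"                     # hypothetical plain sequence

assert len(second_to_last_seq.split("::")) == 2  # contains the "::" marker
assert len(last_seq.split("::")) == 1            # plain value, no marker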
Code Example #13
def test_bucket_shadow_low_revs_limit_repeated_deletes(params_from_base_test_setup):
    """
    Validate that Sync Gateway doesn't panic (and instead creates a conflict branch
    and prints a warning) after doing the following steps:

    - Set revs_limit to 5
    - Create a doc via SG
    - Issue a delete operation for that doc via SG
    - Repeat step 3 five times (each additional delete creates a new revision in SG, but the delete on the source bucket fails with a 'not found' error, which also means that upstream_rev won't get incremented)
    - Recreate the doc in the source bucket
    See https://github.com/couchbaselabs/sync-gateway-testcluster/issues/291#issuecomment-191521993
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    default_config_path_shadower_low_revs = sync_gateway_config_path_for_mode("sync_gateway_bucketshadow_low_revs", mode)
    default_config_path_non_shadower_low_revs = sync_gateway_config_path_for_mode("sync_gateway_default_low_revs", mode)

    log_info("Running 'test_bucket_shadow_low_revs_limit_repeated_deletes'")
    log_info("Using cluster_config: {}".format(cluster_config))

    cluster = Cluster(config=cluster_config)
    sc = init_shadow_cluster(cluster,
                             default_config_path_shadower_low_revs,
                             default_config_path_non_shadower_low_revs)

    # Write doc into shadower SG
    doc_id = sc.alice_shadower.add_doc()

    # Wait until it gets to source bucket
    get_doc_from_source_bucket_retry(doc_id, sc.source_bucket)

    # Wait until upstream_rev in the _sync metadata is non-empty
    # Otherwise, this will not reproduce a panic
    while True:
        doc = sc.data_bucket.get(doc_id)
        if doc.success:
            if "upstream_rev" in doc.value["_sync"]:
                break
        time.sleep(1)

    # Repeatedly issue a delete operation for that doc via SG
    # Keep adding tombstone revs to the one and only branch
    rev_id_to_delete = None
    for i in xrange(100):
        resp = sc.alice_shadower.delete_doc(doc_id, rev_id_to_delete)
        rev_id_to_delete = resp["rev"]

    # Recreate doc with that ID in the source bucket
    sc.source_bucket.upsert(doc_id, json.loads('{"foo":"bar"}'))

    # Check if SG's are up
    errors = cluster.verify_alive(sc.mode)
    assert len(errors) == 0

    # Restart Shadow SG
    sc.shadower_sg.stop()
    sc.shadower_sg.start(default_config_path_shadower_low_revs)
Code Example #14
def create_sync_gateways(cluster_config, sg_config_path):

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config_path)
    sg1 = cluster.sync_gateways[0]
    sg2 = cluster.sync_gateways[1]

    return sg1, sg2
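Callers simply unpack the two nodes; for example (a sketch, with placeholder paths):

sg1, sg2 = create_sync_gateways(
    cluster_config="resources/cluster_configs/base_di",                             # placeholder
    sg_config_path="resources/sync_gateway_configs/sync_gateway_default_di.json")   # placeholder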
Code Example #15
def create_sync_gateways(cluster_config, sg_config_path):

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_config_path)
    sg1 = cluster.sync_gateways[0]
    sg2 = cluster.sync_gateways[1]

    return sg1, sg2
Code Example #16
def test_db_online_offline_webhooks_offline_two(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):

    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_db_online_offline_webhooks_offline_two'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"
    ws = WebServer()
    ws.start()

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(10)

    status = cluster.servers[0].delete_bucket("data-bucket")
    assert status == 0

    log_info("Sleeping for 120 seconds...")
    time.sleep(120)

    webhook_events = ws.get_data()
    time.sleep(5)
    log_info("webhook event {}".format(webhook_events))
    last_event = webhook_events[-1]
    assert last_event['state'] == 'offline'

    ws.stop()
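The WebServer helper used above is part of the testkit; conceptually it is just an HTTP listener that records the JSON bodies Sync Gateway POSTs for each webhook event, which ws.get_data() then returns. A minimal stand-in to illustrate the idea (an assumption for illustration only, not the testkit implementation):

import json
from http.server import BaseHTTPRequestHandler, HTTPServer

received_events = []

class WebhookHandler(BaseHTTPRequestHandler):
    def do_POST(self):
        # Read the JSON payload posted for each webhook event and remember it
        length = int(self.headers.get("Content-Length", 0))
        received_events.append(json.loads(self.rfile.read(length)))
        self.send_response(200)
        self.end_headers()

# HTTPServer(("0.0.0.0", 8080), WebhookHandler).serve_forever()  # blocking; run in a thread in practice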
Code Example #17
def test_dcp_reshard_single_sg_accel_goes_down_and_up(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # Take down an sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=10000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=10000, expected_num_revisions=0, expected_docs=seth.cache)

    # Start second writer again
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
Code Example #18
def test_sync_channel_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs_per_channel = 100
    channels = ["ABC", "NBC", "CBS"]

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_channel_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    doc_pushers = []
    doc_pusher_caches = []
    # Push some docs to each channel
    for channel in channels:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(channel), password="******", channels=[channel])
        doc_pusher.add_docs(num_docs_per_channel, bulk=True)

        doc_pushers.append(doc_pusher)
        doc_pusher_caches.append(doc_pusher.cache)

    # Verify that none of the doc_pushers get docs. They should all be redirected by the sync function
    verify_changes(doc_pushers, expected_num_docs=0, expected_num_revisions=0, expected_docs={})

    subscriber = admin.register_user(target=cluster.sync_gateways[0], db="db", name="subscriber", password="******", channels=["tv_station_channel"])

    # Allow docs to backfill
    time.sleep(20)

    # subscriber should receive all docs
    all_docs = {k: v for cache in doc_pusher_caches for k, v in cache.items()}
    verify_changes(subscriber, expected_num_docs=len(channels) * num_docs_per_channel, expected_num_revisions=0, expected_docs=all_docs)

    # update subscribers cache so the user knows what docs to update
    subscriber.cache = all_docs
    subscriber.update_docs(num_revs_per_doc=1)

    # Allow docs to backfill
    time.sleep(20)

    # Verify the docs are back in the respective ABC, NBC, CBS channels
    # HACK: Ignoring rev_id verification because the docs were updated by the subscriber user and not the
    # doc_pusher
    for doc_pusher in doc_pushers:
        verify_changes(doc_pusher, expected_num_docs=num_docs_per_channel, expected_num_revisions=1, expected_docs=doc_pusher.cache, ignore_rev_ids=True)

    # Verify that all docs have been flagged with _removed = true in the changes feed for the subscriber
    verify_docs_removed(subscriber, expected_num_docs=len(all_docs.items()), expected_docs=all_docs)
Code Example #19
def test_online_to_offline_longpoll_changes_feed_controlled_close_sanity_mulitple_users(params_from_base_test_setup, sg_conf_name, num_docs, num_users):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_users: {}".format(num_users))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", password="******", number=num_users, channels=["ABC"])

    feed_close_results = list()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        # start longpoll tracking with no timeout, will block until longpoll is closed by db going offline
        futures = {executor.submit(user.start_longpoll_changes_tracking, termination_doc_id=None, timeout=0, loop=False): user.name for user in users}

        time.sleep(5)
        futures[executor.submit(admin.take_db_offline, "db")] = "db_offline_task"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            if task_name == "db_offline_task":
                log_info("DB OFFLINE")
                # make sure db_offline returns 200
                assert future.result() == 200
            if task_name.startswith("user"):
                # Long poll will exit with 503, return docs in the exception
                log_info("POLLING DONE")
                try:
                    docs_in_changes, last_seq_num = future.result()
                    feed_close_results.append((docs_in_changes, last_seq_num))
                except Exception as e:
                    log_info("Longpoll feed close error: {}".format(e))
                    # long poll should be closed so this exception should never happen
                    assert 0

    # Assert that the feed close results length is num_users
    assert len(feed_close_results) == num_users

    # Account for _user doc
    # last_seq may be of the form '1' for channel cache or '1-0' for distributed index
    for feed_result in feed_close_results:
        docs_in_changes = feed_result[0]
        seq_num_component = feed_result[1].split("-")
        assert len(docs_in_changes) == 0
        assert int(seq_num_component[0]) > 0
Code Example #20
def test_single_user_single_channel_doc_updates(params_from_base_test_setup,
                                                sg_conf_name, num_docs,
                                                num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log.info("Running 'single_user_single_channel_doc_updates'")
    log.info("cluster_conf: {}".format(cluster_conf))
    log.info("sg_conf: {}".format(sg_conf))
    log.info("num_docs: {}".format(num_docs))
    log.info("num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    num_docs = num_docs
    num_revisions = num_revisions
    username = "******"
    password = "******"
    channels = ["channel-1"]

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    single_user = admin.register_user(target=sgs[0],
                                      db="db",
                                      name=username,
                                      password=password,
                                      channels=channels)

    # Not using bulk docs
    single_user.add_docs(num_docs, name_prefix="test-")

    assert len(single_user.cache) == num_docs

    # let SG catch up with all the changes
    time.sleep(5)

    single_user.update_docs(num_revisions)

    time.sleep(10)

    verify_changes([single_user],
                   expected_num_docs=num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=single_user.cache)

    end = time.time()
    log.info("TIME:{}s".format(end - start))
Code Example #21
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    admin = Admin(cluster.sync_gateways[0])

    dj_0 = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="dj_0",
                               password="******")

    kdwb_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(radio_station),
            password="******",
            channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        if doc_pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(doc_pusher.cache)

    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                            db="db",
                                            name="access_doc_pusher",
                                            password="******")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build global doc_id, rev dict for all docs from all KDWB caches
    kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()}

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0,
                   expected_num_docs=number_of_docs_per_pusher,
                   expected_num_revisions=0,
                   expected_docs=kdwb_docs)
Code Example #22
def test_log_rotation_invalid_path(params_from_base_test_setup, sg_conf_name):
    """Test to check that SG is not started with invalid logFilePath.
    OS specific case. SG should check if path correct on startup
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set a non-existent logFilePath
    data['logging']["default"]["logFilePath"] = "/12345/1231/131231.log"
    # create a temp config file two levels up from sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
Code Example #23
def test_log_logLevel_invalid(params_from_base_test_setup, sg_conf_name):
    """Run SG with non existing logLevel value
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # 'debugFake' invalid value for logLevel
    data['logging']["default"]["logLevel"] = "debugFake"

    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    # create a temp config file two levels up from sg_conf
    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
Code Example #24
def test_multiple_users_multiple_channels(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_users_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["NBC", "CBS"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])

    # TODO use bulk docs
    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam)  # NBC, CBS
    traun.add_docs(num_docs_traun)  # ABC, NBC, CBS

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Seth should get docs from seth + traun
    seth_subset = [seth.cache, traun.cache]
    seth_expected_docs = {k: v for cache in seth_subset for k, v in cache.items()}
    verify_changes([seth], expected_num_docs=num_docs_seth + num_docs_traun, expected_num_revisions=0, expected_docs=seth_expected_docs)

    # Adam should get docs from adam + traun
    adam_subset = [adam.cache, traun.cache]
    adam_expected_docs = {k: v for cache in adam_subset for k, v in cache.items()}
    verify_changes([adam], expected_num_docs=num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=adam_expected_docs)

    # Traun should get docs from seth + adam + traun
    traun_subset = [seth.cache, adam.cache, traun.cache]
    traun_expected_docs = {k: v for cache in traun_subset for k, v in cache.items()}
    verify_changes([traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=traun_expected_docs)
Code Example #25
def test_log_logKeys_string(params_from_base_test_setup, sg_conf_name):
    """Negative test to verify that we are not able start SG when
    logKeys is string
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set logKeys as string in config file
    data['logging']["default"]["logKeys"] = "http"
    # create a temp config file two levels up from sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG shouldn't be started!!!!")
Code Example #26
def test_pindex_distribution(params_from_base_test_setup, sg_conf):

    # the test itself doesn't have to do anything beyond calling cluster.reset() with the
    # right configuration, since the validation of the cbgt pindex distribution is in the
    # cluster.reset() method itself.

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_pindex_distribution'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
Code Example #27
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # stop sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun, expected_num_docs=2000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=8000, expected_num_revisions=0, expected_docs=seth.cache)

    # Verify that ac1 (the stopped sg_accel) is down but the other nodes are running
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failing node so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[0].start(sg_conf)
    assert start_status == 0
Code Example #28
def test_pindex_distribution(params_from_base_test_setup, sg_conf):

    # the test itself doesn't have to do anything beyond calling cluster.reset() with the
    # right configuration, since the validation of the cbgt pindex distribution is in the
    # cluster.reset() method itself.

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_pindex_distribution'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
Code Example #29
def test_webhooks(params_from_base_test_setup, sg_conf_name, num_users, num_channels, num_docs, num_revisions):

    start = time.time()

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_webhooks'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_channels: {}".format(num_channels))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_conf)

    init_completed = time.time()
    log_info("Initialization completed. Time taken:{}s".format(init_completed - start))

    channels = ["channel-" + str(i) for i in range(num_channels)]
    password = "******"
    ws = WebServer()
    ws.start()

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    # Register User
    log_info("Register User")
    user_objects = admin.register_bulk_users(target=sgs[0], db="db", name_prefix="User",
                                             number=num_users, password=password, channels=channels)

    # Add docs
    log_info("Add docs")
    in_parallel(user_objects, 'add_docs', num_docs)

    # Update docs
    log_info("Update docs")
    in_parallel(user_objects, 'update_docs', num_revisions)
    time.sleep(30)
    ws.stop()
    expected_events = (num_users * num_docs * num_revisions) + (num_users * num_docs)
    received_events = len(ws.get_data())
    log_info("expected_events: {} received_events {}".format(expected_events, received_events))
    assert expected_events == received_events
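The expected event count is one webhook POST per initial write plus one per revision, as the formula above encodes. A quick worked example with made-up parameter values:

num_users, num_docs, num_revisions = 5, 10, 2
# 5 users x 10 docs = 50 create events, plus 5 x 10 x 2 = 100 update events
expected_events = (num_users * num_docs * num_revisions) + (num_users * num_docs)
assert expected_events == 150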
Code Example #30
    def reset_cluster(self, cluster_config, sync_gateway_config):
        """
        1. Stop sync_gateways
        2. Stop sg_accels
        3. Delete sync_gateway artifacts (logs, conf)
        4. Delete sg_accel artifacts (logs, conf)
        5. Delete all server buckets
        6. Create buckets from 'sync_gateway_config'
        7. Wait for server to be in 'healthy' state
        8. Deploy sync_gateway config and start
        9. Deploy sg_accel config and start (distributed index mode only)
        """

        cluster = Cluster(config=cluster_config)
        cluster.reset(sync_gateway_config)
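A hypothetical invocation from a test's setup code; the owning class is not shown above, so treating it as the testkit's ClusterKeywords helper is an assumption, and both paths are placeholders:

cluster_conf = "resources/cluster_configs/base_cc"                        # placeholder
sg_conf = "resources/sync_gateway_configs/sync_gateway_default_cc.json"   # placeholder

cluster_helper = ClusterKeywords()  # assumed owner of reset_cluster
cluster_helper.reset_cluster(cluster_config=cluster_conf,
                             sync_gateway_config=sg_conf)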
Code Example #31
    def reset_cluster(self, cluster_config, sync_gateway_config):
        """
        1. Stop sync_gateways
        2. Stop sg_accels
        3. Delete sync_gateway artifacts (logs, conf)
        4. Delete sg_accel artifacts (logs, conf)
        5. Delete all server buckets
        6. Create buckets from 'sync_gateway_config'
        7. Wait for server to be in 'healthy' state
        8. Deploy sync_gateway config and start
        9. Deploy sg_accel config and start (distributed index mode only)
        """

        cluster = Cluster(config=cluster_config)
        cluster.reset(sync_gateway_config)
Code Example #32
def test_continuous_changes_parametrized(params_from_base_test_setup, sg_conf_name, num_users, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="user", number=num_users, password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = {executor.submit(user.start_continuous_changes_tracking, termination_doc_id="killcontinuous"): user.name for user in users}
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":

                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user's continuous _changes feed is closed, return the docs and verify the user got all the channel docs
                docs_in_changes = future.result()
                # Expect number of docs + the termination doc + _user doc
                verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)
Code Example #33
def test_longpoll_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running: 'longpoll_changes_sanity': {}".format(cluster_conf))
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:

        futures = dict()
        futures[executor.submit(seth.start_longpoll_changes_tracking, termination_doc_id="killpolling")] = "polling"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth long poller
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Allow time for changes to reach subscribers
                time.sleep(5)

                doc_terminator.add_doc("killpolling")
            elif task_name == "polling":
                docs_in_changes, seq_num = future.result()

    # Verify abc_doc_pusher gets the correct docs in the changes feed
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Verify docs from seth's continuous changes are the same as abc_doc_pusher's docs
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
Code Example #34
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0], db="db", name="doc_terminator", password="******", channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = dict()
        futures[executor.submit(seth.start_continuous_changes_tracking, termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs, num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(5)

                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Expect number of docs + the termination doc
    verify_changes(abc_doc_pusher, expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=abc_doc_pusher.cache)

    # Expect number of docs + the termination doc + _user doc
    verify_same_docs(expected_num_docs=num_docs, doc_dict_one=docs_in_changes, doc_dict_two=abc_doc_pusher.cache)
Code Example #35
def test_multiple_db_single_data_bucket_single_index_bucket(params_from_base_test_setup, sg_conf_name, num_users, num_docs_per_user):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_db_unique_data_bucket_unique_index_bucket'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs_per_user: {}".format(num_docs_per_user))

    # 2 dbs share the same data and index bucket
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_db_users = num_users
    num_db2_users = num_users
    num_docs_per_user = num_docs_per_user

    admin = Admin(cluster.sync_gateways[0])

    db_one_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="bulk_db_user", number=num_db_users, password="******", channels=["ABC"])
    db_two_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db2", name_prefix="bulk_db2_user", number=num_db2_users, password="******", channels=["ABC"])

    all_users = list(db_one_users)
    all_users.extend(db_two_users)
    assert len(all_users) == num_db_users + num_db2_users

    # Round robin
    num_sgs = len(cluster.sync_gateways)
    count = 1

    for user in all_users:
        user.add_docs(num_docs_per_user, bulk=True)
        user.target = cluster.sync_gateways[(count + 1) % num_sgs]
        count += 1

    time.sleep(10)

    # Get list of all docs from users caches
    cached_docs_from_all_users = {k: v for user in all_users for k, v in user.cache.items()}

    # Verify each user has all of the docs
    verify_changes(all_users, expected_num_docs=(num_users * 2) * num_docs_per_user, expected_num_revisions=0, expected_docs=cached_docs_from_all_users)
Code Example #36
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    stop_status = cluster.sg_accels[0].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0], db="db", name="traun", password="******", channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******", channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        time.sleep(5)

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 6000)] = "traun"

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 4000)] = "seth"

        # Bring up a sync_gateway accel
        up_status = cluster.sg_accels[0].start(sg_conf)
        assert up_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(60)

    verify_changes(traun, expected_num_docs=6000, expected_num_revisions=0, expected_docs=traun.cache)
    verify_changes(seth, expected_num_docs=4000, expected_num_revisions=0, expected_docs=seth.cache)
Code Example #37
def test_multiple_db_single_data_bucket_single_index_bucket(params_from_base_test_setup, sg_conf_name, num_users, num_docs_per_user):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'multiple_db_unique_data_bucket_unique_index_bucket'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_users: {}".format(num_users))
    log_info("Using num_docs_per_user: {}".format(num_docs_per_user))

    # 2 dbs share the same data and index bucket
    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    num_db_users = num_users
    num_db2_users = num_users
    num_docs_per_user = num_docs_per_user

    admin = Admin(cluster.sync_gateways[0])

    db_one_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db", name_prefix="bulk_db_user", number=num_db_users, password="******", channels=["ABC"])
    db_two_users = admin.register_bulk_users(target=cluster.sync_gateways[0], db="db2", name_prefix="bulk_db2_user", number=num_db2_users, password="******", channels=["ABC"])

    all_users = list(db_one_users)
    all_users.extend(db_two_users)
    assert len(all_users) == num_db_users + num_db2_users

    # Round robin
    num_sgs = len(cluster.sync_gateways)
    count = 1

    for user in all_users:
        user.add_docs(num_docs_per_user, bulk=True)
        user.target = cluster.sync_gateways[(count + 1) % num_sgs]
        count += 1

    time.sleep(10)

    # Get list of all docs from users caches
    cached_docs_from_all_users = {k: v for user in all_users for k, v in user.cache.items()}

    # Verify each user has all of the docs
    verify_changes(all_users, expected_num_docs=(num_users * 2) * num_docs_per_user, expected_num_revisions=0, expected_docs=cached_docs_from_all_users)
Code Example #38
def test_single_user_single_channel_doc_updates(params_from_base_test_setup, sg_conf_name, num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log.info("Running 'single_user_single_channel_doc_updates'")
    log.info("cluster_conf: {}".format(cluster_conf))
    log.info("sg_conf: {}".format(sg_conf))
    log.info("num_docs: {}".format(num_docs))
    log.info("num_revisions: {}".format(num_revisions))

    start = time.time()

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    num_docs = num_docs
    num_revisions = num_revisions
    username = "******"
    password = "******"
    channels = ["channel-1"]

    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])

    single_user = admin.register_user(target=sgs[0], db="db", name=username, password=password, channels=channels)

    # Not using bulk docs
    single_user.add_docs(num_docs, name_prefix="test-")

    assert len(single_user.cache) == num_docs

    # let SG catch up with all the changes
    time.sleep(5)

    single_user.update_docs(num_revisions)

    time.sleep(10)

    verify_changes([single_user], expected_num_docs=num_docs, expected_num_revisions=num_revisions, expected_docs=single_user.cache)

    end = time.time()
    log.info("TIME:{}s".format(end - start))
Code Example #39
def test_log_nondefault_logKeys_set(params_from_base_test_setup, sg_conf_name):
    """Test to verify non default logKeys with any invalid area.
    SG should work even with non existing logging area
    (positive case)
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # "FAKE" not valid area in logging
    data['logging']["default"]["logKeys"] = ["HTTP", "FAKE"]
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # Start sync_gateways
    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)

    # Remove generated conf file
    os.remove(temp_conf)
Code Example #40
def test_single_user_multiple_channels(params_from_base_test_setup,
                                       sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_multiple_channels'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    start = time.time()
    sgs = cluster.sync_gateways

    admin = Admin(sgs[0])
    seth = admin.register_user(target=sgs[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC", "CBS", "NBC", "FOX"])

    # Round robin
    count = 1
    num_sgs = len(cluster.sync_gateways)
    while count <= 5:
        seth.add_docs(1000, bulk=True)
        seth.target = cluster.sync_gateways[count % num_sgs]
        count += 1

    log_info(seth)

    time.sleep(10)

    verify_changes(users=[seth],
                   expected_num_docs=5000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)

    end = time.time()
    log_info("TIME:{}s".format(end - start))
Code Example #41
def test_muliple_users_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'muliple_users_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_docs_seth = 1000
    num_docs_adam = 2000
    num_docs_traun = 3000

    admin = Admin(sgs[0])

    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    adam = admin.register_user(target=sgs[0], db="db", name="adam", password="******", channels=["ABC"])
    traun = admin.register_user(target=sgs[0], db="db", name="traun", password="******", channels=["ABC"])

    seth.add_docs(num_docs_seth)  # ABC
    adam.add_docs(num_docs_adam, bulk=True)  # ABC
    traun.add_docs(num_docs_traun, bulk=True)  # ABC

    assert len(seth.cache) == num_docs_seth
    assert len(adam.cache) == num_docs_adam
    assert len(traun.cache) == num_docs_traun

    # discuss appropriate time with team
    time.sleep(10)

    # Each user should get all docs from all users
    all_caches = [seth.cache, adam.cache, traun.cache]
    all_docs = {k: v for cache in all_caches for k, v in cache.items()}

    verify_changes([seth, adam, traun], expected_num_docs=num_docs_seth + num_docs_adam + num_docs_traun, expected_num_revisions=0, expected_docs=all_docs)
Code Example #42
def test_sync_access_sanity(params_from_base_test_setup, sg_conf_name):

    num_docs = 100

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_access_sanity'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    seth = admin.register_user(target=cluster.sync_gateways[0], db="db", name="seth", password="******")

    # Push some ABC docs
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="abc_doc_pusher", password="******", channels=["ABC"])
    abc_doc_pusher.add_docs(num_docs)

    # Create access doc pusher and grant access Seth to ABC channel
    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******", channels=["access"])
    access_doc_pusher.add_doc(doc_id="access_doc", content={"grant_access": "true"})

    # Allow docs to backfill
    time.sleep(5)

    verify_changes(seth, expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=abc_doc_pusher.cache)

    # Remove seth from ABC
    access_doc_pusher.update_doc(doc_id="access_doc", content={"grant_access": "false"})

    # Push more ABC docs
    abc_doc_pusher.add_docs(num_docs)

    time.sleep(10)

    # Verify seth sees no abc_docs
    verify_changes(seth, expected_num_docs=0, expected_num_revisions=0, expected_docs={})
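The grant and revoke above only work because the database's sync function interprets the access_doc. The real function lives in the Sync Gateway config used by this test; the string below is only an illustrative guess at its shape (the user name, channel, and field names are assumed from the test):

# Hypothetical sync function, embedded as a Python string for illustration only.
ACCESS_SYNC_FUNCTION_SKETCH = """
function (doc, oldDoc) {
    if (doc._id == "access_doc") {
        if (doc.grant_access == "true") {
            // Grant seth access to the ABC channel
            access("seth", "ABC");
        }
        // If grant_access is not "true", no access() call is made, so the
        // grant from the previous revision is dropped when the doc is updated.
    } else {
        channel(doc.channels);
    }
}
"""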
Code Example #43
def test_bulk_get_compression(params_from_base_test_setup, sg_conf_name, num_docs, accept_encoding, x_accept_part_encoding, user_agent):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_bulk_get_compression'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using user_agent: {}".format(user_agent))
    log_info("Using accept_encoding: {}".format(accept_encoding))
    log_info("Using x_accept_part_encoding: {}".format(x_accept_part_encoding))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    user = admin.register_user(cluster.sync_gateways[0], "db", "seth", "password", channels=["seth"])

    doc_body = Data.load("mock_users_20k.json")

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = [executor.submit(user.add_doc, doc_id="test-{}".format(i), content=doc_body) for i in range(num_docs)]
        for future in concurrent.futures.as_completed(futures):
            try:
                log_info(future.result())
            except Exception as e:
                log_info("Failed to push doc: {}".format(e))

    docs = [{"id": "test-{}".format(i)} for i in range(num_docs)]
    payload = {"docs": docs}

    # Issue curl request and get size of request
    response_size = issue_request(cluster.sync_gateways[0], user_agent, accept_encoding, x_accept_part_encoding, payload)
    log_info("Response size: {}".format(response_size))

    # Verify size matches expected size
    verify_response_size(user_agent, accept_encoding, x_accept_part_encoding, response_size)
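issue_request presumably POSTs a _bulk_get with the given compression headers and reports the size of what comes back. A rough equivalent using the requests library (function name, parameters, and auth handling are assumptions, not the testkit helper):

import json

import requests

def bulk_get_response_size(sg_url, db, payload, user_agent, accept_encoding,
                           x_accept_part_encoding, auth):
    # Sketch only: POST a _bulk_get with optional compression headers and
    # return the number of bytes in the response body.
    headers = {"Content-Type": "application/json"}
    if user_agent is not None:
        headers["User-Agent"] = user_agent
    if accept_encoding is not None:
        headers["Accept-Encoding"] = accept_encoding
    if x_accept_part_encoding is not None:
        headers["X-Accept-Part-Encoding"] = x_accept_part_encoding
    resp = requests.post("{}/{}/_bulk_get".format(sg_url, db),
                         data=json.dumps(payload), headers=headers, auth=auth)
    resp.raise_for_status()
    return len(resp.content)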
Code Example #44
def test_bulk_get_compression(params_from_base_test_setup, sg_conf_name, num_docs, accept_encoding, x_accept_part_encoding, user_agent):

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_bulk_get_compression'")
    log_info("Using cluster_config: {}".format(cluster_config))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using user_agent: {}".format(user_agent))
    log_info("Using accept_encoding: {}".format(accept_encoding))
    log_info("Using x_accept_part_encoding: {}".format(x_accept_part_encoding))

    cluster = Cluster(config=cluster_config)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    user = admin.register_user(cluster.sync_gateways[0], "db", "seth", "password", channels=["seth"])

    doc_body = Data.load("mock_users_20k.json")

    with concurrent.futures.ThreadPoolExecutor(max_workers=libraries.testkit.settings.MAX_REQUEST_WORKERS) as executor:
        futures = [executor.submit(user.add_doc, doc_id="test-{}".format(i), content=doc_body) for i in range(num_docs)]
        for future in concurrent.futures.as_completed(futures):
            try:
                log_info(future.result())
            except Exception as e:
                log_info("Failed to push doc: {}".format(e))

    docs = [{"id": "test-{}".format(i)} for i in range(num_docs)]
    payload = {"docs": docs}

    # Issue curl request and get size of request
    response_size = issue_request(cluster.sync_gateways[0], user_agent, accept_encoding, x_accept_part_encoding, payload)
    log_info("Response size: {}".format(response_size))

    # Verify size matches expected size
    verify_response_size(user_agent, accept_encoding, x_accept_part_encoding, response_size)
Code Example #45
def test_sync_sanity_backfill(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'sync_sanity_backfill'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    number_of_docs_per_pusher = 5000

    admin = Admin(cluster.sync_gateways[0])

    dj_0 = admin.register_user(target=cluster.sync_gateways[0], db="db", name="dj_0", password="******")

    kdwb_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="{}_doc_pusher".format(radio_station), password="******", channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        if doc_pusher.name == "KDWB_doc_pusher":
            kdwb_caches.append(doc_pusher.cache)

    access_doc_pusher = admin.register_user(target=cluster.sync_gateways[0], db="db", name="access_doc_pusher", password="******")

    # Grant dj_0 access to KDWB channel via sync after docs are pushed
    access_doc_pusher.add_doc("access_doc", content="access")

    # Build global doc_id, rev dict for all docs from all KDWB caches
    kdwb_docs = {k: v for cache in kdwb_caches for k, v in cache.items()}

    # wait for changes
    time.sleep(5)

    verify_changes(dj_0, expected_num_docs=number_of_docs_per_pusher, expected_num_revisions=0, expected_docs=kdwb_docs)
Code Example #46
def test_single_user_single_channel(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'single_user_single_channel'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    sgs = cluster.sync_gateways

    num_seth_docs = 7000
    num_cbs_docs = 3000

    admin = Admin(sgs[0])
    seth = admin.register_user(target=sgs[0], db="db", name="seth", password="******", channels=["ABC"])
    cbs_user = admin.register_user(target=sgs[0], db="db", name="cbs_user", password="******", channels=["CBS"])
    admin_user = admin.register_user(target=sgs[0], db="db", name="admin", password="******", channels=["ABC", "CBS"])

    seth.add_docs(num_seth_docs)
    cbs_user.add_docs(num_cbs_docs)

    assert len(seth.cache) == num_seth_docs
    assert len(cbs_user.cache) == num_cbs_docs
    assert len(admin_user.cache) == 0

    time.sleep(10)

    verify_changes([seth], expected_num_docs=num_seth_docs, expected_num_revisions=0, expected_docs=seth.cache)
    verify_changes([cbs_user], expected_num_docs=num_cbs_docs, expected_num_revisions=0, expected_docs=cbs_user.cache)

    all_doc_caches = [seth.cache, cbs_user.cache]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes([admin_user], expected_num_docs=num_cbs_docs + num_seth_docs, expected_num_revisions=0, expected_docs=all_docs)
Code Example #47
def test_enable_cbs_ssl_in_cluster_config():

    c = Cluster(MOCK_CLUSTER_CONFIG)
    assert not c.cbs_ssl

    persist_cluster_config_environment_prop(MOCK_CLUSTER_CONFIG,
                                            'cbs_ssl_enabled', True)

    with open(MOCK_CLUSTER_CONFIG + ".json") as f:
        cluster_json = json.loads(f.read())

    assert "environment" in cluster_json
    assert cluster_json["environment"]["cbs_ssl_enabled"]

    c = Cluster(MOCK_CLUSTER_CONFIG)
    assert c.cbs_ssl

    config = ConfigParser.ConfigParser()
    config.read(MOCK_CLUSTER_CONFIG)

    assert config.has_section("environment")
    assert config.getboolean('environment', 'cbs_ssl_enabled')
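persist_cluster_config_environment_prop mutates both representations of the mock cluster config: the ini-style file and its .json twin, which is why the test reads both back. A simplified sketch of what such a helper might do (the actual helper lives in the testkit; names and file layout here are assumptions):

import json

try:
    import configparser                      # Python 3
except ImportError:
    import ConfigParser as configparser      # Python 2, as used elsewhere in these tests

def persist_environment_prop_sketch(cluster_config, prop, value):
    # Sketch only: write an environment property to the ini-style cluster config
    # and keep the JSON representation in sync.
    parser = configparser.ConfigParser()
    parser.read(cluster_config)
    if not parser.has_section("environment"):
        parser.add_section("environment")
    parser.set("environment", prop, str(value))
    with open(cluster_config, "w") as f:
        parser.write(f)

    with open(cluster_config + ".json") as f:
        cluster_json = json.load(f)
    cluster_json.setdefault("environment", {})[prop] = value
    with open(cluster_config + ".json", "w") as f:
        json.dump(cluster_json, f, indent=4)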
Code Example #48
def test_db_delayed_online(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    time.sleep(2)
    status = admin.take_db_offline("db")
    log_info("offline request response status: {}".format(status))
    time.sleep(10)

    pool = ThreadPool(processes=1)

    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Offline"

    async_result = pool.apply_async(admin.bring_db_online, ("db", 15,))
    status = async_result.get(timeout=15)
    log_info("offline request response status: {}".format(status))

    time.sleep(20)

    db_info = admin.get_db_info("db")
    assert db_info["state"] == "Online"

    # all db rest endpoints should succeed
    errors = rest_scan(cluster.sync_gateways[0], db="db", online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
    assert len(errors) == 0
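take_db_offline and bring_db_online wrap Sync Gateway's admin REST endpoints for the database offline/online feature. A hedged sketch with requests, assuming the admin API shape (POST /{db}/_offline, and POST /{db}/_online with an optional delay in the body):

import json

import requests

def take_db_offline_sketch(sg_admin_url, db):
    # Sketch: flip the db offline via the admin port and return the status code
    return requests.post("{}/{}/_offline".format(sg_admin_url, db)).status_code

def bring_db_online_sketch(sg_admin_url, db, delay=0):
    # Sketch: the _online endpoint is assumed to accept an optional delay (seconds)
    return requests.post("{}/{}/_online".format(sg_admin_url, db),
                         data=json.dumps({"delay": delay})).status_code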
Code Example #49
def test_multiple_dbs_unique_buckets_lose_tap(params_from_base_test_setup, sg_conf_name, num_docs):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    dbs = ["db1", "db2", "db3", "db4"]

    # all db rest endpoints should succeed
    for db in dbs:
        errors = rest_scan(cluster.sync_gateways[0], db=db, online=True, num_docs=num_docs, user_name="seth", channels=["ABC"])
        assert len(errors) == 0

    status = cluster.servers[0].delete_bucket("data-bucket-1")
    assert status == 0
    status = cluster.servers[0].delete_bucket("data-bucket-3")
    assert status == 0

    # Check that db2 and db4 are still Online
    for db in ["db2", "db4"]:
        errors = rest_scan(cluster.sync_gateways[0], db=db, online=True, num_docs=num_docs, user_name="adam", channels=["CBS"])
        assert len(errors) == 0

    # Check that db1 and db3 go offline
    for db in ["db1", "db3"]:
        errors = rest_scan(cluster.sync_gateways[0], db=db, online=False, num_docs=num_docs, user_name="seth", channels=["ABC"])
        assert len(errors) == NUM_ENDPOINTS + (num_docs * 2)
        for error_tuple in errors:
            log_info("({},{})".format(error_tuple[0], error_tuple[1]))
            assert error_tuple[1] == 503
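The expected error count, NUM_ENDPOINTS plus two per document, reflects how rest_scan exercises a fixed set of db endpoints and then issues two requests per doc, collecting an (endpoint, status) tuple for every failure. The sketch below only illustrates that counting; the endpoint list and helper shape are assumptions, not the testkit's rest_scan:

import requests

def rest_scan_sketch(base_url, db, num_docs):
    # Sketch only: hit a fixed set of db endpoints plus two requests per doc,
    # recording (path, status_code) for every non-2xx response.
    errors = []
    session = requests.Session()

    def record(method, path, **kwargs):
        resp = session.request(method, "{}/{}{}".format(base_url, db, path), **kwargs)
        if resp.status_code >= 300:
            errors.append((path, resp.status_code))
        return resp

    # Fixed endpoints (NUM_ENDPOINTS of them in the real helper)
    record("GET", "/")
    record("GET", "/_changes")
    record("POST", "/_bulk_get", json={"docs": []})

    # Two requests per doc: create it, then read it back
    for i in range(num_docs):
        record("PUT", "/doc_{}".format(i), json={"index": i})
        record("GET", "/doc_{}".format(i))

    return errors

When the db is offline, every one of these requests should come back 503, which is exactly what the assertions above count.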
Code Example #50
def test_overloaded_channel_cache(params_from_base_test_setup, sg_conf_name, num_docs, user_channels, filter, limit):

    """
    The purpose of this test is to verify that channel cache backfill via view queries is working properly.
    It works by doing the following:

    - Set channel cache size in Sync Gateway config to a small number, eg, 750.  This means that only 750 docs fit in the channel cache
    - Add a large number of docs, eg, 1000.
    - Issue a _changes request that will return all 1000 docs

    Expected behavior / Verification:

    - Since 1000 docs requested from changes feed, but only 750 docs fit in channel cache, then it will need to do a view query
      to get the remaining 250 changes
    - Verify that the changes feed returns all 1000 expected docs
    - Check the expvar statistics to verify that view queries were made
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    if mode == "di":
        pytest.skip("Unsupported feature in distributed index")

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'test_overloaded_channel_cache'")
    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))
    log_info("Using num_docs: {}".format(num_docs))
    log_info("Using user_channels: {}".format(user_channels))
    log_info("Using filter: {}".format(filter))
    log_info("Using limit: {}".format(limit))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    target_sg = cluster.sync_gateways[0]

    admin = Admin(target_sg)

    users = admin.register_bulk_users(target_sg, "db", "user", 1000, "password", [user_channels])
    assert len(users) == 1000

    doc_pusher = admin.register_user(target_sg, "db", "abc_doc_pusher", "password", ["ABC"])
    doc_pusher.add_docs(num_docs, bulk=True)

    # Give a few seconds to let changes register
    time.sleep(2)

    start = time.time()

    # This uses a ProcessPoolExecutor due to https://github.com/couchbaselabs/mobile-testkit/issues/1142
    with concurrent.futures.ProcessPoolExecutor(max_workers=100) as executor:

        changes_requests = []
        errors = []

        for user in users:
            if filter and limit is not None:
                changes_requests.append(executor.submit(user.get_changes, since=0, limit=limit, filter="sync_gateway/bychannel", channels=["ABC"]))
            elif filter and limit is None:
                changes_requests.append(executor.submit(user.get_changes, filter="sync_gateway/bychannel", channels=["ABC"]))
            elif not filter and limit is not None:
                changes_requests.append(executor.submit(user.get_changes, limit=limit))
            elif not filter and limit is None:
                changes_requests.append(executor.submit(user.get_changes))

        for future in concurrent.futures.as_completed(changes_requests):
            changes = future.result()
            if limit is not None:
                assert len(changes["results"]) == 50
            else:
                assert len(changes["results"]) == 5001

        # changes feed should all be successful
        log_info(len(errors))
        assert len(errors) == 0

        if limit is not None:
            # HACK: Should be less than a minute unless blocking on view calls
            end = time.time()
            time_for_users_to_get_all_changes = end - start
            log_info("Time for users to get all changes: {}".format(time_for_users_to_get_all_changes))
            assert time_for_users_to_get_all_changes < 240, "Time to get all changes exceeded the 240 second limit: {}s".format(
                time_for_users_to_get_all_changes
            )

        # Sanity check that a subset of users have _changes feed intact
        for i in range(10):
            verify_changes(users[i], expected_num_docs=num_docs, expected_num_revisions=0, expected_docs=doc_pusher.cache)

        # Get sync_gateway expvars
        resp = requests.get(url="http://{}:4985/_expvar".format(target_sg.ip))
        resp.raise_for_status()
        resp_obj = resp.json()

        # Since Sync Gateway will need to issue view queries to handle _changes requests that don't
        # fit in the channel cache, we expect there to be several view queries
        assert resp_obj["syncGateway_changeCache"]["view_queries"] > 0
Code Example #51
def test_dcp_reshard_sync_gateway_comes_up(params_from_base_test_setup,
                                           sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    stop_status = cluster.sg_accels[0].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        time.sleep(5)

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 6000)] = "traun"

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 4000)] = "seth"

        # Bring up a sync_gateway accel
        up_status = cluster.sg_accels[0].start(sg_conf)
        assert up_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun,
                   expected_num_docs=6000,
                   expected_num_revisions=0,
                   expected_docs=traun.cache)
    verify_changes(seth,
                   expected_num_docs=4000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)
Code Example #52
def test_roles_sanity(params_from_base_test_setup, sg_conf_name):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'roles_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    radio_stations = ["KMOW", "HWOD", "KDWB"]
    tv_stations = ["ABC", "CBS", "NBC"]

    number_of_djs = 10
    number_of_vjs = 10

    number_of_docs_per_pusher = 500

    admin = Admin(cluster.sync_gateways[0])

    admin.create_role("db", name="radio_stations", channels=radio_stations)
    admin.create_role("db", name="tv_stations", channels=tv_stations)

    djs = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                    db="db",
                                    name_prefix="dj",
                                    number=number_of_djs,
                                    password="******",
                                    roles=["radio_stations"])
    vjs = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                    db="db",
                                    name_prefix="vj",
                                    number=number_of_vjs,
                                    password="******",
                                    roles=["tv_stations"])

    mogul = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="mogul",
                                password="******",
                                roles=["tv_stations", "radio_stations"])

    radio_doc_caches = []
    for radio_station in radio_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(radio_station),
            password="******",
            channels=[radio_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        radio_doc_caches.append(doc_pusher.cache)

    radio_docs = {k: v for cache in radio_doc_caches for k, v in cache.items()}

    tv_doc_caches = []
    for tv_station in tv_stations:
        doc_pusher = admin.register_user(
            target=cluster.sync_gateways[0],
            db="db",
            name="{}_doc_pusher".format(tv_station),
            password="******",
            channels=[tv_station])
        doc_pusher.add_docs(number_of_docs_per_pusher, bulk=True)
        tv_doc_caches.append(doc_pusher.cache)

    tv_docs = {k: v for cache in tv_doc_caches for k, v in cache.items()}

    # Verify djs get docs for all the channels associated with the radio_stations role
    expected_num_radio_docs = len(radio_stations) * number_of_docs_per_pusher
    verify_changes(djs,
                   expected_num_docs=expected_num_radio_docs,
                   expected_num_revisions=0,
                   expected_docs=radio_docs)

    # Verify vjs get docs for all the channels associated with the tv_stations role
    expected_num_tv_docs = len(tv_stations) * number_of_docs_per_pusher
    verify_changes(vjs,
                   expected_num_docs=expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=tv_docs)

    # Verify mogul gets docs for all the channels associated with the radio_stations + tv_stations roles
    all_docs_caches = list(radio_doc_caches)
    all_docs_caches.extend(tv_doc_caches)
    all_docs = {k: v for cache in all_docs_caches for k, v in cache.items()}
    verify_changes(mogul,
                   expected_num_docs=expected_num_radio_docs +
                   expected_num_tv_docs,
                   expected_num_revisions=0,
                   expected_docs=all_docs)
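admin.create_role presumably maps onto Sync Gateway's admin role endpoint. A hedged sketch with requests, assuming PUT /{db}/_role/{name} with an admin_channels list in the body (helper name and URL handling are assumptions):

import json

import requests

def create_role_sketch(sg_admin_url, db, name, channels):
    # Sketch: create a role whose admin_channels grant its members those channels
    resp = requests.put("{}/{}/_role/{}".format(sg_admin_url, db, name),
                        data=json.dumps({"admin_channels": channels}))
    resp.raise_for_status()
    return resp.status_code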
Code Example #53
def test_seq(params_from_base_test_setup, sg_conf_name, num_users, num_docs,
             num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running seq")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)
    admin = Admin(cluster.sync_gateways[0])

    # all users will share docs due to having the same channel
    users = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                      db="db",
                                      name_prefix="user",
                                      number=num_users,
                                      password="******",
                                      channels=["ABC"])

    for user in users:
        user.add_docs(num_docs, bulk=True)

    for user in users:
        user.update_docs(num_revisions)

    time.sleep(5)

    user_0_changes = users[0].get_changes(since=0)
    doc_seq = user_0_changes["results"][num_docs // 2]["seq"]

    # https://github.com/couchbase/sync_gateway/issues/1475#issuecomment-172426052
    # verify you can issue _changes with since=12313-0::1023.15
    for user in users:
        changes = user.get_changes(since=doc_seq)
        log_info("Trying changes with since={}".format(doc_seq))
        assert len(changes["results"]) > 0

        second_to_last_doc_entry_seq = changes["results"][-2]["seq"]
        last_doc_entry_seq = changes["results"][-1]["seq"]

        log_info('Second to last doc "seq": {}'.format(
            second_to_last_doc_entry_seq))
        log_info('Last doc "seq": {}'.format(last_doc_entry_seq))

        if mode == "di":
            # Verify last "seq" follows the format 12313-0, not 12313-0::1023.15
            log_info('Verify that the last "seq" is a plain hashed value')
            assert len(second_to_last_doc_entry_seq.split("::")) == 2
            assert len(last_doc_entry_seq.split("::")) == 1
        elif mode == "cc":
            assert second_to_last_doc_entry_seq > 0
            assert last_doc_entry_seq > 0
        else:
            raise ValueError("Unsupported 'mode' !!")

    all_doc_caches = [user.cache for user in users]
    all_docs = {k: v for cache in all_doc_caches for k, v in cache.items()}
    verify_changes(users,
                   expected_num_docs=num_users * num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=all_docs)
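The di-mode assertions above only check for the presence or absence of the "::" separator in the sequence value (e.g. 12313-0::1023.15 versus 12313-0). A tiny helper, purely for illustration and not part of the test suite:

def split_compound_seq(seq):
    # Illustration only: return the part before "::" and, if present, the part after.
    parts = seq.split("::")
    return parts[0], parts[1] if len(parts) == 2 else None

# split_compound_seq("12313-0::1023.15") -> ("12313-0", "1023.15")
# split_compound_seq("12313-0")          -> ("12313-0", None)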
Code Example #54
def test_continuous_changes_sanity(params_from_base_test_setup, sg_conf_name,
                                   num_docs, num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_sanity'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="abc_doc_pusher",
                                         password="******",
                                         channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="doc_terminator",
                                         password="******",
                                         channels=["TERMINATE"])

    docs_in_changes = dict()

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = dict()
        futures[executor.submit(
            seth.start_continuous_changes_tracking,
            termination_doc_id="killcontinuous")] = "continuous"
        futures[executor.submit(abc_doc_pusher.add_docs,
                                num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # Send termination doc to seth continuous changes feed subscriber
            if task_name == "doc_pusher":
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                time.sleep(5)

                doc_terminator.add_doc("killcontinuous")
            elif task_name == "continuous":
                docs_in_changes = future.result()

    # Verify abc_doc_pusher's docs and revisions show up in the changes feed
    verify_changes(abc_doc_pusher,
                   expected_num_docs=num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=abc_doc_pusher.cache)

    # Verify the docs seth received over the continuous changes feed match abc_doc_pusher's cache
    verify_same_docs(expected_num_docs=num_docs,
                     doc_dict_one=docs_in_changes,
                     doc_dict_two=abc_doc_pusher.cache)
Code Example #55
def test_detect_stale_channel_index(params_from_base_test_setup, sg_conf):
    """
    1. Bring up single Sync Gateway node, backed by Couchbase Server with 3 accels indexing
    2. Configure such that the primary bucket and the channel index bucket are different (which is the norm)
    3. Add 1000 documents
    4. Shutdown Sync Gateway
    5. Delete / create the primary bucket ('data-bucket'), but do not touch the channel index bucket
    6. Start Sync Gateway
    7. Assert that sync_gateway fails to start due to stale channel index
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_detect_stale_channel_index'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]
    sg_db = "db"
    num_docs = 1000

    cb_server = couchbaseserver.CouchbaseServer(url=cb_server_url)
    client = MobileRestClient()

    # Create doc pusher user
    doc_pusher_user_info = userinfo.UserInfo(name="doc_pusher",
                                             password="******",
                                             channels=["NASA"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    # Add some docs to Sync Gateway to cause indexing
    docs = document.create_docs(None,
                                number=num_docs,
                                channels=doc_pusher_user_info.channels)
    pushed_docs = client.add_bulk_docs(url=sg_url,
                                       db=sg_db,
                                       docs=docs,
                                       auth=doc_pusher_auth)
    assert len(pushed_docs) == num_docs

    # Shut down sync_gateway
    sg_util = SyncGateway()
    sg_util.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)

    # Delete server bucket
    cb_server.delete_bucket(name="data-bucket")

    # Create server bucket
    ram_per_bucket_mb = cb_server.get_ram_per_bucket(num_buckets=2)
    cb_server.create_bucket(name="data-bucket", ram_quota_mb=ram_per_bucket_mb)

    # Start sync_gateway and assert that a Provisioning error is raised due to detecting stale index
    with pytest.raises(exceptions.ProvisioningError):
        sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                    url=sg_url,
                                    config=sg_conf)

    # TODO: To make this check even more accurate, could
    # run remote ssh command "systemctl status sync_gateway.service" and look for
    # regex pattern: Main PID: 7185 (code=exited, status=2)

    # Delete index bucket and recreate it
    cb_server.delete_bucket(name="index-bucket")
    cb_server.create_bucket(name="index-bucket",
                            ram_quota_mb=ram_per_bucket_mb)

    # Start sync gateway, should succeed now
    sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                url=sg_url,
                                config=sg_conf)
Code Example #56
def test_take_all_sgaccels_down(params_from_base_test_setup, sg_conf):
    """
    Scenario that takes all sync_gateway accel nodes offline during doc load.
    After bringing the nodes back online during the load, the reshard of the DCP feed is verified.
    The changes feed is then checked to verify that all docs show up.

    1. Start doc load (1000 doc)
    2. Take all sg_accel nodes down in parallel
    3. Verify node are down
    4. Wait for doc adds to complete, store "doc_push_result_1"
    5. Verify "doc_push_result_1" docs added
    6. Start doc load (1000 docs)
    7. Wait for 6. to complete, store "doc_push_result_2"
    8. Verify "doc_push_result_2" docs added
    9. Start another doc load (1000 docs)
    10. Bring up nodes in parallel
    11. poll on p-index reshard
    12. Wait for 9. to complete, store "doc_push_result_3"
    13. Verify "doc_push_result_3" docs added
    14. Verify "doc_push_result_1" + "doc_push_result_2" + "doc_push_result_3" show up in _changes feed
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 1000

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher",
                                             "pass",
                                             channels=["A"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    a_user_info = userinfo.UserInfo("a_user", "pass", channels=["A"], roles=[])
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=a_user_info.name,
                       password=a_user_info.password,
                       channels=a_user_info.channels)
    a_user_session = client.create_session(url=sg_admin_url,
                                           db=sg_db,
                                           name=a_user_info.name,
                                           password=a_user_info.password)

    # Shutdown all accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:

        # Start adding docs
        docs_1 = document.create_docs(None,
                                      num_docs,
                                      channels=doc_pusher_user_info.channels)
        docs_1_task = ex.submit(client.add_bulk_docs,
                                url=sg_url,
                                db=sg_db,
                                docs=docs_1,
                                auth=doc_pusher_auth)

        # Take down all access nodes
        log_info("Shutting down sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[0].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_3 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0
        assert sg_accel_down_task_3.result() == 0

        # Block until bulk_docs is complete
        doc_push_result_1 = docs_1_task.result()
        assert len(doc_push_result_1) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=doc_push_result_1,
                                   auth=doc_pusher_auth)

        # Load sync_gateway with another batch of docs while the sg_accel nodes are offline
        docs_2_bodies = document.create_docs(
            None, num_docs, channels=doc_pusher_user_info.channels)
        docs_push_result_2 = client.add_bulk_docs(url=sg_url,
                                                  db=sg_db,
                                                  docs=docs_2_bodies,
                                                  auth=doc_pusher_auth)
        assert len(docs_push_result_2) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=docs_push_result_2,
                                   auth=doc_pusher_auth)

        # Start loading Sync Gateway with another set of docs while bringing the sg_accel nodes online
        docs_3 = document.create_docs(None,
                                      num_docs,
                                      channels=doc_pusher_user_info.channels)
        docs_3_task = ex.submit(client.add_bulk_docs,
                                url=sg_url,
                                db=sg_db,
                                docs=docs_3,
                                auth=doc_pusher_auth)

        # Bring all the sg_accel nodes back up
        log_info("Starting sg_accels: [{}, {}, {}] ...".format(
            cluster.sg_accels[0], cluster.sg_accels[1], cluster.sg_accels[2]))
        sg_accel_up_task_1 = ex.submit(cluster.sg_accels[0].start, sg_conf)
        sg_accel_up_task_2 = ex.submit(cluster.sg_accels[1].start, sg_conf)
        sg_accel_up_task_3 = ex.submit(cluster.sg_accels[2].start, sg_conf)
        assert sg_accel_up_task_1.result() == 0
        assert sg_accel_up_task_2.result() == 0
        assert sg_accel_up_task_3.result() == 0

        # Wait for pindex to reshard correctly
        assert cluster.validate_cbgt_pindex_distribution_retry(3)

        # Block until second bulk_docs is complete
        doc_push_result_3 = docs_3_task.result()
        assert len(doc_push_result_3) == num_docs
        client.verify_docs_present(url=sg_url,
                                   db=sg_db,
                                   expected_docs=doc_push_result_3,
                                   auth=doc_pusher_auth)

    # Combine the 3 push results and make sure the changes propagate to a_user
    # a_user has access to the doc's channel.
    log_info("Verifying all the changes show up for 'a_user' ...")
    all_docs = doc_push_result_1 + docs_push_result_2 + doc_push_result_3
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=all_docs,
                                  auth=a_user_session,
                                  polling_interval=2)
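validate_cbgt_pindex_distribution_retry polls until the pindexes are spread across the expected number of running accel nodes. The generic retry wrapper below is only a sketch of that polling pattern; the attempt count and interval are assumptions:

import time

def retry_until_true(check, attempts=10, interval_sec=10):
    # Sketch: call check() until it returns True or the attempts run out.
    for _ in range(attempts):
        if check():
            return True
        time.sleep(interval_sec)
    return False

# Hypothetical usage mirroring the assertion above:
# assert retry_until_true(lambda: cluster.validate_cbgt_pindex_distribution(3))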
Code Example #57
def test_dcp_reshard_sync_gateway_goes_down(params_from_base_test_setup,
                                            sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_sync_gateway_goes_down'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    mode = cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 2000, bulk=True)] = "traun"

        # stop sg_accel
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO better way to do this
    time.sleep(120)

    verify_changes(traun,
                   expected_num_docs=2000,
                   expected_num_revisions=0,
                   expected_docs=traun.cache)
    verify_changes(seth,
                   expected_num_docs=8000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)

    # Verify that the sg1 is down but the other sync_gateways are running
    errors = cluster.verify_alive(mode)
    assert len(errors) == 1 and errors[0][0].hostname == "ac1"

    # Restart the failing node so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[0].start(sg_conf)
    assert start_status == 0
Code Example #58
def test_take_down_bring_up_sg_accel_validate_cbgt(params_from_base_test_setup,
                                                   sg_conf):
    """
    Scenario 1

    Start with 3 sg_accels
    Take down 2 sg_accels (block until down -- poll port if needed)
    Doc adds with uuids (~30 sec for cbgt to reshard)
    polling loop: wait for all docs to come back over changes feed
    Call validate pindex with correct number of accels

    Scenario 2 (Continuation)

    When bringing up, you'd have to poll the cbgt_cfg until you get expected number of nodes,
    then you could validate the pindex with 2 accels
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    client = MobileRestClient()

    doc_pusher_user_info = userinfo.UserInfo("doc_pusher",
                                             "pass",
                                             channels=["A"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    log_info("Shutting down sg_accels: [{}, {}]".format(
        cluster.sg_accels[1], cluster.sg_accels[2]))
    # Shutdown two accel nodes in parallel
    with concurrent.futures.ThreadPoolExecutor(max_workers=3) as ex:
        sg_accel_down_task_1 = ex.submit(cluster.sg_accels[1].stop)
        sg_accel_down_task_2 = ex.submit(cluster.sg_accels[2].stop)
        assert sg_accel_down_task_1.result() == 0
        assert sg_accel_down_task_2.result() == 0

    log_info("Finished taking nodes down!")

    # It should take some time (~30 seconds) for cbgt to pick up the failed nodes and reshard the pindexes.
    # During this window, add 1000 docs and start a longpoll changes loop to see if those docs make it to the
    # changes feed. If the reshard is successful they will show up at some point; if not, the docs will fail to show up.
    doc_pusher_docs = client.add_docs(url=sg_url,
                                      db=sg_db,
                                      number=1000,
                                      id_prefix=None,
                                      auth=doc_pusher_auth,
                                      channels=doc_pusher_user_info.channels)
    assert len(doc_pusher_docs) == 1000
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=doc_pusher_docs,
                                  auth=doc_pusher_auth,
                                  polling_interval=5)

    # The pindexes should be resharded at this point since all of the changes have shown up
    assert cluster.validate_cbgt_pindex_distribution(num_running_sg_accels=1)

    log_info("Start sg_accels: [{}, {}]".format(cluster.sg_accels[1],
                                                cluster.sg_accels[2]))

    # Bring the accel nodes back one at a time, validating the pindex distribution after each start
    status = cluster.sg_accels[1].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the second accel node back (2 accels running)
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=2)

    status = cluster.sg_accels[2].start(sg_conf)
    assert status == 0

    # Poll on pindex reshard after bringing the third accel node back (3 accels running)
    assert cluster.validate_cbgt_pindex_distribution_retry(
        num_running_sg_accels=3)
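For reference, verify_docs_in_changes above keeps polling the user's _changes feed until every pushed doc id has appeared. A minimal sketch of that style of polling check is shown below; it is only illustrative, assuming a (username, password) basic-auth tuple and the standard Sync Gateway _changes endpoint, and the doc_ids/timeout/interval parameters are made up rather than the testkit's actual signature.

import time

import requests


def wait_for_docs_in_changes(sg_url, db, auth, doc_ids, timeout=300, interval=5):
    # Illustrative sketch only -- not the MobileRestClient implementation.
    # Poll the normal _changes feed until all expected doc ids have shown up.
    missing = set(doc_ids)
    deadline = time.time() + timeout
    while True:
        resp = requests.get("{}/{}/_changes".format(sg_url, db), auth=auth)
        resp.raise_for_status()
        missing -= {change["id"] for change in resp.json()["results"]}
        if not missing:
            return
        if time.time() > deadline:
            raise AssertionError("Docs never appeared in _changes: {}".format(missing))
        time.sleep(interval)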
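validate_cbgt_pindex_distribution_retry is the polling step described in Scenario 2 of the docstring: keep checking the cbgt pindex distribution until it reflects the expected number of running accel nodes. A rough sketch of such a retry wrapper, assuming only a boolean cluster.validate_cbgt_pindex_distribution(num_running_sg_accels) check (the timeout and interval values are invented for illustration), might look like this:

import time


def validate_pindex_distribution_with_retry(cluster, num_running_sg_accels,
                                            timeout=120, interval=5):
    # Illustrative sketch only -- retry the pindex distribution check until it
    # passes or the deadline expires.
    deadline = time.time() + timeout
    while time.time() < deadline:
        if cluster.validate_cbgt_pindex_distribution(num_running_sg_accels):
            return True
        time.sleep(interval)
    return False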
Code example #59
0
def test_continuous_changes_parametrized(params_from_base_test_setup,
                                         sg_conf_name, num_users, num_docs,
                                         num_revisions):

    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Running 'continuous_changes_parametrized'")
    log_info("cluster_conf: {}".format(cluster_conf))
    log_info("sg_conf: {}".format(sg_conf))
    log_info("num_users: {}".format(num_users))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revisions: {}".format(num_revisions))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    admin = Admin(cluster.sync_gateways[0])
    users = admin.register_bulk_users(target=cluster.sync_gateways[0],
                                      db="db",
                                      name_prefix="user",
                                      number=num_users,
                                      password="******",
                                      channels=["ABC", "TERMINATE"])
    abc_doc_pusher = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="abc_doc_pusher",
                                         password="******",
                                         channels=["ABC"])
    doc_terminator = admin.register_user(target=cluster.sync_gateways[0],
                                         db="db",
                                         name="doc_terminator",
                                         password="******",
                                         channels=["TERMINATE"])

    with concurrent.futures.ThreadPoolExecutor(max_workers=100) as executor:

        futures = {
            executor.submit(user.start_continuous_changes_tracking,
                            termination_doc_id="killcontinuous"): user.name
            for user in users
        }
        futures[executor.submit(abc_doc_pusher.add_docs,
                                num_docs)] = "doc_pusher"

        for future in concurrent.futures.as_completed(futures):
            task_name = futures[future]

            # The doc pusher finished: update the docs, then send the termination doc to the continuous changes feed subscribers
            if task_name == "doc_pusher":

                errors = future.result()
                assert len(errors) == 0
                abc_doc_pusher.update_docs(num_revs_per_doc=num_revisions)

                # Give the doc updates time to propagate before sending the termination doc
                time.sleep(10)

                doc_terminator.add_doc("killcontinuous")
            elif task_name.startswith("user"):
                # When the user's continuous _changes feed is closed, return the docs and verify
                # the user got all of the channel docs (a sketch of such a tracker follows this example)
                docs_in_changes = future.result()
                # The feed also carries the termination doc and the _user doc; only the channel docs are compared here
                verify_same_docs(expected_num_docs=num_docs,
                                 doc_dict_one=docs_in_changes,
                                 doc_dict_two=abc_doc_pusher.cache)

    # Verify the doc pusher sees all of its channel docs with the expected number of revisions
    verify_changes(abc_doc_pusher,
                   expected_num_docs=num_docs,
                   expected_num_revisions=num_revisions,
                   expected_docs=abc_doc_pusher.cache)
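Each user's start_continuous_changes_tracking call above follows its _changes feed until the termination doc lands in the TERMINATE channel. The sketch below shows one way such a tracker could be written against Sync Gateway's longpoll _changes feed; the function name, the basic-auth tuple, and the parameters are assumptions for illustration, not the testkit's API.

import requests


def track_changes_until_terminated(sg_url, db, auth, termination_doc_id):
    # Illustrative sketch only: follow _changes with longpoll, collecting doc ids,
    # and stop once the termination doc shows up on the feed.
    seen = []
    since = 0
    while True:
        resp = requests.get(
            "{}/{}/_changes".format(sg_url, db),
            params={"feed": "longpoll", "since": since},
            auth=auth)
        resp.raise_for_status()
        body = resp.json()
        since = body["last_seq"]
        for change in body["results"]:
            seen.append(change["id"])
            if change["id"] == termination_doc_id:
                return seen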
Code example #60
0
def test_dcp_reshard_single_sg_accel_goes_down_and_up(
        params_from_base_test_setup, sg_conf):

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_dcp_reshard_single_sg_accel_goes_down_and_up'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Stop the second sg_accel
    stop_status = cluster.sg_accels[1].stop()
    assert stop_status == 0, "Failed to stop sg_accel"

    admin = Admin(cluster.sync_gateways[0])

    traun = admin.register_user(target=cluster.sync_gateways[0],
                                db="db",
                                name="traun",
                                password="******",
                                channels=["ABC", "NBC", "CBS"])
    seth = admin.register_user(target=cluster.sync_gateways[0],
                               db="db",
                               name="seth",
                               password="******",
                               channels=["FOX"])

    log_info(">> Users added")

    with concurrent.futures.ThreadPoolExecutor(max_workers=5) as executor:

        futures = dict()

        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 8000)] = "seth"

        log_info(">>> Adding Traun docs")  # ABC, NBC, CBS
        futures[executor.submit(traun.add_docs, 10000, bulk=True)] = "traun"

        # Take down the remaining sg_accel while doc adds are in flight (no writers will be online)
        shutdown_status = cluster.sg_accels[0].stop()
        assert shutdown_status == 0

        # Add more docs while no writers are online
        log_info(">>> Adding Seth docs")  # FOX
        futures[executor.submit(seth.add_docs, 2000, bulk=True)] = "seth"

        # Start a single writer
        start_status = cluster.sg_accels[0].start(sg_conf)
        assert start_status == 0

        for future in concurrent.futures.as_completed(futures):
            tag = futures[future]
            log_info("{} Completed:".format(tag))

    # TODO: find a better way to do this than a fixed sleep -- a polling alternative is sketched after this example
    time.sleep(300)

    verify_changes(traun,
                   expected_num_docs=10000,
                   expected_num_revisions=0,
                   expected_docs=traun.cache)
    verify_changes(seth,
                   expected_num_docs=10000,
                   expected_num_revisions=0,
                   expected_docs=seth.cache)

    # Restart the second sg_accel so that cluster verification does not blow up in test teardown
    start_status = cluster.sg_accels[1].start(sg_conf)
    assert start_status == 0
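The fixed time.sleep(300) above waits for the write backlog to be indexed once the accel comes back. One possible alternative, sketched under the assumption that verify_changes (imported from the testkit helpers, as in the tests above) raises AssertionError while the changes feed is still catching up, is to retry the verification against a deadline instead of sleeping for a fixed amount; the helper name and timings below are hypothetical.

import time


def verify_changes_with_retry(user, expected_num_docs, expected_num_revisions,
                              expected_docs, timeout=300, interval=10):
    # Illustrative sketch only: poll verify_changes instead of sleeping a fixed
    # amount, re-raising the last failure if the deadline expires.
    deadline = time.time() + timeout
    while True:
        try:
            verify_changes(user,
                           expected_num_docs=expected_num_docs,
                           expected_num_revisions=expected_num_revisions,
                           expected_docs=expected_docs)
            return
        except AssertionError:
            if time.time() > deadline:
                raise
            time.sleep(interval)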