Example #1
def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    cluster_config = params_from_base_suite_setup["cluster_config"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]

    test_name = request.node.name
    log_info("Setting up test '{}'".format(test_name))

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled
    }

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # if the test failed or a node is down, pull logs
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)

    assert len(errors) == 0
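
# Note: request.node.rep_call is not a built-in pytest attribute. It is normally attached
# to the test item by a pytest_runtest_makereport hookwrapper in conftest.py. A minimal
# sketch of such a hook (the standard pytest recipe; the exact conftest contents here are
# an assumption, not shown in this example):
import pytest


@pytest.hookimpl(hookwrapper=True, tryfirst=True)
def pytest_runtest_makereport(item, call):
    # Let the remaining hooks run and grab the resulting report
    outcome = yield
    rep = outcome.get_result()
    # Attach one report per phase ("setup", "call", "teardown") to the item,
    # so fixtures can later check request.node.rep_call.failed
    setattr(item, "rep_" + rep.when, rep)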
Example #2
def params_from_base_test_setup(request, params_from_base_suite_setup):

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    test_name = request.node.name
    log_info("Setting up test '{}'".format(test_name))

    cluster_config = params_from_base_suite_setup["cluster_config"]
    mode = params_from_base_suite_setup["mode"]

    yield {"cluster_config": cluster_config, "mode": mode}

    log_info("Tearing down test '{}'".format(test_name))

    # Capture testkit socket usage
    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # if the test failed or a node is down, pull logs
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)

    assert len(errors) == 0
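
# NetworkUtils.list_connections() above is purely diagnostic. A minimal sketch of an
# equivalent socket-usage check using psutil (an assumption; the repo's helper may shell
# out to netstat instead):
import psutil


def log_open_connections():
    # Count the inet sockets currently open on this machine and how many are established;
    # useful for spotting connection leaks between tests
    conns = psutil.net_connections(kind="inet")
    established = [c for c in conns if c.status == psutil.CONN_ESTABLISHED]
    print("open inet sockets: {}, established: {}".format(len(conns), len(established)))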
def test_no_conflicts_enabled(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary : Enable no conflicts and verify conflicts are not created
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #3
    Steps:
    1. Enable allow_conflicts = false in SG config
    2. Add docs to SG.
    3. Update the docs a few times.
    4. Try to create a conflict.
    5. Check the revision list for the doc
    6. Verify no conflicts can be created.
    7. Sync Gateway should respond with a 409 upon trying to add a conflict
    8. Should be possible to add more than 1 revision (Default revs_limit = 1000)
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    sg_db = "db"

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs a few times
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=3, delay=None, auth=autouser_session, channels=channels)

    # 4. Try to create a conflict
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo",
                                   auth=autouser_session)
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 8. Verify more revisions can still be added: update the docs 1 more time (default revs_limit = 1000)
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None, auth=autouser_session, channels=channels)
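
# The 409 asserted above comes from Sync Gateway rejecting a forced revision when conflicts
# are disallowed. add_conflict presumably issues a PUT with new_edits=false; a rough sketch
# of that request using requests (URL layout and auth handling are assumptions, not the
# MobileRestClient implementation):
import requests


def put_conflicting_rev(sg_url, db, doc_id, parent_rev, new_rev, auth):
    # Force a specific revision onto the doc's revision tree. With new_edits=false,
    # Sync Gateway accepts the supplied _revisions history verbatim; when allow_conflicts
    # is false it responds with "409 Conflict" instead.
    body = {
        "_revisions": {
            "start": int(new_rev.split("-")[0]),
            "ids": [new_rev.split("-")[1], parent_rev.split("-")[1]]
        },
        "forced_update": True
    }
    resp = requests.put(
        "{}/{}/{}".format(sg_url, db, doc_id),
        json=body,
        params={"new_edits": "false"},
        auth=auth  # assumed to be a (username, password) tuple here
    )
    resp.raise_for_status()  # raises HTTPError starting with '409 Client Error: Conflict for url:'
    return resp.json()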
def test_revs_cache_size(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary Test for no-conflicts with rev_cache size
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #18
    Steps:
    Note: the SG config has rev_cache_size set to 1000; keep the number of docs below 1000
    so the test behaves as expected.
    1. Add docs to SG.
    2. Get the docs
    3. Verify the number of rev_cache hits equals the number of docs retrieved, provided rev_cache_size exceeds the number of docs.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    retrieved_docs = num_of_docs / 2

    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]

    if sync_gateway_version < "2.0":
        pytest.skip('This test does not work with Sync Gateway < 2.0, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Get the first half of the docs
    for i in range(retrieved_docs):
        doc = sg_docs[i]
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)

    # 4. Verify the number of revision cache hits equals the number of retrieved docs
    exp_vars = sg_client.get_expvars(url=sg_admin_url)
    revision_cache_hits = exp_vars["syncGateway_stats"]["revisionCache_hits"]
    revision_cache_misses = exp_vars["syncGateway_stats"]["revisionCache_misses"]
    assert revision_cache_hits == retrieved_docs, "Revision cache hits did not match the expected number {}".format(retrieved_docs)
    assert revision_cache_misses == 0, "Revision Cache misses is not 0"
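
# get_expvars above reads the Go expvar counters that Sync Gateway publishes on its admin
# port. A minimal sketch of fetching the same counters directly; the _expvar path and the
# pre-2.1 "syncGateway_stats" layout mirror the keys used in the assertions above:
import requests


def get_rev_cache_counters(sg_admin_url):
    # The admin REST API exposes Go expvars as JSON; pull out the revision cache hit/miss counters
    resp = requests.get("{}/_expvar".format(sg_admin_url))
    resp.raise_for_status()
    stats = resp.json()["syncGateway_stats"]
    return stats["revisionCache_hits"], stats["revisionCache_misses"]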
Example #5
def test_invalid_revs_limit_with_allow_conflicts(params_from_base_test_setup,
                                                 sg_conf_name, revs_limit):
    """ @summary Verify all borders of revs limit
    Test case in Excel sheet : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    Covered Test case #2
    Steps:
    - Add a doc
    - Have allow_conflicts to true in sg config
    - Put revs_limit=1 or any number lower than 20 and restart sync-gateway
    - Verify it fails
    - change revs_limit=20 and start sync-gateway
    - Verify it starts without any error
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip(
            '--no-conflicts is enabled, this test needs to create conflicts, so skipping the test'
        )

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    clust = cluster.Cluster(cluster_config)
    clust.reset(sg_conf)
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config,
                                            'revs_limit',
                                            revs_limit,
                                            property_name_check=False)
    status = clust.sync_gateways[0].restart(config=sg_conf,
                                            cluster_config=temp_cluster_config)
    assert status != 0, "Syncgateway started with revs limit 1 when no conflicts disabled"

    # Now change the revs_limit to 20
    revs_limit = 20
    persist_cluster_config_environment_prop(temp_cluster_config,
                                            'revs_limit',
                                            revs_limit,
                                            property_name_check=False)
    status = clust.sync_gateways[0].restart(config=sg_conf,
                                            cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after revs_limit changed to 20"
Example #6
def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]

    test_name = request.node.name
    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "cluster_topology": cluster_topology,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled,
        "server_seed_docs": params_from_base_suite_setup["server_seed_docs"],
        "max_docs": params_from_base_suite_setup["max_docs"],
        "num_users": params_from_base_suite_setup["num_users"],
        "create_batch_size": params_from_base_suite_setup["create_batch_size"],
        "create_delay": params_from_base_suite_setup["create_delay"],
        "update_runtime_sec": params_from_base_suite_setup["update_runtime_sec"],
        "update_batch_size": params_from_base_suite_setup["update_batch_size"],
        "update_docs_percentage": params_from_base_suite_setup["update_docs_percentage"],
        "update_delay": params_from_base_suite_setup["update_delay"],
        "changes_delay": params_from_base_suite_setup["changes_delay"],
        "changes_limit": params_from_base_suite_setup["changes_limit"]
    }

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # if the test failed or a node is down, pull logs
    logging_helper = Logging()
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)

    assert len(errors) == 0

    # Scan logs
    # SG logs for panic, data race
    # System logs for OOM
    ansible_runner = AnsibleRunner(cluster_config)
    script_name = "{}/utilities/check_logs.sh".format(os.getcwd())
    status = ansible_runner.run_ansible_playbook(
        "check-logs.yml", extra_vars={"script_name": script_name})

    if status != 0:
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                              test_name=test_name)
        raise LogScanningError("Errors found in the logs")
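
# check-logs.yml runs a shell script on each host to scan for panics, data races and OOM
# kills. A rough pure-Python sketch of the same idea; the log directory layout and the
# patterns are assumptions, not the contents of utilities/check_logs.sh:
import io
import os


def scan_logs_for_errors(log_dir, patterns=("panic", "DATA RACE", "Out of memory")):
    # Walk the pulled log directory and collect any line matching a suspicious pattern
    hits = []
    for root, _, files in os.walk(log_dir):
        for name in files:
            path = os.path.join(root, name)
            with io.open(path, "r", errors="ignore") as fh:
                for line in fh:
                    if any(p in line for p in patterns):
                        hits.append((path, line.strip()))
    return hits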
Example #7
def test_upgrade_cbl(setup_client_syncgateway_test):
    """
    1. Install a previous version of Couchbase Lite.
    2. Create docs in the Lite database.
    3. Upgrade to the latest version of Couchbase Lite.
    4. Verify the docs still exist and are accessible.
    """
    sg_db = "db"
    ls_db = "ls_db"
    channels = ["auto"]

    num_docs = 30000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]
    liteserv = setup_client_syncgateway_test["liteserv"]
    device_enabled = setup_client_syncgateway_test["device_enabled"]
    liteserv_platform = setup_client_syncgateway_test["liteserv_platform"]
    liteserv_version = setup_client_syncgateway_test["liteserv_version"]

    if liteserv_platform.lower() == "android" or liteserv_platform.lower() == "net-msft" or device_enabled:
        # There is a signature mismatch issue on Android, and the older iOS version has no app built for devices
        pytest.skip('upgrading the LiteServ app does not work on Android and there is no iOS device app for the'
                    ' older version, so skipping the test')
    sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))
    test_name = "test_upgrade"

    # Download 1.4.0-3. The older version is not parametrized here because other versions are not expected to work,
    # so it is hardcoded rather than adding a new parameter
    log_info("Downloading older version of LiteServ ...")
    liteserv.download(version_build="1.4.0-3")

    # Install LiteServ
    if device_enabled and liteserv_platform == "ios":
        liteserv.stop()
        liteserv.install_device()
        ls_url = liteserv.start_device("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__,
                                                                     test_name, datetime.datetime.now()))
    else:
        liteserv.stop()
        liteserv.install()
        log_info("Liteserv starts now ....")
        ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__,
                                                              test_name, datetime.datetime.now()))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "test", password="******", channels=channels)
    session = client.create_session(sg_one_admin, sg_db, "test")

    client.create_database(url=ls_url, name=ls_db)

    # Create 'num_docs' docs on LiteServ
    docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="seeded_doc",
                           generator="four_k", channels=channels)
    assert len(docs) == num_docs
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)

    with ThreadPoolExecutor(max_workers=2) as tpe:
        download_cbl_tpe = tpe.submit(download_install, liteserv, liteserv_version)
        client.start_replication(url=ls_url, continuous=True, from_db=ls_db, to_url=sg_one_admin,
                                 to_db=sg_db)
        download_cbl_tpe.result()
        log_info("download is done")

    liteserv.stop()
    ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__,
                                                          test_name, datetime.datetime.now()))

    client.start_replication(url=ls_url, continuous=True, from_db=ls_db, to_url=sg_one_admin,
                             to_db=sg_db)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)
    client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session, timeout=600)
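
# download_install is submitted to the executor above but is not shown in this example.
# A minimal sketch, under the assumption that it simply downloads and installs the
# requested LiteServ build while the replication runs:
def download_install(liteserv, version):
    # Mirror the install flow used earlier in the test: fetch the target build,
    # then reinstall LiteServ so the upgraded version is ready to start
    liteserv.download(version_build=version)
    liteserv.install()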
Example #8
def params_from_base_suite_setup(request):
    log_info("Setting up 'params_from_base_suite_setup' ...")

    # pytest command line parameters
    server_version = request.config.getoption("--server-version")
    sync_gateway_version = request.config.getoption("--sync-gateway-version")
    mode = request.config.getoption("--mode")
    skip_provisioning = request.config.getoption("--skip-provisioning")
    race_enabled = request.config.getoption("--race")
    cbs_ssl = request.config.getoption("--server-ssl")
    xattrs_enabled = request.config.getoption("--xattrs")
    sg_lb = request.config.getoption("--sg-lb")
    sg_ce = request.config.getoption("--sg-ce")
    use_sequoia = request.config.getoption("--sequoia")
    no_conflicts_enabled = request.config.getoption("--no-conflicts")

    if xattrs_enabled and version_is_binary(sync_gateway_version):
        check_xattr_support(server_version, sync_gateway_version)

    log_info("server_version: {}".format(server_version))
    log_info("sync_gateway_version: {}".format(sync_gateway_version))
    log_info("mode: {}".format(mode))
    log_info("skip_provisioning: {}".format(skip_provisioning))
    log_info("race_enabled: {}".format(race_enabled))
    log_info("cbs_ssl: {}".format(cbs_ssl))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))
    log_info("sg_lb: {}".format(sg_lb))
    log_info("sg_ce: {}".format(sg_ce))
    log_info("no conflicts enabled {}".format(no_conflicts_enabled))

    # sg-ce is invalid for di mode
    if mode == "di" and sg_ce:
        raise FeatureSupportedError(
            "SGAccel is only available as an enterprise edition")

    if no_conflicts_enabled and sync_gateway_version < "2.0":
        raise FeatureSupportedError(
            'The no-conflicts feature is not available for Sync Gateway versions below 2.0'
        )

    # Make sure mode for sync_gateway is supported ('cc' or 'di')
    validate_sync_gateway_mode(mode)

    # use load_balancer_cc cluster config if mode is "cc" or load_balancer_di cluster config if mode is "di"
    cluster_config = "{}/load_balancer_{}".format(
        keywords.constants.CLUSTER_CONFIGS_DIR, mode)
    sg_config = sync_gateway_config_path_for_mode(
        "sync_gateway_default_functional_tests", mode)

    # Add load balancer prop and check if load balancer IP is available
    if sg_lb:
        persist_cluster_config_environment_prop(cluster_config,
                                                'sg_lb_enabled', True)
        log_info("Running tests with load balancer enabled: {}".format(
            get_load_balancer_ip(cluster_config)))
    else:
        log_info("Running tests with load balancer disabled")
        persist_cluster_config_environment_prop(cluster_config,
                                                'sg_lb_enabled', False)

    if cbs_ssl:
        log_info("Running tests with cbs <-> sg ssl enabled")
        # Enable ssl in cluster configs
        persist_cluster_config_environment_prop(cluster_config,
                                                'cbs_ssl_enabled', True)
    else:
        log_info("Running tests with cbs <-> sg ssl disabled")
        # Disable ssl in cluster configs
        persist_cluster_config_environment_prop(cluster_config,
                                                'cbs_ssl_enabled', False)

    if xattrs_enabled:
        log_info("Running test with xattrs for sync meta storage")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', True)
    else:
        log_info("Using document storage for sync meta data")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', False)

    try:
        server_version
    except NameError:
        log_info("Server version is not provided")
        persist_cluster_config_environment_prop(cluster_config,
                                                'server_version', "")
    else:
        log_info("Running test with server version {}".format(server_version))
        persist_cluster_config_environment_prop(cluster_config,
                                                'server_version',
                                                server_version)

    try:
        sync_gateway_version
    except NameError:
        log_info("Sync gateway version is not provided")
        persist_cluster_config_environment_prop(cluster_config,
                                                'sync_gateway_version', "")
    else:
        log_info("Running test with sync_gateway version {}".format(
            sync_gateway_version))
        persist_cluster_config_environment_prop(cluster_config,
                                                'sync_gateway_version',
                                                sync_gateway_version)

    if no_conflicts_enabled:
        log_info("Running with no conflicts")
        persist_cluster_config_environment_prop(cluster_config,
                                                'no_conflicts_enabled', True)
    else:
        log_info("Running with allow conflicts")
        persist_cluster_config_environment_prop(cluster_config,
                                                'no_conflicts_enabled', False)

    if sync_gateway_version < "2.0.0" and no_conflicts_enabled:
        pytest.skip(
            "Test cannot run with no-conflicts with sg version < 2.0.0")

    # Skip provisioning if user specifies '--skip-provisioning' or '--sequoia'
    should_provision = True
    if skip_provisioning or use_sequoia:
        should_provision = False

    cluster_utils = ClusterKeywords()
    if should_provision:
        try:
            cluster_utils.provision_cluster(
                cluster_config=cluster_config,
                server_version=server_version,
                sync_gateway_version=sync_gateway_version,
                sync_gateway_config=sg_config,
                race_enabled=race_enabled,
                sg_ce=sg_ce)
        except ProvisioningError:
            logging_helper = Logging()
            logging_helper.fetch_and_analyze_logs(
                cluster_config=cluster_config, test_name=request.node.name)
            raise

    # Hit the installed, running services to verify the correct versions are installed
    cluster_utils.verify_cluster_versions(
        cluster_config,
        expected_server_version=server_version,
        expected_sync_gateway_version=sync_gateway_version)

    yield {"cluster_config": cluster_config, "mode": mode}

    log_info("Tearing down 'params_from_base_suite_setup' ...")

    # Stop all sync_gateways and sg_accels once the suite has finished
    c = cluster.Cluster(cluster_config)
    c.stop_sg_and_accel()
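
# The version checks above (e.g. sync_gateway_version < "2.0") compare version strings
# lexicographically, which works for the versions exercised here but would misorder a
# value like "10.0". A sketch of a safer numeric comparison, assuming versions shaped
# like "1.5.0" or "1.5.0-477" (this helper is illustrative and not part of the repo):
def version_tuple(version):
    # "1.5.0-477" -> (1, 5, 0); the build suffix after '-' is ignored
    return tuple(int(part) for part in version.split("-")[0].split("."))


assert version_tuple("2.0.0") > version_tuple("1.5.0-477")
assert version_tuple("10.0.0") > version_tuple("2.0.0")  # a plain string compare would get this wrong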
Example #9
def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    # pytest command line parameters
    collect_logs = request.config.getoption("--collect-logs")

    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]
    sg_lb = params_from_base_suite_setup["sg_lb"]
    no_conflicts_enabled = params_from_base_suite_setup["no_conflicts_enabled"]
    sync_gateway_version = params_from_base_suite_setup["sync_gateway_version"]

    test_name = request.node.name

    if sg_lb:
        # These tests target one SG node
        skip_tests = ['resync', 'log_rotation', 'openidconnect']
        for test in skip_tests:
            if test in test_name:
                pytest.skip("Skipping online/offline tests with load balancer")

    # Certain tests are disabled for certain modes
    # Given the run conditions, check if the test needs to be skipped
    skip_if_unsupported(
        sync_gateway_version=params_from_base_suite_setup["sync_gateway_version"],
        mode=mode,
        test_name=test_name,
        no_conflicts_enabled=no_conflicts_enabled
    )

    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "cluster_topology": cluster_topology,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled,
        "no_conflicts_enabled": no_conflicts_enabled,
        "sync_gateway_version": sync_gateway_version
    }

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # if the test failed or a node is down, pull logs
    if collect_logs or request.node.rep_call.failed or len(errors) != 0:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config, test_name=test_name)

    assert len(errors) == 0
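
# skip_if_unsupported is called above but is not shown in this example. A minimal sketch
# of such a guard; only the no-conflicts rule is grounded in the suite setup shown earlier
# (no-conflicts requires Sync Gateway >= 2.0), the real mode- and test-specific rules live
# in the repo and are omitted here:
import pytest


def skip_if_unsupported(sync_gateway_version, mode, test_name, no_conflicts_enabled):
    # The no-conflicts feature only exists from Sync Gateway 2.0 onwards
    if no_conflicts_enabled and sync_gateway_version < "2.0":
        pytest.skip("no-conflicts requires Sync Gateway >= 2.0, skipping '{}'".format(test_name))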
def test_no_conflicts_with_revs_limit(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """ @summary Enable no conflicts and  with non default revs_limit and verify revs_limit is maintained
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #4, #5
    Steps:
    1. Enable allow_conflicts = false in SG config with parametrized revs_limit
    2. Add docs to SG.
    3. Update docs more than revs_limit.
    4. Create a conflict and verify it fails.
    5. Get number of revisions and verify length is equal to revs_limit set to
    6. Update the docs 1 more time
    7. Get number of revisions and verify number of revisions should be same as revs_limit
    8. Verify previous revisions does not exist
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Enable allow_conflicts = false in SG config with revs_limit
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit 1 with no conflicts mode"

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs a few times
    prev_revs = []
    for i in xrange(revs_limit + 5):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Try to create a conflict
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo",
                                   auth=autouser_session)
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 5. Get the number of revisions and verify it equals the revs_limit that was set
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the sg config"
        for i in xrange(4):
            assert prev_revs[i] not in num_of_revs

    # 6. Update the docs 1 more time
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None, auth=autouser_session, channels=channels)

    # 7. Get the number of revisions and verify it is still equal to revs_limit
    # 8. Verify the previous revisions no longer exist
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the sg config"
        for i in xrange(5):
            assert prev_revs[i] not in num_of_revs
def test_migrate_conflicts_delete_last_rev(params_from_base_test_setup, sg_conf_name, num_of_docs):
    """ @summary Migrate conflicts to no-conflicts mode and delete last revision and verify revisions exists in open revisions
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #19
    Steps:
    1. Start sg with default(i.e allow_conflicts=true)
    2. Add docs to SG.
    3. Update docs few times .
    4. Create a conflicts and verify it is successful.
    5. Modify sg config by enabling allow_conflicts to false
    6. restart sg.
    7. Delete doc by revision of current active open revision
    8. Verify all revisions in history exists in open revisions which got at step 4.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    sg_db = "db"

    if no_conflicts_enabled or sync_gateway_version < "2.0":
        pytest.skip('Skipping: this test requires conflicts to be allowed and Sync Gateway >= 2.0')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    # 1. Start sg with default(i.e allow_conflicts=true)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs a few times
    prev_revs = []
    for i in xrange(5):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None, auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Create conflicts and verify they succeed
    for doc in sg_docs:
        conflicted_rev = sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo",
                                                auth=autouser_session)
        assert conflicted_rev["rev"] == "2-foo"
    # Capture the open revisions for each doc (keyed by doc id); these are verified again after the restart
    open_revs_by_doc = {}
    for doc in sg_docs:
        open_revs_by_doc[doc["id"]] = sg_client.get_open_revs_ids(url=sg_url, db=sg_db, doc_id=doc["id"], rev="2-foo", auth=autouser_session)
    time.sleep(5)
    # 5. Enable allow_conflicts = false in SG config and 6. restart sg
    revs_limit = 2
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'no_conflicts_enabled', "True", property_name_check=False)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, auth=autouser_session, channels=channels)

    # 7. Delete each doc at its current active (winning) revision
    for doc in sg_docs:
        doc_body = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc["id"], rev=doc_body["_rev"], auth=autouser_session)

    # 8. Verify all revisions in the history exist in the open revisions captured at step 4
    for doc in sg_docs:
        num_of_revs_history = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        for rev in num_of_revs_history:
            assert rev in open_revs_by_doc[doc["id"]], "Expected revision does not exist in open revisions"
def test_concurrent_updates_no_conflicts(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """@summary Test with concurrent updates with no conflicts enabled
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #15
    Steps:
    1. Start sg with some revs_limit specified
    2. Add docs to SG.
    3. Update docs a few times via SG.
    4. Update docs a few times via the SDK, concurrently with SG.
        -> Conflict errors may occur on either side; handle them appropriately
    5. Update docs a number of times.
    6. Verify the default number of revisions is maintained.
    7. Verify previous revisions are pruned and revisions are maintained according to revs_limit
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    if revs_limit is None:
        revs_limit = 1000
    additional_updates = revs_limit
    total_updates = revs_limit + additional_updates
    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # 1. Start sg
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')

    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # Connect to server via SDK
    log_info('Connecting to bucket ...')
    bucket_name = 'data-bucket'
    cbs_url = topology['couchbase_servers'][0]
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******', timeout=SDK_TIMEOUT)
    sg_doc_ids = [doc['id'] for doc in sg_docs]
    sdk_docs_resp = sdk_client.get_multi(sg_doc_ids)

    # Update the same documents concurrently from a Sync Gateway client and an SDK client
    with ThreadPoolExecutor(max_workers=9) as tpe:

        update_from_sdk_task = tpe.submit(sdk_bulk_update, sdk_client, sdk_docs_resp, 10)
        update_from_sg_task = tpe.submit(sg_doc_updates, sg_client, sg_url=sg_url, sg_db=sg_db, sg_docs=sg_docs, number_updates=10,
                                         auth=autouser_session, channels=channels)

        update_from_sg_task.result()
        update_from_sdk_task.result()

    # 3. Update the docs a few times
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Verify it can maintain default revisions.
    # 5. Verify previous revisions deleted.
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the sg config"
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs
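
# sdk_bulk_update and sg_doc_updates are submitted to the executor above but are not shown
# in this example. Minimal sketches under the assumption that each simply loops its client's
# update call (couchbase SDK 2.x get_multi returns a dict-like of doc id -> result with the
# body available on .value):
def sg_doc_updates(sg_client, sg_url, sg_db, sg_docs, number_updates, auth, channels):
    # Push the requested number of updates through the Sync Gateway REST API
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=number_updates,
                          auth=auth, channels=channels)


def sdk_bulk_update(sdk_client, sdk_docs, num_of_updates):
    # Rewrite each doc body via the SDK, bumping a counter so every pass is a real mutation
    for i in range(num_of_updates):
        for doc_id, result in sdk_docs.items():
            doc_body = result.value
            doc_body["sdk_update_count"] = i + 1
            sdk_client.upsert(doc_id, doc_body)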
def test_migrate_conflicts_to_noConflicts(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """ @summary Migrating from no conflicts false to true
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #6, #7, #8
    Steps:
    1. Start SG with the default config (i.e. allow_conflicts=true)
    2. Add docs to SG.
    3. Update docs a few times.
    4. Create conflicts and verify they succeed.
    5. Modify the SG config, setting allow_conflicts to false
    6. Restart SG.
    7. Try to create a conflict and verify it throws a 409.
    8. Update docs a number of times.
    9. Verify the default number of revisions is maintained.
    10. Verify the previous revisions are pruned.
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sync_gateway_version = params_from_base_test_setup["sync_gateway_version"]
    sg_db = "db"

    if revs_limit is None:
        revs_limit = 1000
    additional_updates = revs_limit

    if no_conflicts_enabled or sync_gateway_version < "2.0":
        pytest.skip('Skipping: this test requires conflicts to be allowed and Sync Gateway >= 2.0')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    # 1. Start sg with default(i.e allow_conflicts=true)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix='sg_docs', number=num_of_docs,
                                        attachments_generator=attachment.generate_2_png_10_10, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs a few times
    prev_revs = []
    for i in xrange(revs_limit):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Create conflicts and verify they succeed
    for doc in sg_docs:
        conflicted_rev = sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo",
                                                auth=autouser_session)
        assert conflicted_rev["rev"] == "2-foo"

    # 5. Enable allow_conflicts = false in SG config and 6. restart sg
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'no_conflicts_enabled', "True", property_name_check=False)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after no conflicts is enabled"
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, auth=autouser_session, channels=channels)

    # 7. Create a conflict and verify conflict throws 409.
    for doc in sg_docs:
        with pytest.raises(HTTPError) as he:
            sg_client.add_conflict(url=sg_url, db=sg_db, doc_id=doc["id"], parent_revisions=doc["rev"], new_revision="2-foo1",
                                   auth=autouser_session)
        assert he.value.message.startswith('409 Client Error: Conflict for url:')

    # 8. Update the docs a number of times
    update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=additional_updates,
                                           auth=autouser_session, channels=channels)

    # 9. Verify it can maintain default revisions.
    # 10. Verify previous revisions deleted.
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the sg config"
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs
def test_conflicts_sg_accel_added(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit, additional_updates):
    """ @summary Verify no conflicts feature works with sg accel down
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #12, #16, #21
    Steps:
    1. Enable allow_conflicts = false in SG config with revs_limit
    2. Add docs to SG.
    3. Update the docs few times and get all revisions of updates
    4. Get the number of revisions and verify it equals the revs_limit that was set
    5. Start sg accel
    6. Update the docs with few updates
    7. Get the number of revisions and verify it is equivalent to the revs_limit that was set
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    sg_db = "db"
    total_updates = revs_limit + additional_updates
    new_updates = 2

    if not no_conflicts_enabled or mode != "di":
        pytest.skip('--no-conflicts is not enabled or mode is not di, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)
    c.sg_accels[0].stop()

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Enable allow_conflicts = false in SG config with revs_limit
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit  with no conflicts mode"

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs a few times and get all revisions of the updates
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Get the number of revisions and verify it equals the revs_limit that was set
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the sg config"
        for i in xrange(additional_updates):
            assert prev_revs[i] not in num_of_revs

    # 5. Start sg accel
    status = c.sg_accels[0].start(config=sg_conf)
    assert status == 0, "sync_gateway accel did not start"

    # 6. Update the docs with few updates
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=new_updates, delay=None, auth=autouser_session, channels=channels)

    # 7. Get the number of revisions and verify it is equivalent to the revs_limit that was set
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match the revs_limit set in the sg config"
        for i in xrange(additional_updates + new_updates):
            assert prev_revs[i] not in num_of_revs
Example #15
def test_winning_conflict_branch_revisions(params_from_base_test_setup,
                                           sg_conf_name):
    """ Add winning conflict revisions to the revision tree and ensure
    that the changes feed returns the correct revisions

    Steps:
    - Add a doc ('test_doc')
    - Add 5 revs to 'test_doc'
    - POST _changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - Append 5 revisions to the conflicting branch
        (3-foo with 2-foo as parent, 4-foo with 3-foo as parent ... 7-foo with 6-foo as parent)
    - GET 'test_doc' and verify that the rev is '7-foo'
    - POST _changes, assert returns 7-foo
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip(
            '--no-conflicts is enabled, this test needs to create conflicts, so skipping the test'
        )

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo(name="seth",
                                       password="******",
                                       channels=["NATGEO"],
                                       roles=[])

    seth_auth = client.create_user(url=sg_admin_url,
                                   db=sg_db,
                                   name=seth_user_info.name,
                                   password=seth_user_info.password,
                                   channels=seth_user_info.channels)

    test_doc_body = document.create_doc(doc_id="test_doc",
                                        channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url,
                                   db=sg_db,
                                   doc=test_doc_body,
                                   auth=seth_auth)

    rev_gen_6_doc = client.update_doc(url=sg_url,
                                      db=sg_db,
                                      doc_id=rev_gen_1_doc["id"],
                                      number_updates=5,
                                      auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Wait until doc shows up in changes feed
    last_seq = 0
    start = time.time()
    while True:

        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError(
                "Timed out waiting for the doc to show up in the changes feed")

        changes_1 = client.get_changes(url=sg_url,
                                       db=sg_db,
                                       since=last_seq,
                                       auth=seth_auth,
                                       skip_user_docs=True)
        last_seq = changes_1["last_seq"]
        if len(changes_1["results"]) > 0 and changes_1["results"][0][
                "changes"][0]["rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_1_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"],
        new_revision="2-foo",
        auth=seth_auth)

    # Update the conflicting branch 5x
    rev_gen = 3
    for _ in range(5):
        rev_gen_1_doc_conflict = client.add_conflict(
            url=sg_url,
            db=sg_db,
            doc_id=rev_gen_1_doc["id"],
            parent_revisions=rev_gen_1_doc_conflict["rev"],
            new_revision="{}-foo".format(rev_gen),
            auth=seth_auth)
        rev_gen += 1

    # Wait until the doc shows up in the changes feed, starting from the last_seq where the previous loop left off
    start = time.time()
    while True:

        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError(
                "Timed out waiting for the '7-foo' rev to show up in the changes feed")

        changes_2 = client.get_changes(url=sg_url,
                                       db=sg_db,
                                       since=last_seq,
                                       auth=seth_auth)
        last_seq = changes_2["last_seq"]
        if changes_2["results"][0]["changes"][0]["rev"] == "7-foo" and len(
                changes_2["results"]) > 0:
            break

    # Verify that the "7-foo" rev is returned on the changes feed
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"] == "7-foo"
Example #16
def test_non_winning_revisions(params_from_base_test_setup, sg_conf_name):
    """ Add non-winning revisions to the revision tree and ensure
    that the changes feed returns the correct revisions

    Steps:
    - Add a doc
    - Add 5 revs
    - changes, assert rev starts with "6-" from 0, store "last_seq_1"
    - Create a conflict off first revision ("2-foo") (POST docs, new_edits == false)
    - changes, assert rev starts with "6-" from "last_seq_1", store "last_seq_2"
    - changes, assert rev starts with "6-" from 0
    - Add a "3-foo" rev with rev "2-foo" as parent
    - changes, assert rev starts with "6-" from "last_seq_2", store "last_seq_3"
    - changes, assert rev starts with "6-" from 0
    - add tombstone rev as child of "6-" i.e. issue delete on ("6-")
    - changes, assert rev starts with "3-foo" from "last_seq_3"
    - changes, assert rev starts with "3-foo" from 0
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip(
            '--no-conflicts is enabled, this test needs to create conflicts, so skipping the test'
        )

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    client = MobileRestClient()

    seth_user_info = userinfo.UserInfo(name="seth",
                                       password="******",
                                       channels=["NATGEO"],
                                       roles=[])

    seth_auth = client.create_user(url=sg_admin_url,
                                   db=sg_db,
                                   name=seth_user_info.name,
                                   password=seth_user_info.password,
                                   channels=seth_user_info.channels)

    test_doc_body = document.create_doc(doc_id="test_doc",
                                        channels=seth_user_info.channels)
    rev_gen_1_doc = client.add_doc(url=sg_url,
                                   db=sg_db,
                                   doc=test_doc_body,
                                   auth=seth_auth)

    rev_gen_6_doc = client.update_doc(url=sg_url,
                                      db=sg_db,
                                      doc_id=rev_gen_1_doc["id"],
                                      number_updates=5,
                                      auth=seth_auth)
    assert rev_gen_6_doc["rev"].startswith("6-")

    # Get changes until rev generation 6 document shows up
    start = time.time()
    last_seq = 0
    while True:
        if time.time() - start > keywords.constants.CLIENT_REQUEST_TIMEOUT:
            raise keywords.exceptions.TimeoutError(
                "Timed out waiting for the doc to show up in the changes feed")

        changes_1 = client.get_changes(url=sg_url,
                                       db=sg_db,
                                       since=last_seq,
                                       auth=seth_auth,
                                       skip_user_docs=True)
        last_seq = changes_1["last_seq"]

        # break when expected rev shows up in changes feed
        if changes_1["results"] and changes_1["results"][0]["changes"][0][
                "rev"].startswith("6-"):
            break

    assert len(changes_1["results"]) == 1
    assert changes_1["results"][0]["id"] == "test_doc"
    assert changes_1["results"][0]["changes"][0]["rev"].startswith("6-")

    # Create a conflict off of rev one
    rev_gen_2_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_1_doc["id"],
        parent_revisions=rev_gen_1_doc["rev"],
        new_revision="2-foo",
        auth=seth_auth)
    assert rev_gen_2_doc_conflict["id"] == "test_doc"
    assert rev_gen_2_doc_conflict["rev"] == "2-foo"

    # Issue changes since changes_1 last_seq above
    changes_2 = client.get_changes(url=sg_url,
                                   db=sg_db,
                                   since=changes_1["last_seq"],
                                   auth=seth_auth)
    assert len(changes_2["results"]) == 1
    assert changes_2["results"][0]["id"] == "test_doc"
    assert changes_2["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_one = client.get_changes(url=sg_url,
                                            db=sg_db,
                                            since=0,
                                            auth=seth_auth,
                                            skip_user_docs=True)
    assert len(changes_from_0_one["results"]) == 1
    assert changes_from_0_one["results"][0]["id"] == "test_doc"
    assert changes_from_0_one["results"][0]["changes"][0]["rev"].startswith(
        "6-")

    # Create a 3-foo rev with 2-foo as the parent
    rev_gen_3_doc_conflict = client.add_conflict(
        url=sg_url,
        db=sg_db,
        doc_id=rev_gen_2_doc_conflict["id"],
        parent_revisions=rev_gen_2_doc_conflict["rev"],
        new_revision="3-foo",
        auth=seth_auth)
    assert rev_gen_3_doc_conflict["id"] == "test_doc"
    assert rev_gen_3_doc_conflict["rev"] == "3-foo"

    # Issue changes since changes_2 last_seq above
    changes_3 = client.get_changes(url=sg_url,
                                   db=sg_db,
                                   since=changes_2["last_seq"],
                                   auth=seth_auth)
    assert len(changes_3["results"]) == 1
    assert changes_3["results"][0]["id"] == "test_doc"
    assert changes_3["results"][0]["changes"][0]["rev"].startswith("6-")

    # Issue changes since 0, strip user doc and make sure the doc is still the '6-' rev
    changes_from_0_two = client.get_changes(url=sg_url,
                                            db=sg_db,
                                            since=0,
                                            auth=seth_auth,
                                            skip_user_docs=True)
    assert len(changes_from_0_two["results"]) == 1
    assert changes_from_0_two["results"][0]["id"] == "test_doc"
    assert changes_from_0_two["results"][0]["changes"][0]["rev"].startswith(
        "6-")

    # Delete test_doc at rev 6-*
    client.delete_doc(url=sg_url,
                      db=sg_db,
                      doc_id=rev_gen_6_doc["id"],
                      rev=rev_gen_6_doc["rev"],
                      auth=seth_auth)

    # Issue changes since changes_3 last_seq above
    changes_4 = client.get_changes(url=sg_url,
                                   db=sg_db,
                                   since=changes_3["last_seq"],
                                   auth=seth_auth)
    assert len(changes_4["results"]) == 1
    assert changes_4["results"][0]["id"] == "test_doc"
    assert changes_4["results"][0]["changes"][0]["rev"] == "3-foo"

    # Issue a one-shot changes request since changes_4 last_seq and assert no results are returned
    changes_5 = client.get_changes(url=sg_url,
                                   db=sg_db,
                                   since=changes_4["last_seq"],
                                   feed="normal",
                                   auth=seth_auth)
    assert len(changes_5["results"]) == 0
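# A hedged sketch of the REST call behind client.add_conflict in the test above:
# with new_edits=false the server stores the supplied revision id verbatim and
# attaches it to the given parent instead of generating a new rev. The endpoint
# shape and auth handling below are assumptions for illustration, not the
# framework's actual helper.
import requests


def put_conflicting_rev(sg_url, sg_db, doc_id, parent_rev, new_rev, auth):
    new_gen, new_digest = new_rev.split("-")
    parent_digest = parent_rev.split("-")[1]
    body = {
        "value": "conflicting content",
        # _revisions lists the new rev's digest first, then its ancestry, so the
        # server can attach new_rev as a child of parent_rev
        "_revisions": {"start": int(new_gen), "ids": [new_digest, parent_digest]},
    }
    resp = requests.put(
        "{}/{}/{}".format(sg_url, sg_db, doc_id),
        json=body,
        params={"new_edits": "false"},
        auth=auth,
    )
    resp.raise_for_status()
    return resp.json()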
def test_auto_prune_listener_keeps_conflicts_sanity(
        setup_client_syncgateway_test):
    """"
    1. Create db on LiteServ and add docs
    2. Create db on sync_gateway and add docs with the same id
    3. Create one shot push / pull replication
    4. Update LiteServ 50 times
    5. Assert that pruned conflict is still present
    6. Delete the current revision and check that a GET returns the old conflict as the current rev
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()

    sg_config = sync_gateway_config_path_for_mode(
        "listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 100
    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=sg_user_name,
                       password="******",
                       channels=sg_user_channels)

    sg_session = client.create_session(url=sg_admin_url,
                                       db=sg_db,
                                       name=sg_user_name)
    ls_db = client.create_database(url=ls_url, name=ls_db)

    # Create docs with the same id on both sides so the one-shot replication produces conflicts
    ls_db_docs = client.add_docs(url=ls_url,
                                 db=ls_db,
                                 number=num_docs,
                                 id_prefix="doc",
                                 channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    sg_db_docs = client.add_docs(url=sg_url,
                                 db=sg_db,
                                 number=num_docs,
                                 id_prefix="doc",
                                 channels=sg_user_channels,
                                 auth=sg_session)
    assert len(sg_db_docs) == num_docs

    # Wait for changes to be available on Sync Gateway
    client.verify_docs_in_changes(url=sg_url,
                                  db=sg_db,
                                  expected_docs=sg_db_docs,
                                  auth=sg_session,
                                  polling_interval=1)

    # Set up one-shot pull replication and wait for it to complete
    client.start_replication(url=ls_url,
                             continuous=False,
                             from_url=sg_admin_url,
                             from_db=sg_db,
                             to_db=ls_db)

    client.wait_for_no_replications(url=ls_url)

    # There should now be a conflict on the client
    conflicting_revs = client.get_conflict_revs(url=ls_url,
                                                db=ls_db,
                                                doc=ls_db_docs[0])

    # Get the doc with conflict rev
    client.get_doc(url=ls_url,
                   db=ls_db,
                   doc_id=ls_db_docs[0]["id"],
                   rev=conflicting_revs[0])

    # Update doc past revs limit and make sure conflict is still available
    updated_doc = client.update_doc(url=ls_url,
                                    db=ls_db,
                                    doc_id=ls_db_docs[0]["id"],
                                    number_updates=num_revs)
    client.get_doc(url=ls_url,
                   db=ls_db,
                   doc_id=ls_db_docs[0]["id"],
                   rev=conflicting_revs[0])

    # Delete doc and ensure that the conflict is now the current rev
    client.delete_doc(url=ls_url,
                      db=ls_db,
                      doc_id=ls_db_docs[0]["id"],
                      rev=updated_doc["rev"])
    current_doc = client.get_doc(url=ls_url,
                                 db=ls_db,
                                 doc_id=ls_db_docs[0]["id"])
    assert current_doc["_rev"] == conflicting_revs[0]
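# A hedged sketch of what client.get_conflict_revs amounts to: a GET with
# conflicts=true returns the losing revision ids under "_conflicts". The URL
# shape is an assumption for illustration.
import requests


def fetch_conflict_revs(ls_url, ls_db, doc_id):
    resp = requests.get(
        "{}/{}/{}".format(ls_url, ls_db, doc_id),
        params={"conflicts": "true"},
    )
    resp.raise_for_status()
    # Docs without conflicting branches simply omit the "_conflicts" key
    return resp.json().get("_conflicts", [])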
def test_inline_large_attachments(setup_client_syncgateway_test):
    """
    1.  Start LiteServ and Sync Gateway
    2.  Create 2 databases on LiteServ (ls_db1, ls_db2)
    3.  Start continuous push replication from ls_db1 to sg_db
    4.  Start continuous pull replication from sg_db to ls_db2
    5.  PUT 5 large inline attachments to ls_db1
    6.  DELETE the docs on ls_db1
    7.  PUT same 5 large inline attachments to ls_db1
    8.  Verify docs replicate to ls_db2
    9.  Purge ls_db1
    10. Verify docs removed
    """

    log_info("Running 'test_inline_large_attachments' ...")

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_url_admin = setup_client_syncgateway_test["sg_admin_url"]

    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_url_admin: {}".format(sg_url_admin))

    ls_db1 = "ls_db1"
    ls_db2 = "ls_db2"
    sg_db = "db"

    # Reset cluster to ensure no data in system
    sg_config = sync_gateway_config_path_for_mode(
        "listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    client = MobileRestClient()
    client.create_database(ls_url, ls_db1)
    client.create_database(ls_url, ls_db2)

    # Start continuous push replication from ls_db1 -> sg_db
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db1,
                             to_url=sg_url_admin,
                             to_db=sg_db)

    # Start continuous pull replication from sg_db -> ls_db2
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_url_admin,
                             from_db=sg_db,
                             to_db=ls_db2)

    # Create 5 docs, each with a large inline attachment (golden_gate_large.jpg)
    attachment_docs = []
    for i in range(5):
        atts = attachment.load_from_data_dir(["golden_gate_large.jpg"])
        doc = document.create_doc(doc_id="large_attach_{}".format(i),
                                  attachments=atts,
                                  channels=["ABC"])
        attachment_docs.append(doc)

    # add large attachments to ls_db1
    docs = []
    for doc in attachment_docs:
        docs.append(client.add_doc(ls_url, ls_db1, doc, use_post=False))

    # Delete docs
    client.delete_docs(ls_url, ls_db1, docs)
    client.verify_docs_deleted(ls_url, ls_db1, docs)

    # Recreate the docs with the same ids and attachments
    recreated_docs = []
    for doc in attachment_docs:
        recreated_docs.append(
            client.add_doc(ls_url, ls_db1, doc, use_post=False))

    client.verify_docs_present(ls_url, ls_db1, recreated_docs)
    client.verify_docs_present(sg_url_admin, sg_db, recreated_docs)
    client.verify_docs_present(ls_url, ls_db2, recreated_docs)

    purged_docs = client.purge_docs(ls_url, ls_db1, recreated_docs)
    log_info(purged_docs)
    client.verify_docs_deleted(ls_url,
                               ls_db1,
                               recreated_docs,
                               reason="missing")
def test_auto_prune_with_pull(setup_client_syncgateway_test):
    """Sanity test for autopruning with replication

    1. Create a database on LiteServ (ls_db)
    2. Add doc to sync gateway
    3. Update doc 50 times on sync_gateway
    4. Set up pull replication from sync_gateway db to LiteServ db
    5. Verify number of revisions on client is default (20)
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()

    sg_config = sync_gateway_config_path_for_mode(
        "listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 50

    sg_user_channels = ["NBC"]
    sg_db = "db"
    sg_user_name = "sg_user"

    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=sg_user_name,
                       password="******",
                       channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url,
                                       db=sg_db,
                                       name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")

    sg_db_docs = client.add_docs(url=sg_url,
                                 db=sg_db,
                                 number=num_docs,
                                 id_prefix=sg_db,
                                 channels=sg_user_channels,
                                 auth=sg_session)
    assert len(sg_db_docs) == num_docs

    sg_docs_update = client.update_docs(url=sg_url,
                                        db=sg_db,
                                        docs=sg_db_docs,
                                        number_updates=num_revs,
                                        auth=sg_session)

    # Start continuous replication ls_db <- sg_db
    repl_one = client.start_replication(url=ls_url,
                                        continuous=True,
                                        from_url=sg_admin_url,
                                        from_db=sg_db,
                                        to_db=ls_db)

    client.wait_for_replication_status_idle(url=ls_url,
                                            replication_id=repl_one)
    client.verify_docs_present(url=ls_url,
                               db=ls_db,
                               expected_docs=sg_docs_update)
    client.verify_revs_num_for_docs(url=ls_url,
                                    db=ls_db,
                                    docs=sg_docs_update,
                                    expected_revs_per_doc=20)
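# A hedged sketch of checking pruning by hand: requesting a doc with revs=true
# returns the revision history the database still retains, which is what
# verify_revs_num_for_docs asserts against (the client's default revs limit of 20).
# The endpoint shape is an assumption for illustration.
import requests


def retained_rev_count(ls_url, ls_db, doc_id):
    resp = requests.get(
        "{}/{}/{}".format(ls_url, ls_db, doc_id),
        params={"revs": "true"},
    )
    resp.raise_for_status()
    return len(resp.json()["_revisions"]["ids"])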
Example #20
def test_stale_revision_should_not_be_in_the_index(setup_client_syncgateway_test):
    """original ticket: https://github.com/couchbase/couchbase-lite-android/issues/855

    scenario:
    1. Running sync_gateway
    2. Create database and start both push and pull replicators through client REST API
    3. Create two or more views through client REST API
    4. Add doc, and verify doc is indexed with current revision through client REST API
    5. Make sure document is pushed to sync gateway through sync gateway REST API
    6. Update doc with sync gateway (not client side) through sync gateway REST API
    7. Make sure updated document is pull replicated to client through client REST API
    8. Make sure updated document is indexed through client REST API
    9. Make sure stale revision is deleted from index through client REST API
    10. Pass criteria
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 10
    num_revs = 100

    d_doc_name = "dd"
    sg_db = "db"
    sg_user_name = "sg_user"

    sg_config = sync_gateway_config_path_for_mode("listener_tests/listener_tests", sg_mode)
    c = cluster.Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_config)

    log_info("Running 'test_stale_revision_should_not_be_in_the_index'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        },
        "update_view" : {
            "map" : "function(doc, meta) { emit(doc.updates, null); }"
        }
    }
}"""

    ls_db = client.create_database(url=ls_url, name="ls_db")

    # Set up continuous push / pull replication between ls_db and sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    design_doc_id = client.add_design_doc(url=ls_url, db=ls_db, name=d_doc_name, doc=view)
    client.get_doc(url=ls_url, db=ls_db, doc_id=design_doc_id)

    doc_body = document.create_doc(doc_id="doc_1", content={"hi": "I should be in the view"}, channels=sg_user_channels)

    log_info(doc_body)

    doc_body_2 = document.create_doc(doc_id="doc_2", channels=sg_user_channels)

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_body)
    doc_2 = client.add_doc(url=ls_url, db=ls_db, doc=doc_body_2)

    content_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows, expected_num_rows=1)

    update_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="update_view")
    client.verify_view_row_num(view_response=update_view_rows, expected_num_rows=2)

    expected_docs_list = [doc, doc_2]
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=expected_docs_list, auth=sg_session)

    updated_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=doc["id"], number_updates=10, auth=sg_session)

    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=updated_doc)

    content_view_rows_2 = client.get_view(url=ls_url, db=ls_db, design_doc_name=d_doc_name, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows_2, expected_num_rows=1)

    client.verify_view_contains_keys(view_response=content_view_rows_2, keys=doc["id"])
    client.verify_view_contains_values(view_response=content_view_rows_2, values=updated_doc["rev"])
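# A hedged sketch of the view query behind client.get_view in the test above: the
# listener exposes CouchDB-style design-document views, and content_view emits
# (doc._id, doc._rev), so a stale revision would surface as a row whose value is an
# old rev id. The URL shape is an assumption for illustration.
import requests


def query_view(ls_url, ls_db, design_doc="dd", view_name="content_view"):
    resp = requests.get(
        "{}/{}/_design/{}/_view/{}".format(ls_url, ls_db, design_doc, view_name)
    )
    resp.raise_for_status()
    return resp.json()["rows"]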
Example #21
def params_from_base_test_setup(request, params_from_base_suite_setup):
    # Code before the yield will execute before each test starts

    # Parameters passed down from the suite-level setup fixture
    cluster_config = params_from_base_suite_setup["cluster_config"]
    cluster_topology = params_from_base_suite_setup["cluster_topology"]
    mode = params_from_base_suite_setup["mode"]
    xattrs_enabled = params_from_base_suite_setup["xattrs_enabled"]
    server_version = params_from_base_suite_setup["server_version"]
    sync_gateway_version = params_from_base_suite_setup["sync_gateway_version"]
    server_upgraded_version = params_from_base_suite_setup[
        "server_upgraded_version"]
    sync_gateway_upgraded_version = params_from_base_suite_setup[
        "sync_gateway_upgraded_version"]
    liteserv_host = params_from_base_suite_setup["liteserv_host"]
    liteserv_port = params_from_base_suite_setup["liteserv_port"]
    liteserv_version = params_from_base_suite_setup["liteserv_version"]
    liteserv_platform = params_from_base_suite_setup["liteserv_platform"]
    liteserv_storage_engine = params_from_base_suite_setup[
        "liteserv_storage_engine"]
    liteserv = params_from_base_suite_setup["liteserv"]
    num_docs = params_from_base_suite_setup["num_docs"]
    cbs_platform = params_from_base_suite_setup["cbs_platform"]
    cbs_toy_build = params_from_base_suite_setup["cbs_toy_build"]

    test_name = request.node.name
    log_info("Running test '{}'".format(test_name))
    log_info("cluster_config: {}".format(cluster_config))
    log_info("cluster_topology: {}".format(cluster_topology))
    log_info("mode: {}".format(mode))
    log_info("xattrs_enabled: {}".format(xattrs_enabled))

    client = MobileRestClient()

    # Start LiteServ and delete any databases
    ls_url = liteserv.start("{}/logs/{}-{}-{}.txt".format(
        RESULTS_DIR,
        type(liteserv).__name__, test_name, datetime.datetime.now()))
    client.delete_databases(ls_url)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(
        cluster_config=cluster_config)
    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]

    if xattrs_enabled:
        log_info("Running upgrade with xattrs for sync meta storage")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', True)
    else:
        log_info("Using document storage for sync meta data")
        persist_cluster_config_environment_prop(cluster_config,
                                                'xattrs_enabled', False)

    # This dictionary is passed to each test
    yield {
        "cluster_config": cluster_config,
        "cluster_topology": cluster_topology,
        "mode": mode,
        "xattrs_enabled": xattrs_enabled,
        "server_version": server_version,
        "sync_gateway_version": sync_gateway_version,
        "server_upgraded_version": server_upgraded_version,
        "sync_gateway_upgraded_version": sync_gateway_upgraded_version,
        "liteserv_host": liteserv_host,
        "liteserv_port": liteserv_port,
        "liteserv_version": liteserv_version,
        "liteserv_platform": liteserv_platform,
        "liteserv_storage_engine": liteserv_storage_engine,
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
        "num_docs": num_docs,
        "cbs_platform": cbs_platform,
        "cbs_toy_build": cbs_toy_build
    }

    client.delete_databases(ls_url)
    liteserv.stop()

    # Code after the yield will execute when each test finishes
    log_info("Tearing down test '{}'".format(test_name))

    network_utils = NetworkUtils()
    network_utils.list_connections()

    # Verify all sync_gateways and sg_accels are reachable
    c = cluster.Cluster(cluster_config)
    errors = c.verify_alive(mode)

    # Fetch logs
    logging_helper = Logging()
    logging_helper.fetch_and_analyze_logs(cluster_config=cluster_config,
                                          test_name=test_name)

    assert len(errors) == 0

    # Scan logs
    # SG logs for panic, data race
    # System logs for OOM
    ansible_runner = AnsibleRunner(cluster_config)
    script_name = "{}/utilities/check_logs.sh".format(os.getcwd())
    status = ansible_runner.run_ansible_playbook(
        "check-logs.yml", extra_vars={"script_name": script_name})

    if status != 0:
        raise LogScanningError("Errors found in the logs")
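# A hedged usage sketch: a test consuming the fixture above simply unpacks the
# yielded dictionary. The test body below is illustrative and not part of the suite.
def test_fixture_usage_sketch(params_from_base_test_setup):
    ls_url = params_from_base_test_setup["ls_url"]
    sg_url = params_from_base_test_setup["sg_url"]
    sg_admin_url = params_from_base_test_setup["sg_admin_url"]
    assert ls_url is not None and sg_url is not None and sg_admin_url is not None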
def test_listener_two_sync_gateways(setup_client_syncgateway_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js
    Scenario:
      1. Start 2 sync_gateways
      2. Create sg_db_one db on sync_gateway one
      3. Create sg_db_two db on sync_gateway two
      4. Create ls_db_one and ls_db_two on Liteserv
      5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
      6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
      7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
      8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
      9. Add num_docs / 2 to each liteserv database
      10. Verify each database has num_docs docs
      11. Verify all_docs in all dbs
      12. Verify changes feed for sg_db_one and sg_db_two
      13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_syncgateway_test["ls_url"]
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_mode = setup_client_syncgateway_test["sg_mode"]

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_config)

    sg_one_admin_url = topology["sync_gateways"][0]["admin"]
    sg_two_admin_url = topology["sync_gateways"][1]["admin"]
    cb_server_url = topology["couchbase_servers"][0]

    log_info("Sync Gateway 1 admin url: {}".format(sg_one_admin_url))
    log_info("Sync Gateway 2 admin url: {}".format(sg_two_admin_url))
    log_info("Couchbase Server url: {}".format(cb_server_url))

    c = cluster.Cluster(cluster_config)
    sg_config_path = sync_gateway_config_path_for_mode(
        "listener_tests/multiple_sync_gateways", sg_mode)
    c.reset(sg_config_path=sg_config_path)

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Delete sg_db2 on sync_gateway 1
    client.delete_database(url=sg_one_admin_url, name=sg_db_two)

    # Delete sg_db1 on sync_gateway 2
    client.delete_database(url=sg_two_admin_url, name=sg_db_one)

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push pull replication ls_db_one <-> sg_db_one
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_one,
                             to_url=sg_one_admin_url,
                             to_db=sg_db_one)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_one_admin_url,
                             from_db=sg_db_one,
                             to_db=ls_db_one)

    # Start continuous push pull replication ls_db_two <-> sg_db_two
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_two,
                             to_url=sg_two_admin_url,
                             to_db=sg_db_two)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_two_admin_url,
                             from_db=sg_db_two,
                             to_db=ls_db_two)

    # Start continuous push pull replication sg_db_one <-> ls_db_two
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_one_admin_url,
                             from_db=sg_db_one,
                             to_db=ls_db_two)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_two,
                             to_url=sg_one_admin_url,
                             to_db=sg_db_one)

    # Start continuous push pull replication sg_db_two <-> ls_db_one
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_two_admin_url,
                             from_db=sg_db_two,
                             to_db=ls_db_one)
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_db=ls_db_one,
                             to_url=sg_two_admin_url,
                             to_db=sg_db_two)

    ls_db_one_docs = client.add_docs(url=ls_url,
                                     db=ls_db_one,
                                     number=num_docs / 2,
                                     id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs / 2

    ls_db_two_docs = client.add_docs(url=ls_url,
                                     db=ls_db_two,
                                     number=num_docs / 2,
                                     id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs / 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == 500

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url,
                               db=ls_db_one,
                               expected_docs=all_docs)
    client.verify_docs_present(url=ls_url,
                               db=ls_db_two,
                               expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url,
                               db=sg_db_one,
                               expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url,
                               db=sg_db_two,
                               expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url,
                                  db=ls_db_one,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url,
                                  db=ls_db_two,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url,
                                  db=sg_db_one,
                                  expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url,
                                  db=sg_db_two,
                                  expected_docs=all_docs)
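# A hedged helper sketch: each "push / pull" pair in the steps above is just two
# one-directional replications registered on the listener, so the eight
# start_replication calls in the test could be expressed as four calls to a helper
# like this one (illustrative only).
def start_push_pull(client, ls_url, local_db, remote_url, remote_db):
    client.start_replication(url=ls_url, continuous=True,
                             from_db=local_db, to_url=remote_url, to_db=remote_db)
    client.start_replication(url=ls_url, continuous=True,
                             from_url=remote_url, from_db=remote_db, to_db=local_db)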
def test_no_conflicts_update_revs_limit(params_from_base_test_setup, sg_conf_name, num_of_docs, revs_limit):
    """ @summary Enable no conflicts and  with non default revs_limit and verify revs_limit is maintained
    Test case link : https://docs.google.com/spreadsheets/d/1YwI_gCeoBebQKBybkzoAEoXSc0XLReszDA-mPFQapgk/edit#gid=0
    covered #14
    Steps:
    1. Enable allow_conflicts = false in SG config with the parametrized revs_limit
    2. Add docs to SG.
    3. Update the docs more than revs_limit times.
    4. Check the revision list for the doc.
    5. Modify the revs_limit to 2
    6. Update doc
    7. Verify the revision history shows only 2 revisions now
    8. Verify previous revisions deleted
    """

    # Setup
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]
    mode = params_from_base_test_setup["mode"]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    reduced_revs_limit = revs_limit - 3
    total_updates = revs_limit + 5

    if not no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    c = cluster.Cluster(cluster_config)
    c.reset(sg_conf)

    sg_client = MobileRestClient()
    channels = ["no-conflicts"]
    sg_client.create_user(url=sg_admin_url, db=sg_db, name='autotest', password='******', channels=channels)
    autouser_session = sg_client.create_session(url=sg_admin_url, db=sg_db, name='autotest', password='******')
    # end of Set up

    # 1. Enable allow_conflicts = false in SG config with the parametrized revs_limit
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit 1 with no conflicts mode"

    # 2. Add docs to SG.
    sgdoc_bodies = document.create_docs(doc_id_prefix="sg_docs", number=num_of_docs, channels=channels)
    sg_docs = sg_client.add_bulk_docs(url=sg_url, db=sg_db, docs=sgdoc_bodies, auth=autouser_session)
    assert len(sgdoc_bodies) == num_of_docs

    # 3. Update the docs more than revs_limit times
    prev_revs = []
    for i in xrange(total_updates):
        update_sg_docs = sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=1, delay=None,
                                               auth=autouser_session, channels=channels)
        rev = update_sg_docs[0]['rev'].split('-')[1]
        prev_revs.append(rev)

    # 4. Get the revision history and verify its length equals the configured revs_limit
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == revs_limit, "Number of revisions in history does not match revs_limit set in sg config"
        for i in xrange(5):
            assert prev_revs[i] not in num_of_revs

    # 5. Modify the revs_limit to 2
    temp_cluster_config = copy_to_temp_conf(cluster_config, mode)
    persist_cluster_config_environment_prop(temp_cluster_config, 'revs_limit', reduced_revs_limit, property_name_check=False)
    status = c.sync_gateways[0].restart(config=sg_conf, cluster_config=temp_cluster_config)
    assert status == 0, "Syncgateway did not start after having revs_limit 2 with no conflicts mode"

    # 6. Update the docs a couple more times
    sg_client.update_docs(url=sg_url, db=sg_db, docs=sg_docs, number_updates=2, delay=None, auth=autouser_session, channels=channels)

    # 7. Get the revision history and verify its length equals the reduced revs_limit
    for doc in sg_docs:
        num_of_revs = sg_client.get_revs_num_in_history(url=sg_url, db=sg_db, doc_id=doc["id"], auth=autouser_session)
        assert len(num_of_revs) == reduced_revs_limit, "Number of revisions in history does not match the reduced revs_limit"
        for i in xrange(total_updates - reduced_revs_limit):
            assert prev_revs[i] not in num_of_revs
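# A hedged recap of the two settings this test exercises, shown as a plain dict for
# illustration; the real values come from the bundled sg_conf template plus the
# revs_limit override persisted into the temp cluster config above.
no_conflicts_settings_sketch = {
    "allow_conflicts": False,  # conflicting branches are rejected with a 409
    "revs_limit": 2,           # revisions retained per doc after the second restart
}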