Example #1
def upgrade_sync_gateway(sync_gateways, sync_gateway_version,
                         sync_gateway_upgraded_version, sg_conf,
                         cluster_config):
    log_info('------------------------------------------')
    log_info('START Sync Gateway cluster upgrade')
    log_info('------------------------------------------')

    sg_obj = SyncGateway()

    for sg in sync_gateways:
        sg_ip = host_for_url(sg["admin"])
        log_info("Checking for sync gateway product info before upgrade")
        verify_sync_gateway_product_info(sg_ip)
        log_info("Checking for sync gateway version: {}".format(
            sync_gateway_version))
        verify_sync_gateway_version(sg_ip, sync_gateway_version)
        log_info("Upgrading sync gateway: {}".format(sg_ip))
        sg_obj.upgrade_sync_gateways(
            cluster_config=cluster_config,
            sg_conf=sg_conf,
            sync_gateway_version=sync_gateway_upgraded_version,
            url=sg_ip)

        time.sleep(10)
        log_info("Checking for sync gateway product info after upgrade")
        verify_sync_gateway_product_info(sg_ip)
        log_info("Checking for sync gateway version after upgrade: {}".format(
            sync_gateway_upgraded_version))
        verify_sync_gateway_version(sg_ip, sync_gateway_upgraded_version)

    log_info("Upgraded all the sync gateway nodes in the cluster")
    log_info('------------------------------------------')
    log_info('END Sync Gateway cluster upgrade')
    log_info('------------------------------------------')
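The loop above follows a verify, upgrade, wait, re-verify pattern for each node. Below is a minimal sketch of that pattern, assuming Sync Gateway's admin root endpoint reports the product version string; run_upgrade is a hypothetical stand-in for the provisioning done by SyncGateway.upgrade_sync_gateways.
import time

import requests


def node_version(admin_url):
    # The root endpoint returns JSON including a version string,
    # e.g. {"version": "Couchbase Sync Gateway/1.5.0(...)"}
    return requests.get(admin_url).json()["version"]


def upgrade_node(admin_url, old_version, new_version, run_upgrade):
    assert old_version in node_version(admin_url)
    run_upgrade()       # provisioning happens out of band
    time.sleep(10)      # give the service time to restart, as the test above does
    assert new_version in node_version(admin_url)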
Example #2
def upgrade_sg_accel(sg_accels, sync_gateway_version,
                     sync_gateway_upgraded_version, sg_conf, cluster_config):
    log_info('------------------------------------------')
    log_info('START SG Accel cluster upgrade')
    log_info('------------------------------------------')

    ac_obj = SyncGateway()

    for ac in sg_accels:
        ac_ip = host_for_url(ac)
        log_info("Checking for sg_accel version before upgrade: {}".format(
            sync_gateway_version))
        verify_sg_accel_version(ac_ip, sync_gateway_version)
        log_info("Upgrading sg_accel: {}".format(ac_ip))
        ac_obj.upgrade_sync_gateways(
            cluster_config=cluster_config,
            sg_conf=sg_conf,
            sync_gateway_version=sync_gateway_upgraded_version,
            url=ac_ip)
        time.sleep(10)

        log_info("Checking for sg accel product info after upgrade")
        verify_sg_accel_product_info(ac_ip)
        log_info("Checking for sg accel version after upgrade: {}".format(
            sync_gateway_upgraded_version))
        verify_sg_accel_version(ac_ip, sync_gateway_upgraded_version)

    log_info("Upgraded all the sg accel nodes in the cluster")
    log_info('------------------------------------------')
    log_info('END SG Accel cluster upgrade')
    log_info('------------------------------------------')
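Example #3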
def setup_client_2sgs_suite(request):

    """Suite setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway suite ...")

    liteserv_platform = request.config.getoption("--liteserv-platform")
    liteserv_version = request.config.getoption("--liteserv-version")
    liteserv_storage_engine = request.config.getoption("--liteserv-storage-engine")

    sync_gateway_version = request.config.getoption("--sync-gateway-version")

    ls = LiteServ()

    log_info("Downloading LiteServ One ...")

    # Download LiteServ One
    ls.download_liteserv(
        platform=liteserv_platform,
        version=liteserv_version,
        storage_engine=liteserv_storage_engine
    )

    ls_cluster_target = None
    if liteserv_platform == "net-win":
        ls_cluster_target = "resources/cluster_configs/windows"

    # Install LiteServ
    ls.install_liteserv(
        platform=liteserv_platform,
        version=liteserv_version,
        storage_engine=liteserv_storage_engine,
        cluster_config=ls_cluster_target
    )

    cluster_helper = ClusterKeywords()
    cluster_helper.set_cluster_config("2sgs")
    cluster_config = os.environ["CLUSTER_CONFIG"]

    clean_cluster(cluster_config=cluster_config)

    log_info("Installing sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.install_sync_gateway(
        cluster_config=cluster_config,
        sync_gateway_version=sync_gateway_version,
        sync_gateway_config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    # Wait at the yield until tests referencing this suite setup have run,
    # then execute the teardown
    yield

    log_info("Tearing down suite ...")
    cluster_helper.unset_cluster_config()
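These suite fixtures use pytest's yield-fixture pattern: everything before the yield runs once as setup, the referencing tests run at the yield, and everything after runs as teardown. A minimal self-contained sketch, with illustrative names:
import pytest


@pytest.fixture(scope="session")
def suite_resources():
    # Setup: runs once before the first test that uses this fixture
    resources = {"cluster_config": "2sgs"}
    yield resources
    # Teardown: runs after the last referencing test completes
    resources.clear()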
Example #4
def setup_client_syncgateway_suite(request):

    """Suite setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway suite ...")

    liteserv_platform = request.config.getoption("--liteserv-platform")
    liteserv_version = request.config.getoption("--liteserv-version")
    liteserv_host = request.config.getoption("--liteserv-host")
    liteserv_port = request.config.getoption("--liteserv-port")
    liteserv_storage_engine = request.config.getoption("--liteserv-storage-engine")

    sync_gateway_version = request.config.getoption("--sync-gateway-version")

    liteserv = LiteServFactory.create(
        platform=liteserv_platform,
        version_build=liteserv_version,
        host=liteserv_host,
        port=liteserv_port,
        storage_engine=liteserv_storage_engine,
    )

    log_info("Downloading LiteServ ...")

    # Download LiteServ
    liteserv.download()

    # Install LiteServ
    liteserv.install()

    cluster_helper = ClusterKeywords()
    cluster_helper.set_cluster_config("1sg")
    cluster_config = os.environ["CLUSTER_CONFIG"]

    clean_cluster(cluster_config=cluster_config)

    log_info("Installing sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.install_sync_gateway(
        cluster_config=cluster_config,
        sync_gateway_version=sync_gateway_version,
        sync_gateway_config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS),
    )

    # Wait at the yield until tests referencing this suite setup have run,
    # then execute the teardown
    yield liteserv

    log_info("Tearing down suite ...")
    cluster_helper.unset_cluster_config()

    liteserv.remove()
Example #5
def test_longpoll_changes_termination_heartbeat(setup_client_syncgateway_test):
    """https://github.com/couchbase/couchbase-lite-java-core/issues/1296
    Create 30 longpoll _changes in a loop (with heartbeat parameter = 5s)
    Cancel the request after 2s
    Wait 5.1s
    Create another request GET /db/ on listener and make sure the listener responds
    """

    log_info("Running 'longpoll_changes_termination' ...")

    ls_db = "ls_db"
    cluster_config = setup_client_syncgateway_test["cluster_config"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    ls_url = setup_client_syncgateway_test["ls_url"]

    log_info("Running 'test_longpoll_changes_termination' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    client = MobileRestClient()
    if client.get_server_platform(ls_url) == Platform.macosx:
        pytest.skip("https://github.com/couchbase/couchbase-lite-ios/issues/1236")

    client.create_database(ls_url, ls_db)

    ct = ChangesTracker(ls_url, ls_db)

    with ThreadPoolExecutor(max_workers=35) as executor:
        futures = [executor.submit(
            ct.start,
            timeout=5000,
            heartbeat=5000,
            request_timeout=2000
        ) for _ in range(30)]

        for future in as_completed(futures):
            log_info("Future _changes loop complete")

    log_info("Futures exited")

    # make sure client can still take connections
    dbs = client.get_databases(url=ls_url)
    log_info(dbs)
    database = client.get_database(url=ls_url, db_name=ls_db)
    log_info(database)
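ChangesTracker is a framework helper; the request it issues is a CouchDB-style longpoll _changes call with a server-side heartbeat and a client-side cancel. A rough sketch of one such request, assuming that API shape (names illustrative):
import requests


def longpoll_changes_once(ls_url, db, heartbeat_ms=5000, cancel_after_s=2.0):
    try:
        resp = requests.get(
            "{}/{}/_changes".format(ls_url, db),
            params={"feed": "longpoll", "heartbeat": heartbeat_ms},
            timeout=cancel_after_s,  # client-side cancellation, as in the test
        )
        return resp.json()
    except requests.exceptions.Timeout:
        return None  # cancelled before the server heartbeat kept it alive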
Example #6
def test_auto_prune_with_pull(setup_client_syncgateway_test):
    """Sanity test for autopruning with replication

    1. Create a database on LiteServ (ls_db)
    2. Add doc to sync gateway
    3. Update doc 50 times on sync_gateway
    4. Set up pull replication from sync_gateway db to LiteServ db
    5. Verify number of revisions on client is default (20)
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()
    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_auto_prune_listener_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 50

    sg_user_channels = ["NBC"]
    sg_db = "db"
    sg_user_name = "sg_user"

    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")

    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix=sg_db, channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    sg_docs_update = client.update_docs(url=sg_url, db=sg_db, docs=sg_db_docs, number_updates=num_revs, auth=sg_session)

    # Start continuous replication ls_db <- sg_db
    repl_one = client.start_replication(url=ls_url, continuous=True, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=sg_docs_update)
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=sg_docs_update, expected_revs_per_doc=20)
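A check like verify_revs_num_for_docs can be approximated with the CouchDB-style ?revs_info=true query, counting only revisions still marked available; after the pull above it should settle at the default client revs limit of 20. A hedged sketch, helper name hypothetical:
import requests


def available_rev_count(base_url, db, doc_id):
    doc = requests.get("{}/{}/{}".format(base_url, db, doc_id),
                       params={"revs_info": "true"}).json()
    # Pruned revisions are reported with status "missing"
    return sum(1 for rev in doc["_revs_info"] if rev["status"] == "available")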
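Example #7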
def test_log_rotation_negative(params_from_base_test_setup, sg_conf_name):
    """Test log rotation with negative values for:
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2
    SG shouldn't start
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set negative values for rotation section
    data['logging']["default"]["rotation"] = {
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2,
        "localtime": True
    }
    # create a temp config file one level above the sg_conf directory
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG should not have started with an invalid config")
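Example #8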
def test_log_rotation_invalid_path(params_from_base_test_setup, sg_conf_name):
    """Test to check that SG is not started with invalid logFilePath.
    OS specific case. SG should check if path correct on startup
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set non existing logFilePath
    data['logging']["default"]["logFilePath"] = "/12345/1231/131231.log"
    # create a temp config file one level above the sg_conf directory
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG should not have started with an invalid config")
Example #9
def setup_client_syncgateway_test(request, setup_client_syncgateway_suite):
    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv = setup_client_syncgateway_suite
    test_name = request.node.name

    ls_url = liteserv.start(
        "{}/logs/{}-{}-{}.txt".format(RESULTS_DIR, type(liteserv).__name__, test_name, datetime.datetime.now())
    )

    cluster_helper = ClusterKeywords()
    sg_helper = SyncGateway()

    cluster_hosts = cluster_helper.get_cluster_topology(os.environ["CLUSTER_CONFIG"])

    sg_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_admin_url = cluster_hosts["sync_gateways"][0]["admin"]
    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_url)

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": os.environ["CLUSTER_CONFIG"],
        "ls_url": ls_url,
        "sg_url": sg_url,
        "sg_admin_url": sg_admin_url,
    }

    log_info("Tearing down test")

    # Teardown test
    client = MobileRestClient()
    client.delete_databases(ls_url)

    liteserv.stop()

    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_url)

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=os.environ["CLUSTER_CONFIG"], test_name=test_name)
Example #10
def test_log_logLevel_invalid(params_from_base_test_setup, sg_conf_name):
    """Run SG with non existing logLevel value
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # 'debugFake' invalid value for logLevel
    data['logging']["default"]["logLevel"] = "debugFake"

    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    # create a temp config file one level above the sg_conf directory
    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG should not have started with an invalid config")
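The stop, try-start, restore-and-clean-up sequence above is repeated verbatim across these negative logging tests. A hedged sketch of how it could be factored into a single helper, assuming the framework's ProvisioningError and SyncGateway helper are in scope:
import json
import os

import pytest


def assert_sg_rejects_config(sg_helper, cluster_conf, sg_url, good_conf,
                             bad_data, temp_conf):
    # Write the intentionally broken config
    with open(temp_conf, 'w') as fp:
        json.dump(bad_data, fp)
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_url, config=temp_conf)
    except ProvisioningError:  # expected: startup rejected the config
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_url, config=good_conf)
        return
    finally:
        os.remove(temp_conf)
    pytest.fail("SG should not have started with an invalid config")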
Example #11
def test_log_logKeys_string(params_from_base_test_setup, sg_conf_name):
    """Negative test to verify that we are not able start SG when
    logKeys is string
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set logKeys as string in config file
    data['logging']["default"]["logKeys"] = "http"
    # create a temp config file one level above the sg_conf directory
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("SG should not have started with an invalid config")
Example #12
def test_log_nondefault_logKeys_set(params_from_base_test_setup, sg_conf_name):
    """Test to verify non default logKeys with any invalid area.
    SG should work even with non existing logging area
    (positive case)
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # "FAKE" not valid area in logging
    data['logging']["default"]["logKeys"] = ["HTTP", "FAKE"]
    # create a temp config file one level above the sg_conf directory
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # Start sync_gateways
    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)

    # Remove generated conf file
    os.remove(temp_conf)
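Example #13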
def test_initial_pull_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare sync-gateway to have 10000 documents.
    2. Create a single-shot or continuous pull replicator to pull the docs into a local database.
    3. Verify that all of the docs get pulled.
    Referenced issue: couchbase/couchbase-lite-android#955.
    """

    sg_db = "db"
    ls_db = "ls_db"

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_pull_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=["ABC", "NBC"])
    session = client.create_session(sg_one_admin, sg_db, "seth")

    # Create 'num_docs' docs on sync_gateway
    docs = client.add_docs(
        url=sg_one_public,
        db=sg_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        auth=session
    )
    assert len(docs) == num_docs

    client.create_database(url=ls_url, name=ls_db)

    # Start a one-shot or continuous pull replication, depending on the 'continuous' param
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    start = time.time()

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        # Android will report IDLE status, and drop into the 'verify_docs_present' below
        # due to https://github.com/couchbase/couchbase-lite-java-core/issues/1409
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to client
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=docs, timeout=240)

    all_docs_replicated_time = time.time() - start
    log_info("Replication took: {}s".format(all_docs_replicated_time))

    # Verify docs show up in client's changes feed
    client.verify_docs_in_changes(url=ls_url, db=ls_db, expected_docs=docs)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
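start_replication and stop_replication wrap the listener's POST /_replicate endpoint. A rough sketch of the underlying calls, assuming a CouchDB-Lite-style API and the response shape these tests imply (function names illustrative):
import requests


def start_pull(ls_url, source_url, target_db, continuous):
    body = {"source": source_url, "target": target_db, "continuous": continuous}
    resp = requests.post("{}/_replicate".format(ls_url), json=body)
    return resp.json().get("session_id")  # e.g. "repl001"


def cancel_pull(ls_url, source_url, target_db, continuous):
    # The same body plus cancel=true stops the matching replication
    body = {"source": source_url, "target": target_db,
            "continuous": continuous, "cancel": True}
    requests.post("{}/_replicate".format(ls_url), json=body)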
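Example #14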
def test_initial_push_replication(setup_client_syncgateway_test, continuous):
    """
    1. Prepare LiteServ to have 10000 documents.
    2. Create a single-shot or continuous push replicator to push the docs into a sync_gateway database.
    3. Verify that all of the docs get pushed.
    """

    sg_db = "db"
    ls_db = "ls_db"
    seth_channels = ["ABC", "NBC"]

    num_docs = 10000

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_initial_push_replication', continuous: {}".format(continuous))
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_user(sg_one_admin, sg_db, "seth", password="******", channels=seth_channels)
    session = client.create_session(sg_one_admin, sg_db, "seth")

    client.create_database(url=ls_url, name=ls_db)

    # Create 'num_docs' docs on LiteServ
    docs = client.add_docs(
        url=ls_url,
        db=ls_db,
        number=num_docs,
        id_prefix="seeded_doc",
        generator="four_k",
        channels=seth_channels
    )
    assert len(docs) == num_docs

    # Start push replication
    repl_id = client.start_replication(
        url=ls_url,
        continuous=continuous,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    if continuous:
        log_info("Waiting for replication status 'Idle' for: {}".format(repl_id))
        client.wait_for_replication_status_idle(ls_url, repl_id)
    else:
        log_info("Waiting for no replications: {}".format(repl_id))
        client.wait_for_no_replications(ls_url)

    # Verify docs replicated to sync_gateway
    client.verify_docs_present(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    # Verify docs show up in sync_gateway's changes feed
    client.verify_docs_in_changes(url=sg_one_public, db=sg_db, expected_docs=docs, auth=session)

    replications = client.get_replications(url=ls_url)

    if continuous:
        assert len(replications) == 1, "There should only be one replication running"
        assert replications[0]["status"] == "Idle", "Replication Status should be 'Idle'"
        assert replications[0]["continuous"], "Running replication should be continuous"
        # Only .NET has an 'error' property
        if "error" in replications[0]:
            assert len(replications[0]["error"]) == 0
    else:
        assert len(replications) == 0, "No replications should be running"
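Example #15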
def test_multiple_replications_not_created_with_same_properties(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/939
    1. Create LiteServ database and launch sync_gateway with database
    2. Start 5 continuous push replicators with the same source and target
    3. Make sure the sample replication id is returned
    4. Check that 1 one replication exists in 'active_tasks'
    5. Stop the replication with POST /_replicate cancel=true
    6. Start 5 continuous pull replicators with the same source and target
    7. Make sure the sample replication id is returned
    8. Check that 1 one replication exists in 'active_tasks'
    9. Stop the replication with POST /_replicate cancel=true
    """

    sg_db = "db"
    ls_db = "ls_db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_not_created_with_same_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    repl_id_num = 0
    response_one_id_num = 0
    response_two_id_num = 0

    # launch 50 concurrent push replication requests with the same source / target
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(
            client.start_replication,
            url=ls_url,
            continuous=True,
            from_db=ls_db,
            to_url=sg_one_admin,
            to_db=sg_db
        ) for _ in range(50)]

        for future in as_completed(futures):
            response_one_id = future.result()
            # Convert session_id from string "repl001" -> int 1
            response_one_id_num = int(response_one_id.replace("repl", ""))
            log_info(response_one_id_num)

    # Assert that concurrent replications have a greater session id than 0
    assert response_one_id_num > repl_id_num, "'response_one_id_num': {} should be greater than 'repl_id_num': {}".format(
        response_one_id_num,
        repl_id_num
    )

    # Check there is only one replication running
    replications = client.get_replications(ls_url)
    assert len(replications) == 1, "Number of replications, Expected: {} Actual {}".format(
        1,
        len(replications)
    )

    # Stop replication
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Check that no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual {}".format(
        0,
        len(replications)
    )

    # launch 50 concurrent pull replication requests with the same source / target
    with ThreadPoolExecutor(max_workers=10) as executor:
        futures = [executor.submit(
            client.start_replication,
            url=ls_url,
            continuous=True,
            from_db=sg_db,
            from_url=sg_one_admin,
            to_db=ls_db
        ) for _ in range(50)]

        for future in as_completed(futures):
            response_two_id = future.result()
            # Convert session_id from string "repl001" -> int 1
            response_two_id_num = int(response_two_id.replace("repl", ""))
            log_info(response_two_id_num)

    # Assert that the second set of concurrent replication requests has a higher id than the first
    assert response_two_id_num > response_one_id_num, "'response_two_id_num': {} should be greater than 'response_one_id_num': {}".format(
        response_two_id_num,
        response_one_id_num
    )

    # Check there is only one replication running
    replications = client.get_replications(ls_url)
    assert len(replications) == 1, "Number of replications, Expected: {} Actual {}".format(
        1,
        len(replications)
    )

    # Stop replication
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=sg_db,
        from_url=sg_one_admin,
        to_db=ls_db
    )

    # Check that no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual {}".format(
        0,
        len(replications)
    )
Example #16
def test_auto_prune_listener_keeps_conflicts_sanity(setup_client_syncgateway_test):
    """"
    1. Create db on LiteServ and add docs
    2. Create db on sync_gateway and add docs with the same id
    3. Create one shot push / pull replication
    4. Update LiteServ 50 times
    5. Assert that pruned conflict is still present
    6. Delete the current revision and check that a GET returns the old conflict as the current rev
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    client = MobileRestClient()
    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config, url=sg_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_auto_prune_listener_keeps_conflicts_sanity' ...")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))

    num_docs = 1
    num_revs = 100
    sg_db = "db"
    ls_db = "ls_db"
    sg_user_name = "sg_user"
    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)

    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)
    ls_db = client.create_database(url=ls_url, name=ls_db)

    # Create docs with the same id prefix on both sides so the one-shot replication produces conflicts
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="doc", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    sg_db_docs = client.add_docs(
        url=sg_url, db=sg_db, number=num_docs, id_prefix="doc", channels=sg_user_channels, auth=sg_session
    )
    assert len(sg_db_docs) == num_docs

    # Setup one-shot pull replication and wait for it to complete.
    client.start_replication(url=ls_url, continuous=False, from_url=sg_admin_url, from_db=sg_db, to_db=ls_db)

    client.wait_for_no_replications(url=ls_url)

    # There should now be a conflict on the client
    conflicting_revs = client.get_conflict_revs(url=ls_url, db=ls_db, doc=ls_db_docs[0])

    # Get the doc with conflict rev
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Update doc past revs limit and make sure conflict is still available
    updated_doc = client.update_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], number_updates=num_revs)
    client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=conflicting_revs[0])

    # Delete doc and ensure that the conflict is now the current rev
    client.delete_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"], rev=updated_doc["rev"])
    current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id=ls_db_docs[0]["id"])
    assert current_doc["_rev"] == conflicting_revs[0]
def setup_client_2sgs_test(request):

    """Test setup fixture for client sync_gateway tests"""

    log_info("Setting up client sync_gateway test ...")

    liteserv_platform = request.config.getoption("--liteserv-platform")
    liteserv_version = request.config.getoption("--liteserv-version")
    liteserv_host = request.config.getoption("--liteserv-host")
    liteserv_port = request.config.getoption("--liteserv-port")
    liteserv_storage_engine = request.config.getoption("--liteserv-storage-engine")

    ls = LiteServ()
    client = MobileRestClient()

    test_name = request.node.name

    # Verify LiteServ is not running
    ls.verify_liteserv_not_running(host=liteserv_host, port=liteserv_port)

    ls_cluster_target = None
    if liteserv_platform == "net-win":
        ls_cluster_target = "resources/cluster_configs/windows"

    print("Starting LiteServ ...")

    if liteserv_platform != "net-win":
        # logging is file
        ls_logging = open("{}/logs/{}-ls1-{}-{}.txt".format(RESULTS_DIR, datetime.datetime.now(), liteserv_platform, test_name), "w")
    else:
        # logging is name
        ls_logging = "{}/logs/{}-ls1-{}-{}.txt".format(RESULTS_DIR, datetime.datetime.now(), liteserv_platform, test_name)

    ls_url, ls_handle = ls.start_liteserv(
        platform=liteserv_platform,
        version=liteserv_version,
        host=liteserv_host,
        port=liteserv_port,
        storage_engine=liteserv_storage_engine,
        logfile=ls_logging,
        cluster_config=ls_cluster_target
    )

    cluster_helper = ClusterKeywords()
    sg_helper = SyncGateway()

    cluster_hosts = cluster_helper.get_cluster_topology(os.environ["CLUSTER_CONFIG"])

    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_one_admin_url = cluster_hosts["sync_gateways"][0]["admin"]
    sg_two_url = cluster_hosts["sync_gateways"][1]["public"]
    sg_two_admin_url = cluster_hosts["sync_gateways"][1]["admin"]

    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_one_url)
    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_two_url)

    # Yield values to test case via fixture argument
    yield {
        "cluster_config": os.environ["CLUSTER_CONFIG"],
        "ls_url": ls_url,
        "sg_one_url": sg_one_url,
        "sg_one_admin_url": sg_one_admin_url,
        "sg_two_url": sg_two_url,
        "sg_two_admin_url": sg_two_admin_url
    }

    log_info("Tearing down test")

    # Teardown test
    client.delete_databases(ls_url)
    ls.shutdown_liteserv(host=liteserv_host,
                         platform=liteserv_platform,
                         version=liteserv_version,
                         storage_engine=liteserv_storage_engine,
                         process_handle=ls_handle,
                         logfile=ls_logging,
                         cluster_config=ls_cluster_target)

    # Verify LiteServ is killed
    ls.verify_liteserv_not_running(host=liteserv_host, port=liteserv_port)

    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_one_url)
    sg_helper.stop_sync_gateway(cluster_config=os.environ["CLUSTER_CONFIG"], url=sg_two_url)

    # if the test failed pull logs
    if request.node.rep_call.failed:
        logging_helper = Logging()
        logging_helper.fetch_and_analyze_logs(cluster_config=os.environ["CLUSTER_CONFIG"], test_name=test_name)
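Example #18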
def test_verify_open_revs_with_revs_limit_push_conflict(setup_client_syncgateway_test):
    """Test replication from multiple client dbs to one sync_gateway db

    https://github.com/couchbase/couchbase-lite-ios/issues/1277
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 100
    num_revs = 20

    sg_db = "db"
    sg_user_name = "sg_user"

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_verify_open_revs_with_revs_limit_push_conflict'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    # Test the endpoint, listener does not support users but should have a default response
    client.get_session(url=ls_url)
    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name="ls_db")
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix="ls_db", channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)

    client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, auth=sg_session)
    sg_current_doc = client.get_doc(url=sg_url, db=sg_db, doc_id="ls_db_2", auth=sg_session)

    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs)
    ls_current_doc = client.get_doc(url=ls_url, db=ls_db, doc_id="ls_db_2")

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)

    client.verify_doc_rev_generation(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_generation=21)
    client.verify_doc_rev_generation(url=sg_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_generation=21, auth=sg_session)

    expected_ls_revs = [ls_current_doc["_rev"]]
    client.verify_open_revs(url=ls_url, db=ls_db, doc_id=ls_current_doc["_id"], expected_open_revs=expected_ls_revs)

    expected_sg_revs = [ls_current_doc["_rev"], sg_current_doc["_rev"]]
    client.verify_open_revs(url=sg_admin_url, db=sg_db, doc_id=sg_current_doc["_id"], expected_open_revs=expected_sg_revs)
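verify_open_revs inspects which leaf revisions a document still exposes. In a CouchDB-style REST API those can be listed with ?open_revs=all when JSON is requested explicitly. A hedged sketch, helper name hypothetical:
import requests


def open_rev_ids(base_url, db, doc_id, auth=None):
    resp = requests.get("{}/{}/{}".format(base_url, db, doc_id),
                        params={"open_revs": "all"},
                        headers={"Accept": "application/json"},
                        auth=auth)
    # The body is a list of {"ok": <doc>} entries, one per open revision
    return [row["ok"]["_rev"] for row in resp.json() if "ok" in row]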
Example #19
def test_log_maxbackups_0(params_from_base_test_setup, sg_conf_name):
    """Test with maxbackups=0 that means do not limit the number of backups
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate a log file of almost 1 MB
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set maxbackups=0 in config file
    data['logging']["default"]["rotation"]["maxbackups"] = 0
    # create a temp config file one level above the sg_conf directory
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)
    # ~1 MB will be added to the log file by these requests
    remote_executor.execute(
        "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
    )

    status, stdout, stderr = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    assert stdout[0].rstrip() == '2'

    # Remove generated conf file
    os.remove(temp_conf)
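Example #20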
def test_multiple_replications_created_with_unique_properties(setup_client_syncgateway_test):
    """Regression test for couchbase/couchbase-lite-java-core#1386
    1. Setup SGW with a remote database name db for an example
    2. Create a local database such as ls_db
    3. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true
    4. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, doc_ids=["doc1", "doc2"]
    5. Send POST /_replicate with source = ls_db, target = http://localhost:4985/db, continuous = true, filter="filter1"
    6. Make sure that the session_id from each POST /_replicate are different.
    7. Send GET /_active_tasks to make sure that there are 3 tasks created.
    8. Send 3 POST /_replicate withe the same parameter as Step 3=5 plus cancel=true to stop those replicators
    9. Repeat Step 3 - 8 with source = and target = db for testing the pull replicator.
    """

    sg_db = "db"
    ls_db = "ls_db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_one_admin = setup_client_syncgateway_test["sg_admin_url"]
    sg_one_public = setup_client_syncgateway_test["sg_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_one_public,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_multiple_replications_created_with_unique_properties'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin: {}".format(sg_one_admin))
    log_info("sg_one_public: {}".format(sg_one_public))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    ########
    # PUSH #
    ########
    # Start 3 unique push replication requests
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_one)

    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_two)

    # Create doc filter and add to the design doc
    filters = {
        "language": "javascript",
        "filters": {
            "sample_filter": "function(doc, req) { if (doc.type && doc.type === \"skip\") { return false; } return true; }"
        }
    }
    client.add_design_doc(url=ls_url, db=ls_db, name="by_type", doc=json.dumps(filters))

    repl_three = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )
    client.wait_for_replication_status_idle(ls_url, repl_three)

    # Verify 3 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 3, "Number of replications, Expected: {} Actual: {}".format(
        3,
        len(replications)
    )

    # Stop repl001
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db
    )

    # Stop repl002
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        doc_ids=["doc_1", "doc_2"]
    )

    # Stop repl003
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_one_admin,
        to_db=sg_db,
        repl_filter="by_type/sample_filter"
    )

    # Verify no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual: {}".format(
        0,
        len(replications)
    )

    ########
    # PULL #
    ########
    # Start 2 unique pull replication requests
    repl_four = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )
    client.wait_for_replication_status_idle(ls_url, repl_four)

    # Start filtered pull from sync gateway to LiteServ
    repl_five = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db,
        channels_filter=["ABC", "CBS"]
    )
    client.wait_for_replication_status_idle(ls_url, repl_five)

    # Verify 2 replications are running
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 2, "Number of replications, Expected: {} Actual: {}".format(
        2,
        len(replications)
    )

    # Stop repl_four
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db
    )

    # Stop repl_five
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_one_admin,
        from_db=sg_db,
        to_db=ls_db,
        channels_filter=["ABC", "CBS"]
    )

    # Verify no replications are running
    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    log_info(replications)
    assert len(replications) == 0, "Number of replications, Expected: {} Actual: {}".format(
        0,
        len(replications)
    )
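Example #21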
def test_client_to_sync_gateway_complex_replication_with_revs_limit(setup_client_syncgateway_test):
    """ Ported from sync_gateway tests repo
    ...  1.  Clear server buckets
    ...  2.  Restart liteserv with _session
    ...  3.  Restart sync_gateway wil that config
    ...  4.  Create db on LiteServ
    ...  5.  Add numDocs to LiteServ db
    ...  6.  Setup push replication from LiteServ db to sync_gateway
    ...  7.  Verify doc present on sync_gateway (number of docs)
    ...  8.  Update sg docs numRevs * 4 = 480
    ...  9.  Update docs on LiteServ db numRevs * 4 = 480
    ...  10. Setup pull replication from sg -> liteserv db
    ...  11. Verify all docs are replicated
    ...  12. compact LiteServ db (POST _compact)
    ...  13. Verify number of revs in LiteServ db (?revs_info=true) check rev status == available fail if revs available > revs limit
    ...  14. Delete LiteServ db conflicts (?conflicts=true) DELETE _conflicts
    ...  15. Create numDoc number of docs in LiteServ db
    ...  16. Update LiteServ db docs numRevs * 5 (600)
    ...  17. Verify LiteServ db revs is < 602
    ...  18. Verify LiteServ db docs revs prefix (9 * numRevs + 3)
    ...  19. Compact LiteServ db
    ...  20. Verify number of revs <= 10
    ...  21. Delete LiteServ docs
    ...  22. Delete Server bucket
    ...  23. Delete LiteServ db
    """

    ls_db_name = "ls_db"
    sg_db = "db"
    sg_user_name = "sg_user"
    num_docs = 10
    num_revs = 100

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-revs-limit.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_client_to_sync_gateway_complex_replication_with_revs_limit'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()

    # Test the endpoint, listener does not support users but should have a default response
    client.get_session(url=ls_url)

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    ls_db = client.create_database(url=ls_url, name=ls_db_name)
    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
    assert len(ls_db_docs) == num_docs

    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db_docs)

    # A delay is added to the updates here due to couchbase/couchbase-lite-ios#1277.
    # If the revs depth is small and someone updates a doc past the revs depth before a push replication,
    # the push replication will have no common ancestor with sync_gateway, causing conflicts to be created.
    # Adding a delay between updates helps this situation. CBL Mac and CBL .NET can change the default
    # client revs depth, but that is not configurable for Android, so adding a delay between updates
    # allows the replication to act as expected on all platforms.
    client.update_docs(url=sg_url, db=sg_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1, auth=sg_session)
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, number_updates=num_revs, delay=0.1)

    # Start replication ls_db <- sg_db
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_one)
    client.wait_for_replication_status_idle(url=ls_url, replication_id=repl_two)

    client.compact_database(url=ls_url, db=ls_db)

    # LiteServ should only have 20 revisions due to built in client revs limit
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)

    # Sync Gateway should have 100 revisions due to the specified revs_limit in the sg config and possible conflict winners from the liteserv db
    client.verify_max_revs_num_for_docs(url=sg_url, db=sg_db, docs=ls_db_docs, expected_max_number_revs_per_doc=100, auth=sg_session)

    client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
    expected_generation = num_revs + 1
    client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)
    client.verify_docs_rev_generations(url=sg_url, db=sg_db, docs=ls_db_docs, expected_generation=expected_generation, auth=sg_session)

    client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)

    ls_db_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs, id_prefix=ls_db, channels=sg_user_channels)
    assert len(ls_db_docs) == 10

    expected_revs = num_revs + 20 + 2
    client.update_docs(url=ls_url, db=ls_db, docs=ls_db_docs, delay=0.1, number_updates=num_revs)

    client.verify_max_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_max_number_revs_per_doc=expected_revs)

    expected_generation = (num_revs * 2) + 3
    client.verify_docs_rev_generations(url=ls_url, db=ls_db, docs=ls_db_docs, expected_generation=expected_generation)

    client.compact_database(url=ls_url, db=ls_db)
    client.verify_revs_num_for_docs(url=ls_url, db=ls_db, docs=ls_db_docs, expected_revs_per_doc=20)

    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.wait_for_no_replications(url=ls_url)

    client.delete_conflicts(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.delete_conflicts(url=sg_url, db=sg_db, docs=ls_db_docs, auth=sg_session)
    client.delete_docs(url=ls_url, db=ls_db, docs=ls_db_docs)

    # Start push pull and verify that all docs are deleted
    # Start replication ls_db -> sg_db
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    # Start replication ls_db <- sg_db
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    client.verify_docs_deleted(url=ls_url, db=ls_db, docs=ls_db_docs)
    client.verify_docs_deleted(url=sg_admin_url, db=sg_db, docs=ls_db_docs)
Example No. 22
def test_detect_stale_channel_index(params_from_base_test_setup, sg_conf):
    """
    1. Bring up single Sync Gateway node, backed by Couchbase Server with 3 accels indexing
    2. Configure such that the primary bucket and the channel index bucket are different (which is the norm)
    3. Add 1000 documents
    4. Shutdown Sync Gateway
    5. Delete / create the primary bucket ('data-bucket'), but do not touch the channel index bucket
    6. Start Sync Gateway
    7. Assert that sync_gateway fails to start due to stale channel index
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_detect_stale_channel_index'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]
    sg_db = "db"
    num_docs = 1000

    cb_server = couchbaseserver.CouchbaseServer(url=cb_server_url)
    client = MobileRestClient()

    # Create doc pusher user
    doc_pusher_user_info = userinfo.UserInfo(name="doc_pusher",
                                             password="******",
                                             channels=["NASA"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    # Add some docs to Sync Gateway to cause indexing
    docs = document.create_docs(None,
                                number=num_docs,
                                channels=doc_pusher_user_info.channels)
    pushed_docs = client.add_bulk_docs(url=sg_url,
                                       db=sg_db,
                                       docs=docs,
                                       auth=doc_pusher_auth)
    assert len(pushed_docs) == num_docs

    # Shut down sync_gateway
    sg_util = SyncGateway()
    sg_util.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)

    # Delete server bucket
    cb_server.delete_bucket(name="data-bucket")

    # Create server bucket
    ram_per_bucket_mb = cb_server.get_ram_per_bucket(num_buckets=2)
    cb_server.create_bucket(name="data-bucket", ram_quota_mb=ram_per_bucket_mb)

    # Start sync_gateway and assert that a Provisioning error is raised due to detecting stale index
    with pytest.raises(exceptions.ProvisioningError):
        sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                    url=sg_url,
                                    config=sg_conf)

    # TODO: To make this check even more accurate, could
    # run remote ssh command "systemctl status sync_gateway.service" and look for
    # regex pattern: Main PID: 7185 (code=exited, status=2)
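    # A rough sketch of that check (hypothetical, assuming a RemoteExecutor
    # bound to the SG host):
    #   _, stdout, _ = remote_executor.execute("systemctl status sync_gateway.service")
    #   assert "code=exited, status=2" in "".join(stdout)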

    # Delete index bucket and recreate it
    cb_server.delete_bucket(name="index-bucket")
    cb_server.create_bucket(name="index-bucket",
                            ram_quota_mb=ram_per_bucket_mb)

    # Start sync gateway, should succeed now
    sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                url=sg_url,
                                config=sg_conf)
Example No. 23
def test_stale_revision_should_not_be_in_the_index(setup_client_syncgateway_test):
    """original ticket: https://github.com/couchbase/couchbase-lite-android/issues/855

    scenario:
    1. Running sync_gateway
    2. Create database and starts both push and pull replicators through client REST API
    3. Create two or more views through client REST API
    4. Add doc, and verify doc is indexed with the current revision through client REST API
    5. Make sure document is pushed to sync gateway through sync gateway REST API
    6. Update doc with sync gateway (not client side) through sync gateway REST API
    7. Make sure updated document is pull replicated to client through client REST API
    8. Make sure updated document is indexed through client REST API
    9. Make sure stale revision is deleted from index through client REST API
    10. Pass criteria
    """

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 10
    num_revs = 100

    d_doc_name = "dd"
    sg_db = "db"
    sg_user_name = "sg_user"

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_stale_revision_should_not_be_in_the_index'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("num_revs: {}".format(num_revs))

    client = MobileRestClient()

    sg_user_channels = ["NBC"]
    client.create_user(url=sg_admin_url, db=sg_db, name=sg_user_name, password="******", channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url, db=sg_db, name=sg_user_name)

    view = """{
    "language" : "javascript",
    "views" : {
        "content_view" : {
            "map" : "function(doc, meta) { if (doc.content) { emit(doc._id, doc._rev); } }"
        },
        "update_view" : {
            "map" : "function(doc, meta) { emit(doc.updates, null); }"
        }
    }
}"""

    ls_db = client.create_database(url=ls_url, name="ls_db")

    # Setup continuous push / pull replication from ls_db to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

    design_doc_id = client.add_design_doc(url=ls_url, db=ls_db, name=d_doc_name, doc=view)
    client.get_doc(url=ls_url, db=ls_db, doc_id=design_doc_id)

    doc_body = document.create_doc(doc_id="doc_1", content={"hi": "I should be in the view"}, channels=sg_user_channels)

    log_info(doc_body)

    doc_body_2 = document.create_doc(doc_id="doc_2", channels=sg_user_channels)

    doc = client.add_doc(url=ls_url, db=ls_db, doc=doc_body)
    doc_2 = client.add_doc(url=ls_url, db=ls_db, doc=doc_body_2)

    content_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_id=design_doc_id, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows, expected_num_rows=1)

    update_view_rows = client.get_view(url=ls_url, db=ls_db, design_doc_id=design_doc_id, view_name="update_view")
    client.verify_view_row_num(view_response=update_view_rows, expected_num_rows=2)

    expected_docs_list = [doc, doc_2]
    client.verify_docs_present(url=sg_url, db=sg_db, expected_docs=expected_docs_list, auth=sg_session)

    updated_doc = client.update_doc(url=sg_url, db=sg_db, doc_id=doc["id"], number_updates=10, auth=sg_session)

    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=updated_doc)

    content_view_rows_2 = client.get_view(url=ls_url, db=ls_db, design_doc_id=design_doc_id, view_name="content_view")
    client.verify_view_row_num(view_response=content_view_rows_2, expected_num_rows=1)

    client.verify_view_contains_keys(view_response=content_view_rows_2, keys=doc["id"])
    client.verify_view_contains_values(view_response=content_view_rows_2, values=updated_doc["rev"])
Example No. 24
def test_log_maxage_10_timestamp_ignored(params_from_base_test_setup,
                                         sg_conf_name):
    """Test to verify SG continues to wrile logs in the same file even when
     timestamp for the log file has been changed
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate log file with almost 1MB
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set maxage = 10 days
    data['logging']["default"]["rotation"]["maxage"] = 10
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)
    # ~1 MB will be added to the log file by these requests
    remote_executor.execute(
        "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
    )

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    # change the timestamp of the log while SG is stopped (we don't change the file naming)
    remote_executor.execute(
        "sudo touch -d \"10 days ago\" /tmp/sg_logs/sg_log_rotation*")

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)

    _, stdout, _ = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    # verify that new log file was not created
    assert stdout[0].rstrip() == '2'
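    # (two files are expected: the live log plus the backup produced during
    # the earlier run; back-dating the mtime alone must not trigger another
    # rotation, even with maxage = 10)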

    # Remove generated conf file
    os.remove(temp_conf)
Example No. 25
def test_log_number_backups(params_from_base_test_setup, sg_conf_name):
    """Test to check general behaviour for number of backups.
     In test the following params have been used:
        "maxsize": 1,
        "maxage": 30,
        "maxbackups": 2
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate log file with almost 1MB
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
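    # (with maxsize = 1, the seeded ~1 MB file plus the ~1 MB of request
    # logging below is expected to force one rotation per iteration)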
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # iterate 5 times
    for i in xrange(5):
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # ~1 MB will be added to the log file by these requests
        remote_executor.execute(
            "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
        )

        _, stdout, _ = remote_executor.execute(
            "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
        # max 3 files: 2 backups + 1 log file
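        # (iteration i should see the live log plus i + 1 backups, capped by
        # maxbackups = 2, hence min(3, i + 2))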
        assert stdout[0].rstrip() == str(min(3, i + 2))

        sg_helper.stop_sync_gateways(cluster_config=cluster_conf,
                                     url=sg_one_url)
        # generate log file with almost 1MB
        remote_executor.execute(
            "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
        )

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=sg_conf)
Example No. 26
def test_upgrade(params_from_base_test_setup):
    """
    @summary
        The initial versions of SG and CBS have already been provisioned at this point.
        We have to upgrade them to the upgraded versions.
    """
    cluster_config = params_from_base_test_setup['cluster_config']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']
    ls_url = params_from_base_test_setup["ls_url"]
    server_version = params_from_base_test_setup['server_version']
    sync_gateway_version = params_from_base_test_setup['sync_gateway_version']
    server_upgraded_version = params_from_base_test_setup[
        'server_upgraded_version']
    sync_gateway_upgraded_version = params_from_base_test_setup[
        'sync_gateway_upgraded_version']
    sg_url = params_from_base_test_setup['sg_url']
    sg_admin_url = params_from_base_test_setup['sg_admin_url']
    num_docs = int(params_from_base_test_setup['num_docs'])
    cbs_platform = params_from_base_test_setup['cbs_platform']
    cbs_toy_build = params_from_base_test_setup['cbs_toy_build']
    sg_conf = "{}/resources/sync_gateway_configs/sync_gateway_default_functional_tests_{}.json".format(
        os.getcwd(), mode)

    # Add data to liteserv
    client = MobileRestClient()
    log_info("ls_url: {}".format(ls_url))
    ls_db = client.create_database(ls_url, name="ls_db")

    # Create user and session on SG
    sg_user_channels = ["sg_user_channel"]
    sg_db = "db"
    sg_user_name = "sg_user"
    sg_user_password = "******"
    client.create_user(url=sg_admin_url,
                       db=sg_db,
                       name=sg_user_name,
                       password=sg_user_password,
                       channels=sg_user_channels)
    sg_session = client.create_session(url=sg_admin_url,
                                       db=sg_db,
                                       name=sg_user_name,
                                       password=sg_user_password)

    log_info(
        "Starting continuous push pull replication from liteserv to sync gateway"
    )
    repl_one = client.start_replication(url=ls_url,
                                        continuous=True,
                                        from_db=ls_db,
                                        to_url=sg_url,
                                        to_db=sg_db,
                                        to_auth=sg_session)
    client.wait_for_replication_status_idle(ls_url, repl_one)

    log_info("Starting replication from sync gateway to liteserv")
    client.start_replication(url=ls_url,
                             continuous=True,
                             from_url=sg_url,
                             from_db=sg_db,
                             from_auth=sg_session,
                             to_db=ls_db)

    # Add docs to liteserv
    added_docs = add_docs_to_client_task(client=client,
                                         url=ls_url,
                                         db=ls_db,
                                         channels=sg_user_channels,
                                         num_docs=num_docs)
    log_info("Added {} docs".format(len(added_docs)))

    # start updating docs
    terminator_doc_id = 'terminator'
    with ProcessPoolExecutor() as up:
        # Start updates in background process
        updates_future = up.submit(update_docs, client, ls_url, ls_db,
                                   added_docs, sg_session, terminator_doc_id)

        # Supported upgrade process
        # 1. Upgrade SGs first docmeta -> docmeta - CBS 5.0.0 does not support TAP.
        # 2. Upgrade the CBS cluster.
        # 3. Enable import/xattrs on SGs

        # Upgrade SG docmeta -> docmeta
        cluster_util = ClusterKeywords()
        topology = cluster_util.get_cluster_topology(cluster_config,
                                                     lb_enable=False)
        sync_gateways = topology["sync_gateways"]
        sg_accels = topology["sg_accels"]

        upgrade_sync_gateway(sync_gateways, sync_gateway_version,
                             sync_gateway_upgraded_version, sg_conf,
                             cluster_config)

        if mode == "di":
            upgrade_sg_accel(sg_accels, sync_gateway_version,
                             sync_gateway_upgraded_version, sg_conf,
                             cluster_config)

        # Upgrade CBS
        cluster = Cluster(config=cluster_config)
        if len(cluster.servers) < 3:
            raise Exception("Please provide at least 3 servers")

        server_urls = []
        for server in cluster.servers:
            server_urls.append(server.url)

        primary_server = cluster.servers[0]
        secondary_server = cluster.servers[1]
        servers = cluster.servers[1:]

        upgrade_server_cluster(servers,
                               primary_server,
                               secondary_server,
                               server_version,
                               server_upgraded_version,
                               server_urls,
                               cluster_config,
                               cbs_platform,
                               toy_build=cbs_toy_build)

        # Restart SGs after the server upgrade
        sg_obj = SyncGateway()
        for sg in sync_gateways:
            sg_ip = host_for_url(sg["admin"])
            log_info("Restarting sync gateway {}".format(sg_ip))
            sg_obj.restart_sync_gateways(cluster_config=cluster_config,
                                         url=sg_ip)
            time.sleep(5)

        if mode == "di":
            ac_obj = SyncGateway()
            for ac in sg_accels:
                ac_ip = host_for_url(ac)
                log_info("Restarting sg accel {}".format(ac_ip))
                ac_obj.restart_sync_gateways(cluster_config=cluster_config,
                                             url=ac_ip)
                time.sleep(5)

        if xattrs_enabled:
            # Enable xattrs on all SG/SGAccel nodes
            # cc - Start 1 SG with import enabled, all with XATTRs enabled
            # di - All SGs/SGAccels with xattrs enabled - this will also enable import on SGAccel
            #    - Do not enable import in SG.
            if mode == "cc":
                enable_import = True
            elif mode == "di":
                enable_import = False

            if mode == "di":
                ac_obj = SyncGateway()
                for ac in sg_accels:
                    ac_ip = host_for_url(ac)
                    ac_obj.enable_import_xattrs(cluster_config=cluster_config,
                                                sg_conf=sg_conf,
                                                url=ac_ip,
                                                enable_import=False)

            sg_obj = SyncGateway()
            for sg in sync_gateways:
                sg_ip = host_for_url(sg["admin"])
                sg_obj.enable_import_xattrs(cluster_config=cluster_config,
                                            sg_conf=sg_conf,
                                            url=sg_ip,
                                            enable_import=enable_import)
                enable_import = False
                # Check Import showing up on all nodes

        send_changes_termination_doc(auth=sg_session,
                                     terminator_doc_id=terminator_doc_id,
                                     terminator_channel=sg_user_channels,
                                     ls_url=ls_url,
                                     ls_db=ls_db)
        log_info("Waiting for doc updates to complete")
        updated_doc_revs = updates_future.result()

        log_info("Stopping replication from liteserv to sync gateway")
        # Stop repl_one
        client.stop_replication(url=ls_url,
                                continuous=True,
                                from_db=ls_db,
                                to_url=sg_url,
                                to_db=sg_db,
                                to_auth=sg_session)

        log_info("Stopping replication from sync gateway to liteserv")
        # Stop repl_two
        client.stop_replication(url=ls_url,
                                continuous=True,
                                from_url=sg_url,
                                from_db=sg_db,
                                from_auth=sg_session,
                                to_db=ls_db)
        # Gather the new revs for verification
        log_info("Gathering the updated revs for verification")
        doc_ids = []
        for i in range(len(added_docs)):
            doc_ids.append(added_docs[i]["id"])
            if added_docs[i]["id"] in updated_doc_revs:
                added_docs[i]["rev"] = updated_doc_revs[added_docs[i]["id"]]

        # Verify rev, doc body and revision history of all docs
        verify_sg_docs_revision_history(url=sg_admin_url,
                                        db=sg_db,
                                        added_docs=added_docs)

        if xattrs_enabled:
            # Verify through SDK that there is no _sync property in the doc body
            bucket_name = 'data-bucket'
            sdk_client = Bucket('couchbase://{}/{}'.format(
                primary_server.host, bucket_name),
                                password='******',
                                timeout=SDK_TIMEOUT)
            log_info("Fetching docs from SDK")
            docs_from_sdk = sdk_client.get_multi(doc_ids)

            log_info("Verifying that there is no _sync property in the docs")
            for i in docs_from_sdk:
                if "_sync" in docs_from_sdk[i].value:
                    raise Exception(
                        "_sync section found in docs after upgrade")
def test_replication_with_session_cookie(setup_client_syncgateway_test):
    """Regression test for https://github.com/couchbase/couchbase-lite-android/issues/817
    1. SyncGateway Config with guest disabled = true and One user added (e.g. user1 / 1234)
    2. Create a new session on SGW for the user1 by using POST /_session.
       Capture the SyncGatewaySession cookie from the set-cookie in the response header.
    3. Start continuous push and pull replicator on the LiteServ with SyncGatewaySession cookie.
       Make sure that both replicators start correctly
    4. Delete the session from SGW by sending DELETE /_sessions/ to SGW
    5. Cancel both push and pull replicator on the LiteServ
    6. Repeat step 1 and 2
    """

    ls_db = "ls_db"
    sg_db = "db"

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus-user.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_session_cookie'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()
    client.create_database(url=ls_url, name=ls_db)

    # Get session header for user_1
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
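    # (the Set-Cookie header has the form "SyncGatewaySession=<id>; Path=...",
    # so splitting on '=' and ';' yields the cookie name at [0] and the
    # session id at [1])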
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))
    session = (session_parts[0], session_id)

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    # Wait for 2 replications to be 'Idle', On .NET they may not be immediately available via _active_tasks
    client.wait_for_replication_status_idle(ls_url, repl_one)
    client.wait_for_replication_status_idle(ls_url, repl_two)

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running"

    num_docs_pushed = 100

    # Sanity test docs
    ls_docs = client.add_docs(url=ls_url, db=ls_db, number=num_docs_pushed, id_prefix="ls_doc", channels=["ABC"])
    assert len(ls_docs) == num_docs_pushed

    sg_docs = client.add_docs(url=sg_url, db=sg_db, number=num_docs_pushed, id_prefix="sg_doc", auth=session, channels=["ABC"])
    assert len(sg_docs) == num_docs_pushed

    all_docs = client.merge(ls_docs, sg_docs)
    log_info(all_docs)

    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db, expected_docs=all_docs)

    # GET from session endpoint /{db}/_session/{session-id}
    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and _user rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, user_name="user_1", session_id=session_id)

    # Make sure session is deleted
    try:
        session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        expected_error_code = he.response.status_code
        log_info(expected_error_code)

    assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)

    # Cancel the replications
    # Stop repl_one
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Stop repl_two
    client.stop_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    client.wait_for_no_replications(ls_url)
    replications = client.get_replications(ls_url)
    assert len(replications) == 0, "All replications should be stopped"

    # Create new session and new push / pull replications
    session_header = client.create_session_header(url=sg_url, db=sg_db, name="user_1", password="******")

    # Get session id from header
    session_parts = re.split("=|;", session_header)
    session_id = session_parts[1]
    log_info("{}: {}".format(session_parts[0], session_id))

    # Start authenticated push replication
    repl_one = client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db,
        to_url=sg_url,
        to_db=sg_db,
        to_auth=session_header
    )

    # Start authenticated pull replication
    repl_two = client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_url,
        from_db=sg_db,
        from_auth=session_header,
        to_db=ls_db,
    )

    replications = client.get_replications(ls_url)
    assert len(replications) == 2, "2 replications (push / pull) should be running, found: {}".format(len(replications))

    session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    assert len(session["userCtx"]["channels"]) == 2, "There should be only 2 channels for the user"
    assert "ABC" in session["userCtx"]["channels"], "The channel info should contain 'ABC'"
    assert session["userCtx"]["name"] == "user_1", "The user should have the name 'user_1'"
    assert len(session["authentication_handlers"]) == 2, "There should be 2 authentication_handlers"
    assert "default" in session["authentication_handlers"], "Did not find 'default' in authentication_headers"
    assert "cookie" in session["authentication_handlers"], "Did not find 'cookie' in authentication_headers"

    log_info("SESSIONs: {}".format(session))

    # Delete session via sg admin port and db rest endpoint
    client.delete_session(url=sg_admin_url, db=sg_db, session_id=session_id)

    # Make sure session is deleted
    try:
        session = client.get_session(url=sg_admin_url, db=sg_db, session_id=session_id)
    except HTTPError as he:
        expected_error_code = he.response.status_code
        log_info(expected_error_code)

    assert expected_error_code == 404, "Expected 404 status, actual {}".format(expected_error_code)
Example No. 28
def test_system_test(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup['cluster_config']
    mode = params_from_base_test_setup['mode']

    # Scenario parameters
    server_seed_docs = int(params_from_base_test_setup['server_seed_docs'])
    max_docs = int(params_from_base_test_setup['max_docs'])
    num_users = int(params_from_base_test_setup['num_users'])

    # Create parameters
    create_batch_size = int(params_from_base_test_setup['create_batch_size'])
    create_delay = float(params_from_base_test_setup['create_delay'])

    # Update parameters
    update_runtime_sec = int(params_from_base_test_setup['update_runtime_sec'])
    update_batch_size = int(params_from_base_test_setup['update_batch_size'])
    update_docs_percentage = float(
        params_from_base_test_setup['update_docs_percentage'])
    update_delay = float(params_from_base_test_setup['update_delay'])

    # Changes parameters
    changes_delay = float(params_from_base_test_setup['changes_delay'])
    changes_limit = int(params_from_base_test_setup['changes_limit'])

    changes_terminator_doc_id = 'terminator'

    docs_per_user = max_docs / num_users
    docs_per_user_per_update = int(update_docs_percentage * docs_per_user)

    log_info('Running System Test #1')
    log_info('> server_seed_docs          = {}'.format(server_seed_docs))
    log_info('> max_docs                  = {}'.format(max_docs))
    log_info('> num_users                 = {}'.format(num_users))
    log_info('> docs_per_user             = {}'.format(docs_per_user))
    log_info('> create_batch_size         = {}'.format(create_batch_size))
    log_info('> create_delay              = {}'.format(create_delay))
    log_info('> update_batch_size         = {}'.format(update_batch_size))
    log_info('> update_docs_percentage    = {}'.format(update_docs_percentage))
    log_info(
        '> docs_per_user_per_update  = {}'.format(docs_per_user_per_update))
    log_info('> update_delay              = {}'.format(update_delay))
    log_info('> update_runtime_sec        = {}'.format(update_runtime_sec))
    log_info('> changes_delay             = {}'.format(changes_delay))
    log_info('> changes_limit             = {}'.format(changes_limit))
    log_info(
        '> changes_terminator_doc_id = {}'.format(changes_terminator_doc_id))

    # Validate
    # Server docs should be a multiple of 1000 for batching purposes
    if server_seed_docs % 1000 != 0:
        raise ValueError('server_seed_docs must be divisible by 1000')

    # Number of docs should be equally divisible by number of users
    if max_docs % num_users != 0:
        raise ValueError('max_docs must be divisible by number_of_users')

    # Number of docs per user (max_docs / num_users) should be equally
    # divisible by the batch size for easier computation
    if docs_per_user % create_batch_size != 0:
        raise ValueError(
            'docs_per_user ({}) must be divisible by create_batch_size ({})'.
            format(docs_per_user, create_batch_size))

    # We want an even distribution of users per type
    if num_users % len(USER_TYPES) != 0:
        raise ValueError(
            "'num_users' should be a multiple of the number of user types ({})".format(len(USER_TYPES)))

    # Make sure that the 'update_batch_size' is compatible with
    # the number of users per type
    num_users_per_type = num_users / len(USER_TYPES)
    if update_batch_size > num_users_per_type:
        raise ValueError(
            "'batch_size' cannot be larger than number of users per type")

    if num_users_per_type % update_batch_size != 0:
        raise ValueError(
            "'num_users_per_type' ({}) should be a multiple of 'update_batch_size' ({})"
            .format(num_users_per_type, update_batch_size))

    sg_conf_name = 'sync_gateway_default'
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # Reset cluster state
    c = Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology['couchbase_servers'][0]
    cbs_admin_url = cbs_url.replace('8091', '8092')
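    # (8092 is the Couchbase Server views/CAPI port, used below when deleting
    # and polling design documents)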
    cb_server = couchbaseserver.CouchbaseServer(cbs_url)
    bucket_name = "data-bucket"

    cbs_ip = cb_server.host

    headers = {'Content-Type': 'application/json'}
    cbs_session = Session()
    cbs_session.headers = headers
    cbs_session.auth = ('Administrator', 'password')

    log_info('Seeding {} with {} docs'.format(cbs_ip, server_seed_docs))
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******',
                        timeout=300)

    # Stop SG before loading the server
    lb_url = topology['sync_gateways'][0]['public']
    sg_admin_url = topology['sync_gateways'][0]['admin']
    sg_db = 'db'

    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_config)

    # Scenario Actions
    delete_views(cbs_session, cbs_admin_url, bucket_name)
    load_bucket(sdk_client, server_seed_docs)
    sg_helper.start_sync_gateways(cluster_config, config=sg_conf)
    wait_for_view_creation(cbs_session, cbs_admin_url, bucket_name)

    # Start concurrent creation of docs (max docs / num users)
    # Each user will add batch_size number of docs via bulk docs and sleep for 'create_delay'
    # Once a user has added number of expected docs 'docs_per_user', it will terminate.
    log_info('------------------------------------------')
    log_info('START concurrent user / doc creation')
    log_info('------------------------------------------')
    users = create_docs(sg_admin_url=sg_admin_url,
                        sg_url=lb_url,
                        sg_db=sg_db,
                        num_users=num_users,
                        number_docs_per_user=docs_per_user,
                        create_batch_size=create_batch_size,
                        create_delay=create_delay)
    assert len(users) == num_users
    log_info('------------------------------------------')
    log_info('END concurrent user / doc creation')
    log_info('------------------------------------------')

    # Start changes processing
    with ProcessPoolExecutor(max_workers=((len(users) * 3) +
                                          update_batch_size + 3)) as pex:
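        # (worker count sized for what appear to be three changes feeds per
        # user, plus update_batch_size concurrent updaters, plus a little
        # slack for the coordinating tasks)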

        # Start changes feeds in background process
        changes_workers_task = pex.submit(start_changes_processing, lb_url,
                                          sg_db, users, changes_delay,
                                          changes_limit,
                                          changes_terminator_doc_id)

        log_info('------------------------------------------')
        log_info('START concurrent updates')
        log_info('------------------------------------------')
        # Start concurrent updates of docs
        # Update batch size is the number of users that will concurrently update all of their docs
        users = update_docs(sg_url=lb_url,
                            sg_db=sg_db,
                            users=users,
                            update_runtime_sec=update_runtime_sec,
                            batch_size=update_batch_size,
                            docs_per_user_per_update=docs_per_user_per_update,
                            update_delay=update_delay)

        all_user_channels = []
        for k, v in users.items():
            log_info('User ({}) updated docs {} times!'.format(
                k, v['updates']))
            all_user_channels.append(k)

        log_info('------------------------------------------')
        log_info('END concurrent updates')
        log_info('------------------------------------------')

        # Broadcast termination doc to all users
        terminator_channel = 'terminator'
        send_changes_termination_doc(lb_url, sg_db, users,
                                     changes_terminator_doc_id,
                                     terminator_channel)

        # Overwrite each users channels with 'terminator' so their changes feed will backfill with the termination doc
        grant_users_access(users, [terminator_channel], sg_admin_url, sg_db)

        # Block on changes completion
        users = changes_workers_task.result()

        # Print the summary of the system test
        print_summary(users)
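Example No. 29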
def test_replication_with_multiple_client_dbs_and_single_sync_gateway_db(setup_client_syncgateway_test):
    """Test replication from multiple client dbs to one sync_gateway db"""

    cluster_config = setup_client_syncgateway_test["cluster_config"]
    ls_url = setup_client_syncgateway_test["ls_url"]
    sg_url = setup_client_syncgateway_test["sg_url"]
    sg_admin_url = setup_client_syncgateway_test["sg_admin_url"]

    num_docs = 1000

    sg_helper = SyncGateway()
    sg_helper.start_sync_gateway(
        cluster_config=cluster_config,
        url=sg_url,
        config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS)
    )

    log_info("Running 'test_replication_with_multiple_client_dbs_and_single_sync_gateway_db'")
    log_info("ls_url: {}".format(ls_url))
    log_info("sg_admin_url: {}".format(sg_admin_url))
    log_info("sg_url: {}".format(sg_url))

    client = MobileRestClient()

    ls_db1 = client.create_database(url=ls_url, name="ls_db1")
    ls_db2 = client.create_database(url=ls_url, name="ls_db2")
    sg_db = client.create_database(url=sg_admin_url, name="sg_db", server="walrus:")

    # Setup continuous push / pull replication from ls_db1 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db1,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db1
    )

    # Setup continuous push / pull replication from ls_db2 to sg_db
    client.start_replication(
        url=ls_url,
        continuous=True,
        from_db=ls_db2,
        to_url=sg_admin_url, to_db=sg_db
    )

    client.start_replication(
        url=ls_url,
        continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db2
    )

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db1, number=num_docs, id_prefix=ls_db1)
    assert len(ls_db_one_docs) == num_docs

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db2, number=num_docs, id_prefix=ls_db2)
    assert len(ls_db_two_docs) == num_docs

    ls_db1_db2_docs = ls_db_one_docs + ls_db_two_docs

    client.verify_docs_present(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs)
    client.verify_docs_present(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
    client.verify_docs_present(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs)

    client.verify_docs_in_changes(url=sg_admin_url, db=sg_db, expected_docs=ls_db1_db2_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db1, expected_docs=ls_db1_db2_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db2, expected_docs=ls_db1_db2_docs)
Example No. 30
def test_view_backfill_for_deletes(params_from_base_test_setup, sg_conf_name,
                                   validate_changes_before_restart):
    """
    Scenario:
    1. Write a bunch of docs
    2. Delete 1/2
    3. Restart Sync Gateway
    4. Issue _changes, assert view backfills docs and delete notifications
    """

    num_docs = 1000
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))
    log_info('validate_changes_before_restart: {}'.format(
        validate_changes_before_restart))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()

    # Create user / session
    seth_user_info = UserInfo(name='seth',
                              password='******',
                              channels=['NASA', 'NATGEO'],
                              roles=[])
    sg_client.create_user(url=sg_admin_url,
                          db=sg_db,
                          name=seth_user_info.name,
                          password=seth_user_info.password,
                          channels=seth_user_info.channels)

    seth_auth = sg_client.create_session(url=sg_admin_url,
                                         db=sg_db,
                                         name=seth_user_info.name,
                                         password=seth_user_info.password)

    # Add 'num_docs' to Sync Gateway
    doc_bodies = document.create_docs('test_doc',
                                      number=num_docs,
                                      channels=seth_user_info.channels)
    bulk_resp = sg_client.add_bulk_docs(url=sg_url,
                                        db=sg_db,
                                        docs=doc_bodies,
                                        auth=seth_auth)
    assert len(bulk_resp) == num_docs

    # Delete half of the docs randomly
    deleted_docs = []
    for _ in range(num_docs / 2):
        random_doc = random.choice(bulk_resp)
        deleted_doc = sg_client.delete_doc(url=sg_url,
                                           db=sg_db,
                                           doc_id=random_doc['id'],
                                           rev=random_doc['rev'],
                                           auth=seth_auth)
        deleted_docs.append(deleted_doc)
        bulk_resp.remove(random_doc)

    log_info('Number of docs deleted: {}'.format(len(deleted_docs)))
    all_docs = bulk_resp + deleted_docs
    log_info('Number of docs to look for in changes: {}'.format(len(all_docs)))

    # This test will check changes before and after SG restart if
    # validate_changes_before_restart == True
    # If it is not set to True, only build the changes after restart
    if validate_changes_before_restart:
        # Verify deletions and initial docs show up in changes feed
        sg_client.verify_docs_in_changes(url=sg_url,
                                         db=sg_db,
                                         expected_docs=all_docs,
                                         auth=seth_auth)
        changes = sg_client.get_changes(url=sg_url,
                                        db=sg_db,
                                        since=0,
                                        auth=seth_auth)
        # All docs should show up + _user doc
        assert len(changes['results']) == num_docs + 1

        deleted_doc_ids = [doc['id'] for doc in deleted_docs]
        assert len(deleted_doc_ids) == num_docs / 2
        deleted_docs_in_changes = [
            change['id'] for change in changes['results']
            if 'deleted' in change and change['deleted']
        ]
        assert len(deleted_docs_in_changes) == num_docs / 2

        # All deleted docs should show up in the changes feed
        for doc_id in deleted_docs_in_changes:
            assert doc_id in deleted_doc_ids
            deleted_doc_ids.remove(doc_id)
        assert len(deleted_doc_ids) == 0

    # Restart Sync Gateway
    sg_controller = SyncGateway()
    sg_controller.stop_sync_gateways(url=sg_url, cluster_config=cluster_conf)
    sg_controller.start_sync_gateways(url=sg_url,
                                      cluster_config=cluster_conf,
                                      config=sg_conf)

    # Verify deletions and initial docs show up in changes feed
    sg_client.verify_docs_in_changes(url=sg_url,
                                     db=sg_db,
                                     expected_docs=all_docs,
                                     auth=seth_auth)

    changes = sg_client.get_changes(url=sg_url,
                                    db=sg_db,
                                    since=0,
                                    auth=seth_auth)
    # All docs should show up + _user doc
    assert len(changes['results']) == num_docs + 1

    deleted_doc_ids = [doc['id'] for doc in deleted_docs]
    assert len(deleted_doc_ids) == num_docs / 2
    deleted_docs_in_changes = [
        change['id'] for change in changes['results']
        if 'deleted' in change and change['deleted']
    ]
    assert len(deleted_docs_in_changes) == num_docs / 2

    # All deleted docs should show up in the changes feed
    for doc_id in deleted_docs_in_changes:
        assert doc_id in deleted_doc_ids
        deleted_doc_ids.remove(doc_id)
    assert len(deleted_doc_ids) == 0
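Example No. 31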
def test_mobile_opt_in(params_from_base_test_setup, sg_conf_name):
    """
    Scenario: Enable mobile opt in sync function in sync-gateway configuration file
    - Check xattrs/mobile-opt-in_cc or di json files
    - 8 cases covered
    - doc : https://docs.google.com/document/d/1XxLIBsjuj_UxTTJs4Iu7C7uZdos8ZEzeckrVc17y3sw/edit
    - #1 Create doc via sdk with mobile opt in and verify doc is imported
    - #2 Create doc via sdk with mobile opt out and verify doc is not imported
    - #3 Create doc via sg with mobile opt in and update via sdk and verify doc is imported
    - #4 Create doc via sg with mobile opt out and update via sdk and verify doc is not imported
         - Try to update same doc via sg and verify 409 conflict error is thrown
         - Create a doc with same doc id and verify doc is created successfully
    - #5 Create doc via sg with mobile opt out and update via sdk which created no revisions
         - Now do sdk create with mobile opt in should import case #5
    - #6 Create doc via sg with mobile opt out and update via sdk with opt in
         - Verify type is overridden and doc is imported
    - #7 Create doc via sg with mobile opt in and update via sdk with opt out
         - Verify type is overridden and doc is not imported
    - #8 Disable import in the sg config and have mobile opt in function
         Create doc via sdk with mobile property and verify sg update succeeds
    - #9 Same config as #8 and have mobile opt in function in config
         Create doc via sdk without mobile property and create new doc via sg with same doc id and
         verify it succeeds
    """

    bucket_name = 'data-bucket'
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']
    xattrs_enabled = params_from_base_test_setup['xattrs_enabled']

    # This test should only run when using xattr meta storage
    if not xattrs_enabled:
        pytest.skip('XATTR tests require --xattrs flag')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()
    cbs_ip = host_for_url(cbs_url)
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name), password='******', timeout=SDK_TIMEOUT)

    # Create user / session
    auto_user_info = UserInfo(name='autotest', password='******', channels=['mobileOptIn'], roles=[])
    sg_client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=auto_user_info.name,
        password=auto_user_info.password,
        channels=auto_user_info.channels
    )

    test_auth_session = sg_client.create_session(
        url=sg_admin_url,
        db=sg_db,
        name=auto_user_info.name,
        password=auto_user_info.password
    )

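    # These generators produce opt-in and opt-out doc bodies: the mobile
    # opt-in sync function keys off the "type" property, so only docs with
    # type "mobile" should be imported by Sync Gateway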
    def update_mobile_prop():
        return {
            'updates': 0,
            'type': 'mobile',
        }

    def update_non_mobile_prop():
        return {
            'updates': 0,
            'test': 'true',
            'type': 'mobile opt out',
        }

    # Create first doc via SDK with type mobile. Case #1
    doc_id1 = 'mobile_opt_in_sdk_doc'
    doc = document.create_doc(doc_id=doc_id1, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id1, doc)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('1-') and sg_get_doc1['_id'] == doc_id1
    # Additional coverage for case #1
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id1, number_updates=1, auth=test_auth_session)
    sg_get_doc1 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id1, auth=test_auth_session)
    assert sg_get_doc1['_rev'].startswith('2-') and sg_get_doc1['_id'] == doc_id1

    # Create second doc via SDK with type non mobile. Case #2
    doc_id2 = 'mobile_opt_out_sdk_doc'
    doc = document.create_doc(doc_id=doc_id2, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id2, doc)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id2, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')

    # Create third sg doc with mobile opt in and update via sdk. Case #3
    doc_id3 = 'mobile_opt_in_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id3, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    sg_get_doc3["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id3, sg_get_doc3)
    sg_get_doc3 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id3, auth=test_auth_session)
    assert sg_get_doc3['_rev'].startswith('2-') and sg_get_doc3['_id'] == doc_id3
    log_info("sg get doc3 is {}".format(sg_get_doc3))

    # Create fourth sg doc with mobile opt out and update via sdk. Case #4
    doc_id4 = 'mobile_opt_out_sg_doc'
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # update via SDK
    sg_get_doc4 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    rev = sg_get_doc4['_rev']
    sg_get_doc4["updated_sdk_via_sg"] = "1"
    sdk_client.upsert(doc_id4, sg_get_doc4)
    with pytest.raises(HTTPError) as he:
        sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id4, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # update via SG
    with pytest.raises(HTTPError) as he:
        sg_client.put_doc(url=sg_url, db=sg_db, doc_id=doc_id4, doc_body={'sg_rewrite': 'True'}, rev=rev, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    # Create the same doc again to verify there is no existing-key error (re-create sub-case of #4)
    doc_body = document.create_doc(doc_id=doc_id4, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sg_get_doc4_1 = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info("4th doc after recreate vis sg is {}".format(sg_get_doc4_1))
    assert sg_get_doc4_1['rev'].startswith('1-') and sg_get_doc4_1['id'] == doc_id4

    # Create fifth sg doc with mobile opt in and delete it, leaving no active revisions (i.e. a tombstone doc)
    # Now an sdk create with mobile opt in should be imported. Case #5
    doc_id5 = 'mobile_sdk_recreate_no_activerev'
    doc_body = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    rev = doc['rev']
    sg_client.delete_doc(url=sg_url, db=sg_db, doc_id=doc_id5, rev=rev, auth=test_auth_session)
    # At this point there are no active revisions for this doc, so an sdk create with mobile opt in
    # should be imported and the doc retrievable via sg
    doc = document.create_doc(doc_id=doc_id5, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id5, doc)
    sg_get_doc5 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id5, auth=test_auth_session)
    log_info("sg get doc 5 is {}".format(sg_get_doc5))
    assert sg_get_doc5['_rev'].startswith('1-') and sg_get_doc5['_id'] == doc_id5

    # Create sixth sg doc with mobile opt out and update via sdk with opt in. Case #6
    doc_id6 = 'mobileoptout_sg_doc_sdkupdate_optin'
    doc_body = document.create_doc(doc_id=doc_id6, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    log_info("Sg sixth doc is {}".format(sg_get_doc6))
    sg_get_doc6["type"] = "mobile"
    sdk_client.upsert(doc_id6, sg_get_doc6)
    sg_get_doc6 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id6, auth=test_auth_session)
    assert sg_get_doc6['_rev'].startswith('2-') and sg_get_doc6['_id'] == doc_id6

    # Create seventh sg doc with mobile opt in and update via sdk with opt out
    doc_id7 = 'mobileoptin_sg_doc_sdkupdate_optout'
    doc_body = document.create_doc(doc_id=doc_id7, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    doc = sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    log_info("Sg sixth doc is {}".format(sg_get_doc7))
    sg_get_doc7["type"] = "mobile opt out"
    sdk_client.upsert(doc_id7, sg_get_doc7)
    with pytest.raises(HTTPError) as he:
        sg_get_doc7 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id7, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('404 Client Error: Not Found for url:')
    # TODO: verify via _changes that tombstone revisions are shown -> it will happen in 2.0

    # Create eighth doc via sdk with import disabled, add a mobile property, and update via sg. Case #7
    sg_conf_name = "xattrs/mobile_opt_in_no_import"
    sg_no_import_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_util = SyncGateway()
    sg_util.start_sync_gateways(cluster_config=cluster_conf, url=sg_url, config=sg_no_import_conf)

    doc_id8 = 'mobile_opt_in_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id8, channels=['mobileOptIn'], prop_generator=update_mobile_prop)
    sdk_client.upsert(doc_id8, doc_body)
    with pytest.raises(HTTPError) as he:
        sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    log_info(he.value)
    assert he.value.message.startswith('409 Client Error: Conflict for url:')
    sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc8 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id8, auth=test_auth_session)
    assert sg_get_doc8['_rev'].startswith('2-') and sg_get_doc8['_id'] == doc_id8

    # Create ninth doc via sdk with import disabled and non mobile properties, then update via sg. Case #8
    doc_id9 = 'mobile_opt_out_sg_rewrite_with_importdisabled'
    doc_body = document.create_doc(doc_id=doc_id9, channels=['mobileOptIn'], prop_generator=update_non_mobile_prop)
    sdk_client.upsert(doc_id9, doc_body)
    sg_client.add_doc(url=sg_url, db=sg_db, doc=doc_body, auth=test_auth_session)
    # sg_client.update_doc(url=sg_url, db=sg_db, doc_id=doc_id8, number_updates=1, auth=test_auth_session)
    sg_get_doc9 = sg_client.get_doc(url=sg_url, db=sg_db, doc_id=doc_id9, auth=test_auth_session)
    assert sg_get_doc9['_rev'].startswith('1-') and sg_get_doc9['_id'] == doc_id9
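
# The checks above repeat one pattern: call Sync Gateway and assert that the
# request fails with a specific HTTP status. A hypothetical helper like the
# sketch below (not part of the original suite; it assumes the same pytest,
# HTTPError and log_info imports already used in this test) would factor
# that pattern out.
def assert_sg_http_error(prefix, func, *args, **kwargs):
    """Call func and assert it raises an HTTPError whose message starts with prefix."""
    with pytest.raises(HTTPError) as he:
        func(*args, **kwargs)
    log_info(he.value)
    assert he.value.message.startswith(prefix)

# Usage, mirroring the doc_id4 checks above:
# assert_sg_http_error('404 Client Error: Not Found for url:',
#                      sg_client.get_doc, url=sg_url, db=sg_db,
#                      doc_id=doc_id4, auth=test_auth_session)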
Ejemplo n.º 32
0
def test_log_rotation_default_values(params_from_base_test_setup,
                                     sg_conf_name):
    """Test to verify default values for rotation section:
    maxsize = 100 MB
    MaxAge = 0(do not limit the number of MaxAge)
    MaxBackups = 0(do not limit the number of backups)
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # delete rotation from sample config
    del data['logging']["default"]["rotation"]
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    log_info("TEMP_CONF: {}".format(temp_conf))

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    remote_executor.execute("mkdir -p /tmp/sg_logs")

    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate a log file of ~95 MB to verify that no backup file is created before 100 MB is reached
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=94850000 count=1"
    )

    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")
    # iterate 5 times to verify that each iteration produces a new ~100 MB backup file
    for i in xrange(5):
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
        # ~1 MB will be added to the log file by these requests
        remote_executor.execute(
            "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
        )

        _, stdout, _ = remote_executor.execute(
            "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
        # verify num of log files
        assert stdout[0].rstrip() == str(i + 1)

        sg_helper.stop_sync_gateways(cluster_config=cluster_conf,
                                     url=sg_one_url)
        # generate a log file just under the 100 MB limit (~99.99 MB)
        remote_executor.execute(
            "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=104850000 count=1"
        )

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=sg_conf)

    # Remove generated conf file
    os.remove(temp_conf)
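
# For reference, the "rotation" block deleted from the sample config above is
# expected to look roughly like this sketch. The field names follow the
# defaults listed in the docstring; the exact schema is an assumption and
# should be checked against the Sync Gateway logging documentation for the
# version under test.
example_logging_section = {
    "logging": {
        "default": {
            "logFilePath": "/tmp/sg_logs/sg_log_rotation.log",
            "rotation": {
                "maxsize": 100,    # rotate once the log reaches 100 MB
                "maxage": 0,       # 0 = never delete backups based on age
                "maxbackups": 0    # 0 = keep an unlimited number of backups
            }
        }
    }
}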
Ejemplo n.º 33
0
def test_attachment_revpos_when_ancestor_unavailable(
        params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with attachment at rev-1
       2. Document is updated (strip digests and length, only put revpos & stub) multiple times on the server, goes to rev-10
       3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
       4. If the body of rev-1 is no longer available on the server (temporary backup of revision has expired, and is no longer stored
         in the in-memory rev cache), we were throwing an error to client
         because we couldn't verify based on the _attachments property in rev-1.
       5. In this scenario, before returning error, we are now checking if the active revision has a common ancestor with the incoming revision.
    If so, we can validate any revpos values equal to or earlier than the common ancestor against the active revision
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is not enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = couchbaseserver.CouchbaseServer(cbs_url)

    user1 = client.create_user(url=sg_url_admin,
                               db=sg_db,
                               name="user1",
                               password="******",
                               channels=channels_list)
    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc",
                                       content={"sample_key": "sample_val"},
                                       attachments=atts,
                                       channels=channels_list)

    doc_gen_1 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_with_att,
                               auth=user1)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_gen_1["id"],
                      number_updates=10,
                      auth=user1)

    # Clear cached rev doc bodies from server and cycle sync_gateway
    sg_util.stop_sync_gateways(cluster_config=cluster_config, url=sg_url)

    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateways(cluster_config=cluster_config,
                                url=sg_url,
                                config=sg_conf)

    client.add_conflict(url=sg_url,
                        db=sg_db,
                        doc_id=doc_gen_1["id"],
                        parent_revisions=doc_gen_1["rev"],
                        new_revision="2-foo",
                        auth=user1)
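
# add_conflict() above injects a conflicting revision "2-foo" whose parent is
# rev-1. Against the CouchDB-style REST API that Sync Gateway exposes, the same
# effect can be sketched with a raw PUT using new_edits=false, which stores the
# supplied revision id as-is instead of generating a new one. This is an
# illustrative assumption about what the helper does, not its actual
# implementation; the auth parameter is assumed to be a (name, password)
# tuple usable for basic auth.
import requests


def add_conflict_raw(sg_url, sg_db, doc_id, parent_rev, new_rev, body, auth):
    parent_digest = parent_rev.split("-")[1]
    new_gen, new_digest = new_rev.split("-")
    conflict_body = dict(body)
    # _revisions lists the revision history, newest digest first
    conflict_body["_revisions"] = {
        "start": int(new_gen),
        "ids": [new_digest, parent_digest]
    }
    resp = requests.put(
        "{}/{}/{}".format(sg_url, sg_db, doc_id),
        json=conflict_body,
        params={"new_edits": "false"},
        auth=auth
    )
    resp.raise_for_status()
    return resp.json()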
Ejemplo n.º 34
0
def test_attachment_revpos_when_ancestor_unavailable(params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with attachment at rev-1
       2. Document is updated (strip digests and length, only put revpos & stub) multiple times on the server, goes to rev-10
       3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
       4. If the body of rev-1 is no longer available on the server (temporary backup of revision has expired, and is no longer stored
         in the in-memory rev cache), we were throwing an error to client
         because we couldn't verify based on the _attachments property in rev-1.
       5. In this scenario, before returning error, we are now checking if the active revision has a common ancestor with the incoming revision.
    If so, we can validate any revpos values equal to or earlier than the common ancestor against the active revision
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = CouchbaseServer(cbs_url)

    user1 = client.create_user(url=sg_url_admin, db=sg_db, name="user1", password="******", channels=channels_list)
    doc_with_att = document.create_doc(doc_id="att_doc", content={"sample_key": "sample_val"}, attachment_name="sample_text.txt", channels=channels_list)

    doc_gen_1 = client.add_doc(url=sg_url, db=sg_db, doc=doc_with_att, auth=user1)
    client.update_doc(url=sg_url, db=sg_db, doc_id=doc_gen_1["id"], number_updates=10, auth=user1)

    # Clear cached rev doc bodies from server and cycle sync_gateway
    sg_util.stop_sync_gateway(cluster_config=cluster_config, url=sg_url)

    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_url, config=sg_conf)

    client.add_conflict(
        url=sg_url, db=sg_db,
        doc_id=doc_gen_1["id"],
        parent_revisions=doc_gen_1["rev"],
        new_revision="2-foo",
        auth=user1
    )
Ejemplo n.º 35
0
def test_deleted_docs_from_changes_active_only(params_from_base_test_setup,
                                               sg_conf_name):
    """
    https://github.com/couchbase/sync_gateway/issues/2955
    1. Create a document
    2. Delete the document
    3. Restart Sync Gateway (to force rebuild of cache from view)
    4. Issue an active_only=true changes request
    5. Issue an active_only=false changes request
    The deleted document was not being included in the result set in step 5.
    """
    cluster_config = params_from_base_test_setup["cluster_config"]
    topology = params_from_base_test_setup["cluster_topology"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    num_docs = 10
    client = MobileRestClient()

    # Add doc to SG
    added_doc = client.add_docs(url=sg_admin_url,
                                db=sg_db,
                                number=num_docs,
                                id_prefix="test_changes")

    # Delete 1 doc
    doc_id = added_doc[0]["id"]
    log_info("Deleting {}".format(doc_id))
    doc = client.get_doc(url=sg_admin_url, db=sg_db, doc_id=doc_id)
    doc_rev = doc['_rev']
    client.delete_doc(sg_admin_url, sg_db, doc_id, doc_rev)

    # Restart SG
    sg_obj = SyncGateway()
    sg_obj.restart_sync_gateways(cluster_config)

    # Changes request with active_only=true
    session = Session()
    request_url = "{}/{}/_changes?active_only=true".format(sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()
    log_info(
        "Checking that the deleted doc is not included in the active_only=true changes request"
    )
    for d in resp_obj["results"]:
        assert doc_id not in d

    # Changes request with active_only=false
    request_url = "{}/{}/_changes?active_only=false".format(
        sg_admin_url, sg_db)
    log_info("Issuing changes request {}".format(request_url))
    resp = session.get(request_url)
    resp.raise_for_status()
    resp_obj = resp.json()
    doc_found = False
    for d in resp_obj["results"]:
        if doc_id != d["id"]:
            continue
        else:
            assert doc_id == d["id"]
            assert d["deleted"]
            doc_found = True
            break

    log_info(
        "Checking that the deleted doc is included in the active_only=false changes request"
    )
    assert doc_found
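
# The two feed scans above can be expressed with one small helper (a sketch,
# not part of the original suite; it reuses the requests.Session import already
# used in this test): return the _changes entry for a doc id, or None when the
# doc is absent from the feed.
def find_changes_entry(sg_admin_url, sg_db, doc_id, active_only):
    session = Session()
    request_url = "{}/{}/_changes?active_only={}".format(
        sg_admin_url, sg_db, "true" if active_only else "false")
    resp = session.get(request_url)
    resp.raise_for_status()
    for entry in resp.json()["results"]:
        if entry["id"] == doc_id:
            return entry
    return None

# Usage mirroring the assertions above:
# assert find_changes_entry(sg_admin_url, sg_db, doc_id, active_only=True) is None
# entry = find_changes_entry(sg_admin_url, sg_db, doc_id, active_only=False)
# assert entry is not None and entry["deleted"]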
Ejemplo n.º 36
0
def test_listener_two_sync_gateways(setup_client_2sgs_test):
    """
    Port of https://github.com/couchbaselabs/sync-gateway-tests/blob/master/tests/cbl-replication-mismatch-2-gateways.js
    Scenario:
      1. Start 2 sync_gateways
      2. Create sg_db_one db on sync_gateway one
      3. Create sg_db_two db on sync_gateway two
      4. Create ls_db_one and ls_db_two on Liteserv
      5. Setup continuous push / pull replication from ls_db_one <-> sg_db_one
      6. Setup continuous push / pull replication from ls_db_two <-> sg_db_two
      7. Setup continuous push / pull replication from sg_db_one <-> ls_db_two
      8. Setup continuous push / pull replication from sg_db_two <-> ls_db_one
      9. Add num_docs / 2 to each liteserv database
      10. Verify each database has num_docs docs
      11. Verify all_docs in all dbs
      12. Verify changes feed for sg_db_one and sg_db_two
      13. Verify changes feed for ls_db_one and ls_db_two
    """

    num_docs = 500

    ls_url = setup_client_2sgs_test["ls_url"]
    cluster_config = setup_client_2sgs_test["cluster_config"]
    sg_one_admin_url = setup_client_2sgs_test["sg_one_admin_url"]
    sg_two_admin_url = setup_client_2sgs_test["sg_two_admin_url"]

    sg_util = SyncGateway()
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_one_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS))
    sg_util.start_sync_gateway(cluster_config=cluster_config, url=sg_two_admin_url, config="{}/walrus.json".format(SYNC_GATEWAY_CONFIGS))

    ls_db_one = "ls_db1"
    ls_db_two = "ls_db2"
    sg_db_one = "sg_db1"
    sg_db_two = "sg_db2"

    log_info("ls_url: {}".format(ls_url))
    log_info("sg_one_admin_url: {}".format(sg_one_admin_url))
    log_info("sg_two_admin_url: {}".format(sg_two_admin_url))
    log_info("num_docs: {}".format(num_docs))
    log_info("Running 'test_listener_two_sync_gateways' ...")

    client = MobileRestClient()

    # Create dbs on sync_gateway
    client.create_database(sg_one_admin_url, sg_db_one, "walrus:")
    client.create_database(sg_two_admin_url, sg_db_two, "walrus:")

    # Create dbs on LiteServ
    client.create_database(ls_url, ls_db_one)
    client.create_database(ls_url, ls_db_two)

    # Start continuous push pull replication ls_db_one <-> sg_db_one
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_one,
        to_url=sg_one_admin_url, to_db=sg_db_one
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_one_admin_url, from_db=sg_db_one,
        to_db=ls_db_one
    )

    # Start continuous push pull replication ls_db_two <-> sg_db_two
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_two,
        to_url=sg_two_admin_url, to_db=sg_db_two
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_two_admin_url, from_db=sg_db_two,
        to_db=ls_db_two
    )

    # Start continuous push pull replication sg_db_one <-> ls_db_two
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_one_admin_url, from_db=sg_db_one,
        to_db=ls_db_two
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_two,
        to_url=sg_one_admin_url, to_db=sg_db_one
    )

    # Start continuous push pull replication sg_db_two <-> ls_db_one
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_two_admin_url, from_db=sg_db_two,
        to_db=ls_db_one
    )
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db_one,
        to_url=sg_two_admin_url, to_db=sg_db_two
    )

    ls_db_one_docs = client.add_docs(url=ls_url, db=ls_db_one, number=num_docs / 2, id_prefix="ls_db_one_doc")
    assert len(ls_db_one_docs) == num_docs / 2

    ls_db_two_docs = client.add_docs(url=ls_url, db=ls_db_two, number=num_docs / 2, id_prefix="ls_db_two_doc")
    assert len(ls_db_two_docs) == num_docs / 2

    all_docs = client.merge(ls_db_one_docs, ls_db_two_docs)
    assert len(all_docs) == 500

    # Verify docs replicate to each db
    client.verify_docs_present(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_present(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_present(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)

    # Verify changes feeds for each db
    client.verify_docs_in_changes(url=ls_url, db=ls_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=ls_url, db=ls_db_two, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_one_admin_url, db=sg_db_one, expected_docs=all_docs)
    client.verify_docs_in_changes(url=sg_two_admin_url, db=sg_db_two, expected_docs=all_docs)
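
# Steps 5-8 above wire four bidirectional replications with eight nearly
# identical start_replication() calls. A sketch of a helper (hypothetical,
# assuming the MobileRestClient API used above) that starts one push/pull pair
# keeps the topology easier to read:
def start_push_pull(client, ls_url, ls_db, sg_admin_url, sg_db):
    # push: LiteServ local db -> Sync Gateway db
    client.start_replication(
        url=ls_url, continuous=True,
        from_db=ls_db,
        to_url=sg_admin_url, to_db=sg_db
    )
    # pull: Sync Gateway db -> LiteServ local db
    client.start_replication(
        url=ls_url, continuous=True,
        from_url=sg_admin_url, from_db=sg_db,
        to_db=ls_db
    )

# The four replication pairs above would then reduce to:
# start_push_pull(client, ls_url, ls_db_one, sg_one_admin_url, sg_db_one)
# start_push_pull(client, ls_url, ls_db_two, sg_two_admin_url, sg_db_two)
# start_push_pull(client, ls_url, ls_db_two, sg_one_admin_url, sg_db_one)
# start_push_pull(client, ls_url, ls_db_one, sg_two_admin_url, sg_db_two)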