def test_log_rotation_negative(params_from_base_test_setup, sg_conf_name):
    """Test log rotation with negative values for:
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2
    Sync Gateway should fail to start.
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set negative values for rotation section
    data['logging']["default"]["rotation"] = {
        "maxsize": -1,
        "maxage": -30,
        "maxbackups": -2,
        "localtime": True
    }
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("Sync Gateway should not have started with this config")
def test_log_rotation_invalid_path(params_from_base_test_setup, sg_conf_name):
    """Test to check that SG is not started with invalid logFilePath.
    OS specific case. SG should check if path correct on startup
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set a non-existent logFilePath
    data['logging']["default"]["logFilePath"] = "/12345/1231/131231.log"
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("Sync Gateway should not have started with this config")
def test_log_logLevel_invalid(params_from_base_test_setup, sg_conf_name):
    """Run SG with non existing logLevel value
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # 'debugFake' invalid value for logLevel
    data['logging']["default"]["logLevel"] = "debugFake"

    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    # create temp config file in the same folder as sg_conf
    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("Sync Gateway should not have started with this config")
def test_log_logKeys_string(params_from_base_test_setup, sg_conf_name):
    """Negative test to verify that we are not able start SG when
    logKeys is string
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set logKeys as string in config file
    data['logging']["default"]["logKeys"] = "http"
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    try:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
    except ProvisioningError:
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # Remove generated conf file
        os.remove(temp_conf)
        return

    # Remove generated conf file
    os.remove(temp_conf)
    pytest.fail("Sync Gateway should not have started with this config")
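
# NOTE: the four negative tests above all repeat the same pattern: patch one
# logging value, write a temp config, and expect Sync Gateway to refuse to
# start. A minimal sketch of a shared helper for that pattern follows; the
# helper and its name are hypothetical, and it reuses the same testkit objects
# these tests already use (the SyncGateway helper and the ProvisioningError
# exception, passed in here as an argument).
import json
import os

import pytest


def assert_sg_fails_to_start(sg_helper, cluster_conf, sg_url, sg_conf,
                             bad_config_data, provisioning_error):
    """Write bad_config_data to a temp config and assert SG refuses to start.

    Restores SG with the known-good sg_conf before returning.
    """
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'
    with open(temp_conf, 'w') as fp:
        json.dump(bad_config_data, fp)

    try:
        sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)
        try:
            sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                          url=sg_url,
                                          config=temp_conf)
        except provisioning_error:
            # Expected failure: bring SG back up with the original config
            sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                          url=sg_url,
                                          config=sg_conf)
            return
        pytest.fail("Sync Gateway should not have started with this config")
    finally:
        os.remove(temp_conf)

# Example usage (inside one of the tests above):
#   assert_sg_fails_to_start(sg_helper, cluster_conf, sg_one_url, sg_conf,
#                            data, ProvisioningError)
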
def test_log_nondefault_logKeys_set(params_from_base_test_setup, sg_conf_name):
    """Test to verify non default logKeys with any invalid area.
    SG should work even with non existing logging area
    (positive case)
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # read sample sg_conf
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # "FAKE" not valid area in logging
    data['logging']["default"]["logKeys"] = ["HTTP", "FAKE"]
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # Start sync_gateways
    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)

    # Remove generated conf file
    os.remove(temp_conf)

# Example 6

def test_detect_stale_channel_index(params_from_base_test_setup, sg_conf):
    """
    1. Bring up a single Sync Gateway node, backed by Couchbase Server, with 3 sg_accel nodes handling channel indexing
    2. Configure such that the primary bucket and the channel index bucket are different (which is the norm)
    3. Add 1000 documents
    4. Shutdown Sync Gateway
    5. Delete / create the primary bucket ('data-bucket'), but do not touch the channel index bucket
    6. Start Sync Gateway
    7. Assert that sync_gateway fails to start due to stale channel index
    """

    cluster_conf = params_from_base_test_setup["cluster_config"]

    log_info("Running 'test_detect_stale_channel_index'")
    log_info("cluster_conf: {}".format(cluster_conf))

    log_info("sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_util = ClusterKeywords()
    topology = cluster_util.get_cluster_topology(cluster_conf)

    sg_url = topology["sync_gateways"][0]["public"]
    sg_admin_url = topology["sync_gateways"][0]["admin"]
    cb_server_url = topology["couchbase_servers"][0]
    sg_db = "db"
    num_docs = 1000

    cb_server = couchbaseserver.CouchbaseServer(url=cb_server_url)
    client = MobileRestClient()

    # Create doc pusher user
    doc_pusher_user_info = userinfo.UserInfo(name="doc_pusher",
                                             password="******",
                                             channels=["NASA"],
                                             roles=[])
    doc_pusher_auth = client.create_user(
        url=sg_admin_url,
        db=sg_db,
        name=doc_pusher_user_info.name,
        password=doc_pusher_user_info.password,
        channels=doc_pusher_user_info.channels)

    # Add some docs to Sync Gateway to cause indexing
    docs = document.create_docs(None,
                                number=num_docs,
                                channels=doc_pusher_user_info.channels)
    pushed_docs = client.add_bulk_docs(url=sg_url,
                                       db=sg_db,
                                       docs=docs,
                                       auth=doc_pusher_auth)
    assert len(pushed_docs) == num_docs

    # Shut down sync_gateway
    sg_util = SyncGateway()
    sg_util.stop_sync_gateways(cluster_config=cluster_conf, url=sg_url)

    # Delete server bucket
    cb_server.delete_bucket(name="data-bucket")

    # Create server bucket
    ram_per_bucket_mb = cb_server.get_ram_per_bucket(num_buckets=2)
    cb_server.create_bucket(name="data-bucket", ram_quota_mb=ram_per_bucket_mb)

    # Start sync_gateway and assert that a Provisioning error is raised due to detecting stale index
    with pytest.raises(exceptions.ProvisioningError):
        sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                    url=sg_url,
                                    config=sg_conf)

    # TODO: To make this check even more accurate, could
    # run remote ssh command "systemctl status sync_gateway.service" and look for
    # regex pattern: Main PID: 7185 (code=exited, status=2)

    # Delete index bucket and recreate it
    cb_server.delete_bucket(name="index-bucket")
    cb_server.create_bucket(name="index-bucket",
                            ram_quota_mb=ram_per_bucket_mb)

    # Start sync gateway, should succeed now
    sg_util.start_sync_gateways(cluster_config=cluster_conf,
                                url=sg_url,
                                config=sg_conf)
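
# NOTE: the TODO in the test above suggests confirming the failed start by
# checking systemd on the Sync Gateway host. A rough sketch of that check is
# below, using the RemoteExecutor helper that the log rotation tests in this
# file already use (its execute() returns status/stdout/stderr). The exact
# "systemctl status" output format, and the helper name, are assumptions.
import re


def sync_gateway_exited_with_error(remote_executor):
    """Return True if systemd reports the sync_gateway unit exited non-zero."""
    _, stdout, _ = remote_executor.execute(
        "systemctl status sync_gateway.service")
    output = "\n".join(stdout)
    # e.g. "Main PID: 7185 (code=exited, status=2)"
    match = re.search(r"Main PID: \d+ \(code=exited, status=(\d+)\)", output)
    return match is not None and match.group(1) != "0"
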

# Example 7

def test_view_backfill_for_deletes(params_from_base_test_setup, sg_conf_name,
                                   validate_changes_before_restart):
    """
    Scenario:
    1. Write a bunch of docs
    2. Delete 1/2
    3. Restart Sync Gateway
    4. Issue _changes, assert view backfills docs and delete notifications
    """

    num_docs = 1000
    sg_db = 'db'

    cluster_conf = params_from_base_test_setup['cluster_config']
    cluster_topology = params_from_base_test_setup['cluster_topology']
    mode = params_from_base_test_setup['mode']

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)
    sg_admin_url = cluster_topology['sync_gateways'][0]['admin']
    sg_url = cluster_topology['sync_gateways'][0]['public']
    cbs_url = cluster_topology['couchbase_servers'][0]

    log_info('sg_conf: {}'.format(sg_conf))
    log_info('sg_admin_url: {}'.format(sg_admin_url))
    log_info('sg_url: {}'.format(sg_url))
    log_info('cbs_url: {}'.format(cbs_url))
    log_info('validate_changes_before_restart: {}'.format(
        validate_changes_before_restart))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    # Create clients
    sg_client = MobileRestClient()

    # Create user / session
    seth_user_info = UserInfo(name='seth',
                              password='******',
                              channels=['NASA', 'NATGEO'],
                              roles=[])
    sg_client.create_user(url=sg_admin_url,
                          db=sg_db,
                          name=seth_user_info.name,
                          password=seth_user_info.password,
                          channels=seth_user_info.channels)

    seth_auth = sg_client.create_session(url=sg_admin_url,
                                         db=sg_db,
                                         name=seth_user_info.name,
                                         password=seth_user_info.password)

    # Add 'num_docs' to Sync Gateway
    doc_bodies = document.create_docs('test_doc',
                                      number=num_docs,
                                      channels=seth_user_info.channels)
    bulk_resp = sg_client.add_bulk_docs(url=sg_url,
                                        db=sg_db,
                                        docs=doc_bodies,
                                        auth=seth_auth)
    assert len(bulk_resp) == num_docs

    # Delete half of the docs randomly
    deleted_docs = []
    for _ in range(num_docs / 2):
        random_doc = random.choice(bulk_resp)
        deleted_doc = sg_client.delete_doc(url=sg_url,
                                           db=sg_db,
                                           doc_id=random_doc['id'],
                                           rev=random_doc['rev'],
                                           auth=seth_auth)
        deleted_docs.append(deleted_doc)
        bulk_resp.remove(random_doc)

    log_info('Number of docs deleted: {}'.format(len(deleted_docs)))
    all_docs = bulk_resp + deleted_docs
    log_info('Number of docs to look for in changes: {}'.format(len(all_docs)))

    # This test will check changes before and after SG restart if
    # validate_changes_before_restart == True
    # If it is not set to True, only validate the changes after restart
    if validate_changes_before_restart:
        # Verify deletions and initial docs show up in changes feed
        sg_client.verify_docs_in_changes(url=sg_url,
                                         db=sg_db,
                                         expected_docs=all_docs,
                                         auth=seth_auth)
        changes = sg_client.get_changes(url=sg_url,
                                        db=sg_db,
                                        since=0,
                                        auth=seth_auth)
        # All docs should show up + _user doc
        assert len(changes['results']) == num_docs + 1

        deleted_doc_ids = [doc['id'] for doc in deleted_docs]
        assert len(deleted_doc_ids) == num_docs / 2
        deleted_docs_in_changes = [
            change['id'] for change in changes['results']
            if 'deleted' in change and change['deleted']
        ]
        assert len(deleted_docs_in_changes) == num_docs / 2

        # All deleted docs should show up in the changes feed
        for doc_id in deleted_docs_in_changes:
            assert doc_id in deleted_doc_ids
            deleted_doc_ids.remove(doc_id)
        assert len(deleted_doc_ids) == 0

    # Restart Sync Gateway
    sg_controller = SyncGateway()
    sg_controller.stop_sync_gateways(url=sg_url, cluster_config=cluster_conf)
    sg_controller.start_sync_gateways(url=sg_url,
                                      cluster_config=cluster_conf,
                                      config=sg_conf)

    # Verify deletions and initial docs show up in changes feed
    sg_client.verify_docs_in_changes(url=sg_url,
                                     db=sg_db,
                                     expected_docs=all_docs,
                                     auth=seth_auth)

    changes = sg_client.get_changes(url=sg_url,
                                    db=sg_db,
                                    since=0,
                                    auth=seth_auth)
    # All docs should show up + _user doc
    assert len(changes['results']) == num_docs + 1

    deleted_doc_ids = [doc['id'] for doc in deleted_docs]
    assert len(deleted_doc_ids) == num_docs / 2
    deleted_docs_in_changes = [
        change['id'] for change in changes['results']
        if 'deleted' in change and change['deleted']
    ]
    assert len(deleted_docs_in_changes) == num_docs / 2

    # All deleted docs should show up in the changes feed
    for doc_id in deleted_docs_in_changes:
        assert doc_id in deleted_doc_ids
        deleted_doc_ids.remove(doc_id)
    assert len(deleted_doc_ids) == 0
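
# NOTE: the deletion verification above is performed twice (optionally before
# the restart and always after it). A small, dependency-free sketch of that
# check factored into a helper is below; the function name is hypothetical.
def assert_deletions_in_changes(changes, deleted_docs):
    """Assert every deleted doc shows up exactly once as a deletion in _changes."""
    deleted_doc_ids = set(doc['id'] for doc in deleted_docs)
    deleted_ids_in_changes = [
        change['id'] for change in changes['results']
        if change.get('deleted')
    ]
    # Same number of deletion entries and exactly the same set of doc ids
    assert len(deleted_ids_in_changes) == len(deleted_doc_ids)
    assert set(deleted_ids_in_changes) == deleted_doc_ids
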

# Example 8

def test_system_test(params_from_base_test_setup):

    cluster_config = params_from_base_test_setup['cluster_config']
    mode = params_from_base_test_setup['mode']

    # Scenario parameters
    server_seed_docs = int(params_from_base_test_setup['server_seed_docs'])
    max_docs = int(params_from_base_test_setup['max_docs'])
    num_users = int(params_from_base_test_setup['num_users'])

    # Create parameters
    create_batch_size = int(params_from_base_test_setup['create_batch_size'])
    create_delay = float(params_from_base_test_setup['create_delay'])

    # Update parameters
    update_runtime_sec = int(params_from_base_test_setup['update_runtime_sec'])
    update_batch_size = int(params_from_base_test_setup['update_batch_size'])
    update_docs_percentage = float(
        params_from_base_test_setup['update_docs_percentage'])
    update_delay = float(params_from_base_test_setup['update_delay'])

    # Changes parameters
    changes_delay = float(params_from_base_test_setup['changes_delay'])
    changes_limit = int(params_from_base_test_setup['changes_limit'])

    changes_terminator_doc_id = 'terminator'

    docs_per_user = max_docs / num_users
    docs_per_user_per_update = int(update_docs_percentage * docs_per_user)

    log_info('Running System Test #1')
    log_info('> server_seed_docs          = {}'.format(server_seed_docs))
    log_info('> max_docs                  = {}'.format(max_docs))
    log_info('> num_users                 = {}'.format(num_users))
    log_info('> docs_per_user             = {}'.format(docs_per_user))
    log_info('> create_batch_size         = {}'.format(create_batch_size))
    log_info('> create_delay              = {}'.format(create_delay))
    log_info('> update_batch_size         = {}'.format(update_batch_size))
    log_info('> update_docs_percentage    = {}'.format(update_docs_percentage))
    log_info(
        '> docs_per_user_per_update  = {}'.format(docs_per_user_per_update))
    log_info('> update_delay              = {}'.format(update_delay))
    log_info('> update_runtime_sec        = {}'.format(update_runtime_sec))
    log_info('> changes_delay             = {}'.format(changes_delay))
    log_info('> changes_limit             = {}'.format(changes_limit))
    log_info(
        '> changes_terminator_doc_id = {}'.format(changes_terminator_doc_id))

    # Validate
    # Server docs should be a multiple of 1000 for batching purposes
    if server_seed_docs % 1000 != 0:
        raise ValueError('server_seed_docs must be divisible by 1000')

    # Number of docs should be equally divisible by number of users
    if max_docs % num_users != 0:
        raise ValueError('max_docs must be divisible by number_of_users')

    # Number of docs per user (max_docs / num_users) should be equally
    # divisible by the batch size for easier computation
    if docs_per_user % create_batch_size != 0:
        raise ValueError(
            'docs_per_user ({}) must be divisible by create_batch_size ({})'.
            format(docs_per_user, create_batch_size))

    # We want an even distribution of users per type
    if num_users % len(USER_TYPES) != 0:
        raise ValueError("'num_users' should be a multiple of 4")

    # Make sure that the 'update_batch_size' is compatible with
    # the number of users per type
    num_users_per_type = num_users / len(USER_TYPES)
    if update_batch_size > num_users_per_type:
        raise ValueError(
            "'batch_size' cannot be larger than number of users per type")

    if num_users_per_type % update_batch_size != 0:
        raise ValueError(
            "'num_users_per_type' ({}) should be a multiple of 'update_batch_size' ({})"
            .format(num_users_per_type, update_batch_size))

    sg_conf_name = 'sync_gateway_default'
    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    # Reset cluster state
    c = Cluster(config=cluster_config)
    c.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology['couchbase_servers'][0]
    cbs_admin_url = cbs_url.replace('8091', '8092')
    cb_server = couchbaseserver.CouchbaseServer(cbs_url)
    bucket_name = "data-bucket"

    cbs_ip = cb_server.host

    headers = {'Content-Type': 'application/json'}
    cbs_session = Session()
    cbs_session.headers = headers
    cbs_session.auth = ('Administrator', 'password')

    log_info('Seeding {} with {} docs'.format(cbs_ip, server_seed_docs))
    sdk_client = Bucket('couchbase://{}/{}'.format(cbs_ip, bucket_name),
                        password='******',
                        timeout=300)

    # Stop SG before loading the server
    lb_url = topology['sync_gateways'][0]['public']
    sg_admin_url = topology['sync_gateways'][0]['admin']
    sg_db = 'db'

    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_config)

    # Scenario Actions
    delete_views(cbs_session, cbs_admin_url, bucket_name)
    load_bucket(sdk_client, server_seed_docs)
    sg_helper.start_sync_gateways(cluster_config, config=sg_conf)
    wait_for_view_creation(cbs_session, cbs_admin_url, bucket_name)

    # Start concurrent creation of docs (max docs / num users)
    # Each user will add batch_size number of docs via bulk docs and sleep for 'create_delay'
    # Once a user has added the expected number of docs ('docs_per_user'), it will terminate.
    log_info('------------------------------------------')
    log_info('START concurrent user / doc creation')
    log_info('------------------------------------------')
    users = create_docs(sg_admin_url=sg_admin_url,
                        sg_url=lb_url,
                        sg_db=sg_db,
                        num_users=num_users,
                        number_docs_per_user=docs_per_user,
                        create_batch_size=create_batch_size,
                        create_delay=create_delay)
    assert len(users) == num_users
    log_info('------------------------------------------')
    log_info('END concurrent user / doc creation')
    log_info('------------------------------------------')

    # Start changes processing
    with ProcessPoolExecutor(max_workers=((len(users) * 3) +
                                          update_batch_size + 3)) as pex:

        # Start changes feeds in background process
        changes_workers_task = pex.submit(start_changes_processing, lb_url,
                                          sg_db, users, changes_delay,
                                          changes_limit,
                                          changes_terminator_doc_id)

        log_info('------------------------------------------')
        log_info('START concurrent updates')
        log_info('------------------------------------------')
        # Start concurrent updates
        # Update batch size is the number of users that will concurrently update all of their docs
        users = update_docs(sg_url=lb_url,
                            sg_db=sg_db,
                            users=users,
                            update_runtime_sec=update_runtime_sec,
                            batch_size=update_batch_size,
                            docs_per_user_per_update=docs_per_user_per_update,
                            update_delay=update_delay)

        all_user_channels = []
        for k, v in users.items():
            log_info('User ({}) updated docs {} times!'.format(
                k, v['updates']))
            all_user_channels.append(k)

        log_info('------------------------------------------')
        log_info('END concurrent updates')
        log_info('------------------------------------------')

        # Broadcast termination doc to all users
        terminator_channel = 'terminator'
        send_changes_termination_doc(lb_url, sg_db, users,
                                     changes_terminator_doc_id,
                                     terminator_channel)

        # Overwrite each user's channels with 'terminator' so their changes feed will backfill with the termination doc
        grant_users_access(users, [terminator_channel], sg_admin_url, sg_db)

        # Block on changes completion
        users = changes_workers_task.result()

        # Print the summary of the system test
        print_summary(users)
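
# NOTE: the divisibility checks at the top of test_system_test can be pulled
# into a standalone, dependency-free helper so the parameter rules are easy to
# verify in isolation. A sketch follows; the function name is hypothetical and
# the checks mirror the ones in the test above.
def validate_system_test_params(server_seed_docs, max_docs, num_users,
                                create_batch_size, update_batch_size,
                                num_user_types):
    """Validate scenario parameters and return (docs_per_user, num_users_per_type)."""
    if server_seed_docs % 1000 != 0:
        raise ValueError('server_seed_docs must be divisible by 1000')
    if max_docs % num_users != 0:
        raise ValueError('max_docs must be divisible by num_users')
    docs_per_user = max_docs // num_users
    if docs_per_user % create_batch_size != 0:
        raise ValueError('docs_per_user must be divisible by create_batch_size')
    if num_users % num_user_types != 0:
        raise ValueError('num_users must be a multiple of the number of user types')
    num_users_per_type = num_users // num_user_types
    if update_batch_size > num_users_per_type:
        raise ValueError('update_batch_size cannot exceed num_users_per_type')
    if num_users_per_type % update_batch_size != 0:
        raise ValueError('num_users_per_type must be divisible by update_batch_size')
    return docs_per_user, num_users_per_type

# Example: 100000 docs across 100 users and 4 user types
#   validate_system_test_params(10000, 100000, 100, 100, 5, 4) == (1000, 25)
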

# Example 9

def test_attachment_revpos_when_ancestor_unavailable(
        params_from_base_test_setup, sg_conf_name):
    """
    Creates a document with an attachment, then updates that document so that
    the body of the revision that originally pushed the document is no
    longer available.  Add a new revision that's not a child of the
    active revision, and validate that it's uploaded successfully.
    Example:
       1. Document is created with attachment at rev-1
       2. Document is updated (strip digests and length, only put revpos & stub) multiple times on the server, goes to rev-10
       3. Client attempts to add a new (conflicting) revision 2, with parent rev-1.
       4. If the body of rev-1 is no longer available on the server (temporary backup of revision has expired, and is no longer stored
         in the in-memory rev cache), we were throwing an error to the client
         because we couldn't verify based on the _attachments property in rev-1.
       5. In this scenario, before returning an error, we are now checking if the active revision has a common ancestor with the incoming revision.
    If so, we can validate any revpos values equal to or earlier than the common ancestor against the active revision
    """

    cluster_config = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    no_conflicts_enabled = params_from_base_test_setup["no_conflicts_enabled"]

    if no_conflicts_enabled:
        pytest.skip('--no-conflicts is enabled, so skipping the test')

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    cluster_helper = ClusterKeywords()
    cluster_helper.reset_cluster(cluster_config, sg_conf)

    topology = cluster_helper.get_cluster_topology(cluster_config)

    cbs_url = topology["couchbase_servers"][0]
    sg_url = topology["sync_gateways"][0]["public"]
    sg_url_admin = topology["sync_gateways"][0]["admin"]
    sg_db = "db"
    bucket = "data-bucket"

    log_info("Running 'test_attachment_revpos_when_ancestor_unavailable'")
    log_info("Using cbs_url: {}".format(cbs_url))
    log_info("Using sg_url: {}".format(sg_url))
    log_info("Using sg_url_admin: {}".format(sg_url_admin))
    log_info("Using sg_db: {}".format(sg_db))
    log_info("Using bucket: {}".format(bucket))

    channels_list = ["ABC"]

    client = MobileRestClient()
    sg_util = SyncGateway()
    cb_server = couchbaseserver.CouchbaseServer(cbs_url)

    user1 = client.create_user(url=sg_url_admin,
                               db=sg_db,
                               name="user1",
                               password="******",
                               channels=channels_list)
    atts = attachment.load_from_data_dir(["sample_text.txt"])
    doc_with_att = document.create_doc(doc_id="att_doc",
                                       content={"sample_key": "sample_val"},
                                       attachments=atts,
                                       channels=channels_list)

    doc_gen_1 = client.add_doc(url=sg_url,
                               db=sg_db,
                               doc=doc_with_att,
                               auth=user1)
    client.update_doc(url=sg_url,
                      db=sg_db,
                      doc_id=doc_gen_1["id"],
                      number_updates=10,
                      auth=user1)

    # Clear cached rev doc bodies from server and cycle sync_gateway
    sg_util.stop_sync_gateways(cluster_config=cluster_config, url=sg_url)

    cb_server.delete_couchbase_server_cached_rev_bodies(bucket=bucket)
    sg_util.start_sync_gateways(cluster_config=cluster_config,
                                url=sg_url,
                                config=sg_conf)

    client.add_conflict(url=sg_url,
                        db=sg_db,
                        doc_id=doc_gen_1["id"],
                        parent_revisions=doc_gen_1["rev"],
                        new_revision="2-foo",
                        auth=user1)

# Example 10

def test_log_maxbackups_0(params_from_base_test_setup, sg_conf_name):
    """Test with maxbackups=0 that means do not limit the number of backups
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate log file with almost 1MB
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set maxbackups=0 in config file
    data['logging']["default"]["rotation"]["maxbackups"] = 0
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)
    # ~1 MB will be added to the log file by these requests
    remote_executor.execute(
        "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
    )

    status, stdout, stderr = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    assert stdout[0].rstrip() == '2'

    # Remove generated conf file
    os.remove(temp_conf)
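
# NOTE: the log rotation tests above and below repeat the same two shell
# snippets through RemoteExecutor: seeding /tmp/sg_logs with a file of a given
# size and counting the rotated files. A minimal sketch of those steps as
# helpers is below; the helper names are hypothetical and they assume the same
# remote_executor object and /tmp/sg_logs layout used in these tests.
def seed_sg_log(remote_executor, size_bytes):
    """Create /tmp/sg_logs/sg_log_rotation.log of roughly size_bytes bytes."""
    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log "
        "bs={} count=1".format(size_bytes))
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")


def count_sg_log_files(remote_executor):
    """Return the number of sg_log_rotation* files currently in /tmp/sg_logs."""
    _, stdout, _ = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    return int(stdout[0].rstrip())
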

# Example 11

def test_log_rotation_default_values(params_from_base_test_setup,
                                     sg_conf_name):
    """Test to verify default values for rotation section:
    maxsize = 100 MB
    MaxAge = 0(do not limit the number of MaxAge)
    MaxBackups = 0(do not limit the number of backups)
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # delete rotation from sample config
    del data['logging']["default"]["rotation"]
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    log_info("TEMP_CONF: {}".format(temp_conf))

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    remote_executor.execute("mkdir -p /tmp/sg_logs")

    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate a log file of ~94 MB to check that no backup file is created before the 100 MB limit is reached
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=94850000 count=1"
    )

    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")
    # iterate 5 times to verify that each iteration produces a new backup file of ~100 MB
    for i in xrange(5):
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=temp_conf)
        # ~1 MB will be added to the log file by these requests
        remote_executor.execute(
            "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
        )

        _, stdout, _ = remote_executor.execute(
            "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
        # verify num of log files
        assert stdout[0].rstrip() == str(i + 1)

        sg_helper.stop_sync_gateways(cluster_config=cluster_conf,
                                     url=sg_one_url)
        # generate a log file of ~100 MB
        remote_executor.execute(
            "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=104850000 count=1"
        )

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=sg_conf)

    # Remove generated conf file
    os.remove(temp_conf)

# Example 12

def test_log_number_backups(params_from_base_test_setup, sg_conf_name):
    """Test to check general behaviour for number of backups.
     In test the following params have been used:
        "maxsize": 1,
        "maxage": 30,
        "maxbackups": 2
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]
    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate log file with almost 1MB
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # iterate 5 times
    for i in xrange(5):
        sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                      url=sg_one_url,
                                      config=sg_conf)
        # ~1 MB will be added to the log file by these requests
        remote_executor.execute(
            "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
        )

        _, stdout, _ = remote_executor.execute(
            "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
        # max 3 files: 2 backups + 1 log file
        assert stdout[0].rstrip() == str(min(3, i + 2))

        sg_helper.stop_sync_gateways(cluster_config=cluster_conf,
                                     url=sg_one_url)
        # generate log file with almost 1MB
        remote_executor.execute(
            "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
        )

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=sg_conf)

# Example 13

def test_log_maxage_10_timestamp_ignored(params_from_base_test_setup,
                                         sg_conf_name):
    """Test to verify SG continues to wrile logs in the same file even when
     timestamp for the log file has been changed
    """
    cluster_conf = params_from_base_test_setup["cluster_config"]
    mode = params_from_base_test_setup["mode"]
    xattrs_enabled = params_from_base_test_setup["xattrs_enabled"]

    sg_conf = sync_gateway_config_path_for_mode(sg_conf_name, mode)

    log_info("Using cluster_conf: {}".format(cluster_conf))
    log_info("Using sg_conf: {}".format(sg_conf))

    cluster = Cluster(config=cluster_conf)
    cluster.reset(sg_config_path=sg_conf)

    remote_executor = RemoteExecutor(cluster.sync_gateways[0].ip)

    # Stop sync_gateways
    log_info(">>> Stopping sync_gateway")
    sg_helper = SyncGateway()
    cluster_helper = ClusterKeywords()
    cluster_hosts = cluster_helper.get_cluster_topology(cluster_conf)
    sg_one_url = cluster_hosts["sync_gateways"][0]["public"]

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)

    remote_executor.execute("mkdir -p /tmp/sg_logs")
    remote_executor.execute("sudo rm -rf /tmp/sg_logs/sg_log_rotation*")
    # generate log file with almost 1MB
    remote_executor.execute(
        "sudo dd if=/dev/zero of=/tmp/sg_logs/sg_log_rotation.log bs=1030000 count=1"
    )
    remote_executor.execute("sudo chmod 777 -R /tmp/sg_logs")

    # read sample sg_conf
    data = load_sync_gateway_config(sg_conf, mode,
                                    cluster_hosts["couchbase_servers"][0],
                                    xattrs_enabled, cluster_conf)

    # set maxage = 10 days
    data['logging']["default"]["rotation"]["maxage"] = 10
    # create temp config file in the same folder as sg_conf
    temp_conf = "/".join(sg_conf.split('/')[:-2]) + '/temp_conf.json'

    with open(temp_conf, 'w') as fp:
        json.dump(data, fp)

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)
    # ~1 MB will be added to the log file by these requests
    remote_executor.execute(
        "for ((i=1;i <= 1000;i += 1)); do curl -s http://localhost:4984/ > /dev/null; done"
    )

    sg_helper.stop_sync_gateways(cluster_config=cluster_conf, url=sg_one_url)
    # change the timestamp of the log file while SG is stopped (file naming is not changed)
    remote_executor.execute(
        "sudo touch -d \"10 days ago\" /tmp/sg_logs/sg_log_rotation*")

    sg_helper.start_sync_gateways(cluster_config=cluster_conf,
                                  url=sg_one_url,
                                  config=temp_conf)

    _, stdout, _ = remote_executor.execute(
        "ls /tmp/sg_logs/ | grep sg_log_rotation | wc -l")
    # verify that new log file was not created
    assert stdout[0].rstrip() == '2'

    # Remove generated conf file
    os.remove(temp_conf)