def test_tier_2_upgrade(floating_ip_2_tier_1_clusters, tier_2_manager,
                        cfy, tmpdir, logger):
    local_snapshot_path = str(tmpdir / 'snapshot.zip')

    tier_1_cluster = floating_ip_2_tier_1_clusters[0]
    tier_1_cluster.deploy_and_validate()

    cfy.snapshots.create([constants.TIER_2_SNAP_ID])
    tier_2_manager.wait_for_all_executions()
    cfy.snapshots.download(
        [constants.TIER_2_SNAP_ID, '-o', local_snapshot_path]
    )

    tier_2_manager.teardown()
    tier_2_manager.bootstrap()
    tier_2_manager.use()
    _upload_resources_to_tier_2_manager(cfy, tier_2_manager, logger)
    cfy.snapshots.upload([local_snapshot_path, '-s', constants.TIER_2_SNAP_ID])
    restore_snapshot(tier_2_manager, constants.TIER_2_SNAP_ID, cfy, logger,
                     restore_certificates=True)

    cfy.agents.install()

    # This will only work properly if the Tier 2 manager was restored correctly
    tier_1_cluster.uninstall()
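# `restore_snapshot` is not shown in this listing. A minimal sketch of what it
# presumably does (the signature and flag handling below are assumptions based
# on how the helper is called in these tests): trigger `cfy snapshots restore`
# and wait for the restore execution to finish on the target manager.
def restore_snapshot(manager, snapshot_id, cfy, logger,
                     restore_certificates=False, force=False,
                     change_manager_password=True):
    logger.info('Restoring snapshot %s', snapshot_id)
    args = [snapshot_id]
    if restore_certificates:
        args.append('--restore-certificates')
    if force:
        args.append('--force')
    cfy.snapshots.restore(args)
    manager.wait_for_all_executions()
    # `change_manager_password` would additionally rotate the admin password
    # after the restore; that step is omitted in this sketch.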
def test_cluster_to_upgrade_cluster(distributed_installation,
                                    cfy,
                                    logger,
                                    tmpdir,
                                    attributes,
                                    distributed_nodecellar):
    logger.info('Running Sanity check for cluster with an external database')
    rabbitmq = distributed_installation.message_queue

    manager1 = distributed_installation.manager
    old_manager_1 = distributed_installation.old_cluster[0]

    old_manager_1.use()

    create_and_add_user_to_tenant(cfy, logger)

    set_sanity_user(cfy, old_manager_1, logger)

    # Creating secrets with 'tenant' visibility
    create_secrets(cfy, logger, attributes, old_manager_1)

    distributed_nodecellar.upload_and_verify_install()

    set_admin_user(cfy, old_manager_1, logger)

    # Create and download a snapshot
    snapshot_id = 'SNAPSHOT_ID'
    local_snapshot_path = str(tmpdir / 'snap.zip')
    logger.info('Creating snapshot')
    create_snapshot(old_manager_1, snapshot_id, attributes, logger)
    download_snapshot(old_manager_1, local_snapshot_path, snapshot_id, logger)

    set_admin_user(cfy, manager1, logger)

    # Upload and restore snapshot to the external AIO manager
    logger.info('Uploading and restoring snapshot')
    upload_snapshot(manager1, local_snapshot_path, snapshot_id, logger)
    restore_snapshot(manager1, snapshot_id, cfy, logger,
                     change_manager_password=False)
    time.sleep(7)
    verify_services_status(manager1, logger)

    # Wait for the agents to reconnect
    time.sleep(30)

    # Upgrade agents
    logger.info('Upgrading agents')
    copy_ssl_cert_from_manager_to_tmpdir(old_manager_1, tmpdir)
    args = ['--manager-ip', rabbitmq.private_ip_address,
            '--manager_certificate', str(tmpdir + 'new_manager_cert.txt'),
            '--all-tenants']
    cfy.agents.install(args)

    set_sanity_user(cfy, manager1, logger)
    # Verify `agents install` worked as expected
    distributed_nodecellar.uninstall()
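# `create_secrets` is referenced above but not shown. A rough sketch, assuming
# it creates a fixed set of agent-related secrets through the cfy CLI; the
# secret names and `attributes` fields used here are illustrative assumptions,
# and `--visibility` is assumed to be the flag controlling secret visibility.
def create_secrets(cfy, logger, attributes, manager, visibility='tenant'):
    # `manager` is accepted to match the call sites; unused in this sketch.
    logger.info('Creating secrets with %s visibility', visibility)
    secrets = {
        'agent_user': attributes.default_linux_username,    # assumed attribute
        'agent_private_key_path': attributes.ssh_key_path,  # assumed attribute
    }
    for key, value in secrets.items():
        cfy.secrets.create([key, '-s', value, '--visibility', visibility])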
def test_multiple_networks(managers,
                           cfy,
                           multi_network_hello_worlds,
                           logger,
                           tmpdir,
                           attributes):

    logger.info('Testing managers with multiple networks')

    # We expect at least three hello world objects. The first one belongs to
    # a network that is only added to the new manager after bootstrap, so it
    # is verified there at the very end. All the other ones are installed on
    # the first manager; we then create a snapshot, restore it on the second
    # manager, and complete the verification by uninstalling the remaining
    # hellos on the new manager.

    old_manager = managers[0]
    new_manager = managers[1]
    snapshot_id = 'SNAPSHOT_ID'
    local_snapshot_path = str(tmpdir / 'snap.zip')

    # The first hello is the one that belongs to a network that will be added
    # manually post bootstrap to the new manager
    post_bootstrap_hello = multi_network_hello_worlds.pop(0)
    post_bootstrap_hello.manager = new_manager

    for hello in multi_network_hello_worlds:
        hello.upload_and_verify_install()

    create_snapshot(old_manager, snapshot_id, attributes, logger)
    download_snapshot(old_manager, local_snapshot_path, snapshot_id, logger)

    new_manager.use()

    upload_snapshot(new_manager, local_snapshot_path, snapshot_id, logger)
    restore_snapshot(new_manager, snapshot_id, cfy, logger,
                     change_manager_password=False)

    upgrade_agents(cfy, new_manager, logger)
    delete_manager(old_manager, logger)

    for hello in multi_network_hello_worlds:
        hello.manager = new_manager
        hello.uninstall()
        hello.delete_deployment()

    _add_new_network(new_manager, logger)
    post_bootstrap_hello.verify_all()
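# Minimal sketches of the `upgrade_agents` and `delete_manager` helpers used
# above; their behaviour is assumed from the call sites: reinstall agents from
# the new manager, then destroy the old manager.
def upgrade_agents(cfy, manager, logger):
    logger.info('Upgrading agents against the new manager')
    manager.use()
    cfy.agents.install(['--all-tenants'])


def delete_manager(manager, logger):
    logger.info('Deleting the old manager')
    manager.delete()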
def test_old_agent_stopped_after_agent_upgrade(
        managers, nodecellar, cfy, logger, tmpdir
):
    local_snapshot_path = str(tmpdir / 'snapshot.zip')
    snapshot_id = 'snap'

    old_manager = managers[0]
    new_manager = managers[1]

    old_manager.use()

    nodecellar.upload_and_verify_install()

    cfy.snapshots.create([snapshot_id])
    old_manager.wait_for_all_executions()
    cfy.snapshots.download([snapshot_id, '-o', local_snapshot_path])

    new_manager.use()

    cfy.snapshots.upload([local_snapshot_path, '-s', snapshot_id])
    restore_snapshot(new_manager, snapshot_id, cfy, logger)

    # Before upgrading the agents, the old agent should still be up
    old_manager.use()
    cfy.agents.validate()

    # Upgrade to new agents and stop old agents
    new_manager.use()
    cfy.agents.install('--stop-old-agent')

    # We now expect the old agent to be stopped, and thus unresponsive. So,
    # calling `cfy agents validate` on the old manager should fail
    try:
        logger.info('Validating the old agent is indeed down')
        old_manager.use()
        cfy.agents.validate()
    except ErrorReturnCode:
        logger.info('Old agent unresponsive, as expected')
    else:
        raise RuntimeError('Old agent is still responsive')

    old_manager.delete()

    new_manager.use()
    nodecellar.manager = new_manager
    nodecellar.verify_installation()
    nodecellar.uninstall()
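# The try/except/else check above could also be expressed with pytest.raises;
# a small equivalent sketch (assumes pytest is available in this test suite
# and reuses the sh ErrorReturnCode already imported by this module):
def _assert_agents_validation_fails(cfy, manager, logger):
    import pytest
    logger.info('Validating the old agent is indeed down')
    manager.use()
    with pytest.raises(ErrorReturnCode):
        cfy.agents.validate()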
def test_distributed_installation_scenario(distributed_installation, cfy,
                                           logger, tmpdir, attributes,
                                           distributed_nodecellar):
    manager = distributed_installation.manager
    _set_admin_user(cfy, manager, logger)

    # Creating secrets
    _create_secrets(cfy, logger, attributes, manager, visibility='global')

    distributed_nodecellar.upload_and_verify_install()

    snapshot_id = 'SNAPSHOT_ID'
    create_snapshot(manager, snapshot_id, attributes, logger)

    # Restore snapshot
    logger.info('Restoring snapshot')
    restore_snapshot(manager, snapshot_id, cfy, logger, force=True)

    distributed_nodecellar.uninstall()
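# The `create_snapshot` / `download_snapshot` helpers are not part of this
# listing. The sketch below reproduces the same behaviour with the raw cfy CLI
# calls already used in test_tier_2_upgrade above; the real helpers presumably
# go through the manager's REST client, so taking a `cfy` handle here is an
# assumption made for illustration only.
def _create_and_download_snapshot(cfy, manager, snapshot_id, output_path,
                                  logger):
    manager.use()
    logger.info('Creating snapshot %s', snapshot_id)
    cfy.snapshots.create([snapshot_id])
    manager.wait_for_all_executions()
    logger.info('Downloading snapshot %s to %s', snapshot_id, output_path)
    cfy.snapshots.download([snapshot_id, '-o', output_path])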
def test_distributed_installation_sanity(distributed_installation,
                                         cfy,
                                         logger,
                                         tmpdir,
                                         attributes,
                                         distributed_nodecellar):
    logger.info('Running Sanity check for cluster with an external database')
    rabbitmq = distributed_installation.message_queue

    manager1 = distributed_installation.manager
    manager2, manager3 = distributed_installation.joining_managers
    manager_aio = distributed_installation.sanity_manager

    manager1.use()
    cfy.cluster('update-profile')

    logger.info('Cfy version')
    cfy('--version')

    logger.info('Cfy status')
    result = cfy.cluster.status()
    assert len(re.findall('Active', result.stdout)) == 3

    create_and_add_user_to_tenant(cfy, logger)

    set_sanity_user(cfy, manager1, logger)

    # Creating secrets with 'tenant' visibility
    create_secrets(cfy, logger, attributes, manager1)

    distributed_nodecellar.upload_and_verify_install()

    set_admin_user(cfy, manager1, logger)

    # Simulate failover (manager2/3 will be the remaining active managers)
    toggle_cluster_node(manager1, 'nginx', logger, disable=True)

    # Create and download a snapshot from a remaining active manager (manager2)
    snapshot_id = 'SNAPSHOT_ID'
    local_snapshot_path = str(tmpdir / 'snap.zip')
    logger.info('Creating snapshot')
    create_snapshot(manager2, snapshot_id, attributes, logger)
    download_snapshot(manager2, local_snapshot_path, snapshot_id, logger)

    set_admin_user(cfy, manager_aio, logger)

    # Upload and restore snapshot to the external AIO manager
    logger.info('Uploading and restoring snapshot')
    upload_snapshot(manager_aio, local_snapshot_path, snapshot_id, logger)
    restore_snapshot(manager_aio, snapshot_id, cfy, logger,
                     change_manager_password=False)
    time.sleep(7)
    verify_services_status(manager_aio, logger)

    # Wait for the agents to reconnect
    time.sleep(30)

    # Upgrade agents
    logger.info('Upgrading agents')
    copy_ssl_cert_from_manager_to_tmpdir(manager2, tmpdir)
    args = ['--manager-ip', rabbitmq.private_ip_address,
            '--manager_certificate', str(tmpdir + 'new_manager_cert.txt'),
            '--all-tenants']
    cfy.agents.install(args)

    set_sanity_user(cfy, manager_aio, logger)
    # Verify `agents install` worked as expected
    distributed_nodecellar.uninstall()
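# `toggle_cluster_node` simulates a failover by stopping (or starting) a
# service on one of the cluster nodes. A rough sketch, assuming the manager
# object exposes a `run_command` helper that executes a shell command on the
# node over SSH; the systemctl invocation is standard systemd.
def toggle_cluster_node(manager, service, logger, disable=True):
    action = 'stop' if disable else 'start'
    logger.info('Running systemctl %s %s on %s',
                action, service, manager.private_ip_address)
    manager.run_command('sudo systemctl {0} {1}'.format(action, service))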
def test_distributed_installation_sanity(distributed_installation, cfy, logger,
                                         tmpdir, attributes,
                                         distributed_nodecellar):
    logger.info('Running Sanity check for cluster with an external database')
    manager1 = distributed_installation.manager
    manager2, manager3 = distributed_installation.joining_managers
    manager_aio = distributed_installation.sanity_manager

    manager1.use()
    verify_nodes_status(manager1, cfy, logger)

    logger.info('Cfy version')
    cfy('--version')

    logger.info('Cfy status')
    cfy.status()

    _create_and_add_user_to_tenant(cfy, logger)

    _set_sanity_user(cfy, manager1, logger)

    # Creating secrets with 'tenant' visibility
    _create_secrets(cfy, logger, attributes, manager1)

    distributed_nodecellar.upload_and_verify_install()

    _set_admin_user(cfy, manager1, logger)

    # Simulate failover (manager2 will be the new cluster master)
    set_active(manager2, cfy, logger)

    # Create and download snapshots from the new cluster master (manager2)
    snapshot_id = 'SNAPSHOT_ID'
    local_snapshot_path = str(tmpdir / 'snap.zip')
    logger.info('Creating snapshot')
    create_snapshot(manager2, snapshot_id, attributes, logger)
    download_snapshot(manager2, local_snapshot_path, snapshot_id, logger)

    _set_admin_user(cfy, manager_aio, logger)

    # Upload and restore snapshot to the external AIO manager
    logger.info('Uploading and restoring snapshot')
    upload_snapshot(manager_aio, local_snapshot_path, snapshot_id, logger)
    restore_snapshot(manager_aio,
                     snapshot_id,
                     cfy,
                     logger,
                     change_manager_password=False)
    time.sleep(7)
    verify_services_status(manager_aio, logger)

    # Wait for the agents to reconnect
    time.sleep(30)

    # Upgrade agents
    logger.info('Upgrading agents')
    _copy_ssl_cert_from_manager_to_tmpdir(manager2, tmpdir)
    args = [
        '--manager-ip', manager2.private_ip_address, '--manager_certificate',
        str(tmpdir + 'new_manager_cert.txt'), '--all-tenants'
    ]
    cfy.agents.install(args)

    _set_sanity_user(cfy, manager_aio, logger)
    # Verify `agents install` worked as expected
    distributed_nodecellar.uninstall()
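# A plausible sketch of the `set_active` failover helper: promote the given
# manager to be the active cluster node and give the cluster time to converge.
# `cfy cluster set-active` and the use of the private IP as the node name are
# assumptions about the pre-5.x HA CLI.
def set_active(manager, cfy, logger):
    logger.info('Promoting %s to be the active cluster node',
                manager.private_ip_address)
    cfy.cluster('set-active', manager.private_ip_address)
    time.sleep(30)  # allow the cluster to finish switching over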
def test_sanity_scenario(managers,
                         cfy,
                         logger,
                         tmpdir,
                         attributes,
                         nodecellar):

    manager1 = managers[0]
    manager2 = managers[1]
    manager3 = managers[2]

    # Register CLI profiles for all three managers; manager1 stays active
    manager3.use()
    manager2.use()
    manager1.use()

    logger.info('Cfy version')
    cfy('--version')

    logger.info('Cfy status')
    cfy.status()

    # Start HA cluster, add manager2 to it
    _start_cluster(cfy, manager1, logger)

    _set_admin_user(cfy, manager2, logger)

    _join_cluster(cfy, manager1, manager2, logger)

    time.sleep(30)

    _create_and_add_user_to_tenant(cfy, logger)

    _set_sanity_user(cfy, manager1, logger)

    # Creating secrets
    _create_secrets(cfy, logger, attributes, manager1)

    nodecellar.upload_and_verify_install()

    _set_admin_user(cfy, manager1, logger)

    # Simulate failover (manager2 will be the new cluster master)
    logger.info('Setting replica manager')
    _set_admin_user(cfy, manager2, logger)
    ha_helper.set_active(manager2, cfy, logger)
    # time.sleep(30)

    # Create and download snapshots from the new cluster master (manager2)
    snapshot_id = 'SNAPSHOT_ID'
    local_snapshot_path = str(tmpdir / 'snap.zip')
    logger.info('Creating snapshot')
    create_snapshot(manager2, snapshot_id, attributes, logger)
    download_snapshot(manager2, local_snapshot_path, snapshot_id, logger)

    _set_admin_user(cfy, manager3, logger)

    # Upload and restore snapshot to manager3
    logger.info('Uploading and restoring snapshot')
    upload_snapshot(manager3, local_snapshot_path, snapshot_id, logger)
    restore_snapshot(manager3, snapshot_id,
                     cfy, logger, change_manager_password=False)

    verify_services_status(manager3, logger)

    # Upgrade agents
    logger.info('Upgrading agents')
    _copy_ssl_cert_from_manager_to_tmpdir(manager2, tmpdir)
    args = ['--manager-ip', manager2.private_ip_address,
            '--manager_certificate', str(tmpdir + 'new_manager_cert.txt'),
            '--all-tenants']
    cfy.agents.install(args)

    _set_sanity_user(cfy, manager3, logger)
    # Verify `agents install` worked as expected
    nodecellar.uninstall()
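# A sketch of `_copy_ssl_cert_from_manager_to_tmpdir`: fetch the manager's
# internal CA certificate so freshly installed agents can trust it.
# `get_remote_file` is an assumed helper on the manager object; the source
# path is the standard Cloudify CA location, and the destination mirrors the
# `tmpdir + 'new_manager_cert.txt'` expression used by the callers above.
def _copy_ssl_cert_from_manager_to_tmpdir(manager, tmpdir):
    manager.get_remote_file(
        '/etc/cloudify/ssl/cloudify_internal_ca_cert.pem',
        str(tmpdir + 'new_manager_cert.txt'),
    )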