Example #1
def upgrade_db(orig_id, seed_id, db_role_name):
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    env_util.delete_fuel_resources(seed_env)
    # Wait for Neutron to reconfigure networks
    time.sleep(7)  # FIXME: Use more deterministic way
    maintenance.disable_apis(orig_env)
    maintenance.stop_corosync_services(seed_env)
    maintenance.stop_upstart_services(seed_env)

    # Dump only the OpenStack service databases that actually exist on the
    # original environment.
    expected_dbs = set(magic_consts.OS_SERVICES)
    existing_dbs = set(db.get_databases(orig_env))
    dbs = existing_dbs & expected_dbs
    if len(dbs) < len(magic_consts.OS_SERVICES):
        LOG.info('Skipping nonexistent tables: %s',
                 ', '.join(expected_dbs - existing_dbs))
    LOG.info('Will dump tables: %s', ', '.join(dbs))

    fname = os.path.join(magic_consts.FUEL_CACHE, 'dbs.original.sql.gz')
    db.mysqldump_from_env(orig_env, db_role_name, dbs, fname)

    # Keep a per-cluster copy of the dump in the Fuel cache directory.
    fname2 = os.path.join(
        magic_consts.FUEL_CACHE,
        'dbs.original.cluster_%s.sql.gz' % (orig_env.data['id'],),
    )
    shutil.copy(fname, fname2)

    # Restore the dump into the seed environment and run the DB migrations.
    db.mysqldump_restore_to_env(seed_env, db_role_name, fname)
    db.db_sync(seed_env)
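The db helpers doing the heavy lifting are not part of this excerpt. As a rough illustration only — not octane's actual implementation, which drives mysqldump on a controller over SSH — a hypothetical dump helper could stream mysqldump output through gzip into the cache file. The function name and flags below are assumptions for the sketch, and credentials are left to a local .my.cnf.

import gzip
import subprocess


def dump_databases_to_file(databases, fname):
    # Hypothetical sketch: gzip a local mysqldump of the given databases.
    cmd = ['mysqldump', '--single-transaction', '--databases'] + sorted(databases)
    with gzip.open(fname, 'wb') as out:
        proc = subprocess.Popen(cmd, stdout=subprocess.PIPE)
        for chunk in iter(lambda: proc.stdout.read(65536), b''):
            out.write(chunk)
        if proc.wait() != 0:
            raise RuntimeError('mysqldump exited with %d' % proc.returncode)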
Example #2
def prepare(orig_id, seed_id):
    orig_env = environment_obj.Environment(orig_id)
    seed_env = environment_obj.Environment(seed_id)
    controller = env_util.get_one_controller(seed_env)

    # Copy only the keystone database from the original environment into the
    # seed environment via a temporary dump file.
    with tempfile.NamedTemporaryFile() as temp:
        db.mysqldump_from_env(orig_env, ['keystone'], temp.name)
        db.mysqldump_restore_to_env(seed_env, temp.name)

    # Apply keystone schema migrations on the seed, then restart memcached on
    # every controller so no stale cached data is served.
    ssh.call(['keystone-manage', 'db_sync'],
             node=controller, parse_levels=True)
    for controller in env_util.get_controllers(seed_env):
        ssh.call(['service', 'memcached', 'restart'], node=controller)
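The ssh and env_util helpers are project-specific and not shown in the excerpt. As a self-contained sketch, assuming plain ssh access to the controllers (the function name below is invented for illustration and is not the project's API), the same fan-out could be written as:

import subprocess


def run_on_controllers(hostnames, command):
    # Sketch only: run the same command on each controller via the ssh CLI.
    for host in hostnames:
        subprocess.check_call(['ssh', host] + command)


# Example: run_on_controllers(['node-1', 'node-2'], ['service', 'memcached', 'restart'])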
Example #3
def test_mysqldump_restore_to_env(mocker, mock_open, mock_subprocess,
                                  mock_ssh_popen, node):
    test_contents = b'test_contents\nhere'
    buf = io.BytesIO()

    mock_open.return_value = io.BytesIO(test_contents)

    get_one_controller = mocker.patch('octane.util.env.get_one_controller')
    get_one_controller.return_value = node

    # Everything written to the remote process's stdin is captured in buf.
    proc = mock_ssh_popen.return_value.__enter__.return_value
    proc.stdin.write.side_effect = buf.write

    db.mysqldump_restore_to_env('env', 'filename')

    assert not mock_subprocess.called
    mock_ssh_popen.assert_called_once_with(
        ['sh', '-c', mock.ANY], stdin=ssh.PIPE, node=node)
    mock_open.assert_called_once_with('filename', 'rb')
    assert buf.getvalue() == test_contents
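The fixtures used above (mock_open, mock_subprocess, mock_ssh_popen, node) come from the project's test conftest and are not shown here. The same pytest-mock techniques — patching open and capturing writes into a BytesIO through a side_effect — appear in this self-contained toy; send_file is invented for the illustration and is not part of octane.

import io
from unittest import mock


def send_file(path, writer):
    # Stand-in for the streaming logic under test: read a file, push it to a writer.
    with open(path, 'rb') as f:
        writer.write(f.read())


def test_send_file(mocker):
    data = b'test_contents\nhere'
    buf = io.BytesIO()

    mock_open = mocker.patch('builtins.open', mock.mock_open(read_data=data))
    writer = mock.Mock()
    writer.write.side_effect = buf.write  # record what would have been sent

    send_file('filename', writer)

    mock_open.assert_called_once_with('filename', 'rb')
    assert buf.getvalue() == data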