Example #1
def test__get_binlog_info_parses_file():
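    # The SSH client is mocked so that the remote command output is a
    # tab-separated "<binlog file>\t<position>" pair; the test asserts that
    # _get_binlog_info() parses it into a (filename, integer position) tuple.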
    mock_client = mock.Mock()
    mock_client.execute.return_value = ("mysql-bin.000002\t1054", None)
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": "full",
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None,
    })
    rmt_sql._ssh_client = mock_client
    assert rmt_sql._get_binlog_info("foo") == ("mysql-bin.000002", 1054)
Example #2
def test__get_binlog_info_parses_file():
    mock_client = mock.Mock()
    mock_client.execute.return_value = ("mysql-bin.000002\t1054", None)
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    rmt_sql._ssh_client = mock_client
    assert rmt_sql._get_binlog_info('foo') == ("mysql-bin.000002", 1054)
Example #3
def test__save_cfg(mock_all_cnf, mock_get_config, mock_get_content,
                   tmpdir, root, files, writing_config_count,
                   writing_text_count):
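    # The mock_* arguments are presumably injected by @mock.patch decorators
    # and tmpdir/root/files/writing_*_count by pytest fixtures/parametrize,
    # both omitted from this excerpt. The test writes the config files under
    # tmpdir and then checks how many configs _save_cfg() writes to the
    # destination and how many files it reads as plain text.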
    root_file = "%s/%s" % (str(tmpdir), root)

    # Prepare steps (writing config files with contents)

    for key in files.keys():
        path = key.split('/')
        if len(path) > 1:
            try:
                tmpdir.mkdir('', path[0])
            except Exception:
                pass
        cfg_file = tmpdir.join(key)
        if '!includedir' in files[key]:
            files[key] = files[key].format(tmp_dir=str(tmpdir))
        cfg_file.write(files[key])
        new_key = "%s/%s" % (str(tmpdir), key)
        files[new_key] = files.pop(key)

    mock_all_cnf.return_value = files.keys()

    def get_config(value):
        cfg = ConfigParser.ConfigParser(allow_no_value=True)
        cfg.readfp(StringIO.StringIO(files[value]))
        return cfg

    def get_content(value):
        return files[value]

    mock_get_config.side_effect = get_config

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })

    mock_cin = mock.MagicMock()
    mock_cin.channel.recv_exit_status.return_value = 0

    dst = mock.MagicMock()
    dst.host = "localhost"

    mock_get_content.side_effect = get_content

    rmt_sql._save_cfg(dst, root_file)

    mock_all_cnf.assert_called_once_with(root_file)
    assert dst.client.write_config.call_count == writing_config_count
    assert mock_get_content.call_count == writing_text_count
Example #4
def test__clone_config(mock_get_root, mock_save):
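    # mock_get_root and mock_save are presumably supplied by @mock.patch
    # decorators omitted here; the test verifies that clone_config() resolves
    # the root my.cnf path and passes it, along with the destination, to the
    # mocked save helper.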
    mock_get_root.return_value = "/etc/my.cnf"
    dst = Ssh()
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "full_backup": INTERVALS[0],
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    rmt_sql.clone_config(dst)
    mock_get_root.assert_called_with()
    mock_save.assert_called_with(dst, "/etc/my.cnf")
Example #5
def test__save_cfg(mock_all_cnf, mock_get_config, mock_get_content,
                   tmpdir, root, files, writing_config_count,
                   writing_text_count):
    root_file = "%s/%s" % (str(tmpdir), root)

    # Prepare steps (writing config files with contents)

    for key in files.keys():
        path = key.split('/')
        if len(path) > 1:
            try:
                tmpdir.mkdir('', path[0])
            except Exception:
                pass
        cfg_file = tmpdir.join(key)
        if '!includedir' in files[key]:
            files[key] = files[key].format(tmp_dir=str(tmpdir))
        cfg_file.write(files[key])
        new_key = "%s/%s" % (str(tmpdir), key)
        files[new_key] = files.pop(key)

    mock_all_cnf.return_value = files.keys()

    def get_config(value):
        cfg = ConfigParser.ConfigParser(allow_no_value=True)
        cfg.readfp(StringIO.StringIO(files[value]))
        return cfg

    def get_content(value):
        return files[value]

    mock_get_config.side_effect = get_config

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })

    mock_cin = mock.MagicMock()
    mock_cin.channel.recv_exit_status.return_value = 0

    dst = mock.MagicMock()
    dst.host = "localhost"

    mock_get_content.side_effect = get_content

    rmt_sql._save_cfg(dst, root_file)

    mock_all_cnf.assert_called_once_with(root_file)
    assert dst.client.write_config.call_count == writing_config_count
    assert mock_get_content.call_count == writing_text_count
Example #6
def test__clone_config(mock_get_root, mock_save):
    mock_get_root.return_value = "/etc/my.cnf"
    dst = Ssh('some_remote_dir')
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        'backup_type': 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    rmt_sql.clone_config(dst)
    mock_get_root.assert_called_with()
    mock_save.assert_called_with(dst, "/etc/my.cnf")
Example #7
def test___mem_available():
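    # execute() is mocked to return "100500" (presumably a KiB figure reported
    # by the remote host); the test expects _mem_available() to convert it
    # to bytes.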

    mock_client = mock.Mock()
    mock_client.execute.return_value = ("100500", None)

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    rmt_sql._ssh_client = mock_client
    assert rmt_sql._mem_available() == 100500 * 1024
Example #8
def test___mem_available():

    mock_client = mock.Mock()
    mock_client.execute.return_value = ("100500", None)

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": "full",
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None,
    })
    rmt_sql._ssh_client = mock_client
    assert rmt_sql._mem_available() == 100500 * 1024
Example #9
def test__clone(dest, port):
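    # dest and port are presumably provided by a @pytest.mark.parametrize
    # decorator omitted from this excerpt. The test asserts that clone() runs
    # exactly the innobackupex | gzip | nc streaming pipeline on the source.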
    arg = 'bash -c "sudo innobackupex --stream=xbstream ./ | gzip -c - | nc %s %d"' % (
        dest, port)
    mock_client = mock.Mock()
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "full_backup": INTERVALS[0],
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    rmt_sql._ssh_client = mock_client
    rmt_sql.clone(dest, port)
    mock_client.execute.assert_called_with(arg)
Example #10
def test__mem_available_raise_exception():
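    # An empty response from the remote command should make _mem_available()
    # raise OSError.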

    mock_client = mock.Mock()
    mock_client.execute.return_value = ("", None)

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": "full",
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None,
    })
    rmt_sql._ssh_client = mock_client
    with pytest.raises(OSError):
        rmt_sql._mem_available()
Example #11
def test__mem_available_raise_exception():

    mock_client = mock.Mock()
    mock_client.execute.return_value = ("", None)

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    rmt_sql._ssh_client = mock_client
    with pytest.raises(OSError):
        rmt_sql._mem_available()
Example #12
def test___find_all_cnf(mock_get_text_content, mock_list, tmpdir,
                        root, files):
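    # mock_get_text_content and mock_list are presumably patched helpers, and
    # tmpdir/root/files come from pytest fixtures/parametrize (not shown). The
    # test lays out a my.cnf hierarchy with !includedir directives under tmpdir
    # and expects _find_all_cnf() to discover every file in it.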
    root_file = "%s/%s" % (str(tmpdir), root)

    # Prepare steps (writing config files with contents)

    for key in files.keys():
        path = key.split('/')
        if len(path) > 1:
            try:
                tmpdir.mkdir('', path[0])
            except Exception:
                pass
        cfg_file = tmpdir.join(key)
        if '!includedir' in files[key]:
            files[key] = files[key].format(tmp_dir=str(tmpdir))
        cfg_file.write(files[key])
        files["%s/%s" % (str(tmpdir), key)] = files.pop(key)

    # Callbacks that return file content and directory listings from the local tmpdir

    def get_text_content(value):
        return files[value]

    def get_list(value):
        return os.listdir(value)

    mock_get_text_content.side_effect = get_text_content
    mock_list.side_effect = get_list

    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    assert sorted(rmt_sql._find_all_cnf(root_file)) == sorted(files.keys())
Example #13
def test___find_all_cnf(mock_get_text_content, mock_list, tmpdir, mycnfs,
                        expected_result_template):
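    # A pathlib-based variant of the previous test: the my.cnf tree is written
    # under tmpdir and _find_all_cnf() is expected to return the paths from
    # expected_result_template (presumably a parametrized fixture) resolved
    # against the temporary root.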
    mycnf_root = Path(tmpdir)

    # Prepare steps (writing config files with content)

    for key in mycnfs.keys():
        mycnf_root.joinpath(key).parent.mkdir(exist_ok=True)
        with open(str(mycnf_root.joinpath(key)), "w") as fp:
            fp.write(mycnfs[key])

    # mock helper functions
    def get_text_content(full_path):
        LOG.debug("Getting content of %s", full_path)
        # Strip the mycnf_root prefix from the full path and look up the
        # content in the mycnfs dictionary.
        return mycnfs["/".join(
            PurePath(full_path).parts[len(mycnf_root.parts):])]

    def get_list(path, recursive=False, files_only=True):
        return os.listdir(path)

    mock_get_text_content.side_effect = get_text_content
    mock_list.side_effect = get_list
    #
    rmt_sql = RemoteMySQLSource({
        "run_type": INTERVALS[0],
        "backup_type": 'full',
        "mysql_connect_info": MySQLConnectInfo("/"),
        "ssh_connection_info": None
    })
    expected_result = sorted(
        [osp.join(str(mycnf_root), item) for item in expected_result_template])
    actual_result = sorted(rmt_sql._find_all_cnf(
        mycnf_root.joinpath("my.cnf")))
    assert (actual_result == expected_result), LOG.error(
        "Expected: %s\nActual: %s" %
        (pformat(expected_result), pformat(actual_result)))
Example #14
def clone_mysql(
        cfg,
        source,
        destination,  # pylint: disable=too-many-arguments
        replication_user,
        replication_password,
        netcat_port=9990,
        compress=False):
    """Clone mysql backup of remote machine and stream it to slave"""
    try:
        LOG.debug('Remote MySQL Source: %s', split_host_port(source)[0])
        LOG.debug('MySQL defaults: %s', cfg.get('mysql',
                                                'mysql_defaults_file'))
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))
        src = RemoteMySQLSource({
            "ssh_host":
            split_host_port(source)[0],
            "ssh_user":
            cfg.get('ssh', 'ssh_user'),
            "ssh_key":
            cfg.get('ssh', 'ssh_key'),
            "mysql_connect_info":
            MySQLConnectInfo(cfg.get('mysql', 'mysql_defaults_file'),
                             hostname=split_host_port(source)[0]),
            "run_type":
            INTERVALS[0],
            "backup_type":
            'full'
        })
        xbstream_binary = cfg.get('mysql', 'xbstream_binary')
        LOG.debug('SSH destination: %s', split_host_port(destination)[0])
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))
        dst = Ssh('/tmp',
                  ssh_host=split_host_port(destination)[0],
                  ssh_user=cfg.get('ssh', 'ssh_user'),
                  ssh_key=cfg.get('ssh', 'ssh_key'))
        datadir = src.datadir
        LOG.debug('datadir: %s', datadir)

        if dst.list_files(datadir):
            LOG.error("Destination datadir is not empty: %s", datadir)
            exit(1)

        _run_remote_netcat(compress, datadir, destination, dst, netcat_port,
                           src, xbstream_binary)
        LOG.debug('Copying MySQL config to the destination')
        src.clone_config(dst)

        LOG.debug('Remote MySQL destination: %s',
                  split_host_port(destination)[0])
        LOG.debug('MySQL defaults: %s', cfg.get('mysql',
                                                'mysql_defaults_file'))
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))

        dst_mysql = RemoteMySQLSource({
            "ssh_host":
            split_host_port(destination)[0],
            "ssh_user":
            cfg.get('ssh', 'ssh_user'),
            "ssh_key":
            cfg.get('ssh', 'ssh_key'),
            "mysql_connect_info":
            MySQLConnectInfo(cfg.get('mysql', 'mysql_defaults_file'),
                             hostname=split_host_port(destination)[0]),
            "run_type":
            INTERVALS[0],
            "backup_type":
            'full'
        })

        binlog, position = dst_mysql.apply_backup(datadir)

        LOG.debug('Binlog coordinates: (%s, %d)', binlog, position)

        try:
            LOG.debug('Starting MySQL on the destination')
            _mysql_service(dst, action='start')
            LOG.debug('MySQL started')
        except TwinDBBackupError as err:
            LOG.error(err)
            exit(1)

        LOG.debug('Setting up replication.')
        LOG.debug('Master host: %s', source)
        LOG.debug('Replication user: %s', replication_user)
        LOG.debug('Replication password: %s', replication_password)
        dst_mysql.setup_slave(
            MySQLMasterInfo(host=split_host_port(source)[0],
                            port=split_host_port(source)[1],
                            user=replication_user,
                            password=replication_password,
                            binlog=binlog,
                            binlog_pos=position))
    except (ConfigParser.NoOptionError, OperationalError) as err:
        LOG.error(err)
        exit(1)
Example #15
def clone_mysql(cfg, source, destination,  # pylint: disable=too-many-arguments
                replication_user, replication_password,
                netcat_port=9990,
                compress=False):
    """Clone mysql backup of remote machine and stream it to slave

    :param cfg: TwinDB Backup tool config
    :type cfg: TwinDBBackupConfig
    """
    LOG.debug('Remote MySQL Source: %s', split_host_port(source)[0])
    LOG.debug(
        'MySQL defaults: %s',
        cfg.mysql.defaults_file
    )
    LOG.debug(
        'SSH username: %s',
        cfg.ssh.user
    )
    LOG.debug(
        'SSH key: %s',
        cfg.ssh.key
    )
    src = RemoteMySQLSource(
        {
            "ssh_host": split_host_port(source)[0],
            "ssh_user": cfg.ssh.user,
            "ssh_key": cfg.ssh.key,
            "mysql_connect_info": MySQLConnectInfo(
                cfg.mysql.defaults_file,
                hostname=split_host_port(source)[0]),
            "run_type": INTERVALS[0],
            "backup_type": 'full'
        }
    )
    xbstream_binary = cfg.mysql.xbstream_binary
    LOG.debug('SSH destination: %s', split_host_port(destination)[0])
    LOG.debug('SSH username: %s', cfg.ssh.user)
    LOG.debug('SSH key: %s', cfg.ssh.key)
    dst = Ssh(
        '/tmp',
        ssh_host=split_host_port(destination)[0],
        ssh_user=cfg.ssh.user,
        ssh_key=cfg.ssh.key
    )
    datadir = src.datadir
    LOG.debug('datadir: %s', datadir)

    if dst.list_files(datadir):
        LOG.error("Destination datadir is not empty: %s", datadir)
        exit(1)

    _run_remote_netcat(
        compress,
        datadir,
        destination,
        dst,
        netcat_port,
        src,
        xbstream_binary
    )
    LOG.debug('Copying MySQL config to the destination')
    src.clone_config(dst)

    LOG.debug('Remote MySQL destination: %s',
              split_host_port(destination)[0])
    LOG.debug(
        'MySQL defaults: %s',
        cfg.mysql.defaults_file
    )
    LOG.debug('SSH username: %s', cfg.ssh.user)
    LOG.debug('SSH key: %s', cfg.ssh.key)

    dst_mysql = RemoteMySQLSource({
        "ssh_host": split_host_port(destination)[0],
        "ssh_user": cfg.ssh.user,
        "ssh_key": cfg.ssh.key,
        "mysql_connect_info": MySQLConnectInfo(
            cfg.mysql.defaults_file,
            hostname=split_host_port(destination)[0]
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    binlog, position = dst_mysql.apply_backup(datadir)

    LOG.debug('Binlog coordinates: (%s, %d)', binlog, position)

    LOG.debug('Starting MySQL on the destination')
    _mysql_service(dst, action='start')
    LOG.debug('MySQL started')

    LOG.debug('Setting up replication.')
    LOG.debug('Master host: %s', source)
    LOG.debug('Replication user: %s', replication_user)
    LOG.debug('Replication password: %s', replication_password)
    dst_mysql.setup_slave(
        MySQLMasterInfo(
            host=split_host_port(source)[0],
            port=split_host_port(source)[1],
            user=replication_user,
            password=replication_password,
            binlog=binlog,
            binlog_pos=position
        )
    )
Example #16
def test_clone(runner, master1, slave, docker_client, config_content_clone):
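    # End-to-end test. runner/master1/slave/docker_client/config_content_clone
    # are presumably pytest fixtures describing Docker containers and a config
    # template. The test runs `twindb-backup clone mysql` from the runner
    # container and then polls SHOW SLAVE STATUS on the slave for up to 30
    # seconds until both replication threads report 'Yes'.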

    twindb_config_dir = get_twindb_config_dir(docker_client, runner['Id'])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"
    contents = """
[client]
user=dba
password=qwerty
"""
    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(contents)

    private_key = """-----BEGIN RSA PRIVATE KEY-----
MIIEoAIBAAKCAQEAyXxAjPShNGAedbaEtltFI6A7RlsyI+4evxTq6uQrgbJ6Hm+p
HBXshXQYXDyVjvytaM+6GKF+r+6+C+6Wc5Xz4lLO/ZiSCdPbyEgqw1JoHrgPNpc6
wmCtjJExxjzvpwSVgbZg3xOdqW1y+TyqeUkXEg/Lm4VZhN1Q/KyGCgBlWuAXoOYR
GhaNWqcnr/Wn5YzVHAx2yJNrurtKLVYVMIkGcN/6OUaPpWqKZLaXiK/28PSZ5GdT
DmxRg4W0pdyGEYQndpPlpLF4w5gNUEhVZM8hWVE29+DIW3XXVYGYchxmkhU7wrGx
xZR+k5AT+7g8VspVS8zNMXM9Z27w55EQuluNMQIBIwKCAQAzz35QIaXLo7APo/Y9
hS8JKTPQQ1YJPTsbMUO4vlRUjPrUoF6vc1oTsCOFbqoddCyXS1u9MNdvEYFThn51
flSn6WhtGJqU0BPxrChA2q0PNqTThfkqqyVQCBQdCFrhzfqPEaPhl1RtZUlzSh01
IWxVGgEn/bfu9xTTQk5aV9+MZQ2XKe4BGzpOZMI/B7ivRCcthEwMTx92opr52bre
4t7DahVLN/2Wu4lxajDzCaKXpjMuL76lFov0mZZN7S8whH5xSx1tpapHqsCAwfLL
k49lDdR8aN6oqoeK0e9w//McIaKxN2FVxD4bcuXiQTjihx+QwQOLmlHSRDKhTsYg
4Q5bAoGBAOgVZM2eqC8hNl5UH//uuxOeBKqwz7L/FtGemNr9m0XG8N9yE/K7A5iX
6EDvDyVI51IlIXdxfK8re5yxfbJ4YevenwdEZZ2O8YRrVByJ53PV9CcVeWL4p6f/
I56sYyDfXcnDTEOVYY0mCfYUfUcSb1ExpuIU4RvuQJg6tvbdxD9FAoGBAN4/pVCT
krRd6PJmt6Dbc2IF6N09OrAnLB3fivGztF5cp+RpyqZK4ve+akLoe1laTg7vNtnF
l/PZtM9v/VT45hb70MFEHO+sKvGa5Yimxkb6YCriJOcLxTysSgFHKz7v+8BqqoHi
qY4fORGwPVDv28I8jKRvcuNHendV/Rdcuk79AoGAd1t1q5NscAJzu3u4r4IXEWc1
mZzClpHROJq1AujTgviZInUu1JqxZGthgHrx2KkmggR3nIOB86/2bdefut7TRhq4
L5+Et24VzxKgSTD6sJnrR0zfV3iQvMxbdizFRBsaSoGyMWLEdHn2fo4xzMem9o6Q
VwNsdMOsMQhA1rsxuiMCgYBr8wcnIxte68jqxC1OIXKOsmnKi3RG7nSDidXF2vE1
JbCiJMGD+Hzeu5KyyLDw4rgzI7uOWKjkJ+obnMuBCy3t6AZPPlcylXPxsaKwFn2Q
MHfaUJWUyzPqRQ4AnukekdINAJv18cAR1Kaw0fHle9Ej1ERP3lxfw6HiMRSHsLJD
nwKBgCIXVhXCDaXOOn8M4ky6k27bnGJrTkrRjHaq4qWiQhzizOBTb+7MjCrJIV28
8knW8+YtEOfl5R053SKQgVsmRjjDfvCirGgqC4kSAN4A6MD+GNVXZVUUjAUBVUbU
8Wt4BxW6kFA7+Su7n8o4DxCqhZYmK9ZUhNjE+uUhxJCJaGr4
-----END RSA PRIVATE KEY-----
"""
    with open(private_key_host, "w") as key_fd:
        key_fd.write(private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_clone.format(
            PRIVATE_KEY=private_key_guest,
            MY_CNF='/etc/twindb/my.cnf'
        )
        fp.write(content)

    cmd = '/usr/sbin/sshd'
    # Run SSH daemon on master1_1
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    print(cout)

    cmd = ['twindb-backup', '--debug',
           '--config', twindb_config_guest,
           'clone', 'mysql',
           "%s:3306" % master1['ip'], "%s:3306" % slave['ip']
           ]
    ret, cout = docker_execute(docker_client, runner['Id'], cmd)
    print(cout)

    assert ret == 0
    sql_master_2 = RemoteMySQLSource({
        "ssh_host": slave['ip'],
        "ssh_user": '******',
        "ssh_key": private_key_guest,
        "mysql_connect_info": MySQLConnectInfo(
            my_cnf_path,
            hostname=slave['ip']
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row['Slave_IO_Running'] == 'Yes' and row['Slave_SQL_Running'] == 'Yes':
                    LOG.info('Replication is up and running')
                    return
    LOG.error('Replication is not running after 30 seconds timeout')
    assert False
Example #17
def clone_mysql(
        cfg,
        source,
        destination,  # pylint: disable=too-many-arguments
        replication_user,
        replication_password,
        netcat_port=9990):
    """Clone mysql backup of remote machine and stream it to slave"""
    try:
        LOG.debug('Remote MySQL Source: %s', split_host_port(source)[0])
        LOG.debug('MySQL defaults: %s', cfg.get('mysql',
                                                'mysql_defaults_file'))
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))
        src = RemoteMySQLSource({
            "ssh_connection_info":
            SshConnectInfo(host=split_host_port(source)[0],
                           user=cfg.get('ssh', 'ssh_user'),
                           key=cfg.get('ssh', 'ssh_key')),
            "mysql_connect_info":
            MySQLConnectInfo(cfg.get('mysql', 'mysql_defaults_file'),
                             hostname=split_host_port(source)[0]),
            "run_type":
            INTERVALS[0],
            "full_backup":
            INTERVALS[0],
        })
        LOG.debug('SSH destination: %s', split_host_port(destination)[0])
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))
        dst = Ssh(ssh_connect_info=SshConnectInfo(
            host=split_host_port(destination)[0],
            user=cfg.get('ssh', 'ssh_user'),
            key=cfg.get('ssh', 'ssh_key')), )

        datadir = src.datadir

        LOG.debug('datadir: %s', datadir)

        if dst.list_files(datadir):
            LOG.error("Destination datadir is not empty: %s", datadir)
            exit(1)

        try:
            LOG.debug('Stopping MySQL on the destination')
            _mysql_service(dst, action='stop')
        except TwinDBBackupError as err:
            LOG.error(err)
            exit(1)

        proc_netcat = Process(
            target=dst.netcat,
            args=("gunzip -c - | xbstream -x -C {datadir}".format(
                datadir=datadir), ),
            kwargs={'port': netcat_port})
        proc_netcat.start()
        LOG.debug('Starting netcat on the destination')
        src.clone(dest_host=split_host_port(destination)[0], port=netcat_port)
        proc_netcat.join()
        LOG.debug('Copying MySQL config to the destination')
        src.clone_config(dst)

        LOG.debug('Remote MySQL destination: %s',
                  split_host_port(destination)[0])
        LOG.debug('MySQL defaults: %s', cfg.get('mysql',
                                                'mysql_defaults_file'))
        LOG.debug('SSH username: %s', cfg.get('ssh', 'ssh_user'))
        LOG.debug('SSH key: %s', cfg.get('ssh', 'ssh_key'))

        dst_mysql = RemoteMySQLSource({
            "ssh_connection_info":
            SshConnectInfo(host=split_host_port(destination)[0],
                           user=cfg.get('ssh', 'ssh_user'),
                           key=cfg.get('ssh', 'ssh_key')),
            "mysql_connect_info":
            MySQLConnectInfo(cfg.get('mysql', 'mysql_defaults_file'),
                             hostname=split_host_port(destination)[0]),
            "run_type":
            INTERVALS[0],
            "full_backup":
            INTERVALS[0],
        })

        binlog, position = dst_mysql.apply_backup(datadir)

        LOG.debug('Binlog coordinates: (%s, %d)', binlog, position)

        try:
            LOG.debug('Starting MySQL on the destination')
            _mysql_service(dst, action='start')
        except TwinDBBackupError as err:
            LOG.error(err)
            exit(1)

        LOG.debug('Setting up replication.')
        LOG.debug('Master host: %s', source)
        LOG.debug('Replication user: %s', replication_user)
        LOG.debug('Replication password: %s', replication_password)
        dst_mysql.setup_slave(source, replication_user, replication_password,
                              binlog, position)

    except (ConfigParser.NoOptionError, OperationalError) as err:
        LOG.error(err)
        exit(1)
Example #18
def test_clone(
        runner,
        master1,
        slave,
        docker_client,
        config_content_clone,
        client_my_cnf,
        rsa_private_key):

    twindb_config_dir = get_twindb_config_dir(docker_client, runner['Id'])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"

    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(client_my_cnf)

    with open(private_key_host, "w") as key_fd:
        key_fd.write(rsa_private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_clone.format(
            PRIVATE_KEY=private_key_guest,
            MY_CNF='/etc/twindb/my.cnf'
        )
        fp.write(content)

    cmd = '/usr/sbin/sshd'
    LOG.info('Run SSH daemon on master1_1')
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    print(cout)

    cmd = [
        'twindb-backup',
        '--debug',
        '--config', twindb_config_guest,
        'clone',
        'mysql',
        '%s:3306' % master1['ip'],
        '%s:3306' % slave['ip']
    ]
    pause_test(' '.join(cmd))
    ret, cout = docker_execute(docker_client, runner['Id'], cmd)
    print(cout)

    assert ret == 0
    sql_master_2 = RemoteMySQLSource({
        "ssh_host": slave['ip'],
        "ssh_user": '******',
        "ssh_key": private_key_guest,
        "mysql_connect_info": MySQLConnectInfo(
            my_cnf_path,
            hostname=slave['ip']
        ),
        "run_type": INTERVALS[0],
        "backup_type": 'full'
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row['Slave_IO_Running'] == 'Yes' \
                        and row['Slave_SQL_Running'] == 'Yes':

                    LOG.info('Replication is up and running')
                    return

    LOG.error('Replication is not running after 30 seconds timeout')
    assert False
Example #19
def clone_mysql(
    cfg,
    source,
    destination,  # pylint: disable=too-many-arguments
    replication_user,
    replication_password,
    netcat_port=9990,
    compress=False,
):
    """Clone mysql backup of remote machine and stream it to slave

    :param cfg: TwinDB Backup tool config
    :type cfg: TwinDBBackupConfig
    """
    LOG.debug("Remote MySQL Source: %s", split_host_port(source)[0])
    LOG.debug("MySQL defaults: %s", cfg.mysql.defaults_file)
    LOG.debug("SSH username: %s", cfg.ssh.user)
    LOG.debug("SSH key: %s", cfg.ssh.key)
    src = RemoteMySQLSource({
        "ssh_host":
        split_host_port(source)[0],
        "ssh_user":
        cfg.ssh.user,
        "ssh_key":
        cfg.ssh.key,
        "mysql_connect_info":
        MySQLConnectInfo(cfg.mysql.defaults_file,
                         hostname=split_host_port(source)[0]),
        "run_type":
        INTERVALS[0],
        "backup_type":
        "full",
    })
    xbstream_binary = cfg.mysql.xbstream_binary
    LOG.debug("SSH destination: %s", split_host_port(destination)[0])
    LOG.debug("SSH username: %s", cfg.ssh.user)
    LOG.debug("SSH key: %s", cfg.ssh.key)
    dst = Ssh(
        "/tmp",
        ssh_host=split_host_port(destination)[0],
        ssh_user=cfg.ssh.user,
        ssh_key=cfg.ssh.key,
    )
    datadir = src.datadir
    LOG.debug("datadir: %s", datadir)

    if dst.list_files(datadir):
        LOG.error("Destination datadir is not empty: %s", datadir)
        exit(1)

    _run_remote_netcat(compress, datadir, destination, dst, netcat_port, src,
                       xbstream_binary)
    LOG.debug("Copying MySQL config to the destination")
    src.clone_config(dst)

    LOG.debug("Remote MySQL destination: %s", split_host_port(destination)[0])
    LOG.debug("MySQL defaults: %s", cfg.mysql.defaults_file)
    LOG.debug("SSH username: %s", cfg.ssh.user)
    LOG.debug("SSH key: %s", cfg.ssh.key)

    dst_mysql = RemoteMySQLSource({
        "ssh_host":
        split_host_port(destination)[0],
        "ssh_user":
        cfg.ssh.user,
        "ssh_key":
        cfg.ssh.key,
        "mysql_connect_info":
        MySQLConnectInfo(
            cfg.mysql.defaults_file,
            hostname=split_host_port(destination)[0],
        ),
        "run_type":
        INTERVALS[0],
        "backup_type":
        "full",
    })

    binlog, position = dst_mysql.apply_backup(datadir)

    LOG.debug("Binlog coordinates: (%s, %d)", binlog, position)

    LOG.debug("Starting MySQL on the destination")
    _mysql_service(dst, action="start")
    LOG.debug("MySQL started")

    LOG.debug("Setting up replication.")
    LOG.debug("Master host: %s", source)
    LOG.debug("Replication user: %s", replication_user)
    LOG.debug("Replication password: %s", replication_password)
    dst_mysql.setup_slave(
        MySQLMasterInfo(
            host=split_host_port(source)[0],
            port=split_host_port(source)[1],
            user=replication_user,
            password=replication_password,
            binlog=binlog,
            binlog_pos=position,
        ))
Example #20
def test_clone(
    runner,
    master1,
    slave,
    docker_client,
    config_content_clone,
    client_my_cnf,
    rsa_private_key,
):

    twindb_config_dir = get_twindb_config_dir(docker_client, runner["Id"])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = "/etc/twindb/twindb-backup-1.cfg"
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"

    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(client_my_cnf)

    with open(private_key_host, "w") as key_fd:
        key_fd.write(rsa_private_key)

    with open(twindb_config_host, "w") as fp:
        content = config_content_clone.format(PRIVATE_KEY=private_key_guest,
                                              MY_CNF="/etc/twindb/my.cnf")
        fp.write(content)

    cmd = "/usr/sbin/sshd"
    LOG.info("Run SSH daemon on master1_1")
    ret, cout = docker_execute(docker_client, master1["Id"], cmd)
    assert_and_pause((ret == 0, ), cout)

    cmd = [
        "twindb-backup",
        "--debug",
        "--config",
        twindb_config_guest,
        "clone",
        "mysql",
        "%s:3306" % master1["ip"],
        "%s:3306" % slave["ip"],
    ]
    ret, cout = docker_execute(docker_client, runner["Id"], cmd)
    assert_and_pause((ret == 0, ), cout)

    sql_master_2 = RemoteMySQLSource({
        "ssh_host":
        slave["ip"],
        "ssh_user":
        "******",
        "ssh_key":
        private_key_guest,
        "mysql_connect_info":
        MySQLConnectInfo(my_cnf_path, hostname=slave["ip"]),
        "run_type":
        INTERVALS[0],
        "backup_type":
        "full",
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute("SHOW SLAVE STATUS")
                row = cursor.fetchone()
                if (row["Slave_IO_Running"] == "Yes"
                        and row["Slave_SQL_Running"] == "Yes"):

                    LOG.info("Replication is up and running")
                    return

    LOG.error("Replication is not running after 30 seconds timeout")
    assert False
Example #21
def test_clone(runner, master1, slave, docker_client, config_content_clone,
               client_my_cnf, rsa_private_key):

    twindb_config_dir = get_twindb_config_dir(docker_client, runner['Id'])
    twindb_config_host = "%s/twindb-backup-1.cfg" % twindb_config_dir
    twindb_config_guest = '/etc/twindb/twindb-backup-1.cfg'
    my_cnf_path = "%s/my.cnf" % twindb_config_dir

    private_key_host = "%s/private_key" % twindb_config_dir
    private_key_guest = "/etc/twindb/private_key"

    with open(my_cnf_path, "w") as my_cnf:
        my_cnf.write(client_my_cnf)

    with open(private_key_host, "w") as key_fd:
        key_fd.write(rsa_private_key)

    with open(twindb_config_host, 'w') as fp:
        content = config_content_clone.format(PRIVATE_KEY=private_key_guest,
                                              MY_CNF='/etc/twindb/my.cnf')
        fp.write(content)

    cmd = '/usr/sbin/sshd'
    LOG.info('Run SSH daemon on master1_1')
    ret, cout = docker_execute(docker_client, master1['Id'], cmd)
    print(cout)

    cmd = [
        'twindb-backup', '--debug', '--config', twindb_config_guest, 'clone',
        'mysql',
        '%s:3306' % master1['ip'],
        '%s:3306' % slave['ip']
    ]
    pause_test(' '.join(cmd))
    ret, cout = docker_execute(docker_client, runner['Id'], cmd)
    print(cout)

    assert ret == 0
    sql_master_2 = RemoteMySQLSource({
        "ssh_host":
        slave['ip'],
        "ssh_user":
        '******',
        "ssh_key":
        private_key_guest,
        "mysql_connect_info":
        MySQLConnectInfo(my_cnf_path, hostname=slave['ip']),
        "run_type":
        INTERVALS[0],
        "backup_type":
        'full'
    })

    timeout = time.time() + 30
    while time.time() < timeout:
        with sql_master_2.get_connection() as conn:
            with conn.cursor() as cursor:
                cursor.execute('SHOW SLAVE STATUS')
                row = cursor.fetchone()
                if row['Slave_IO_Running'] == 'Yes' \
                        and row['Slave_SQL_Running'] == 'Yes':

                    LOG.info('Replication is up and running')
                    return

    LOG.error('Replication is not running after 30 seconds timeout')
    assert False