Example #1
def run_test_sigterm():
  utils.zk_wipe()
  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  # create the database so vttablets start, as it is serving
  tablet_62344.create_db('vt_test_keyspace')

  tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

  # start a 'vtctl Sleep' command in the background
  sp = utils.run_bg(utils.vtroot+'/bin/vtctl -logfile=/dev/null Sleep %s 60s' %
                    tablet_62344.tablet_alias,
                    stdout=PIPE, stderr=PIPE)

  # wait for it to start, and let's kill it
  time.sleep(2.0)
  utils.run(['pkill', 'vtaction'])
  out, err = sp.communicate()

  # check the vtctl command got the right remote error back
  if "vtaction interrupted by signal" not in err:
    raise utils.TestError("cannot find expected output in error:", err)
  utils.debug("vtaction was interrupted correctly:\n" + err)

  tablet_62344.kill_vttablet()
Example #2
def test_multisnapshot_vtctl():
  populate = sum([[
    "insert into vt_insert_test_%s (msg) values ('test %s')" % (i, x)
    for x in xrange(4)] for i in range(6)], [])
  create = ['''create table vt_insert_test_%s (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB''' % i for i in range(6)]

  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  utils.run_vtctl('RebuildShardGraph test_keyspace/0')
  utils.validate_topology()

  tablet_62344.populate('vt_test_keyspace', create,
                        populate)

  tablet_62344.start_vttablet()

  utils.run_vtctl('MultiSnapshot --force --tables=vt_insert_test_1,vt_insert_test_2,vt_insert_test_3 --spec=-0000000000000003- %s id' % tablet_62344.tablet_alias)

  # if err != 0:
  #   raise utils.TestError('mysqlctl multisnapshot failed')
  if os.path.exists(os.path.join(utils.vtdataroot, 'snapshot/vt_0000062344/data/vt_test_keyspace-,0000000000000003/vt_insert_test_4.0.csv.gz')):
    raise utils.TestError("Table vt_insert_test_4 wasn't supposed to be dumped.")
  for kr in 'vt_test_keyspace-,0000000000000003', 'vt_test_keyspace-0000000000000003,':
    path = os.path.join(utils.vtdataroot, 'snapshot/vt_0000062344/data/', kr, 'vt_insert_test_1.0.csv.gz')
    with gzip.open(path) as f:
      if len(f.readlines()) != 2:
        raise utils.TestError("Data looks wrong in %s" % path)
Example #3
def _populate_zk():
    utils.zk_wipe()

    utils.run(utils.vtroot + '/bin/zk touch -p /zk/test_nj/zkocc1')
    utils.run(utils.vtroot + '/bin/zk touch -p /zk/test_nj/zkocc2')
    fd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
    filename1 = fd.name
    fd.write("Test data 1")
    fd.close()
    utils.run(utils.vtroot + '/bin/zk cp ' + filename1 +
              ' /zk/test_nj/zkocc1/data1')

    fd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
    filename2 = fd.name
    fd.write("Test data 2")
    fd.close()
    utils.run(utils.vtroot + '/bin/zk cp ' + filename2 +
              ' /zk/test_nj/zkocc1/data2')

    fd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
    filename3 = fd.name
    fd.write("Test data 3")
    fd.close()
    utils.run(utils.vtroot + '/bin/zk cp ' + filename3 +
              ' /zk/test_nj/zkocc1/data3')
Example #4
 def setUp(self):
     utils.zk_wipe()
     self.zkocc_server = utils.zkocc_start()
     self.vttopo_server = utils.vttopo_start()
     self.topo = zkocc.ZkOccConnection(
         "localhost:%u" % utils.zkocc_port_base, 'test_nj', 30)
     self.topo.dial()
Example #5
def run_test_reparent_lag_slave(shard_id='0'):
    utils.zk_wipe()

    utils.run_vtctl('CreateKeyspace -force test_keyspace')

    # create the database so vttablets start, as they are serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    tablet_41983.create_db('vt_test_keyspace')
    tablet_31981.create_db('vt_test_keyspace')

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_41983.init_tablet('lag', 'test_keyspace', shard_id, start=True)

    # Recompute the shard layout node - until you do that, it might not be valid.
    utils.run_vtctl('RebuildShardGraph test_keyspace/' + shard_id)
    utils.validate_topology()

    # Force the slaves to reparent assuming that all the datasets are identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl('ReparentShard -force test_keyspace/%s %s' %
                    (shard_id, tablet_62344.tablet_alias))
    utils.validate_topology(ping_tablets=True)

    tablet_62344.create_db('vt_test_keyspace')
    tablet_62344.mquery('vt_test_keyspace', create_vt_insert_test)

    tablet_41983.mquery('', 'stop slave')
    for q in populate_vt_insert_test:
        tablet_62344.mquery('vt_test_keyspace', q, write=True)

    # Perform a graceful reparent operation.
    utils.run_vtctl('ReparentShard test_keyspace/%s %s' %
                    (shard_id, tablet_62044.tablet_alias))

    tablet_41983.mquery('', 'start slave')
    time.sleep(1)

    utils.pause("check orphan")

    utils.run_vtctl('ReparentTablet %s' % tablet_41983.tablet_alias)

    result = tablet_41983.mquery('vt_test_keyspace',
                                 'select msg from vt_insert_test where id=1')
    if len(result) != 1:
        raise utils.TestError('expected 1 row from vt_insert_test', result)

    utils.pause("check lag reparent")

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
    tablet_31981.kill_vttablet()
Example #6
 def setUp(self):
   utils.zk_wipe()
   self.zkocc_server = utils.zkocc_start()
   # the default topo implementation for vtgate is zookeeper
   self.vtgate_zk, self.vtgate_zk_port = utils.vtgate_start()
   self.vtgate_zkocc, self.vtgate_zkocc_port = utils.vtgate_start(topo_impl="zkocc")
   self.topo = zkocc.ZkOccConnection("localhost:%u" % utils.zkocc_port_base, 'test_nj', 30)
   self.topo.dial()
Example #7
def test_multisnapshot_and_restore_vtctl():
    tables = ["vt_insert_test", "vt_insert_test1"]
    create_template = """create table %s (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB"""
    insert_template = "insert into %s (id, msg) values (%s, 'test %s')"
    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    # Start three tablets for three different shards. At this point the
    # sharding schema is not really important, as long as it is
    # consistent.
    new_spec = "-0000000000000028-"
    old_tablets = [tablet_62044, tablet_41983, tablet_31981]
    for i, tablet in enumerate(old_tablets):
        tablet.init_tablet("master", "test_keyspace", str(i))
        utils.run_vtctl("RebuildShardGraph test_keyspace/%s" % i)
    utils.validate_topology()

    for i, tablet in enumerate(old_tablets):
        tablet.populate(
            "vt_test_keyspace",
            [create_template % table for table in tables],
            sum([[insert_template % (table, 10 * j + i, 10 * j + i) for j in range(1, 8)] for table in tables], []),
        )
        tablet.start_vttablet()
        utils.run_vtctl(
            "MultiSnapshot -force -maximum-file-size=1 -spec=%s %s id" % (new_spec, tablet.tablet_alias),
            trap_output=True,
        )

    utils.run_vtctl("CreateKeyspace -force test_keyspace_new")
    tablet_62344.init_tablet("master", "test_keyspace_new", "-0000000000000028", dbname="not_vt_test_keyspace")
    utils.run_vtctl("RebuildShardGraph test_keyspace_new/-0000000000000028")
    utils.validate_topology()
    tablet_62344.mquery("", "DROP DATABASE IF EXISTS not_vt_test_keyspace")
    tablet_62344.start_vttablet(wait_for_state="CONNECTING")  # db not created

    # 0x28 = 40
    source_aliases = " ".join(t.tablet_alias for t in old_tablets)
    utils.run_vtctl(
        "MultiRestore %s %s" % (tablet_62344.tablet_alias, source_aliases), auto_log=True, raise_on_error=True
    )
    time.sleep(1)
    for table in tables:
        rows = tablet_62344.mquery("not_vt_test_keyspace", "select id from %s" % table)
        if len(rows) == 0:
            raise utils.TestError("There are no rows in the restored database.")
        for row in rows:
            if row[0] > 32:
                raise utils.TestError("Bad row: %s" % row)
    for tablet in tablet_62044, tablet_41983, tablet_31981, tablet_62344:
        tablet.kill_vttablet()
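Why 32 is the cutoff in the row check above: the inserts generate ids 10*j + i (j in 1..7, i in 0..2), and the target shard -0000000000000028 only covers keyspace ids below 0x28 = 40, so the largest id that may legitimately appear after MultiRestore is 32. A quick sanity check:

# Sanity check for the id cutoff used above: the target key range ends at
# 0x28 = 40, and the largest generated id below 40 is 32.
ids = [10 * j + i for j in range(1, 8) for i in range(3)]
assert int('0000000000000028', 16) == 40
assert max(x for x in ids if x < 40) == 32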
Example #8
def run_test_mysqlctl_split():
  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  utils.run_vtctl('RebuildShardGraph test_keyspace/0')
  utils.validate_topology()

  tablet_62344.populate('vt_test_keyspace', create_vt_insert_test,
                        populate_vt_insert_test)

  tablet_62344.start_vttablet()

  err = tablet_62344.mysqlctl('-port %u -mysql-port %u partialsnapshot -end=0000000000000003 vt_test_keyspace id' % (tablet_62344.port, tablet_62344.mysql_port)).wait()
  if err != 0:
    raise utils.TestError('mysqlctl partialsnapshot failed')


  utils.pause("partialsnapshot finished")

  tablet_62044.mquery('', 'stop slave')
  tablet_62044.create_db('vt_test_keyspace')
  call(["touch", "/tmp/vtSimulateFetchFailures"])
  err = tablet_62044.mysqlctl('-port %u -mysql-port %u partialrestore %s/snapshot/vt_0000062344/data/vt_test_keyspace-,0000000000000003/partial_snapshot_manifest.json' % (tablet_62044.port, tablet_62044.mysql_port, utils.vtdataroot)).wait()
  if err != 0:
    raise utils.TestError('mysqlctl partialrestore failed')

  tablet_62044.assert_table_count('vt_test_keyspace', 'vt_insert_test', 2)

  # change/add two values on the master, one in range, one out of range, make
  # sure the right one propagate and not the other
  utils.run_vtctl('SetReadWrite ' + tablet_62344.tablet_alias)
  tablet_62344.mquery('vt_test_keyspace', "insert into vt_insert_test (id, msg) values (5, 'test should not propagate')", write=True)
  tablet_62344.mquery('vt_test_keyspace', "update vt_insert_test set msg='test should propagate' where id=2", write=True)

  utils.pause("look at db now!")

  # wait until value that should have been changed is here
  timeout = 10
  while timeout > 0:
    result = tablet_62044.mquery('vt_test_keyspace', 'select msg from vt_insert_test where id=2')
    if result[0][0] == "test should propagate":
      break
    timeout -= 1
    time.sleep(1)
  if timeout == 0:
    raise utils.TestError("expected propagation to happen", result)

  # test value that should not propagate
  # this part is disabled now, as the replication pruning is only enabled
  # for row-based replication, but the mysql server is statement based.
  # will re-enable once we get statement-based pruning patch into mysql.
#  tablet_62044.assert_table_count('vt_test_keyspace', 'vt_insert_test', 0, 'where id=5')

  tablet_62344.kill_vttablet()
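The 10-second polling loop above recurs in several of these tests; a hedged sketch of a reusable wait helper (hypothetical, not part of utils):

# Hypothetical helper mirroring the replication-wait loop above: poll a
# condition until it holds or a deadline passes.
def wait_for(condition, timeout=10, interval=1.0):
  while timeout > 0:
    if condition():
      return
    timeout -= interval
    time.sleep(interval)
  raise utils.TestError('timed out waiting for condition')

# Usage equivalent to the loop above:
# wait_for(lambda: tablet_62044.mquery(
#     'vt_test_keyspace',
#     'select msg from vt_insert_test where id=2')[0][0] == 'test should propagate')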
Example #9
def run_test_reparent_lag_slave(shard_id="0"):
    utils.zk_wipe()

    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    # create the database so vttablets start, as they are serving
    tablet_62344.create_db("vt_test_keyspace")
    tablet_62044.create_db("vt_test_keyspace")
    tablet_41983.create_db("vt_test_keyspace")
    tablet_31981.create_db("vt_test_keyspace")

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet("master", "test_keyspace", shard_id, start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet("replica", "test_keyspace", shard_id, start=True)
    tablet_31981.init_tablet("replica", "test_keyspace", shard_id, start=True)
    tablet_41983.init_tablet("lag", "test_keyspace", shard_id, start=True)

    # Recompute the shard layout node - until you do that, it might not be valid.
    utils.run_vtctl("RebuildShardGraph test_keyspace/" + shard_id)
    utils.validate_topology()

    # Force the slaves to reparent assuming that all the datasets are identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl("ReparentShard -force test_keyspace/%s %s" % (shard_id, tablet_62344.tablet_alias))
    utils.validate_topology(ping_tablets=True)

    tablet_62344.create_db("vt_test_keyspace")
    tablet_62344.mquery("vt_test_keyspace", create_vt_insert_test)

    tablet_41983.mquery("", "stop slave")
    for q in populate_vt_insert_test:
        tablet_62344.mquery("vt_test_keyspace", q, write=True)

    # Perform a graceful reparent operation.
    utils.run_vtctl("ReparentShard test_keyspace/%s %s" % (shard_id, tablet_62044.tablet_alias))

    tablet_41983.mquery("", "start slave")
    time.sleep(1)

    utils.pause("check orphan")

    utils.run_vtctl("ReparentTablet %s" % tablet_41983.tablet_alias)

    result = tablet_41983.mquery("vt_test_keyspace", "select msg from vt_insert_test where id=1")
    if len(result) != 1:
        raise utils.TestError("expected 1 row from vt_insert_test", result)

    utils.pause("check lag reparent")

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
    tablet_31981.kill_vttablet()
Example #10
def run_test_hook():
    utils.zk_wipe()
    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db("vt_test_keyspace")

    tablet_62344.init_tablet("master", "test_keyspace", "0", start=True)

    # test a regular program works
    _run_hook(
        "test.sh flag1 param1=hello",
        [
            '"ExitStatus": 0',
            [
                '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --flag1\\nPARAM: --param1=hello\\n"',
                '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --param1=hello\\nPARAM: --flag1\\n"',
            ],
            '"Stderr": ""',
        ],
    )

    # test stderr output
    _run_hook(
        "test.sh to-stderr",
        [
            '"ExitStatus": 0',
            '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --to-stderr\\n"',
            '"Stderr": "ERR: --to-stderr\\n"',
        ],
    )

    # test commands that fail
    _run_hook(
        "test.sh exit-error",
        [
            '"ExitStatus": 1',
            '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --exit-error\\n"',
            '"Stderr": "ERROR: exit status 1\\n"',
        ],
    )

    # test hook that is not present
    _run_hook(
        "not_here.sh",
        ['"ExitStatus": -1', '"Stdout": "Skipping missing hook: /', '"Stderr": ""'],  # cannot go further, local path
    )

    # test hook with invalid name
    _run_hook("/bin/ls", ["action failed: ExecuteHook hook name cannot have a '/' in it"])

    tablet_62344.kill_vttablet()
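_run_hook itself is not shown on this page. A plausible sketch, assuming it wraps 'vtctl ExecuteHook' against tablet_62344 and treats a nested list of expectations as alternatives (which the two accepted Stdout orderings above suggest); the exact command line is an assumption:

# Plausible sketch of the _run_hook helper used above (assumed, not the
# verbatim implementation): run the hook through vtctl ExecuteHook and
# check each expected pattern; a nested list matches if any alternative does.
def _run_hook(params, expected_patterns):
  out, err = utils.run(
      utils.vtroot + '/bin/vtctl -logfile=/dev/null ExecuteHook %s %s' %
      (tablet_62344.tablet_alias, params),
      trap_output=True, raise_on_error=False)
  for expected in expected_patterns:
    if isinstance(expected, list):
      if not any(alt in err for alt in expected):
        raise utils.TestError('no expected alternative found', expected, err)
    elif expected not in err:
      raise utils.TestError('expected pattern not found', expected, err)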
Example #11
def _run_test_mysqlctl_clone(server_mode):
    if server_mode:
        snapshot_cmd = "snapshotsourcestart -concurrency=8"
        restore_flags = "-dont-wait-for-slave-start"
    else:
        snapshot_cmd = "snapshot -concurrency=5"
        restore_flags = ""

    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl('CreateKeyspace -force snapshot_test')

    tablet_62344.init_tablet('master', 'snapshot_test', '0')
    utils.run_vtctl('RebuildShardGraph snapshot_test/0')
    utils.validate_topology()

    tablet_62344.populate('vt_snapshot_test', create_vt_insert_test,
                          populate_vt_insert_test)

    tablet_62344.start_vttablet()

    err = tablet_62344.mysqlctl(
        '-port %u -mysql-port %u %s vt_snapshot_test' %
        (tablet_62344.port, tablet_62344.mysql_port, snapshot_cmd)).wait()
    if err != 0:
        raise utils.TestError('mysqlctl %s failed' % snapshot_cmd)

    utils.pause("%s finished" % snapshot_cmd)

    call(["touch", "/tmp/vtSimulateFetchFailures"])
    err = tablet_62044.mysqlctl(
        '-port %u -mysql-port %u restore -fetch-concurrency=2 -fetch-retry-count=4 %s %s/snapshot/vt_0000062344/snapshot_manifest.json'
        % (tablet_62044.port, tablet_62044.mysql_port, restore_flags,
           utils.vtdataroot)).wait()
    if err != 0:
        raise utils.TestError('mysqlctl restore failed')

    tablet_62044.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)

    if server_mode:
        err = tablet_62344.mysqlctl(
            '-port %u -mysql-port %u snapshotsourceend -read-write vt_snapshot_test'
            % (tablet_62344.port, tablet_62344.mysql_port)).wait()
        if err != 0:
            raise utils.TestError('mysqlctl snapshotsourceend failed')

        # see if server restarted properly
        tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test',
                                        4)

    tablet_62344.kill_vttablet()
Example #12
def _run_test_vtctl_clone(server_mode):
    if server_mode:
        clone_flags = '-server-mode'
    else:
        clone_flags = ''
    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl('CreateKeyspace -force snapshot_test')

    tablet_62344.init_tablet('master', 'snapshot_test', '0')
    utils.run_vtctl('RebuildShardGraph snapshot_test/0')
    utils.validate_topology()

    tablet_62344.populate('vt_snapshot_test', create_vt_insert_test,
                          populate_vt_insert_test)
    tablet_62344.start_vttablet()

    tablet_62044.create_db('vt_snapshot_test')
    tablet_62044.init_tablet('idle', start=True)

    # small test to make sure the directory validation works
    snapshot_dir = os.path.join(utils.vtdataroot, 'snapshot')
    utils.run("rm -rf %s" % snapshot_dir)
    utils.run("mkdir -p %s" % snapshot_dir)
    utils.run("chmod -w %s" % snapshot_dir)
    out, err = utils.run(
        utils.vtroot + '/bin/vtctl --alsologtostderr Clone -force %s %s %s' %
        (clone_flags, tablet_62344.tablet_alias, tablet_62044.tablet_alias),
        trap_output=True,
        raise_on_error=False)
    if "Cannot validate snapshot directory" not in err:
        raise utils.TestError("expected validation error", err)
    if "Un-reserved test_nj-0000062044" not in err:
        raise utils.TestError("expected Un-reserved", err)
    utils.debug("Failed Clone output: " + err)
    utils.run("chmod +w %s" % snapshot_dir)

    call(["touch", "/tmp/vtSimulateFetchFailures"])
    utils.run_vtctl(
        'Clone -force %s %s %s' %
        (clone_flags, tablet_62344.tablet_alias, tablet_62044.tablet_alias),
        auto_log=True)

    utils.pause("look at logs!")
    tablet_62044.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)
    tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)

    utils.validate_topology()

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
Example #13
def _run_test_vtctl_clone(server_mode):
    if server_mode:
        clone_flags = "-server-mode"
    else:
        clone_flags = ""
    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl("CreateKeyspace -force snapshot_test")

    tablet_62344.init_tablet("master", "snapshot_test", "0")
    utils.run_vtctl("RebuildShardGraph snapshot_test/0")
    utils.validate_topology()

    tablet_62344.populate("vt_snapshot_test", create_vt_insert_test, populate_vt_insert_test)
    tablet_62344.start_vttablet()

    tablet_62044.create_db("vt_snapshot_test")
    tablet_62044.init_tablet("idle", start=True)

    # small test to make sure the directory validation works
    snapshot_dir = os.path.join(utils.vtdataroot, "snapshot")
    utils.run("rm -rf %s" % snapshot_dir)
    utils.run("mkdir -p %s" % snapshot_dir)
    utils.run("chmod -w %s" % snapshot_dir)
    out, err = utils.run(
        utils.vtroot
        + "/bin/vtctl --alsologtostderr Clone -force %s %s %s"
        % (clone_flags, tablet_62344.tablet_alias, tablet_62044.tablet_alias),
        trap_output=True,
        raise_on_error=False,
    )
    if "Cannot validate snapshot directory" not in err:
        raise utils.TestError("expected validation error", err)
    if "Un-reserved test_nj-0000062044" not in err:
        raise utils.TestError("expected Un-reserved", err)
    utils.debug("Failed Clone output: " + err)
    utils.run("chmod +w %s" % snapshot_dir)

    call(["touch", "/tmp/vtSimulateFetchFailures"])
    utils.run_vtctl(
        "Clone -force %s %s %s" % (clone_flags, tablet_62344.tablet_alias, tablet_62044.tablet_alias), auto_log=True
    )

    utils.pause("look at logs!")
    tablet_62044.assert_table_count("vt_snapshot_test", "vt_insert_test", 4)
    tablet_62344.assert_table_count("vt_snapshot_test", "vt_insert_test", 4)

    utils.validate_topology()

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
Example #14
def test_multisnapshot_and_restore_vtctl():
  tables = ['vt_insert_test', 'vt_insert_test1']
  create_template = '''create table %s (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB'''
  insert_template = "insert into %s (id, msg) values (%s, 'test %s')"
  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  # Start three tablets for three different shards. At this point the
  # sharding schema is not really important, as long as it is
  # consistent.
  new_spec = '-0000000000000028-'
  old_tablets = [tablet_62044, tablet_41983, tablet_31981]
  for i, tablet in enumerate(old_tablets):
    tablet.init_tablet('master', 'test_keyspace', str(i))
    utils.run_vtctl('RebuildShardGraph test_keyspace/%s' % i)
  utils.validate_topology()

  for i, tablet in enumerate(old_tablets):
    tablet.populate(
      "vt_test_keyspace",
      [create_template % table for table in tables],
      sum([[insert_template % (table, 10*j + i, 10*j + i) for j in range(1, 8)] for table in tables], []))
    tablet.start_vttablet()
    utils.run_vtctl('MultiSnapshot -force -maximum-file-size=1 -spec=%s %s id' % (new_spec, tablet.tablet_alias), trap_output=True)

  utils.run_vtctl('CreateKeyspace -force test_keyspace_new')
  tablet_62344.init_tablet('master', 'test_keyspace_new', '-0000000000000028', dbname='not_vt_test_keyspace')
  utils.run_vtctl('RebuildShardGraph test_keyspace_new/-0000000000000028')
  utils.validate_topology()
  tablet_62344.mquery('', 'DROP DATABASE IF EXISTS not_vt_test_keyspace')
  tablet_62344.start_vttablet(wait_for_state='CONNECTING') # db not created

  # 0x28 = 40
  source_aliases = ' '.join(t.tablet_alias for t in old_tablets)
  utils.run_vtctl('MultiRestore %s %s' % (tablet_62344.tablet_alias, source_aliases), auto_log=True, raise_on_error=True)
  time.sleep(1)
  for table in tables:
    rows = tablet_62344.mquery('not_vt_test_keyspace', 'select id from %s' % table)
    if len(rows) == 0:
      raise utils.TestError("There are no rows in the restored database.")
    for row in rows:
      if row[0] > 32:
        raise utils.TestError("Bad row: %s" % row)
  for tablet in tablet_62044, tablet_41983, tablet_31981, tablet_62344:
    tablet.kill_vttablet()
Example #15
def _run_test_mysqlctl_clone(server_mode):
    if server_mode:
        snapshot_cmd = "snapshotsourcestart -concurrency=8"
        restore_flags = "-dont-wait-for-slave-start"
    else:
        snapshot_cmd = "snapshot -concurrency=5"
        restore_flags = ""

    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl("CreateKeyspace -force snapshot_test")

    tablet_62344.init_tablet("master", "snapshot_test", "0")
    utils.run_vtctl("RebuildShardGraph snapshot_test/0")
    utils.validate_topology()

    tablet_62344.populate("vt_snapshot_test", create_vt_insert_test, populate_vt_insert_test)

    tablet_62344.start_vttablet()

    err = tablet_62344.mysqlctl(
        "-port %u -mysql-port %u %s vt_snapshot_test" % (tablet_62344.port, tablet_62344.mysql_port, snapshot_cmd)
    ).wait()
    if err != 0:
        raise utils.TestError("mysqlctl %s failed" % snapshot_cmd)

    utils.pause("%s finished" % snapshot_cmd)

    call(["touch", "/tmp/vtSimulateFetchFailures"])
    err = tablet_62044.mysqlctl(
        "-port %u -mysql-port %u restore -fetch-concurrency=2 -fetch-retry-count=4 %s %s/snapshot/vt_0000062344/snapshot_manifest.json"
        % (tablet_62044.port, tablet_62044.mysql_port, restore_flags, utils.vtdataroot)
    ).wait()
    if err != 0:
        raise utils.TestError("mysqlctl restore failed")

    tablet_62044.assert_table_count("vt_snapshot_test", "vt_insert_test", 4)

    if server_mode:
        err = tablet_62344.mysqlctl(
            "-port %u -mysql-port %u snapshotsourceend -read-write vt_snapshot_test"
            % (tablet_62344.port, tablet_62344.mysql_port)
        ).wait()
        if err != 0:
            raise utils.TestError("mysqlctl snapshotsourceend failed")

        # see if server restarted properly
        tablet_62344.assert_table_count("vt_snapshot_test", "vt_insert_test", 4)

    tablet_62344.kill_vttablet()
Example #16
def _run_test_vtctl_clone(server_mode):
  if server_mode:
    clone_flags = '-server-mode'
  else:
    clone_flags = ''
  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force snapshot_test')

  tablet_62344.init_tablet('master', 'snapshot_test', '0')
  utils.run_vtctl('RebuildShardGraph snapshot_test/0')
  utils.validate_topology()

  tablet_62344.populate('vt_snapshot_test', create_vt_insert_test,
                        populate_vt_insert_test)
  tablet_62344.start_vttablet()

  tablet_62044.create_db('vt_snapshot_test')
  tablet_62044.init_tablet('idle', start=True)

  # small test to make sure the directory validation works
  snapshot_dir = os.path.join(utils.vtdataroot, 'snapshot')
  utils.run("rm -rf %s" % snapshot_dir)
  utils.run("mkdir -p %s" % snapshot_dir)
  utils.run("chmod -w %s" % snapshot_dir)
  out, err = utils.run(utils.vtroot+'/bin/vtctl -logfile=/dev/null Clone -force %s %s %s' %
                       (clone_flags, tablet_62344.tablet_alias,
                        tablet_62044.tablet_alias),
                       trap_output=True, raise_on_error=False)
  if "Cannot validate snapshot directory" not in err:
    raise utils.TestError("expected validation error", err)
  if "Un-reserved test_nj-0000062044" not in err:
    raise utils.TestError("expected Un-reserved", err)
  utils.debug("Failed Clone output: " + err)
  utils.run("chmod +w %s" % snapshot_dir)

  call(["touch", "/tmp/vtSimulateFetchFailures"])
  utils.run_vtctl('Clone -force %s %s %s' %
                  (clone_flags, tablet_62344.tablet_alias,
                   tablet_62044.tablet_alias))

  utils.pause("look at logs!")
  tablet_62044.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)
  tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)

  utils.validate_topology()

  tablet_62344.kill_vttablet()
  tablet_62044.kill_vttablet()
Example #17
def run_test_restart():
    utils.zk_wipe()
    utils.run_vtctl('CreateKeyspace -force test_keyspace')

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')

    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    time.sleep(2.0)
    proc1.poll()
    if proc1.returncode is None:
        raise utils.TestError("proc1 still running")
    tablet_62344.kill_vttablet()
Example #18
def run_test_restart():
    utils.zk_wipe()
    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db("vt_test_keyspace")

    tablet_62344.init_tablet("master", "test_keyspace", "0")
    proc1 = tablet_62344.start_vttablet()
    proc2 = tablet_62344.start_vttablet()
    time.sleep(2.0)
    proc1.poll()
    if proc1.returncode is None:
        raise utils.TestError("proc1 still running")
    tablet_62344.kill_vttablet()
Example #19
def run_test_hook():
    utils.zk_wipe()
    utils.run_vtctl('CreateKeyspace -force test_keyspace')

    # create the database so vttablets start, as it is serving
    tablet_62344.create_db('vt_test_keyspace')

    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

    # test a regular program works
    _run_hook("test.sh flag1 param1=hello", [
        '"ExitStatus": 0',
        [
            '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --flag1\\nPARAM: --param1=hello\\n"',
            '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --param1=hello\\nPARAM: --flag1\\n"',
        ],
        '"Stderr": ""',
    ])

    # test stderr output
    _run_hook("test.sh to-stderr", [
        '"ExitStatus": 0',
        '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --to-stderr\\n"',
        '"Stderr": "ERR: --to-stderr\\n"',
    ])

    # test commands that fail
    _run_hook("test.sh exit-error", [
        '"ExitStatus": 1',
        '"Stdout": "TABLET_ALIAS: test_nj-0000062344\\nPARAM: --exit-error\\n"',
        '"Stderr": "ERROR: exit status 1\\n"',
    ])

    # test hook that is not present
    _run_hook(
        "not_here.sh",
        [
            '"ExitStatus": -1',
            '"Stdout": "Skipping missing hook: /',  # cannot go further, local path
            '"Stderr": ""',
        ])

    # test hook with invalid name
    _run_hook("/bin/ls", [
        "action failed: ExecuteHook hook name cannot have a '/' in it",
    ])

    tablet_62344.kill_vttablet()
Example #20
def _run_test_vtctl_partial_clone(create, populate,
                                  start, end):
  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force snapshot_test')

  tablet_62344.init_tablet('master', 'snapshot_test', '0')
  utils.run_vtctl('RebuildShardGraph snapshot_test/0')
  utils.validate_topology()

  tablet_62344.populate('vt_snapshot_test', create, populate)

  tablet_62344.start_vttablet()

  tablet_62044.init_tablet('idle', start=True)

  # FIXME(alainjobart): not sure where the right place for this is,
  # but it doesn't seem like it should be right here. It should be either in
  # InitTablet (running an action on the vttablet), or in PartialClone
  # (instead of doing a 'USE dbname' it could do a 'CREATE DATABASE
  # dbname').
  tablet_62044.mquery('', 'stop slave')
  tablet_62044.create_db('vt_snapshot_test')
  call(["touch", "/tmp/vtSimulateFetchFailures"])
  utils.run_vtctl('PartialClone -force %s %s id %s %s' %
                  (tablet_62344.tablet_alias, tablet_62044.tablet_alias,
                   start, end))

  utils.pause("after PartialClone")

  # grab the new tablet definition from zk, make sure the start and
  # end keys are set properly
  out = utils.zk_cat(tablet_62044.zk_tablet_path)
  if '"Start": "%s"' % start not in out or '"End": "%s"' % end not in out:
    print "Tablet output:"
    print "out"
    raise utils.TestError('wrong Start or End')

  tablet_62044.assert_table_count('vt_snapshot_test', 'vt_insert_test', 2)

  utils.validate_topology()

  tablet_62344.kill_vttablet()
  tablet_62044.kill_vttablet()
Example #21
def run_test_vttablet_authenticated():
  utils.zk_wipe()
  utils.run_vtctl('CreateKeyspace -force test_keyspace')
  tablet_62344.init_tablet('master', 'test_keyspace', '0')
  utils.run_vtctl('RebuildShardGraph test_keyspace/0')
  utils.validate_topology()

  tablet_62344.populate('vt_test_keyspace', create_vt_select_test,
                        populate_vt_select_test)
  agent = tablet_62344.start_vttablet(auth=True)
  utils.run_vtctl('SetReadWrite ' + tablet_62344.tablet_alias)

  err, out = tablet_62344.vquery('select * from vt_select_test', path='test_keyspace/0', user='******', password=r'ma kota')
  utils.debug("Got rows: " + out)
  if 'Row count: ' not in out:
    raise utils.TestError("query didn't go through: %s, %s" % (err, out))

  utils.kill_sub_process(agent)
Example #22
def test_multisnapshot_mysqlctl():
    populate = sum(
        [["insert into vt_insert_test_%s (msg) values ('test %s')" % (i, x) for x in xrange(4)] for i in range(6)], []
    )
    create = [
        """create table vt_insert_test_%s (
id bigint auto_increment,
msg varchar(64),
primary key (id)
) Engine=InnoDB"""
        % i
        for i in range(6)
    ]

    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    tablet_62344.init_tablet("master", "test_keyspace", "0")
    utils.run_vtctl("RebuildShardGraph test_keyspace/0")
    utils.validate_topology()

    tablet_62344.populate("vt_test_keyspace", create, populate)

    tablet_62344.start_vttablet()
    err = tablet_62344.mysqlctl(
        "-port %u -mysql-port %u multisnapshot --tables=vt_insert_test_1,vt_insert_test_2,vt_insert_test_3 --spec=-0000000000000003- vt_test_keyspace id"
        % (tablet_62344.port, tablet_62344.mysql_port)
    ).wait()
    if err != 0:
        raise utils.TestError("mysqlctl multisnapshot failed")
    if os.path.exists(
        os.path.join(
            utils.vtdataroot, "snapshot/vt_0000062344/data/vt_test_keyspace-,0000000000000003/vt_insert_test_4.csv.gz"
        )
    ):
        raise utils.TestError("Table vt_insert_test_4 wasn't supposed to be dumped.")
    for kr in "vt_test_keyspace-,0000000000000003", "vt_test_keyspace-0000000000000003,":
        path = os.path.join(utils.vtdataroot, "snapshot/vt_0000062344/data/", kr, "vt_insert_test_1.0.csv.gz")
        with gzip.open(path) as f:
            if len(f.readlines()) != 2:
                raise utils.TestError("Data looks wrong in %s" % path)
    tablet_62344.kill_vttablet()
Example #23
def run_test_reparent_slave_offline(shard_id='0'):
    utils.zk_wipe()

    utils.run_vtctl('CreateKeyspace -force test_keyspace')

    # create the database so vttablets start, as they are serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    tablet_41983.create_db('vt_test_keyspace')
    tablet_31981.create_db('vt_test_keyspace')

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_41983.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True)

    # Recompute the shard layout node - until you do that, it might not be valid.
    utils.run_vtctl('RebuildShardGraph test_keyspace/' + shard_id)
    utils.validate_topology()

    # Force the slaves to reparent assuming that all the datasets are identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl('ReparentShard -force test_keyspace/%s %s' %
                    (shard_id, tablet_62344.tablet_alias))
    utils.validate_topology(ping_tablets=True)

    expected_addr = utils.hostname + ':' + str(tablet_62344.port)
    _check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    # Kill one tablet so we seem offline
    tablet_31981.kill_vttablet()

    # Perform a graceful reparent operation.
    utils.run_vtctl('ReparentShard test_keyspace/%s %s' %
                    (shard_id, tablet_62044.tablet_alias))

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
Example #24
def run_test_vttablet_authenticated():
    utils.zk_wipe()
    utils.run_vtctl("CreateKeyspace -force test_keyspace")
    tablet_62344.init_tablet("master", "test_keyspace", "0")
    utils.run_vtctl("RebuildShardGraph test_keyspace/0")
    utils.validate_topology()

    tablet_62344.populate("vt_test_keyspace", create_vt_select_test, populate_vt_select_test)
    tablet_62344.start_vttablet(auth=True)
    utils.run_vtctl("SetReadWrite " + tablet_62344.tablet_alias)

    err, out = tablet_62344.vquery(
        "select * from vt_select_test", path="test_keyspace/0", user="******", password=r"ma kota"
    )
    utils.debug("Got rows: " + out)
    if "Row count: " not in out:
        raise utils.TestError("query didn't go through: %s, %s" % (err, out))

    tablet_62344.kill_vttablet()
Example #25
def run_test_reparent_slave_offline(shard_id="0"):
    utils.zk_wipe()

    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    # create the database so vttablets start, as they are serving
    tablet_62344.create_db("vt_test_keyspace")
    tablet_62044.create_db("vt_test_keyspace")
    tablet_41983.create_db("vt_test_keyspace")
    tablet_31981.create_db("vt_test_keyspace")

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet("master", "test_keyspace", shard_id, start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet("replica", "test_keyspace", shard_id, start=True)
    tablet_41983.init_tablet("replica", "test_keyspace", shard_id, start=True)
    tablet_31981.init_tablet("replica", "test_keyspace", shard_id, start=True)

    # Recompute the shard layout node - until you do that, it might not be valid.
    utils.run_vtctl("RebuildShardGraph test_keyspace/" + shard_id)
    utils.validate_topology()

    # Force the slaves to reparent assuming that all the datasets are identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl("ReparentShard -force test_keyspace/%s %s" % (shard_id, tablet_62344.tablet_alias))
    utils.validate_topology(ping_tablets=True)

    expected_addr = utils.hostname + ":" + str(tablet_62344.port)
    _check_db_addr("test_keyspace.%s.master:_vtocc" % shard_id, expected_addr)

    # Kill one tablet so we seem offline
    tablet_31981.kill_vttablet()

    # Perform a graceful reparent operation.
    utils.run_vtctl("ReparentShard test_keyspace/%s %s" % (shard_id, tablet_62044.tablet_alias))

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
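_check_db_addr is another helper not shown here. A sketch under the assumption that it resolves the serving-graph entry (e.g. via the old 'vtctl Resolve' command) and compares the returned host:port; the command and address format are assumptions:

# Assumed sketch of _check_db_addr: resolve 'keyspace.shard.type:port_name'
# in the test_nj cell and compare against the expected host:port.
def _check_db_addr(db_addr, expected_addr):
  out, err = utils.run(
      utils.vtroot + '/bin/vtctl -logfile=/dev/null Resolve test_nj.' + db_addr,
      trap_output=True)
  if out.strip() != expected_addr:
    raise utils.TestError('resolved wrong address: %s != %s' %
                          (out.strip(), expected_addr))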
Example #26
  def setUp(self):
    utils.zk_wipe()
    utils.run(utils.vtroot+'/bin/zk touch -p /zk/test_nj/vt/zkocc1')
    utils.run(utils.vtroot+'/bin/zk touch -p /zk/test_nj/vt/zkocc2')
    fd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
    filename1 = fd.name
    fd.write("Test data 1")
    fd.close()
    utils.run(utils.vtroot+'/bin/zk cp '+filename1+' /zk/test_nj/vt/zkocc1/data1')

    fd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
    filename2 = fd.name
    fd.write("Test data 2")
    fd.close()
    utils.run(utils.vtroot+'/bin/zk cp '+filename2+' /zk/test_nj/vt/zkocc1/data2')

    fd = tempfile.NamedTemporaryFile(dir=utils.tmp_root, delete=False)
    filename3 = fd.name
    fd.write("Test data 3")
    fd.close()
    utils.run(utils.vtroot+'/bin/zk cp '+filename3+' /zk/test_nj/vt/zkocc1/data3')
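After this setUp has copied the three files into zk, a test can read them back through zkocc; a brief usage sketch, assuming the old Python zkocc client's get(path) returns a dict carrying the node's 'Data':

# Usage sketch for the data populated above (get()'s reply shape is an
# assumption based on the old zkocc client).
topo = zkocc.ZkOccConnection('localhost:%u' % utils.zkocc_port_base, 'test_nj', 30)
topo.dial()
node = topo.get('/zk/test_nj/vt/zkocc1/data1')
if node['Data'] != 'Test data 1':
  raise utils.TestError('unexpected zkocc data: %s' % node)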
Example #27
def run_test_reparent_from_outside():
  utils.zk_wipe()

  utils.run_vtctl('CreateKeyspace test_keyspace')

  # create the database so vttablets start, as they are serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62044.create_db('vt_test_keyspace')
  tablet_41983.create_db('vt_test_keyspace')
  tablet_31981.create_db('vt_test_keyspace')

  # Start up a master mysql and vttablet
  tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

  # Create a few slaves for testing reparenting.
  tablet_62044.init_tablet('replica', 'test_keyspace', '0', start=True)
  tablet_41983.init_tablet('replica', 'test_keyspace', '0', start=True)
  tablet_31981.init_tablet('replica', 'test_keyspace', '0', start=True)

  # Reparent as a starting point
  utils.run_vtctl('ReparentShard -force test_keyspace/0 %s' % tablet_62344.tablet_alias)

  # now manually reparent 1 out of 2 tablets
  # 62044 will be the new master
  # 31981 won't be re-parented, so it will be busted
  tablet_62044.mquery('', [
      "RESET MASTER",
      "STOP SLAVE",
      "RESET SLAVE",
      "CHANGE MASTER TO MASTER_HOST = ''",
      ])
  new_pos = tablet_62044.mquery('', 'show master status')
  utils.debug("New master position: %s" % str(new_pos))

  # 62344 will now be a slave of 62044
  tablet_62344.mquery('', [
      "RESET MASTER",
      "RESET SLAVE",
      "change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u" % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
      'start slave'
      ])

  # 41983 will be a slave of 62044
  tablet_41983.mquery('', [
      'stop slave',
      "change master to master_port=%u, master_log_file='%s', master_log_pos=%u" % (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
      'start slave'
      ])

  # update zk with the new graph
  utils.run_vtctl('ShardExternallyReparented -scrap-stragglers test_keyspace/0 %s' % tablet_62044.tablet_alias, auto_log=True)

  # make sure the replication graph is fine
  shard_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0')
  utils.debug('shard_files: %s' % " ".join(shard_files))
  if shard_files != ['action', 'actionlog', 'test_nj-0000062044']:
    raise utils.TestError('unexpected zk content: %s' % " ".join(shard_files))

  slave_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044')
  utils.debug('slave_files: %s' % " ".join(slave_files))
  if slave_files != ['test_nj-0000041983', 'test_nj-0000062344']:
    raise utils.TestError('unexpected zk content: %s' % " ".join(slave_files))

  tablet_31981.kill_vttablet()
  tablet_62344.kill_vttablet()
  tablet_62044.kill_vttablet()
  tablet_41983.kill_vttablet()
Example #28
def _run_test_reparent_graceful(shard_id):
  utils.zk_wipe()

  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  # create the database so vttablets start, as they are serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62044.create_db('vt_test_keyspace')
  tablet_41983.create_db('vt_test_keyspace')
  tablet_31981.create_db('vt_test_keyspace')

  # Start up a master mysql and vttablet
  tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True)

  # Create a few slaves for testing reparenting.
  tablet_62044.init_tablet('replica', 'test_keyspace', shard_id, start=True)
  tablet_41983.init_tablet('replica', 'test_keyspace', shard_id, start=True)
  tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True)

  # Recompute the shard layout node - until you do that, it might not be valid.
  utils.run_vtctl('RebuildShardGraph test_keyspace/' + shard_id)
  utils.validate_topology()

  # Force the slaves to reparent assuming that all the datasets are identical.
  utils.pause("force ReparentShard?")
  utils.run_vtctl('ReparentShard -force test_keyspace/%s %s' % (shard_id, tablet_62344.tablet_alias))
  utils.validate_topology(ping_tablets=True)

  expected_addr = utils.hostname + ':' + str(tablet_62344.port)
  _check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

  # Convert two replicas to spare. That should leave only one node serving traffic,
  # but the spares still need to appear in the replication graph.
  utils.run_vtctl('ChangeSlaveType ' + tablet_41983.tablet_alias + ' spare')
  utils.run_vtctl('ChangeSlaveType ' + tablet_31981.tablet_alias + ' spare')
  utils.validate_topology()
  expected_addr = utils.hostname + ':' + str(tablet_62044.port)
  _check_db_addr('test_keyspace.%s.replica:_vtocc' % shard_id, expected_addr)

  # Run this to make sure it succeeds.
  utils.run_vtctl('ShardReplicationPositions test_keyspace/%s' % shard_id, stdout=devnull)

  # Perform a graceful reparent operation.
  utils.pause("graceful ReparentShard?")
  utils.run_vtctl('ReparentShard test_keyspace/%s %s' % (shard_id, tablet_62044.tablet_alias), auto_log=True)
  utils.validate_topology()

  expected_addr = utils.hostname + ':' + str(tablet_62044.port)
  _check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

  tablet_62344.kill_vttablet()
  tablet_62044.kill_vttablet()
  tablet_41983.kill_vttablet()
  tablet_31981.kill_vttablet()

  # Test address correction.
  new_port = utils.reserve_ports(1)
  tablet_62044.start_vttablet(port=new_port)
  # Wait a moment for address to reregister.
  time.sleep(1.0)

  expected_addr = utils.hostname + ':' + str(new_port)
  _check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

  tablet_62044.kill_vttablet()
Example #29
def run_test_reparent_down_master():
  utils.zk_wipe()

  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  # create the database so vttablets start, as they are serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62044.create_db('vt_test_keyspace')
  tablet_41983.create_db('vt_test_keyspace')
  tablet_31981.create_db('vt_test_keyspace')

  # Start up a master mysql and vttablet
  tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

  # Create a few slaves for testing reparenting.
  tablet_62044.init_tablet('replica', 'test_keyspace', '0', start=True)
  tablet_41983.init_tablet('replica', 'test_keyspace', '0', start=True)
  tablet_31981.init_tablet('replica', 'test_keyspace', '0', start=True)

  # Recompute the shard layout node - until you do that, it might not be valid.
  utils.run_vtctl('RebuildShardGraph test_keyspace/0')
  utils.validate_topology()

  # Force the slaves to reparent assuming that all the datasets are identical.
  utils.run_vtctl('ReparentShard -force test_keyspace/0 ' + tablet_62344.tablet_alias, auto_log=True)
  utils.validate_topology()

  # Make the master agent and database unavailable.
  tablet_62344.kill_vttablet()
  tablet_62344.shutdown_mysql().wait()

  expected_addr = utils.hostname + ':' + str(tablet_62344.port)
  _check_db_addr('test_keyspace.0.master:_vtocc', expected_addr)

  # Perform a reparent operation - the Validate part will try to ping
  # the master and fail somewhat quickly
  stdout, stderr = utils.run_fail(utils.vtroot+'/bin/vtctl -logfile=/dev/null -log.level=INFO -wait-time 5s ReparentShard test_keyspace/0 ' + tablet_62044.tablet_alias)
  utils.debug("Failed ReparentShard output:\n" + stderr)
  if 'ValidateShard verification failed: timed out during validate' not in stderr:
    raise utils.TestError("didn't find the right error strings in failed ReparentShard: " + stderr)

  # Should timeout and fail
  stdout, stderr = utils.run_fail(utils.vtroot+'/bin/vtctl -logfile=/dev/null -log.level=INFO -wait-time 5s ScrapTablet ' + tablet_62344.tablet_alias)
  utils.debug("Failed ScrapTablet output:\n" + stderr)
  if 'deadline exceeded' not in stderr:
    raise utils.TestError("didn't find the right error strings in failed ScrapTablet: " + stderr)

  # Should interrupt and fail
  sp = utils.run_bg(utils.vtroot+'/bin/vtctl -log.level=INFO -wait-time 10s ScrapTablet ' + tablet_62344.tablet_alias, stdout=PIPE, stderr=PIPE)
  # Need time for the process to start before killing it.
  time.sleep(0.1)
  os.kill(sp.pid, signal.SIGINT)
  stdout, stderr = sp.communicate()

  utils.debug("Failed ScrapTablet output:\n" + stderr)
  if 'interrupted' not in stderr:
    raise utils.TestError("didn't find the right error strings in failed ScrapTablet: " + stderr)

  # Force the scrap action in zk even though tablet is not accessible.
  tablet_62344.scrap(force=True)

  utils.run_fail(utils.vtroot+'/bin/vtctl -logfile=/dev/null -log.level=WARNING ChangeSlaveType -force %s idle' %
                 tablet_62344.tablet_alias)

  # Remove pending locks (make this the force option to ReparentShard?)
  utils.run_vtctl('PurgeActions /zk/global/vt/keyspaces/test_keyspace/shards/0/action')

  # Re-run the reparent operation; this should now proceed unimpeded.
  utils.run_vtctl('-wait-time 1m ReparentShard test_keyspace/0 ' + tablet_62044.tablet_alias, auto_log=True)

  utils.validate_topology()
  expected_addr = utils.hostname + ':' + str(tablet_62044.port)
  _check_db_addr('test_keyspace.0.master:_vtocc', expected_addr)

  utils.run_vtctl('ChangeSlaveType -force %s idle' % tablet_62344.tablet_alias)

  idle_tablets, _ = utils.run_vtctl('ListAllTablets test_nj', trap_output=True)
  if '0000062344 <null> <null> idle' not in idle_tablets:
    raise utils.TestError('idle tablet not found', idle_tablets)

  tablet_62044.kill_vttablet()
  tablet_41983.kill_vttablet()
  tablet_31981.kill_vttablet()

  # so the other tests don't have any surprises
  tablet_62344.start_mysql().wait()
Example #30
 def tearDown(self):
   tablet.Tablet.check_vttablet_count()
   utils.zk_wipe()
   for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
     t.reset_replication()
     t.clean_dbs()
Example #31
 def tearDown(self):
     tablet.Tablet.check_vttablet_count()
     utils.zk_wipe()
     for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
         t.reset_replication()
         t.clean_dbs()
Example #32
 def setUp(self):
   utils.zk_wipe()
   self.zkocc_server = utils.zkocc_start()
   self.vttopo_server = utils.vttopo_start()
   self.topo = zkocc.ZkOccConnection("localhost:%u" % utils.zkocc_port_base, 'test_nj', 30)
   self.topo.dial()
Example #33
def test_multisnapshot_and_restore():
    tables = ["vt_insert_test", "vt_insert_test1"]
    create_template = """create table %s (
id bigint auto_increment,
msg varchar(64),
primary key (id),
index by_msg (msg)
) Engine=InnoDB"""
    create_view = """create view vt_insert_view(id, msg) as select id, msg from vt_insert_test"""
    insert_template = "insert into %s (id, msg) values (%s, 'test %s')"

    utils.zk_wipe()

    # Start up a master mysql and vttablet
    utils.run_vtctl("CreateKeyspace -force test_keyspace")

    # Start three tablets for three different shards. At this point the
    # sharding schema is not really important, as long as it is
    # consistent.
    new_spec = "-0000000000000028-"
    old_tablets = [tablet_62044, tablet_41983, tablet_31981]
    for i, tablet in enumerate(old_tablets):
        tablet.init_tablet("master", "test_keyspace", str(i))
        utils.run_vtctl("RebuildShardGraph test_keyspace/%s" % i)
    utils.validate_topology()

    for i, tablet in enumerate(old_tablets):
        tablet.populate(
            "vt_test_keyspace",
            [create_template % table for table in tables] + [create_view],
            sum([[insert_template % (table, 10 * j + i, 10 * j + i) for j in range(1, 8)] for table in tables], []),
        )
        tablet.start_vttablet()
        utils.run_vtctl("MultiSnapshot --force  --spec=%s %s id" % (new_spec, tablet.tablet_alias), trap_output=True)

    utils.pause("After snapshot")

    # try to get the schema on the source, make sure the view is there
    out, err = utils.run_vtctl("GetSchema --include-views " + tablet_62044.tablet_alias, trap_output=True)
    if "vt_insert_view" not in err or "VIEW `{{.DatabaseName}}`.`vt_insert_view` AS select" not in err:
        raise utils.TestError("Unexpected GetSchema --include-views output: %s" % err)
    out, err = utils.run_vtctl("GetSchema " + tablet_62044.tablet_alias, trap_output=True)
    if "vt_insert_view" in err:
        raise utils.TestError("Unexpected GetSchema output: %s" % err)

    utils.run_vtctl("CreateKeyspace -force test_keyspace_new")
    tablet_62344.init_tablet("master", "test_keyspace_new", "0", dbname="not_vt_test_keyspace")
    utils.run_vtctl("RebuildShardGraph test_keyspace_new/0")
    utils.validate_topology()
    tablet_62344.mquery("", "DROP DATABASE IF EXISTS not_vt_test_keyspace")
    tablet_62344.start_vttablet(wait_for_state="CONNECTING")  # db not created

    tablet_urls = " ".join("vttp://localhost:%s/vt_test_keyspace" % tablet.port for tablet in old_tablets)

    # 0x28 = 40
    err = tablet_62344.mysqlctl(
        "multirestore --end=0000000000000028 -strategy=skipAutoIncrement(vt_insert_test1),delayPrimaryKey,delaySecondaryIndexes,useMyIsam not_vt_test_keyspace %s"
        % tablet_urls
    ).wait()
    if err != 0:
        raise utils.TestError("mysqlctl failed: %u" % err)
    for table in tables:
        rows = tablet_62344.mquery("not_vt_test_keyspace", "select id from %s" % table)
        if len(rows) == 0:
            raise utils.TestError("There are no rows in the restored database.")
        for row in rows:
            if row[0] > 32:
                raise utils.TestError("Bad row: %s" % row)

    # try to get the schema on multi-restored guy, make sure the view is there
    out, err = utils.run_vtctl("GetSchema --include-views " + tablet_62344.tablet_alias, trap_output=True)
    if "vt_insert_view" not in err or "VIEW `{{.DatabaseName}}`.`vt_insert_view` AS select" not in err:
        raise utils.TestError("Unexpected GetSchema --include-views output after multirestore: %s" % err)

    for tablet in tablet_62044, tablet_41983, tablet_31981, tablet_62344:
        tablet.kill_vttablet()
Example #34
def _run_test_reparent_from_outside(brutal=False):
    utils.zk_wipe()

    utils.run_vtctl("CreateKeyspace test_keyspace")

    # create the database so vttablets start, as they are serving
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.create_db("vt_test_keyspace")

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet("master", "test_keyspace", "0", start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet("replica", "test_keyspace", "0", start=True)
    tablet_41983.init_tablet("replica", "test_keyspace", "0", start=True)
    tablet_31981.init_tablet("replica", "test_keyspace", "0", start=True)

    # Reparent as a starting point
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl("ReparentShard -force test_keyspace/0 %s" % tablet_62344.tablet_alias, auto_log=True)

    # now manually reparent 1 out of 2 tablets
    # 62044 will be the new master
    # 31981 won't be re-parented, so it will be busted
    tablet_62044.mquery("", ["RESET MASTER", "STOP SLAVE", "RESET SLAVE", "CHANGE MASTER TO MASTER_HOST = ''"])
    new_pos = tablet_62044.mquery("", "show master status")
    utils.debug("New master position: %s" % str(new_pos))

    # 62344 will now be a slave of 62044
    tablet_62344.mquery(
        "",
        [
            "RESET MASTER",
            "RESET SLAVE",
            "change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u"
            % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
            "start slave",
        ],
    )

    # 41983 will be a slave of 62044
    tablet_41983.mquery(
        "",
        [
            "stop slave",
            "change master to master_port=%u, master_log_file='%s', master_log_pos=%u"
            % (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
            "start slave",
        ],
    )

    # in brutal mode, we scrap the old master first
    if brutal:
        tablet_62344.scrap(force=True)
        # we have some automated tools that do this too, so it's good to simulate
        utils.run(utils.vtroot + "/bin/zk rm -rf " + tablet_62344.zk_tablet_path)

    # update zk with the new graph
    utils.run_vtctl(
        "ShardExternallyReparented -scrap-stragglers test_keyspace/0 %s" % tablet_62044.tablet_alias, auto_log=True
    )

    # make sure the replication graph is fine
    shard_files = utils.zk_ls("/zk/global/vt/keyspaces/test_keyspace/shards/0")
    utils.debug("shard_files: %s" % " ".join(shard_files))
    if shard_files != ["action", "actionlog", "test_nj-0000062044"]:
        raise utils.TestError("unexpected zk content: %s" % " ".join(shard_files))

    slave_files = utils.zk_ls("/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044")
    utils.debug("slave_files: %s" % " ".join(slave_files))
    expected_slave_files = ["test_nj-0000041983", "test_nj-0000062344"]
    if brutal:
        expected_slave_files = ["test_nj-0000041983"]
    if slave_files != expected_slave_files:
        raise utils.TestError(
            "unexpected zk content: %s instead of expected %s"
            % ("|".join(slave_files), "|".join(expected_slave_files_files))
        )

    tablet_31981.kill_vttablet()
    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
Example #35
def _run_test_vtctl_snapshot_restore(server_mode):
  if server_mode:
    snapshot_flags = '-server-mode -concurrency=8'
    restore_flags = '-dont-wait-for-slave-start'
  else:
    snapshot_flags = '-concurrency=4'
    restore_flags = ''
  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force snapshot_test')

  tablet_62344.init_tablet('master', 'snapshot_test', '0')
  utils.run_vtctl('RebuildShardGraph snapshot_test/0')
  utils.validate_topology()

  tablet_62344.populate('vt_snapshot_test', create_vt_insert_test,
                        populate_vt_insert_test)

  tablet_62044.create_db('vt_snapshot_test')

  tablet_62344.start_vttablet()

  # Need to force snapshot since this is a master db.
  out, err = utils.run_vtctl('Snapshot -force %s %s' %
                             (snapshot_flags, tablet_62344.tablet_alias),
                             log_level='INFO', trap_output=True)
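  # vtctl logs the snapshot summary as "Name: value" lines on stderr; scrape
  # the values we need out of them.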
  results = {}
  for name in ['Manifest', 'ParentAlias', 'SlaveStartRequired', 'ReadOnly', 'OriginalType']:
    sepPos = err.find(name + ": ")
    if sepPos != -1:
      results[name] = err[sepPos+len(name)+2:].splitlines()[0]
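  # A sketch of the same scrape as a one-liner (hypothetical alternative; it
  # would keep every "Name: value" log line, not just the five above):
  #   import re
  #   results = dict(re.findall(r'^(\w+): (.*)$', err, re.MULTILINE))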
  if "Manifest" not in results:
    raise utils.TestError("Snapshot didn't echo Manifest file", err)
  if "ParentAlias" not in results:
    raise utils.TestError("Snapshot didn't echo ParentAlias", err)
  utils.pause("snapshot finished: " + results['Manifest'] + " " + results['ParentAlias'])
  if server_mode:
    if "SlaveStartRequired" not in results:
      raise utils.TestError("Snapshot didn't echo SlaveStartRequired", err)
    if "ReadOnly" not in results:
      raise utils.TestError("Snapshot didn't echo ReadOnly", err)
    if "OriginalType" not in results:
      raise utils.TestError("Snapshot didn't echo OriginalType", err)
    if (results['SlaveStartRequired'] != 'false' or
        results['ReadOnly'] != 'true' or
        results['OriginalType'] != 'master'):
      raise utils.TestError("Bad values returned by Snapshot", err)
  tablet_62044.init_tablet('idle', start=True)

  # do not specify a MANIFEST, see if 'default' works
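  # Touching this marker file (presumably checked by the fetch code) simulates
  # transient fetch failures, exercising the -fetch-retry-count flag below.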
  call(["touch", "/tmp/vtSimulateFetchFailures"])
  utils.run_vtctl('Restore -fetch-concurrency=2 -fetch-retry-count=4 %s %s default %s %s' %
                  (restore_flags, tablet_62344.tablet_alias,
                   tablet_62044.tablet_alias, results['ParentAlias']), auto_log=True)
  utils.pause("restore finished")

  tablet_62044.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)

  utils.validate_topology()

  # in server_mode, take the source tablet out of snapshot mode and check it
  if server_mode:
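    # SnapshotSourceEnd takes the source out of snapshot mode, restoring its
    # original tablet type (and, for a master, read-write access).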
    utils.run_vtctl('SnapshotSourceEnd %s %s' % (tablet_62344.tablet_alias, results['OriginalType']), auto_log=True)
    tablet_62344.assert_table_count('vt_snapshot_test', 'vt_insert_test', 4)
    utils.validate_topology()

  tablet_62344.kill_vttablet()
  tablet_62044.kill_vttablet()
Beispiel #36
0
def test_multisnapshot_and_restore():
  tables = ['vt_insert_test', 'vt_insert_test1']
  create_template = '''create table %s (
id bigint auto_increment,
msg varchar(64),
primary key (id),
index by_msg (msg)
) Engine=InnoDB'''
  create_view = '''create view vt_insert_view(id, msg) as select id, msg from vt_insert_test'''
  insert_template = "insert into %s (id, msg) values (%s, 'test %s')"

  utils.zk_wipe()

  # Start up a master mysql and vttablet
  utils.run_vtctl('CreateKeyspace -force test_keyspace')

  # Start three tablets for three different shards. At this point the
  # sharding schema is not really important, as long as it is
  # consistent.
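  # The spec '-0000000000000028-' splits each source at keyspace_id 0x28 (40),
  # producing the ranges (-40) and (40-); only the lower range is restored
  # below.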
  new_spec = '-0000000000000028-'
  old_tablets = [tablet_62044, tablet_41983, tablet_31981]
  for i, tablet in enumerate(old_tablets):
    tablet.init_tablet('master', 'test_keyspace', str(i))
    utils.run_vtctl('RebuildShardGraph test_keyspace/%s' % i)
  utils.validate_topology()

  for i, tablet in enumerate(old_tablets):
    tablet.populate(
      "vt_test_keyspace",
      [create_template % table for table in tables] + [create_view],
      sum([[insert_template % (table, 10*j + i, 10*j + i) for j in range(1, 8)] for table in tables], []))
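    # ids are 10*j + i, so each source shard i contributes a distinct set of
    # rows; the ids below keyspace_id 0x28 (40) top out at 32, which the
    # restore check below relies on.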
    tablet.start_vttablet()
    utils.run_vtctl('MultiSnapshot --force --spec=%s %s id' % (new_spec, tablet.tablet_alias), trap_output=True)

  utils.pause("After snapshot")

  # try to get the schema on the source, make sure the view is there
  out, err = utils.run_vtctl('GetSchema --include-views ' +
                             tablet_62044.tablet_alias,
                             log_level='INFO', trap_output=True)
  if 'vt_insert_view' not in err or 'VIEW `{{.DatabaseName}}`.`vt_insert_view` AS select' not in err:
    raise utils.TestError('Unexpected GetSchema --include-views output: %s' % err)
  out, err = utils.run_vtctl('GetSchema ' +
                             tablet_62044.tablet_alias,
                             log_level='INFO', trap_output=True)
  if 'vt_insert_view' in err:
    raise utils.TestError('Unexpected GetSchema output: %s' % err)

  utils.run_vtctl('CreateKeyspace -force test_keyspace_new')
  tablet_62344.init_tablet('master', 'test_keyspace_new', '0', dbname='not_vt_test_keyspace')
  utils.run_vtctl('RebuildShardGraph test_keyspace_new/0')
  utils.validate_topology()
  tablet_62344.mquery('', 'DROP DATABASE IF EXISTS not_vt_test_keyspace')
  tablet_62344.start_vttablet(wait_for_state='CONNECTING') # db not created

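  # multirestore pulls the snapshot data straight from the source tablets,
  # addressed here with vttp:// URLs.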
  tablet_urls = ' '.join("vttp://localhost:%s/vt_test_keyspace" % tablet.port for tablet in old_tablets)

  # 0x28 = 40
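  # Roughly what the strategy list does: delayPrimaryKey and
  # delaySecondaryIndexes build the indexes only after the data is loaded,
  # useMyIsam loads through MyISAM first, skipAutoIncrement(vt_insert_test1)
  # skips auto-increment handling for that table, and populateBlpRecovery(6514)
  # seeds the blp_checkpoint rows checked below.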
  err = tablet_62344.mysqlctl("multirestore --end=0000000000000028 -strategy=skipAutoIncrement(vt_insert_test1),delayPrimaryKey,delaySecondaryIndexes,useMyIsam,populateBlpRecovery(6514) not_vt_test_keyspace %s" % tablet_urls).wait()
  if err != 0:
    raise utils.TestError("mysqlctl failed: %u" % err)
  for table in tables:
    rows = tablet_62344.mquery('not_vt_test_keyspace', 'select id from %s' % table)
    if len(rows) == 0:
      raise utils.TestError("There are no rows in the restored database.")
    for row in rows:
      if row[0] > 32:
        raise utils.TestError("Bad row: %s" % row)
  rows = tablet_62344.mquery('_vt', 'select * from blp_checkpoint')
  if len(rows) != 3:
    raise utils.TestError("Was expecting 3 rows in blp_checkpoint but got: %s" % str(rows))

  # try to get the schema on multi-restored guy, make sure the view is there
  out, err = utils.run_vtctl('GetSchema --include-views ' +
                             tablet_62344.tablet_alias,
                             log_level='INFO', trap_output=True)
  if 'vt_insert_view' not in err or 'VIEW `{{.DatabaseName}}`.`vt_insert_view` AS select' not in err:
    raise utils.TestError('Unexpected GetSchema --include-views output after multirestore: %s' % err)

  for tablet in tablet_62044, tablet_41983, tablet_31981, tablet_62344:
    tablet.kill_vttablet()
Beispiel #37
0
def _run_test_reparent_from_outside(brutal=False):
    utils.zk_wipe()

    utils.run_vtctl('CreateKeyspace test_keyspace')

    # create the database so vttablets start, as they are serving
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.create_db('vt_test_keyspace')

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet('replica', 'test_keyspace', '0', start=True)
    tablet_41983.init_tablet('replica', 'test_keyspace', '0', start=True)
    tablet_31981.init_tablet('replica', 'test_keyspace', '0', start=True)

    # Reparent as a starting point
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl('ReparentShard -force test_keyspace/0 %s' %
                    tablet_62344.tablet_alias,
                    auto_log=True)

    # now manually reparent, leaving one slave behind
    # 62044 will be the new master
    # 31981 won't be re-parented, so it will be busted
    tablet_62044.mquery('', [
        "RESET MASTER",
        "STOP SLAVE",
        "RESET SLAVE",
        "CHANGE MASTER TO MASTER_HOST = ''",
    ])
    new_pos = tablet_62044.mquery('', 'show master status')
    utils.debug("New master position: %s" % str(new_pos))

    # 62344 will now be a slave of 62044
    tablet_62344.mquery('', [
        "RESET MASTER", "RESET SLAVE",
        "change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u"
        % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0],
           new_pos[0][1]), 'start slave'
    ])

    # 41983 will be a slave of 62044
    tablet_41983.mquery('', [
        'stop slave',
        "change master to master_port=%u, master_log_file='%s', master_log_pos=%u"
        %
        (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]), 'start slave'
    ])

    # in brutal mode, we scrap the old master first
    if brutal:
        tablet_62344.scrap(force=True)
        # we have some automated tools that do this too, so it's good to simulate
        utils.run(utils.vtroot + '/bin/zk rm -rf ' +
                  tablet_62344.zk_tablet_path)

    # update zk with the new graph
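    # ShardExternallyReparented does not touch MySQL: it rewrites the topology
    # to match the reparent done by hand above, and -scrap-stragglers scraps
    # any slave still pointing at the old master.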
    utils.run_vtctl(
        'ShardExternallyReparented -scrap-stragglers test_keyspace/0 %s' %
        tablet_62044.tablet_alias,
        auto_log=True)

    # make sure the replication graph is fine
    shard_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0')
    utils.debug('shard_files: %s' % " ".join(shard_files))
    if shard_files != ['action', 'actionlog', 'test_nj-0000062044']:
        raise utils.TestError('unexpected zk content: %s' %
                              " ".join(shard_files))

    slave_files = utils.zk_ls(
        '/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044')
    utils.debug('slave_files: %s' % " ".join(slave_files))
    expected_slave_files = ['test_nj-0000041983', 'test_nj-0000062344']
    if brutal:
        expected_slave_files = ['test_nj-0000041983']
    if slave_files != expected_slave_files:
        raise utils.TestError(
            'unexpected zk content: %s instead of expected %s' %
            ("|".join(slave_files), "|".join(expected_slave_files_files)))

    tablet_31981.kill_vttablet()
    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()