Пример #1
0
  def test_scrap_and_reinit(self):
    """Scrap a replica tablet, re-init it, and exercise ShardReplicationFix.

    Checks that the replica appears in the replication graph before the
    scrap, after the re-init, and that a manually-added bogus link is
    removed by ShardReplicationFix.
    """
    utils.run_vtctl('CreateKeyspace test_keyspace')

    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')

    # one master one replica
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')

    # make sure the replica is in the replication graph
    before_scrap = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
    self.assertEqual(1, len(before_scrap['ReplicationLinks']), 'wrong replication links before: %s' % str(before_scrap))

    # scrap and re-init
    utils.run_vtctl('ScrapTablet -force ' + tablet_62044.tablet_alias)
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')

    after_scrap = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
    self.assertEqual(1, len(after_scrap['ReplicationLinks']), 'wrong replication links after: %s' % str(after_scrap))

    # manually add a bogus entry to the replication graph, and check
    # it is removed by ShardReplicationFix
    utils.run_vtctl('ShardReplicationAdd test_keyspace/0 test_nj-0000066666 test_nj-0000062344', auto_log=True)
    with_bogus = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
    self.assertEqual(2, len(with_bogus['ReplicationLinks']), 'wrong replication links with bogus: %s' % str(with_bogus))
    utils.run_vtctl('ShardReplicationFix test_nj test_keyspace/0', auto_log=True)
    after_fix = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
    # BUG FIX: assert on after_fix (the re-read graph), not the stale
    # after_scrap snapshot — otherwise ShardReplicationFix is never verified.
    self.assertEqual(1, len(after_fix['ReplicationLinks']), 'wrong replication links after fix: %s' % str(after_fix))
Пример #2
0
  def test_scrap_and_reinit(self):
    """Scrap a replica tablet and re-init it.

    Checks that the replica is present in the shard replication graph both
    before the scrap and after the re-init.
    """
    utils.run_vtctl('CreateKeyspace test_keyspace')

    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')

    # one master one replica
    tablet_62344.init_tablet('master', 'test_keyspace', '0')
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')

    # make sure the replica is in the replication graph
    before_scrap = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
    self.assertEqual(1, len(before_scrap['ReplicationLinks']), 'wrong replication links before: %s' % str(before_scrap))

    # scrap and re-init
    utils.run_vtctl('ScrapTablet -force ' + tablet_62044.tablet_alias)
    tablet_62044.init_tablet('replica', 'test_keyspace', '0')

    after_scrap = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
    # BUG FIX: failure message said "before" (copy-paste of the pre-scrap
    # assertion) although this checks the post-scrap state.
    self.assertEqual(1, len(after_scrap['ReplicationLinks']), 'wrong replication links after: %s' % str(after_scrap))
Пример #3
0
 def _test_reparent_from_outside_check(self, brutal):
   """Verify the shard replication graph after an external reparent.

   Builds a tablet-alias -> parent-alias map from the ReplicationLinks node
   and compares it against the expected topology. When *brutal* is false,
   the old master (62344) is expected to still be linked under the new
   master (62044).
   """
   shard_replication = utils.zk_cat_json('/zk/test_nj/vt/replication/test_keyspace/0')
   def _alias_str(alias):
     # Render a TabletAlias dict as "cell-uid".
     return alias['Cell'] + "-" + str(alias['Uid'])
   hashed_links = dict(
       (_alias_str(link['TabletAlias']), _alias_str(link['Parent']))
       for link in shard_replication['ReplicationLinks'])
   logging.debug("Got replication links: %s", str(hashed_links))
   expected_links = { 'test_nj-41983': 'test_nj-62044' }
   if not brutal:
     expected_links['test_nj-62344'] = 'test_nj-62044'
   self.assertEqual(expected_links, hashed_links, "Got unexpected links: %s != %s" % (str(expected_links), str(hashed_links)))
Пример #4
0
def check_srv_keyspace(cell, keyspace, expected):
  """Compare the srv keyspace node for (cell, keyspace) against *expected*.

  Renders the Partitions map (one line per tablet type listing each shard's
  key range) followed by the TabletTypes list, and raises utils.TestError
  if the rendered text differs from *expected*.
  """
  ks = utils.zk_cat_json('/zk/%s/vt/ns/%s' % (cell, keyspace))
  pieces = []
  for tablet_type in sorted(ks['Partitions'].keys()):
    pieces.append("Partitions(%s):" % tablet_type)
    for shard in ks['Partitions'][tablet_type]['Shards']:
      pieces.append(" %s-%s" % (shard['KeyRange']['Start'],
                                shard['KeyRange']['End']))
    pieces.append("\n")
  pieces.append("TabletTypes: " + ",".join(sorted(ks['TabletTypes'])))
  result = "".join(pieces)
  utils.debug("Cell %s keyspace %s has data:\n%s" % (cell, keyspace, result))
  if result != expected:
    raise utils.TestError("***** Expected srv keyspace:\n%s\nbut got:\n%s\n" %
                          (expected, result))
Пример #5
0
 def _check_srv_keyspace(self, cell, keyspace, expected):
   """Assert the srv keyspace node for (cell, keyspace) matches *expected*.

   The node is rendered as one "Partitions(type): start-end ..." line per
   tablet type plus a trailing "TabletTypes: ..." line, then compared with
   assertEqual.
   """
   ks = utils.zk_cat_json('/zk/%s/vt/ns/%s' % (cell, keyspace))
   chunks = []
   for tablet_type in sorted(ks['Partitions'].keys()):
     chunks.append("Partitions(%s):" % tablet_type)
     shards = ks['Partitions'][tablet_type]['Shards']
     chunks.extend(" %s-%s" % (s['KeyRange']['Start'], s['KeyRange']['End'])
                   for s in shards)
     chunks.append("\n")
   chunks.append("TabletTypes: " + ",".join(sorted(ks['TabletTypes'])))
   result = "".join(chunks)
   logging.debug("Cell %s keyspace %s has data:\n%s", cell, keyspace, result)
   self.assertEqual(expected, result,
                    "Mismatch in srv keyspace for cell %s keyspace %s" % (
                    cell, keyspace))
Пример #6
0
 def _check_srv_keyspace(self, cell, keyspace, expected):
   """Assert the srv keyspace node for (cell, keyspace) equals *expected*.

   Formats each tablet type's shard key ranges on its own line, appends the
   sorted TabletTypes list, logs the rendering, and compares.
   """
   ks = utils.zk_cat_json('/zk/%s/vt/ns/%s' % (cell, keyspace))
   lines = []
   for tablet_type in sorted(ks['Partitions'].keys()):
     ranges = "".join(" %s-%s" % (shard['KeyRange']['Start'],
                                  shard['KeyRange']['End'])
                      for shard in ks['Partitions'][tablet_type]['Shards'])
     lines.append("Partitions(%s):%s\n" % (tablet_type, ranges))
   lines.append("TabletTypes: " + ",".join(sorted(ks['TabletTypes'])))
   result = "".join(lines)
   logging.debug("Cell %s keyspace %s has data:\n%s", cell, keyspace, result)
   self.assertEqual(expected, result,
                    "Mismatch in srv keyspace for cell %s keyspace %s" % (
                    cell, keyspace))
Пример #7
0
def check_srv_keyspace(cell, keyspace, expected):
    """Compare the srv keyspace node for (cell, keyspace) with *expected*.

    Renders Partitions per tablet type (each shard's key range) plus the
    TabletTypes list into a single string; raises utils.TestError when the
    rendering does not equal *expected*.
    """
    ks = utils.zk_cat_json('/zk/%s/vt/ns/%s' % (cell, keyspace))
    out = []
    for tablet_type in sorted(ks['Partitions'].keys()):
        out.append("Partitions(%s):" % tablet_type)
        for shard in ks['Partitions'][tablet_type]['Shards']:
            out.append(" %s-%s" % (shard['KeyRange']['Start'],
                                   shard['KeyRange']['End']))
        out.append("\n")
    out.append("TabletTypes: " + ",".join(sorted(ks['TabletTypes'])))
    result = "".join(out)
    utils.debug("Cell %s keyspace %s has data:\n%s" % (cell, keyspace, result))
    if result == expected:
        return
    raise utils.TestError(
        "***** Expected srv keyspace:\n%s\nbut got:\n%s\n" %
        (expected, result))
Пример #8
0
  def _test_reparent_graceful(self, shard_id):
    """End-to-end graceful reparent test for a single shard.

    Creates a keyspace with one master (62344) and three replicas, forces an
    initial reparent onto 62344, gracefully reparents onto 62044, and finally
    checks that a restarted vttablet's new port is picked up in the serving
    graph.

    Args:
      shard_id: shard name to operate on (e.g. '0').
    """
    utils.run_vtctl('CreateKeyspace test_keyspace')

    # create the database so vttablets start, as they are serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    tablet_41983.create_db('vt_test_keyspace')
    tablet_31981.create_db('vt_test_keyspace')

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True)
    # With only the master registered, the shard should know one cell.
    shard = utils.zk_cat_json('/zk/global/vt/keyspaces/test_keyspace/shards/' + shard_id)
    self.assertEqual(shard['Cells'], ['test_nj'], 'wrong list of cell in Shard: %s' % str(shard['Cells']))

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_41983.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    # The replicas span a second cell, which must now appear in the shard.
    shard = utils.zk_cat_json('/zk/global/vt/keyspaces/test_keyspace/shards/' + shard_id)
    self.assertEqual(shard['Cells'], ['test_nj', 'test_ny'], 'wrong list of cell in Shard: %s' % str(shard['Cells']))

    # Recompute the shard layout node - until you do that, it might not be valid.
    utils.run_vtctl('RebuildShardGraph test_keyspace/' + shard_id)
    utils.validate_topology()

    # Force the slaves to reparent assuming that all the datasets are identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
      t.reset_replication()
    utils.pause("force ReparentShard?")
    utils.run_vtctl('ReparentShard -force test_keyspace/%s %s' % (shard_id, tablet_62344.tablet_alias))
    utils.validate_topology(ping_tablets=True)

    # The master address in the serving graph should point at 62344.
    expected_addr = utils.hostname + ':' + str(tablet_62344.port)
    self._check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    # Convert two replica to spare. That should leave only one node serving traffic,
    # but still needs to appear in the replication graph.
    utils.run_vtctl(['ChangeSlaveType', tablet_41983.tablet_alias, 'spare'])
    utils.run_vtctl(['ChangeSlaveType', tablet_31981.tablet_alias, 'spare'])
    utils.validate_topology()
    expected_addr = utils.hostname + ':' + str(tablet_62044.port)
    self._check_db_addr('test_keyspace.%s.replica:_vtocc' % shard_id, expected_addr)

    # Run this to make sure it succeeds.
    utils.run_vtctl('ShardReplicationPositions test_keyspace/%s' % shard_id, stdout=utils.devnull)

    # Perform a graceful reparent operation.
    utils.pause("graceful ReparentShard?")
    utils.run_vtctl('ReparentShard test_keyspace/%s %s' % (shard_id, tablet_62044.tablet_alias), auto_log=True)
    utils.validate_topology()

    # After the graceful reparent, 62044 is the master.
    expected_addr = utils.hostname + ':' + str(tablet_62044.port)
    self._check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
    tablet_31981.kill_vttablet()

    # Test address correction.
    new_port = utils.reserve_ports(1)
    tablet_62044.start_vttablet(port=new_port)
    # Wait a moment for address to reregister.
    time.sleep(1.0)

    # The serving graph should now reflect the new port.
    expected_addr = utils.hostname + ':' + str(new_port)
    self._check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    tablet_62044.kill_vttablet()
  def _test_reparent_graceful(self, shard_id):
    """End-to-end graceful reparent test for a single shard.

    Creates a keyspace with one master (62344) and three replicas, forces an
    initial reparent onto 62344, gracefully reparents onto 62044, and finally
    checks that a restarted vttablet's new port is picked up in the serving
    graph.

    Args:
      shard_id: shard name to operate on (e.g. '0').
    """
    utils.run_vtctl('CreateKeyspace test_keyspace')

    # create the database so vttablets start, as they are serving
    tablet_62344.create_db('vt_test_keyspace')
    tablet_62044.create_db('vt_test_keyspace')
    tablet_41983.create_db('vt_test_keyspace')
    tablet_31981.create_db('vt_test_keyspace')

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet('master', 'test_keyspace', shard_id, start=True)
    # With only the master registered, the shard should know one cell.
    shard = utils.zk_cat_json('/zk/global/vt/keyspaces/test_keyspace/shards/' + shard_id)
    self.assertEqual(shard['Cells'], ['test_nj'], 'wrong list of cell in Shard: %s' % str(shard['Cells']))

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_41983.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    tablet_31981.init_tablet('replica', 'test_keyspace', shard_id, start=True)
    # The replicas span a second cell, which must now appear in the shard.
    shard = utils.zk_cat_json('/zk/global/vt/keyspaces/test_keyspace/shards/' + shard_id)
    self.assertEqual(shard['Cells'], ['test_nj', 'test_ny'], 'wrong list of cell in Shard: %s' % str(shard['Cells']))

    # Recompute the shard layout node - until you do that, it might not be valid.
    utils.run_vtctl('RebuildShardGraph test_keyspace/' + shard_id)
    utils.validate_topology()

    # Force the slaves to reparent assuming that all the datasets are identical.
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
      t.reset_replication()
    utils.pause("force ReparentShard?")
    utils.run_vtctl('ReparentShard -force test_keyspace/%s %s' % (shard_id, tablet_62344.tablet_alias))
    utils.validate_topology(ping_tablets=True)

    # The master address in the serving graph should point at 62344.
    expected_addr = utils.hostname + ':' + str(tablet_62344.port)
    self._check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    # Convert two replica to spare. That should leave only one node serving traffic,
    # but still needs to appear in the replication graph.
    utils.run_vtctl(['ChangeSlaveType', tablet_41983.tablet_alias, 'spare'])
    utils.run_vtctl(['ChangeSlaveType', tablet_31981.tablet_alias, 'spare'])
    utils.validate_topology()
    expected_addr = utils.hostname + ':' + str(tablet_62044.port)
    self._check_db_addr('test_keyspace.%s.replica:_vtocc' % shard_id, expected_addr)

    # Run this to make sure it succeeds.
    utils.run_vtctl('ShardReplicationPositions test_keyspace/%s' % shard_id, stdout=utils.devnull)

    # Perform a graceful reparent operation.
    utils.pause("graceful ReparentShard?")
    utils.run_vtctl('ReparentShard test_keyspace/%s %s' % (shard_id, tablet_62044.tablet_alias), auto_log=True)
    utils.validate_topology()

    # After the graceful reparent, 62044 is the master.
    expected_addr = utils.hostname + ':' + str(tablet_62044.port)
    self._check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
    tablet_31981.kill_vttablet()

    # Test address correction.
    new_port = utils.reserve_ports(1)
    tablet_62044.start_vttablet(port=new_port)
    # Wait a moment for address to reregister.
    time.sleep(1.0)

    # The serving graph should now reflect the new port.
    expected_addr = utils.hostname + ':' + str(new_port)
    self._check_db_addr('test_keyspace.%s.master:_vtocc' % shard_id, expected_addr)

    tablet_62044.kill_vttablet()