Code Example #1
File: tabletmanager.py  Project: dolfly/vitess
  def _test_reparent_from_outside_check(self, brutal):
    # make sure the replication graph is fine
    shard_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0')
    logging.debug('shard_files: %s' % " ".join(shard_files))
    if shard_files != ['action', 'actionlog', 'test_nj-0000062044']:
      raise utils.TestError('unexpected zk content: %s' % " ".join(shard_files))

    slave_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044')
    logging.debug('slave_files: %s' % " ".join(slave_files))
    expected_slave_files = ['test_nj-0000041983', 'test_nj-0000062344']
    if brutal:
      expected_slave_files = ['test_nj-0000041983']
    if slave_files != expected_slave_files:
      raise utils.TestError('unexpected zk content: %s instead of expected %s' %
                            ("|".join(slave_files),
                             "|".join(expected_slave_files)))
Code Example #2
  def _test_reparent_from_outside_check(self, brutal):
    # make sure the replication graph is fine
    shard_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0')
    logging.debug('shard_files: %s' % " ".join(shard_files))
    if shard_files != ['action', 'actionlog', 'test_nj-0000062044']:
      raise utils.TestError('unexpected zk content: %s' % " ".join(shard_files))

    slave_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044')
    logging.debug('slave_files: %s' % " ".join(slave_files))
    expected_slave_files = ['test_nj-0000041983', 'test_nj-0000062344']
    if brutal:
      expected_slave_files = ['test_nj-0000041983']
    if slave_files != expected_slave_files:
      raise utils.TestError('unexpected zk content: %s instead of expected %s' %
                            ("|".join(slave_files),
                             "|".join(expected_slave_files_files)))
Code Example #3
File: tabletmanager.py  Project: hub501/go-pack
def run_test_reparent_from_outside():
  utils.zk_wipe()

  utils.run_vtctl('CreateKeyspace test_keyspace')

  # create the database so vttablets start, as they are serving
  tablet_62344.create_db('vt_test_keyspace')
  tablet_62044.create_db('vt_test_keyspace')
  tablet_41983.create_db('vt_test_keyspace')
  tablet_31981.create_db('vt_test_keyspace')

  # Start up a master mysql and vttablet
  tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

  # Create a few slaves for testing reparenting.
  tablet_62044.init_tablet('replica', 'test_keyspace', '0', start=True)
  tablet_41983.init_tablet('replica', 'test_keyspace', '0', start=True)
  tablet_31981.init_tablet('replica', 'test_keyspace', '0', start=True)

  # Reparent as a starting point
  utils.run_vtctl('ReparentShard -force test_keyspace/0 %s' % tablet_62344.tablet_alias)

  # now manually reparent 1 out of 2 tablets
  # 62044 will be the new master
  # 31981 won't be re-parented, so it will be busted
  tablet_62044.mquery('', [
      "RESET MASTER",
      "STOP SLAVE",
      "RESET SLAVE",
      "CHANGE MASTER TO MASTER_HOST = ''",
      ])
  new_pos = tablet_62044.mquery('', 'show master status')
  utils.debug("New master position: %s" % str(new_pos))

  # 62344 will now be a slave of 62044
  tablet_62344.mquery('', [
      "RESET MASTER",
      "RESET SLAVE",
      "change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u" % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
      'start slave'
      ])

  # 41983 will be a slave of 62044
  tablet_41983.mquery('', [
      'stop slave',
      "change master to master_port=%u, master_log_file='%s', master_log_pos=%u" % (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
      'start slave'
      ])

  # update zk with the new graph
  utils.run_vtctl('ShardExternallyReparented -scrap-stragglers test_keyspace/0 %s' % tablet_62044.tablet_alias, auto_log=True)

  # make sure the replication graph is fine
  shard_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0')
  utils.debug('shard_files: %s' % " ".join(shard_files))
  if shard_files != ['action', 'actionlog', 'test_nj-0000062044']:
    raise utils.TestError('unexpected zk content: %s' % " ".join(shard_files))

  slave_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044')
  utils.debug('slave_files: %s' % " ".join(slave_files))
  if slave_files != ['test_nj-0000041983', 'test_nj-0000062344']:
    raise utils.TestError('unexpected zk content: %s' % " ".join(slave_files))

  tablet_31981.kill_vttablet()
  tablet_62344.kill_vttablet()
  tablet_62044.kill_vttablet()
  tablet_41983.kill_vttablet()
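The ls/compare/raise pattern above is repeated verbatim across these examples. A hypothetical refactoring sketch (the helper name and its placement are assumptions, not project code):

def assert_zk_children(path, expected):
  # Hypothetical helper: fetch the children of a zk node and fail the test
  # if they differ from the expected list.
  children = utils.zk_ls(path)
  utils.debug('%s: %s' % (path, ' '.join(children)))
  if children != expected:
    raise utils.TestError('unexpected zk content at %s: %s instead of expected %s' %
                          (path, '|'.join(children), '|'.join(expected)))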
Code Example #4
File: schema.py  Project: Abioy/vitess
  def test_complex_schema(self):

    utils.run_vtctl('CreateKeyspace test_keyspace')

    shard_0_master.init_tablet(  'master',  'test_keyspace', '0')
    shard_0_replica1.init_tablet('replica', 'test_keyspace', '0')
    shard_0_replica2.init_tablet('replica', 'test_keyspace', '0')
    shard_0_rdonly.init_tablet(  'rdonly',  'test_keyspace', '0')
    shard_0_backup.init_tablet(  'backup',  'test_keyspace', '0')
    shard_1_master.init_tablet(  'master',  'test_keyspace', '1')
    shard_1_replica1.init_tablet('replica', 'test_keyspace', '1')

    utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)

    # run checks now before we start the tablets
    utils.validate_topology()

    # create databases, start the tablets
    for t in [shard_0_master, shard_0_replica1, shard_0_replica2,
              shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1]:
      t.create_db('vt_test_keyspace')
      t.start_vttablet(wait_for_state=None)

    # wait for the tablets to start
    shard_0_master.wait_for_vttablet_state('SERVING')
    shard_0_replica1.wait_for_vttablet_state('SERVING')
    shard_0_replica2.wait_for_vttablet_state('SERVING')
    shard_0_rdonly.wait_for_vttablet_state('SERVING')
    shard_0_backup.wait_for_vttablet_state('NOT_SERVING')
    shard_1_master.wait_for_vttablet_state('SERVING')
    shard_1_replica1.wait_for_vttablet_state('SERVING')

    # make sure all replication is good
    for t in [shard_0_master, shard_0_replica1, shard_0_replica2,
              shard_0_rdonly, shard_0_backup, shard_1_master, shard_1_replica1]:
      t.reset_replication()
    utils.run_vtctl('ReparentShard -force test_keyspace/0 ' + shard_0_master.tablet_alias, auto_log=True)
    utils.run_vtctl('ReparentShard -force test_keyspace/1 ' + shard_1_master.tablet_alias, auto_log=True)
    utils.run_vtctl('ValidateKeyspace -ping-tablets test_keyspace')

    # check after all tablets are here and replication is fixed
    utils.validate_topology(ping_tablets=True)

    # shard 0: apply the schema using a complex schema upgrade, no
    # reparenting yet
    utils.run_vtctl(['ApplySchemaShard',
                     '-sql='+create_vt_select_test[0],
                     'test_keyspace/0'],
                    auto_log=True)

    # check all expected hosts have the change:
    # - master won't have it as it's a complex change
    self._check_tables(shard_0_master, 0)
    self._check_tables(shard_0_replica1, 1)
    self._check_tables(shard_0_replica2, 1)
    self._check_tables(shard_0_rdonly, 1)
    self._check_tables(shard_0_backup, 1)
    self._check_tables(shard_1_master, 0)
    self._check_tables(shard_1_replica1, 0)

    # shard 0: apply schema change to just master directly
    # (to test its state is not changed)
    utils.run_vtctl(['ApplySchema',
                     '-stop-replication',
                     '-sql='+create_vt_select_test[0],
                     shard_0_master.tablet_alias],
                    auto_log=True)
    self._check_tables(shard_0_master, 1)

    # shard 0: apply new schema change, with reparenting
    utils.run_vtctl(['ApplySchemaShard',
                     '-new-parent='+shard_0_replica1.tablet_alias,
                     '-sql='+create_vt_select_test[1],
                     'test_keyspace/0'],
                    auto_log=True)
    self._check_tables(shard_0_master, 1)
    self._check_tables(shard_0_replica1, 2)
    self._check_tables(shard_0_replica2, 2)
    self._check_tables(shard_0_rdonly, 2)
    self._check_tables(shard_0_backup, 2)

    # verify GetSchema --tables works
    out, err = utils.run_vtctl('GetSchema --tables=vt_select_test0 ' +
                               shard_0_replica1.tablet_alias,
                               log_level='INFO',
                               trap_output=True)
    if not "vt_select_test0" in err or "vt_select_test1" in err:
      self.fail('Unexpected GetSchema --tables=vt_select_test0 output: %s' % err)

    # keyspace: try to apply a keyspace-wide schema change, should fail
    # as the preflight would be different in both shards
    out, err = utils.run_vtctl(['ApplySchemaKeyspace',
                                '-sql='+create_vt_select_test[2],
                                'test_keyspace'],
                               trap_output=True,
                               log_level='INFO',
                               raise_on_error=False)
    if err.find('ApplySchemaKeyspace Shard 1 has inconsistent schema') == -1:
      self.fail('Unexpected ApplySchemaKeyspace output: %s' % err)

    if environment.topo_server_implementation == 'zookeeper':
      utils.run_vtctl('PurgeActions /zk/global/vt/keyspaces/test_keyspace/action')

    # shard 1: catch it up with simple updates
    utils.run_vtctl(['ApplySchemaShard',
                     '-simple',
                     '-sql='+create_vt_select_test[0],
                     'test_keyspace/1'],
                    auto_log=True)
    utils.run_vtctl(['ApplySchemaShard',
                     '-simple',
                     '-sql='+create_vt_select_test[1],
                     'test_keyspace/1'],
                    auto_log=True)
    self._check_tables(shard_1_master, 2)
    self._check_tables(shard_1_replica1, 2)

    # keyspace: apply a keyspace-wide simple schema change, should work now
    utils.run_vtctl(['ApplySchemaKeyspace',
                     '-simple',
                     '-sql='+create_vt_select_test[2],
                     'test_keyspace'],
                    auto_log=True)

    # check all expected hosts have the change
    self._check_tables(shard_0_master, 1) # was stuck a long time ago as scrap
    self._check_tables(shard_0_replica1, 3) # current master
    self._check_tables(shard_0_replica2, 3)
    self._check_tables(shard_0_rdonly, 3)
    self._check_tables(shard_0_backup, 3)
    self._check_tables(shard_1_master, 3) # current master
    self._check_tables(shard_1_replica1, 3)

    # keyspace: apply a keyspace-wide complex schema change, should work too
    utils.run_vtctl(['ApplySchemaKeyspace',
                     '-sql='+create_vt_select_test[3],
                     'test_keyspace'],
                    auto_log=True)

    # check all expected hosts have the change:
    # - master won't have it as it's a complex change
    # - backup won't have it as IsReplicatingType is false
    self._check_tables(shard_0_master, 1) # was stuck a long time ago as scrap
    self._check_tables(shard_0_replica1, 3) # current master
    self._check_tables(shard_0_replica2, 4)
    self._check_tables(shard_0_rdonly, 4)
    self._check_tables(shard_0_backup, 4)
    self._check_tables(shard_1_master, 3) # current master
    self._check_tables(shard_1_replica1, 4)

    # now test action log pruning
    if environment.topo_server_implementation == 'zookeeper':
      oldLines = utils.zk_ls(shard_0_replica1.zk_tablet_path+'/actionlog')
      oldCount = len(oldLines)
      logging.debug("I have %u actionlog before", oldCount)
      if oldCount <= 5:
        self.fail('Not enough actionlog before: %u' % oldCount)

      utils.run_vtctl('PruneActionLogs -keep-count=5 /zk/*/vt/tablets/*/actionlog', auto_log=True)

      newLines = utils.zk_ls(shard_0_replica1.zk_tablet_path+'/actionlog')
      newCount = len(newLines)
      logging.debug("I have %u actionlog after", newCount)

      self.assertEqual(newCount, 5, 'Unexpected actionlog count after: %u' % newCount)
      if oldLines[-5:] != newLines:
        self.fail('Unexpected actionlog values:\n%s\n%s' %
                  (' '.join(oldLines[-5:]), ' '.join(newLines)))

    utils.pause("Look at schema now!")

    tablet.kill_tablets([shard_0_master, shard_0_replica1, shard_0_replica2,
                         shard_0_rdonly, shard_0_backup, shard_1_master,
                         shard_1_replica1])
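For orientation, a minimal sketch of what a `_check_tables` helper consistent with these assertions could look like; the real implementation lives elsewhere in schema.py, so the body below (including the `show tables` query against vt_test_keyspace) is an assumption:

  def _check_tables(self, tablet, expected_count):
    # Hypothetical sketch: each applied create_vt_select_test statement creates
    # one more vt_select_test* table, so the table count tracks how many
    # schema changes reached this tablet.
    tables = tablet.mquery('vt_test_keyspace', 'show tables')
    self.assertEqual(len(tables), expected_count,
                     'tables on %s: got %u, expected %u' %
                     (tablet.tablet_alias, len(tables), expected_count))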
Code Example #5
File: schema.py  Project: yyzi/vitess
    def test_complex_schema(self):

        utils.run_vtctl(['CreateKeyspace', 'test_keyspace'])

        shard_0_master.init_tablet('master', 'test_keyspace', '0')
        shard_0_replica1.init_tablet('replica', 'test_keyspace', '0')
        shard_0_replica2.init_tablet('replica', 'test_keyspace', '0')
        shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
        shard_0_backup.init_tablet('backup', 'test_keyspace', '0')
        shard_1_master.init_tablet('master', 'test_keyspace', '1')
        shard_1_replica1.init_tablet('replica', 'test_keyspace', '1')

        utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace'],
                        auto_log=True)

        # run checks now before we start the tablets
        utils.validate_topology()

        # create databases, start the tablets
        for t in [
                shard_0_master, shard_0_replica1, shard_0_replica2,
                shard_0_rdonly, shard_0_backup, shard_1_master,
                shard_1_replica1
        ]:
            t.create_db('vt_test_keyspace')
            t.start_vttablet(wait_for_state=None)

        # wait for the tablets to start
        shard_0_master.wait_for_vttablet_state('SERVING')
        shard_0_replica1.wait_for_vttablet_state('SERVING')
        shard_0_replica2.wait_for_vttablet_state('SERVING')
        shard_0_rdonly.wait_for_vttablet_state('SERVING')
        shard_0_backup.wait_for_vttablet_state('NOT_SERVING')
        shard_1_master.wait_for_vttablet_state('SERVING')
        shard_1_replica1.wait_for_vttablet_state('SERVING')

        # make sure all replication is good
        for t in [
                shard_0_master, shard_0_replica1, shard_0_replica2,
                shard_0_rdonly, shard_0_backup, shard_1_master,
                shard_1_replica1
        ]:
            t.reset_replication()
        utils.run_vtctl([
            'ReparentShard', '-force', 'test_keyspace/0',
            shard_0_master.tablet_alias
        ],
                        auto_log=True)
        utils.run_vtctl([
            'ReparentShard', '-force', 'test_keyspace/1',
            shard_1_master.tablet_alias
        ],
                        auto_log=True)
        utils.run_vtctl(['ValidateKeyspace', '-ping-tablets', 'test_keyspace'])

        # check after all tablets are here and replication is fixed
        utils.validate_topology(ping_tablets=True)

        # shard 0: apply the schema using a complex schema upgrade, no
        # reparenting yet
        utils.run_vtctl([
            'ApplySchemaShard', '-sql=' + create_vt_select_test[0],
            'test_keyspace/0'
        ],
                        auto_log=True)

        # check all expected hosts have the change:
        # - master won't have it as it's a complex change
        self._check_tables(shard_0_master, 0)
        self._check_tables(shard_0_replica1, 1)
        self._check_tables(shard_0_replica2, 1)
        self._check_tables(shard_0_rdonly, 1)
        self._check_tables(shard_0_backup, 1)
        self._check_tables(shard_1_master, 0)
        self._check_tables(shard_1_replica1, 0)

        # shard 0: apply schema change to just master directly
        # (to test its state is not changed)
        utils.run_vtctl([
            'ApplySchema', '-stop-replication',
            '-sql=' + create_vt_select_test[0], shard_0_master.tablet_alias
        ],
                        auto_log=True)
        self._check_tables(shard_0_master, 1)

        # shard 0: apply new schema change, with reparenting
        utils.run_vtctl([
            'ApplySchemaShard', '-new-parent=' + shard_0_replica1.tablet_alias,
            '-sql=' + create_vt_select_test[1], 'test_keyspace/0'
        ],
                        auto_log=True)
        self._check_tables(shard_0_master, 1)
        self._check_tables(shard_0_replica1, 2)
        self._check_tables(shard_0_replica2, 2)
        self._check_tables(shard_0_rdonly, 2)
        self._check_tables(shard_0_backup, 2)

        # verify GetSchema --tables works
        s = utils.run_vtctl_json([
            'GetSchema', '--tables=vt_select_test0',
            shard_0_replica1.tablet_alias
        ])
        self.assertEqual(len(s['TableDefinitions']), 1)
        self.assertEqual(s['TableDefinitions'][0]['Name'], 'vt_select_test0')

        # keyspace: try to apply a keyspace-wide schema change, should fail
        # as the preflight would be different in both shards
        out, err = utils.run_vtctl([
            'ApplySchemaKeyspace', '-sql=' + create_vt_select_test[2],
            'test_keyspace'
        ],
                                   trap_output=True,
                                   log_level='INFO',
                                   raise_on_error=False)
        if err.find(
                'ApplySchemaKeyspace Shard 1 has inconsistent schema') == -1:
            self.fail('Unexpected ApplySchemaKeyspace output: %s' % err)

        if environment.topo_server_implementation == 'zookeeper':
            utils.run_vtctl([
                'PurgeActions', '/zk/global/vt/keyspaces/test_keyspace/action'
            ])

        # shard 1: catch it up with simple updates
        utils.run_vtctl([
            'ApplySchemaShard', '-simple', '-sql=' + create_vt_select_test[0],
            'test_keyspace/1'
        ],
                        auto_log=True)
        utils.run_vtctl([
            'ApplySchemaShard', '-simple', '-sql=' + create_vt_select_test[1],
            'test_keyspace/1'
        ],
                        auto_log=True)
        self._check_tables(shard_1_master, 2)
        self._check_tables(shard_1_replica1, 2)

        # keyspace: apply a keyspace-wide simple schema change, should work now
        utils.run_vtctl([
            'ApplySchemaKeyspace', '-simple',
            '-sql=' + create_vt_select_test[2], 'test_keyspace'
        ],
                        auto_log=True)

        # check all expected hosts have the change
        self._check_tables(shard_0_master,
                           1)  # was stuck a long time ago as scrap
        self._check_tables(shard_0_replica1, 3)  # current master
        self._check_tables(shard_0_replica2, 3)
        self._check_tables(shard_0_rdonly, 3)
        self._check_tables(shard_0_backup, 3)
        self._check_tables(shard_1_master, 3)  # current master
        self._check_tables(shard_1_replica1, 3)

        # keyspace: apply a keyspace-wide complex schema change, should work too
        utils.run_vtctl([
            'ApplySchemaKeyspace', '-sql=' + create_vt_select_test[3],
            'test_keyspace'
        ],
                        auto_log=True)

        # check all expected hosts have the change:
        # - master won't have it as it's a complex change
        # - backup won't have it as IsReplicatingType is false
        self._check_tables(shard_0_master,
                           1)  # was stuck a long time ago as scrap
        self._check_tables(shard_0_replica1, 3)  # current master
        self._check_tables(shard_0_replica2, 4)
        self._check_tables(shard_0_rdonly, 4)
        self._check_tables(shard_0_backup, 4)
        self._check_tables(shard_1_master, 3)  # current master
        self._check_tables(shard_1_replica1, 4)

        # now test action log pruning
        if environment.topo_server_implementation == 'zookeeper':
            oldLines = utils.zk_ls(shard_0_replica1.zk_tablet_path +
                                   '/actionlog')
            oldCount = len(oldLines)
            logging.debug("I have %u actionlog before", oldCount)
            if oldCount <= 5:
                self.fail('Not enough actionlog before: %u' % oldCount)

            utils.run_vtctl([
                'PruneActionLogs', '-keep-count=5',
                '/zk/*/vt/tablets/*/actionlog'
            ],
                            auto_log=True)

            newLines = utils.zk_ls(shard_0_replica1.zk_tablet_path +
                                   '/actionlog')
            newCount = len(newLines)
            logging.debug("I have %u actionlog after", newCount)

            self.assertEqual(newCount, 5,
                             'Unexpected actionlog count after: %u' % newCount)
            if oldLines[-5:] != newLines:
                self.fail('Unexpected actionlog values:\n%s\n%s' %
                          (' '.join(oldLines[-5:]), ' '.join(newLines)))

        utils.pause("Look at schema now!")

        tablet.kill_tablets([
            shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly,
            shard_0_backup, shard_1_master, shard_1_replica1
        ])
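Note how this version replaces Example #4's string-grepping GetSchema check (trap_output plus substring tests on stderr) with utils.run_vtctl_json and direct assertions on the parsed TableDefinitions, which is less fragile against changes in log formatting.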
Code Example #6
def _run_test_reparent_from_outside(brutal=False):
    utils.zk_wipe()

    utils.run_vtctl("CreateKeyspace test_keyspace")

    # create the database so vttablets start, as they are serving
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.create_db("vt_test_keyspace")

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet("master", "test_keyspace", "0", start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet("replica", "test_keyspace", "0", start=True)
    tablet_41983.init_tablet("replica", "test_keyspace", "0", start=True)
    tablet_31981.init_tablet("replica", "test_keyspace", "0", start=True)

    # Reparent as a starting point
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl("ReparentShard -force test_keyspace/0 %s" % tablet_62344.tablet_alias, auto_log=True)

    # now manually reparent 1 out of 2 tablets
    # 62044 will be the new master
    # 31981 won't be re-parented, so it will be busted
    tablet_62044.mquery("", ["RESET MASTER", "STOP SLAVE", "RESET SLAVE", "CHANGE MASTER TO MASTER_HOST = ''"])
    new_pos = tablet_62044.mquery("", "show master status")
    utils.debug("New master position: %s" % str(new_pos))

    # 62344 will now be a slave of 62044
    tablet_62344.mquery(
        "",
        [
            "RESET MASTER",
            "RESET SLAVE",
            "change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u"
            % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
            "start slave",
        ],
    )

    # 41983 will be a slave of 62044
    tablet_41983.mquery(
        "",
        [
            "stop slave",
            "change master to master_port=%u, master_log_file='%s', master_log_pos=%u"
            % (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]),
            "start slave",
        ],
    )

    # in brutal mode, we scrap the old master first
    if brutal:
        tablet_62344.scrap(force=True)
        # we have some automated tools that do this too, so it's good to simulate
        utils.run(utils.vtroot + "/bin/zk rm -rf " + tablet_62344.zk_tablet_path)

    # update zk with the new graph
    utils.run_vtctl(
        "ShardExternallyReparented -scrap-stragglers test_keyspace/0 %s" % tablet_62044.tablet_alias, auto_log=True
    )

    # make sure the replication graph is fine
    shard_files = utils.zk_ls("/zk/global/vt/keyspaces/test_keyspace/shards/0")
    utils.debug("shard_files: %s" % " ".join(shard_files))
    if shard_files != ["action", "actionlog", "test_nj-0000062044"]:
        raise utils.TestError("unexpected zk content: %s" % " ".join(shard_files))

    slave_files = utils.zk_ls("/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044")
    utils.debug("slave_files: %s" % " ".join(slave_files))
    expected_slave_files = ["test_nj-0000041983", "test_nj-0000062344"]
    if brutal:
        expected_slave_files = ["test_nj-0000041983"]
    if slave_files != expected_slave_files:
        raise utils.TestError(
            "unexpected zk content: %s instead of expected %s"
            % ("|".join(slave_files), "|".join(expected_slave_files_files))
        )

    tablet_31981.kill_vttablet()
    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
Code Example #7
def _run_test_reparent_from_outside(brutal=False):
    utils.zk_wipe()

    utils.run_vtctl('CreateKeyspace test_keyspace')

    # create the database so vttablets start, as they are serving
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.create_db('vt_test_keyspace')

    # Start up a master mysql and vttablet
    tablet_62344.init_tablet('master', 'test_keyspace', '0', start=True)

    # Create a few slaves for testing reparenting.
    tablet_62044.init_tablet('replica', 'test_keyspace', '0', start=True)
    tablet_41983.init_tablet('replica', 'test_keyspace', '0', start=True)
    tablet_31981.init_tablet('replica', 'test_keyspace', '0', start=True)

    # Reparent as a starting point
    for t in [tablet_62344, tablet_62044, tablet_41983, tablet_31981]:
        t.reset_replication()
    utils.run_vtctl('ReparentShard -force test_keyspace/0 %s' %
                    tablet_62344.tablet_alias,
                    auto_log=True)

    # now manually reparent 1 out of 2 tablets
    # 62044 will be the new master
    # 31981 won't be re-parented, so it will be busted
    tablet_62044.mquery('', [
        "RESET MASTER",
        "STOP SLAVE",
        "RESET SLAVE",
        "CHANGE MASTER TO MASTER_HOST = ''",
    ])
    new_pos = tablet_62044.mquery('', 'show master status')
    utils.debug("New master position: %s" % str(new_pos))

    # 62344 will now be a slave of 62044
    tablet_62344.mquery('', [
        "RESET MASTER", "RESET SLAVE",
        "change master to master_host='%s', master_port=%u, master_log_file='%s', master_log_pos=%u"
        % (utils.hostname, tablet_62044.mysql_port, new_pos[0][0],
           new_pos[0][1]), 'start slave'
    ])

    # 41983 will be a slave of 62044
    tablet_41983.mquery('', [
        'stop slave',
        "change master to master_port=%u, master_log_file='%s', master_log_pos=%u"
        %
        (tablet_62044.mysql_port, new_pos[0][0], new_pos[0][1]), 'start slave'
    ])

    # in brutal mode, we scrap the old master first
    if brutal:
        tablet_62344.scrap(force=True)
        # we have some automated tools that do this too, so it's good to simulate
        utils.run(utils.vtroot + '/bin/zk rm -rf ' +
                  tablet_62344.zk_tablet_path)

    # update zk with the new graph
    utils.run_vtctl(
        'ShardExternallyReparented -scrap-stragglers test_keyspace/0 %s' %
        tablet_62044.tablet_alias,
        auto_log=True)

    # make sure the replication graph is fine
    shard_files = utils.zk_ls('/zk/global/vt/keyspaces/test_keyspace/shards/0')
    utils.debug('shard_files: %s' % " ".join(shard_files))
    if shard_files != ['action', 'actionlog', 'test_nj-0000062044']:
        raise utils.TestError('unexpected zk content: %s' %
                              " ".join(shard_files))

    slave_files = utils.zk_ls(
        '/zk/global/vt/keyspaces/test_keyspace/shards/0/test_nj-0000062044')
    utils.debug('slave_files: %s' % " ".join(slave_files))
    expected_slave_files = ['test_nj-0000041983', 'test_nj-0000062344']
    if brutal:
        expected_slave_files = ['test_nj-0000041983']
    if slave_files != expected_slave_files:
        raise utils.TestError(
            'unexpected zk content: %s instead of expected %s' %
            ("|".join(slave_files), "|".join(expected_slave_files_files)))

    tablet_31981.kill_vttablet()
    tablet_62344.kill_vttablet()
    tablet_62044.kill_vttablet()
    tablet_41983.kill_vttablet()
Code Example #8
File: schema.py  Project: Eric-Chen/vitess
def run_test_complex_schema():

    utils.run_vtctl("CreateKeyspace test_keyspace")

    shard_0_master.init_tablet("master", "test_keyspace", "0")
    shard_0_replica1.init_tablet("replica", "test_keyspace", "0")
    shard_0_replica2.init_tablet("replica", "test_keyspace", "0")
    shard_0_rdonly.init_tablet("rdonly", "test_keyspace", "0")
    shard_0_backup.init_tablet("backup", "test_keyspace", "0")
    shard_1_master.init_tablet("master", "test_keyspace", "1")
    shard_1_replica1.init_tablet("replica", "test_keyspace", "1")

    utils.run_vtctl("RebuildShardGraph test_keyspace/0", auto_log=True)
    utils.run_vtctl("RebuildKeyspaceGraph test_keyspace", auto_log=True)

    # run checks now before we start the tablets
    utils.validate_topology()

    # create databases
    shard_0_master.create_db("vt_test_keyspace")
    shard_0_replica1.create_db("vt_test_keyspace")
    shard_0_replica2.create_db("vt_test_keyspace")
    shard_0_rdonly.create_db("vt_test_keyspace")
    shard_0_backup.create_db("vt_test_keyspace")
    shard_1_master.create_db("vt_test_keyspace")
    shard_1_replica1.create_db("vt_test_keyspace")

    # start the tablets
    shard_0_master.start_vttablet()
    shard_0_replica1.start_vttablet()
    shard_0_replica2.start_vttablet()
    shard_0_rdonly.start_vttablet()
    shard_0_backup.start_vttablet(wait_for_state="NOT_SERVING")
    shard_1_master.start_vttablet()
    shard_1_replica1.start_vttablet()

    # make sure all replication is good
    for t in [
        shard_0_master,
        shard_0_replica1,
        shard_0_replica2,
        shard_0_rdonly,
        shard_0_backup,
        shard_1_master,
        shard_1_replica1,
    ]:
        t.reset_replication()
    utils.run_vtctl("ReparentShard -force test_keyspace/0 " + shard_0_master.tablet_alias, auto_log=True)
    utils.run_vtctl("ReparentShard -force test_keyspace/1 " + shard_1_master.tablet_alias, auto_log=True)
    utils.run_vtctl("ValidateKeyspace -ping-tablets test_keyspace")

    # check after all tablets are here and replication is fixed
    utils.validate_topology(ping_tablets=True)

    # shard 0: apply the schema using a complex schema upgrade, no
    # reparenting yet
    utils.run_vtctl(["ApplySchemaShard", "-sql=" + create_vt_select_test[0], "test_keyspace/0"], auto_log=True)

    # check all expected hosts have the change:
    # - master won't have it as it's a complex change
    check_tables(shard_0_master, 0)
    check_tables(shard_0_replica1, 1)
    check_tables(shard_0_replica2, 1)
    check_tables(shard_0_rdonly, 1)
    check_tables(shard_0_backup, 1)
    check_tables(shard_1_master, 0)
    check_tables(shard_1_replica1, 0)

    # shard 0: apply schema change to just master directly
    # (to test its state is not changed)
    utils.run_vtctl(
        ["ApplySchema", "-stop-replication", "-sql=" + create_vt_select_test[0], shard_0_master.tablet_alias],
        auto_log=True,
    )
    check_tables(shard_0_master, 1)

    # shard 0: apply new schema change, with reparenting
    utils.run_vtctl(
        [
            "ApplySchemaShard",
            "-new-parent=" + shard_0_replica1.tablet_alias,
            "-sql=" + create_vt_select_test[1],
            "test_keyspace/0",
        ],
        auto_log=True,
    )
    check_tables(shard_0_master, 1)
    check_tables(shard_0_replica1, 2)
    check_tables(shard_0_replica2, 2)
    check_tables(shard_0_rdonly, 2)
    check_tables(shard_0_backup, 2)

    # verify GetSchema --tables works
    out, err = utils.run_vtctl(
        "GetSchema --tables=vt_select_test0 " + shard_0_replica1.tablet_alias, log_level="INFO", trap_output=True
    )
    if not "vt_select_test0" in err or "vt_select_test1" in err:
        raise utils.TestError("Unexpected GetSchema --tables=vt_select_test0 output: %s" % err)

    # keyspace: try to apply a keyspace-wide schema change, should fail
    # as the preflight would be different in both shards
    out, err = utils.run_vtctl(
        ["ApplySchemaKeyspace", "-sql=" + create_vt_select_test[2], "test_keyspace"],
        log_level="INFO",
        trap_output=True,
        raise_on_error=False,
    )
    if err.find("ApplySchemaKeyspace Shard 1 has inconsistent schema") == -1:
        raise utils.TestError("Unexpected ApplySchemaKeyspace output: %s" % err)

    utils.run_vtctl("PurgeActions /zk/global/vt/keyspaces/test_keyspace/action")

    # shard 1: catch it up with simple updates
    utils.run_vtctl(
        ["ApplySchemaShard", "-simple", "-sql=" + create_vt_select_test[0], "test_keyspace/1"], auto_log=True
    )
    utils.run_vtctl(
        ["ApplySchemaShard", "-simple", "-sql=" + create_vt_select_test[1], "test_keyspace/1"], auto_log=True
    )
    check_tables(shard_1_master, 2)
    check_tables(shard_1_replica1, 2)

    # keyspace: apply a keyspace-wide simple schema change, should work now
    utils.run_vtctl(
        ["ApplySchemaKeyspace", "-simple", "-sql=" + create_vt_select_test[2], "test_keyspace"], auto_log=True
    )

    # check all expected hosts have the change
    check_tables(shard_0_master, 1)  # was stuck a long time ago as scrap
    check_tables(shard_0_replica1, 3)  # current master
    check_tables(shard_0_replica2, 3)
    check_tables(shard_0_rdonly, 3)
    check_tables(shard_0_backup, 3)
    check_tables(shard_1_master, 3)  # current master
    check_tables(shard_1_replica1, 3)

    # keyspace: apply a keyspace-wide complex schema change, should work too
    utils.run_vtctl(["ApplySchemaKeyspace", "-sql=" + create_vt_select_test[3], "test_keyspace"], auto_log=True)

    # check all expected hosts have the change:
    # - master won't have it as it's a complex change
    # - backup won't have it as IsReplicatingType is false
    check_tables(shard_0_master, 1)  # was stuck a long time ago as scrap
    check_tables(shard_0_replica1, 3)  # current master
    check_tables(shard_0_replica2, 4)
    check_tables(shard_0_rdonly, 4)
    check_tables(shard_0_backup, 4)
    check_tables(shard_1_master, 3)  # current master
    check_tables(shard_1_replica1, 4)

    # now test action log pruning
    oldLines = utils.zk_ls(shard_0_replica1.zk_tablet_path + "/actionlog")
    oldCount = len(oldLines)
    if utils.options.verbose:
        print "I have %u actionlog before" % oldCount
    if oldCount <= 5:
        raise utils.TestError("Not enough actionlog before: %u" % oldCount)

    utils.run_vtctl("PruneActionLogs -keep-count=5 /zk/*/vt/tablets/*/actionlog", auto_log=True)

    newLines = utils.zk_ls(shard_0_replica1.zk_tablet_path + "/actionlog")
    newCount = len(newLines)
    if utils.options.verbose:
        print "I have %u actionlog after" % newCount

    if newCount != 5:
        raise utils.TestError("Unexpected actionlog count after: %u" % newCount)
    if oldLines[-5:] != newLines:
        raise utils.TestError("Unexpected actionlog values:\n%s\n%s" % (" ".join(oldLines[-5:]), " ".join(newLines)))

    utils.pause("Look at schema now!")

    shard_0_master.kill_vttablet()
    shard_0_replica1.kill_vttablet()
    shard_0_replica2.kill_vttablet()
    shard_0_rdonly.kill_vttablet()
    shard_0_backup.kill_vttablet()
    shard_1_master.kill_vttablet()
    shard_1_replica1.kill_vttablet()
Code Example #9
File: schema.py  Project: zj8487/golang-stuff
def run_test_complex_schema():

    utils.run_vtctl('CreateKeyspace test_keyspace')

    shard_0_master.init_tablet('master', 'test_keyspace', '0')
    shard_0_replica1.init_tablet('replica', 'test_keyspace', '0')
    shard_0_replica2.init_tablet('replica', 'test_keyspace', '0')
    shard_0_rdonly.init_tablet('rdonly', 'test_keyspace', '0')
    shard_0_backup.init_tablet('backup', 'test_keyspace', '0')
    shard_1_master.init_tablet('master', 'test_keyspace', '1')
    shard_1_replica1.init_tablet('replica', 'test_keyspace', '1')

    utils.run_vtctl('RebuildShardGraph test_keyspace/0', auto_log=True)
    utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)

    # run checks now before we start the tablets
    utils.validate_topology()

    # create databases
    shard_0_master.create_db('vt_test_keyspace')
    shard_0_replica1.create_db('vt_test_keyspace')
    shard_0_replica2.create_db('vt_test_keyspace')
    shard_0_rdonly.create_db('vt_test_keyspace')
    shard_0_backup.create_db('vt_test_keyspace')
    shard_1_master.create_db('vt_test_keyspace')
    shard_1_replica1.create_db('vt_test_keyspace')

    # start the tablets
    shard_0_master.start_vttablet()
    shard_0_replica1.start_vttablet()
    shard_0_replica2.start_vttablet()
    shard_0_rdonly.start_vttablet()
    shard_0_backup.start_vttablet(wait_for_state="NOT_SERVING")
    shard_1_master.start_vttablet()
    shard_1_replica1.start_vttablet()

    # make sure all replication is good
    for t in [
            shard_0_master, shard_0_replica1, shard_0_replica2, shard_0_rdonly,
            shard_0_backup, shard_1_master, shard_1_replica1
    ]:
        t.reset_replication()
    utils.run_vtctl('ReparentShard -force test_keyspace/0 ' +
                    shard_0_master.tablet_alias,
                    auto_log=True)
    utils.run_vtctl('ReparentShard -force test_keyspace/1 ' +
                    shard_1_master.tablet_alias,
                    auto_log=True)
    utils.run_vtctl('ValidateKeyspace -ping-tablets test_keyspace')

    # check after all tablets are here and replication is fixed
    utils.validate_topology(ping_tablets=True)

    # shard 0: apply the schema using a complex schema upgrade, no
    # reparenting yet
    utils.run_vtctl([
        'ApplySchemaShard', '-sql=' + create_vt_select_test[0],
        'test_keyspace/0'
    ],
                    auto_log=True)

    # check all expected hosts have the change:
    # - master won't have it as it's a complex change
    check_tables(shard_0_master, 0)
    check_tables(shard_0_replica1, 1)
    check_tables(shard_0_replica2, 1)
    check_tables(shard_0_rdonly, 1)
    check_tables(shard_0_backup, 1)
    check_tables(shard_1_master, 0)
    check_tables(shard_1_replica1, 0)

    # shard 0: apply schema change to just master directly
    # (to test its state is not changed)
    utils.run_vtctl([
        'ApplySchema', '-stop-replication', '-sql=' + create_vt_select_test[0],
        shard_0_master.tablet_alias
    ],
                    auto_log=True)
    check_tables(shard_0_master, 1)

    # shard 0: apply new schema change, with reparenting
    utils.run_vtctl([
        'ApplySchemaShard', '-new-parent=' + shard_0_replica1.tablet_alias,
        '-sql=' + create_vt_select_test[1], 'test_keyspace/0'
    ],
                    auto_log=True)
    check_tables(shard_0_master, 1)
    check_tables(shard_0_replica1, 2)
    check_tables(shard_0_replica2, 2)
    check_tables(shard_0_rdonly, 2)
    check_tables(shard_0_backup, 2)

    # verify GetSchema --tables works
    out, err = utils.run_vtctl('GetSchema --tables=vt_select_test0 ' +
                               shard_0_replica1.tablet_alias,
                               trap_output=True)
    if not "vt_select_test0" in err or "vt_select_test1" in err:
        raise utils.TestError(
            'Unexpected GetSchema --tables=vt_select_test0 output: %s' % err)

    # keyspace: try to apply a keyspace-wide schema change, should fail
    # as the preflight would be different in both shards
    out, err = utils.run_vtctl([
        'ApplySchemaKeyspace', '-sql=' + create_vt_select_test[2],
        'test_keyspace'
    ],
                               trap_output=True,
                               raise_on_error=False)
    if err.find('ApplySchemaKeyspace Shard 1 has inconsistent schema') == -1:
        raise utils.TestError('Unexpected ApplySchemaKeyspace output: %s' %
                              err)

    utils.run_vtctl(
        'PurgeActions /zk/global/vt/keyspaces/test_keyspace/action')

    # shard 1: catch it up with simple updates
    utils.run_vtctl([
        'ApplySchemaShard', '-simple', '-sql=' + create_vt_select_test[0],
        'test_keyspace/1'
    ],
                    auto_log=True)
    utils.run_vtctl([
        'ApplySchemaShard', '-simple', '-sql=' + create_vt_select_test[1],
        'test_keyspace/1'
    ],
                    auto_log=True)
    check_tables(shard_1_master, 2)
    check_tables(shard_1_replica1, 2)

    # keyspace: apply a keyspace-wide simple schema change, should work now
    utils.run_vtctl([
        'ApplySchemaKeyspace', '-simple', '-sql=' + create_vt_select_test[2],
        'test_keyspace'
    ],
                    auto_log=True)

    # check all expected hosts have the change
    check_tables(shard_0_master, 1)  # was stuck a long time ago as scrap
    check_tables(shard_0_replica1, 3)  # current master
    check_tables(shard_0_replica2, 3)
    check_tables(shard_0_rdonly, 3)
    check_tables(shard_0_backup, 3)
    check_tables(shard_1_master, 3)  # current master
    check_tables(shard_1_replica1, 3)

    # keyspace: apply a keyspace-wide complex schema change, should work too
    utils.run_vtctl([
        'ApplySchemaKeyspace', '-sql=' + create_vt_select_test[3],
        'test_keyspace'
    ],
                    auto_log=True)

    # check all expected hosts have the change:
    # - master won't have it as it's a complex change
    # - backup won't have it as IsReplicatingType is false
    check_tables(shard_0_master, 1)  # was stuck a long time ago as scrap
    check_tables(shard_0_replica1, 3)  # current master
    check_tables(shard_0_replica2, 4)
    check_tables(shard_0_rdonly, 4)
    check_tables(shard_0_backup, 4)
    check_tables(shard_1_master, 3)  # current master
    check_tables(shard_1_replica1, 4)

    # now test action log pruning
    oldLines = utils.zk_ls(shard_0_replica1.zk_tablet_path + '/actionlog')
    oldCount = len(oldLines)
    if utils.options.verbose:
        print "I have %u actionlog before" % oldCount
    if oldCount <= 5:
        raise utils.TestError('Not enough actionlog before: %u' % oldCount)

    utils.run_vtctl(
        'PruneActionLogs -keep-count=5 /zk/*/vt/tablets/*/actionlog',
        auto_log=True)

    newLines = utils.zk_ls(shard_0_replica1.zk_tablet_path + '/actionlog')
    newCount = len(newLines)
    if utils.options.verbose:
        print "I have %u actionlog after" % newCount

    if newCount != 5:
        raise utils.TestError('Unexpected actionlog count after: %u' %
                              newCount)
    if oldLines[-5:] != newLines:
        raise utils.TestError('Unexpected actionlog values:\n%s\n%s' %
                              (' '.join(oldLines[-5:]), ' '.join(newLines)))

    utils.pause("Look at schema now!")

    shard_0_master.kill_vttablet()
    shard_0_replica1.kill_vttablet()
    shard_0_replica2.kill_vttablet()
    shard_0_rdonly.kill_vttablet()
    shard_0_backup.kill_vttablet()
    shard_1_master.kill_vttablet()
    shard_1_replica1.kill_vttablet()
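Examples #4 and #5 end with a single tablet.kill_tablets([...]) call instead of seven individual kill_vttablet calls; assuming that helper also exists in this tree, the teardown above could collapse to:

    # Batch shutdown, mirroring the pattern used in Examples #4 and #5.
    tablet.kill_tablets([shard_0_master, shard_0_replica1, shard_0_replica2,
                         shard_0_rdonly, shard_0_backup, shard_1_master,
                         shard_1_replica1])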