def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    remote = spicerack.remote()

    if args.live_test:
        logger.info('Inverting DC to perform the wipe and warmup in %s (passive DC)', args.dc_from)
        datacenter = args.dc_from
    else:
        datacenter = args.dc_to

    ask_confirmation('Are you sure you want to wipe and warm up caches in {dc}?'.format(dc=datacenter))

    logger.info('Restarting MediaWiki HHVM in %s (wipes APC)', datacenter)
    remote.query('A:all-mw-' + datacenter).run_sync('service hhvm restart', batch_size=25)

    logger.info('Running warmup script in %s', datacenter)
    warmup_dir = '/var/lib/mediawiki-cache-warmup'
    # 'spread' distributes the cluster-wide URL list across the service endpoint;
    # 'clone' runs the per-server URL list against every host in the appserver cluster.
    memc_warmup = "nodejs {dir}/warmup.js {dir}/urls-cluster.txt spread appservers.svc.{dc}.wmnet".format(
        dir=warmup_dir, dc=datacenter)
    appserver_warmup = "nodejs {dir}/warmup.js {dir}/urls-server.txt clone appserver {dc}".format(
        dir=warmup_dir, dc=datacenter)

    maintenance_host = spicerack.mediawiki().get_maintenance_host(datacenter)
    maintenance_host.run_sync(memc_warmup, appserver_warmup)

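# Illustrative only: assuming datacenter='codfw' (hypothetical value), the two
# warmup commands above render as:
#   nodejs /var/lib/mediawiki-cache-warmup/warmup.js /var/lib/mediawiki-cache-warmup/urls-cluster.txt spread appservers.svc.codfw.wmnet
#   nodejs /var/lib/mediawiki-cache-warmup/warmup.js /var/lib/mediawiki-cache-warmup/urls-server.txt clone appserver codfw
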
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Switch MediaWiki active datacenter to %s', args.dc_to)

    records = ('api-rw', 'appservers-rw', 'jobrunner', 'videoscaler')
    dnsdisc_records = spicerack.discovery(*records)
    mediawiki = spicerack.mediawiki()

    # Pool DNS discovery records on the new dc.
    # This will NOT trigger confd to change the DNS admin state, as that would cause a validation error.
    dnsdisc_records.pool(args.dc_to)

    # Switch the MediaWiki master datacenter
    mediawiki.set_master_datacenter(args.dc_to)

    # Depool DNS discovery records on the old dc; confd will apply the change
    dnsdisc_records.depool(args.dc_from)

    # Verify that the IP of each record matches the expected one
    for record in records:
        name = record.replace('-rw', '')
        dnsdisc_records.check_record(
            record, '{name}.svc.{dc_to}.wmnet'.format(name=name, dc_to=args.dc_to))

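# A minimal sketch of the record-to-FQDN mapping verified in the loop above,
# assuming dc_to='codfw' (hypothetical value); the '-rw' suffix is stripped first:
# >>> [r.replace('-rw', '') + '.svc.codfw.wmnet'
#      for r in ('api-rw', 'appservers-rw', 'jobrunner', 'videoscaler')]
# ['api.svc.codfw.wmnet', 'appservers.svc.codfw.wmnet',
#  'jobrunner.svc.codfw.wmnet', 'videoscaler.svc.codfw.wmnet']
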
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Rolling restart of Parsoid in %s and %s', args.dc_from, args.dc_to)

    remote_hosts = spicerack.remote().query('O:parsoid')
    # Restart one host at a time, sleeping 15s between hosts, so the Parsoid
    # cluster keeps serving traffic throughout the rolling restart.
    remote_hosts.run_sync('restart-parsoid', batch_size=1, batch_sleep=15.0)

def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Setting all core DB masters in %s to read-write mode', args.dc_to)

    mysql = spicerack.mysql()
    mysql.set_core_masters_readwrite(args.dc_to)

def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    redis = spicerack.redis_cluster('sessions')

    logger.info('Stopping replication in %s for the sessions Redis cluster', args.dc_to)
    redis.stop_replica(args.dc_to)

    logger.info('Starting replication %s => %s for the sessions Redis cluster', args.dc_to, args.dc_from)
    redis.start_replica(args.dc_from, args.dc_to)

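# Illustrative only, assuming dc_from='eqiad' and dc_to='codfw' (hypothetical
# values): stop_replica('codfw') detaches the codfw replicas from their eqiad
# masters, and start_replica('eqiad', 'codfw') re-attaches eqiad as a replica
# of codfw, inverting the replication direction to codfw => eqiad.
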
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Starting MediaWiki maintenance jobs in %s', args.dc_to)

    # Re-enable Puppet; a successful run restores the maintenance cron jobs in the new DC.
    mw_maintenance = spicerack.remote().query('A:mw-maintenance')
    mw_maintenance.run_sync('run-puppet-agent --enable "{message}"'.format(message=PUPPET_REASON))

    mediawiki = spicerack.mediawiki()
    mediawiki.check_cronjobs_enabled(args.dc_to)
    mediawiki.check_cronjobs_disabled(args.dc_from)

def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Setting MediaWiki to read-write in %s', args.dc_to)

    mediawiki = spicerack.mediawiki()

    prefix = ''
    if args.live_test:
        prefix = '[DRY-RUN] '

    mediawiki.set_readwrite(args.dc_to)
    spicerack.irc_logger.info('%sMediaWiki read-only period ends at: %s', prefix, datetime.utcnow())

def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    records = ('api-rw', 'appservers-rw', 'jobrunner', 'videoscaler')
    logger.info('Restoring DNS Discovery TTL to 300 for records: %s', records)

    dnsdisc_records = spicerack.discovery(*records)
    dnsdisc_records.update_ttl(300)

    logger.info('Removing stale confd files generated in phase 5')
    command = 'rm -fv /var/run/confd-template/.discovery-{{{records}}}.state*.err'.format(
        records=','.join(records))
    spicerack.remote().query('C:authdns').run_sync(command)

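# Illustrative only: with the four records above, the generated command is
#   rm -fv /var/run/confd-template/.discovery-{api-rw,appservers-rw,jobrunner,videoscaler}.state*.err
# which the remote shell brace-expands into one path glob per record.
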
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Setting all core DB masters in %s to read-only mode and verifying those in %s',
                args.dc_from, args.dc_to)

    mysql = spicerack.mysql()
    if args.live_test:
        logger.info('Skipping verification that core DB masters in %s are in read-only mode', args.dc_to)
    else:
        mysql.verify_core_masters_readonly(args.dc_to, True)
    mysql.set_core_masters_readonly(args.dc_from)

    logger.info('Checking that all core masters in %s are in sync with the core masters in %s.',
                args.dc_to, args.dc_from)
    mysql.check_core_masters_in_sync(args.dc_from, args.dc_to)

def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    remote = spicerack.remote()

    logger.info('Disabling Puppet on MediaWiki maintenance hosts in %s and %s', args.dc_from, args.dc_to)
    remote.query('A:mw-maintenance').run_sync(
        'disable-puppet "{message}"'.format(message=PUPPET_REASON))

    logger.info('Disabling Puppet on text caches in %s and %s', args.dc_from, args.dc_to)
    target = remote.query(
        'A:cp-text and (A:cp-{dc_from} or A:cp-{dc_to}) and not A:cp-canary'.format(
            dc_from=args.dc_from, dc_to=args.dc_to))
    target.run_sync('disable-puppet "{message}"'.format(message=PUPPET_REASON))

    logger.info('The Puppet changes for text caches can now be merged.')

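# Illustrative only: assuming dc_from='eqiad' and dc_to='codfw' (hypothetical
# values), the Cumin query above renders as
#   A:cp-text and (A:cp-eqiad or A:cp-codfw) and not A:cp-canary
# i.e. all text caches in either datacenter, minus the canary hosts.
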
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Updating the Tendril tree to start from the core DB masters in %s', args.dc_to)

    mysql = spicerack.mysql()
    tendril_host = mysql.get_dbs('P{O:mariadb::misc::tendril} and A:eqiad')
    for section in CORE_SECTIONS:
        # get_core_dbs() ensures that only one host is matched
        master = mysql.get_core_dbs(
            datacenter=args.dc_to, replication_role='master', section=section).hosts[0]
        query = ("UPDATE shards SET master_id = (SELECT id FROM servers WHERE host = '{master}') "  # nosec
                 "WHERE name = '{section}'").format(master=master, section=section)
        tendril_host.run_query(query, database='tendril')

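# Example of a generated query, with hypothetical values master='db1100' and
# section='s1':
#   UPDATE shards SET master_id = (SELECT id FROM servers WHERE host = 'db1100') WHERE name = 's1'
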
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Updating traffic routing to MediaWiki backends from %s to %s', args.dc_from, args.dc_to)
    remote = spicerack.remote()

    logger.info('Running Puppet on text caches in %s', args.dc_to)
    remote_output = remote.query(
        'A:cp-text and A:cp-{dc_to} and not A:cp-canary'.format(dc_to=args.dc_to)).run_sync(ENABLE_COMMAND)
    _check_changes(remote_output, EXPECTED_DC_TO, args.dc_from, args.dc_to)
    logger.info('Text caches traffic is now active-active, running Puppet in %s', args.dc_from)

    remote_output = remote.query(
        'A:cp-text and A:cp-{dc_from} and not A:cp-canary'.format(dc_from=args.dc_from)).run_sync(ENABLE_COMMAND)
    _check_changes(remote_output, EXPECTED_DC_FROM, args.dc_from, args.dc_to)
    logger.info('Text caches traffic is now active only in %s', args.dc_to)

def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Setting MediaWiki to read-only in %s and %s', args.dc_from, args.dc_to)

    mediawiki = spicerack.mediawiki()
    if args.live_test:
        logger.info('Skipping setting MediaWiki read-only in %s', args.dc_to)
        prefix = '[DRY-RUN] '
    else:
        mediawiki.set_readonly(args.dc_to, args.ro_reason)
        prefix = ''

    spicerack.irc_logger.info('%sMediaWiki read-only period starts at: %s', prefix, datetime.utcnow())
    mediawiki.set_readonly(args.dc_from, args.ro_reason)

    logger.info('Sleeping 10s to allow in-flight requests to complete')
    time.sleep(10)

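# Illustrative IRC log line during a live test (hypothetical timestamp):
#   [DRY-RUN] MediaWiki read-only period starts at: 2018-09-12 14:30:00.000000
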
def main(args, spicerack):
    """Required by Spicerack API."""
    args = parse_args(__name__, __title__, args)
    logger.info('Stopping MediaWiki maintenance jobs in %s', args.dc_from)
    spicerack.mediawiki().stop_cronjobs(args.dc_from)