def _before_cluster_sync(self):
    self.announce('Started scap: %s', self.arguments.message)

    # Validate php syntax of wmf-config and multiversion
    lint.check_valid_syntax(
        ['%(stage_dir)s/wmf-config' % self.config,
         '%(stage_dir)s/multiversion' % self.config],
        utils.cpus_for_jobs())
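# A minimal sketch of what a parallel syntax check like
# lint.check_valid_syntax() could look like: collect the .php files under
# the given paths and run `php -l` on them with a worker pool sized by the
# cores argument. This is an illustration only, not scap's actual lint
# implementation; the helper names `_php_lint_sketch` and `_lint_one` are
# hypothetical.
import multiprocessing
import os
import subprocess


def _lint_one(path):
    # `php -l` exits non-zero on a parse error
    return subprocess.call(['php', '-l', path],
                           stdout=subprocess.DEVNULL,
                           stderr=subprocess.DEVNULL) == 0


def _php_lint_sketch(paths, procs):
    files = []
    for path in paths:
        if os.path.isfile(path):
            files.append(path)
            continue
        for root, _, names in os.walk(path):
            files.extend(os.path.join(root, name)
                         for name in names if name.endswith('.php'))
    pool = multiprocessing.Pool(procs)
    try:
        results = pool.map(_lint_one, files)
    finally:
        pool.close()
        pool.join()
    bad = [f for f, ok in zip(files, results) if not ok]
    if bad:
        raise ValueError('php syntax errors in: %s' % ', '.join(bad))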
def main(self, *extra_args):
    cdb_dir = os.path.realpath(self.arguments.directory)
    upstream_dir = os.path.join(cdb_dir, 'upstream')
    use_cores = self.arguments.threads

    if not os.path.isdir(cdb_dir):
        raise IOError(errno.ENOENT, 'Directory does not exist', cdb_dir)

    if use_cores < 1:
        use_cores = utils.cpus_for_jobs()

    if not os.path.isdir(upstream_dir):
        os.mkdir(upstream_dir)

    tasks.refresh_cdb_json_files(cdb_dir, use_cores, self.verbose)
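# The refresh step pairs each CDB file with a JSON mirror plus an .md5
# checksum so later runs can detect staleness (see "Generating JSON
# versions and md5 files" further down). A minimal sketch of the JSON/md5
# bookkeeping, assuming the CDB contents have already been loaded into a
# dict (the CDB reading itself needs a cdb library and is omitted);
# `write_json_and_md5` is a hypothetical helper name.
import hashlib
import json
import os


def write_json_and_md5(upstream_dir, cdb_name, items):
    json_path = os.path.join(
        upstream_dir, os.path.basename(cdb_name) + '.json')
    with open(json_path, 'w') as f:
        json.dump(items, f)
    # Checksum the JSON we just wrote so consumers can cheaply compare
    with open(json_path, 'rb') as f:
        digest = hashlib.md5(f.read()).hexdigest()
    with open(json_path + '.md5', 'w') as f:
        f.write(digest)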
def _before_cluster_sync(self):
    # assert file exists
    abspath = os.path.join(self.config['stage_dir'], self.arguments.file)
    if not os.path.exists(abspath):
        raise IOError(errno.ENOENT, 'File/directory not found', abspath)

    relpath = os.path.relpath(abspath, self.config['stage_dir'])
    if os.path.isdir(abspath):
        relpath = '%s/***' % relpath
    self.include = relpath

    # Notify when syncing a symlink.
    if os.path.islink(abspath):
        symlink_dest = os.path.realpath(abspath)
        self.get_logger().info("%s: syncing symlink, not its target [%s]",
                               abspath, symlink_dest)
    else:
        lint.check_valid_syntax(abspath, utils.cpus_for_jobs())
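# Context for the '%s/***' suffix above: in rsync filter syntax,
# 'dir/***' matches the directory itself plus everything beneath it, so a
# directory sync collapses into a single include rule. A hedged sketch of
# how such an include might land on an rsync command line; the exact
# rsync invocation scap builds is not shown in this snippet and
# `build_rsync_args_sketch` is a hypothetical name.
def build_rsync_args_sketch(include):
    return [
        'rsync', '--archive', '--delete-delay',
        '--include=/%s' % include,  # e.g. 'wmf-config/***'
        '--exclude=*',              # everything else is skipped
    ]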
def main(self, *extra_args):
    user = '******'
    source_tree = 'deploy'
    root_dir = self.config['deploy_dir']

    if self.arguments.staging:
        user = '******'
        source_tree = 'stage'
        root_dir = self.config['stage_dir']

    self._run_as(user)
    self._assert_current_user(user)

    # Leave some of the cores free for apache processes
    use_cores = utils.cpus_for_jobs()

    versions = self.active_wikiversions(source_tree)

    if self.arguments.version:
        version = self.arguments.version
        if version.startswith('php-'):
            version = version[4:]

        # Assert version is active
        if version not in versions:
            raise IOError(errno.ENOENT, 'Version not active', version)

        # Replace dict of active versions with the single version selected
        versions = {version: versions[version]}

    # Rebuild the CDB files from the JSON versions
    for version in versions.keys():
        cache_dir = os.path.join(
            root_dir, 'php-%s' % version, 'cache', 'l10n')
        tasks.merge_cdb_updates(
            cache_dir, use_cores, True, self.arguments.mute)
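# Example of the version normalization above: '1.31.0-wmf.4' and
# 'php-1.31.0-wmf.4' select the same tree, since the 'php-' prefix is
# stripped before the active-version lookup. A quick illustration with a
# hypothetical helper name:
def normalize_version_sketch(version):
    if version.startswith('php-'):
        version = version[4:]
    return version


assert normalize_version_sketch('php-1.31.0-wmf.4') == '1.31.0-wmf.4'
assert normalize_version_sketch('1.31.0-wmf.4') == '1.31.0-wmf.4'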
def append_jobs_arg(cmd):
    if GIT_VERSION[0] > 2 or (GIT_VERSION[0] == 2 and GIT_VERSION[1] > 9):
        cmd.append('--jobs')
        cmd.append(str(utils.cpus_for_jobs()))
    return cmd
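# Example of how append_jobs_arg() composes with a git call: on git newer
# than 2.9 the submodule fetch fans out across utils.cpus_for_jobs()
# parallel jobs; on older gits the command is returned unchanged.
# GIT_VERSION is assumed to be a (major, minor) tuple parsed elsewhere
# from `git version`.
cmd = ['/usr/bin/git', 'submodule', 'update', '--init']
cmd = append_jobs_arg(cmd)
# With git 2.11 and 8 usable cores this yields:
# ['/usr/bin/git', 'submodule', 'update', '--init', '--jobs', '8']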
def update_localization_cache(version, wikidb, verbose, cfg, logger=None):
    """
    Update the localization cache for a given MW version.

    :param version: MediaWiki version
    :param wikidb: Wiki running given version
    :param verbose: Provide verbose output
    :param cfg: Global configuration
    """
    # Calculate the number of parallel threads
    # Leave a couple of cores free for other stuff
    use_cores = utils.cpus_for_jobs()

    verbose_messagelist = ''
    force_rebuild = False
    quiet_rebuild = True
    if verbose:
        verbose_messagelist = '--verbose'
        quiet_rebuild = False

    extension_messages = os.path.join(
        cfg['stage_dir'], 'wmf-config',
        'ExtensionMessages-%s.php' % version)

    if not os.path.exists(extension_messages):
        # Touch the extension_messages file to prevent php require errors
        logger.info('Creating empty %s', extension_messages)
        open(extension_messages, 'a').close()

    cache_dir = os.path.join(
        cfg['stage_dir'], 'php-%s' % version, 'cache', 'l10n')

    if not os.path.exists(os.path.join(cache_dir, 'l10n_cache-en.cdb')):
        # mergeMessageFileList.php needs a l10n file
        logger.info('Bootstrapping l10n cache for %s', version)
        _call_rebuildLocalisationCache(
            wikidb, cache_dir, use_cores, lang='en', quiet=True)
        # Force subsequent cache rebuild to overwrite bootstrap version
        force_rebuild = True

    logger.info('Updating ExtensionMessages-%s.php', version)
    new_extension_messages = subprocess.check_output(
        'sudo -u www-data -n -- /bin/mktemp', shell=True).strip()

    # attempt to read extension-list from the branch instead of wmf-config
    ext_list = os.path.join(
        cfg['stage_dir'], "php-%s" % version, "extension-list")

    if not os.path.isfile(ext_list):
        # fall back to the old location in wmf-config
        ext_list = "%s/wmf-config/extension-list" % cfg['stage_dir']

    utils.sudo_check_call(
        'www-data',
        '/usr/local/bin/mwscript mergeMessageFileList.php '
        '--wiki="%s" --list-file="%s" '
        '--output="%s" %s' % (
            wikidb, ext_list, new_extension_messages, verbose_messagelist))

    utils.sudo_check_call(
        'www-data', 'chmod 0664 "%s"' % new_extension_messages)
    logger.debug('Copying %s to %s' % (
        new_extension_messages, extension_messages))
    shutil.copyfile(new_extension_messages, extension_messages)
    utils.sudo_check_call('www-data', 'rm "%s"' % new_extension_messages)

    # Update ExtensionMessages-*.php in the local copy.
    deploy_dir = os.path.realpath(cfg['deploy_dir'])
    stage_dir = os.path.realpath(cfg['stage_dir'])
    if stage_dir != deploy_dir:
        logger.debug('Copying ExtensionMessages-*.php to local copy')
        utils.sudo_check_call(
            'mwdeploy',
            'cp "%s" "%s/wmf-config/"' % (
                extension_messages, cfg['deploy_dir']))

    # Rebuild all the CDB files for each language
    logger.info(
        'Updating LocalisationCache for %s '
        'using %s thread(s)' % (version, use_cores))
    _call_rebuildLocalisationCache(
        wikidb, cache_dir, use_cores,
        force=force_rebuild, quiet=quiet_rebuild)

    # Include JSON versions of the CDB files and add MD5 files
    logger.info('Generating JSON versions and md5 files')
    scap_path = os.path.join(os.path.dirname(sys.argv[0]), 'scap')
    utils.sudo_check_call(
        'l10nupdate',
        '%s cdb-json-refresh '
        '--directory="%s" --threads=%s %s' % (
            scap_path, cache_dir, use_cores, verbose_messagelist))
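# _call_rebuildLocalisationCache() is used twice above but not shown in
# this snippet. A plausible sketch, assuming it shells out to MediaWiki's
# rebuildLocalisationCache.php via mwscript with the same sudo pattern
# used elsewhere in this file; the exact flags scap passes are
# assumptions inferred from the call sites (lang=, force=, quiet=).
def _call_rebuildLocalisationCache_sketch(
        wikidb, out_dir, use_cores, lang=None, force=False, quiet=False):
    args = ['--wiki="%s"' % wikidb,
            '--outdir="%s"' % out_dir,
            '--threads=%s' % use_cores]
    if lang:
        args.append('--lang=%s' % lang)
    if force:
        args.append('--force')
    if quiet:
        args.append('--quiet')
    utils.sudo_check_call(
        'www-data',
        '/usr/local/bin/mwscript rebuildLocalisationCache.php ' +
        ' '.join(args))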
def test_cpus_for_jobs():
    cpus = utils.cpus_for_jobs()
    assert cpus > 0
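# The contract this test pins down is just "at least one core". A minimal
# sketch of what cpus_for_jobs() might compute, assuming the "leave a
# couple of cores free" intent mentioned in the comments above; the exact
# reservation scap applies is an assumption here.
import multiprocessing


def cpus_for_jobs_sketch():
    return max(multiprocessing.cpu_count() - 2, 1)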
def canary_checks(self, canaries=None, timer=None):
    """
    Run canary checks

    :param canaries: Iterable of canary servers to check
    :param timer: log.Timer
    :raises RuntimeError: on canary check failure
    """
    if not canaries:
        return

    # If more than 1/4 of the canaries failed, stop deployment
    max_failed_canaries = max(len(canaries) / 4, 1)

    swagger_url = self.config['mediawiki_canary_swagger_url']
    spec_path = self.config['mediawiki_canary_swagger_spec_path']

    succeeded, failed = tasks.endpoint_canary_checks(
        canaries,
        swagger_url,
        spec_path,
        cores=utils.cpus_for_jobs(),
    )

    if failed > max_failed_canaries:
        canary_fail_msg = (
            'Scap failed!: {}/{} canaries failed their endpoint checks '
            '({})').format(failed, len(canaries), swagger_url)
        self.announce(canary_fail_msg)
        raise RuntimeError(canary_fail_msg)

    time_since_sync = 0
    if timer:
        time_since_sync = timer.mark('Canary Endpoint Check Complete')

    # Logged errors need some time to show up
    canary_wait_time = self.config['canary_wait_time']
    remaining_wait_time = canary_wait_time - time_since_sync

    # If the canary endpoint check took less than the wait time, we
    # should wait longer
    if remaining_wait_time > 0:
        self.get_logger().info('Waiting for canary traffic...')
        time.sleep(remaining_wait_time)
    # Otherwise the canary endpoint check took more than the wait time,
    # so we should adjust the logstash canary delay
    else:
        canary_wait_time = time_since_sync

    logstash_canary_checks = {
        'service': self.config['canary_service'],
        'threshold': self.config['canary_threshold'],
        'logstash': self.config['logstash_host'],
        'delay': canary_wait_time,
        'cores': utils.cpus_for_jobs(),
    }

    succeeded, failed = tasks.logstash_canary_checks(
        canaries, **logstash_canary_checks)

    if failed > max_failed_canaries:
        canary_fail_msg = (
            'scap failed: average error rate on {}/{} '
            'canaries increased by 10x '
            '(rerun with --force to override this check, '
            'see {} for details)'.format(
                failed, len(canaries),
                self.config['canary_dashboard_url']))
        self.announce(canary_fail_msg)
        raise RuntimeError(canary_fail_msg)

    # If some canaries failed, explain why we didn't raise a
    # RuntimeError - T173146
    if failed > 0:
        self.get_logger().info(
            'Canary error check failed for {} canaries, less than '
            'threshold to halt deployment ({}/{}), see {} for '
            'details. Continuing...'.format(
                failed,
                max_failed_canaries + 1,  # + 1 since we use > to compare
                len(canaries),
                self.config['canary_dashboard_url']))
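# Worked example of the threshold arithmetic above: with 20 canaries,
# max_failed_canaries = max(20 / 4, 1) = 5, and deployment halts only on
# `failed > max_failed_canaries`, i.e. at 6 or more failures. That strict
# comparison is why the final log message reports the halt threshold as
# max_failed_canaries + 1.
canaries = ['canary%d' % i for i in range(20)]
max_failed_canaries = max(len(canaries) / 4, 1)
assert max_failed_canaries == 5
assert not (5 > max_failed_canaries)  # 5 failures: deployment continues
assert 6 > max_failed_canaries        # 6 failures: deployment halts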