def upload(self, locales=None, force=True, verbose=False, save_stats=True,
           download=False):
    """Upload translation catalogs to the remote translator service.

    Args:
        locales: Locales to upload; defaults to every locale in the pod.
        force: Skip the interactive confirmation prompt when True.
        verbose: Pretty-print per-locale stats after uploading.
        save_stats: Persist the resulting stats via ``self.save_stats``.
        download: Download existing translator data first (only when a
            previous stats file exists).

    Returns:
        Sorted list of per-locale upload stats, or None when aborted or
        there is nothing to upload.
    """
    source_lang = self.pod.podspec.default_locale
    locales = locales or self.pod.catalogs.list_locales()
    stats = []
    num_files = len(locales)
    if not locales:
        self.pod.logger.info('No locales to upload.')
        return
    # Pull down remote state first so the upload does not clobber it.
    if download and self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH):
        self.download(locales=locales, save_stats=save_stats)
    if not force:
        if (self.has_immutable_translation_resources
                and self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH)):
            text = 'Found existing translator data in: {}'
            self.pod.logger.info(
                text.format(Translator.TRANSLATOR_STATS_PATH))
            text = 'This will be updated with new data after the upload is complete.'
            self.pod.logger.info(text)
        text = 'Proceed to upload {} translation catalogs?'
        text = text.format(num_files)
        if not utils.interactive_confirm(text):
            self.pod.logger.info('Aborted.')
            return
    text = 'Uploading translations: %(value)d/{} (in %(elapsed)s)'
    widgets = [progressbar.FormatLabel(text.format(num_files))]
    bar = progressbar.ProgressBar(widgets=widgets, maxval=num_files)
    bar.start()
    threads = []

    def _do_upload(locale):
        # Runs on a worker thread; list.append is safe under the GIL.
        catalog = self.pod.catalogs.get(locale)
        stat = self._upload_catalog(catalog, source_lang)
        stats.append(stat)

    for i, locale in enumerate(locales):
        thread = utils.ProgressBarThread(
            bar, True, target=_do_upload, args=(locale,))
        threads.append(thread)
        thread.start()
        # Perform the first operation synchronously to avoid oauth2
        # refresh locking issues.
        if i == 0:
            thread.join()
    for i, thread in enumerate(threads):
        # Thread 0 was already joined above.
        if i > 0:
            thread.join()
    bar.finish()
    stats = sorted(stats, key=lambda stat: stat.lang)
    if verbose:
        self.pretty_print_stats(stats)
    if save_stats:
        self.save_stats(stats)
    return stats
def upload(self, locales=None, force=True, verbose=False, save_stats=True,
           prune=False):
    """Upload translation catalogs to the remote translator service.

    Uses a single batched upload when the translator stores multiple
    languages in one resource; otherwise uploads each catalog on its own
    progress-bar thread.

    Args:
        locales: Locales to upload; defaults to every locale in the pod.
        force: Skip the interactive confirmation prompt when True.
        verbose: Pretty-print per-locale stats after uploading.
        save_stats: Persist the resulting stats via ``self.save_stats``.
        prune: Forwarded to the catalog upload helpers.

    Returns:
        Sorted list of per-locale upload stats, or None when aborted or
        there is nothing to upload.
    """
    source_lang = self.pod.podspec.default_locale
    locales = locales or self.pod.catalogs.list_locales()
    stats = []
    num_files = len(locales)
    if not locales:
        self.pod.logger.info('No locales to upload.')
        return
    if not force:
        if (self.has_immutable_translation_resources
                and self.pod.file_exists(Translator.TRANSLATOR_STATS_PATH)):
            text = 'Found existing translator data in: {}'
            self.pod.logger.info(
                text.format(Translator.TRANSLATOR_STATS_PATH))
            text = 'This will be updated with new data after the upload is complete.'
            self.pod.logger.info(text)
        text = 'Proceed to upload {} translation catalogs?'
        text = text.format(num_files)
        if not utils.interactive_confirm(text):
            self.pod.logger.info('Aborted.')
            return
    if self.has_multiple_langs_in_one_resource:
        # One resource holds every language: upload all catalogs at once.
        catalogs_to_upload = []
        for locale in locales:
            catalog_to_upload = self.pod.catalogs.get(locale)
            if catalog_to_upload:
                catalogs_to_upload.append(catalog_to_upload)
        stats = self._upload_catalogs(
            catalogs_to_upload, source_lang, prune=prune)
    else:
        text = 'Uploading translations: %(value)d/{} (in %(elapsed)s)'
        widgets = [progressbar.FormatLabel(text.format(num_files))]
        bar = progressbar.ProgressBar(widgets=widgets, maxval=num_files)
        bar.start()
        threads = []

        def _do_upload(locale):
            # Runs on a worker thread; list.append is safe under the GIL.
            catalog = self.pod.catalogs.get(locale)
            stat = self._upload_catalog(catalog, source_lang, prune=prune)
            stats.append(stat)

        for i, locale in enumerate(locales):
            thread = utils.ProgressBarThread(
                bar, True, target=_do_upload, args=(locale,))
            threads.append(thread)
            thread.start()
            # Perform the first operation synchronously to avoid oauth2
            # refresh locking issues.
            if i == 0:
                thread.join()
        for i, thread in enumerate(threads):
            # Thread 0 was already joined above.
            if i > 0:
                thread.join()
        bar.finish()
    stats = sorted(stats, key=lambda stat: stat.lang)
    if verbose:
        self.pretty_print_stats(stats)
    if save_stats:
        self.save_stats(stats)
    return stats
def deploy(self, pod):
    """Render the pod and push changed files to the destination.

    Diffs the freshly rendered content against the index stored at the
    destination and applies only the differences.

    Args:
        pod: Pod whose rendered output (``pod.dump()``) is deployed.

    Returns:
        The applied diffs, or None when there was nothing to deploy or
        the launch was aborted.
    """
    logging.info('Deploying to: {}'.format(self.get_destination_address()))
    self.prelaunch()
    deployed_index = self.get_index_at_destination()
    paths_to_content = pod.dump()
    new_index = index.Index()
    new_index.update(paths_to_content)
    diffs = new_index.diff(deployed_index)
    if not diffs:
        text = utils.colorize(
            '{white}Diff is empty, nothing to launch, aborted.{/white}')
        logging.info(text)
        return
    if self.dry_run:
        return
    if self.confirm:
        diffs.log_pretty()
        if not utils.interactive_confirm('Proceed with launch?'):
            logging.info('Launch aborted.')
            return
    self.start_time = time.time()
    index.Index.apply_diffs(
        diffs, paths_to_content,
        write_func=self.write_file,
        delete_func=self.delete_file)
    # Record the new state so the next deploy diffs against it.
    self.write_index_at_destination(new_index)
    logging.info('Wrote index: /{}'.format(index.Index.BASENAME))
    self.postlaunch()
    return diffs
def deploy(self, content_generator, stats=None, repo=None, dry_run=False,
           confirm=False, test=True, is_partial=False,
           require_translations=False):
    """Stream rendered content to the destination, applying only diffs.

    Args:
        content_generator: Iterable of rendered documents to deploy.
        stats: Optional stats object persisted as a control file.
        repo: Optional repository handle passed to the diff streamer.
        dry_run: Compute and report the diff without applying it.
        confirm: Prompt interactively before applying the diff.
        test: Run ``self.test()`` before deploying.
        is_partial: Treat the generated content as a partial build.
        require_translations: Abort if untranslated strings remain.

    Returns:
        The applied diff, or None when empty, dry-run, or aborted.

    Raises:
        pods.Error: When ``require_translations`` is set and untranslated
            strings are found.
    """
    self._confirm = confirm
    self.prelaunch(dry_run=dry_run)
    if test:
        self.test()
    try:
        deployed_index = self._get_remote_index()
        if require_translations:
            self.pod.enable(self.pod.FEATURE_TRANSLATION_STATS)
        diff, new_index, paths_to_rendered_doc = indexes.Diff.stream(
            deployed_index, content_generator, repo=repo,
            is_partial=is_partial)
        self._diff = diff
        if indexes.Diff.is_empty(diff):
            logging.info('Finished with no diffs since the last build.')
            return
        if dry_run:
            return
        indexes.Diff.pretty_print(diff)
        if require_translations and self.pod.translation_stats.untranslated:
            self.pod.translation_stats.pretty_print()
            raise pods.Error(
                'Aborted deploy due to untranslated strings. '
                'Use the --force-untranslated flag to force deployment.')
        if confirm:
            text = 'Proceed to deploy? -> {}'.format(self)
            if not utils.interactive_confirm(text):
                logging.info('Aborted.')
                return
        indexes.Diff.apply(
            diff, paths_to_rendered_doc,
            write_func=self.write_file,
            batch_write_func=self.write_files,
            delete_func=self.delete_file,
            threaded=self.threaded,
            batch_writes=self.batch_writes)
        # Persist the new index so future deploys diff against it.
        self.write_control_file(
            self.index_basename, indexes.Index.to_string(new_index))
        if stats is not None:
            self.write_control_file(self.stats_basename, stats.to_string())
        else:
            self.delete_control_file(self.stats_basename)
        if diff:
            self.write_control_file(
                self.diff_basename, indexes.Diff.to_string(diff))
        self.success = True
    finally:
        self.postlaunch()
    return diff
def deploy(self, paths_to_contents, stats=None, repo=None, dry_run=False,
           confirm=False, test=True):
    """Deploy pre-rendered content to the destination, applying only diffs.

    Args:
        paths_to_contents: Mapping of destination paths to content.
        stats: Optional stats object persisted as a control file.
        repo: Optional repository handle recorded in the index/diff.
        dry_run: Compute and report the diff without applying it.
        confirm: Prompt interactively before applying the diff.
        test: Run ``self.test()`` before deploying.

    Returns:
        The applied diff, or None when empty, dry-run, or aborted.
    """
    self._confirm = confirm
    self.prelaunch(dry_run=dry_run)
    if test:
        self.test()
    try:
        deployed_index = self._get_remote_index()
        new_index = indexes.Index.create(paths_to_contents)
        if repo:
            indexes.Index.add_repo(new_index, repo)
        diff = indexes.Diff.create(new_index, deployed_index, repo=repo)
        self._diff = diff
        if indexes.Diff.is_empty(diff):
            logging.info('Finished with no diffs since the last build.')
            return
        if dry_run:
            return
        indexes.Diff.pretty_print(diff)
        if confirm:
            text = 'Proceed to deploy? -> {}'.format(self)
            if not utils.interactive_confirm(text):
                logging.info('Aborted.')
                return
        indexes.Diff.apply(
            diff, paths_to_contents,
            write_func=self.write_file,
            batch_write_func=self.write_files,
            delete_func=self.delete_file,
            threaded=self.threaded,
            batch_writes=self.batch_writes)
        # Persist the new index so future deploys diff against it.
        self.write_control_file(
            self.index_basename, indexes.Index.to_string(new_index))
        if stats is not None:
            self.write_control_file(self.stats_basename, stats.to_string())
        else:
            self.delete_control_file(self.stats_basename)
        if diff:
            self.write_control_file(
                self.diff_basename, indexes.Diff.to_string(diff))
        self.success = True
    finally:
        self.postlaunch()
    return diff
def deploy(self, paths_to_contents, stats=None, repo=None, dry_run=False,
           confirm=False, test=True, is_partial=False):
    """Deploy pre-rendered content to the destination, applying only diffs.

    Args:
        paths_to_contents: Mapping of destination paths to content.
        stats: Optional stats object persisted as a control file.
        repo: Optional repository handle recorded in the index/diff.
        dry_run: Compute and report the diff without applying it.
        confirm: Prompt interactively before applying the diff.
        test: Run ``self.test()`` before deploying.
        is_partial: Treat the supplied content as a partial build.

    Returns:
        The applied diff, or None when empty, dry-run, or aborted.
    """
    self._confirm = confirm
    self.prelaunch(dry_run=dry_run)
    if test:
        self.test()
    try:
        deployed_index = self._get_remote_index()
        new_index = indexes.Index.create(paths_to_contents)
        if repo:
            indexes.Index.add_repo(new_index, repo)
        diff = indexes.Diff.create(
            new_index, deployed_index, repo=repo, is_partial=is_partial)
        self._diff = diff
        if indexes.Diff.is_empty(diff):
            logging.info('Finished with no diffs since the last build.')
            return
        if dry_run:
            return
        indexes.Diff.pretty_print(diff)
        if confirm:
            text = 'Proceed to deploy? -> {}'.format(self)
            if not utils.interactive_confirm(text):
                logging.info('Aborted.')
                return
        indexes.Diff.apply(
            diff, paths_to_contents,
            write_func=self.write_file,
            batch_write_func=self.write_files,
            delete_func=self.delete_file,
            threaded=self.threaded,
            batch_writes=self.batch_writes)
        # Persist the new index so future deploys diff against it.
        self.write_control_file(
            self.index_basename, indexes.Index.to_string(new_index))
        if stats is not None:
            self.write_control_file(self.stats_basename, stats.to_string())
        else:
            self.delete_control_file(self.stats_basename)
        if diff:
            self.write_control_file(
                self.diff_basename, indexes.Diff.to_string(diff))
        self.success = True
    finally:
        self.postlaunch()
    return diff
def deploy(self, paths_to_contents, stats=None, repo=None, dry_run=False,
           confirm=False, test=True):
    """Launch pre-rendered content to the destination, applying only diffs.

    Args:
        paths_to_contents: Mapping of destination paths to content.
        stats: Optional stats object persisted as a control file.
        repo: Optional repository handle recorded in the index/diff.
        dry_run: Compute and report the diff without applying it.
        confirm: Prompt interactively before applying the diff.
        test: Run ``self.test()`` before deploying.

    Returns:
        The applied diff, or None when empty, dry-run, or aborted.
    """
    self.prelaunch(dry_run=dry_run)
    if test:
        self.test()
    try:
        deployed_index = self._get_remote_index()
        new_index = indexes.Index.create(paths_to_contents)
        if repo:
            indexes.Index.add_repo(new_index, repo)
        diff = indexes.Diff.create(new_index, deployed_index, repo=repo)
        self._diff = diff
        if indexes.Diff.is_empty(diff):
            text = 'Diff is empty, nothing to launch.'
            logging.info(colorize(text, ansi=57))
            return
        if dry_run:
            return
        indexes.Diff.pretty_print(diff)
        if confirm:
            text = 'Proceed to launch? -> {}'.format(self)
            if not utils.interactive_confirm(text):
                logging.info('Launch aborted.')
                return
        indexes.Diff.apply(
            diff, paths_to_contents,
            write_func=self.write_file,
            delete_func=self.delete_file,
            threaded=self.threaded,
            batch_writes=self.batch_writes)
        # Persist the new index so future launches diff against it.
        self.write_control_file(
            self.index_basename, indexes.Index.to_string(new_index))
        if stats is not None:
            self.write_control_file(self.stats_basename, stats.to_string())
        else:
            self.delete_control_file(self.stats_basename)
        if diff:
            self.write_control_file(
                self.diff_basename, indexes.Diff.to_string(diff))
        self._success = True
    finally:
        self.postlaunch()
    return diff
def deploy(self, content_generator, stats=None, repo=None, dry_run=False,
           confirm=False, test=True, is_partial=False,
           require_translations=False):
    """Upload rendered content to a fileset server and publish a manifest.

    Blobs are uploaded in parallel, then a manifest referencing them is
    registered for the current branch (optionally as a timed deploy).

    Args:
        content_generator: Iterable of rendered documents to upload.
        stats: Unused by this destination; kept for interface parity.
        repo: Unused by this destination; kept for interface parity.
        dry_run: Return immediately without uploading anything.
        confirm: Prompt interactively before uploading.
        test: Unused by this destination; kept for interface parity.
        is_partial: Unused by this destination; kept for interface parity.
        require_translations: Unused by this destination; kept for
            interface parity.
    """
    self._confirm = confirm
    if dry_run:
        return
    server = self.config.server
    branch = self.get_branch()
    timed_deploy = self.get_timed_deploy()
    if confirm:
        lines = [
            '',
            'server: {}'.format(server),
            'branch: {}'.format(branch),
        ]
        if timed_deploy:
            lines.append('timed deploy: {} ({})'.format(
                timed_deploy['datetime'], timed_deploy['timezone']))
        lines.append('Proceed to deploy?')
        text = '\n'.join(lines)
        if not utils.interactive_confirm(text):
            logging.info('Aborted.')
            return
    # Resolve the auth token: local dev needs none; otherwise prefer the
    # pod config file, then the environment.
    if server.startswith('localhost'):
        # Localhost doesn't require an auth token.
        token = ''
    elif self.pod.file_exists(CONFIG_PATH):
        token = self.pod.read_json(CONFIG_PATH)['token']
    elif os.environ.get('FILESET_TOKEN'):
        token = os.environ['FILESET_TOKEN']
    else:
        logging.error('"token" is required in {}'.format(CONFIG_PATH))
        logging.error(
            'visit {}/_fs/token to generate a new token'.format(server))
        return
    api_host = server
    if branch != 'master':
        api_host = '{}-dot-{}'.format(branch, server)
    fs = fileset.FilesetClient(api_host, token)
    manifest = {
        'commit': self.get_commit(),
        'files': [],
    }
    # Warm the cache by fetching the current manifest.
    if not server.startswith('localhost'):
        self._warm_up_cache(fs, branch)
    with futures.ThreadPoolExecutor(max_workers=20) as executor:
        # Map of future => doc path.
        results = {}
        for rendered_doc in content_generator:
            future = executor.submit(self._upload_blob, fs, rendered_doc)
            results[future] = rendered_doc.path
        for future in futures.as_completed(results):
            try:
                data = future.result()
            except Exception:
                # If any upload fails, write the objectcache to file so we
                # don't lose information about what was already uploaded.
                self.pod.podcache.write()
                doc_path = results.get(future)
                logging.error('failed to upload: {}'.format(doc_path))
                raise
            manifest['files'].append(data)
    self.pod.podcache.write()
    response = fs.upload_manifest(manifest)
    manifest_id = response.json()['manifest_id']
    deploy_timestamp = None
    if timed_deploy:
        deploy_timestamp = timed_deploy['timestamp']
    fs.set_branch_manifest(
        branch, manifest_id, deploy_timestamp=deploy_timestamp)
    lines = [
        '',
        'saved branch manifest:',
        ' branch: {}'.format(branch),
        ' manifest id: {}'.format(manifest_id),
    ]
    if timed_deploy:
        lines.append(' timed deploy: {} ({})'.format(
            timed_deploy['datetime'], timed_deploy['timezone']))
    lines.extend([
        '',
        'url:',
    ])
    if server.startswith('localhost'):
        lines.append(' http://{}'.format(server))
    elif deploy_timestamp:
        lines.append(' https://manifest-{}-dot-{}'.format(
            manifest_id, server))
    elif branch == 'master':
        lines.append(' https://{}'.format(server))
    else:
        lines.append(' https://{}-dot-{}'.format(branch, server))
    logging.info('\n'.join(lines))