def upload_bulk_app_translations(request, domain, app_id):
    """Handle an uploaded bulk app translation workbook.

    Depending on the ``validate`` POST flag, either validates the workbook
    or applies it to the app (and saves the app). Any resulting messages
    are attached to the request, then the user is redirected to the app
    settings page.
    """
    lang = request.POST.get('language')
    validate = request.POST.get('validate')
    app = get_app(domain, app_id)

    workbook = None
    msgs = []
    try:
        workbook = get_workbook(request.file)
    except WorkbookJSONError as e:
        messages.error(request, six.text_type(e))

    if workbook:
        if validate:
            msgs = validate_bulk_app_translation_upload(app, workbook, request.user.email, lang)
        else:
            headers = get_bulk_app_sheet_headers(app, lang=lang)
            msgs = process_bulk_app_translation_upload(app, workbook, headers, lang=lang)
            app.save()

    # Each entry pairs a django.contrib.messages function (e.g.
    # messages.error) with the string to display.
    for add_message, text in msgs:
        add_message(request, text)

    # In v2, languages is the default tab on the settings page
    return HttpResponseRedirect(reverse('app_settings', args=[domain, app_id]))
def download_bulk_app_translations(request, domain, app_id):
    """Serve an Excel (XLS 2007) download of the app's translations.

    When a ``lang`` query parameter is present, a single combined sheet is
    produced for that language; otherwise one sheet per module/form.
    ``skipbl=true`` restricts the output to rows eligible for Transifex.
    """
    lang = request.GET.get('lang')
    skip_blacklisted = request.GET.get('skipbl', 'false') == 'true'
    app = get_app(domain, app_id)

    # if there is a lang selected, assume that user wants a single sheet
    is_single_sheet = bool(lang)
    headers = get_bulk_app_sheet_headers(
        app,
        single_sheet=is_single_sheet,
        lang=lang,
        eligible_for_transifex_only=skip_blacklisted,
    )
    if is_single_sheet:
        sheets = get_bulk_app_single_sheet_by_name(app, lang, skip_blacklisted)
    else:
        sheets = get_bulk_app_sheets_by_name(app, eligible_for_transifex_only=skip_blacklisted)

    temp = io.BytesIO()
    export_raw(headers, list(sheets.items()), temp)

    filename = '{app_name} v.{app_version} - App Translations{lang}{transifex_only}'.format(
        app_name=app.name,
        app_version=app.version,
        lang=' ' + lang if is_single_sheet else '',
        transifex_only=' (Transifex only)' if skip_blacklisted else '',
    )
    return export_response(temp, Format.XLS_2007, filename)
def _generate_expected_headers_and_rows(self):
    """Cache the expected headers and rows for this app's translation sheets."""
    # Map each sheet name to its header row.
    self.headers = {
        sheet_name: header_row
        for sheet_name, header_row in get_bulk_app_sheet_headers(self.app)
    }
    # Skip any module/form whose comment flags it as excluded from Transifex.
    self.expected_rows = get_bulk_app_sheets_by_name(
        self.app,
        exclude_module=lambda module: SKIP_TRANSFEX_STRING in module.comment,
        exclude_form=lambda form: SKIP_TRANSFEX_STRING in form.comment,
    )
def download_bulk_app_translations(request, domain, app_id):
    """Serve an Excel (XLS 2007) export of the app's bulk translations.

    If a ``lang`` query parameter is present, a single combined sheet is
    produced for that language; otherwise one sheet per module/form.
    """
    lang = request.GET.get('lang')
    app = get_app(domain, app_id)
    headers = get_bulk_app_sheet_headers(app, lang=lang)

    if lang:
        sheets = get_bulk_app_single_sheet_by_name(app, lang)
    else:
        sheets = get_bulk_app_sheets_by_name(app)

    temp = io.BytesIO()
    export_raw(headers, list(six.iteritems(sheets)), temp)

    filename = '{app_name} v.{app_version} - App Translations{lang}'.format(
        app_name=app.name,
        app_version=app.version,
        lang=' ' + lang if lang else '',
    )
    return export_response(temp, Format.XLS_2007, filename)
def _translation_data(self):
    """Collect the app's translation rows per sheet and record sheet headers."""
    # simply the rows of data per sheet name
    from corehq.apps.translations.app_translations.download import get_bulk_app_sheets_by_name
    rows = get_bulk_app_sheets_by_name(self.app, eligible_for_transifex_only=True)

    # record the header row for each sheet
    from corehq.apps.translations.app_translations.utils import get_bulk_app_sheet_headers
    for sheet_name, header_row in get_bulk_app_sheet_headers(
        self.app,
        eligible_for_transifex_only=True,
    ):
        self.headers[sheet_name] = header_row

    self._set_sheet_name_to_module_or_form_mapping(rows[MODULES_AND_FORMS_SHEET_NAME])
    return rows
def _generate_current_headers_and_rows(self):
    """Load current headers/rows for the comparison language and index them."""
    # get_bulk_app_sheet_headers yields (module_or_form_id, headers) pairs.
    self.current_headers = dict(
        get_bulk_app_sheet_headers(
            self.app,
            lang=self.lang_to_compare,
            eligible_for_transifex_only=True,
            single_sheet=self.single_sheet,
        )
    )
    if self.single_sheet:
        self.current_rows = get_bulk_app_single_sheet_by_name(
            self.app,
            self.lang_to_compare,
            eligible_for_transifex_only=True,
        )
    else:
        self.current_rows = get_bulk_app_sheets_by_name(
            self.app,
            eligible_for_transifex_only=True,
        )
    self._set_current_sheet_name_to_module_or_form_mapping()
    self._map_ids_to_headers()
    self._map_ids_to_translations()
def _translation_data(self, app):
    """Collect translation rows per sheet for *app*, skipping blacklisted items."""
    # Modules/forms whose comment contains the skip marker are excluded.
    def _skip_module(module):
        return SKIP_TRANSFEX_STRING in module.comment

    def _skip_form(form):
        return SKIP_TRANSFEX_STRING in form.comment

    # simply the rows of data per sheet name
    from corehq.apps.translations.app_translations.download import get_bulk_app_sheets_by_name
    rows = get_bulk_app_sheets_by_name(
        app,
        exclude_module=_skip_module,
        exclude_form=_skip_form,
    )

    # record the header row for each sheet
    from corehq.apps.translations.app_translations.utils import get_bulk_app_sheet_headers
    for sheet_name, header_row in get_bulk_app_sheet_headers(
        app,
        exclude_module=_skip_module,
        exclude_form=_skip_form,
    ):
        self.headers[sheet_name] = header_row

    self._set_sheet_name_to_module_or_form_mapping(rows[MODULES_AND_FORMS_SHEET_NAME])
    return rows
def process_bulk_app_translation_upload(app, workbook, sheet_name_to_unique_id, lang=None):
    """
    Process the bulk upload file for the given app. We return these message
    tuples instead of calling them now to allow this function to be used
    independently of request objects.

    :param app: the app whose translations are being updated
    :param workbook: the uploaded workbook wrapper (iterable worksheets)
    :param sheet_name_to_unique_id: mapping of sheet name -> module/form
        unique id, used to match sheets to modules/forms; may be empty for
        workbooks downloaded before unique IDs were included
    :param lang: language code when processing a single-language sheet
    :return: Returns a list of message tuples. The first item in each tuple is
    a function like django.contrib.messages.error, and the second is a string.
    """
    def get_expected_headers(sheet_name):
        # This function does its best to return the headers we expect, based
        # on the current app, for an uploaded sheet. If the sheet is old, it
        # might not include the unique IDs of the modules/forms. In that case
        # `sheet_name_to_unique_id` will be empty and we fall back to using the
        # name of the sheet and hope that modules/forms have not been moved
        # since the sheet was originally downloaded.
        #
        # If a user created a new sheet, or renamed a sheet, or a form/module
        # has been deleted since this sheet was downloaded, then expected
        # headers will not be found. We return an empty list, and
        # `_check_for_sheet_error()` will handle it.
        if sheet_name in sheet_name_to_unique_id:
            unique_id = sheet_name_to_unique_id[sheet_name]
            if unique_id in expected_headers_by_id:
                return expected_headers_by_id[unique_id]
        return expected_headers_by_sheet_name.get(sheet_name, [])

    msgs = []
    # Bail out early if the workbook has the wrong number of sheets.
    error = _check_workbook_length(workbook, lang)
    if error:
        msgs.append((messages.error, error))
        return msgs

    # Two lookups for expected headers: by sheet name (legacy workbooks)
    # and by module/form unique id (current workbooks).
    expected_headers_by_sheet_name = {k: v for k, v in get_bulk_app_sheet_headers(app, lang=lang)}
    expected_headers_by_id = {k: v for k, v in get_bulk_app_sheet_headers(app, lang=lang, by_id=True)}
    processed_sheets = set()
    for sheet in workbook.worksheets:
        expected_headers = get_expected_headers(sheet.worksheet.title)
        try:
            _check_for_sheet_error(sheet, expected_headers, processed_sheets)
        except BulkAppTranslationsException as e:
            # Record the error and skip this sheet; continue with the rest.
            msgs.append((messages.error, six.text_type(e)))
            continue

        processed_sheets.add(sheet.worksheet.title)

        warnings = _check_for_sheet_warnings(sheet, expected_headers)
        for warning in warnings:
            msgs.append((messages.warning, warning))

        # Single-sheet workbooks carry all modules/forms in one sheet;
        # multi-sheet workbooks have one module/form per sheet.
        if is_single_sheet(sheet.worksheet.title):
            msgs.extend(_process_single_sheet(app, sheet, names_map=sheet_name_to_unique_id, lang=lang))
        else:
            msgs.extend(_process_rows(app, sheet.worksheet.title, sheet, names_map=sheet_name_to_unique_id))

    msgs.append((messages.success, _("App Translations Updated!")))
    return msgs
def download_audio_translator_files(domain, app, lang, eligible_for_transifex_only=True):
    """Build the two Excel workbooks used for audio translation work.

    Returns a dict with two openpyxl workbooks:
    - ``bulk_upload.xlsx``: a single-sheet bulk-translations file with audio
      paths disambiguated and de-duplicated, for re-upload to HQ.
    - ``excel_for_translator.xlsx``: one row per unique text label that still
      needs audio recorded, plus a verification sheet.

    :param domain: domain name (unused here but kept for interface parity)
    :param app: the app whose audio translations are being exported
    :param lang: language code, used to locate the text/audio columns
    :param eligible_for_transifex_only: restrict to Transifex-eligible rows
    """
    # Get bulk app translation single sheet data
    headers = get_bulk_app_sheet_headers(
        app,
        single_sheet=True,
        lang=lang,
        eligible_for_transifex_only=eligible_for_transifex_only)
    headers = headers[0]  # There's only one row since these are the headers for the single-sheet format
    headers = headers[1]  # Drop the first element (sheet name), leaving the second (list of header names)
    audio_text_index = headers.index('default_' + lang)
    audio_path_index = headers.index('audio_' + lang)
    # FIX: previously hard-coded True here, ignoring the
    # eligible_for_transifex_only parameter and producing rows filtered
    # differently from the headers above. Default is True, so behavior is
    # unchanged for existing callers.
    sheets = get_bulk_app_single_sheet_by_name(
        app, lang, eligible_for_transifex_only=eligible_for_transifex_only)
    audio_rows = [row for row in sheets[SINGLE_SHEET_NAME] if row[audio_path_index]]

    # Create file for re-upload to HQ's bulk app translations
    upload_workbook = openpyxl.Workbook()
    upload_sheet = upload_workbook.worksheets[0]
    upload_sheet.title = SINGLE_SHEET_NAME
    upload_sheet.append(headers)

    # Create dict of audio path to text, and disambiguate any missing path
    # that points to multiple texts
    rows_by_audio = {}
    for row in audio_rows:
        audio_path = row[audio_path_index]
        text = row[audio_text_index]
        if audio_path in rows_by_audio and audio_path not in app.multimedia_map:
            if rows_by_audio[audio_path] != text:
                # Same (missing) path used for different texts: append a
                # numeric suffix before the extension until it's unique.
                extension = "." + audio_path.split(".")[-1]
                not_extension = audio_path[:-len(extension)]
                suffix = 1
                while audio_path in rows_by_audio and rows_by_audio[audio_path] != text:
                    suffix += 1
                    audio_path = "{}_{}{}".format(not_extension, suffix, extension)
                row[audio_path_index] = audio_path
                upload_sheet.append(row)    # add new path to sheet for re-upload to HQ
        rows_by_audio[audio_path] = text

    # Create dict of rows, keyed by label text to de-duplicate paths
    rows_by_text = defaultdict(list)
    for row in audio_rows:
        rows_by_text[row[audio_text_index]].append(row)

    def _get_filename_from_duplicate_rows(rows):
        # The first row's path wins; all duplicates are remapped to it.
        return rows[0][audio_path_index]

    # Add a row to upload sheet for each filename being eliminated because the text was duplicated
    for text, rows in rows_by_text.items():
        filename = _get_filename_from_duplicate_rows(rows)
        for row in rows:
            if row[audio_path_index] != filename:
                row[audio_path_index] = filename
                upload_sheet.append(row)

    # Create file for translator, with a row for each unique text label
    translator_workbook = openpyxl.Workbook()
    sheet0 = translator_workbook.worksheets[0]
    sheet0.title = "filepaths"
    sheet0.append([lang, "audio"])
    sheet1 = translator_workbook.create_sheet("verification")
    sheet1.append(headers)
    for text, rows in rows_by_text.items():
        # Only include texts whose audio is not already in the app's
        # multimedia map (i.e., still needs to be recorded).
        if not any(row[audio_path_index] in app.multimedia_map for row in rows):
            filename = _get_filename_from_duplicate_rows(rows)
            sheet0.append([text, filename])
            sheet1.append(rows[0])

    return {
        "bulk_upload.xlsx": upload_workbook,
        "excel_for_translator.xlsx": translator_workbook,
    }