import json

# Module-level imports needed by this excerpt (paths as in the
# Weblate 3.x era).
from django.utils.encoding import force_text
from django.utils.translation import ugettext as _

from translate.storage.tmx import tmxfile

from weblate.utils.errors import report_error

# CATEGORY_FILE, MemoryImportError, get_node_data and the SCHEMA class
# are defined elsewhere in this module. Both importers below are
# classmethods of the translation-memory storage class; the class body
# is omitted in this excerpt.


@classmethod
def import_json(cls, request, fileobj, category=None, origin=None):
    # Imported lazily, presumably to avoid an import cycle with the
    # tasks module.
    from weblate.memory.tasks import update_memory_task

    content = fileobj.read()
    try:
        data = json.loads(force_text(content))
    except (ValueError, UnicodeDecodeError) as error:
        report_error(error, request, prefix='Failed to parse')
        raise MemoryImportError(_('Failed to parse JSON file!'))
    updates = {}
    fields = cls.SCHEMA().names()
    if category:
        updates = {
            'category': category,
            'origin': origin,
        }
    found = 0
    if isinstance(data, list):
        for entry in data:
            if not isinstance(entry, dict):
                continue
            # Apply overrides
            entry.update(updates)
            # Skip entries that do not have all schema fields set
            if not all(entry.get(field) for field in fields):
                continue
            # Ensure there are no extra fields
            record = {field: entry[field] for field in fields}
            update_memory_task.delay(**record)
            found += 1
    return found
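# --- Usage sketch (illustrative, not part of the module) ---
# Feeds a small in-memory JSON document through the importer above.
# Assumptions: the storage class is named TranslationMemory, the schema
# fields are the usual memory fields (source_language, target_language,
# source, target, origin, category), and a Celery worker handles the
# queued update_memory_task calls.
import io


def _example_import_json():
    payload = json.dumps([
        {
            'source_language': 'en',
            'target_language': 'cs',
            'source': 'Hello',
            'target': 'Ahoj',
            'origin': 'manual',
            'category': 1,
        },
        'not a dict',  # non-dict entries are silently skipped
    ])
    # request is only used for error reporting on parse failures,
    # so None is acceptable for a well-formed file.
    return TranslationMemory.import_json(
        None, io.BytesIO(payload.encode('utf-8')),
        category=1, origin='manual',
    )  # returns 1: only the first entry has every schema field set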
@classmethod
def import_tmx(cls, request, fileobj, langmap=None, category=None,
               origin=None):
    from weblate.memory.tasks import update_memory_task

    if category is None:
        category = CATEGORY_FILE
    try:
        storage = tmxfile.parsefile(fileobj)
    except (SyntaxError, AssertionError) as error:
        report_error(error, request, prefix='Failed to parse')
        raise MemoryImportError(_('Failed to parse TMX file!'))
    header = next(storage.document.getroot().iterchildren(
        storage.namespaced("header")
    ))
    source_language_code = header.get('srclang')
    source_language = cls.get_language_code(source_language_code, langmap)

    languages = {}
    found = 0
    for unit in storage.units:
        # Parse translations (translate-toolkit does not care about
        # languages here, it just picks the first and second XML elements)
        translations = {}
        for node in unit.getlanguageNodes():
            lang, text = get_node_data(unit, node)
            if not lang or not text:
                continue
            translations[lang] = text
            if lang not in languages:
                languages[lang] = cls.get_language_code(lang, langmap)
        try:
            source = translations.pop(source_language_code)
        except KeyError:
            # Skip the unit if the source language is not present
            continue
        for lang, text in translations.items():
            update_memory_task.delay(
                source_language=source_language,
                target_language=languages[lang],
                source=source,
                target=text,
                origin=origin,
                category=category,
            )
            found += 1
    return found
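# --- Usage sketch (illustrative, not part of the module) ---
# Builds a minimal TMX document in memory and runs it through the
# importer above. TranslationMemory is again an assumed class name;
# category falls back to CATEGORY_FILE, and request is only consulted
# when parsing fails, so None suffices here.
EXAMPLE_TMX = b'''<?xml version="1.0" encoding="utf-8"?>
<tmx version="1.4">
  <header srclang="en" datatype="plaintext" segtype="sentence"
          adminlang="en" o-tmf="example" creationtool="example"
          creationtoolversion="1"/>
  <body>
    <tu>
      <tuv xml:lang="en"><seg>Hello</seg></tuv>
      <tuv xml:lang="cs"><seg>Ahoj</seg></tuv>
      <tuv xml:lang="de"><seg>Hallo</seg></tuv>
    </tu>
  </body>
</tmx>
'''


def _example_import_tmx():
    import io
    # One unit with srclang="en" yields two pairs: en->cs and en->de,
    # so this returns 2 once the source segment is popped.
    return TranslationMemory.import_tmx(
        None, io.BytesIO(EXAMPLE_TMX), origin='example.tmx',
    )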