def merge_upload(self, request, fileobj, overwrite, author=None,
                 merge_header=True, method=''):
    '''
    Top level handler for file uploads.
    '''
    filecopy = fileobj.read()
    fileobj.close()

    # Load backend file
    try:
        # First try using own loader
        store = self.subproject.file_format_cls(
            StringIOMode(fileobj.name, filecopy),
            self.subproject.template_store
        )
    except Exception:
        # Fallback to automatic detection
        store = AutoFormat(
            StringIOMode(fileobj.name, filecopy),
        )

    # Optionally set authorship
    if author is None:
        author = self.get_author_name(request.user)

    # List translations we should process
    # Filter out those who don't want automatic update, but keep ourselves
    translations = Translation.objects.filter(
        language=self.language,
        subproject__project=self.subproject.project
    ).filter(
        Q(pk=self.pk) |
        Q(subproject__allow_translation_propagation=True)
    )

    ret = False

    if method in ('', 'fuzzy'):
        # Do actual merge
        if self.subproject.has_template():
            # Merge on units level
            self.merge_translations(
                request, author, store, overwrite, (method == 'fuzzy')
            )
        else:
            # Merge on file level
            for translation in translations:
                ret |= translation.merge_store(
                    request, author, store, overwrite, merge_header,
                    (method == 'fuzzy')
                )
    else:
        # Add as suggestions
        ret = self.merge_suggestions(request, store)

    return ret, store.count_units()
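# A minimal, self-contained sketch of the loader pattern above: try the
# component's own strict format class first, and fall back to automatic
# detection when it refuses the file. StrictPoLoader, AnyFormatLoader and
# load_upload are hypothetical stand-ins, not Weblate or translate-toolkit
# APIs.
import io


class StrictPoLoader:
    '''Hypothetical loader that only accepts gettext PO content.'''
    def __init__(self, handle):
        data = handle.read()
        if not data.lstrip().startswith(b'msgid'):
            raise ValueError('not a PO file')
        self.data = data


class AnyFormatLoader:
    '''Hypothetical catch-all loader standing in for auto-detection.'''
    def __init__(self, handle):
        self.data = handle.read()


def load_upload(filecopy, preferred_cls=StrictPoLoader):
    '''Prefer the project's own format class, auto-detect on failure.'''
    try:
        return preferred_cls(io.BytesIO(filecopy))
    except Exception:
        return AnyFormatLoader(io.BytesIO(filecopy))


print(type(load_upload(b'not a po file at all')).__name__)  # AnyFormatLoader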
def upload(self, request, project, language, fileobj, method):
    '''
    Handles dictionary update.
    '''
    filecopy = fileobj.read()
    fileobj.close()

    # Load file using translate-toolkit
    store = AutoFormat.load(StringIOMode(fileobj.name, filecopy))

    ret, skipped = self.import_store(
        request, project, language, store, method
    )

    if ret == 0 and skipped > 0 and isinstance(store, csvfile):
        # Retry with different CSV scheme
        store = csvfile(
            StringIOMode(fileobj.name, filecopy),
            ('source', 'target')
        )
        ret, skipped = self.import_store(
            request, project, language, store, method
        )

    return ret
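# Why the retry above can help: a glossary CSV without a header row yields no
# usable entries when the column names are guessed from its first line, and
# forcing an explicit ('source', 'target') scheme recovers the rows. This is a
# stdlib-only illustration of that idea, assumed to mirror the behaviour of
# translate-toolkit's csvfile rather than calling it.
import csv
import io

raw = 'hello,ahoj\nworld,svet\n'

# First pass: the first data line gets consumed as a header.
guessed = list(csv.DictReader(io.StringIO(raw)))
print(len(guessed))  # 1

# Retry: supply the column names instead of guessing them from the file.
forced = list(csv.DictReader(io.StringIO(raw), fieldnames=('source', 'target')))
print(forced[0])  # {'source': 'hello', 'target': 'ahoj'}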
def merge_upload(self, request, fileobj, overwrite, author=None,
                 merge_header=True, method='', fuzzy='',
                 merge_comments=False):
    """Top level handler for file uploads."""
    filecopy = fileobj.read()
    fileobj.close()

    # Strip possible UTF-8 BOM
    if filecopy[:3] == codecs.BOM_UTF8:
        filecopy = filecopy[3:]

    # Load backend file
    try:
        # First try using own loader
        store = self.subproject.file_format_cls.parse(
            StringIOMode(fileobj.name, filecopy),
            self.subproject.template_store
        )
    except Exception:
        # Fallback to automatic detection
        store = AutoFormat.parse(
            StringIOMode(fileobj.name, filecopy),
        )

    # Optionally set authorship
    if author is None:
        author = get_author_name(request.user)

    # Check valid plural forms
    if hasattr(store.store, 'parseheader'):
        header = store.store.parseheader()
        if 'Plural-Forms' in header and \
                self.language.get_plural_form() != header['Plural-Forms']:
            raise Exception('Plural forms do not match the language.')

    # List translations we should process
    # Filter out those who don't want automatic update, but keep ourselves
    translations = Translation.objects.filter(
        language=self.language,
        subproject__project=self.subproject.project
    ).filter(
        Q(pk=self.pk) |
        Q(subproject__allow_translation_propagation=True)
    )

    ret = False

    if method in ('', 'fuzzy'):
        # Do actual merge
        if self.subproject.has_template():
            # Merge on units level
            ret = self.merge_translations(
                request, store, overwrite, (method == 'fuzzy'), fuzzy
            )
        else:
            # Merge on file level
            for translation in translations:
                ret |= translation.merge_store(
                    request, author, store, overwrite, merge_header,
                    (method == 'fuzzy'), fuzzy,
                    merge_comments=merge_comments,
                )
    else:
        # Add as suggestions
        ret = self.merge_suggestions(request, store, fuzzy)

    return ret, store.count_units()
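# Self-contained check of the BOM handling above: codecs.BOM_UTF8 is the
# three-byte UTF-8 byte order mark, and slicing it off keeps strict parsers
# from tripping over b'\xef\xbb\xbf' at the start of an uploaded file.
import codecs

filecopy = codecs.BOM_UTF8 + b'msgid "hello"\nmsgstr "ahoj"\n'
if filecopy[:3] == codecs.BOM_UTF8:
    filecopy = filecopy[3:]
print(filecopy[:5])  # b'msgid'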