def serialize(self, queryset, *args, **kwargs):
    r"""
    Validate the queryset and remember source/target language before
    delegating to the base serializer.

    All given :class:`~integreat_cms.cms.models.pages.page_translation.PageTranslation`
    objects must share one language and one region, and that language must
    not be the region's default language (XLIFF needs a distinct source).

    :param queryset: QuerySet of page translations to serialize
    :type queryset: ~django.db.models.query.QuerySet
    :param \*args: Additional positional arguments passed through
    :type \*args: list
    :param \**kwargs: Additional keyword arguments passed through
    :type \**kwargs: dict
    :raises ~django.core.serializers.base.SerializationError: If the queryset
        is empty, spans multiple languages or regions, or targets the
        region's default language
    :return: The serialized XLIFF string
    :rtype: str
    """
    # Distinct languages of the given translations
    language_set = {translation.language for translation in queryset}
    logger.debug("XLIFF 2.0 serialization for languages %r", language_set)
    if not language_set:
        raise base.SerializationError(
            "No page translations given to serialize.")
    if len(language_set) != 1:
        raise base.SerializationError(
            "The page translations have different languages, but in XLIFF 2.0 "
            "all objects of one file need to have the same language.")

    # Distinct regions of the given translations
    region_set = {translation.page.region for translation in queryset}
    logger.debug("XLIFF 2.0 serialization for regions %r", region_set)
    if len(region_set) != 1:
        raise base.SerializationError(
            "The page translations are from different regions.")

    # Both sets hold exactly one element at this point
    region = region_set.pop()
    target_language = language_set.pop()
    if target_language == region.default_language:
        raise base.SerializationError(
            "The page translation is in the region's default language.")
    self.target_language = target_language
    self.source_language = region.get_source_language(target_language.slug)
    logger.debug(
        "Starting XLIFF 2.0 serialization for translation from %r to %r",
        self.source_language,
        target_language,
    )
    return super().serialize(queryset, *args, **kwargs)
def handle_m2m_field(self, obj, field):
    """
    Called to handle a ManyToManyField.

    This serializer is non-relational, so many-to-many fields are not
    supported and always abort serialization.

    :param obj: The object whose field is being handled
    :param field: The many-to-many model field
    :raises ~django.core.serializers.base.SerializationError: always
    """
    # Fix: the original error message misspelled "received".
    raise base.SerializationError(
        "Non-relational serializer received object {} with a 'many to many' field {}"
        .format(type(obj), field.name))
def start_object(self, obj):
    """
    Called as each object is handled.

    On the first record, creates the worksheet and writes the header row
    (one column per model field); for every record, advances the row
    counter and writes the object's ``id`` into its column.

    :param obj: The model instance being serialized
    :raises base.SerializationError: If ``obj`` is not a Django model
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError(
            "Non-model object (%s) encountered during serialization" % type(obj))
    # Row counter: row 1 is the header, so data row N lives at N + 1.
    self.count += 1
    count_col = 1
    # Flag saying that it's the first record in the file
    if self.first:
        # Create the sheet
        self.ws = self.wb.active
        self.ws.title = 'Clients dump'
        # ``names`` maps each model field name to its worksheet column and
        # the header text (verbose_name, or raw name when requested).
        self.names = {}
        for field in obj._meta.fields:
            # Fix: ``dict.has_key`` was removed in Python 3; ``dict.get``
            # has identical semantics here on both Python 2 and 3.
            if self.options.get('name') == True:  # noqa: E712 — keep the original loose comparison
                head_name = field.name
            else:
                head_name = field.verbose_name
            index = field.name
            self.names[index] = {}
            self.names[index]['verbose_name'] = head_name
            self.names[index]['column'] = count_col
            self.ws.cell(row=1, column=count_col).value = head_name
            count_col += 1
    # Write the id value of the current record
    self.ws.cell(row=self.count + 1, column=self.names['id']['column']).value = str(obj.id)
def handle_fk_field(self, obj, field):
    """
    Called to handle a ForeignKey field.

    This serializer is non-relational, so foreign keys are not supported
    and always abort serialization.

    :param obj: The object whose field is being handled
    :param field: The foreign-key model field
    :raises ~django.core.serializers.base.SerializationError: always
    """
    # Fix: the original error message misspelled "received".
    raise base.SerializationError(
        "Non-relational serializer received object {} with a foreign key field {}"
        .format(type(obj), field.name))
def start_object(self, obj):
    """
    Called as each object is handled.

    Opens a namespaced XML element for the model instance. On the first
    object, a wrapping ``<..._list>`` element is also opened unless the
    serializer expects a single object; receiving a second object in
    single-object mode is an error.

    NOTE(review): this mutates ``obj._meta`` (adds ``ns``/``ns_uri``) as a
    per-model cache — those attributes persist on the model class's options
    for the lifetime of the process.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError(
            "Non-model object (%s) encountered during serialization" % type(obj))
    # e.g. "app_label.modelname" -> element name "modelname"
    model_name = smart_unicode(obj._meta)
    elem_name = model_name.split(".")[-1]
    # Cache the XML namespace prefix (app label with dots joined by "_") on _meta.
    if not hasattr(obj._meta, 'ns'):
        setattr(obj._meta, 'ns', "_".join(model_name.split(".")[:-1]))
    # First namespace seen becomes the serializer's default.
    if not self.ns:
        self.ns = obj._meta.ns
    # Cache the namespace URI ("urn:x-ns:hotjazz:<app label>") on _meta.
    if not hasattr(obj._meta, 'ns_uri'):
        setattr(obj._meta, 'ns_uri',
                "urn:x-ns:hotjazz:" + ".".join(model_name.split(".")[:-1]))
    xmlnstag = "xmlns:" + obj._meta.ns
    # Namespace declarations carried by the opening element(s).
    attrs = {
        "xmlns:hotjazz": "urn:x-ns:hotjazz",
        xmlnstag: obj._meta.ns_uri,
        "xmlns": obj._meta.ns_uri
    }
    if not self.objects_started:
        self.objects_started = True
        if not self.single_object:
            # Multiple objects: open the wrapping list element first.
            self.list_elem_name = elem_name + "_list"
            attrs["hotjazz:type"] = "model_list"
            self.xml.startElement(obj._meta.ns + ":" + self.list_elem_name, attrs)
            self.indent(1)
    else:
        if self.single_object:
            # A second object in single-object mode is a caller error.
            raise base.SerializationError(
                "Expecting one model object but got another one (%s) during serialization" % type(obj))
    obj_pk = obj._get_pk_val()
    # Reusing ``attrs`` is safe: startElement has already written the list
    # element's attributes, so mutating the dict here only affects this element.
    attrs["hotjazz:type"] = "model"
    if obj_pk is not None:
        attrs["hotjazz:pk"] = smart_unicode(obj._get_pk_val())
    self.xml.startElement(obj._meta.ns + ":" + elem_name, attrs)
def start_object(self, obj):
    """Open an ``<object>`` element carrying the instance's pk and model label."""
    if not hasattr(obj, "_meta"):
        raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
    self.indent(1)
    attrs = {
        "pk": str(obj._get_pk_val()),
        "model": str(obj._meta),
    }
    self.xml.startElement("object", attrs)
def csv(self, worksheet):
    """
    Render ``worksheet`` as a CSV string.

    Each worksheet row becomes one comma-joined line; cell values are passed
    through ``wrap`` for quoting/escaping.

    :param worksheet: An openpyxl-style worksheet (row 1 is the header)
    :raises base.SerializationError: If the sheet contains only a header row
    :return: The CSV text, one ``\\n``-terminated line per row
    :rtype: str
    """
    num_rows = len(worksheet['A'])
    if num_rows == 1:
        raise base.SerializationError('There is no data to dump.')
    buffer = io.StringIO()
    for ix in range(num_rows):
        # Fix: test against None instead of truthiness, so falsy-but-real
        # values such as 0 or False are serialized rather than emitted as ''.
        buffer.write(','.join([
            wrap(cell.value) if cell.value is not None else ''
            for cell in worksheet[ix + 1]
        ]))
        buffer.write('\n')
    return buffer.getvalue()
def start_object(self, obj):
    """
    Open an ``<object>`` element for ``obj``.

    The primary key attribute is omitted when natural-key serialization is
    enabled and the model supports it.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError(
            "Non-model object (%s) encountered during serialization" % type(obj))
    self.indent(1)
    object_data = {"model": smart_unicode(obj._meta)}
    uses_natural_key = self.use_natural_keys and hasattr(obj, 'natural_key')
    if not uses_natural_key:
        object_data['pk'] = smart_unicode(obj._get_pk_val())
    self.xml.startElement("object", object_data)
def start_object(self, obj):
    """
    Open an ``<object>`` element for ``obj``; a non-``None`` primary key is
    emitted as a nested ``<field name="pk">`` element.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
    self.indent(1)
    self.xml.startElement("object", {'model': self.model_name(obj)})
    primary_key = obj.pk
    if primary_key is None:
        return
    self.xml.startElement('field', {'name': 'pk'})
    self.xml.characters(str(primary_key))
    self.xml.endElement("field")
def start_object(self, obj):
    """
    Open an ``<object>`` element for ``obj``, adding a ``pk`` attribute
    unless natural primary keys are in effect or the pk is ``None``.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
    self.indent(1)
    attrs = {'model': str(obj._meta)}
    skip_pk = self.use_natural_primary_keys and hasattr(obj, 'natural_key')
    if not skip_pk:
        primary_key = obj.pk
        if primary_key is not None:
            attrs['pk'] = str(primary_key)
    self.xml.startElement("object", attrs)
def start_object(self, obj):
    """
    Open an ``<object>`` element with deterministic attribute order
    (``model`` first, then ``pk`` when applicable).
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
    self.indent(1)
    attrs = OrderedDict()
    attrs["model"] = force_text(obj._meta)
    skip_pk = self.use_natural_primary_keys and hasattr(obj, 'natural_key')
    if not skip_pk:
        primary_key = obj._get_pk_val()
        if primary_key is not None:
            attrs['pk'] = force_text(primary_key)
    self.xml.startElement("object", attrs)
def start_object(self, obj):
    """
    Called as each object is handled.

    Opens an XLIFF 1.2 ``<file>`` block with meta-information about the
    page translation, a MemoQ/WPML compatibility ``<header>``, and the
    ``<body>`` element.

    :param obj: The page translation object which is started
    :type obj: ~integreat_cms.cms.models.pages.page_translation.PageTranslation

    :raises ~django.core.serializers.base.SerializationError: If the source
        language cannot be determined (translation is in the default language)
    """
    source_language = obj.page.region.get_source_language(obj.language.slug)
    if not source_language:
        raise base.SerializationError(
            "The page translation is in the region's default language.")
    file_attrs = {
        "original": str(obj.page.id),
        "datatype": "plaintext",
        "source-language": source_language.slug,
        "target-language": obj.language.slug,
    }
    self.xml.startElement("file", file_attrs)
    # This header is required so the XLIFF file can be segmented with MemoQ's
    # WPML filter, yielding the same translation memory as the legacy export
    # via WordPress/WPML. See:
    # https://docs.memoq.com/current/en/Places/wpml-xliff-filter.html
    self.xml.startElement("header", {})
    self.xml.startElement("phase-group", {})
    phases = (
        ("shortcodes", "Shortcodes identification"),
        ("post_type", "Post type"),
    )
    for phase_name, process_name in phases:
        self.xml.addQuickElement(
            "phase",
            attrs={
                "phase-name": phase_name,
                "process-name": process_name,
            },
        )
    self.xml.endElement("phase-group")
    self.xml.endElement("header")
    self.xml.startElement("body", {})
def start_object(self, obj):
    """
    Start Object -- create a new sheet, if necessary.

    On first sight of a model, a sheet named after its label is created,
    activated, and given a styled header row of field names. The row cursor
    then advances and the primary key is written into column 1 (unless
    natural primary keys apply).
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError(
            "Non-model object (%s) encountered during serialization" % type(obj))
    sheet_name = obj._meta.label
    if sheet_name not in self.workbook.get_sheet_names():
        # Create sheet in the last position and make it active.
        self.workbook.active = self.workbook.create_sheet(sheet_name)
        ws = self.workbook[sheet_name]
        # Header row: one styled cell per model field.
        for index, field in enumerate(obj._meta.fields):
            header_cell = ws.cell(row=1, column=(index + 1), value=field.name)
            header_cell.font = HEADER_FORMAT['font']
            header_cell.fill = HEADER_FORMAT['fill']
            header_cell.alignment = HEADER_FORMAT['alignment']
        # Reset the cursor to the header row for this fresh sheet.
        self.current_row = 1
    # Advance to the next data row.
    self.current_row += 1
    # Add primary key:
    skip_pk = self.use_natural_primary_keys and hasattr(obj, 'natural_key')
    if not skip_pk:
        obj_pk = obj.pk
        if obj_pk is not None:
            self.workbook[sheet_name].cell(row=self.current_row,
                                           column=1,
                                           value=obj_pk)
def start_object(self, obj):
    """
    Begin a new record.

    On the first object, a DBF model is built lazily from the object's
    fields (fields without a DBF equivalent are skipped with a warning).
    Subsequent objects must belong to the same model. The current record
    buffer is then reset.
    """
    if self.Model is None:
        fieldspecs = {}
        for model_field in obj._meta.fields:
            field_name = model_field.name
            size = model_field.max_length or 10
            internal_type = model_field.get_internal_type()
            try:
                # Look up the DBF field class matching Django's internal type.
                fieldspecs[field_name] = getattr(fields, internal_type)(size=size)
            except AttributeError:
                warnings.warn("cannot encode %r field (%s)" % (field_name, internal_type))
        self.Model = models.makeModel(
            obj.__class__.__name__,
            dbname=smart_unicode(obj._meta) + '.dbf',
            stream=self.stream,
            fields=fieldspecs,
        )
    elif smart_unicode(obj._meta) != self.Model._meta.dbname[:-4]:
        raise base.SerializationError('different models')
    self._current = {}
def handle_field(self, obj, field):
    """
    Called to handle each field on an object (except for ForeignKeys and
    ManyToManyFields).

    Emits one XLIFF 2.0 ``<unit>`` containing a ``<segment>`` whose
    ``<source>`` holds the field value of the source translation and whose
    ``<target>`` holds this translation's value.

    :param obj: The page translation object which is handled
    :type obj: ~integreat_cms.cms.models.pages.page_translation.PageTranslation

    :param field: The model field
    :type field: ~django.db.models.Field

    :raises ~django.core.serializers.base.SerializationError: If the object
        has no source translation
    """
    logger.debug("XLIFF 2.0 serialization handling field %r of object %r", field, obj)
    unit_attrs = {
        "id": field.name,
        "resname": field.name,
        "restype": "string",
        "datatype": "html",
    }
    self.xml.startElement("unit", unit_attrs)
    self.xml.startElement("segment", {})
    self.xml.startElement("source", {})
    source_translation = obj.source_translation
    if not source_translation:
        raise base.SerializationError(
            f"Page translation {obj!r} does not have a source translation in "
            f"{self.source_language!r} and therefore cannot be serialized to XLIFF."
        )
    logger.debug("XLIFF 2.0 source translation %r", source_translation)
    self.xml.cdata(field.value_to_string(source_translation))
    self.xml.endElement("source")
    self.xml.startElement("target", {})
    self.xml.cdata(field.value_to_string(obj))
    self.xml.endElement("target")
    self.xml.endElement("segment")
    self.xml.endElement("unit")
def start_object(self, obj):
    """
    Prepare the worksheet for another record.

    On the first call (cursor still before row 3), writes the model label
    into A1 and appends a header row of serializable field names, caching
    field metadata and column positions. Always advances the row cursor.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError(
            "Non-model object (%s) encountered during serialization" % type(obj))
    # The first time this method is called, write the model object and field names
    if self.current_row < 3:
        self.ws['A1'] = obj._meta.label
        self.model_fields = {mf.name: mf for mf in obj._meta.fields if mf.serialize}
        self.mf_keys = list(self.model_fields)
        # Appends a new row with the values in the iterable.
        self.ws.append(self.mf_keys)
        self.field_positions = dict(zip(self.mf_keys, range(len(self.mf_keys))))
        self.num_fields = len(self.mf_keys)
    # Prepare for another row of field values.
    self.current_row += 1
def start_object(self, obj):
    """
    Called as each object is handled.

    Opens the per-model file block and a ``<group restype="row">`` element
    named after the model (and pk, when available). Primary keys that are
    not ``AutoField`` are serialized as an explicit field.
    """
    if not hasattr(obj, "_meta"):
        raise base.SerializationError(
            "Non-model object (%s) encountered during serialization" % type(obj))
    self.start_fileblock(obj)
    self.indent(2)
    obj_pk, keytype = self._get_obj_pk(obj)
    attrs = {"restype": "row", "d:keytype": keytype}
    if obj_pk is None:
        attrs["resname"] = smart_text(obj._meta)
    else:
        attrs["resname"] = "%s.%s" % (smart_text(obj._meta), obj_pk)
    self.xml.startElement("group", attrs)
    # AutoField pks stay implicit; any other pk type is written out.
    pk_field = obj._meta.pk
    if pk_field.__class__.__name__ != "AutoField":
        self.handle_field(obj, pk_field)
def verify_object_from_model_class(self, model_class, obj):
    """Raise a SerializationError if ``obj`` does not belong to ``model_class``."""
    if obj._meta.model != model_class:
        raise base.SerializationError(
            "Object {} given to a serialiser handling class {}".format(
                type(obj), model_class._meta.object_name))
def model_name(self, obj):
    """Return the ``app_label.model`` string for ``obj``, validating it is a model."""
    if hasattr(obj, "_meta"):
        return str(obj._meta)
    raise base.SerializationError("Non-model object (%s) encountered during serialization" % type(obj))
def verify_object_is_model(self, obj):
    """Raise a SerializationError unless ``obj`` looks like a Django model (has ``_meta``)."""
    if hasattr(obj, "_meta"):
        return
    raise base.SerializationError(
        "Non-model object (%s) encountered during serialization" % type(obj))
def sort_dependency_order(self, objects):
    '''
    Models in the fixture need to be in order, with the models that depend
    on other models AFTER those other models in the fixture, so they can be
    referred to by name, and also so that the fixture works properly w/ fks
    enabled.

    There is a very real possibility that we cannot determine an order due
    to circular dependencies. In that case, dumpdata will generate an error
    message, and the user will need to set PYFIXTURES_CIRCULAR_DEP_BREAKERS
    to decide which objects should be created using pks instead of
    references.

    :param objects: list of dicts, each with a 'model' key naming its model
    :raises base.SerializationError: if no dependency order exists
    :return: the same objects, topologically sorted by model dependencies
    '''
    sorted_objects = []
    model_deps, seen_models = [], []
    # Build the per-model dependency lists (fk and m2m targets).
    for obj in objects:
        model = self.model_classes.get(obj.get('model'))
        obj['_model'] = model  # cached for faster access below
        if model not in seen_models:
            deps = []
            for field in model._meta.fields:
                if hasattr(field.rel, 'to'):
                    deps.append(field.rel.to)
            for field in model._meta.many_to_many:
                deps.append(field.rel.to)
            if model.__name__ in getattr(
                    settings, 'PYFIXTURES_CIRCULAR_DEP_BREAKERS', []):
                # User-declared cycle breaker: pretend it has no deps.
                model_deps.append((model, []))
            else:
                # Deduplicate and drop self-references.
                model_deps.append(
                    (model, [d for d in set(deps) if d != model]))
            seen_models.append(model)
    # Repeatedly emit a model with no remaining dependencies (Kahn-style
    # topological sort over the model graph).
    found = True
    while model_deps and found:
        found = False
        for (model, deps) in model_deps:
            if deps == []:
                remove_model = model
                # Remove this model from the list altogether.
                model_deps = [(m, d) for (m, d) in model_deps
                              if m != remove_model]
                # Remove this model from the other models' dependency lists.
                model_deps = [(m, [d for d in dlist if d != remove_model])
                              for (m, dlist) in model_deps]
                # Drop dependencies for which there is no data in the fixture.
                top_level_deps = [m for (m, d) in model_deps]
                model_deps = [(m, [d for d in dlist if d in top_level_deps])
                              for (m, dlist) in model_deps]
                sorted_objects.extend(
                    [o for o in objects if o['_model'] == remove_model])
                found = True
                break
    if len(objects) != len(sorted_objects):
        # Fix: the original used a Python 2 print statement, which is a
        # syntax error on Python 3.
        for (model, deps) in model_deps:
            print(model.__name__, 'depends on', [d.__name__ for d in deps])
        raise base.SerializationError(
            'Could not sort objects in dependency order, is there a circular dependency?'
        )
    for obj in sorted_objects:
        del obj['_model']  # remove temp var
    return sorted_objects