class CategoryEditForm(AutoExtensibleForm, EditForm):
    """Edit form for a classification category element."""

    schema = IClassificationCategory

    @property
    def label(self):
        """Translated form title."""
        return _("Edit Classification Category")

    @property
    def redirect_url(self):
        """URL to return to once the form is saved or cancelled."""
        return self.context.absolute_url()

    def update_element(self, data):
        """Copy the submitted values onto the context element and persist it."""
        element = self.context
        for attr, val in data.items():
            setattr(element, attr, val)
        # The parent container is responsible for storing the updated element.
        aq_parent(self.context)._update_element(element)
        api.portal.show_message(_(u"Changes saved"), request=self.request)
        self.request.response.redirect(self.redirect_url)

    @button.buttonAndHandler(_(u"Save"), name="save")
    def handle_save(self, action):
        """Validate the submitted data and apply it to the element."""
        data, errors = self.extractData()
        if errors:
            self.status = self.formErrorsMessage
            return
        self.update_element(data)

    @button.buttonAndHandler(_(u"Cancel"), name="cancel")
    def handle_cancel(self, action):
        """Abort editing and go back to the element view."""
        self.request.response.redirect(self.redirect_url)
def validate_csv_data(obj, min_length=2):
    """Validate the uploaded CSV file: encoding and basic structure.

    :param obj: form data wrapper exposing ``_Data_data___`` with the
                ``source`` (file) and ``separator`` values.
    :param min_length: minimum number of columns each line must contain.
    :raises Invalid: if the file is not utf8, has fewer than ``min_length``
                     columns, or has lines of inconsistent length.
    :return: True when the file passes all checks.
    """
    source, separator = [
        obj._Data_data___.get(k) for k in ("source", "separator")
    ]
    with source.open() as f:
        try:
            f.read().decode("utf8")
        except UnicodeDecodeError:
            raise Invalid(_("File encoding is not utf8"))
    with source.open() as f:
        reader = csv.reader(f, delimiter=separator.encode("utf-8"))
        first_line = reader.next()
        # BUGFIX: the minimum column count was hard-coded to 2, making the
        # `min_length` parameter dead; it is now actually honoured.
        if len(first_line) < min_length:
            raise Invalid(
                _(
                    "CSV file must contains at least ${min} columns",
                    mapping={"min": str(min_length)},
                ))
        base_length = len(first_line)
        # Data lines start at 2 (1-based numbering, after the first line).
        wrong_lines = [
            str(i + 2) for i, v in enumerate(reader) if len(v) != base_length
        ]
        if wrong_lines:
            raise Invalid(
                _(
                    "Lines ${lines} does not contains the same number of element",
                    mapping={"lines": ", ".join(wrong_lines)},
                ))
    return True
class CategoryAddForm(AutoExtensibleForm, AddForm):
    """Add form creating a new classification category inside the context."""

    schema = IClassificationCategory
    ignoreContext = True

    @property
    def label(self):
        """Translated form title."""
        return _("Add Classification Category")

    def add_element(self, data):
        """Instantiate a category, fill it from the form data and store it."""
        element = createObject("ClassificationCategory")
        for attr, val in data.items():
            setattr(element, attr, val)
        self.context._add_element(element)
        api.portal.show_message(_(u"Category added"), request=self.request)
        self.request.response.redirect(
            "{0}/view".format(element.absolute_url()))

    @button.buttonAndHandler(_(u"Add"), name="add")
    def handle_add(self, action):
        """Validate the submitted data and create the new category."""
        data, errors = self.extractData()
        if errors:
            self.status = self.formErrorsMessage
            return
        self.add_element(data)

    @button.buttonAndHandler(_(u"Cancel"), name="cancel")
    def handle_cancel(self, action):
        """Abort and return to the container view."""
        self.request.response.redirect(self.context.absolute_url())
def csv_separator_vocabulary_factory(context):
    """Return the vocabulary of selectable CSV delimiter characters."""
    separators = (
        (u";", u";"),
        (u",", u","),
        (u"|", u"|"),
        # The stored value is a literal tab character, not a space.
        (u"\t", _(u"tab")),
        (u" ", _(u"whitespace")),
    )
    return iterable_to_vocabulary(separators)
def import_keys_vocabulary_factory(context):
    """Return the vocabulary mapping CSV columns to category attributes."""
    keys = (
        (u"parent_identifier", _(u"Parent Identifier")),
        (u"identifier", _(u"Identifier")),
        (u"title", _(u"Name")),
        (u"informations", _(u"Informations")),
        (u"enabled", _(u"Enabled")),
    )
    return iterable_to_vocabulary(keys)
def validate_csv_content(obj, annotation, required_columns, format_dic=None):
    """Verify csv content:
    * check if all required columns have values
    * check some columns format with re pattern {'identifier': pattern}

    :param obj: form data wrapper exposing ``_Data_data___``.
    :param annotation: stored import info (``separator``, ``has_header``,
                       ``source``).
    :param required_columns: column keys that must have a value on each line.
    :param format_dic: optional {column: regex} map of format constraints.
    :raises Invalid: on missing required values or format mismatches.
    :return: True when the content is valid (or no columns are mapped).
    """
    # BUGFIX: `format_dic={}` was a shared mutable default argument.
    if format_dic is None:
        format_dic = {}
    columns = {
        v: int(k.replace("column_", ""))
        for k, v in obj._Data_data___.items()
        if k.startswith("column_") and v
    }
    if not columns:
        # Validation of columns is made by another function
        return True
    separator = annotation["separator"]
    has_header = annotation["has_header"]
    source = annotation["source"]
    with source.open() as f:
        reader = csv.reader(f, delimiter=separator.encode("utf-8"))
        base_idx = 1
        if has_header:
            base_idx += 1
            reader.next()
        expected_length = len(required_columns)
        wrong_lines = []
        wrong_values = []
        for idx, line in enumerate(reader):
            if not getattr(obj, 'allow_empty', False):  # option only in tree import
                values = [
                    line[columns[n]] for n in required_columns
                    if line[columns[n]]
                ]
                if len(values) != expected_length:
                    wrong_lines.append(str(idx + base_idx))
            for col in format_dic:
                val = line[columns[col]]
                if not re.match(format_dic[col], val):
                    wrong_values.append("Line {}, col {}: '{}'".format(
                        idx + base_idx, columns[col] + 1, val))
    if wrong_lines:
        raise Invalid(
            _(
                "Lines ${lines} have missing required value(s)",
                mapping={"lines": ", ".join(wrong_lines)},
            ))
    if wrong_values:
        raise Invalid(
            _("Bad format values: ${errors}",
              mapping={'errors': ' || '.join(wrong_values)}))
    return True
def update_element(self, data):
    """Apply the submitted form values to the context element and notify."""
    element = self.context
    for attr, val in data.items():
        setattr(element, attr, val)
    # Persisting the updated element is delegated to the parent container.
    aq_parent(self.context)._update_element(element)
    api.portal.show_message(_(u"Changes saved"), request=self.request)
    self.request.response.redirect(self.redirect_url)
def _links(self):
    """Return the action links for this element.

    Users lacking the modify permission get no links at all.
    """
    if not api.user.has_permission("cmf.ModifyPortalContent"):
        return []
    base_url = self.context.absolute_url()
    edit_link = {
        "title": translate(_("Edit"), context=self.request),
        "link": "{0}/edit".format(base_url),
    }
    add_link = {
        "title": translate(_("Add"), context=self.request),
        "link": "{0}/add-{1}".format(base_url, self.context.portal_type),
    }
    return [edit_link, add_link]
def add_element(self, data):
    """Create a ClassificationCategory from form data and redirect to it."""
    element = createObject("ClassificationCategory")
    for attr, val in data.items():
        setattr(element, attr, val)
    self.context._add_element(element)
    api.portal.show_message(_(u"Category added"), request=self.request)
    self.request.response.redirect("{0}/view".format(element.absolute_url()))
def schema(self):
    """Generated schema based on csv file columns.

    Reads the stored CSV upload and builds, per column, a GeneratedChoice
    field named ``column_<idx>`` whose label is the header cell (or the
    1-based column number when there is no header) and whose description
    shows up to two sample values.  Returns a dynamically created
    InterfaceClass deriving from ``self.base_schema``.
    """
    first_line = []
    data_lines = []
    data = self._get_data()
    encoding = "utf-8"
    has_header = data["has_header"]
    with data["source"].open() as f:
        f.seek(0)
        # NOTE: Python 2 style csv usage (bytes delimiter, reader.next()).
        reader = csv.reader(f, delimiter=data["separator"].encode(encoding))
        first_line = reader.next()
        try:
            # Keep up to two rows as sample data for the field descriptions.
            for i in range(0, 2):
                data_lines.append(reader.next())
        except Exception:
            # Fewer than two data rows: the samples are simply shorter.
            pass
    fields = []
    for idx, element in enumerate(first_line):
        if has_header:
            # Use the header cell as the human-readable column name.
            name = element.decode(encoding)
        else:
            # No header: fall back to the 1-based column number.
            name = str(idx + 1)
        sample = u", ".join([
            u"'{0}'".format(ln[idx].decode(encoding)) for ln in data_lines
        ])
        fields.append(
            GeneratedChoice(
                title=_("Column ${name}", mapping={"name": name}),
                description=_("Sample data : ${data}",
                              mapping={"data": sample}),
                vocabulary=self._vocabulary,
                required=False,
            ))
    # Build the second-step schema on the fly, one field per CSV column.
    return InterfaceClass(
        "IImportSecondStep",
        attrs={
            "column_{0}".format(idx): field
            for idx, field in enumerate(fields)
        },
        bases=(self.base_schema, ),
    )
def _after_import(self):
    """Show the elapsed import time and send the user back to the context."""
    # Truncate (not round) the duration to two decimal places.
    elapsed = int((time() - self.begin) * 100) / 100.0
    api.portal.show_message(
        message=_(
            u"Import completed in ${duration} seconds",
            mapping={"duration": str(elapsed)},
        ),
        request=self.request,
    )
    self.request.response.redirect(self.context.absolute_url())
def __call__(self):
    """Serialize the context category to a JSON-compatible dict.

    Includes the element's URL, UID, main attributes, a translated
    Yes/No label for ``enabled`` and the action links.
    """
    obj = self.context
    # IDIOM FIX: conditional expression instead of the fragile
    # `cond and a or b` pattern (which breaks if `a` is falsy).
    enabled_label = (
        translate(_("Yes"), context=self.request)
        if obj.enabled
        else translate(_("No"), context=self.request)
    )
    result = {
        "@id": obj.absolute_url(),
        "UID": obj.UID(),
        "identifier": obj.identifier,
        "title": obj.title,
        "informations": obj.informations,
        "enabled": enabled_label,
        "links": self._links,
    }
    return result
class IImportFirstStep(model.Schema):
    """First step of the CSV import wizard: upload the file and
    describe its basic format (separator, header presence)."""

    # The CSV upload itself.
    source = NamedBlobFile(
        title=_(u"File"),
        description=_(u"CSV file that contains the classification tree"),
        required=True,
    )
    # Delimiter character, chosen from a dedicated vocabulary.
    separator = schema.Choice(
        title=_(u"CSV Separator"),
        description=_(u"Separator character to use"),
        vocabulary="collective.classification.vocabularies:csv_separator",
        required=True,
    )
    # Whether the first line of the file is a header row.
    has_header = schema.Bool(
        title=_(u"Include CSV header"),
        description=_(u"The CSV file contains an header row"),
        default=True,
        required=False,
    )

    @invariant
    def validate_csv_data(obj):
        # Delegate full file validation (encoding, column counts) to utils.
        return utils.validate_csv_data(obj)
class IImportSecondStepBase(Interface):
    """Base schema for the second import step: column mapping checks
    plus the import options shared by the generated schemas."""

    @invariant
    def validate_columns(obj):
        # At minimum the "identifier" column must be mapped.
        return utils.validate_csv_columns(obj, ("identifier", ))

    @invariant
    def validate_data(obj):
        # Validate the CSV content against the chosen column mapping;
        # the upload info was stored in annotations by the first step.
        annotations = IAnnotations(obj.__context__)
        format_dic = {}
        if obj._Data_data___.get('decimal_import', False):
            format_dic = {
                'identifier': r'(-?[./\d]+|( *, *)*)+$'
            }  # decimal format validation with multiple values
        return utils.validate_csv_content(
            obj,
            annotations[ANNOTATION_KEY],
            ("identifier", ),
            format_dic,
        )

    # Import options (rendered as generated boolean fields).
    decimal_import = GeneratedBool(
        title=_(u"Identifier are decimal codes"),
        default=True,
        required=False,
    )
    allow_empty = GeneratedBool(
        title=_(u"Allow empty column value"),
        default=False,
        required=False,
    )
    replace_slash = GeneratedBool(
        title=_(u"Replace slash in title"),
        default=True,
        required=False,
    )
def validate_csv_columns(obj, required_columns):
    """Verify that all required columns are present.

    :param obj: form data wrapper exposing ``_Data_data___``.
    :param required_columns: iterable of column keys that must be mapped.
    :raises Invalid: when at least one required column is missing.
    :return: True when every required column is mapped.
    """
    columns = [
        v for k, v in obj._Data_data___.items() if k.startswith("column_")
    ]
    # IDIOM: comprehension + truthiness test instead of append loop + len() > 0.
    missing_columns = [
        column for column in required_columns if column not in columns
    ]
    if missing_columns:
        raise Invalid(
            _(
                "The following required columns are missing: ${columns}",
                mapping={"columns": ", ".join(missing_columns)},
            ))
    return True
def category_deleted(obj, event):
    """Event handler: block deletion of a category that is still referenced.

    If any content references the category, warn the user and redirect
    back to the category view instead of deleting.
    """
    obj_uid = api.content.get_uuid(obj)
    try:
        linked_content = api.content.find(classification_categories=obj_uid)
    except api.exc.CannotGetPortalError:
        # This happen when we try to remove plone object
        return
    if not linked_content:
        # Nothing references the category: allow the deletion to proceed.
        return
    api.portal.show_message(
        message=_(
            "cannot_delete_referenced_category",
            default="This category cannot be deleted because it is referenced elsewhere",
        ),
        request=obj.REQUEST,
        type="warning",
    )
    context_state = getMultiAdapter((obj, obj.REQUEST),
                                    name=u"plone_context_state")
    raise Redirect(context_state.view_url())
class ImportFormFirstStep(BaseForm):
    """First wizard form: store the validated upload in annotations and
    hand over to the import-process view."""

    schema = IImportFirstStep
    ignoreContext = True

    def _set_data(self, data):
        """Persist the validated form values on the context annotations."""
        annotation = IAnnotations(self.context)
        stored = PersistentDict()
        for key, value in data.items():
            stored[key] = value
        annotation[ANNOTATION_KEY] = stored

    @button.buttonAndHandler(_(u"Continue"), name="continue")
    def handleApply(self, action):
        """Validate and store the data, then move to the processing view."""
        data, errors = self.extractData()
        if errors:
            self.status = self.formErrorsMessage
            return
        self._set_data(data)
        self.request.response.redirect(
            u"{0}/@@import-process".format(self.context.absolute_url()))
class IClassificationCategory(Interface):
    """Schema describing a single classification category element."""

    # Unique (business) identifier of the category.
    identifier = schema.TextLine(
        title=_(u"Identifier"),
        description=_("Identifier of the category"),
        required=True,
    )
    # Human-readable name.
    title = schema.TextLine(title=_(u"Name"),
                            description=_("Name of the category"),
                            required=True)
    # Render `enabled` as radio buttons rather than a checkbox.
    directives.widget('enabled', RadioFieldWidget)
    enabled = schema.Bool(
        title=_(u'Enabled'),
        default=True,
        required=False,
    )
    # Free-form additional information.
    informations = schema.TextLine(title=_(u"Informations"), required=False)
def label(self):
    """Translated title displayed on the add form."""
    return _("Add Classification Category")
class BaseImportFormSecondStep(BaseForm):
    """Baseclass for import form.

    Generates a column-mapping schema from the uploaded CSV and runs the
    import; subclasses provide the vocabulary and the processing hooks.
    """

    ignoreContext = False
    base_schema = IImportSecondStepBase

    @property
    def _vocabulary(self):
        raise NotImplementedError("_vocabulary must be defined by subclass")

    @property
    def schema(self):
        """Generated schema based on csv file columns."""
        first_line = []
        data_lines = []
        data = self._get_data()
        encoding = "utf-8"
        has_header = data["has_header"]
        with data["source"].open() as f:
            f.seek(0)
            reader = csv.reader(f,
                                delimiter=data["separator"].encode(encoding))
            first_line = reader.next()
            try:
                # Keep up to two rows as sample data for the descriptions.
                for i in range(0, 2):
                    data_lines.append(reader.next())
            except Exception:
                # Fewer than two data rows: samples are simply shorter.
                pass
        fields = []
        for idx, element in enumerate(first_line):
            if has_header:
                # Use the header cell as the column label.
                name = element.decode(encoding)
            else:
                # No header: fall back to the 1-based column number.
                name = str(idx + 1)
            sample = u", ".join([
                u"'{0}'".format(ln[idx].decode(encoding)) for ln in data_lines
            ])
            fields.append(
                GeneratedChoice(
                    title=_("Column ${name}", mapping={"name": name}),
                    description=_("Sample data : ${data}",
                                  mapping={"data": sample}),
                    vocabulary=self._vocabulary,
                    required=False,
                ))
        # Build the second-step schema on the fly, one field per CSV column.
        return InterfaceClass(
            "IImportSecondStep",
            attrs={
                "column_{0}".format(idx): field
                for idx, field in enumerate(fields)
            },
            bases=(self.base_schema, ),
        )

    def _get_data(self):
        """Return the import info stored on the context by the first step."""
        annotation = IAnnotations(self.context)
        return annotation[ANNOTATION_KEY]

    def _process_data(self, data):
        """Return a list of dict containing object keys and a special key
        `_children` for hierarchy"""
        raise NotImplementedError("_process_data must be defined by subclass")

    def _process_csv(self, csv_reader, mapping, encoding, import_data):
        """Return a dict with every elements"""
        raise NotImplementedError("_process_csv must be defined by subclass")

    def _import_node(self, node):
        """Import a node (element with is children)"""
        raise NotImplementedError("_import_node must be defined by subclass")

    def _before_import(self):
        """Method that is called before import process"""

    def _after_import(self):
        """Method that is called after import process"""

    def _import(self, data):
        """Run the import: split options from the column mapping, then walk
        the CSV and import each processed node."""
        self._before_import()
        # e.g. {'source': <NamedBlobFile ...>, 'has_header': True,
        #       'separator': u';'}
        import_data = self._get_data()
        # Snapshot the keys before popping; a plain list copy is enough
        # (the original copy.deepcopy of the key list was unnecessary).
        kwargs = {
            k: data.pop(k)
            for k in list(data.keys()) if not k.startswith("column_")
        }
        # Map column index -> attribute name, skipping unmapped columns.
        mapping = {
            int(k.replace("column_", "")): v
            for k, v in data.items() if v
        }
        encoding = "utf-8"
        data = []
        with import_data["source"].open() as f:
            delimiter = import_data["separator"].encode(encoding)
            has_header = import_data["has_header"]
            f.seek(0)
            reader = csv.reader(f, delimiter=delimiter)
            if has_header:
                # Skip the header row.
                reader.next()
            data = self._process_csv(reader, mapping, encoding, import_data,
                                     **kwargs)
        for node in self._process_data(data):
            self._import_node(node)
        self._after_import()

    @button.buttonAndHandler(_(u"Import"), name="import")
    def handleApply(self, action):
        """Validate the mapping form and launch the import."""
        data, errors = self.extractData()
        if errors:
            self.status = self.formErrorsMessage
            return
        self._import(data)