def filter_batch_on_properties(self, batch):
    """Sort and filter ``batch`` in place according to request parameters.

    Sorting: when ``self.sort_on`` names one of the filterable property
    fields (and, with its 5-char prefix stripped, is not in ``self.utk``),
    the batch is sorted on that attribute, descending when
    ``self.sort_dir == "desc"``.

    Filtering: for each field in ``self.filter_property_fields`` with a
    ``filter_<name>`` request value, date/datetime properties are range
    filtered via ``get_date_strings``/``string_to_date`` and text
    properties are substring filtered.

    :param batch: mutable sequence of objects (sorted in place).
    :returns: the (possibly re-bound, filtered) batch list.
    """
    reverse = True if (self.sort_dir == "desc") else False
    if self.sort_on in self.filter_property_fields:
        sort_on = self.sort_on[5:]  # strip the 5-char field-name prefix
        if sort_on not in self.utk:
            batch.sort(key=lambda x: getattr(x, sort_on), reverse=reverse)
    for field_name in self.filter_property_fields:
        md_field = self.domain_annotation.get(field_name)
        ff_name = "filter_%s" % (field_name)
        ff = self.request.get(ff_name, None)
        if ff and md_field:
            if (IDate.providedBy(md_field.property) or
                    IDatetime.providedBy(md_field.property)):
                start_date_str, end_date_str = get_date_strings(ff)
                start_date = string_to_date(start_date_str)
                end_date = string_to_date(end_date_str)
                if start_date:
                    batch = [
                        x for x in batch
                        if (getattr(x, field_name) and
                            getattr(x, field_name).date() >= start_date)
                    ]
                if end_date:
                    batch = [
                        x for x in batch
                        if (getattr(x, field_name) and
                            getattr(x, field_name).date() <= end_date)
                    ]
            elif IText.providedBy(md_field.property):
                # BUG FIX: guard against unset (None) attribute values,
                # mirroring the date branches above; previously
                # `ff in None` raised TypeError.
                batch = [
                    x for x in batch
                    if getattr(x, field_name) and ff in getattr(x, field_name)
                ]
    return batch
def migrate_saved_data(ploneformgen, easyform):
    """Copy saved form-input rows from every PloneFormGen
    ``FormSaveDataAdapter`` in ``ploneformgen`` to the matching save-data
    action on ``easyform``.

    Rows whose column count does not match the adapter's column names are
    logged and skipped.  Cell values are decoded from UTF-8 and coerced
    per schema field type; blob-file fields cannot be reconstructed from
    the saved text and become ``None``.
    """
    # Hoisted loop invariants: the actions mapping and schema belong to
    # the easyform target, not to any individual data adapter.
    actions = get_actions(easyform)
    schema = get_schema(easyform)
    for data_adapter in ploneformgen.objectValues('FormSaveDataAdapter'):
        action = actions.get(data_adapter.getId())
        if not ISaveData.providedBy(action):
            continue
        cols = data_adapter.getColumnNames()
        for idx, row in enumerate(data_adapter.getSavedFormInput()):
            if len(row) != len(cols):
                logger.warning(
                    'Number of columns does not match. Skipping row %s in '
                    'data adapter %s/%s', idx,
                    '/'.join(easyform.getPhysicalPath()),
                    data_adapter.getId())
                continue
            data = {}
            for key, value in zip(cols, row):
                field = schema.get(key)
                value = value.decode('utf8')
                if IFromUnicode.providedBy(field):
                    value = field.fromUnicode(value)
                elif IDatetime.providedBy(field) and value:
                    value = DateTime(value).asdatetime()
                elif IDate.providedBy(field) and value:
                    value = DateTime(value).asdatetime().date()
                elif ISet.providedBy(field):
                    # Saved sets are repr() strings; best-effort parse.
                    try:
                        value = set(literal_eval(value))
                    except ValueError:
                        pass
                elif INamedBlobFileField.providedBy(field):
                    # Blob payloads are not recoverable from saved text.
                    value = None
                data[key] = value
            action.addDataRow(data)
def get_collection_schema_from_interface_schema(self, schema):
    """Map each schema field name to a coarse collection type label.

    Returns a dict keyed by ``'field.<name>'`` with values such as
    ``'time'``, ``'number'``, ``'bool'``, ``'array'``, ``'cidr'`` or
    ``'string'``; fields matching none of the checks are omitted.
    """
    collection = {}
    for name in schema:
        field = schema[name]
        key = 'field.' + name
        if IDate.providedBy(field) or IDatetime.providedBy(field):
            collection[key] = 'time'
        elif (IDecimal.providedBy(field) or IFloat.providedBy(field) or
                IInt.providedBy(field)):
            collection[key] = 'number'
        elif IBool.providedBy(field):
            collection[key] = 'bool'
        elif ICollection.providedBy(field):
            value_type = field.value_type
            # Nested collections / dicts are not representable as arrays.
            if (not ICollection.providedBy(value_type) and
                    not IDict.providedBy(value_type)):
                collection[key] = 'array'
        elif IDict.providedBy(field):
            if (IText.providedBy(field.key_type) and
                    IText.providedBy(field.value_type)):
                collection[key] = 'array'
        # this is a pretty weak check for a IP address field. We might want
        # to update this to look for a field validator based on the
        # ipaddress package or mark this field with a special interface
        # indicating it is an IP address
        elif (IDottedName.providedBy(field) and
                field.min_dots == field.max_dots == 3):
            collection[key] = 'cidr'
        elif IText.providedBy(field) or INativeString.providedBy(field):
            collection[key] = 'string'
    return collection
def filter_batch_on_properties(self, batch):
    """Sort ``batch`` on the requested property, then narrow it by any
    ``filter_<field>`` request parameters (date ranges for date/datetime
    properties, substring match for text properties) and return it."""
    descending = self.sort_dir == "desc"
    if self.sort_on in self.filter_property_fields:
        attr = self.sort_on[5:]
        if attr not in self.utk:
            batch.sort(key=lambda item: getattr(item, attr),
                       reverse=descending)
    for field_name in self.filter_property_fields:
        md_field = self.domain_annotation.get(field_name)
        filter_value = self.request.get("filter_%s" % (field_name), None)
        if not (filter_value and md_field):
            continue
        prop = md_field.property
        if IDate.providedBy(prop) or IDatetime.providedBy(prop):
            start_str, end_str = get_date_strings(filter_value)
            start = string_to_date(start_str)
            end = string_to_date(end_str)
            if start:
                batch = [item for item in batch
                         if (getattr(item, field_name) and
                             getattr(item, field_name).date() >= start)]
            if end:
                batch = [item for item in batch
                         if (getattr(item, field_name) and
                             getattr(item, field_name).date() <= end)]
        elif IText.providedBy(prop):
            batch = [item for item in batch
                     if filter_value in getattr(item, field_name)]
    return batch
def datagridInitialise(self, subform, widget):
    """Swap the widget factory of every date field on ``subform`` to
    TypeADateFieldWidget, doing each subform at most once."""
    if not hasattr(self, '_widgets_initialized'):
        self._widgets_initialized = []
    if subform in self._widgets_initialized:
        return  # don't duplicate effort!
    for formfield in subform.fields.values():
        if IDate.providedBy(formfield.field):
            formfield.widgetFactory = TypeADateFieldWidget
    self._widgets_initialized.append(subform)
def datagridInitialise(self, subform, widget):
    """Install TypeADateFieldWidget on all date fields of ``subform``;
    subforms already processed (tracked on ``self``) are skipped."""
    if not hasattr(self, '_widgets_initialized'):
        self._widgets_initialized = []
    seen = self._widgets_initialized
    if subform not in seen:  # don't duplicate effort!
        dated = [ff for ff in subform.fields.values()
                 if IDate.providedBy(ff.field)]
        for ff in dated:
            ff.widgetFactory = TypeADateFieldWidget
        seen.append(subform)
def __init__(self, field, index=None, dialect='csv', choices=None):
    """Column wrapper for a schema field.

    :param field: the zope.schema field being serialized.
    :param index: optional positional index of the column.
    :param dialect: output dialect name (default ``'csv'``).
    :param choices: optional subset of values; when the field is
        multi-valued these are ordered by the vocabulary order.
    """
    self.field = field
    self.index = index
    self.multiple = self._is_multiple()
    if self.multiple:
        self.sortspec = [t.value for t in field.value_type.vocabulary]
        if choices:
            # BUG FIX: `key=self.sortspec.index` raised ValueError for any
            # choice missing from the vocabulary; tolerate unknown values
            # by sorting them to the end (mirrors the safeindex variant
            # used elsewhere in this file).
            vocab_order = self.sortspec

            def _order(v, _missing=len(vocab_order)):
                try:
                    return vocab_order.index(v)
                except ValueError:
                    return _missing

            self.sortspec = sorted(choices, key=_order)
    self.name = self._name()
    self.title = self._title()
    self.dialect = dialect
    self.isdate = IDate.providedBy(field) or IDatetime.providedBy(field)
def __init__(self, field, index=None, dialect='csv', choices=None):
    """Column wrapper for a schema field; multi-valued fields get a sort
    specification ordered by vocabulary position (via ``safeindex``)."""
    self.field = field
    self.index = index
    self.multiple = self._is_multiple()
    if self.multiple:
        vocab_values = [term.value for term in field.value_type.vocabulary]
        self.sortspec = vocab_values
        if choices:
            self.sortspec = sorted(
                choices,
                key=lambda v: safeindex(v, vocab_values),
            )
    self.name = self._name()
    self.title = self._title()
    self.dialect = dialect
    self.isdate = IDate.providedBy(field) or IDatetime.providedBy(field)
def _populate_record(self, entry, data):
    """Write form ``data`` onto the record ``entry`` field by field, then
    notify an ObjectModifiedEvent listing the attributes that changed.

    Values are normalized per field type before assignment; empty values
    unset the stored attribute.  Python 2 code (``basestring``,
    ``unicode``).
    """
    changelog = []  # names of fields whose stored value actually changed
    schema = entry.schema
    for name, _field in getFieldsInOrder(schema):
        if IDate.providedBy(_field):
            # Date fields get dedicated normalization from the raw data.
            v = self._normalize_date_value(_field, data)
            if v is not None:
                _field.validate(v)
            # no enforcement of required here.
            setattr(entry, name, v)  # new value is possibly empty
            continue
        if name in data:
            value = data.get(name, None)
            if value in (u'true', u'false') and IBool.providedBy(_field):
                value = True if value == 'true' else False  # radio widget
            if value == NOVALUE:
                value = None
            cast_type = field_type(_field)
            if cast_type:
                if cast_type is int and isinstance(value, basestring):
                    # Strip thousands separators before the int() cast.
                    value = value.replace(',', '')
                if cast_type is unicode and isinstance(value, str):
                    value = value.decode('utf-8')
                elif (cast_type is datetime and
                        isinstance(value, basestring)):
                    # Reuse the z3c.form converter for datetime parsing.
                    fn = converter.DatetimeDataConverter(
                        _field, TEXT_WIDGET)
                    value = fn.toFieldValue(unicode(value))
                else:
                    # Best-effort cast; keep the raw value on failure.
                    try:
                        value = cast_type(value)
                    except (ValueError, TypeError):
                        pass
            if value not in (None, ''):
                _field.validate(value)
                existing_value = getattr(entry, name, None)
                if value != existing_value:
                    changelog.append(name)
                setattr(entry, name, value)
            else:
                # empty -> possible unset of previously set value in form?
                setattr(entry, name, None)
            entry._p_changed = True  # in case of collection fields
    if changelog:
        changelog = [Attributes(schema, name) for name in changelog]
        notify(ObjectModifiedEvent(entry, *changelog))
def _populate_record(self, entry, data):
    """Write form ``data`` onto the record ``entry`` field by field, then
    notify an ObjectModifiedEvent listing the attributes that changed.

    Values are normalized per field type before assignment; empty values
    unset the stored attribute.  Python 2 code (``basestring``,
    ``unicode``).
    """
    changelog = []  # names of fields whose stored value actually changed
    schema = entry.schema
    for name, _field in getFieldsInOrder(schema):
        if IDate.providedBy(_field):
            # Date fields get dedicated normalization from the raw data.
            v = self._normalize_date_value(_field, data)
            if v is not None:
                _field.validate(v)
            # no enforcement of required here.
            setattr(entry, name, v)  # new value is possibly empty
            continue
        if name in data:
            value = data.get(name, None)
            if value in (u'true', u'false') and IBool.providedBy(_field):
                value = True if value == 'true' else False  # radio widget
            if value == NOVALUE:
                value = None
            cast_type = field_type(_field)
            if cast_type:
                if cast_type is int and isinstance(value, basestring):
                    # Strip thousands separators before the int() cast.
                    value = value.replace(',', '')
                if cast_type is unicode and isinstance(value, str):
                    value = value.decode('utf-8')
                elif (cast_type is datetime and
                        isinstance(value, basestring)):
                    # Reuse the z3c.form converter for datetime parsing.
                    fn = converter.DatetimeDataConverter(_field, TEXT_WIDGET)
                    value = fn.toFieldValue(unicode(value))
                else:
                    # Best-effort cast; keep the raw value on failure.
                    try:
                        value = cast_type(value)
                    except (ValueError, TypeError):
                        pass
            if value not in (None, ''):
                _field.validate(value)
                existing_value = getattr(entry, name, None)
                if value != existing_value:
                    changelog.append(name)
                setattr(entry, name, value)
            else:
                # empty -> possible unset of previously set value in form?
                setattr(entry, name, None)
            entry._p_changed = True  # in case of collection fields
    if changelog:
        changelog = [Attributes(schema, name) for name in changelog]
        notify(ObjectModifiedEvent(entry, *changelog))
def common_widget_updates(context):
    """
    Given a context, update field widgets for it.  Context may be any
    z3c.form instance or a field group contained within.

    Small choice vocabularies (<= 6 terms) become radio buttons,
    multi-choice collections (<= 16 terms) become checkboxes, date fields
    get TypeADateFieldWidget and booleans become radios.
    """
    def _value_type(formfield):
        return getattr(formfield.field, 'value_type', None)

    def _sized_iterable(v):
        return hasattr(v, '__len__') and hasattr(v, '__iter__')

    def _is_choice(formfield):
        return IChoice.providedBy(formfield.field)

    def _is_multi(formfield):
        return (ICollection.providedBy(formfield.field) and
                IChoice.providedBy(_value_type(formfield)))

    formfields = context.fields.values()

    for formfield in [f for f in formfields if _is_choice(f)]:
        vocab = formfield.field.vocabulary
        if _sized_iterable(vocab) and len(vocab) <= 6:
            formfield.widgetFactory = RadioFieldWidget
    for formfield in [f for f in formfields if _is_multi(f)]:
        vocab = formfield.field.value_type.vocabulary
        if _sized_iterable(vocab) and len(vocab) <= 16:
            formfield.widgetFactory = CheckBoxFieldWidget
    for formfield in [f for f in formfields
                      if IDate.providedBy(f.field)]:
        formfield.widgetFactory = TypeADateFieldWidget
    for formfield in [f for f in formfields
                      if IBool.providedBy(f.field)]:
        formfield.widgetFactory = RadioFieldWidget
def common_widget_updates(context):
    """
    Given a context, update field widgets for it.  Context may be any
    z3c.form instance or a field group contained within.

    Small choice vocabularies (<= 3 terms) become radio buttons,
    multi-choice collections (<= 16 terms) become checkboxes, date fields
    get TypeADateFieldWidget and booleans become radios.
    """
    def _value_type(ff):
        return getattr(ff.field, 'value_type', None)

    def _sized_iterable(v):
        return hasattr(v, '__len__') and hasattr(v, '__iter__')

    formfields = context.fields.values()

    # Choice fields with tiny vocabularies render as radio buttons.
    for ff in [f for f in formfields if IChoice.providedBy(f.field)]:
        vocab = ff.field.vocabulary
        if _sized_iterable(vocab) and len(vocab) <= 3:
            ff.widgetFactory = RadioFieldWidget
    # Collections of choices render as checkbox groups.
    for ff in [f for f in formfields
               if ICollection.providedBy(f.field) and
               IChoice.providedBy(_value_type(f))]:
        vocab = ff.field.value_type.vocabulary
        if _sized_iterable(vocab) and len(vocab) <= 16:
            ff.widgetFactory = CheckBoxFieldWidget
    for ff in [f for f in formfields if IDate.providedBy(f.field)]:
        ff.widgetFactory = TypeADateFieldWidget
    for ff in [f for f in formfields if IBool.providedBy(f.field)]:
        ff.widgetFactory = RadioFieldWidget
def update(self):
    """Build ``self.data``: a sorted list of (field title, display value)
    pairs for the attributes recorded on the modification event."""
    super(TaskModifiedTemplate, self).update()
    task = self.context
    # NOTE(review): `context0` is presumably the modification event whose
    # `descriptions` carry the changed-attribute records -- confirm this
    # unusual attribute name against the template machinery.
    ev = self.context0
    request = self.request
    data = {}
    # Map each interface to the list of its modified attribute names.
    attributes = dict([(attr.interface, list(attr.attributes))
                       for attr in ev.descriptions])
    for iface, fields in attributes.items():
        ob = iface(task)
        for fieldId in fields:
            field = iface[fieldId].bind(ob)
            value = field.get(ob)
            if IChoice.providedBy(field):
                # Show the vocabulary term title instead of the raw value;
                # fall back to the raw value on lookup failure.
                try:
                    value = field.vocabulary.getTerm(value).title
                except LookupError:
                    pass
            if ICollection.providedBy(field) and \
                    IChoice.providedBy(field.value_type):
                voc = field.value_type.vocabulary
                value = u", ".join([voc.getTerm(v).title for v in value])
            if IDate.providedBy(field):
                value = getFormatter(request, "date", "full").format(value)
            if IDatetime.providedBy(field):
                value = getFormatter(request, "dateTime",
                                     "medium").format(value)
            data[field.title] = value
    # Python 2: dict.items() returns a list which is sorted in place.
    data = data.items()
    data.sort()
    self.data = data
def get_export_data(
    self,
    portal_type,
    blob_format,
    richtext_format,
    blacklist,
    whitelist,
    query,
):
    """Return a list of dicts with a dict for each object.

    The key is the name of the field/value and the value the value.
    """
    all_fields = get_schema_info(portal_type, blacklist, whitelist)
    results = []
    catalog = api.portal.get_tool('portal_catalog')
    if not query:
        query = dict()
    query['portal_type'] = portal_type
    # Include all translations when a Language index is available.
    if 'Language' not in query and HAS_MULTILINGUAL and \
            'Language' in catalog.indexes():
        query['Language'] = 'all'
    # Unrestricted search: exports objects regardless of view permission.
    brains = catalog.unrestrictedSearchResults(query)
    for brain in brains:
        obj = brain.getObject()
        item_dict = dict()
        for fieldname, field in all_fields:
            if fieldname in self.ADDITIONAL_MAPPING:
                # The way to access the value from this fields is
                # overridden in ADDITIONAL_MAPPING
                continue
            try:
                value = field.get(field.interface(obj))
            except:
                # NOTE(review): bare except swallows every error including
                # KeyboardInterrupt/SystemExit; consider `except Exception`.
                print("Skipping object at {0}".format(obj.absolute_url()))
                break
            if not value:
                # set a value anyway to keep the dimensions of all
                value = ''
                # make sure we do no more transforms
                field = None
            if IRichTextValue.providedBy(value):
                value = transform_richtext(value, mimetype=richtext_format)
            if IRelationList.providedBy(field):
                rel_val = []
                for relation in value:
                    rel_val.append(get_url_for_relation(relation))
                value = pretty_join(rel_val)
            if IRelationChoice.providedBy(field):
                value = get_url_for_relation(value)
            if INamed.providedBy(value):
                # Blob values are exported as download URLs.
                value = get_blob_url(value, brain, blob_format, fieldname)
            if ICollection.providedBy(field):
                r = []
                for v in value:
                    if INamed.providedBy(v):
                        r.append(u'{0}/@@download/{1}'.format(
                            obj.absolute_url(), fieldname))
                        # r.append(base64.b64encode(v.data))
                    else:
                        r.append(v)
                value = r
            if IDatetime.providedBy(field) or IDate.providedBy(field):
                # Repair literally-stored two-digit years: values below 16
                # map to 20xx, the rest to 19xx.
                if value.year < 1000:
                    if value.year < 16:
                        year = value.year + 2000
                    else:
                        year = value.year + 1900
                    if IDate.providedBy(field):
                        value = datetime.date(
                            month=value.month, day=value.day, year=year)
                    elif IDatetime.providedBy(field):
                        value = datetime.datetime(
                            month=value.month, day=value.day, year=year,
                            hour=value.hour, minute=value.minute,
                            second=value.second)
                value = api.portal.get_localized_time(
                    value, long_format=True)
            if safe_callable(value):
                value = value()
            if isinstance(value, list) or isinstance(value, tuple):
                value = pretty_join(value)
            item_dict[fieldname] = value
        else:
            # Update the data with additional info or overridden getters
            item_dict.update(self.additional_data(obj, blacklist))
            results.append(item_dict)
            continue  # executed if the loop ended normally (no break)
        break  # executed if 'continue' was skipped (break)
        # NOTE(review): this `break` aborts the WHOLE export after the
        # first object whose field lookup fails -- confirm intended.
    return results
def get_attribute_values(request, record, attribute_map):
    """Convert one import ``record`` (header -> raw cell string) into a
    dict of field-name -> parsed value using ``attribute_map``
    (header -> schema field).

    Raises ContentImportError for unknown vocabulary terms or values that
    fail validation.  Python 2 code (``basestring``, ``except X, e``).
    """
    values = {}
    vocabularies = get_vocabularies(request, attribute_map)
    for header, field in attribute_map.items():
        # Downloadable fields: `False` means "not handled as a download",
        # any other result is the fetched payload.
        downloaded = download_field_from_url(field, record[header])
        if downloaded is not False:
            values[field.__name__] = downloaded
            continue
        if IDate.providedBy(field):
            if not record[header]:
                values[field.__name__] = None
            else:
                values[field.__name__] = parse_date(record[header])
            continue
        if IDatetime.providedBy(field):
            if not record[header]:
                values[field.__name__] = None
            else:
                values[field.__name__] = parse_datetime(record[header])
            continue
        if IURI.providedBy(field):
            # Blank URIs become None; non-blank ones fall through to the
            # generic fromUnicode handling below.
            if not record[header].strip():
                values[field.__name__] = None
                continue
        if IList.providedBy(field):
            # Only lists of text lines are split into items; other list
            # types fall through to fromUnicode.
            if ITextLine.providedBy(field.value_type):
                values[field.__name__] = convert_to_list(record[header])
                continue
        if ISet.providedBy(field):
            if IChoice.providedBy(field.value_type):
                values[field.__name__] = set(convert_to_list(record[header]))
                continue
        if IChoice.providedBy(field):
            if not record[header].strip():
                values[field.__name__] = None
            else:
                # Lookup is case-insensitive via lower-cased keys.
                vocabulary = vocabularies[header]
                if record[header].lower() not in vocabulary:
                    raise ContentImportError(
                        _(
                            u'The ${name} column contains the '
                            u'unknown value ${value}',
                            mapping=dict(name=header, value=record[header])
                        )
                    )
                values[field.__name__] = vocabulary[record[header].lower()]
            continue
        # Fallback: the field must know how to parse unicode input.
        assert IFromUnicode.providedBy(field), """
        {} does not support fromUnicode
        """.format(field)
        try:
            values[field.__name__] = field.fromUnicode(record[header])
            if isinstance(values[field.__name__], basestring):
                values[field.__name__] = values[field.__name__].strip()
            if isinstance(field, Text):
                # Convert literal '<br />' markers back to newlines.
                values[field.__name__] = values[field.__name__].replace(
                    '<br />', '\n'
                )
        except ValidationError, e:
            raise ContentImportError(e.doc(), colname=header)
        except ValueError, e:
            raise ContentImportError(e.message, colname=header)
    # NOTE(review): no `return values` is visible in this chunk -- confirm
    # against the full file whether the result dict is returned.
def get_export_data(
    self,
    portal_type,
    blob_format,
    richtext_format,
    blacklist,
    whitelist,
    query,
):
    """Return a list of dicts, one per catalog result of ``portal_type``.

    Each dict maps field names to export-ready values: rich text is
    transformed, relations become URLs, blobs become download URLs,
    dates are localized and sequences are joined.
    """
    all_fields = get_schema_info(portal_type, blacklist, whitelist)
    catalog = api.portal.get_tool('portal_catalog')
    if not query:
        query = dict()
    query['portal_type'] = portal_type
    # Export every translation when a Language index exists.
    if 'Language' not in query and HAS_MULTILINGUAL and \
            'Language' in catalog.indexes():
        query['Language'] = 'all'
    # Restrict the search to the current context by default.
    if 'path' not in query:
        query['path'] = {}
        query['path']['query'] = '/'.join(self.context.getPhysicalPath())
    results = []
    for brain in catalog(query):
        obj = brain.getObject()
        item_dict = dict()
        for fieldname, field in all_fields:
            # Fields listed in ADDITIONAL_MAPPING are handled by
            # additional_data() below.
            if fieldname in self.ADDITIONAL_MAPPING:
                continue
            value = field.get(field.interface(obj))
            if not value:
                value = ''   # keep row dimensions identical
                field = None  # suppress all further field-based transforms
            if IRichTextValue.providedBy(value):
                value = transform_richtext(value, mimetype=richtext_format)
            if IRelationList.providedBy(field):
                value = pretty_join(
                    [get_url_for_relation(rel) for rel in value])
            if IRelationChoice.providedBy(field):
                value = get_url_for_relation(value)
            if INamed.providedBy(value):
                value = get_blob_url(value, brain, blob_format, fieldname)
            if IDatetime.providedBy(field) or IDate.providedBy(field):
                value = api.portal.get_localized_time(value,
                                                      long_format=True)
            if safe_callable(value):
                value = value()
            if isinstance(value, (list, tuple)):
                value = pretty_join(value)
            if HAS_GEOLOCATION and isinstance(value, Geolocation):
                value = value.__dict__
            item_dict[fieldname] = value
        # Merge additional info / overridden getters.
        item_dict.update(self.additional_data(obj, blacklist))
        results.append(item_dict)
    return results
def get_export_data(
    self,
    portal_type,
    blob_format,
    richtext_format,
    blacklist,
    whitelist,
    query,
):
    """Return a list of dicts, one per catalog result of ``portal_type``.

    Each dict maps field names to export-ready values: rich text is
    transformed, relations become URLs, blobs become download URLs,
    dates are localized and sequences are joined.
    """
    all_fields = get_schema_info(portal_type, blacklist, whitelist)
    catalog = api.portal.get_tool('portal_catalog')
    query = query or dict()
    query['portal_type'] = portal_type
    # Export every translation when a Language index exists.
    if 'Language' not in query and HAS_MULTILINGUAL and \
            'Language' in catalog.indexes():
        query['Language'] = 'all'
    results = []
    for brain in catalog(query):
        obj = brain.getObject()
        item_dict = dict()
        for fieldname, field in all_fields:
            # Fields listed in ADDITIONAL_MAPPING are handled by
            # additional_data() below.
            if fieldname in self.ADDITIONAL_MAPPING:
                continue
            value = field.get(field.interface(obj))
            if not value:
                value = ''   # keep row dimensions identical
                field = None  # suppress all further field-based transforms
            if IRichTextValue.providedBy(value):
                value = transform_richtext(value, mimetype=richtext_format)
            if IRelationList.providedBy(field):
                value = pretty_join(
                    [get_url_for_relation(rel) for rel in value])
            if IRelationChoice.providedBy(field):
                value = get_url_for_relation(value)
            if INamed.providedBy(value):
                value = get_blob_url(value, brain, blob_format, fieldname)
            if IDatetime.providedBy(field) or IDate.providedBy(field):
                value = api.portal.get_localized_time(
                    value, long_format=True)
            if safe_callable(value):
                value = value()
            if isinstance(value, (list, tuple)):
                value = pretty_join(value)
            item_dict[fieldname] = value
        # Merge additional info / overridden getters.
        item_dict.update(self.additional_data(obj, blacklist))
        results.append(item_dict)
    return results