Example #1
0
 def saveDiscussions(self):
     """Persist submitted discussion records: update existing records
     (matched by object_id), create new ones, then delete any stored
     record that was not (re)submitted.
     """
     session = Session()
     new_record_keys = []
     domain_model = removeSecurityProxy(self.context.domain_model)
     for record in self.data:
         discussion_text = record.get("body", "")
         object_id = record.get("object_id", None)
         if object_id:
             # existing record -> update its body in place
             current_record = removeSecurityProxy(self.context.get(getItemKey(object_id)))
             current_record.body = discussion_text
             session.add(current_record)
             session.flush()
             notify(ObjectModifiedEvent(current_record))
             new_record_keys.append(stringKey(current_record))
         else:
             # no object_id -> create a new record for the scheduled item
             new_record = domain_model(body=discussion_text, language=get_default_language())
             new_record.scheduled_item = removeSecurityProxy(self.context.__parent__)
             session.add(new_record)
             session.flush()
             notify(ObjectCreatedEvent(new_record))
             new_record_keys.append(stringKey(new_record))
     # anything not (re)submitted above is obsolete
     records_to_delete = [
         removeSecurityProxy(self.context.get(key)) for key in self.context.keys() if key not in new_record_keys
     ]
     # explicit loops instead of map(): map() for side effects is
     # non-idiomatic and, being lazy on Python 3, would silently never
     # execute the deletes or the notifications
     for obsolete in records_to_delete:
         session.delete(obsolete)
     for obsolete in records_to_delete:
         notify(ObjectRemovedEvent(obsolete))
Example #2
0
 def saveDiscussions(self):
     """Persist submitted discussion records: update existing records
     (matched by object_id), create new ones, then delete any stored
     record that was not (re)submitted.
     """
     session = Session()
     new_record_keys = []
     domain_model = removeSecurityProxy(self.context.domain_model)
     for record in self.data:
         discussion_text = record.get("body", "")
         object_id = record.get("object_id", None)
         if object_id:
             # existing record -> update its body in place
             current_record = removeSecurityProxy(
                 self.context.get(getItemKey(object_id)))
             current_record.body = discussion_text
             session.add(current_record)
             session.flush()
             notify(ObjectModifiedEvent(current_record))
             new_record_keys.append(stringKey(current_record))
         else:
             # no object_id -> create a new record for the scheduled item
             new_record = domain_model(body=discussion_text,
                                       language=get_default_language())
             new_record.scheduled_item = removeSecurityProxy(
                 self.context.__parent__)
             session.add(new_record)
             session.flush()
             notify(ObjectCreatedEvent(new_record))
             new_record_keys.append(stringKey(new_record))
     # anything not (re)submitted above is obsolete
     records_to_delete = [
         removeSecurityProxy(self.context.get(key))
         for key in self.context.keys() if key not in new_record_keys
     ]
     # explicit loops instead of map(): map() for side effects is
     # non-idiomatic and, being lazy on Python 3, would silently never
     # execute the deletes or the notifications
     for obsolete in records_to_delete:
         session.delete(obsolete)
     for obsolete in records_to_delete:
         notify(ObjectRemovedEvent(obsolete))
Example #3
0
 def id(self, object):
     """Return the xapian 'primary key' for *object*."""
     #TODO Add the language to the index!
     key = container.stringKey(object)
     if key == "obj-None":
         # no database identity yet: flush so the object gets its key
         session = Session()
         session.flush()
         key = container.stringKey(object)
     cls = object.__class__
     return "%s.%s-%s" % (cls.__module__, cls.__name__, key)
Example #4
0
 def id(self, object):
     """Return the xapian 'primary key' for *object*."""
     #TODO Add the language to the index!
     key = container.stringKey(object)
     if key == "obj-None":
         # no database identity yet: flush so the object gets its key
         session = Session()
         session.flush()
         key = container.stringKey(object)
     klass = object.__class__
     return "%s.%s-%s" % (klass.__module__, klass.__name__, key)
Example #5
0
def validate_open_interval(obj, domain_model):
    """Yield existing open-interval records (end_date is NULL) of
    domain_model; when *obj* is given, its own record is skipped.
    """
    query = Session().query(domain_model).filter(domain_model.end_date == None)
    for candidate in query.all():
        # the object itself is allowed to be open-ended
        if obj and stringKey(candidate) == stringKey(obj):
            continue
        yield candidate
Example #6
0
def validate_open_interval(obj, domain_model):
    """Yield existing open-interval records (end_date is NULL) of
    domain_model; when *obj* is given, its own record is skipped.
    """
    query = Session().query(domain_model).filter(domain_model.end_date == None)
    results = query.all()
    if results:
        if obj:
            for result in results:
                if stringKey(result) == stringKey(obj):
                    continue
                else:
                    yield result
        else:
            # fixed: the for statement was inconsistently indented
            # (3 spaces) relative to its own yield body
            for result in results:
                yield result
Example #7
0
    def _json_values(self, nodes):
        """
        filter values from the nodes to represent in json, currently
        that means some footwork around, probably better as another
        set of adapters.
        """

        def get_listing_column_getters():
            # dict of (descriptor) field getters by name, for fast lookup;
            # dict(generator) avoids materializing an intermediate list
            return dict(
                (f.name, getattr(f.listing_column, "getter", lambda n, field: field.query(n)))
                for f in self.domain_annotation.fields
            )

        listing_column_getters = get_listing_column_getters()
        values = []
        for node in nodes:
            d = {}
            for field in self.fields:
                fn = field.__name__
                d[fn] = listing_column_getters[fn](node, field)
                v = d[fn]
                # localize date values
                if isinstance(v, (datetime.datetime, datetime.date, datetime.time)):
                    d[fn] = date.get_localized_date(self.request, v)

            d["object_id"] = url.set_url_context(container.stringKey(node))
            values.append(d)
        return values
Example #8
0
 def __str__(self):
     """Admin URL of a chamber document: .../chambers/obj-<id>/<types>/<key>."""
     doc = removeSecurityProxy(self.context)
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     type_segment = naming.plural(naming.polymorphic_identity(type(doc)))
     return "%s/admin/content/chambers/obj-%s/%s/%s" % (
         base_url, doc.chamber_id, type_segment, stringKey(doc))
Example #9
0
def get_sitting_items(sitting, request, include_actions=False):
    """Build a list of display records for the items scheduled in *sitting*.

    Each record is a dict of presentation data for one scheduled item; when
    include_actions is True, scheduling/workflow/discussion actions are
    added to each record.
    """
    items = []

    # agenda-stage sittings list items in planned order, otherwise in the
    # real (actual) order
    if (sitting.status in IWorkflow(sitting).get_state_ids(
            keys=["draft_agenda", "published_agenda"])):
        order = "planned_order"
    else:
        order = "real_order"

    schedulings = map(removeSecurityProxy,
                      sitting.items.batch(order_by=order, limit=None))
    for scheduling in schedulings:
        item = ProxyFactory(location_wrapped(scheduling.item, sitting))

        props = IDCDescriptiveProperties.providedBy(item) and item or \
                IDCDescriptiveProperties(item)

        # only the first discussion record (if any) is displayed
        discussions = tuple(scheduling.discussions.values())
        discussion = discussions and discussions[0] or None
        truncated_discussion = None
        if ((discussion is not None) and (discussion.body is not None)):
            #truncate discussion to first hundred characters
            t_discussion = discussion.body[0:100]
            try:
                #truncate discussion to first two lines
                index = t_discussion.index("<br>")
                index2 = t_discussion.index("<br>", index + 4)
                truncated_discussion = t_discussion[0:index2] + "..."
            except ValueError:
                # fewer than two <br> breaks in the first 100 characters
                truncated_discussion = t_discussion + "..."
        state_title = IWorkflow(item).get_state(item.status).title
        item = removeSecurityProxy(item)
        record = {
            "title": props.title,
            "description": props.description,
            "name": stringKey(scheduling),
            "status": item.status,
            # NOTE(review): .capitalize is not called here -- this stores
            # the bound method object; confirm the consumer calls it
            "type": item.type.capitalize,
            "state_title": state_title,
            "heading": True if item.type == "heading" else False,
            #"category_id": scheduling.category_id,
            #"category": scheduling.category,
            "discussion": discussion,
            "truncated_discussion": truncated_discussion,
            "delete_url": "%s/delete" % url.absoluteURL(scheduling, request),
            "url": url.absoluteURL(item, request),
        }

        if include_actions:
            record["actions"] = get_scheduling_actions(scheduling, request)
            record["workflow"] = get_workflow_actions(item, request)

            # at most one discussion action is expected per scheduling
            discussion_actions = get_discussion_actions(discussion, request)
            if discussion_actions:
                assert len(discussion_actions) == 1
                record["discussion_action"] = discussion_actions[0]
            else:
                record["discussion_action"] = None
        items.append(record)
    return items
Example #10
0
    def getBatch(self, start=0, limit=20, lang=None):
        """Return a JSON-ready batch of rows [start:start+limit] from this
        container's filtered and sorted query, translated to *lang*.
        """
        context = proxy.removeSecurityProxy(self.context)
        query = context._query

        # date_range filter (try from: model, then cookie, then request)
        query = query_filter_date_range(context, self.request, query,
                                        self.domain_model)

        # other filters
        query = self.query_add_filters(query, self.getFilter())

        # order_by
        order_by = self.getSort()  # [sort_on_expressions]
        if order_by:
            query = query.order_by(order_by)
        # ore.alchemist.container.AlchemistContainer.batch()
        # nodes: [<bungeni.models.domain.Question]
        # NOTE(review): the entire result set is materialized here and
        # sliced in Python; consider count() + offset/limit for large sets
        nodes = [
            container.contained(ob, self, container.stringKey(ob))
            for ob in query_iterator(query, self.context, self.permission)
        ]
        self.set_size = len(nodes)
        nodes[:] = nodes[start:start + limit]
        nodes = self.translate_objects(nodes, lang)
        batch = self._jsonValues(nodes, self.fields)
        return batch
Example #11
0
    def get_batch(self, start, limit):
        """Get the data instances for this batch.
        """
        context = proxy.removeSecurityProxy(self.context)
        base_query = context._query

        # date_range filter (try from: model, then cookie, then request)
        base_query = query_filter_date_range(
            context, self.request, base_query, self.domain_model)
        # other filters
        base_query = self.query_add_filters(base_query, self.getFilter())
        # sort expressions, if any
        sort_exprs = self.getSort()
        if sort_exprs:
            base_query = base_query.order_by(sort_exprs)
        # total number of items, taken before offset/limit are applied
        self.set_size = base_query.count()
        # restrict to the requested page
        page_query = base_query.offset(start).limit(limit)
        # wrap each instance for containment, as
        # bungeni.alchemist.container.AlchemistContainer.batch() does
        return [
            container.contained(instance, self, container.stringKey(instance))
            for instance in query_iterator(
                page_query, self.context, self.permission)
        ]
Example #12
0
    def _json_values(self, nodes):
        """
        filter values from the nodes to represent in json, currently
        that means some footwork around, probably better as another
        set of adapters.
        """
        def get_listing_column_getters():
            # dict of (descriptor) field getters by name, for fast lookup;
            # dict(generator) avoids materializing an intermediate list
            return dict(
                (f.name, getattr(f.listing_column, "getter",
                                 lambda n, field: field.query(n)))
                for f in self.domain_annotation.fields)

        listing_column_getters = get_listing_column_getters()
        values = []
        for node in nodes:
            d = {}
            for field in self.fields:
                fn = field.__name__
                d[fn] = listing_column_getters[fn](node, field)
                v = d[fn]
                #localize date values
                if isinstance(
                        v, (datetime.datetime, datetime.date, datetime.time)):
                    d[fn] = date.get_localized_date(self.request, v)

            d["object_id"] = url.set_url_context(container.stringKey(node))
            values.append(d)
        return values
Example #13
0
 def _json_values(self, nodes):
     """
     filter values from the nodes to represent in json, currently
     that means some footwork around, probably better as another
     set of adapters.
     """
     def get_listing_column_getters():
         # dict of (descriptor) field getters by name, for fast lookup;
         # dict(generator) avoids materializing an intermediate list
         return dict(
             (f.name, getattr(f.listing_column, "getter",
                 lambda n, field: field.query(n)))
             for f in self.domain_annotation.fields)
     listing_column_getters = get_listing_column_getters()
     values = []
     for node in nodes:
         d = {}
         for field in self.fields:
             fn = field.__name__
             d[fn] = listing_column_getters[fn](node, field)
             # !+i18n_DATE(mr, sep-2010) two problems with the isinstance
             # tests below:
             # a) they seem to always fail (no field values of this type?)
             # b) this is incorrect way to localize dates
             v = d[fn]
             # "%Y-%m-%d" instead of "%F": %F is not a portable strftime
             # code (unsupported on e.g. Windows)
             if isinstance(v, datetime.datetime):
                 d[fn] = v.strftime("%Y-%m-%d %I:%M %p")
             elif isinstance(v, datetime.date):
                 d[fn] = v.strftime("%Y-%m-%d")
         d["object_id"] = url.set_url_context(container.stringKey(node))
         values.append(d)
     return values
Example #14
0
 def id(self, object, language="en"):
     """Return the xapian 'primary key' for *object* in *language*."""
     #TODO Add the language to the index!
     cls = object.__class__
     return "%s.%s-%s:%s" % (
         cls.__module__, cls.__name__,
         container.stringKey(object), language)
Example #15
0
    def get_batch(self, start, limit):
        """Get the data instances for this batch.
        """
        context = proxy.removeSecurityProxy(self.context)
        query = context._query

        # date_range filter (try from: model, then cookie, then request)
        query = query_filter_date_range(context, self.request, query,
                                        self.domain_model)
        # other filters
        query = self.query_add_filters(query, self.getFilter())
        # apply sort expressions, if any
        order_by = self.getSort()  # [sort_on_expressions]
        if order_by:
            query = query.order_by(order_by)
        # record total item count before offset/limit restrict the query
        self.set_size = query.count()
        query = query.offset(start).limit(limit)
        # wrap each instance for containment, as
        # bungeni.alchemist.container.AlchemistContainer.batch() does
        batch = []
        for ob in query_iterator(query, self.context, self.permission):
            batch.append(container.contained(ob, self, container.stringKey(ob)))
        return batch
Example #16
0
 def __str__(self):
     """URL of this file under its parliamentary item's business URL."""
     item_id = self.context.item_id
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     session = Session()
     item = session.query(ParliamentaryItem).filter(
         ParliamentaryItem.parliamentary_item_id == item_id).first()
     return '%s/business/%ss/obj-%s/files/%s/' % (
         base_url, item.type, item_id, stringKey(self.context))
Example #17
0
    def _jsonValues(self, nodes, fields):
        """
        filter values from the nodes to represent in json, currently
        that means some footwork around, probably better as another
        set of adapters.
        """
        def get_listing_column_getters():
            # dict of (descriptor) field getters by name, for fast lookup;
            # dict(generator) avoids materializing an intermediate list
            return dict(
                (f.name, getattr(f.listing_column, "getter",
                                 lambda n, field: field.query(n)))
                for f in self.domain_annotation.fields)

        listing_column_getters = get_listing_column_getters()
        values = []
        for node in nodes:
            d = {}
            for field in fields:
                fn = field.__name__
                d[fn] = listing_column_getters[fn](node, field)
                # !+i18n_DATE(mr, sep-2010) two problems with the isinstance
                # tests below:
                # a) they seem to always fail (no field values of this type?)
                # b) this is incorrect way to localize dates
                v = d[fn]
                # "%Y-%m-%d" instead of "%F": %F is not a portable strftime
                # code (unsupported on e.g. Windows)
                if isinstance(v, datetime.datetime):
                    d[fn] = v.strftime("%Y-%m-%d %I:%M %p")
                elif isinstance(v, datetime.date):
                    d[fn] = v.strftime("%Y-%m-%d")
            d["object_id"] = url.set_url_context(container.stringKey(node))
            values.append(d)
        return values
Example #18
0
def validate_date_in_interval(obj, domain_model, date):
    """Yield records of domain_model whose [start_date, end_date] interval
    contains *date*; *obj* itself is excluded, since an object may overlap
    with its own stored record.
    """
    query = Session().query(domain_model).filter(sql.expression.between(
            date, domain_model.start_date, domain_model.end_date))
    results = query.all()
    if results:
        if obj:
            # the object itself can overlap
            for result in results:
                if stringKey(result) == stringKey(obj):
                    continue
                else:
                    yield result
        else:
            # all results indicate an error
            # fixed: the for statement was inconsistently indented
            # (3 spaces) relative to its own yield body
            for result in results:
                yield result
Example #19
0
    def getBatch(self, start=0, limit=20, lang=None):
        """Return a JSON-ready batch of rows [start:start+limit] from this
        container's filtered and sorted query, translated to *lang*.
        """
        context = proxy.removeSecurityProxy(self.context)
        query = context._query

        # date_range filter (try from: model, then cookie, then request)
        query = query_filter_date_range(context, self.request, query, self.domain_model)

        # other filters
        query = self.query_add_filters(query, self.getFilter())

        # order_by
        order_by = self.getSort()  # [sort_on_expressions]
        if order_by:
            query = query.order_by(order_by)
        # ore.alchemist.container.AlchemistContainer.batch()
        # nodes: [<bungeni.models.domain.Question]
        # NOTE(review): the entire result set is materialized here and
        # sliced in Python; consider count() + offset/limit for large sets
        nodes = [
            container.contained(ob, self, container.stringKey(ob))
            for ob in query_iterator(query, self.context, self.permission)
        ]
        self.set_size = len(nodes)
        nodes[:] = nodes[start : start + limit]
        nodes = self.translate_objects(nodes, lang)
        batch = self._jsonValues(nodes, self.fields)
        return batch
Example #20
0
def get_sitting_items(sitting, request, include_actions=False):
    """Build a list of display records for the items scheduled in *sitting*.

    Each record is a dict of presentation data for one scheduled item; when
    include_actions is True, scheduling/workflow/discussion actions are
    added to each record.
    """
    items = []

    # agenda-stage sittings list items in planned order, otherwise in the
    # real (actual) order
    if sitting.status in get_states("groupsitting", keys=["draft_agenda", "published_agenda"]):
        order = "planned_order"
    else:
        order = "real_order"

    schedulings = map(removeSecurityProxy, sitting.items.batch(order_by=order, limit=None))
    site_url = url.absoluteURL(getSite(), request)
    for scheduling in schedulings:
        item = ProxyFactory(location_wrapped(scheduling.item, sitting))

        props = IDCDescriptiveProperties.providedBy(item) and item or IDCDescriptiveProperties(item)

        # only the first discussion record (if any) is displayed
        discussions = tuple(scheduling.discussions.values())
        discussion = discussions and discussions[0] or None
        truncated_discussion = None
        if (discussion is not None) and (discussion.body_text is not None):
            # truncate discussion to first hundred characters
            t_discussion = discussion.body_text[0:100]
            try:
                # truncate discussion to first two lines
                index = t_discussion.index("<br>")
                index2 = t_discussion.index("<br>", index + 4)
                truncated_discussion = t_discussion[0:index2] + "..."
            except ValueError:
                # fewer than two <br> breaks in the first 100 characters
                truncated_discussion = t_discussion + "..."
        info = IWorkflowInfo(item, None)
        state_title = info.workflow().workflow.states[item.status].title

        record = {
            "title": props.title,
            "description": props.description,
            "name": stringKey(scheduling),
            "status": item.status,
            # NOTE(review): .capitalize is not called here -- this stores
            # the bound method object; confirm the consumer calls it
            "type": item.type.capitalize,
            "t": item.type,
            "state_title": state_title,
            #'category_id': scheduling.category_id,
            #'category': scheduling.category,
            "discussion": discussion,
            "truncated_discussion": truncated_discussion,
            "delete_url": "%s/delete" % url.absoluteURL(scheduling, request),
            "url": url.set_url_context(site_url + ("/business/%ss/obj-%s" % (item.type, item.parliamentary_item_id))),
        }

        if include_actions:
            record["actions"] = get_scheduling_actions(scheduling, request)
            record["workflow"] = get_workflow_actions(item, request)

            # at most one discussion action is expected per scheduling
            discussion_actions = get_discussion_actions(discussion, request)
            if discussion_actions:
                assert len(discussion_actions) == 1
                record["discussion_action"] = discussion_actions[0]
            else:
                record["discussion_action"] = None
        items.append(record)
    return items
Example #21
0
def validate_date_in_interval(obj, domain_model, date):
    """Yield records of domain_model whose [start_date, end_date] interval
    contains *date*; *obj* itself is excluded, since an object may overlap
    with its own stored record.
    """
    query = Session().query(domain_model).filter(
        sql.expression.between(date, domain_model.start_date,
                               domain_model.end_date))
    for candidate in query.all():
        # the object itself can overlap; anything else is an error
        if obj and stringKey(candidate) == stringKey(obj):
            continue
        yield candidate
Example #22
0
 def __str__(self):
     """Admin URL of a group member: base/group-path/type-plural/key."""
     member = removeSecurityProxy(self.context)
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     group_path = super(
         GroupMemberAdminAbsoluteURLView, self)._group_url_path(member.group)
     type_segment = naming.plural(naming.polymorphic_identity(type(member)))
     return "%s/%s/%s/%s" % (
         base_url, group_path, type_segment, stringKey(member))
 def __str__(self):
     """Admin URL of a group member: base/group-path/type-plural/key."""
     member = removeSecurityProxy(self.context)
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     parts = (
         base_url,
         super(GroupMemberAdminAbsoluteURLView, self)._group_url_path(
             member.group),
         naming.plural(naming.polymorphic_identity(type(member))),
         stringKey(member),
     )
     return "%s/%s/%s/%s" % parts
 def __str__(self):
     """Admin URL of a chamber document: .../chambers/obj-<id>/<types>/<key>."""
     doc = removeSecurityProxy(self.context)
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     type_segment = naming.plural(naming.polymorphic_identity(type(doc)))
     return "%s/admin/content/chambers/obj-%s/%s/%s" % (
         base_url, doc.chamber_id, type_segment, stringKey(doc))
 def __str__(self):
     """Archive-browse URL of the context under its parliament."""
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     return "/".join((
         base_url,
         "archive/browse/parliaments/obj-%s" % self.context.parliament_id,
         self.subsection,
         stringKey(self.context),
     ))
 def __str__(self):
     """Admin URL of the context under its parliament (group)."""
     base_url = ui_utils.url.absoluteURL(getSite(), self.request)
     return "/".join((
         base_url,
         "admin/content/parliaments/obj-%s" % self.context.group_id,
         self.subsection,
         stringKey(self.context),
     ))
 def _group_url_path(self, group):
     """Relative URL path from the top-level group down to *group*."""
     url_comps = []
     group = removeSecurityProxy(group)
     while group:
         # each ancestor contributes a "<type-plural>/<key>" segment
         type_segment = naming.plural(naming.polymorphic_identity(type(group)))
         url_comps.insert(0, "%s/%s" % (type_segment, stringKey(group)))
         group = removeSecurityProxy(group.parent_group)
     return "/".join(url_comps)
Example #28
0
def get_sitting_items(sitting, request, include_actions=False):
    """Build a list of display records for the items scheduled in *sitting*.

    Each record is a dict of presentation data for one scheduled item; when
    include_actions is True, scheduling/workflow/discussion actions are
    added to each record.
    """
    items = []

    # agenda-stage sittings list items in planned order, otherwise in the
    # real (actual) order
    if sitting.status in IWorkflow(sitting).get_state_ids(keys=["draft_agenda", "published_agenda"]):
        order = "planned_order"
    else:
        order = "real_order"

    schedulings = map(removeSecurityProxy, sitting.items.batch(order_by=order, limit=None))
    for scheduling in schedulings:
        item = ProxyFactory(location_wrapped(scheduling.item, sitting))

        props = IDCDescriptiveProperties.providedBy(item) and item or IDCDescriptiveProperties(item)

        # only the first discussion record (if any) is displayed
        discussions = tuple(scheduling.discussions.values())
        discussion = discussions and discussions[0] or None
        truncated_discussion = None
        if (discussion is not None) and (discussion.body is not None):
            # truncate discussion to first hundred characters
            t_discussion = discussion.body[0:100]
            try:
                # truncate discussion to first two lines
                index = t_discussion.index("<br>")
                index2 = t_discussion.index("<br>", index + 4)
                truncated_discussion = t_discussion[0:index2] + "..."
            except ValueError:
                # fewer than two <br> breaks in the first 100 characters
                truncated_discussion = t_discussion + "..."
        state_title = IWorkflow(item).get_state(item.status).title
        item = removeSecurityProxy(item)
        record = {
            "title": props.title,
            "description": props.description,
            "name": stringKey(scheduling),
            "status": item.status,
            # NOTE(review): .capitalize is not called here -- this stores
            # the bound method object; confirm the consumer calls it
            "type": item.type.capitalize,
            "state_title": state_title,
            "heading": True if item.type == "heading" else False,
            # "category_id": scheduling.category_id,
            # "category": scheduling.category,
            "discussion": discussion,
            "truncated_discussion": truncated_discussion,
            "delete_url": "%s/delete" % url.absoluteURL(scheduling, request),
            "url": url.absoluteURL(item, request),
        }

        if include_actions:
            record["actions"] = get_scheduling_actions(scheduling, request)
            record["workflow"] = get_workflow_actions(item, request)

            # at most one discussion action is expected per scheduling
            discussion_actions = get_discussion_actions(discussion, request)
            if discussion_actions:
                assert len(discussion_actions) == 1
                record["discussion_action"] = discussion_actions[0]
            else:
                record["discussion_action"] = None
        items.append(record)
    return items
Example #29
0
    def render(self, date, template=None):
        """Render the sitting schedule view with *template* (defaults to
        self.template), passing prev/next links to sibling sittings, the
        sitting's scheduled items and its available actions.

        NOTE(review): the *date* parameter is not referenced in this body.
        """
        # load the YUI resources the scheduling UI needs
        #need('yui-editor')
        need('yui-connection')
        need('yui-rte')
        need('yui-resize')
        need('yui-button')

        if template is None:
            template = self.template

        container = self.context.__parent__
        #schedule_url = self.request.getURL()
        container_url = url.absoluteURL(container, self.request)

        # determine position in container
        key = stringKey(self.context)
        keys = list(container.keys())
        pos = keys.index(key)

        # prev/next links to the sibling sittings in the same container
        links = {}
        if pos > 0:
            links['previous'] = "%s/%s/%s" % (container_url, keys[pos - 1],
                                              self.__name__)
        if pos < len(keys) - 1:
            links['next'] = "%s/%s/%s" % (container_url, keys[pos + 1],
                                          self.__name__)

        #start_date = utils.datetimedict.fromdatetime(self.context.start_date)
        #end_date = utils.datetimedict.fromdatetime(self.context.end_date)

        site_url = url.absoluteURL(getSite(), self.request)
        # items are re-orderable only while the sitting is in a draft state
        reorder = "reorder" if self.context.status in \
                                ["draft_agenda", "draft_minutes"] \
                            else "dont-reorder"
        return template(
            display="sitting",
            #title=_(u"$A $e, $B $Y", mapping=start_date),
            title="%s: %s - %s" %
            (self.context.group.short_name,
             self.context.start_date.strftime('%Y-%m-%d %H:%M'),
             self.context.end_date.strftime('%H:%M')),
            description=_(u"Sitting Info"),
            #            title = u"",
            #            description = u"",
            #
            links=links,
            actions=get_sitting_actions(self.context, self.request),
            items=get_sitting_items(self.context,
                                    self.request,
                                    include_actions=True),
            #categories=vocabulary.ItemScheduleCategories(self.context),
            new_category_url="%s/admin/content/categories/add?next_url=..." %
            site_url,
            status=self.context.status,
            reorder=reorder,
        )
Example #30
0
 def _group_url_path(self, group):
     """Relative URL path from the top-level group down to *group*."""
     comps = []
     group = removeSecurityProxy(group)
     while group:
         # each ancestor contributes a "<type-plural>/<key>" segment
         comps.append("%s/%s" % (
             naming.plural(naming.polymorphic_identity(type(group))),
             stringKey(group)))
         group = removeSecurityProxy(group.parent_group)
     comps.reverse()
     return "/".join(comps)
Example #31
0
 def __call__(self, context, location):
     """Locate *context* in one of self.containers, returning it wrapped
     in a LocationProxy; raise LocationError if no container holds it.
     """
     key = stringKey(context)
     for candidate in self.containers:
         if IQueryContent.providedBy(candidate):
             # resolve the query-content container, preserving its parent
             parent = candidate.__parent__
             candidate = candidate.query(location)
             if parent is not None:
                 candidate.__parent__ = parent
         if key in candidate:
             return LocationProxy(context, candidate, key)
     raise LocationError(key)
Example #32
0
 def __call__(self, context, location):
     """Locate *context* in one of self.containers, returning it wrapped
     in a LocationProxy; raise LocationError if no container holds it.
     """
     key = stringKey(context)
     for cont in self.containers:
         if IQueryContent.providedBy(cont):
             # resolve the query-content container, preserving its parent
             parent = cont.__parent__
             cont = cont.query(location)
             if parent is not None:
                 cont.__parent__ = parent
         if key in cont:
             return LocationProxy(context, cont, key)
     raise LocationError(key)
Example #33
0
    def render(self, date, template=None):
        """Render the sitting schedule view with *template* (defaults to
        self.template), passing prev/next links to sibling sittings, the
        sitting's scheduled items and its available actions.

        NOTE(review): the *date* parameter is not referenced in this body.
        """
        # load the YUI resources the scheduling UI needs
        #need('yui-editor')
        need('yui-connection')
        need('yui-rte')
        need('yui-resize')
        need('yui-button')

        if template is None:
            template = self.template

        container = self.context.__parent__
        #schedule_url = self.request.getURL()
        container_url = url.absoluteURL(container, self.request)

        # determine position in container
        key = stringKey(self.context)
        keys = list(container.keys())
        pos = keys.index(key)

        # prev/next links to the sibling sittings in the same container
        links = {}
        if pos > 0:
            links['previous'] = "%s/%s/%s" % (
                container_url, keys[pos-1], self.__name__)
        if pos < len(keys) - 1:
            links['next'] = "%s/%s/%s" % (
                container_url, keys[pos+1], self.__name__)

        #start_date = utils.datetimedict.fromdatetime(self.context.start_date)
        #end_date = utils.datetimedict.fromdatetime(self.context.end_date)

        site_url = url.absoluteURL(getSite(), self.request)
        # items are re-orderable only while the sitting is in a draft state
        reorder = "reorder" if self.context.status in \
                                ["draft_agenda", "draft_minutes"] \
                            else "dont-reorder"
        return template(
            display="sitting",
            #title=_(u"$A $e, $B $Y", mapping=start_date),
            title = "%s: %s - %s" % (self.context.group.short_name, 
                self.context.start_date.strftime('%Y-%m-%d %H:%M'), 
                self.context.end_date.strftime('%H:%M')),
            description=_(u"Sitting Info"),
#            title = u"",
#            description = u"",
#
            links=links,
            actions=get_sitting_actions(self.context, self.request),
            items=get_sitting_items(
                self.context, self.request, include_actions=True),
            #categories=vocabulary.ItemScheduleCategories(self.context),
            new_category_url="%s/admin/content/categories/add?next_url=..." % site_url,
            status=self.context.status,
            reorder=reorder,
            )
Example #34
0
 def _json_values(self, nodes):
     """Return nodes as JSON"""
     values = []
     for node in nodes:
         # one dict per node: each descriptor field, plus id and media url
         entry = {}
         for field in self.fields:
             name = field.__name__
             entry[name] = dthandler(getattr(node, name, None))
         entry["object_id"] = url.set_url_context(container.stringKey(node))
         entry["media_url"] = node.media_url
         values.append(entry)
     return values
Example #35
0
def publish_to_xml(context, type="", include=None):
    """Serialize an object to XML and save it under the storage directory.

    If the object has attached files, the XML and all attachments are
    bundled into a zip archive and the loose files are removed.

    :param context: the (possibly security-proxied) domain object.
    :param type: storage sub-directory name; defaults to the object's own
        "type" attribute when left empty.
    :param include: extra relation names to serialize ("versions"/"event"
        are appended automatically when the object supports them).
    """
    if include is None:
        include = []

    context = removeSecurityProxy(context)

    if IVersionable.implementedBy(context.__class__):
        include.append("versions")
    if IAuditable.implementedBy(context.__class__):
        include.append("event")

    data = obj2dict(context, 1, parent=None, include=include,
        exclude=["file_data", "image", "logo_data", "event_item"])
    if type == "":
        type = getattr(context, "type", None)
        # NOTE: permissions are only serialized when the type is derived
        # from the object itself (original behavior, preserved)
        data["permissions"] = []
        permissions = get_object_state_rpm(context).permissions
        for x in permissions:
            data["permissions"].append({
                "role": x[2],
                "permission": x[1],
                "setting": x[0] and "Allow" or "Deny",
            })

    assert type, "%s has no 'type' field. Use 'type' function parameter." % context.__class__

    files = []
    path = os.path.join(setupStorageDirectory(), type)
    if not os.path.exists(path):
        os.makedirs(path)

    file_path = os.path.join(path, stringKey(context))
    files.append(file_path + ".xml")
    # renamed from "file" to avoid shadowing the builtin
    with open(file_path + ".xml", "w") as xml_file:
        xml_file.write(serialize(data, name=type))

    if IAttachmentable.implementedBy(context.__class__):
        attached_files = getattr(context, "attached_files", None)
        if attached_files:
            for attachment in attached_files:
                # reuse the computed path instead of re-joining it
                attachment_path = os.path.join(path, attachment.file_name)
                files.append(attachment_path)
                with open(attachment_path, "wb") as attachment_file:
                    attachment_file.write(attachment.file_data)
            # renamed from "zip" to avoid shadowing the builtin
            zip_file = ZipFile(file_path + ".zip", "w")
            for f in files:
                zip_file.write(f, os.path.split(f)[-1])
                os.remove(f)
            zip_file.close()
Example #36
0
    def _json_values(self, nodes):
        """Serialize each node into a JSON-friendly dict.

        Field values are copied as-is, except dates/datetimes which are
        rendered as strings; the node's container key is added under
        "object_id".
        """
        def _coerce(value):
            # datetime must be tested first: it is a subclass of date
            if isinstance(value, datetime.datetime):
                return value.strftime("%F %I:%M %p")
            if isinstance(value, datetime.date):
                return value.strftime("%F")
            return value

        results = []
        for node in nodes:
            record = dict(
                (field.__name__, _coerce(getattr(node, field.__name__, None)))
                for field in self.fields
            )
            record["object_id"] = url.set_url_context(container.stringKey(node))
            results.append(record)
        return results
Example #37
0
 def get_batch(self, start, limit):
     """Get the data instances for this batch.

     Builds the listing query in stages: date-range filter, sort
     expressions, per-column listing filters, then subclass filters;
     records the total result count in self.set_size before slicing
     with offset/limit. Returns contained() wrappers for the page of
     objects.
     """
     context = proxy.removeSecurityProxy(self.context)
     query = context._query
     # date_range filter (try from: model, then cookie, then request)
     query = query_filter_date_range(context, self.request, query,
         self.domain_model)
     sort_on_expressions = []
     # other filters
     lc_filter_queries = self.get_filter()
     sort_on_keys = self.get_sort_keys()
     if sort_on_keys:
         for sort_on in sort_on_keys:
             md_field = self.domain_annotation.get(sort_on)
             if md_field:
                 if not md_field.listing_column_filter:
                     pass
                     #continue
                     #!+SORTING(mb, Mar-2013) why does sorting need a filter?
             else:
                 # check domain model if this if field is not in descriptor
                 if not hasattr(self.domain_model, sort_on):
                     continue
             # NOTE: descriptor fields reach here whether or not they
             # have a listing_column_filter (the branch above is a no-op)
             sort_on_expressions.append(
                 self.sort_dir_func(
                     getattr(self.domain_model, sort_on)))
     for lc_filter_query, params in lc_filter_queries:
         filter_string = params[0]
         column_name = params[1]
         query = lc_filter_query(query, filter_string, self.sort_dir_func, column_name)
     if sort_on_expressions:
         query = query.order_by(*sort_on_expressions)
     #add optional filters, used by sub classes
     query = self.query_add_filters(query)
     # get total number of items before applying an offset and limit
     self.set_size = query.count()
     # offset and limit
     query = query.offset(start).limit(limit)
     # bungeni.alchemist.container.AlchemistContainer.batch()
     # nodes: [<bungeni.models.domain.Question]
     # !+STRING_KEY_FILE_VERSION + no permission "bungeni.attachment_version.View"!
     return [
         container.contained(ob, self, container.stringKey(ob))
         for ob in query_iterator(query, self.context)
     ]
Example #38
0
    def _json_values(self, nodes):
        """Serialize nodes to JSON-friendly dicts.

        Date and datetime values are rendered as strings; the node's
        container key is exposed as "object_id".
        """
        out = []
        for node in nodes:
            row = {}
            for field in self.fields:
                name = field.__name__
                value = getattr(node, name, None)
                # order matters: datetime is a subclass of date
                if isinstance(value, datetime.datetime):
                    value = value.strftime("%F %I:%M %p")
                elif isinstance(value, datetime.date):
                    value = value.strftime("%F")
                row[name] = value
            row["object_id"] = url.set_url_context(container.stringKey(node))
            out.append(row)
        return out
Example #39
0
 def get_batch(self, start, limit):
     """Get the data instances for this batch.

     Builds the listing query in stages: date-range filter, sort
     expressions, per-column listing filters, then subclass filters;
     records the total result count in self.set_size before slicing
     with offset/limit. Returns contained() wrappers for the page.
     """
     context = proxy.removeSecurityProxy(self.context)
     query = context._query
     # date_range filter (try from: model, then cookie, then request)
     query = query_filter_date_range(context, self.request, query,
                                     self.domain_model)
     sort_on_expressions = []
     # other filters
     lc_filter_queries = self.get_filter()
     sort_on_keys = self.get_sort_keys()
     if sort_on_keys:
         for sort_on in sort_on_keys:
             md_field = self.domain_annotation.get(sort_on)
             if md_field:
                 if not md_field.listing_column_filter:
                     pass
                     #continue
                     #!+SORTING(mb, Mar-2013) why does sorting need a filter?
             else:
                 # check domain model if this if field is not in descriptor
                 if not hasattr(self.domain_model, sort_on):
                     continue
             # NOTE: descriptor fields reach here whether or not they
             # have a listing_column_filter (the branch above is a no-op)
             sort_on_expressions.append(
                 self.sort_dir_func(getattr(self.domain_model, sort_on)))
     for lc_filter_query, params in lc_filter_queries:
         filter_string = params[0]
         column_name = params[1]
         query = lc_filter_query(query, filter_string, self.sort_dir_func,
                                 column_name)
     if sort_on_expressions:
         query = query.order_by(*sort_on_expressions)
     #add optional filters, used by sub classes
     query = self.query_add_filters(query)
     # get total number of items before applying an offset and limit
     self.set_size = query.count()
     # offset and limit
     query = query.offset(start).limit(limit)
     # bungeni.alchemist.container.AlchemistContainer.batch()
     # nodes: [<bungeni.models.domain.Question]
     # !+STRING_KEY_FILE_VERSION + no permission "bungeni.attachment_version.View"!
     return [
         container.contained(ob, self, container.stringKey(ob))
         for ob in query_iterator(query, self.context)
     ]
Example #40
0
 def get_batch(self, start, limit):
     """Get the data instances for this batch.

     Builds the listing query in stages (date-range filter, subclass
     filters, sort expressions, listing-column filters), records the
     total count in self.set_size, then slices with offset/limit and
     returns contained() wrappers for the page of objects.
     """
     context = proxy.removeSecurityProxy(self.context)
     query = context._query

     # date_range filter (try from: model, then cookie, then request)
     query = query_filter_date_range(context, self.request, query,
         self.domain_model)
     sort_on_expressions = []
     # other filters
     filter_string, lc_filter_queries = self.get_filter()
     query = self.query_add_filters(query, filter_string)
     sort_on_keys = self.get_sort_keys()
     if sort_on_keys:
         for sort_on in sort_on_keys:
             md_field = self.domain_annotation.get(sort_on)
             if md_field:
                 lc_filter = md_field.listing_column_filter
                 if not lc_filter:
                     sort_on_expressions.append(
                         self.sort_dir_func(
                             getattr(self.domain_model, sort_on)))
     for lc_filter_query, lc_filter_string in lc_filter_queries:
         query = lc_filter_query(query, lc_filter_string, self.sort_dir_func)
     if sort_on_expressions:
         # BUGFIX: unpack the expression list -- Query.order_by() takes
         # *criteria (see the sibling get_batch implementations); passing
         # the bare list relies on deprecated SQLAlchemy coercion
         query = query.order_by(*sort_on_expressions)
     # get total number of items before applying an offset and limit
     self.set_size = query.count()
     # offset and limit
     query = query.offset(start).limit(limit)
     # bungeni.alchemist.container.AlchemistContainer.batch()
     # nodes: [<bungeni.models.domain.Question]
     return [
         container.contained(ob, self, container.stringKey(ob))
         for ob in query_iterator(query, self.context, self.permission)
     ]
Example #41
0
 def _get_items(self):
     """Build a list of display dicts, one per sitting in the query."""
     sittings_path = self.get_sittings_path()
     time_formatter = self.get_date_formatter("time", "short")
     entries = []
     for sitting in self.query.all():
         start, end = sitting.start_date, sitting.end_date
         entries.append({
             "sittingid": "sid_" + str(sitting.sitting_id),
             "sid": sitting.sitting_id,
             "short_name": "%s - %s" % (
                 time_formatter.format(start),
                 time_formatter.format(end)),
             "start_date": start,
             "end_date": end,
             "start_time": start.time(),
             "end_time": end.time(),
             "day": start.date(),
             "url": "/".join([sittings_path, stringKey(sitting)]),
             "did": "dlid_%s" % (
                 datetime.datetime.strftime(start, "%Y-%m-%d")),
         })
     return entries
Example #42
0
def get_sitting_items(sitting, request, include_actions=False):
    """Build display records for all items scheduled on a sitting.

    Each record carries the item's dc-properties, workflow state title,
    the first discussion (plus a ~2-line truncated preview), and URLs;
    when ``include_actions`` is True, scheduling/workflow/discussion
    actions are added as well.
    """
    items = []

    # draft/published agenda use the planned ordering; otherwise the
    # actual (real) order of the sitting is used
    if sitting.status in get_states("groupsitting",
                                    keys=["draft_agenda", "published_agenda"]):
        order = "planned_order"
    else:
        order = "real_order"

    schedulings = map(removeSecurityProxy,
                      sitting.items.batch(order_by=order, limit=None))
    site_url = url.absoluteURL(getSite(), request)
    for scheduling in schedulings:
        item = ProxyFactory(location_wrapped(scheduling.item, sitting))

        props = IDCDescriptiveProperties.providedBy(item) and item or \
                IDCDescriptiveProperties(item)

        discussions = tuple(scheduling.discussions.values())
        discussion = discussions and discussions[0] or None
        truncated_discussion = None
        if ((discussion is not None) and (discussion.body_text is not None)):
            # truncate discussion to first hundred characters
            t_discussion = discussion.body_text[0:100]
            try:
                # truncate discussion to first two lines
                index = t_discussion.index("<br>")
                index2 = t_discussion.index("<br>", index + 4)
                truncated_discussion = t_discussion[0:index2] + "..."
            except ValueError:
                truncated_discussion = t_discussion + "..."
        info = IWorkflowInfo(item, None)
        state_title = info.workflow().workflow.states[item.status].title

        record = {
            'title': props.title,
            'description': props.description,
            'name': stringKey(scheduling),
            'status': item.status,
            # BUGFIX: capitalize() was not being called -- the bound
            # method object (not the capitalized string) was stored
            'type': item.type.capitalize(),
            't': item.type,
            'state_title': state_title,
            #'category_id': scheduling.category_id,
            #'category': scheduling.category,
            'discussion': discussion,
            'truncated_discussion': truncated_discussion,
            'delete_url': "%s/delete" % url.absoluteURL(scheduling, request),
            'url': url.set_url_context(site_url +
                ('/business/%ss/obj-%s' %
                 (item.type, item.parliamentary_item_id))),
        }

        if include_actions:
            record['actions'] = get_scheduling_actions(scheduling, request)
            record['workflow'] = get_workflow_actions(item, request)

            discussion_actions = get_discussion_actions(discussion, request)
            if discussion_actions:
                assert len(discussion_actions) == 1
                record['discussion_action'] = discussion_actions[0]
            else:
                record['discussion_action'] = None
        items.append(record)
    return items
Example #43
0
def publish_to_xml(context):
    """Generates XML for object and saves it to the file. If object contains
    attachments - XML is saved in zip archive with all attached files.

    The resulting file location is then published to the rabbitmq outputs
    queue for downstream consumers; if no MQ connection is available the
    function exits after writing the file(s). Returns None.
    """

    #create a fake interaction to ensure items requiring a participation
    #are serialized 
    #!+SERIALIZATION(mb, Jan-2013) review this approach
    try:
        zope.security.management.getInteraction()
    except zope.security.interfaces.NoInteraction:
        principal = zope.security.testing.Principal('user', 'manager', ())
        zope.security.management.newInteraction(create_participation(principal))

    include = []
    # list of files to zip
    files = []
    # data dict to be published
    data = {}

    context = zope.security.proxy.removeSecurityProxy(context)

    if interfaces.IFeatureVersion.providedBy(context):
        include.append("versions")
    if interfaces.IFeatureAudit.providedBy(context):
        include.append("event")

    exclude = ["data", "event", "attachments", "changes"]

    # include binary fields and include them in the zip of files for this object
    for column in class_mapper(context.__class__).columns:
        if column.type.__class__ == Binary:
            exclude.append(column.key)
            content = getattr(context, column.key, None)
            if content:
                # binary content is written to a temp file and referenced
                # from the serialized data by its basename
                bfile = tmp(delete=False)
                bfile.write(content)
                files.append(bfile.name)
                data[column.key] = dict(
                    saved_file=os.path.basename(bfile.name)
                )
                bfile.close()
    data.update(
        obj2dict(context, 1, 
            parent=None,
            include=include,
            exclude=exclude
        )
    )
    obj_type = IWorkflow(context).name
    tags = IStateController(context).get_state().tags
    if tags:
        data["tags"] = tags
    permissions = get_object_state_rpm(context).permissions
    data["permissions"] = get_permissions_dict(permissions)
    # change log entries carry the head object's permissions at the time
    # of each change
    data["changes"] = []
    for change in getattr(context, "changes", []):
        change_dict = obj2dict(change, 0, parent=context)
        change_permissions = get_head_object_state_rpm(change).permissions
        change_dict["permissions"] = get_permissions_dict(change_permissions)
        data["changes"].append(change_dict)

    # setup path to save serialized data 
    path = os.path.join(setupStorageDirectory(), obj_type)
    if not os.path.exists(path):
        os.makedirs(path)

    # xml file path
    file_path = os.path.join(path, stringKey(context))

    if interfaces.IFeatureAttachment.providedBy(context):
        attachments = getattr(context, "attachments", None)
        if attachments:
            data["attachments"] = []
            for attachment in attachments:
                # serializing attachment
                attachment_dict = obj2dict(attachment, 1,
                    parent=context,
                    exclude=["data", "event", "versions"])
                permissions = get_object_state_rpm(attachment).permissions
                attachment_dict["permissions"] = \
                    get_permissions_dict(permissions)
                # saving attachment to tmp
                attached_file = tmp(delete=False)
                attached_file.write(attachment.data)
                attached_file.flush()
                attached_file.close()
                files.append(attached_file.name)
                attachment_dict["saved_file"] = os.path.basename(
                    attached_file.name
                )
                data["attachments"].append(attachment_dict)


    # zipping xml, attached files plus any binary fields
    # also remove the temporary files
    if files:
        #generate temporary xml file
        temp_xml = tmp(delete=False)
        temp_xml.write(serialize(data, name=obj_type))
        temp_xml.close()
        #write attachments/binary fields to zip
        zip_file = ZipFile("%s.zip" % (file_path), "w")
        for f in files:
            zip_file.write(f, os.path.basename(f))
            os.remove(f)
        #write the xml
        zip_file.write(temp_xml.name, "%s.xml" % os.path.basename(file_path))
        zip_file.close()
        #placed remove after zip_file.close !+ZIP_FILE_CRC_FAILURE
        os.remove(temp_xml.name)

    else:
        # save serialized xml to file
        with open("%s.xml" % (file_path), "w") as xml_file:
            xml_file.write(serialize(data, name=obj_type))
            xml_file.close()

    #publish to rabbitmq outputs queue
    connection = get_mq_connection()
    if not connection:
        return
    channel = connection.channel()
    publish_file_path = "%s.%s" %(file_path, ("zip" if files else "xml"))
    channel.basic_publish(
        exchange=SERIALIZE_OUTPUT_EXCHANGE,
        routing_key=SERIALIZE_OUTPUT_ROUTING_KEY,
        body=simplejson.dumps({"type": "file", "location": publish_file_path }),
        properties=pika.BasicProperties(content_type="text/plain",
            delivery_mode=2
        )
    )

    #clean up - remove any files if zip was created
    if files:
        prev_xml_file = "%s.%s" %(file_path, "xml")
        if os.path.exists(prev_xml_file):
            os.remove(prev_xml_file)
Example #44
0
 def __str__(self):
     """Absolute admin URL for this context within its parliament."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     return "/".join([
         site_root, "admin/content/parliaments",
         "obj-%s" % self.context.group_id,
         self.subsection, stringKey(self.context),
     ])
 def __str__(self):
     """Absolute URL of this context: <site>/<section>/<subsection>/<key>."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     return "%s/%s/%s/%s" % (
         site_root, self.section, self.subsection, stringKey(self.context))
Example #46
0
def publish_to_xml(context):
    """Serialize an object to XML and queue the result for publishing.

    The object (plus versions/events where supported) is rendered to
    XML. Attachments and any cached binary files are written to
    temporary files and bundled with the XML into a zip archive;
    otherwise a bare .xml file is written. The resulting file location
    is then published to the serialization output queue over RabbitMQ.
    Returns None; exits early if no MQ connection is available.

    BUGFIX: the original body mixed tab and space indentation (the
    tab-indented lines raise TabError under python 3 and break under
    python 2 with -tt); indentation is normalized to spaces throughout.
    """
    context = zope.security.proxy.removeSecurityProxy(context)
    obj_type = IWorkflow(context).name
    # unique file name per serialization run: the random suffix avoids
    # the file-name collisions that previously required locking
    random_name_sfx = generate_random_filename()
    context_file_name = "%s-%s" % (stringKey(context), random_name_sfx)
    #!+LOCKING(AH, 25-01-2014) disabling file locking
    #! locking was required when the serializer used a constant file name
    #! for an object. Now serialized file names are unique, and non repeated
    # root key (used to cache files to zip)
    root_key = make_key()
    # create a fake interaction to ensure items requiring a participation
    # are serialized
    #!+SERIALIZATION(mb, Jan-2013) review this approach
    try:
        zope.security.management.getInteraction()
    except zope.security.interfaces.NoInteraction:
        principal = zope.security.testing.Principal("user", "manager", ())
        zope.security.management.newInteraction(create_participation(principal))
    include = []
    # data dict to be published
    data = {}
    if IFeatureVersion.providedBy(context):
        include.append("versions")
    if IFeatureEvent.providedBy(context):
        include.append("event")

    exclude = ["data", "event", "attachments"]
    data.update(
        obj2dict(context, 1,
            parent=None,
            include=include,
            exclude=exclude,
            root_key=root_key
        )
    )

    tags = IStateController(context).get_state().tags
    if tags:
        data["tags"] = tags
    permissions = get_object_state_rpm(context).permissions
    data["permissions"] = get_permissions_dict(permissions)

    # setup path to save serialized data
    path = os.path.join(setupStorageDirectory(), obj_type)
    log.info("Setting up path to write to : %s", path)
    if not os.path.exists(path):
        # !+THREADSAFE(AH, 2014-09-24) making makedirs threadsafe,
        # sometimes between checking for existence and execution
        # of makedirs() the folder has already been created by
        # another thread
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                log.info("Error Folder : %s already exists, ignoring exception ", path)
            else:
                raise

    # xml file path
    file_path = os.path.join(path, context_file_name)
    # files to zip
    files = []

    if IFeatureAttachment.providedBy(context):
        attachments = getattr(context, "attachments", None)
        if attachments:
            data["attachments"] = []
            for attachment in attachments:
                # serializing attachment
                attachment_dict = obj2dict(attachment, 1,
                    parent=context,
                    exclude=["data", "event", "versions"])
                # saving attachment to tmp
                attached_file = tmp(delete=False)
                attached_file.write(attachment.data)
                attached_file.flush()
                attached_file.close()
                files.append(attached_file.name)
                attachment_dict["saved_file"] = os.path.basename(
                    attached_file.name
                )
                data["attachments"].append(attachment_dict)

    # add explicit origin chamber for this object (used to partition data
    # if more than one chamber exists)
    if obj_type == "Legislature":
        data["origin_chamber"] = None
    else:
        data["origin_chamber"] = get_origin_chamber(context)

    # add any additional files to file list
    files = files + PersistFiles.get_files(root_key)
    # zipping xml, attached files plus any binary fields
    # also remove the temporary files
    if files:
        # generate temporary xml file
        temp_xml = tmp(delete=False)
        temp_xml.write(serialize(data, name=obj_type))
        temp_xml.close()
        # write attachments/binary fields to zip
        with ZipFile("%s.zip" % (file_path), "w") as zip_file:
            for f in files:
                zip_file.write(f, os.path.basename(f))
            # write the xml
            zip_file.write(temp_xml.name, "%s.xml" % os.path.basename(file_path))
        files.append(temp_xml.name)
    else:
        # save serialized xml to file
        with open("%s.xml" % (file_path), "w") as xml_file:
            xml_file.write(serialize(data, name=obj_type))
    # publish to rabbitmq outputs queue
    connection = bungeni.core.notifications.get_mq_connection()
    if not connection:
        return
    channel = connection.channel()
    publish_file_path = "%s.%s" % (file_path, ("zip" if files else "xml"))
    channel.basic_publish(
        exchange=SERIALIZE_OUTPUT_EXCHANGE,
        routing_key=SERIALIZE_OUTPUT_ROUTING_KEY,
        body=simplejson.dumps({"type": "file", "location": publish_file_path}),
        properties=pika.BasicProperties(content_type="text/plain",
            delivery_mode=2
        )
    )

    # clean up - remove any files if zip was/was not created
    if files:
        files.append("%s.%s" % (file_path, "xml"))
    else:
        files.append("%s.%s" % (file_path, "zip"))
    remove_files(files)

    # clear the cache
    PersistFiles.clear_files(root_key)
Example #47
0
def publish_to_xml(context):
    """Serialize an object to XML under the storage directory.

    Versions and change/event history are included where the object
    supports them. If the object has attached files, the XML plus the
    attachments are bundled into a single zip archive and the loose
    files are removed.
    """
    include = []

    context = removeSecurityProxy(context)

    if IVersionable.implementedBy(context.__class__):
        include.append("versions")
    if IAuditable.implementedBy(context.__class__):
        include.append("event")

    data = obj2dict(context, 1,
        parent=None,
        include=include,
        exclude=[
            "file_data", "image", "logo_data", "event",
            "attached_files", "changes"
        ])

    # workflow name doubles as the storage sub-directory name
    # (renamed from "type" to avoid shadowing the builtin)
    obj_type = IWorkflow(context).name

    tags = IStateController(context).get_state().tags
    if tags:
        data["tags"] = tags

    permissions = get_object_state_rpm(context).permissions
    data["permissions"] = get_permissions_dict(permissions)

    # serialize the change log, each entry with the permissions of the
    # head object at the time of the change
    data["changes"] = []
    for change in getattr(context, "changes", []):
        change_dict = obj2dict(change, 0, parent=context)
        change_permissions = get_head_object_state_rpm(change).permissions
        change_dict["permissions"] = get_permissions_dict(change_permissions)
        data["changes"].append(change_dict)

    # list of files to zip
    files = []
    # setup path to save serialized data
    path = os.path.join(setupStorageDirectory(), obj_type)
    if not os.path.exists(path):
        os.makedirs(path)

    # xml file path
    file_path = os.path.join(path, stringKey(context))

    has_attachments = False
    if IAttachmentable.implementedBy(context.__class__):
        attached_files = getattr(context, "attached_files", None)
        if attached_files:
            has_attachments = True
            # add xml file to list of files to zip
            files.append("%s.xml" % (file_path))
            data["attached_files"] = []
            for attachment in attached_files:
                # serializing attachment
                attachment_dict = obj2dict(attachment, 1,
                    parent=context,
                    exclude=["file_data", "event", "versions", "changes"])
                permissions = get_object_state_rpm(attachment).permissions
                attachment_dict["permissions"] = \
                    get_permissions_dict(permissions)
                # saving attachment to tmp
                with tmp(delete=False) as f:
                    f.write(attachment.file_data)
                    files.append(f.name)
                    attachment_dict["saved_file"] = \
                        os.path.split(f.name)[-1]
                data["attached_files"].append(attachment_dict)

    # saving xml file (renamed from "file" to avoid shadowing the builtin)
    with open("%s.xml" % (file_path), "w") as xml_file:
        xml_file.write(serialize(data, name=obj_type))

    # zipping xml and attached files; unzipped files are removed
    # (renamed from "zip" to avoid shadowing the builtin)
    if has_attachments:
        zip_file = ZipFile("%s.zip" % (file_path), "w")
        for f in files:
            zip_file.write(f, os.path.split(f)[-1])
            os.remove(f)
        zip_file.close()
 def __str__(self):
     """Public business URL for the item (trailing slash, pluralized type)."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     return "%s/business/%ss/%s/" % (
         site_root, self.context.type, stringKey(self.context))
 def __str__(self):
     """Admin URL for an office object."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     return "%s/admin/content/offices/%s" % (site_root, stringKey(self.context))
Example #50
0
def publish_to_xml(context):
    """Serialize an object to XML and save it to the file system.

    If the object has binary fields or attachments, the XML plus all
    related files are bundled into a single ``<key>.zip``; otherwise a
    plain ``<key>.xml`` is written. The location of the generated file is
    then published to the RabbitMQ serialization output queue.
    """
    # create a fake interaction to ensure items requiring a participation
    # are serialized
    # !+SERIALIZATION(mb, Jan-2013) review this approach
    try:
        zope.security.management.getInteraction()
    except zope.security.interfaces.NoInteraction:
        principal = zope.security.testing.Principal('user', 'manager', ())
        zope.security.management.newInteraction(
            create_participation(principal))

    include = []
    # list of files to zip
    files = []
    # data dict to be published
    data = {}

    context = zope.security.proxy.removeSecurityProxy(context)

    if interfaces.IFeatureVersion.providedBy(context):
        include.append("versions")
    if interfaces.IFeatureAudit.providedBy(context):
        include.append("event")

    exclude = ["data", "event", "attachments", "changes"]

    # dump binary fields to standalone files (zipped with the XML) instead
    # of inlining them in the serialized output
    for column in class_mapper(context.__class__).columns:
        # isinstance (rather than an exact __class__ comparison) so that
        # subclasses of the Binary column type are detected as well
        if isinstance(column.type, Binary):
            exclude.append(column.key)
            content = getattr(context, column.key, None)
            if content:
                # the context manager guarantees the temp file is closed
                # even if the write raises (the file itself is kept:
                # delete=False — it is removed after zipping below)
                with tmp(delete=False) as bfile:
                    bfile.write(content)
                    files.append(bfile.name)
                    data[column.key] = dict(
                        saved_file=os.path.basename(bfile.name))
    data.update(
        obj2dict(context, 1, parent=None, include=include, exclude=exclude))
    obj_type = IWorkflow(context).name
    tags = IStateController(context).get_state().tags
    if tags:
        data["tags"] = tags
    permissions = get_object_state_rpm(context).permissions
    data["permissions"] = get_permissions_dict(permissions)
    data["changes"] = []
    for change in getattr(context, "changes", []):
        change_dict = obj2dict(change, 0, parent=context)
        change_permissions = get_head_object_state_rpm(change).permissions
        change_dict["permissions"] = get_permissions_dict(change_permissions)
        data["changes"].append(change_dict)

    # setup path to save serialized data
    path = os.path.join(setupStorageDirectory(), obj_type)
    if not os.path.exists(path):
        os.makedirs(path)

    # xml file path
    file_path = os.path.join(path, stringKey(context))

    if interfaces.IFeatureAttachment.providedBy(context):
        attachments = getattr(context, "attachments", None)
        if attachments:
            data["attachments"] = []
            for attachment in attachments:
                # serializing attachment
                attachment_dict = obj2dict(
                    attachment,
                    1,
                    parent=context,
                    exclude=["data", "event", "versions"])
                permissions = get_object_state_rpm(attachment).permissions
                attachment_dict["permissions"] = \
                    get_permissions_dict(permissions)
                # saving attachment to tmp; the context manager flushes and
                # closes the file even if the write raises
                with tmp(delete=False) as attached_file:
                    attached_file.write(attachment.data)
                files.append(attached_file.name)
                attachment_dict["saved_file"] = os.path.basename(
                    attached_file.name)
                data["attachments"].append(attachment_dict)

    # zipping xml, attached files plus any binary fields
    # also remove the temporary files
    if files:
        # generate temporary xml file
        with tmp(delete=False) as temp_xml:
            temp_xml.write(serialize(data, name=obj_type))
        # write attachments/binary fields plus the xml into the archive;
        # try/finally ensures the zip handle is closed even if a write fails
        zip_file = ZipFile("%s.zip" % (file_path), "w")
        try:
            for f in files:
                zip_file.write(f, os.path.basename(f))
                os.remove(f)
            # write the xml
            zip_file.write(
                temp_xml.name, "%s.xml" % os.path.basename(file_path))
        finally:
            zip_file.close()
        # placed remove after zip_file.close !+ZIP_FILE_CRC_FAILURE
        os.remove(temp_xml.name)

    else:
        # save serialized xml to file; the with-statement closes the handle
        # (the explicit close() in the original was redundant)
        with open("%s.xml" % (file_path), "w") as xml_file:
            xml_file.write(serialize(data, name=obj_type))

    # publish to rabbitmq outputs queue; best-effort: bail out quietly if
    # no broker connection is available
    connection = get_mq_connection()
    if not connection:
        return
    channel = connection.channel()
    publish_file_path = "%s.%s" % (file_path, ("zip" if files else "xml"))
    channel.basic_publish(exchange=SERIALIZE_OUTPUT_EXCHANGE,
                          routing_key=SERIALIZE_OUTPUT_ROUTING_KEY,
                          body=simplejson.dumps({
                              "type": "file",
                              "location": publish_file_path
                          }),
                          properties=pika.BasicProperties(
                              content_type="text/plain", delivery_mode=2))

    # clean up - remove any stale plain xml file if a zip was created
    if files:
        prev_xml_file = "%s.%s" % (file_path, "xml")
        if os.path.exists(prev_xml_file):
            os.remove(prev_xml_file)
 def __str__(self):
     """Absolute URL of this party in the admin content listing."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     item_key = stringKey(self.context)
     return "%s/admin/content/parties/%s" % (site_root, item_key)
Example #52
0
 def __str__(self):
     """Absolute URL of this item under its section/subsection."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     return '%s/%s/%s/%s' % (
         site_root, self.section, self.subsection, stringKey(self.context))
Example #53
0
 def __str__(self):
     """Absolute URL of this party in the admin content listing."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     item_key = stringKey(self.context)
     return '%s/admin/content/parties/%s' % (site_root, item_key)
Example #54
0
 def __str__(self):
     """Absolute URL of this item in the parliament archive browser."""
     site_root = ui_utils.url.absoluteURL(getSite(), self.request)
     return '%s/archive/browse/parliaments/obj-%s/%s/%s' % (
         site_root,
         self.context.parliament_id,
         self.subsection,
         stringKey(self.context))