Example #1
 def render_view(self, foo, index, storage=None):
     """Whatever dummy value we receive, the result will be a TAL expression evaluation"""
     self.data = None
     expression = self.configuration.get('vocabulary')
     if expression:
         expression = expression.splitlines()[0]
         talEngine = Expressions.getEngine()
         try:
             # compile inside the try block, so an invalid expression is
             # caught as CompilerError below instead of escaping the handler
             compiledExpr = talEngine.compile(expression)
             self.data = compiledExpr(talEngine.getContext(self.eval_mappings(index=index, storage=storage)))
         except CompilerError:
             logger.debug("Can't evaluate %s" % expression)
             self.data = None
         except Exception:
             logger.debug("Error evaluating %s or row %d" % (expression, index))
             self.data = None
     return self.view_template(data=self.data)
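
Every variant below repeats the same compile-and-evaluate steps. Pulled out on its own, the pattern looks roughly like this; a minimal sketch assuming the same Zope engine the snippets import, where evaluate_tal and the mappings dict are illustrative names, not part of the original code:

    import logging

    from Products.PageTemplates import Expressions
    from zope.tales.tales import CompilerError

    logger = logging.getLogger(__name__)

    def evaluate_tal(expression, mappings):
        """Evaluate the first line of a TAL expression against a name mapping."""
        talEngine = Expressions.getEngine()
        try:
            compiled = talEngine.compile(expression.splitlines()[0])
            # compiled zope.tales expressions are callable with an engine context
            return compiled(talEngine.getContext(mappings))
        except CompilerError:
            logger.debug("Can't compile %s" % expression)
            return None
        except Exception:
            logger.debug("Error evaluating %s" % expression)
            return None

Under these assumptions, evaluate_tal("string:row ${index}", {'index': 3}) should return "row 3".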
Example #2
 def data_for_display(self, foo, backend=False, row_index=None):
     """Data is always ignored""" 
     if backend:
         raise NotImplementedError("ComputedColumn will not output anything for backend mode")
     # default, so the return below never hits an unbound name
     data = None
     expression = self.configuration.get('vocabulary')
     if expression:
         expression = expression.splitlines()[0]
         talEngine = Expressions.getEngine()
         try:
             # compile inside the try block, as above, so a CompilerError is
             # actually catchable
             compiledExpr = talEngine.compile(expression)
             data = compiledExpr(talEngine.getContext(self.eval_mappings(index=row_index)))
         except CompilerError:
             logger.debug("Can't evaluate %s" % expression)
             data = None
         except Exception:
             logger.debug("Error evaluating %s or row %d" % (expression, row_index))
             data = None
     return data
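
The backend branch above is a hard contract, not a fallback; a hypothetical check of it, where column stands in for any IColumnField adapter of this type:

    # column is an illustrative name for a ComputedColumn-style adapter
    try:
        column.data_for_display('ignored dummy value', backend=True)
    except NotImplementedError:
        pass  # this column type never produces backend output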
Example #3
    def rows(self, batch=False, bsize=0, b_start=0, search=False, ignore_cache=False):
        context = self.context
        request = self.request
        if not search:
            storage = self.storage
            self.result_length = len(storage)
        else:
            tp_catalog = getToolByName(context, 'tablepage_catalog')
            storage = tp_catalog.searchTablePage(context, **self._clean_query(request.form))
            self.result_length = getattr(storage, 'actual_result_count', None) or len(storage)

        rows = []
        adapters = {}
        # reset b_start if it points past the end of the results
        if b_start > self.result_length:
            b_start = 0

        # let's cache adapters
        for conf in self.context.getPageColumns():
            col_type = conf['type']
            if not adapters.get(col_type):
                adapters[col_type] = getMultiAdapter((context, request),
                                                     IColumnField, name=col_type)

        self.last_page_label = self._findLastPageLabel(b_start)

        index = b_start
        write_attempt_count = 0
        # slice out the current batch; a bsize of 0 means "to the end"
        for record in storage[b_start:(b_start + bsize) if bsize else None]:
            if search:
                record = self.storage[record.UID]
            if batch and index >= b_start + bsize:
                # This way we always display bsize rows, not bsize rows of data,
                # but that is enough for now.
                # BBB: can this ever be true?
                break
            
            if record.get('__label__') or getattr(record, 'is_label', False):
                rows.append(record.get('__label__') or getattr(record, 'label'))
                index += 1
                continue

            # every row data is a dict with the UID, and a list of data for single cells
            row = {'UID': record.get('__uuid__') or record.UID,
                   'cols': []}
            write_attempt = False
            for conf in context.getPageColumns():
                field = adapters[conf['type']]
                field.configuration = conf
                # Cache hit
                if not ignore_cache and field.cache_time and record.get("__cache__", {}).get(conf['id']) and \
                        self.now.millis() < record["__cache__"][conf['id']]['timestamp'] + field.cache_time * 1000:
                    output = record["__cache__"][conf['id']]['data']
                    logger.debug("Cache hit (%s)" % conf['id'])
                # Cache miss
                else:
                    output = field.render_view(record.get(conf['id']), index, storage)
                    if field.cache_time:
                        if record.get("__cache__") is None:
                            record["__cache__"] = PersistentDict()
                        record["__cache__"][conf['id']] = PersistentDict()
                        record["__cache__"][conf['id']]['data'] = output
                        record["__cache__"][conf['id']]['timestamp'] = self.now.millis()
                        write_attempt = True
                        logger.debug("Cache miss (%s)" % conf['id'])
                # use the current column's type here; the col_type left over
                # from the adapter-caching loop above would tag every cell
                # with the last column's type
                row['cols'].append({'content': output,
                                    'classes': 'coltype-%s col-%s' % (self.ploneview.normalizeString(conf['type']),
                                                                      self.ploneview.normalizeString(conf['id']))})
            rows.append(row)
            index += 1
            if write_attempt:
                write_attempt_count += 1
                # savepoint every 100 cache writes; checking here, inside the
                # write_attempt branch, avoids repeating the savepoint on every
                # cache-hit row once the count sits at a multiple of 100
                if write_attempt_count % 100 == 0:
                    transaction.savepoint()
                    logger.debug('Writing to cache fresh data (%d rows done)' % write_attempt_count)
        return rows
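
The per-cell cache consulted and refreshed in the loop has a small fixed shape per column id. An illustrative sketch of one record's entry, with made-up values, assuming PersistentDict is the usual persistent.mapping class:

    from persistent.mapping import PersistentDict

    record["__cache__"] = PersistentDict()
    record["__cache__"]["col_a"] = PersistentDict()          # "col_a" is a made-up column id
    record["__cache__"]["col_a"]["data"] = "<span>rendered cell</span>"
    record["__cache__"]["col_a"]["timestamp"] = 1700000000000  # self.now.millis()

    # the cached value is reused while:
    #   self.now.millis() < timestamp + field.cache_time * 1000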
Example #4
    def rows(self, batch=False, bsize=0, b_start=0, search=False, ignore_cache=False):
        context = self.context
        request = self.request
        if not search:
            storage = self.storage
            self.result_length = len(storage)
        else:
            tp_catalog = getToolByName(context, 'tablepage_catalog')
            storage = tp_catalog.searchTablePage(context, **self._clean_query(request.form))
            self.result_length = getattr(storage, 'actual_result_count', None) or len(storage)

        rows = []
        adapters = {}
        # reset b_start if it points past the end of the results
        if b_start > self.result_length:
            b_start = 0

        # let's cache adapters
        for conf in self.context.getPageColumns():
            col_type = conf['type']
            if not adapters.get(col_type):
                adapters[col_type] = getMultiAdapter((context, request),
                                                     IColumnField, name=col_type)

        self.last_page_label = self._findLastPageLabel(b_start)

        index = b_start
        write_attempt_count = 0
        # slice out the current batch; a bsize of 0 means "to the end"
        for record in storage[b_start:(b_start + bsize) if bsize else None]:
            if search:
                record = self.storage[record.UID]
            if batch and index >= b_start + bsize:
                # This way we always display bsize rows, not bsize rows of data,
                # but that is enough for now.
                # BBB: can this ever be true?
                break
            
            if record.get('__label__') or getattr(record, 'is_label', False):
                rows.append({
                    '__label__': record.get('__label__') or getattr(record, 'label'),
                    '__tablerowstyle__': record.get('__tablerowstyle__')
                })
                index += 1
                continue

            # every row data is a dict with the UID, and a list of data for single cells
            row = {'UID': record.get('__uuid__') or record.UID,
                   '__tablerowstyle__': record.get('__tablerowstyle__'),
                   'cols': []}
            write_attempt = False
            for conf in context.getPageColumns():
                field = adapters[conf['type']]
                field.configuration = conf
                # Cache hit
                if not ignore_cache and field.cache_time and record.get("__cache__", {}).get(conf['id']) and \
                        self.now.millis() < record["__cache__"][conf['id']]['timestamp'] + field.cache_time * 1000:
                    output = record["__cache__"][conf['id']]['data']
                    logger.debug("Cache hit (%s)" % conf['id'])
                # Cache miss
                else:
                    output = field.render_view(record.get(conf['id']), index, storage)
                    if field.cache_time:
                        if record.get("__cache__") is None:
                            record["__cache__"] = PersistentDict()
                        record["__cache__"][conf['id']] = PersistentDict()
                        record["__cache__"][conf['id']]['data'] = output
                        record["__cache__"][conf['id']]['timestamp'] = self.now.millis()
                        write_attempt = True
                        logger.debug("Cache miss (%s)" % conf['id'])
                # use the current column's type here; the col_type left over
                # from the adapter-caching loop above would tag every cell
                # with the last column's type
                row['cols'].append({'content': output,
                                    'classes': 'coltype-%s col-%s' % (self.ploneview.normalizeString(conf['type']),
                                                                      self.ploneview.normalizeString(conf['id']))})
            rows.append(row)
            index += 1
            if write_attempt:
                write_attempt_count += 1
                # savepoint every 100 cache writes; checking here, inside the
                # write_attempt branch, avoids repeating the savepoint on every
                # cache-hit row once the count sits at a multiple of 100
                if write_attempt_count % 100 == 0:
                    transaction.savepoint()
                    logger.debug('Writing to cache fresh data (%d rows done)' % write_attempt_count)
        return rows
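
The slice bounds combined with the in-loop break implement plain offset/size batching. A small worked example of the arithmetic, with made-up numbers:

    b_start, bsize = 40, 20
    stop = (b_start + bsize) if bsize else None  # 60; bsize == 0 means "to the end"
    page = list(range(100))[b_start:stop]        # records 40..59, i.e. one batch
    assert len(page) == bsize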