コード例 #1
0
    def getReservedMessageList(self,
                               db,
                               date,
                               processing_node,
                               limit,
                               group_method_id=None,
                               node_set=None):
        """
        Select up to `limit` pending messages (processing_node=0) and assign
        them to `processing_node`.

        db -- database connection wrapper (provides string_literal and query)
        date -- only messages due at or before this date are considered
        processing_node -- node id that will own the reserved messages
        limit -- maximum number of messages to fetch; this number is not
          guaranteed to be reached when not enough messages are pending
          execution
        group_method_id -- if given, restrict to messages with this group
          method id
        node_set -- optional collection of node ids whose messages get a
          priority boost via the effective_priority computed below

        Returns a Results sequence of the reserved rows, or () when none.
        """
        assert limit
        quote = db.string_literal
        query = db.query
        # Common %-substitutions for all queries below: table name, date
        # condition, optional group_method_id condition, LIMIT value.
        args = (self.sql_table, sqltest_dict['to_date'](date, quote),
                ' AND group_method_id=' +
                quote(group_method_id) if group_method_id else '', limit)

        # Get reservable messages.
        # During normal operation, sorting by date (as last criteria) is fairer
        # for users and reduce the probability to do the same work several times
        # (think of an object that is modified several times in a short period of
        # time).
        if node_set is None:
            result = Results(
                query(
                    "SELECT * FROM %s WHERE processing_node=0 AND %s%s"
                    " ORDER BY priority, date LIMIT %s FOR UPDATE" % args, 0))
        else:
            # We'd like to write
            #   ORDER BY priority, IF(node, IF(node={node}, -1, 1), 0), date
            # but this makes indices inefficient.
            # Instead, each UNIONed subquery computes an "effective_priority"
            # (3*priority plus a per-subquery offset) so that every subquery
            # can still use the (priority, date) index on its own.
            subquery = ("(SELECT *, 3*priority{} as effective_priority FROM %s"
                        " WHERE {} AND processing_node=0 AND %s%s"
                        " ORDER BY priority, date LIMIT %s FOR UPDATE)" %
                        args).format
            node = 'node=%s' % processing_node
            result = Results(
                query(
                    # "ALL" on all but one, to incur deduplication cost only once.
                    # "UNION ALL" between the two naturally distinct sets.
                    "SELECT * FROM (%s UNION ALL %s UNION %s%s) as t"
                    " ORDER BY effective_priority, date LIMIT %s" %
                    (subquery(-1, node), subquery('', 'node=0'),
                     subquery('+IF(node, IF(%s, -1, 1), 0)' % node, 'node>=0'),
                     ' UNION ALL ' + subquery(
                         -1, 'node IN (%s)' % ','.join(map(str, node_set)))
                     if node_set else '', limit),
                    0))
        if result:
            # Reserve messages by assigning them to our processing node.
            uid_list = [x.uid for x in result]
            self.assignMessageList(db, processing_node, uid_list)
            self._log(TRACE, 'Reserved messages: %r' % uid_list)
            return result
        return ()
コード例 #2
0
 def test_results(self):
     """Results exposes rows with index, lower-case and upper-case
     attribute access, and instantiates them as the given brain class."""
     columns = [
         {'name': 'foo', 'type': 'integer'},
         {'name': 'bar', 'type': 'integer'},
     ]
     rows = ((1, 2), (3, 4))
     r = Results((columns, rows), brains=Brain, parent=Parent)
     self.assertEqual(len(r), 2)
     for index, (foo, bar) in enumerate(rows):
         record = r[index]
         self.assertEqual(record[0], foo)
         self.assertEqual(record[1], bar)
         self.assertEqual(record.foo, foo)
         self.assertEqual(record.bar, bar)
         self.assertEqual(record.FOO, foo)
         self.assertEqual(record.BAR, bar)
     self.assertTrue(isinstance(r[1], Brain))
コード例 #3
0
    def SQLhasVoted(self, poll_id, unicity_factor):
        """Return a Results counting ballots matching both *poll_id* and
        *unicity_factor* (non-zero count means this voter already voted)."""
        escaped_poll = self.safe_escape(poll_id)
        escaped_factor = self.safe_escape(unicity_factor)
        # Values are escaped above; they are then inlined into the statement.
        query = (
            "SELECT COUNT(*) FROM TM_POLL"
            " WHERE POLL_Id = '%s' AND UNICITY_FACTOR = '%s'"
            % (escaped_poll, escaped_factor))
        return Results(self.executeQuery(query))
コード例 #4
0
    def _results(self, rows):
        """Wrap raw *rows* (with this method's column items) in a Results.

        The brain class for class_file_/class_name_ is resolved once and
        cached on self._v_brain; note it is only logged here, not handed
        to Results.
        """
        try:
            brain = self._v_brain
        except AttributeError:
            brain = self._v_brain = getBrain(self.class_file_,
                                             self.class_name_)
        logger.debug(brain)
        return Results((self._items, rows))
コード例 #5
0
ファイル: SQLBase.py プロジェクト: yuanpli/erp5
 def _getMessageList(self, db, count=1000, src__=0, **kw):
   """Return pending messages as a Results (or the SQL text when src__).

   Conditions come from **kw, rendered through sqltest_dict;
   count=None means no LIMIT clause.
   """
   # XXX: Because most columns have NOT NULL constraint, conditions with None
   #      value should be ignored, instead of trying to render them
   #      (with comparisons with NULL).
   quote = db.string_literal
   where = '\n  AND '.join(
     sqltest_dict[k](v, quote) for k, v in kw.iteritems())
   sql = "SELECT * FROM %s%s\nORDER BY priority, date, uid%s" % (
     self.sql_table,
     where and '\nWHERE ' + where,
     '' if count is None else '\nLIMIT %d' % count,
   )
   if src__:
     return sql
   return Results(db.query(sql, max_rows=0))
コード例 #6
0
 def _getMessageList(self, activity_tool, offset=0, count=1000, src__=0, **kw):
   """Return pending messages as a Results (or the SQL text when src__).

   offset/count build the LIMIT clause; count=None means unlimited
   (sys.maxint rows when an offset is given, no LIMIT otherwise).
   """
   # XXX: Because most columns have NOT NULL constraint, conditions with None
   #      value should be ignored, instead of trying to render them
   #      (with comparisons with NULL).
   sql_connection = activity_tool.getPortalObject().cmf_activity_sql_connection
   quote = sql_connection.sql_quote__
   if offset:
     limit = '\nLIMIT %d,%d' % (offset, sys.maxint if count is None else count)
   elif count is None:
     limit = ''
   else:
     limit = '\nLIMIT %d' % count
   where = '\n  AND '.join(
     sqltest_dict[k](v, quote) for k, v in kw.iteritems())
   sql = "SELECT * FROM %s%s\nORDER BY priority, date, uid%s" % (
     self.sql_table, where and '\nWHERE ' + where, limit)
   if src__:
     return sql
   return Results(sql_connection().query(sql, max_rows=0))
コード例 #7
0
ファイル: SQLBase.py プロジェクト: pombredanne/erp5
    def getReservedMessageList(self,
                               db,
                               date,
                               processing_node,
                               limit,
                               group_method_id=None):
        """
        Select up to `limit` pending messages (processing_node=0) and assign
        them to `processing_node`.

        db -- database connection wrapper (provides string_literal and query)
        date -- only messages due at or before this date are considered
        processing_node -- node id that will own the reserved messages
        limit -- maximum number of messages to fetch; this number is not
          guaranteed to be reached when not enough messages are pending
          execution
        group_method_id -- if given, restrict to messages with this group
          method id

        Returns a Results sequence of the reserved rows, or () when none.
        """
        assert limit
        quote = db.string_literal
        query = db.query
        # %-substitutions: table name, date condition, optional
        # group_method_id condition, LIMIT value.
        args = (self.sql_table, sqltest_dict['to_date'](date, quote),
                ' AND group_method_id=' +
                quote(group_method_id) if group_method_id else '', limit)

        # Get reservable messages.
        # During normal operation, sorting by date (as last criteria) is fairer
        # for users and reduce the probability to do the same work several times
        # (think of an object that is modified several times in a short period
        # of time).
        # NOTE: the former "if 1:" wrapper (dead scaffolding left over from a
        # removed branch) has been dropped; the query runs unconditionally.
        result = Results(
            query(
                "SELECT * FROM %s WHERE processing_node=0 AND %s%s"
                " ORDER BY priority, date LIMIT %s FOR UPDATE" % args, 0))
        if result:
            # Reserve messages by assigning them to our processing node.
            uid_list = [x.uid for x in result]
            self.assignMessageList(db, processing_node, uid_list)
            self._log(TRACE, 'Reserved messages: %r' % uid_list)
            return result
        return ()
コード例 #8
0
ファイル: SQLDict.py プロジェクト: codencoffe/CC_ERP5
 def load(line):
     """Load one activity row, deduplicating against similar messages.

     Returns a 3-tuple (message, uid, uid_list):
       message -- Message instance to execute, or None when `line` is a
         duplicate of a message already returned earlier
       uid -- uid of the message that will actually be executed
       uid_list -- uids of duplicate messages superseded by `uid`
     Closure state (path_and_method_id_dict, quote, db, processing_node,
     self) comes from the enclosing loader scope.
     """
     # getProcessableMessageList already fetch messages with the same
     # group_method_id, so what remains to be filtered on are path and
     # method_id.
     # XXX: What about tag ?
     path = line.path
     method_id = line.method_id
     key = path, method_id
     uid = line.uid
     original_uid = path_and_method_id_dict.get(key)
     if original_uid is None:
         sql_method_id = " AND method_id = %s AND group_method_id = %s" % (
             quote(method_id), quote(line.group_method_id))
         m = Message.load(line.message, uid=uid, line=line)
         merge_parent = m.activity_kw.get('merge_parent')
         try:
             if merge_parent:
                 # Walk up the path towards merge_parent, collecting
                 # ancestor paths and bailing out early if an ancestor was
                 # already loaded in this batch.
                 path_list = []
                 while merge_parent != path:
                     path = path.rsplit('/', 1)[0]
                     assert path
                     original_uid = path_and_method_id_dict.get(
                         (path, method_id))
                     if original_uid is not None:
                         return None, original_uid, [uid]
                     path_list.append(path)
                 uid_list = []
                 if path_list:
                     # Select parent messages.
                     result = Results(
                         db.query(
                             "SELECT * FROM message"
                             " WHERE processing_node IN (0, %s) AND path IN (%s)%s"
                             " ORDER BY path LIMIT 1 FOR UPDATE" % (
                                 processing_node,
                                 ','.join(map(quote, path_list)),
                                 sql_method_id,
                             ), 0))
                     if result:  # found a parent
                         # mark child as duplicate
                         uid_list.append(uid)
                         # switch to parent
                         line = result[0]
                         key = line.path, method_id
                         uid = line.uid
                         m = Message.load(line.message,
                                          uid=uid,
                                          line=line)
                 # return unreserved similar children
                 path = line.path
                 result = db.query(
                     "SELECT uid FROM message"
                     " WHERE processing_node = 0 AND (path = %s OR path LIKE %s)"
                     "%s FOR UPDATE" % (
                         quote(path),
                         # '_' is a LIKE wildcard, so escape it in the prefix.
                         quote(path.replace('_', r'\_') + '/%'),
                         sql_method_id,
                     ), 0)[1]
                 reserve_uid_list = [x for x, in result]
                 uid_list += reserve_uid_list
                 if not line.processing_node:
                     # reserve found parent
                     reserve_uid_list.append(uid)
             else:
                 # Select duplicates.
                 result = db.query(
                     "SELECT uid FROM message"
                     " WHERE processing_node = 0 AND path = %s%s FOR UPDATE"
                     % (
                         quote(path),
                         sql_method_id,
                     ), 0)[1]
                 reserve_uid_list = uid_list = [x for x, in result]
             if reserve_uid_list:
                 self.assignMessageList(db, processing_node,
                                        reserve_uid_list)
             else:
                 db.query("COMMIT")  # XXX: useful ?
         except:
             # Release the row locks taken by the FOR UPDATE queries above.
             self._log(WARNING, 'Failed to reserve duplicates')
             db.query("ROLLBACK")
             raise
         if uid_list:
             self._log(TRACE,
                       'Reserved duplicate messages: %r' % uid_list)
         path_and_method_id_dict[key] = uid
         return m, uid, uid_list
     # We know that original_uid != uid because caller skips lines we returned
     # earlier.
     return None, original_uid, [uid]
コード例 #9
0
ファイル: SQLBase.py プロジェクト: yuanpli/erp5
  def getProcessableMessageList(self, activity_tool, processing_node,
                                node_family_id_list):
    """
      Always true:
        For each reserved message, delete redundant messages when it gets
        reserved (definitely lost, but they are expendable since redundant).

      - reserve a message
      - if this message has a group_method_id:
        - reserve a bunch of messages
        - until the total "cost" of the group goes over 1
          - get one message from the reserved bunch (this messages will be
            "needed")
          - update the total cost
        - unreserve "unneeded" messages
      - return still-reserved message list and a group_method_id

      If any error happens in above described process, try to unreserve all
      messages already reserved in that process.
      If it fails, complain loudly that some messages might still be in an
      unclean state.

      Returned values:
        3-tuple:
          - list of messages
          - group_method_id
          - uid_to_duplicate_uid_list_dict
    """
    db = activity_tool.getSQLConnection()
    now_date = getNow(db)
    # Maps each reserved uid to the uids of its duplicates; also used to
    # know what to free on error.
    uid_to_duplicate_uid_list_dict = {}
    try:
      while 1: # not a loop
        # Select messages that were either assigned manually or left
        # unprocessed after a shutdown. Most of the time, there's none.
        # To minimize the probability of deadlocks, we also COMMIT so that a
        # new transaction starts on the first 'FOR UPDATE' query, which is all
        # the more important as the current one started with getPriority().
        result = db.query("SELECT * FROM %s WHERE processing_node=%s"
          " ORDER BY priority, date LIMIT 1\0COMMIT" % (
          self.sql_table, processing_node), 0)
        already_assigned = result[1]
        if already_assigned:
          result = Results(result)
        else:
          result = self.getReservedMessageList(db, now_date, processing_node,
                                               1, node_set=node_family_id_list)
          if not result:
            break
          # So reserved documents are properly released even if load raises.
          for line in result:
            uid_to_duplicate_uid_list_dict[line.uid] = []
        load = self.getProcessableMessageLoader(db, processing_node)
        m, uid, uid_list = load(result[0])
        message_list = [m]
        uid_to_duplicate_uid_list_dict[uid] = uid_list
        group_method_id = m.line.group_method_id
        if group_method_id[0] != '\0':
          # Count the number of objects to prevent too many objects.
          cost = m.getGroupMethodCost()
          assert 0 < cost <= 1, (self.sql_table, uid)
          count = m.getObjectCount(activity_tool)
          # this is heuristic (messages with same group_method_id
          # are likely to have the same group_method_cost)
          limit = int(1. / cost + 1 - count)
          if limit > 1: # <=> cost * count < 1
            cost *= count
            # Retrieve objects which have the same group method.
            result = iter(already_assigned
              and Results(db.query("SELECT * FROM %s"
                " WHERE processing_node=%s AND group_method_id=%s"
                " ORDER BY priority, date LIMIT %s" % (
                self.sql_table, processing_node,
                db.string_literal(group_method_id), limit), 0))
                # Do not optimize rare case: keep the code simple by not
                # adding more results from getReservedMessageList if the
                # limit is not reached.
              or self.getReservedMessageList(db, now_date, processing_node,
                limit, group_method_id, node_family_id_list))
            for line in result:
              if line.uid in uid_to_duplicate_uid_list_dict:
                continue
              m, uid, uid_list = load(line)
              if m is None:
                # Duplicate of an already-loaded message: just record uids.
                uid_to_duplicate_uid_list_dict[uid] += uid_list
                continue
              uid_to_duplicate_uid_list_dict[uid] = uid_list
              cost += m.getObjectCount(activity_tool) * \
                      m.getGroupMethodCost()
              message_list.append(m)
              if cost >= 1:
                # Unreserve extra messages as soon as possible.
                uid_list = [line.uid for line in result if line.uid != uid]
                if uid_list:
                  self.assignMessageList(db, 0, uid_list)
        return message_list, group_method_id, uid_to_duplicate_uid_list_dict
    except:
      self._log(WARNING, 'Exception while reserving messages.')
      if uid_to_duplicate_uid_list_dict:
        to_free_uid_list = uid_to_duplicate_uid_list_dict.keys()
        for uid_list in uid_to_duplicate_uid_list_dict.itervalues():
          to_free_uid_list += uid_list
        try:
          self.assignMessageList(db, 0, to_free_uid_list)
        except:
          self._log(ERROR, 'Failed to free messages: %r' % to_free_uid_list)
        else:
          if to_free_uid_list:
            self._log(TRACE, 'Freed messages %r' % to_free_uid_list)
      else:
        self._log(TRACE, '(no message was reserved)')
    return (), None, None
コード例 #10
0
                    for col in row:
                        if isinstance(col, types.StringType):
                            # coerce column to unicode with database encoding
                            newcol = unicode(col, db_encoding)
                            # Encode column as string with site_encoding
                            newcol = newcol.encode(site_encoding)
                        else:
                            newcol = col

                        columns += newcol,

                    encoded_result.append(columns)

                result = (result[0], encoded_result)

            result = Results(result, brain, p, None)

        columns = result._searchable_result_columns()

        if test__ and columns != self._col:
            self._col = columns

        # If run in test mode, return both the query and results so
        # that the template doesn't have to be rendered twice!
        if test__:
            return query, result

        return result

    def abort(self):
        dbc, DB__ = self._get_dbc()
コード例 #11
0
    def SQLpersonVoteCount(self, poll_id):
        """Return a Results counting all ballots cast for *poll_id*."""
        escaped_id = self.safe_escape(poll_id)
        # The value is escaped above; it is then inlined into the statement.
        query = "SELECT COUNT(*) FROM TM_POLL WHERE POLL_Id = '%s'" % escaped_id
        return Results(self.executeQuery(query))
コード例 #12
0
class SQLMethod(Aqueduct.BaseQuery):
    """SQL template method bound to a context object (Python 2 era code).

    Renders the stored SQL template with keyword arguments, runs it
    through the configured database connection, and wraps tabular
    results in a Results object (string results become an RDB.File).
    Query results can be cached per context, bounded by max_cache and
    cache_time.
    """

    # Class-level defaults: parsed argument spec and last-seen result columns.
    _arg = None
    _col = None

    def __init__(self, context):
        # Bind to the context and make sure all expected attributes exist,
        # filling missing ones with the module-level _defaults.
        self.context = context
        self.id = str(context.__class__.__name__)
        self.title = ''
        for k, v in _defaults.items():
            if not hasattr(context, k):
                setattr(context, k, v)

    def edit(self, connection_id, arguments, template):
        """Change database method properties

        The 'connection_id' argument is the id of a database connection
        that resides in the current folder or in a folder above the
        current folder.  The database should understand SQL.

        The 'arguments' argument is a string containing an arguments
        specification, as would be given in the SQL method creation form.

        The 'template' argument is a string containing the source for the
        SQL Template.
        """
        context = self.context
        self.connection_id = str(connection_id)
        arguments = str(arguments)
        self.arguments_src = arguments
        self._arg = Aqueduct.parse(arguments)
        if not isinstance(template, (str, unicode)):
            template = str(template)
        self.src = template
        self.template = t = context.template_class(template)
        t.cook()
        # Invalidate the per-context query cache: the template changed.
        context._v_query_cache = {}, Bucket()

    def advanced_edit(self,
                      max_rows=1000,
                      max_cache=100,
                      cache_time=0,
                      class_name='',
                      class_file='',
                      REQUEST=None):
        """Change advanced properties

        The arguments are:

        max_rows -- The maximum number of rows to be returned from a query.

        max_cache -- The maximum number of results to cache

        cache_time -- The maximum amount of time to use a cached result.

        class_name -- The name of a class that provides additional
          attributes for result record objects. This class will be a
          base class of the result record class.

        class_file -- The name of the file containing the class
          definition.

        The class file normally resides in the 'Extensions'
        directory, however, the file name may have a prefix of
        'product.', indicating that it should be found in a product
        directory.

        For example, if the class file is: 'ACMEWidgets.foo', then an
        attempt will first be made to use the file
        'lib/python/Products/ACMEWidgets/Extensions/foo.py'. If this
        fails, then the file 'Extensions/ACMEWidgets.foo.py' will be
        used.

        """
        context = self.context
        # paranoid type checking
        if type(max_rows) is not type(1):
            max_rows = atoi(max_rows)
        if type(max_cache) is not type(1):
            max_cache = atoi(max_cache)
        if type(cache_time) is not type(1):
            cache_time = atoi(cache_time)
        class_name = str(class_name)
        class_file = str(class_file)

        context.max_rows_ = max_rows
        context.max_cache_, context.cache_time_ = max_cache, cache_time
        # Reset the SQL cache and the cached brain, since they depend on
        # the settings changed above.
        context._v_sql_cache = {}, Bucket()
        context.class_name_, context.class_file_ = class_name, class_file
        context._v_sql_brain = getBrain(context.class_file_,
                                        context.class_name_, 1)

    def _cached_result(self, DB__, query):
        """Run *query* through DB__, caching results on the context.

        The cache maps query -> (timestamp, result); tcache maps
        int timestamps back to queries for expiry.
        """
        context = self.context
        # Try to fetch from cache
        if hasattr(context, '_v_sql_cache'):
            cache = context._v_sql_cache
        else:
            cache = context._v_sql_cache = {}, Bucket()
        cache, tcache = cache
        max_cache = context.max_cache_
        now = time()
        # Entries older than t are stale.
        t = now - context.cache_time_
        if len(cache) > max_cache / 2:
            # Evict oldest / stale entries once the cache is half full.
            keys = tcache.keys()
            keys.reverse()
            while keys and (len(keys) > max_cache or keys[-1] < t):
                key = keys[-1]
                q = tcache[key]
                del tcache[key]
                if int(cache[q][0]) == key:
                    del cache[q]
                del keys[-1]

        if query in cache:
            k, r = cache[query]
            if k > t:
                # Still fresh: serve from cache.
                return r

        result = apply(DB__.query, query)
        if context.cache_time_ > 0:
            tcache[int(now)] = query
            cache[query] = now, result

        return result

    def _get_dbc(self):
        """Get the database connection"""
        context = self.context

        try:
            dbc = getattr(context, self.connection_id)
        except AttributeError:
            raise AttributeError, (
                "The database connection <em>%s</em> cannot be found." %
                (self.connection_id))

        try:
            DB__ = dbc()
        except ConflictError:
            raise
        except:
            # NOTE: Python 2 string exception, kept as-is.
            raise 'Database Error', ('%s is not connected to a database' %
                                     self.id)

        return dbc, DB__

    def __call__(self, src__=0, test__=0, **kw):
        """Call the database method

        The arguments to the method should be passed via keyword
        arguments, or in a single mapping object. If no arguments are
        given, and if the method was invoked through the Web, then the
        method will try to acquire and use the Web REQUEST object as
        the argument mapping.

        The returned value is a sequence of record objects.
        """
        context = self.context

        dbc, DB__ = self._get_dbc()

        p = None

        argdata = self._argdata(kw)
        argdata['sql_delimiter'] = '\0'
        argdata['sql_quote__'] = dbc.sql_quote__

        # TODO: Review the argdata dictionary. The line below is receiving
        # unicode strings, mixed with standard strings. It is insane! Archetypes
        # needs a policy about unicode, and lots of tests on this way. I prefer
        # to not correct it now, only doing another workaround. We need to
        # correct the cause of this problem, not its side effects :-(

        try:
            query = apply(self.template, (p, ), argdata)
        except TypeError, msg:
            msg = str(msg)
            if 'client' in msg:
                raise NameError("'client' may not be used as an " +
                                "argument name in this context")
            else:
                raise

        __traceback_info__ = query

        if src__:
            return query

        # Get the encoding arguments
        # We have two possible kw arguments:
        #   db_encoding:        The encoding used in the external database
        #   site_encoding:      The encoding used for the site
        #                       If not specified, we use sys.getdefaultencoding()
        db_encoding = kw.get('db_encoding', None)

        site_encoding = kw.get('site_encoding', 'utf-8')

        if type(query) == type(u''):
            if db_encoding:
                query = query.encode(db_encoding)
            else:
                try:
                    query = query.encode(site_encoding)
                except UnicodeEncodeError:
                    query = query.encode('UTF-8')

        if context.cache_time_ > 0 and context.max_cache_ > 0:
            result = self._cached_result(DB__, (query, context.max_rows_))
        else:
            try:
                result = DB__.query(query, context.max_rows_)
            except ConflictError:
                raise
            except:
                log_exc(msg='Database query failed', reraise=1)

        if hasattr(context, '_v_sql_brain'):
            brain = context._v_sql_brain
        else:
            brain = context._v_sql_brain = getBrain(context.class_file_,
                                                    context.class_name_)

        if type(result) is type(''):
            # A plain string result is exposed as an RDB file.
            f = StringIO()
            f.write(result)
            f.seek(0)
            result = RDB.File(f, brain, p, None)
        else:
            if db_encoding:
                # Encode result before we wrap it in Result object
                # We will change the encoding from source to either the specified target_encoding
                # or the site default encoding

                # The data is a list of tuples of column data
                encoded_result = []
                for row in result[1]:
                    columns = ()
                    for col in row:
                        if isinstance(col, types.StringType):
                            # coerce column to unicode with database encoding
                            newcol = unicode(col, db_encoding)
                            # Encode column as string with site_encoding
                            newcol = newcol.encode(site_encoding)
                        else:
                            newcol = col

                        columns += newcol,

                    encoded_result.append(columns)

                result = (result[0], encoded_result)

            result = Results(result, brain, p, None)

        columns = result._searchable_result_columns()

        if test__ and columns != self._col:
            self._col = columns

        # If run in test mode, return both the query and results so
        # that the template doesn't have to be rendered twice!
        if test__:
            return query, result

        return result
コード例 #13
0
  def getReservedMessageList(self, db, date, processing_node, limit,
                             group_method_id=None, node_set=None):
    """
      Get and reserve a list of messages.
      limit
        Maximum number of messages to fetch.
        This number is not guaranteed to be reached, because of not enough
        messages being pending execution.
      Returns a Results sequence of reserved rows, or () when none (or when
      the table lock could not be acquired).
    """
    assert limit
    quote = db.string_literal
    query = db.query
    # Common %-substitutions (as bytes): table name, date condition,
    # optional group_method_id condition, LIMIT value.
    args = (str2bytes(self.sql_table), sqltest_dict['to_date'](date, quote),
            b' AND group_method_id=' + quote(group_method_id)
            if group_method_id else b'' , limit)

    # Note: Not all write accesses to our table are protected by this lock.
    # This lock is not here for data consistency reasons, but to avoid wasting
    # time on SQL deadlocks caused by the varied lock ordering chosen by the
    # database. These queries specifically seem to be extremely prone to such
    # deadlocks, so prevent them from attempting to run in parallel on a given
    # activity table.
    # If more accesses are found to cause a significant waste of time because
    # of deadlocks, then they should acquire such lock as well. But
    # preemptively applying such lock everywhere without checking the amount
    # of waste is unlikely to produce a net gain.
    # XXX: timeout may benefit from being tweaked, but one second seem like a
    # reasonable starting point.
    # XXX: locking could probably be skipped altogether on clusters with few
    # enough processing nodes, as there should be little deadlocks and the
    # tradeoff becomes unfavorable to explicit locks. What threshold to
    # choose ?
    with SQLLock(db, self.sql_table, timeout=1) as acquired:
      if not acquired:
        # This table is busy, check for work to do elsewhere
        return ()
      # Get reservable messages.
      # During normal operation, sorting by date (as last criteria) is fairer
      # for users and reduce the probability to do the same work several times
      # (think of an object that is modified several times in a short period of
      # time).
      if node_set is None:
        result = Results(query(
          b"SELECT * FROM %s WHERE processing_node=0 AND %s%s"
          b" ORDER BY priority, date LIMIT %d FOR UPDATE" % args, 0))
      else:
        # We'd like to write
        #   ORDER BY priority, IF(node, IF(node={node}, -1, 1), 0), date
        # but this makes indices inefficient.
        # Instead, each UNIONed subquery computes an "effective_priority"
        # (3*priority plus a per-subquery offset) so every subquery can use
        # the (priority, date) index on its own.  The %%s placeholders
        # survive the first substitution and are filled per subquery below.
        subquery = (b"(SELECT *, 3*priority%%s as effective_priority FROM %s"
          b" WHERE %%s AND processing_node=0 AND %s%s"
          b" ORDER BY priority, date LIMIT %d FOR UPDATE)" % args)
        node = b'node=%d' % processing_node
        result = Results(query(
          # "ALL" on all but one, to incur deduplication cost only once.
          # "UNION ALL" between the two naturally distinct sets.
          b"SELECT * FROM (%s UNION ALL %s UNION %s%s) as t"
          b" ORDER BY effective_priority, date LIMIT %d"% (
              subquery % (b'-1', node),
              subquery % (b'', b'node=0'),
              subquery % (b'+IF(node, IF(%s, -1, 1), 0)' % node, b'node>=0'),
              b' UNION ALL ' + subquery % (str2bytes(str(-1)), str2bytes('node IN (%s)' % ','.join(map(str, node_set)))) if node_set else b'',
              limit), 0))
      if result:
        # Reserve messages by assigning them to our processing node.
        uid_list = [x.uid for x in result]
        self.assignMessageList(db, processing_node, uid_list)
        self._log(TRACE, 'Reserved messages: %r' % uid_list)
        return result
    return ()
コード例 #14
0
    def SQLgetResults(self, poll_id):
        """Return per-choice ballot counts for *poll_id* as a Results."""
        escaped_id = self.safe_escape(poll_id)
        # The value is escaped above; it is then inlined into the statement.
        query = (
            "SELECT CHOICE_Id, COUNT(UNICITY_FACTOR) FROM TM_POLL"
            " WHERE POLL_Id = '%s' GROUP BY CHOICE_Id" % escaped_id)
        return Results(self.executeQuery(query))
コード例 #15
0
ファイル: DA.py プロジェクト: alvsgithub/erp5
def DA__call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw):
    """Call the database method

    The arguments to the method should be passed via keyword
    arguments, or in a single mapping object. If no arguments are
    given, and if the method was invoked through the Web, then the
    method will try to acquire and use the Web REQUEST object as
    the argument mapping.

    The returned value is a sequence of record objects.

    Keyword arguments consumed here (and NOT forwarded to the SQL
    template):
      connection_id -- override the ZSQL connection to use
      zsql_brain    -- dotted path of a custom brain class to wrap rows
    src__  -- when true, return the rendered SQL text without executing it
    test__ -- when true, return (query, result) and record result columns
    """
    # NOTE(review): Python 2 only syntax throughout (`raise X, (...)`,
    # `except TypeError, msg`, `apply()`).
    __traceback_supplement__ = (SQLMethodTracebackSupplement, self)

    c = kw.pop("connection_id", None)
    #if c is not None:
      #LOG("DA", 300, "connection %s provided to %s" %(c, self.id))
    # patch: dynamic brain configuration
    zsql_brain = kw.pop('zsql_brain', None)
    # patch end


    # Argument source fallback chain: explicit keywords, then the acquired
    # web REQUEST, then an empty mapping.
    if REQUEST is None:
        if kw: REQUEST=kw
        else:
            if hasattr(self, 'REQUEST'): REQUEST=self.REQUEST
            else: REQUEST={}

    # Patch to implement dynamic connection id
    # Connection id is retrieve from user preference
    if c is None:
      physical_path = self.getPhysicalPath()
      # XXX cleaner solution will be needed
      # Archive redirection is skipped for catalog, cmf_activity and
      # transactionless connections; only other methods may be rerouted
      # to the user's preferred archive connection.
      if 'portal_catalog' not in physical_path and\
         'cmf_activity' not in self.connection_id and\
         'transactionless' not in self.connection_id:
        try:
          archive_id = self.portal_preferences.getPreferredArchive()
        except AttributeError:
          pass
        else:
          if archive_id not in (None, ''):
            archive_id = archive_id.split('/')[-1]
            #LOG("DA__call__, archive_id 2", 300, archive_id)
            archive = self.portal_archives._getOb(archive_id, None)
            if archive is not None:
              c = archive.getConnectionId()
              #LOG("DA call", INFO, "retrieved connection %s from preference" %(c,))

    if c is None:
      # connection hook
      c = self.connection_id
      # for backwards compatability
      hk = self.connection_hook
      # go get the connection hook and call it
      if hk: c = getattr(self, hk)()
    #LOG("DA__call__ connection", 300, c)
    # The connection object is acquired by id from this method's context.
    try: dbc=getattr(self, c)
    except AttributeError:
        raise AttributeError, (
            "The database connection <em>%s</em> cannot be found." % (
            c))

    try: DB__=dbc()
    except: raise DatabaseError, (
        '%s is not connected to a database' % self.id)

    p = aq_parent(self) # None if no aq_parent

    # Render the DTML SQL template with the collected arguments plus the
    # standard sql_delimiter / sql_quote__ helpers from the connection.
    argdata=self._argdata(REQUEST)
    argdata['sql_delimiter']='\0'
    argdata['sql_quote__']=dbc.sql_quote__

    # Template rendering happens with this method pushed on the security
    # context so DTML security checks resolve against it.
    security=getSecurityManager()
    security.addContext(self)
    try:
        try:     query=apply(self.template, (p,), argdata)
        except TypeError, msg:
            msg = str(msg)
            # A TypeError mentioning 'client' means the reserved argument
            # name was used; reported as a clearer NameError.
            if find(msg,'client') >= 0:
                raise NameError("'client' may not be used as an " +
                    "argument name in this context")
            else: raise
    finally: security.removeContext(self)

    if src__: return query

    # Execute (or fetch from the result cache when caching is enabled).
    if self.cache_time_ > 0 and self.max_cache_ > 0:
        result=self._cached_result(DB__, query, self.max_rows_, c)
    else:
      try:
#         if 'portal_ids' in query:
#           LOG("DA query", INFO, "query = %s" %(query,))
        result=DB__.query(query, self.max_rows_)
      except:
        LOG("DA call raise", ERROR, "DB = %s, c = %s, query = %s" %(DB__, c, query), error=sys.exc_info())
        raise

    # patch: dynamic brain configuration
    # zsql_brain holds a dotted path; rsplit keeps everything after the
    # last dot as the class name, the rest as the module path.
    if zsql_brain is not None:
        try:
          class_file_, class_name_ = zsql_brain.rsplit('.', 1)
        except:
          #import pdb; pdb.post_mortem()
          raise
        brain = getBrain(class_file_, class_name_)
        # XXX remove this logging for performance
        LOG(__name__, INFO, "Using special brain: %r\n" % (brain,))
    else:
        brain = getBrain(self.class_file_, self.class_name_)

    # String results are wrapped as an RDB.File; anything else is treated
    # as a row set and wrapped in Results with the selected brain.
    if type(result) is type(''):
        f=StringIO()
        f.write(result)
        f.seek(0)
        result=RDB.File(f,brain,p)
    else:
        result=Results(result, brain, p)
    columns=result._searchable_result_columns()
    if test__ and columns != self._col: self._col=columns

    # If run in test mode, return both the query and results so
    # that the template doesn't have to be rendered twice!
    if test__: return query, result

    return result
Code example #16
0
File: DA.py  Project: Elbagoury/erp5
def DA__call__(self, REQUEST=None, __ick__=None, src__=0, test__=0, **kw):
    """Call the database method

    The arguments to the method should be passed via keyword
    arguments, or in a single mapping object. If no arguments are
    given, and if the method was invoked through the Web, then the
    method will try to acquire and use the Web REQUEST object as
    the argument mapping.

    The returned value is a sequence of record objects.

    Keyword arguments consumed here (and NOT forwarded to the SQL
    template):
      connection_id -- override the ZSQL connection to use
      zsql_brain    -- dotted path of a custom brain class to wrap rows
    src__  -- when true, return the rendered SQL text without executing it
    test__ -- when true, return (query, result) and record result columns
    """
    # NOTE(review): Python 2 only syntax throughout (`raise X, (...)`,
    # `except TypeError, msg`, `apply()`).
    __traceback_supplement__ = (SQLMethodTracebackSupplement, self)

    c = kw.pop("connection_id", None)
    #if c is not None:
    #LOG("DA", 300, "connection %s provided to %s" %(c, self.id))
    # patch: dynamic brain configuration
    zsql_brain = kw.pop('zsql_brain', None)
    # patch end

    # Argument source fallback chain: explicit keywords, then the acquired
    # web REQUEST, then an empty mapping.
    if REQUEST is None:
        if kw: REQUEST = kw
        else:
            if hasattr(self, 'REQUEST'): REQUEST = self.REQUEST
            else: REQUEST = {}

    # Patch to implement dynamic connection id
    # Connection id is retrieve from user preference
    if c is None:
        physical_path = self.getPhysicalPath()
        # XXX cleaner solution will be needed
        # Archive redirection is skipped for catalog, cmf_activity and
        # transactionless connections; only other methods may be rerouted
        # to the user's preferred archive connection.
        if 'portal_catalog' not in physical_path and\
           'cmf_activity' not in self.connection_id and\
           'transactionless' not in self.connection_id:
            try:
                archive_id = self.portal_preferences.getPreferredArchive()
            except AttributeError:
                pass
            else:
                if archive_id not in (None, ''):
                    archive_id = archive_id.split('/')[-1]
                    #LOG("DA__call__, archive_id 2", 300, archive_id)
                    archive = self.portal_archives._getOb(archive_id, None)
                    if archive is not None:
                        c = archive.getConnectionId()
                        #LOG("DA call", INFO, "retrieved connection %s from preference" %(c,))

    if c is None:
        # connection hook
        c = self.connection_id
        # for backwards compatability
        hk = self.connection_hook
        # go get the connection hook and call it
        if hk: c = getattr(self, hk)()
    #LOG("DA__call__ connection", 300, c)
    # The connection object is acquired by id from this method's context.
    try:
        dbc = getattr(self, c)
    except AttributeError:
        raise AttributeError, (
            "The database connection <em>%s</em> cannot be found." % (c))

    try:
        DB__ = dbc()
    except:
        raise DatabaseError, ('%s is not connected to a database' % self.id)

    p = aq_parent(self)  # None if no aq_parent

    # Render the DTML SQL template with the collected arguments plus the
    # standard sql_delimiter / sql_quote__ helpers from the connection.
    argdata = self._argdata(REQUEST)
    argdata['sql_delimiter'] = '\0'
    argdata['sql_quote__'] = dbc.sql_quote__

    # Template rendering happens with this method pushed on the security
    # context so DTML security checks resolve against it.
    security = getSecurityManager()
    security.addContext(self)
    try:
        try:
            query = apply(self.template, (p, ), argdata)
        except TypeError, msg:
            msg = str(msg)
            # A TypeError mentioning 'client' means the reserved argument
            # name was used; reported as a clearer NameError.
            if find(msg, 'client') >= 0:
                raise NameError("'client' may not be used as an " +
                                "argument name in this context")
            else:
                raise
    finally:
        security.removeContext(self)

    if src__: return query

    # Execute (or fetch from the result cache when caching is enabled).
    if self.cache_time_ > 0 and self.max_cache_ > 0:
        result = self._cached_result(DB__, query, self.max_rows_, c)
    else:
        try:
            #         if 'portal_ids' in query:
            #           LOG("DA query", INFO, "query = %s" %(query,))
            result = DB__.query(query, self.max_rows_)
        except:
            LOG("DA call raise",
                ERROR,
                "DB = %s, c = %s, query = %s" % (DB__, c, query),
                error=sys.exc_info())
            raise

    # patch: dynamic brain configuration
    # zsql_brain holds a dotted path; rsplit keeps everything after the
    # last dot as the class name, the rest as the module path.
    if zsql_brain is not None:
        try:
            class_file_, class_name_ = zsql_brain.rsplit('.', 1)
        except:
            #import pdb; pdb.post_mortem()
            raise
        brain = getBrain(class_file_, class_name_)
        # XXX remove this logging for performance
        LOG(__name__, INFO, "Using special brain: %r\n" % (brain, ))
    else:
        brain = getBrain(self.class_file_, self.class_name_)

    # String results are wrapped as an RDB.File; anything else is treated
    # as a row set and wrapped in Results with the selected brain.
    if type(result) is type(''):
        f = StringIO()
        f.write(result)
        f.seek(0)
        result = RDB.File(f, brain, p)
    else:
        result = Results(result, brain, p)
    columns = result._searchable_result_columns()
    if test__ and columns != self._col: self._col = columns

    # If run in test mode, return both the query and results so
    # that the template doesn't have to be rendered twice!
    if test__: return query, result

    return result
Code example #17
0
        except:
            #import pdb; pdb.post_mortem()
            raise
        brain = getBrain(class_file_, class_name_)
        # XXX remove this logging for performance
        LOG(__name__, INFO, "Using special brain: %r\n" % (brain, ))
    else:
        brain = getBrain(self.class_file_, self.class_name_)

    if type(result) is type(''):
        f = StringIO()
        f.write(result)
        f.seek(0)
        result = RDB.File(f, brain, p)
    else:
        result = Results(result, brain, p)
    columns = result._searchable_result_columns()
    if test__ and columns != self._col: self._col = columns

    # If run in test mode, return both the query and results so
    # that the template doesn't have to be rendered twice!
    if test__: return query, result

    return result


def DA_upgradeSchema(self,
                     connection_id=None,
                     create_if_not_exists=False,
                     initialize=None,
                     src__=0,