def execute_paginated_report(self, req, db, id, sql, args, limit=0, offset=0):
    sql, args = self.sql_sub_vars(sql, args, db)
    if not sql:
        raise TracError(_('Report %(num)s has no SQL query.', num=id))
    self.log.debug('Executing report with SQL "%s"' % sql)
    self.log.debug('Request args: %r' % req.args)

    cursor = db.cursor()

    num_items = 0
    if id != -1 and limit > 0:
        # The number of tickets is obtained.
        count_sql = 'SELECT COUNT(*) FROM (' + sql + ') AS tab'
        cursor.execute(count_sql, args)
        self.log.debug("Query SQL(Get num items): " + count_sql)
        for row in cursor:
            pass
        num_items = row[0]

        # The column name is obtained.
        get_col_name_sql = 'SELECT * FROM ( ' + sql + ' ) AS tab LIMIT 1'
        cursor.execute(get_col_name_sql, args)
        self.env.log.debug("Query SQL(Get col names): " + get_col_name_sql)
        cols = get_column_names(cursor)

        sort_col = req.args.get('sort', '')
        self.log.debug("Columns %r, Sort column %s" % (cols, sort_col))
        order_cols = []
        if sort_col:
            if '__group__' in cols:
                order_cols.append('__group__')
            if sort_col in cols:
                order_cols.append(sort_col)
            else:
                raise TracError(_('Query parameter "sort=%(sort_col)s" '
                                  ' is invalid', sort_col=sort_col))

        # The report-query results is obtained
        asc = req.args.get('asc', '1')
        asc_str = asc == '1' and 'ASC' or 'DESC'
        order_by = ''
        if len(order_cols) != 0:
            order = ', '.join(order_cols)
            order_by = " ".join([' ORDER BY', order, asc_str])
        sql = " ".join(['SELECT * FROM (', sql, ') AS tab', order_by])
        sql = " ".join([sql, 'LIMIT', str(limit), 'OFFSET', str(offset)])
        self.log.debug("Query SQL: " + sql)

    cursor.execute(sql, args)

    # FIXME: fetchall should probably not be used.
    info = cursor.fetchall() or []
    cols = get_column_names(cursor)

    db.rollback()

    return cols, info, num_items
def execute_paginated_report(self, req, db, id, sql, args, limit=0, offset=0):
    sql, args, missing_args = self.sql_sub_vars(sql, args, db)
    if not sql:
        raise TracError(_("Report {%(num)s} has no SQL query.", num=id))
    cursor = db.cursor()

    num_items = 0
    if id != -1 and limit > 0:
        cursor.execute("SELECT COUNT(*) FROM (%s) AS tab" % sql, args)
        num_items = cursor.fetchone()[0]

        # get the column names
        cursor.execute("SELECT * FROM (%s) AS tab LIMIT 1" % sql, args)
        cols = get_column_names(cursor)

        sort_col = req.args.get('sort', '')
        order_cols = []
        if '__group__' in cols:
            sort_col = ''  # sorting is disabled (#15030)
        if sort_col:
            if sort_col in cols:
                order_cols.append(sort_col)
            else:
                raise TracError(_('Query parameter "sort=%(sort_col)s" '
                                  ' is invalid', sort_col=sort_col))

        # get the (partial) report results
        order_by = ''
        if order_cols:
            asc = req.args.get('asc', '1')
            order_by = " ORDER BY %s %s" % (
                ', '.join(db.quote(col) for col in order_cols),
                'ASC' if asc == '1' else 'DESC')
        sql = "SELECT * FROM (%s) AS tab %s LIMIT %s OFFSET %s" % \
              (sql, order_by, str(limit), str(offset))
        self.log.debug("Query SQL: " + sql)

    cursor.execute(sql, args)
    rows = cursor.fetchall() or []
    cols = get_column_names(cursor)
    return cols, rows, num_items, missing_args
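# Usage sketch, not part of the original source: one way a caller could page
# through execute_paginated_report() above. `component`, `req` and `db` stand
# in for the report module, the current request and an open connection;
# `report_id`, `report_sql` and `report_args` are placeholder values.
def fetch_report_page(component, req, db, report_id, report_sql, report_args,
                      page=1, page_size=100):
    offset = (page - 1) * page_size
    cols, rows, num_items, missing_args = component.execute_paginated_report(
        req, db, report_id, report_sql, report_args,
        limit=page_size, offset=offset)
    # Total page count implied by the row count returned by the helper.
    num_pages = (num_items + page_size - 1) // page_size if page_size else 1
    return cols, rows, num_pages, missing_args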
def execute(self, req, db=None):
    if not self.cols:
        self.get_columns()

    sql, args = self.get_sql()
    self.env.log.debug("Query SQL: " + sql % tuple([repr(a) for a in args]))

    if not db:
        db = self.env.get_db_cnx()
    cursor = db.cursor()
    cursor.execute(sql, args)
    columns = get_column_names(cursor)

    results = []
    for row in cursor:
        id = int(row[0])
        result = {'id': id, 'href': req.href.ticket(id)}
        for i in range(1, len(columns)):
            name, val = columns[i], row[i]
            if name == self.group:
                val = val or 'None'
            elif name == 'reporter':
                val = val or 'anonymous'
            elif name in ['changetime', 'time']:
                val = int(val)
            elif val is None:
                val = '--'
            result[name] = val
        results.append(result)
    cursor.close()
    return results
def execute(self, req, db=None, cached_ids=None):
    if not self.cols:
        self.get_columns()

    sql, args = self.get_sql(req, cached_ids)
    self.env.log.debug("Query SQL: " + sql % tuple([repr(a) for a in args]))

    results = []
    for shortname, env in TracForgeAdminSystem(self.env).get_projects():
        # Check that this project matches the constraints, if any
        if 'project' in self.constraints:
            for con in self.constraints['project']:
                if con.startswith('!'):
                    test = lambda name: name != con[1:]
                else:
                    test = lambda name: name == con
                if test(shortname):
                    break
            else:
                # No constraints matched, skip this env
                continue

        db = env.get_db_cnx()
        cursor = db.cursor()
        cursor.execute(sql, args)
        columns = get_column_names(cursor)
        fields = []
        for column in columns:
            fields += [f for f in self.fields if f['name'] == column] or [None]

        for row in cursor:
            id = int(row[0])
            result = {'id': id, 'href': req.href.ticket(id)}
            for i in range(1, len(columns)):
                name, field, val = columns[i], fields[i], row[i]
                if name == self.group:
                    val = val or 'None'
                elif name == 'reporter':
                    val = val or 'anonymous'
                elif val is None:
                    val = '--'
                elif name in ('changetime', 'time'):
                    val = datetime.fromtimestamp(int(val), utc)
                elif field and field['type'] == 'checkbox':
                    try:
                        val = bool(int(val))
                    except (TypeError, ValueError):
                        val = False
                result[name] = val

            # Add my new data
            result['project'] = shortname
            result['href'] = req.href.projects(shortname, 'ticket', id)
            results.append(result)
        cursor.close()
    return results
def execute(self, req, db=None, cached_ids=None):
    if not db:
        db = self.env.get_db_cnx()
    cursor = db.cursor()

    sql, args = self.get_sql(req, cached_ids)
    self.num_items = self._count(sql, args, db)

    if self.num_items <= self.max:
        self.has_more_pages = False

    if self.has_more_pages:
        max = self.max
        if self.group:
            max += 1
        sql = sql + " LIMIT %d OFFSET %d" % (max, self.offset)
        if (self.page > int(ceil(float(self.num_items) / self.max)) and
                self.num_items != 0):
            raise TracError(_('Page %(page)s is beyond the number of '
                              'pages in the query', page=self.page))

    self.env.log.debug("Query SQL: " + sql % tuple([repr(a) for a in args]))
    try:
        cursor.execute(sql, args)
    except:
        db.rollback()
        raise
    columns = get_column_names(cursor)
    fields = []
    for column in columns:
        fields += [f for f in self.fields if f['name'] == column] or [None]

    results = []

    column_indices = range(len(columns))
    for row in cursor:
        result = {}
        for i in column_indices:
            name, field, val = columns[i], fields[i], row[i]
            if name == self.group:
                val = val or 'None'
            elif name == 'reporter':
                val = val or 'anonymous'
            elif name == 'id':
                val = int(val)
                result['href'] = req.href.ticket(val)
            elif val is None:
                val = '--'
            elif name in ('changetime', 'time'):
                val = datetime.fromtimestamp(int(val or 0), utc)
            elif field and field['type'] == 'checkbox':
                try:
                    val = bool(int(val))
                except (TypeError, ValueError):
                    val = False
            result[name] = val
        results.append(result)

    cursor.close()
    return results
def upgrade_to_0_2_5(env, db, installed_version):
    if installed_version >= [0, 2, 5]:
        return True
    db = db or env.get_db_cnx()
    cursor = db.cursor()
    now = to_timestamp(datetime.now(utc))
    cursor.execute("INSERT INTO ticket (type, status, summary, description, time, changetime) "
                   " SELECT '$milestone$', 'new', name, description, %s, %s"
                   " FROM milestone m"
                   " WHERE NOT EXISTS ("
                   " SELECT 1"
                   " FROM ticket t"
                   " WHERE t.type='$milestone$'"
                   " AND t.summary=m.name)",
                   (now, now))
    cursor.execute("UPDATE ticket"
                   " SET status='finished'"
                   " WHERE status='new'"
                   " AND type='$milestone$'"
                   " AND EXISTS ("
                   " SELECT 1"
                   " FROM milestone m"
                   " WHERE summary=m.name"
                   " AND COALESCE(m.completed,0)<>0)")
    cursor.execute("SELECT * FROM milestone as tab LIMIT 1")
    col_names = get_column_names(cursor)
    if 'started' in col_names:
        cursor.execute("UPDATE ticket"
                       " SET status='started'"
                       " WHERE status='new'"
                       " AND type='$milestone$'"
                       " AND EXISTS ("
                       " SELECT 1"
                       " FROM milestone m"
                       " WHERE summary=m.name"
                       " AND COALESCE(m.started,0)<>0)")
        cursor.execute("INSERT INTO ticket_custom (ticket, name, value)"
                       " SELECT t.id, 'started', m.started"
                       " FROM ticket t, milestone m"
                       " WHERE t.type='$milestone$'"
                       " AND t.summary=m.name"
                       " AND COALESCE(m.started,0)<>0")
    cursor.execute("UPDATE ticket"
                   " SET milestone = (SELECT parent FROM milestone_struct WHERE name=summary)"
                   " WHERE type='$milestone$'")
    trac_cfg = env.config['trac']
    policies = trac_cfg.get('permission_policies') or ''
    for policy in ('CalendarSystem', 'HideMilestoneTicketPolicy'):
        if policies:
            policies = ','.join([policy, policies])
        else:
            policies = policy
    trac_cfg.set('permission_policies', policies)
    env.config.save()
def execute_paginated_report(self, req, db, id, sql, args, limit=0, offset=0):
    sql, args, missing_args = self.sql_sub_vars(sql, args, db)
    if not sql:
        raise TracError(_("Report {%(num)s} has no SQL query.", num=id))
    cursor = db.cursor()

    num_items = 0
    if id != -1 and limit > 0:
        cursor.execute("SELECT COUNT(*) FROM (%s) AS tab" % sql, args)
        num_items = cursor.fetchone()[0]

        # get the column names
        cursor.execute("SELECT * FROM (%s) AS tab LIMIT 1" % sql, args)
        cols = get_column_names(cursor)

        sort_col = req.args.get('sort', '')
        order_cols = []
        if sort_col:
            if '__group__' in cols:
                order_cols.append('__group__')
            if sort_col in cols:
                order_cols.append(sort_col)
            else:
                raise TracError(_('Query parameter "sort=%(sort_col)s" '
                                  ' is invalid', sort_col=sort_col))

        # get the (partial) report results
        order_by = ''
        if order_cols:
            asc = req.args.get('asc', '1')
            order_by = " ORDER BY %s %s" % (
                ', '.join(db.quote(col) for col in order_cols),
                'ASC' if asc == '1' else 'DESC')
        sql = "SELECT * FROM (%s) AS tab %s LIMIT %s OFFSET %s" % \
              (sql, order_by, str(limit), str(offset))
        self.log.debug("Query SQL: " + sql)

    cursor.execute(sql, args)
    rows = cursor.fetchall() or []
    cols = get_column_names(cursor)
    return cols, rows, num_items, missing_args
def get_ticket_template(self, name):
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    # Use a parameterized query rather than interpolating the name into the
    # SQL string: the original double-quoted literal is not portable and is
    # open to SQL injection.
    cursor.execute("SELECT name, fields FROM ticket_templates WHERE name=%s",
                   (name,))
    row = cursor.fetchone()
    if row:
        cols = get_column_names(cursor)
        tmpl = dict(zip(cols, row))
        if tmpl['fields']:
            tmpl['fields'] = tmpl['fields'].split(',')
            fields = [f['name'] for f in
                      TicketSystem(self.env).get_custom_fields()]
            tmpl['fields'] = [f for f in tmpl['fields'] if f in fields]
        else:
            tmpl['fields'] = []
        return tmpl
    else:
        return None
def get_field_groups(self):
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    sql = ('SELECT id AS "name", label, priority AS "order", fields '
           'FROM field_groups ORDER BY priority, label ASC')
    groups = []
    cursor.execute(sql)
    rows = cursor.fetchall()
    if not rows:
        return groups
    cols = get_column_names(cursor)
    fields = [f['name'] for f in TicketSystem(self.env).get_custom_fields()]
    for row in rows:
        group = dict(zip(cols, row))
        group['name'] = 'fieldgroup_' + str(group['name'])
        if group['fields']:
            group['fields'] = group['fields'].split(',')
            group['fields'] = [f for f in group['fields'] if f in fields]
        groups.append(group)
    return groups
def execute_report(self, req, db, id, sql, args):
    sql, args = self.sql_sub_vars(req, sql, args, db)
    if not sql:
        raise TracError('Report %s has no SQL query.' % id)
    if sql.find('__group__') == -1:
        req.hdf['report.sorting.enabled'] = 1

    self.log.debug('Executing report with SQL "%s" (%s)', sql, args)

    cursor = db.cursor()
    cursor.execute(sql, args)

    # FIXME: fetchall should probably not be used.
    info = cursor.fetchall() or []
    cols = get_column_names(cursor)

    db.rollback()

    return cols, info
def get_field_group(self, name):
    # names should always be of the form 'fieldgroup_'+id
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    cursor.execute("""
        SELECT id AS "name", label, priority AS "order", fields
        FROM field_groups WHERE id=%s
        ORDER BY priority, label ASC
        """, (int(name[11:]),))
    row = cursor.fetchone()
    if row:
        cols = get_column_names(cursor)
        group = dict(zip(cols, row))
        group['name'] = 'fieldgroup_' + str(group['name'])
        if group['fields']:
            group['fields'] = group['fields'].split(',')
            fields = [f['name'] for f in
                      TicketSystem(self.env).get_custom_fields()]
            group['fields'] = [f for f in group['fields'] if f in fields]
        return group
    else:
        return None
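# Illustrative helper, an assumption rather than original code: get_field_group()
# above expects names of the form 'fieldgroup_<id>' and strips the 11-character
# 'fieldgroup_' prefix (name[11:]) before querying, so ids round-trip like this.
def field_group_name(group_id):
    # len('fieldgroup_') == 11, matching the name[11:] slice used above.
    return 'fieldgroup_%d' % group_id

# e.g. provider.get_field_group(field_group_name(3)) looks up the group with
# id 3, assuming `provider` is the component that defines get_field_group().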
def get_ticket_templates(self):
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    sql = 'SELECT name, fields FROM ticket_templates ORDER BY name ASC'
    templates = []
    cursor.execute(sql)
    rows = cursor.fetchall()
    if not rows:
        return templates
    cols = get_column_names(cursor)
    fields = [f['name'] for f in TicketSystem(self.env).get_custom_fields()]
    for row in rows:
        tmpl = dict(zip(cols, row))
        if tmpl['fields']:
            tmpl['fields'] = tmpl['fields'].split(',')
            tmpl['fields'] = [f for f in tmpl['fields'] if f in fields]
        else:
            tmpl['fields'] = []
        templates.append(tmpl)
    return templates
def execute(self, req=None, db=None, cached_ids=None, authname=None,
            tzinfo=None, href=None, locale=None):
    """Retrieve the list of matching tickets.

    :since 1.0: the `db` parameter is no longer needed and will be removed
    in version 1.1.1
    """
    if req is not None:
        href = req.href
    with self.env.db_direct_query as db:
        cursor = db.cursor()

        self.num_items = 0
        sql, args = self.get_sql(req, cached_ids, authname, tzinfo, locale)
        if sql.startswith('SELECT ') and \
                not sql.startswith('SELECT DISTINCT '):
            sql = 'SELECT DISTINCT * FROM (' + sql + ') AS subquery'
        if isinstance(self.env, ProductEnvironment):
            sql = sql + """ WHERE product='%s'""" % (self.env.product.prefix,)
        self.num_items = self._count(sql, args)

        if self.num_items <= self.max:
            self.has_more_pages = False

        if self.has_more_pages:
            max = self.max
            if self.group:
                max += 1
            sql = sql + " LIMIT %d OFFSET %d" % (max, self.offset)
            if (self.page > int(ceil(float(self.num_items) / self.max)) and
                    self.num_items != 0):
                raise TracError(_("Page %(page)s is beyond the number of "
                                  "pages in the query", page=self.page))

        # self.env.log.debug("SQL: " + sql % tuple([repr(a) for a in args]))
        cursor.execute(sql, args)
        columns = get_column_names(cursor)
        fields = []
        for column in columns:
            fields += [f for f in self.fields if f['name'] == column] or \
                      [None]

        results = []

        product_idx = columns.index('product')
        column_indices = range(len(columns))
        for row in cursor:
            result = {}
            for i in column_indices:
                name, field, val = columns[i], fields[i], row[i]
                if name == 'reporter':
                    val = val or 'anonymous'
                elif name == 'id':
                    val = int(val)
                    result['href'] = self._get_ticket_href(
                        row[product_idx], val)
                elif name in self.time_fields:
                    val = from_utimestamp(val)
                elif field and field['type'] == 'checkbox':
                    try:
                        val = bool(int(val))
                    except (TypeError, ValueError):
                        val = False
                elif val is None:
                    val = ''
                result[name] = val
            results.append(result)
        cursor.close()
        return results
def _execute_paginated_report(self, req, db, id, sql, args, limit=0, offset=0):
    """Deprecated and will be removed in Trac 1.3.1. Call
    `execute_paginated_report` instead."""
    sql, args, missing_args = self.sql_sub_vars(sql, args)
    if not sql:
        raise TracError(_("Report {%(num)s} has no SQL query.", num=id))
    self.log.debug('Report {%d} with SQL "%s"', id, sql)
    self.log.debug('Request args: %r', req.args)

    num_items = 0
    order_by = []
    limit_offset = None
    base_sql = sql.replace(SORT_COLUMN, '1').replace(LIMIT_OFFSET, '')

    cursor = db.cursor()
    if id == self.REPORT_LIST_ID or limit == 0:
        sql = base_sql
    else:
        # The number of tickets is obtained
        count_sql = 'SELECT COUNT(*) FROM (\n%s\n) AS tab' % base_sql
        self.log.debug("Report {%d} SQL (count): %s", id, count_sql)
        try:
            cursor.execute(count_sql, args)
        except Exception as e:
            self.log.warn('Exception caught while executing Report {%d}: '
                          '%r, args %r%s', id, count_sql, args,
                          exception_to_unicode(e, traceback=True))
            return e, count_sql
        num_items = cursor.fetchone()[0]

        # The column names are obtained
        colnames_sql = 'SELECT * FROM (\n%s\n) AS tab LIMIT 1' % base_sql
        self.log.debug("Report {%d} SQL (col names): %s", id, colnames_sql)
        try:
            cursor.execute(colnames_sql, args)
        except Exception as e:
            self.log.warn('Exception caught while executing Report {%d}: '
                          '%r, args %r%s', id, colnames_sql, args,
                          exception_to_unicode(e, traceback=True))
            return e, colnames_sql
        cols = get_column_names(cursor)

        # The ORDER BY columns are inserted
        sort_col = req.args.get('sort', '')
        asc = req.args.get('asc', '1')
        self.log.debug("%r %s (%s)", cols, sort_col, asc and '^' or 'v')
        order_cols = []
        if sort_col and sort_col not in cols:
            raise TracError(_('Query parameter "sort=%(sort_col)s" '
                              ' is invalid', sort_col=sort_col))

        skel = None
        if '__group__' in cols:
            order_cols.append('__group__')
        if sort_col:
            sort_col = '%s %s' % (db.quote(sort_col),
                                  asc == '1' and 'ASC' or 'DESC')

        if SORT_COLUMN in sql:
            # Method 1: insert sort_col at specified position
            sql = sql.replace(SORT_COLUMN, sort_col or '1')
        elif sort_col:
            # Method 2: automagically insert sort_col (and __group__
            # before it, if __group__ was specified) as first criteria
            if '__group__' in cols:
                order_by.append('__group__ ASC')
            order_by.append(sort_col)

            # is there already an ORDER BY in the original sql?
            skel = sql_skeleton(sql)
            before, after = split_sql(sql, _order_by_re, skel)
            if after:  # there were some other criterions, keep them
                order_by.append(after)
            sql = ' '.join([before, 'ORDER BY', ', '.join(order_by)])

        # Add LIMIT/OFFSET if pagination needed
        limit_offset = ''
        if num_items > limit:
            limit_offset = ' '.join(['LIMIT', str(limit),
                                     'OFFSET', str(offset)])
        if LIMIT_OFFSET in sql:
            # Method 1: insert LIMIT/OFFSET at specified position
            sql = sql.replace(LIMIT_OFFSET, limit_offset)
        else:
            # Method 2: limit/offset is added unless already present
            skel = skel or sql_skeleton(sql)
            if 'LIMIT' not in skel.upper():
                sql = ' '.join([sql, limit_offset])
        self.log.debug("Report {%d} SQL (order + limit): %s", id, sql)

    try:
        cursor.execute(sql, args)
    except Exception as e:
        self.log.warn('Exception caught while executing Report {%d}: '
                      '%r, args %r%s', id, sql, args,
                      exception_to_unicode(e, traceback=True))
        if order_by or limit_offset:
            add_notice(req, _("Hint: if the report failed due to automatic"
                              " modification of the ORDER BY clause or the"
                              " addition of LIMIT/OFFSET, please look up"
                              " %(sort_column)s and %(limit_offset)s in"
                              " TracReports to see how to gain complete"
                              " control over report rewriting.",
                              sort_column=SORT_COLUMN,
                              limit_offset=LIMIT_OFFSET))
        return e, sql
    rows = cursor.fetchall() or []
    cols = get_column_names(cursor)
    return cols, rows, num_items, missing_args, limit_offset
count_sql = "SELECT COUNT(*) FROM (\n%s\n) AS tab" % base_sql self.log.debug("Report {%d} SQL (count): %s", id, count_sql) try: cursor.execute(count_sql, args) except Exception, e: return e, count_sql num_items = cursor.fetchone()[0] # The column names are obtained colnames_sql = "SELECT * FROM (\n%s\n) AS tab LIMIT 1" % base_sql self.log.debug("Report {%d} SQL (col names): %s", id, colnames_sql) try: cursor.execute(colnames_sql, args) except Exception, e: return e, colnames_sql cols = get_column_names(cursor) # The ORDER BY columns are inserted sort_col = req.args.get("sort", "") asc = req.args.get("asc", "1") self.log.debug("%r %s (%s)", cols, sort_col, asc and "^" or "v") order_cols = [] if sort_col and sort_col not in cols: raise TracError(_('Query parameter "sort=%(sort_col)s" ' " is invalid", sort_col=sort_col)) skel = None if "__group__" in cols: order_cols.append("__group__") if sort_col: sort_col = "%s %s" % (db.quote(sort_col), asc == "1" and "ASC" or "DESC") if SORT_COLUMN in sql:
def execute_paginated_report(self, req, id, sql, args, limit=0, offset=0):
    """
    :param req: `Request` object.
    :param id: Integer id of the report.
    :param sql: SQL query that generates the report.
    :param args: SQL query arguments.
    :param limit: Maximum number of results to return (optional).
    :param offset: Offset to start of results (optional).
    """
    sql, args, missing_args = self.sql_sub_vars(sql, args)
    if not sql:
        raise TracError(_("Report {%(num)s} has no SQL query.", num=id))
    self.log.debug('Report {%d} with SQL "%s"', id, sql)
    self.log.debug('Request args: %r', req.args)

    num_items = 0
    order_by = []
    limit_offset = None
    base_sql = sql.replace(SORT_COLUMN, '1').replace(LIMIT_OFFSET, '')

    with self.env.db_query as db:
        cursor = db.cursor()
        if id == self.REPORT_LIST_ID or limit == 0:
            sql = base_sql
        else:
            # The number of tickets is obtained
            count_sql = 'SELECT COUNT(*) FROM (\n%s\n) AS tab' % base_sql
            self.log.debug("Report {%d} SQL (count): %s", id, count_sql)
            try:
                cursor.execute(count_sql, args)
            except Exception as e:
                self.log.warning('Exception caught while executing '
                                 'Report {%d}: %r, args %r%s', id,
                                 count_sql, args,
                                 exception_to_unicode(e, traceback=True))
                return e, count_sql
            num_items = cursor.fetchone()[0]

            # The column names are obtained
            colnames_sql = 'SELECT * FROM (\n%s\n) AS tab LIMIT 1' \
                           % base_sql
            self.log.debug("Report {%d} SQL (col names): %s", id,
                           colnames_sql)
            try:
                cursor.execute(colnames_sql, args)
            except Exception as e:
                self.log.warning('Exception caught while executing '
                                 'Report {%d}: %r, args %r%s', id,
                                 colnames_sql, args,
                                 exception_to_unicode(e, traceback=True))
                return e, colnames_sql
            cols = get_column_names(cursor)

            # The ORDER BY columns are inserted
            sort_col = req.args.get('sort', '')
            asc = req.args.getint('asc', 0, min=0, max=1)
            self.log.debug("%r %s (%s)", cols, sort_col,
                           '^' if asc else 'v')
            order_cols = []
            if sort_col and sort_col not in cols:
                raise TracError(_('Query parameter "sort=%(sort_col)s" '
                                  ' is invalid', sort_col=sort_col))

            skel = None
            if '__group__' in cols:
                order_cols.append('__group__')
            if sort_col:
                sort_col = '%s %s' % (db.quote(sort_col),
                                      'ASC' if asc else 'DESC')

            if SORT_COLUMN in sql:
                # Method 1: insert sort_col at specified position
                sql = sql.replace(SORT_COLUMN, sort_col or '1')
            elif sort_col:
                # Method 2: automagically insert sort_col (and __group__
                # before it, if __group__ was specified) as first criteria
                if '__group__' in cols:
                    order_by.append('__group__ ASC')
                order_by.append(sort_col)

                # is there already an ORDER BY in the original sql?
                skel = sql_skeleton(sql)
                before, after = split_sql(sql, _order_by_re, skel)
                if after:  # there were some other criterions, keep them
                    order_by.append(after)
                sql = ' '.join([before, 'ORDER BY', ', '.join(order_by)])

            # Add LIMIT/OFFSET if pagination needed
            limit_offset = ''
            if num_items > limit:
                limit_offset = ' '.join(['LIMIT', str(limit),
                                         'OFFSET', str(offset)])
            if LIMIT_OFFSET in sql:
                # Method 1: insert LIMIT/OFFSET at specified position
                sql = sql.replace(LIMIT_OFFSET, limit_offset)
            else:
                # Method 2: limit/offset is added unless already present
                skel = skel or sql_skeleton(sql)
                if 'LIMIT' not in skel.upper():
                    sql = ' '.join([sql, limit_offset])
            self.log.debug("Report {%d} SQL (order + limit): %s", id, sql)

        try:
            cursor.execute(sql, args)
        except Exception as e:
            self.log.warning('Exception caught while executing Report '
                             '{%d}: %r, args %r%s', id, sql, args,
                             exception_to_unicode(e, traceback=True))
            if order_by or limit_offset:
                add_notice(req, _("Hint: if the report failed due to"
                                  " automatic modification of the ORDER"
                                  " BY clause or the addition of"
                                  " LIMIT/OFFSET, please look up"
                                  " %(sort_column)s and %(limit_offset)s"
                                  " in TracReports to see how to gain"
                                  " complete control over report"
                                  " rewriting.",
                                  sort_column=SORT_COLUMN,
                                  limit_offset=LIMIT_OFFSET))
            return e, sql
        rows = cursor.fetchall() or []
        cols = get_column_names(cursor)
        return cols, rows, num_items, missing_args, limit_offset
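# Caller-side sketch, an assumption rather than original code: the method above
# returns either (exception, failed_sql) when a query fails, or the 5-tuple
# (cols, rows, num_items, missing_args, limit_offset) on success, so a caller
# has to discriminate on the first element before unpacking.
def run_report(report_module, req, report_id, sql, args, limit=100, offset=0):
    result = report_module.execute_paginated_report(req, report_id, sql, args,
                                                    limit, offset)
    if len(result) == 2 and isinstance(result[0], Exception):
        error, failed_sql = result
        # A real caller would render an error page; this sketch just re-raises.
        raise error
    cols, rows, num_items, missing_args, limit_offset = result
    return cols, rows, num_items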
def intercept_report_rss(self, req, data):
    #figure out which headers are being used
    # - iterate through data's header_groups' list of lists of dictionaries
    # - I'm assuming that each group has identical headers (should always be the case), so I can check only the first header group
    titles = []
    for header in data['header_groups'][0]:
        titles.append(header['col'].strip('_'))
    #Note that "titles" is not used at this point. Because of aliasing in sql, it would be hard to figure out which title means which column

    #figure out which tickets are listed
    # - iterate through data's row_groups list of lists of dictionaries
    # - I need to check each row group, but the rss feed won't be grouped (it's only time-sorted) so I can then ignore the groupings
    ticket_ids = set()
    ticket_ids.update([row['resource'].id
                       for (_, row_group) in data['row_groups']
                       for row in row_group])
    self.env.log.debug("Tickets in Report: %s" % ticket_ids)

    #generate data based on the headers and the ticket ids
    # - actually, for now I'm just going by the ticket ids because the headers will be tricky: SQL aliasing will make it so I can't just use them verbatim
    # - oddly enough, the query module will make this work, because it can't alias.
    # - make sure tickets that have been *created* but not *modified* show up in the list
    #   - this is easy with a LEFT JOIN
    #   - COALESCE is used to pick values from the ticket if they don't exist in the change
    # - we're now using our own template, so we don't have to obey report.rss's rules anymore.
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    idstring = ','.join([str(s) for s in ticket_ids])
    #if their limit is set to '0', sqlite will return 0 rows. thus, make it -1 instead
    limit = self.config.getint('report', 'items_per_page_rss', -1) or -1
    #all the fields starting with 'tc_' will get printed for *all* changes.
    #fields without this prefix will only get printed once for each set of changes.
    sql = """SELECT t.summary AS summary, t.id AS id, t.owner AS owner,
                    t.priority AS priority, t.milestone AS milestone,
                    t.component AS component, t.version AS version,
                    t.cc AS cc, t.keywords AS keywords,
                    COALESCE(tc.time, t.time) AS changetime,
                    COALESCE(tc.author, t.reporter) AS reporter,
                    tc.field AS tc_field, tc.oldvalue AS tc_oldvalue,
                    tc.newvalue AS tc_newvalue
             FROM ticket t
             LEFT JOIN ticket_change tc ON t.id = tc.ticket
             WHERE t.id IN (%(ids)s)
             ORDER BY changetime ASC
             LIMIT %(limit)s;
          """ % {'ids': idstring, 'limit': limit}
    cursor.execute(sql)

    #convert the rows to dictionaries keyed off of the column names, so we need to find the column names
    cols = get_column_names(cursor)

    #store the rows in a dictionary by ticket so all of one ticket's changes are in one spot.
    # - then store by timestamp in a sub-dictionary so simultaneous changes to a ticket are one feed item
    # - this means that if people make changes to two different tickets simultaneously it won't be an issue
    # - because of the way that the query grabs the data, "title" and "id" will be the same for all changetime-colliding rows
    items = {}
    for row in cursor:
        rowAsDict = dict(zip(cols, row))
        if rowAsDict['id'] in items:
            if rowAsDict['changetime'] in items[rowAsDict['id']]:
                #we've already got something at this time; append it to the changetime's value list
                items[rowAsDict['id']][rowAsDict['changetime']].append(rowAsDict)
            else:
                items[rowAsDict['id']][rowAsDict['changetime']] = [rowAsDict]
        else:
            items[rowAsDict['id']] = {rowAsDict['changetime']: [rowAsDict]}
    self.env.log.debug('Tickets in Feed: %s' % items.values())

    #because we're using our own template, we can just blow away the current data structure
    # - keep the report's title, and description, and, uh, report (report is a dictionary that has an id and a resource object pointing to the report)
    # - context lets us use wiki_to_html
    data = {'items': items,
            'title': data['title'],
            'description': data['description'],
            'report': data['report'],
            'context': data['context']}
    return ('detailedrss.rss', data, 'application/rss+xml')
def execute_paginated_report(self, req, db, id, sql, args, limit=0, offset=0):
    sql, args, missing_args = self.sql_sub_vars(sql, args, db)
    if not sql:
        raise TracError(_('Report {%(num)s} has no SQL query.', num=id))
    self.log.debug('Executing report with SQL "%s"' % sql)
    self.log.debug('Request args: %r' % req.args)

    cursor = db.cursor()

    num_items = 0
    if id != -1 and limit > 0:
        # The number of tickets is obtained.
        count_sql = 'SELECT COUNT(*) FROM (' + sql + ') AS tab'
        cursor.execute(count_sql, args)
        self.log.debug("Query SQL(Get num items): " + count_sql)
        for row in cursor:
            pass
        num_items = row[0]

        # The column name is obtained.
        get_col_name_sql = 'SELECT * FROM ( ' + sql + ' ) AS tab LIMIT 1'
        cursor.execute(get_col_name_sql, args)
        self.env.log.debug("Query SQL(Get col names): " + get_col_name_sql)
        cols = get_column_names(cursor)

        sort_col = req.args.get('sort', '')
        self.log.debug("Columns %r, Sort column %s" % (cols, sort_col))
        order_cols = []
        if sort_col:
            if '__group__' in cols:
                order_cols.append('__group__')
            if sort_col in cols:
                order_cols.append(sort_col)
            else:
                raise TracError(_('Query parameter "sort=%(sort_col)s" '
                                  ' is invalid', sort_col=sort_col))

        # The report-query results is obtained
        asc = req.args.get('asc', '1')
        asc_str = asc == '1' and 'ASC' or 'DESC'
        order_by = ''
        if len(order_cols) != 0:
            order = ', '.join(db.quote(col) for col in order_cols)
            order_by = " ".join([' ORDER BY', order, asc_str])
        sql = " ".join(['SELECT * FROM (', sql, ') AS tab', order_by])
        sql = " ".join([sql, 'LIMIT', str(limit), 'OFFSET', str(offset)])
        self.log.debug("Query SQL: " + sql)

    cursor.execute(sql, args)

    # FIXME: fetchall should probably not be used.
    info = cursor.fetchall() or []
    cols = get_column_names(cursor)

    db.rollback()

    return cols, info, num_items, missing_args
def intercept_report_rss(self, req, data):
    #figure out which headers are being used
    # - iterate through data's header_groups' list of lists of dictionaries
    # - I'm assuming that each group has identical headers (should always be the case), so I can check only the first header group
    titles = []
    for header in data['header_groups'][0]:
        titles.append(header['col'].strip('_'))
    #Note that "titles" is not used at this point. Because of aliasing in sql, it would be hard to figure out which title means which column

    #figure out which tickets are listed
    # - iterate through data's row_groups list of lists of dictionaries
    # - I need to check each row group, but the rss feed won't be grouped (it's only time-sorted) so I can then ignore the groupings
    ticket_ids = set()
    ticket_ids.update([row['resource'].id
                       for (_, row_group) in data['row_groups']
                       for row in row_group])
    self.env.log.debug("Tickets in Report: %s" % ticket_ids)

    #generate data based on the headers and the ticket ids
    # - actually, for now I'm just going by the ticket ids because the headers will be tricky: SQL aliasing will make it so I can't just use them verbatim
    # - oddly enough, the query module will make this work, because it can't alias.
    # - make sure tickets that have been *created* but not *modified* show up in the list
    #   - this is easy with a LEFT JOIN
    #   - COALESCE is used to pick values from the ticket if they don't exist in the change
    # - we're now using our own template, so we don't have to obey report.rss's rules anymore.
    db = self.env.get_db_cnx()
    cursor = db.cursor()
    idstring = ','.join([str(s) for s in ticket_ids])
    #if their limit is set to '0', sqlite will return 0 rows. thus, make it -1 instead
    limit = self.config.getint('report', 'items_per_page_rss', -1) or -1
    #all the fields starting with 'tc_' will get printed for *all* changes.
    #fields without this prefix will only get printed once for each set of changes.
    sql = """SELECT t.summary AS summary, t.id AS id, t.owner AS owner,
                    t.priority AS priority, t.milestone AS milestone,
                    t.component AS component, t.version AS version,
                    t.cc AS cc, t.keywords AS keywords,
                    COALESCE(tc.time, t.time) AS changetime,
                    COALESCE(tc.author, t.reporter) AS reporter,
                    tc.field AS tc_field, tc.oldvalue AS tc_oldvalue,
                    tc.newvalue AS tc_newvalue
             FROM ticket t
             LEFT JOIN ticket_change tc ON t.id = tc.ticket
             WHERE t.id IN (%(ids)s)
             ORDER BY changetime DESC
             LIMIT %(limit)s;
          """ % {'ids': idstring, 'limit': limit}
    cursor.execute(sql)

    #convert the rows to dictionaries keyed off of the column names, so we need to find the column names
    cols = get_column_names(cursor)

    #store the rows in a dictionary by (ticket, timestamp) tuple so simultaneous changes to a ticket are one feed item
    # - this means that if people make changes to two different tickets simultaneously it won't be an issue
    # - because of the way that the query grabs the data, "title" and "id" will be the same for all changetime-colliding rows
    items = {}
    for row in cursor:
        rowAsDict = dict(zip(cols, row))
        selector = (rowAsDict['changetime'], rowAsDict['id'])
        if selector in items:
            items[selector].append(rowAsDict)
        else:
            items[selector] = [rowAsDict]
    self.env.log.debug('Tickets in Feed: %s' % items.values())

    #because we're using our own template, we can just blow away the current data structure
    # - keep the report's title, and description, and, uh, report (report is a dictionary that has an id and a resource object pointing to the report)
    # - context lets us use wiki_to_html
    data = {'items': items,
            'title': data['title'],
            'description': data['description'],
            'report': data['report'],
            'context': data['context']}
    return ('changesrss.rss', data, 'application/rss+xml')