# Example #1
# 0
    def default(self, *args):
        """Dispatch a data-browser request based on positional URL segments.

        No args: list all databases.  One arg: list all tables for that db.
        More args: build a SQL query via UrlParams, execute it, and render
        the result in the requested output format (json / graph / png /
        csv, or the HTML template by default).

        Raises:
            Exception: if executing the generated query returns an error.
        """
        if not args:  # show all dbs
            return self.list_all_dbs()
        if len(args) == 1:  # show all tables for a db
            return self.list_all_tables(args[0])

        message = ''  # currently always empty; kept for the template contract
        urlparams = UrlParams(datadb.object_cache, self.features, *args)
        sql, sql_params = urlparams.to_sql()

        data, column_names, error = datadb.execute_on_db_uniq(urlparams.db_uniq, sql, sql_params)
        if error:
            raise Exception('Error executing the query: ' + error)
        column_info = datadb.get_column_info(urlparams.db_uniq, urlparams.table, column_names)  # TODO highlight PK in UI

        if urlparams.output_format == 'json':
            # stringify everything, not to get "is not JSON serializable"
            stringified = [[str(x) for x in row] for row in data]  # TODO better to cast all cols to ::text in SQL?
            return json.dumps(stringified)
        elif urlparams.output_format in ['graph', 'png']:
            return self.plot_graph(data, urlparams)
        elif urlparams.output_format == 'csv':
            return self.to_csv(data, column_names, urlparams)
        else:
            tmpl = env.get_template('index.html')
            return tmpl.render(message=message, dbname=urlparams.dbname, table=urlparams.table, sql=sql, data=data,
                               column_info=column_info, max_text_length=self.features['maximum_text_column_length'])
# Example #2
# 0
def fill_timeline_holes(data, bucket, db_uniq):
    """Fill gaps between the min and max timestamps with zero counts.

    Uses the database's generate_series() to enumerate every interval step
    between the first and last timestamp, so the returned timeline has one
    entry per step even where the input had none.

    data: [(datetime, count), ...] — assumed sorted ascending by timestamp
    bucket: one of 'min', 'hour', 'day', 'week', 'month'
    db_uniq: identifier of the database to run the helper query on

    Returns [(datetime, count), ...] with missing buckets filled as count 0.
    Raises ValueError on an unknown bucket, Exception on a query error.
      TODO can be actually added to the initial select
    """
    if len(data) <= 1:
        return data

    # bucket is interpolated into the SQL text (not a bind parameter), so
    # validate it against the closed set to rule out SQL injection
    if bucket not in ('min', 'hour', 'day', 'week', 'month'):
        raise ValueError('invalid bucket: {}'.format(bucket))

    data_as_dict = {d[0]: d for d in data}
    sql = "select generate_series(%s, %s, '1{}'::interval)".format(bucket)
    time_series, col_names, error = datadb.execute_on_db_uniq(db_uniq, sql, (data[0][0], data[-1][0]))
    if error:
        raise Exception(error)
    # fixed py2-only '0L' literal: plain 0 compares/serializes identically
    return [data_as_dict.get(s[0], (s[0], 0)) for s in time_series]