def generate(scraper):
    """Render the HTML report pages for a scraper run.

    Builds the top-level index, one run listing per task, and one page per
    task run, and returns the path of the generated index file.
    """
    db.load(scraper)
    runs = list(aggregate_loglevels(RUNS_QUERY, ('scraperId',)))
    tasks = list(aggregate_loglevels(TASKS_QUERY, ('scraperId', 'taskName')))
    index_file = render.paginate(scraper, runs, 'index%s.html', 'index.html',
                                 tasks=tasks)

    # Per-task run listings, e.g. <task>/<scraperId>/index.html.
    for task_run in db.query(TASK_RUNS_LIST):
        task = task_run.get('taskName') or render.PADDING
        file_name = '%s/%s/index%%s.html' % (task, task_run.get('scraperId'))
        # Skip listings that have already been rendered.
        if path.exists(path.join(path.dirname(index_file), file_name % '')):
            continue
        if task_run.get('taskName') is None:
            runs = aggregate_loglevels(TASK_RUNS_QUERY_NULL, ('taskId',),
                                       scraperId=task_run.get('scraperId'))
        else:
            runs = aggregate_loglevels(TASK_RUNS_QUERY, ('taskId',),
                                       scraperId=task_run.get('scraperId'),
                                       taskName=task_run.get('taskName'))
        runs = sort_aggregates(runs)
        render.paginate(scraper, runs, file_name, 'task_run_list.html',
                        taskName=task)

    # Per-run detail pages, e.g. <task>/<scraperId>/<taskId>.html.
    for (scraperId, taskId), rows in all_task_runs(scraper).items():
        taskName = rows[0].get('taskName')
        file_name = (taskName or render.PADDING,
                     scraperId or render.PADDING,
                     taskId or render.PADDING)
        file_name = '%s/%s/%s%%s.html' % file_name
        if path.exists(path.join(path.dirname(index_file), file_name % '')):
            continue
        render.paginate(scraper, rows, file_name, 'task_run_item.html',
                        scraperId=scraperId, taskId=taskId, taskName=taskName)
    return index_file
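
# Illustrative sketch of the grouping performed by aggregate_loglevels below;
# the row values are made-up examples, not real query output. Given rows
# ordered by scraperId, one row per (scraperId, levelname), such as:
#
#   {'scraperId': 'a', 'levelname': 'INFO',  'messages': 3, 'tasks': 2}
#   {'scraperId': 'a', 'levelname': 'ERROR', 'messages': 1, 'tasks': 1}
#   {'scraperId': 'b', 'levelname': 'INFO',  'messages': 5, 'tasks': 4}
#
# the generator yields one aggregate per key group, summing 'messages' and
# 'tasks' and adding a count entry per log level:
#
#   {'scraperId': 'a', 'levelname': 'INFO', 'messages': 4, 'tasks': 3,
#    'INFO': 3, 'ERROR': 1}
#   {'scraperId': 'b', 'levelname': 'INFO', 'messages': 5, 'tasks': 4,
#    'INFO': 5}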
def aggregate_loglevels(sql, keys, **kwargs):
    """Collapse per-level log counts into one aggregate row per key group.

    Assumes the query returns rows sorted by ``keys``, one row per log level.
    """
    data, key = {}, None
    for row in db.query(sql, **kwargs):
        # Build the group key as a tuple so it compares by value.
        row_key = tuple(row[k] for k in keys)
        if key != row_key:
            if key is not None:
                yield data
            data = row
            key = row_key
        else:
            data['messages'] += row['messages']
            data['tasks'] += row['tasks']
        # Record the per-level message count alongside the running totals.
        data[row['levelname']] = row['messages']
    if key is not None:
        yield data