def gen_json_with_filters(self, obj, project_id, expanded, filters):
    """Stream a JSON array of filtered, exported task rows.

    Yields string fragments ("[", each serialized row with its
    separator, "]") so large exports are never built in memory at once.

    :param obj: table/object selector forwarded to browse_tasks_export.
    :param project_id: id of the project whose tasks are exported.
    :param expanded: whether to include expanded (joined) columns.
    :param filters: filter spec forwarded to browse_tasks_export.
    :returns: generator of JSON text fragments.
    """
    rows = browse_tasks_export(obj, project_id, expanded, filters)
    yield "["
    first = True
    for row in rows:
        # Emit the separator *before* every item except the first.  The
        # original emitted it after each item and suppressed it only when
        # a separately queried row count was reached; if that count
        # disagreed with the rows actually returned, the output ended in
        # a trailing comma (invalid JSON).  This form is always valid and
        # needs no extra count query.
        if not first:
            yield ", "
        first = False
        yield json.dumps(self.process_filtered_row(dict(row)))
    yield "]"
def _get_csv_with_filters(self, out, writer, table, project_id, expanded, filters):
    """Write filtered task-export rows as CSV into *out* and yield its text.

    The header row is derived from the materialized result set, then each
    row is post-processed and formatted before being written.  Finally the
    buffer is rewound and its full contents yielded in one chunk.

    NOTE(review): a second method with this same name appears later in the
    file and will shadow this definition inside the class — confirm which
    one is intended to survive.
    """
    exported = browse_tasks_export(table, project_id, expanded, filters)
    materialized = list(exported)
    headers = self._get_all_headers(objs=materialized, expanded=expanded,
                                    table=table, from_obj=False)
    writer.writerow(headers)
    for record in materialized:
        processed = self.process_filtered_row(dict(record))
        writer.writerow(self._format_csv_row(processed, headers))
    out.seek(0)
    yield out.read()
def _get_csv_with_filters(self, out, writer, table, project_id, expanded, filters):
    """Write filtered task-export rows as CSV into *out* and yield its text.

    Nested ``info`` dicts are flattened in place (via
    ``TaskCsvExporter.flatten``) so each leaf value gets its own CSV
    column before headers are computed from the rows.

    :param out: seekable text buffer the csv writer targets.
    :param writer: csv writer bound to *out*.
    :param table: table/object selector forwarded to browse_tasks_export.
    :param project_id: id of the project whose tasks are exported.
    :param expanded: whether to include expanded (joined) columns.
    :param filters: filter spec forwarded to browse_tasks_export.
    :returns: generator yielding the full CSV text once.
    """
    objs = browse_tasks_export(table, project_id, expanded, filters)
    rows = [obj for obj in objs]
    for row in rows:
        info = row['info']
        # Only mappings can be flattened; ``info`` may be falsy (no
        # payload) or a non-dict value, which previously crashed on the
        # iterate call.
        if info and isinstance(info, dict):
            # ``.items()`` replaces the Python-2-only ``.iteritems()``,
            # which raises AttributeError on Python 3; semantics are
            # identical for this use.
            flattened = dict(TaskCsvExporter.flatten(info.items()))
            info.update(flattened)
    headers = self._get_all_headers(objs=rows, expanded=expanded,
                                    table=table, from_obj=False)
    writer.writerow(headers)
    for row in rows:
        processed = self.process_filtered_row(dict(row))
        writer.writerow(self._format_csv_row(processed, headers))
    out.seek(0)
    yield out.read()