def compute_times(query, tables, criteria, params):
    tzname = criteria.business_hours_tzname
    logger.debug("%s: timezone: %s" % (query.job, tzname))
    tz = pytz.timezone(tzname)

    # Convert to datetime objects in the requested timezone
    st = criteria.starttime.astimezone(tz)
    et = criteria.endtime.astimezone(tz)
    logger.debug("%s: times: %s - %s" % (query.job, st, et))

    # Business hours start/end, as a string like "8:00am" ("HH:MMam")
    sb = parse_time(criteria.business_hours_start)
    eb = parse_time(criteria.business_hours_end)

    weekends = criteria.business_hours_weekends

    # Iterate from st to et, one day at a time
    times = []
    t = st
    while t <= et:
        # Set t0/t1 to the date of t but the time of sb/eb
        t0 = replace_time(t, sb)
        t1 = replace_time(t, eb)

        # Advance t by 1 day
        t = t + datetime.timedelta(days=1)

        # Skip weekends
        if not weekends and t0.weekday() >= 5:
            continue

        # Now see if we have any overlap of business hours for today
        if et < t0:
            # Report end time is today before business hours start, all done
            break

        if et < t1:
            # Report end time is today in the middle of business hours,
            # adjust t1
            t1 = et

        if t1 < st:
            # Report start time occurs today *after* business end,
            # nothing today
            continue

        if t0 < st:
            # Report start time occurs today in the middle of business
            # hours, adjust t0
            t0 = st

        logger.debug("%s: start: %s, end: %s, duration: %s" %
                     (query.job, str(t0), str(t1),
                      str(timedelta_total_seconds(t1 - t0))))
        times.append((t0, t1, timedelta_total_seconds(t1 - t0)))

    if len(times) == 0:
        return None

    return pandas.DataFrame(times,
                            columns=['starttime', 'endtime', 'totalsecs'])
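# Illustrative sketch (not from the source): the day-by-day clipping above
# reduces to intersecting each day's business-hours window with the report
# range [st, et]. A minimal, standalone version of that intersection:
import datetime

def clip_window(day_start, day_end, st, et):
    """Return the overlap of [day_start, day_end] with [st, et], or None."""
    t0 = max(day_start, st)
    t1 = min(day_end, et)
    return (t0, t1) if t0 < t1 else None

# An 8:00am-5:00pm business day clipped by a report starting at 10:30am:
st = datetime.datetime(2013, 6, 3, 10, 30)
et = datetime.datetime(2013, 6, 3, 23, 0)
print(clip_window(datetime.datetime(2013, 6, 3, 8, 0),
                  datetime.datetime(2013, 6, 3, 17, 0), st, et))
# -> the (10:30am, 5:00pm) slice of that day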
def as_text(self):
    """ Return certain field values as a dict for simple json parsing """
    result = {}

    for k, v in self.cleaned_data.iteritems():
        if isinstance(v, datetime.datetime):
            result[k] = v.isoformat()
        elif isinstance(v, datetime.timedelta):
            result[k] = str(timedelta_total_seconds(v)) + " seconds"
        elif isinstance(v, UploadedFile):
            # look for uploaded files, save them off to another
            # temporary file and return the path for use in JSON;
            # consumers of this file will need to clean them up
            # TODO this will be replaced by the File Storage App
            newtemp = tempfile.NamedTemporaryFile(delete=False)
            v.seek(0)
            shutil.copyfileobj(v, newtemp)
            v.close()
            newtemp.close()
            result[k] = newtemp.name
        else:
            result[k] = v

    return result
def render(self, name, value, attrs):
    initial_time = attrs.get('initial_time', None)
    if initial_time:
        m = re.match("now *- *(.+)", initial_time)
        if m:
            secs = timedelta_total_seconds(parse_timedelta(m.group(1)))
            initial_time = ("d = new Date(); d.setSeconds(d.getSeconds()-%d);"
                            % secs)
        else:
            initial_time = "d = '%s';" % initial_time
    else:
        initial_time = "d = new Date();"

    msg = '''
    {0}
    <span id="timenow_{name}" class="icon-time" title="Set time/date to now">
    </span>
    <script type="text/javascript">
        $("#id_{name}").timepicker({{
            step: 15,
            scrollDefaultNow: true,
            timeFormat: "g:i:s a"
        }});
        $("#timenow_{name}").click(function() {{
            $("#id_{name}").timepicker("setTime", new Date());
        }});
        {initial_time}
        $("#id_{name}").timepicker("setTime", d);
    </script>
    '''
    return msg.format(
        super(TimeWidget, self).render(name, value, attrs),
        name=name,
        initial_time=initial_time
    )
def render(self, name, value, attrs):
    initial_date = attrs.get('initial_date', None)
    if initial_date:
        m = re.match("now *- *(.+)", initial_date)
        if m:
            secs = timedelta_total_seconds(parse_timedelta(m.group(1)))
            initial_date = ("d = new Date(); d.setSeconds(d.getSeconds()-%d);"
                            % secs)
        else:
            initial_date = "d = '%s';" % initial_date
    else:
        initial_date = "d = new Date();"

    msg = '''
    {0}
    <span id="datenow_{name}" class="icon-calendar" title="Set date to today">
    </span>
    <script type="text/javascript">
        $("#id_{name}").datepicker({{
            format: "mm/dd/YY",
            defaultDate: +2,
            autoclose: true
        }});
        {initial_date}
        $("#id_{name}").datepicker("setDate", d);
        $("#datenow_{name}").click(function() {{
            $("#id_{name}").datepicker("setDate", new Date());
        }});
    </script>
    '''
    return msg.format(
        super(DateWidget, self).render(name, value, attrs),
        name=name,
        initial_date=initial_date
    )
def create(cls, name, groupby, realm, duration, resolution='auto',
           filterexpr=None, interface=False, **kwargs):
    logger.debug('Creating ProfilerTable table %s (%s) - %s/%s' %
                 (name, duration, groupby, realm))

    options = TableOptions(groupby=groupby,
                           realm=realm,
                           centricity='int' if interface else 'hos')

    t = Table(name=name, module=__name__, filterexpr=filterexpr,
              options=options, **kwargs)
    t.save()

    if resolution != 'auto':
        if isinstance(resolution, int):
            res = resolution
        else:
            res = int(timedelta_total_seconds(parse_timedelta(resolution)))
        resolution = rvbd.profiler.report.Report.RESOLUTION_MAP[res]

    if isinstance(duration, int):
        duration = "%d min" % duration

    fields_add_device_selection(t, keyword='profiler_device',
                                label='Profiler', module='profiler',
                                enabled=True)
    fields_add_time_selection(t, initial_duration=duration)
    fields_add_filterexpr(t)
    fields_add_resolution(t, initial=resolution,
                          resolutions=[('auto', 'Automatic'),
                                       '1min', '15min', 'hour', '6hour'],
                          special_values=['auto'])
    return t
def __init__(self, table, job):
    self.table = table
    self.job = job
    self.timeseries = False       # set if a key column called 'time' is created
    self.column_names = []

    # Resolution comes in as a timedelta
    resolution = timedelta_total_seconds(job.criteria.resolution)
    default_delta = 1000000000                    # one second, in nanoseconds
    self.delta = int(default_delta * resolution)  # sample size interval
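# Illustrative sketch of the resolution-to-nanoseconds conversion above
# (assumption: timedelta_total_seconds(td) behaves like td.total_seconds(),
# available since Python 2.7):
import datetime

resolution = datetime.timedelta(minutes=1)
delta = int(1000000000 * resolution.total_seconds())
assert delta == 60 * 10 ** 9   # one minute, expressed in nanoseconds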
def decompress(self, value):
    if isinstance(value, str) or isinstance(value, unicode):
        value = timedelta_total_seconds(parse_timedelta(value))

    if value:
        m = [v for v in self.choices if v[0] == value]
        if len(m) == 1:
            return m[0]
        else:
            return [0, '%d min' % (value / 60)]

    return [None, None]
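# Illustrative sketch (assumption: self.choices is a sequence of
# (seconds, label) pairs); this mirrors the lookup-with-fallback above:
choices = [(60, '1 min'), (900, '15 min')]

for value in (900, 1800):
    m = [v for v in choices if v[0] == value]
    print(m[0] if len(m) == 1 else [0, '%d min' % (value / 60)])
# -> (900, '15 min'), then the fallback [0, '30 min'] for the unlisted value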
def create(cls, name, template_id, duration,
           resolution='auto', filterexpr=None, **kwargs):
    """Create a ProfilerTemplateTable.

    This queries a Profiler saved-report template rather than
    creating a new report from scratch.

    `template_id` is a saved-report template ID

    `duration` is in minutes or a string like '15min'
    """
    logger.debug('Creating ProfilerTemplateTable table %s (%s) - %s/%s' %
                 (name, template_id, duration, resolution))

    options = TableOptions(template_id=template_id)

    t = Table(name=name, module=__name__, filterexpr=filterexpr,
              options=options, **kwargs)
    t.save()

    if resolution != 'auto':
        if isinstance(resolution, int):
            res = resolution
        else:
            res = int(timedelta_total_seconds(parse_timedelta(resolution)))
        resolution = rvbd.profiler.report.Report.RESOLUTION_MAP[res]

    if isinstance(duration, int):
        duration = "%d min" % duration

    fields_add_device_selection(t, keyword='profiler_device',
                                label='Profiler', module='profiler',
                                enabled=True)
    fields_add_time_selection(t, initial_duration=duration)
    fields_add_filterexpr(t)
    fields_add_resolution(t, initial=resolution,
                          resolutions=[('auto', 'Automatic'),
                                       '1min', '15min', 'hour', '6hour'],
                          special_values=['auto'])
    return t
def create(cls, shark, interface, name,
           packet_retention_size_limit,
           packet_retention_packet_limit=None,
           packet_retention_time_limit=None,
           bpf_filter=None,
           snap_length=65535,
           indexing_size_limit=None,
           indexing_synced=False,
           indexing_time_limit=None,
           start_immediately=False,
           requested_start_time=None,
           requested_stop_time=None,
           stop_rule_size_limit=None,
           stop_rule_packet_limit=None,
           stop_rule_time_limit=None):
    """Create a new capture job"""

    def _calc_size(size, total):
        # Percentages (e.g. '20%') are computed against the total storage;
        # other strings (e.g. '10GB') are converted to bytes.
        if isinstance(size, str) and size[-1] == '%':
            size = total * int(size[:-1]) / 100
        elif not isinstance(size, (int, long)) and size is not None:
            size = utils.human2bytes(size)
        return size

    stats = shark.get_stats()
    packet_storage_total = stats['storage']['packet_storage']['total']
    index_storage_total = \
        stats['storage']['os_storage']['index_storage']['total']

    packet_retention_size_limit = (
        _calc_size(packet_retention_size_limit, packet_storage_total)
        if packet_retention_size_limit else None)
    stop_rule_size_limit = (
        _calc_size(stop_rule_size_limit, packet_storage_total)
        if stop_rule_size_limit else None)
    indexing_size_limit = (
        _calc_size(indexing_size_limit, index_storage_total)
        if indexing_size_limit else None)

    packet_retention_packet_limit = (
        int(packet_retention_packet_limit)
        if packet_retention_packet_limit else None)
    stop_rule_size_limit = (
        int(stop_rule_size_limit) if stop_rule_size_limit else None)
    stop_rule_packet_limit = (
        int(stop_rule_packet_limit) if stop_rule_packet_limit else None)
    snap_length = int(snap_length) if snap_length else 65535
    indexing_size_limit = (
        int(indexing_size_limit) if indexing_size_limit else None)

    if indexing_time_limit:
        indexing_time_limit = \
            int(timeutils.timedelta_total_seconds(indexing_time_limit))
    if packet_retention_time_limit:
        packet_retention_time_limit = \
            int(timeutils.timedelta_total_seconds(packet_retention_time_limit))
    if stop_rule_time_limit:
        stop_rule_time_limit = \
            int(timeutils.timedelta_total_seconds(stop_rule_time_limit))

    if requested_start_time:
        requested_start_time = requested_start_time.strftime(cls._timefmt)
    if requested_stop_time:
        requested_stop_time = requested_stop_time.strftime(cls._timefmt)

    jobrequest = {'interface_name': interface.id}

    if name:
        jobrequest['name'] = name

    if (packet_retention_size_limit or packet_retention_packet_limit
            or packet_retention_time_limit):
        jobrequest['packet_retention'] = dict()
        if packet_retention_size_limit:
            jobrequest['packet_retention']['size_limit'] = \
                packet_retention_size_limit
        if packet_retention_packet_limit:
            jobrequest['packet_retention']['packet_limit'] = \
                packet_retention_packet_limit
        if packet_retention_time_limit:
            jobrequest['packet_retention']['time_limit'] = \
                packet_retention_time_limit

    if bpf_filter:
        jobrequest['bpf_filter'] = bpf_filter
    if requested_start_time:
        jobrequest['requested_start_time'] = requested_start_time
    if requested_stop_time:
        jobrequest['requested_stop_time'] = requested_stop_time

    if stop_rule_size_limit or stop_rule_packet_limit or stop_rule_time_limit:
        jobrequest['stop_rule'] = dict()
        if stop_rule_size_limit:
            jobrequest['stop_rule']['size_limit'] = stop_rule_size_limit
        if stop_rule_packet_limit:
            jobrequest['stop_rule']['packet_limit'] = stop_rule_packet_limit
        if stop_rule_time_limit:
            jobrequest['stop_rule']['time_limit'] = stop_rule_time_limit

    if snap_length:
        jobrequest['snap_length'] = int(snap_length)

    if indexing_synced or indexing_size_limit or indexing_time_limit:
        jobrequest['indexing'] = dict()
        if indexing_synced:
            jobrequest['indexing']['synced'] = indexing_synced
        if indexing_size_limit:
            jobrequest['indexing']['size_limit'] = indexing_size_limit
        if indexing_time_limit:
            jobrequest['indexing']['time_limit'] = indexing_time_limit

    if start_immediately:
        jobrequest['start_immediately'] = start_immediately

    job_id = shark.api.jobs.add(jobrequest)
    job = cls(shark, job_id)
    return job
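# Illustrative sketch of the _calc_size() rules above: percentages are taken
# against total storage, other strings would go through utils.human2bytes()
# (assumed to parse forms like '10GB'), and ints pass through unchanged.
total = 2 * 10 ** 12                    # 2 TB of packet storage
size = '20%'
if isinstance(size, str) and size[-1] == '%':
    size = total * int(size[:-1]) / 100
print(size)                             # -> 400000000000 (bytes)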
def run(self):
    # All user-entered criteria is available directly from this object.
    # Values for any fields added to the table will appear as
    # attributes according to the field keyword.
    criteria = self.job.criteria

    # Check that a sample_device was selected
    if criteria.sample_device == '':
        logger.debug('%s: No sample device selected' % self.table)
        self.job.mark_error("No Sample Device Selected")
        return False
    sample_device = DeviceManager.get_device(criteria.sample_device)

    # Get the columns for this report
    columns = self.table.get_columns(synthetic=False)

    sortcol = None
    if self.table.sortcol is not None:
        sortcol = self.table.sortcol.name

    # Time selection is available via criteria.starttime and endtime.
    # These are both datetime objects.
    t0 = criteria.starttime
    t1 = criteria.endtime

    # Time resolution is a timedelta object
    resolution = criteria.resolution

    # Grab the custom min and max criteria
    cmin = float(criteria.min)
    cmax = float(criteria.max)

    # Grab the table options
    beta = self.table.options.beta

    # Now do some computation -- create a table with a 'time' column
    # ranging from t0 to t1 with the defined resolution.  Then for each
    # additional column, evaluate a math function on the data.
    t = t0
    rows = []
    while t < t1:
        row = []
        for col in columns:
            if col.name == 'time':
                row.append(t)
            else:
                period_td = parse_timedelta(col.options.period)
                period_secs = timedelta_total_seconds(period_td)
                alpha = col.options.alpha
                funcname = col.options.func

                # Seconds since t0
                secs = timedelta_total_seconds(t - t0)
                rad = (secs / period_secs) * 2 * math.pi

                funcmap = {'sin': math.sin,
                           'cos': math.cos}

                # Compute!
                val = beta + alpha * funcmap[funcname](rad)

                # Clip by the min/max criteria
                val = max(cmin, val)
                val = min(cmax, val)

                # Add the value to the row
                row.append(val)

        # Add the row
        rows.append(row)

        # This function runs pretty fast, but this shows how to mark progress
        self.job.mark_progress(100 * (timedelta_total_seconds(t - t0) /
                                      timedelta_total_seconds(t1 - t0)))
        t = t + resolution

    # Save the result in self.data
    self.data = rows
    if self.table.rows > 0:
        self.data = self.data[:self.table.rows]

    logger.info("Report %s returned %s rows" % (self.job, len(self.data)))
    return True
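# Illustrative: each synthetic column above is a clipped sinusoid,
#   val = beta + alpha * f(2*pi * (t - t0) / period),  f in {sin, cos}.
import math

beta, alpha, period_secs = 10.0, 5.0, 3600.0
secs = 900.0                          # a quarter of the way into the period
val = beta + alpha * math.sin((secs / period_secs) * 2 * math.pi)
print(round(val, 3))                  # -> 15.0, the peak of the sine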
def report_business_hours(query, tables, criteria, params):
    times = tables['times']
    if times is None or len(times) == 0:
        return None

    deptable = Table.objects.get(id=params['table'])

    # Create all the jobs
    batch = BatchJobRunner(query)
    for i, row in times.iterrows():
        t0 = row['starttime'] / 1000
        t1 = row['endtime'] / 1000

        sub_criteria = copy.copy(criteria)
        sub_criteria.starttime = (datetime.datetime.utcfromtimestamp(t0)
                                  .replace(tzinfo=pytz.utc))
        sub_criteria.endtime = (datetime.datetime.utcfromtimestamp(t1)
                                .replace(tzinfo=pytz.utc))

        job = Job.create(table=deptable, criteria=sub_criteria)
        logger.debug("Created %s: %s - %s" % (job, t0, t1))
        batch.add_job(job)

    if len(batch.jobs) == 0:
        return None

    # Run all the jobs
    batch.run()

    # Now collect the data
    total_secs = 0
    df = None
    idx = 0
    for job in batch.jobs:
        if job.status == Job.ERROR:
            raise AnalysisException("%s for %s-%s failed: %s" %
                                    (job, job.criteria.starttime,
                                     job.criteria.endtime, job.message))
        subdf = job.data()
        logger.debug("%s: returned %d rows" %
                     (job, len(subdf) if subdf is not None else 0))
        if subdf is None:
            continue

        logger.debug("%s: actual_criteria %s" % (job, job.actual_criteria))
        t0 = job.actual_criteria.starttime
        t1 = job.actual_criteria.endtime

        subdf['__secs__'] = timedelta_total_seconds(t1 - t0)
        total_secs += timedelta_total_seconds(t1 - t0)
        idx += 1

        if df is None:
            df = subdf
        else:
            df = df.append(subdf)

    if df is None:
        return None

    keynames = [key.name for key in deptable.get_columns(iskey=True)]

    if 'aggregate' in params:
        ops = params['aggregate']
        for col in deptable.get_columns(iskey=False):
            if col.name not in ops:
                ops[col.name] = 'sum'
    else:
        ops = 'sum'

    df = avg_groupby_aggregate(df, keynames, ops, '__secs__', total_secs)

    return df
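# Illustrative sketch (assumption about avg_groupby_aggregate(): averaged
# columns are weighted by each sub-window's '__secs__' share of total_secs,
# so longer business-hours windows count proportionally more):
import pandas

df = pandas.DataFrame({'host': ['a', 'a'],
                       'avg_bytes': [100.0, 200.0],
                       '__secs__': [3600, 1800]})
total_secs = 5400
print((df['avg_bytes'] * df['__secs__']).sum() / total_secs)
# -> 133.33...: the hour-long window carries twice the weight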
def run(self, template_id, timefilter=None, resolution="auto",
        query=None, trafficexpr=None, data_filter=None, sync=True):
    """Create the report on Profiler and begin running the report.
    If the `sync` option is True, periodically poll until the report
    is complete, otherwise return immediately.

    `template_id` is the numeric id of the template to use for the report

    `timefilter` is the range of time to query, a TimeFilter object

    `resolution` is the data resolution (1min, 15min, etc.),
        defaults to 'auto'

    `query` is the query object containing criteria

    `trafficexpr` is a TrafficFilter object

    `data_filter` is a deprecated filter to run against report data

    `sync` if True, poll for status until the report is complete
    """
    self.template_id = template_id
    self.custom_columns = False
    if self.template_id != 184:
        # the columns in this report won't match, use custom columns instead
        self.custom_columns = True

    if timefilter is None:
        self.timefilter = TimeFilter.parse_range("last 5 min")
    else:
        self.timefilter = timefilter
    self.query = query
    self.trafficexpr = trafficexpr
    self.data_filter = data_filter

    self.id = None
    self.queries = list()
    self.last_status = None

    if resolution not in ["auto", "1min", "15min", "hour",
                          "6hour", "day", "week", "month"]:
        rd = parse_timedelta(resolution)
        resolution = self.RESOLUTION_MAP[int(timedelta_total_seconds(rd))]

    self.resolution = resolution

    start = datetime_to_seconds(self.timefilter.start)
    end = datetime_to_seconds(self.timefilter.end)

    # using a RecursiveUpdateDict
    criteria = RecursiveUpdateDict(**{"time_frame": {"start": int(start),
                                                     "end": int(end)}})

    if self.query is not None:
        criteria["query"] = self.query

    if self.resolution != "auto":
        criteria["time_frame"]["resolution"] = self.resolution

    if self.data_filter:
        criteria["deprecated"] = {self.data_filter[0]: self.data_filter[1]}

    if self.trafficexpr is not None:
        criteria["traffic_expression"] = self.trafficexpr.filter

    to_post = {"template_id": self.template_id,
               "criteria": criteria}

    logger.debug("Posting JSON: %s" % to_post)

    response = self.profiler.api.report.reports(data=to_post)

    try:
        self.id = int(response["id"])
    except KeyError:
        raise ValueError("failed to retrieve report id from report "
                         "creation response: %s" % response)

    logger.info("Created report %d" % self.id)

    if sync:
        self.wait_for_complete()
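# Illustrative sketch (assumption: RESOLUTION_MAP keys resolutions in whole
# seconds to the strings Profiler expects, matching the '1min', '15min',
# 'hour', '6hour' choices used throughout this module):
RESOLUTION_MAP = {60: '1min', 900: '15min', 3600: 'hour', 21600: '6hour'}

rd_secs = 900                     # e.g. int(timedelta_total_seconds(parse_timedelta('15min')))
print(RESOLUTION_MAP[rd_secs])    # -> '15min'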
def run(self):
    """ Main execution method. """
    criteria = self.job.criteria

    if criteria.profiler_device == '':
        logger.debug('%s: No profiler device selected' % self.table)
        self.job.mark_error("No Profiler Device Selected")
        return False

    profiler = DeviceManager.get_device(criteria.profiler_device)
    report = rvbd.profiler.report.MultiQueryReport(profiler)

    tf = TimeFilter(start=criteria.starttime, end=criteria.endtime)

    logger.info("Running ProfilerTemplateTable table %d report "
                "for timeframe %s" % (self.table.id, str(tf)))

    trafficexpr = TrafficFilter(
        self.job.combine_filterexprs(exprs=criteria.profiler_filterexpr)
    )

    # Incoming criteria.resolution is a timedelta
    logger.debug('Profiler report got criteria resolution %s (%s)' %
                 (criteria.resolution, type(criteria.resolution)))
    if criteria.resolution != 'auto':
        rsecs = int(timedelta_total_seconds(criteria.resolution))
        resolution = rvbd.profiler.report.Report.RESOLUTION_MAP[rsecs]
    else:
        resolution = 'auto'
    logger.debug('Profiler report using resolution %s (%s)' %
                 (resolution, type(resolution)))

    with lock:
        res = report.run(template_id=self.table.options.template_id,
                         timefilter=tf,
                         trafficexpr=trafficexpr,
                         resolution=resolution)

    if res is True:
        logger.info("Report template complete.")
        self.job.safe_update(progress=100)

    # Retrieve the data
    with lock:
        query = report.get_query_by_index(0)
        data = query.get_data()
        headers = report.get_legend()

        tz = criteria.starttime.tzinfo
        # Update criteria
        criteria.starttime = (datetime.datetime
                              .utcfromtimestamp(query.actual_t0)
                              .replace(tzinfo=tz))
        criteria.endtime = (datetime.datetime
                            .utcfromtimestamp(query.actual_t1)
                            .replace(tzinfo=tz))

    self.job.safe_update(actual_criteria=criteria)

    # create dataframe with all of the default headers
    df = pandas.DataFrame(data, columns=[h.key for h in headers])

    # now filter down to the columns requested by the table
    columns = [col.name for col in self.table.get_columns(synthetic=False)]
    self.data = df[columns]

    if self.table.sortcol is not None:
        self.data = self.data.sort(self.table.sortcol.name)

    if self.table.rows > 0:
        self.data = self.data[:self.table.rows]

    logger.info("Report %s returned %s rows" % (self.job, len(self.data)))
    return True
def run(self):
    """ Main execution method """
    criteria = self.job.criteria

    if criteria.profiler_device == '':
        logger.debug('%s: No profiler device selected' % self.table)
        self.job.mark_error("No Profiler Device Selected")
        return False

    # self.fake_run()
    # return True

    profiler = DeviceManager.get_device(criteria.profiler_device)
    report = rvbd.profiler.report.SingleQueryReport(profiler)

    columns = [col.name for col in self.table.get_columns(synthetic=False)]

    sortcol = None
    if self.table.sortcol is not None:
        sortcol = self.table.sortcol.name

    tf = TimeFilter(start=criteria.starttime, end=criteria.endtime)

    logger.info("Running Profiler table %d report for timeframe %s" %
                (self.table.id, str(tf)))

    if ('datafilter' in criteria) and (criteria.datafilter is not None):
        datafilter = criteria.datafilter.split(',')
    else:
        datafilter = None

    trafficexpr = TrafficFilter(
        self.job.combine_filterexprs(exprs=criteria.profiler_filterexpr)
    )

    # Incoming criteria.resolution is a timedelta
    logger.debug('Profiler report got criteria resolution %s (%s)' %
                 (criteria.resolution, type(criteria.resolution)))
    if criteria.resolution != 'auto':
        rsecs = int(timedelta_total_seconds(criteria.resolution))
        resolution = rvbd.profiler.report.Report.RESOLUTION_MAP[rsecs]
    else:
        resolution = 'auto'
    logger.debug('Profiler report using resolution %s (%s)' %
                 (resolution, type(resolution)))

    with lock:
        report.run(realm=self.table.options.realm,
                   groupby=profiler.groupbys[self.table.options.groupby],
                   centricity=self.table.options.centricity,
                   columns=columns,
                   timefilter=tf,
                   trafficexpr=trafficexpr,
                   data_filter=datafilter,
                   resolution=resolution,
                   sort_col=sortcol,
                   sync=False)

    done = False
    logger.info("Waiting for report to complete")
    while not done:
        time.sleep(0.5)
        with lock:
            s = report.status()

        self.job.safe_update(progress=int(s['percent']))
        done = (s['status'] == 'completed')

    # Retrieve the data
    with lock:
        query = report.get_query_by_index(0)
        self.data = query.get_data()

        tz = criteria.starttime.tzinfo
        # Update criteria
        criteria.starttime = (datetime.datetime
                              .utcfromtimestamp(query.actual_t0)
                              .replace(tzinfo=tz))
        criteria.endtime = (datetime.datetime
                            .utcfromtimestamp(query.actual_t1)
                            .replace(tzinfo=tz))

    self.job.safe_update(actual_criteria=criteria)

    if self.table.rows > 0:
        self.data = self.data[:self.table.rows]

    logger.info("Report %s returned %s rows" % (self.job, len(self.data)))
    return True