            ids = frame['id']
            max_id = ids.max()
            Log.t('sweep of ids {}..{} returned {} entries', next_min_id, max_id, row_count)
            next_min_id = max_id + 1  # start the next sweep from the row after the last id seen
        Log.i('search for nonparsed responses done, parse count: {}/{}', parse_count, total_row_count)
        return next_min_id

    def find_parser(self, datasource_id, datasources_frame):
        # Linear scan: resolve each subscriber's datasource name back to its id
        # and return the first parser registered for the requested datasource.
        for p in self.subscribers:
            datasource_name = p.datasource_name
            parser_datasource_id = self.store.datasource_id_by_name(
                datasource_name, datasources=datasources_frame)
            if parser_datasource_id == datasource_id:
                return p
        raise ValueError('No parser available for datasource id {}'.format(datasource_id))


if __name__ == '__main__':
    app = ParseApp()
    try:
        app.parse_existing_disk_files()
    except KeyboardInterrupt:
        print('\n\nKeyboardInterrupt\n')
    except Exception as e:
        Log.c('app failed: {}', e)
        stacktrace = traceback.format_exc()
        Log.d('exception stack:\n{}', stacktrace)
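# --- Illustrative sketch (not part of the original app) ---------------------
# The sweep above is keyset pagination: each batch reads rows with
# id >= next_min_id, then advances next_min_id past the largest id seen, so
# no row is scanned twice. A minimal standalone version of that loop,
# assuming a hypothetical fetch_batch(min_id, limit) that returns a DataFrame
# with an 'id' column (empty once no rows remain):

def sweep_ids(fetch_batch, next_min_id=0, limit=1000):
    while True:
        frame = fetch_batch(next_min_id, limit)
        if frame.empty:
            return next_min_id  # all rows consumed
        next_min_id = frame['id'].max() + 1  # resume after the last id seen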
                df_to_append, format='table', data_columns=True)
            row_count = h5.get_storer(job.uid).nrows
            Log.d('...h5 key {}, row count is {}', job.uid, row_count)
        except Exception:
            raise  # propagate append failures unchanged, keeping the original traceback
        Log.d('...time spent adding to h5: {:.2f}s', time.time() - h5_process_start_time)
        row_processing_time = time.time() - subset_process_start_time
        Log.d('...total time spent on subset: {:.2f}s ({:.2f}s per row)',
              row_processing_time, row_processing_time / row_process_count)
        return transaction_min_timestamp


if __name__ == '__main__':
    file_dirpath = OsExpert.path_backstep(__file__)
    pd.options.display.float_format = '{:.2f}'.format
    try:
        app = GeneratorApp()
        app.feed_jobs_forever(
            job_changed_handler=lambda job: None  # Log.w('dummy callback for job: {}', job.uid)
        )
    except KeyboardInterrupt:
        print('\n\nKeyboardInterrupt\n')
    except Exception as e:
        Log.c('app failed: {}', e)
        stacktrace = OsExpert.stacktrace()
        Log.d('stacktrace:\n{}', stacktrace)
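# --- Illustrative sketch (not part of the original app) ---------------------
# The append above uses pandas' HDFStore table format. A minimal standalone
# version of the same pattern, assuming pandas with PyTables installed;
# 'demo.h5' and the 'prices' key are hypothetical names:

import pandas as pd

def append_and_count(h5_path='demo.h5', key='prices'):
    df_to_append = pd.DataFrame({'id': [1, 2], 'price': [10.0, 10.5]})
    with pd.HDFStore(h5_path) as h5:
        # format='table' makes the node appendable; data_columns=True indexes
        # every column so it can be queried on disk without a full load.
        h5.append(key, df_to_append, format='table', data_columns=True)
        return h5.get_storer(key).nrows  # row count after the append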