def download_to_file(notebook, snippet, file_format='csv', max_rows=-1, **kwargs):
    from beeswax import data_export

    download_to_file.update_state(task_id=notebook['uuid'], state='STARTED', meta={})
    request = _get_request(**kwargs)
    api = get_api(request, snippet)

    meta = {'row_counter': 0, 'handle': {}, 'status': '', 'truncated': False}

    with storage.open(_log_key(notebook), 'wb') as f_log:
        result_wrapper = ExecutionWrapper(api, notebook, snippet, ExecutionWrapperCallback(notebook['uuid'], meta, f_log))
        content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True)  # TODO: Move PREFETCH_RESULT_COUNT to front end
        response = export_csvxls.create_generator(content_generator, file_format)

        with storage.open(_result_key(notebook), 'wb') as f:
            for chunk in response:
                f.write(chunk)

        meta['row_counter'] = content_generator.row_counter
        meta['truncated'] = content_generator.is_truncated
        download_to_file.update_state(task_id=notebook['uuid'], state='AVAILABLE', meta=meta)

    return meta
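
# Usage sketch, not part of the original code: how a caller might start the
# task above and poll its custom 'AVAILABLE' state. Assumes download_to_file
# is registered as a Celery task, that the notebook uuid doubles as the task
# id (matching the update_state() calls above), and that postdict/user_id are
# the kwargs _get_request() expects, as in the variant signatures below.
def start_export(notebook, snippet, postdict=None, user_id=None):
    from celery.result import AsyncResult

    # Kick off the export asynchronously, keyed by the notebook uuid.
    download_to_file.apply_async(
        args=[notebook, snippet],
        kwargs={'postdict': postdict, 'user_id': user_id},  # assumed kwargs
        task_id=notebook['uuid']
    )

    # A status endpoint would poll like this rather than block.
    result = AsyncResult(notebook['uuid'])
    if result.state == 'AVAILABLE':
        return result.info  # the `meta` dict: row_counter, truncated, ...
    return None
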
def download_to_file(notebook, snippet, file_format='csv', max_rows=-1, **kwargs):
    from beeswax import data_export

    download_to_file.update_state(task_id=notebook['uuid'], state='STARTED', meta={})
    request = _get_request(**kwargs)
    api = get_api(request, snippet)

    meta = {'row_counter': 0, 'handle': {}, 'status': '', 'truncated': False}

    with storage.open(_log_key(notebook), 'wb') as f_log:
        # TODO: use cache for editor 1000 rows and storage for result export
        result_wrapper = ExecutionWrapper(api, notebook, snippet, ExecutionWrapperCallback(notebook['uuid'], meta, f_log))
        content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True)  # TODO: Move FETCH_RESULT_LIMIT to front end
        response = export_csvxls.create_generator(content_generator, file_format)

        with storage.open(_result_key(notebook), 'wb') as f:
            for chunk in response:
                f.write(chunk)

        meta['row_counter'] = content_generator.row_counter
        meta['truncated'] = content_generator.is_truncated
        download_to_file.update_state(task_id=notebook['uuid'], state='AVAILABLE', meta=meta)

    if TASK_SERVER.RESULT_CACHE.get():
        with storage.open(_result_key(notebook)) as f:
            csv_reader = csv.reader(f, delimiter=','.encode('utf-8'))
            caches[CACHES_CELERY_QUERY_RESULT_KEY].set(_result_key(notebook), [row for row in csv_reader], 60 * 5)

    return meta
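
# Sketch only: reading the rows cached above back out of Django's cache.
# Reuses the CACHES_CELERY_QUERY_RESULT_KEY alias and _result_key() helper
# already referenced in this module; the storage fallback is an assumption
# for when the 5-minute TTL set above has expired.
def fetch_cached_result(notebook):
    rows = caches[CACHES_CELERY_QUERY_RESULT_KEY].get(_result_key(notebook))
    if rows is None:
        # Cache miss or expired entry: fall back to the persisted copy.
        with storage.open(_result_key(notebook)) as f:
            rows = [row for row in csv.reader(f)]
    return rows
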
def download_to_file(notebook, snippet, file_format='csv', postdict=None, user_id=None, max_rows=-1):
    from beeswax import data_export

    download_to_file.update_state(task_id=notebook['uuid'], state='STARTED', meta={})
    request = _get_request(postdict, user_id)
    api = get_api(request, snippet)

    f, path = tempfile.mkstemp()
    f_log, path_log = tempfile.mkstemp()
    f_progress, path_progress = tempfile.mkstemp()
    try:
        os.write(f_progress, '0')

        meta = {
            'row_counter': 0,
            'file_path': path,
            'handle': {},
            'log_path': path_log,
            'progress_path': path_progress,
            'status': 'running',
            'truncated': False
        }  # TODO: Truncated

        result_wrapper = ResultWrapper(api, notebook, snippet, ResultWrapperCallback(notebook['uuid'], meta, f_log))
        content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True)  # TODO: Move PREFETCH_RESULT_COUNT to front end
        response = export_csvxls.create_generator(content_generator, file_format)

        for chunk in response:
            os.write(f, chunk)

        meta['row_counter'] = content_generator.row_counter
        download_to_file.update_state(task_id=notebook['uuid'], state='AVAILABLE', meta=meta)
    finally:
        os.close(f)
        os.close(f_log)
        os.close(f_progress)

    return meta
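
# Sketch only, not in the original: how a status endpoint might read back the
# progress tempfile seeded with '0' above. Assumes meta['progress_path'] is
# obtained from the task's reported state and that the file holds a bare
# integer percentage written by the result wrapper.
def read_progress(meta):
    with open(meta['progress_path']) as f_progress:
        value = f_progress.read().strip()
    return int(value) if value else 0
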
def download(self, notebook, snippet, file_format='csv'):
    from beeswax import data_export  # TODO: Move to notebook?
    from beeswax import conf

    result_wrapper = ExecutionWrapper(self, notebook, snippet)

    max_rows = conf.DOWNLOAD_ROW_LIMIT.get()
    max_bytes = conf.DOWNLOAD_BYTES_LIMIT.get()

    content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, max_bytes=max_bytes)
    return export_csvxls.create_generator(content_generator, file_format)
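
# Sketch only: serving the synchronous download() generator above as a
# streamed HTTP response. The view function and filename are assumptions;
# StreamingHttpResponse itself is standard Django and consumes the generator
# chunk by chunk without buffering the whole result in memory.
def download_view(request, notebook, snippet):
    from django.http import StreamingHttpResponse

    api = get_api(request, snippet)
    response = StreamingHttpResponse(api.download(notebook, snippet, file_format='csv'), content_type='application/csv')
    response['Content-Disposition'] = 'attachment; filename="query_result.csv"'
    return response
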
def download_to_file(notebook, snippet, file_format='csv', max_rows=-1, **kwargs):
    from beeswax import data_export

    download_to_file.update_state(task_id=notebook['uuid'], state='STARTED', meta={})
    request = _get_request(**kwargs)
    api = get_api(request, snippet)

    f, path = tempfile.mkstemp()
    f_log, path_log = tempfile.mkstemp()
    try:
        # TODO: We need to move this metadata somewhere else; it gets erased on exception and we can no longer clean up the files.
        meta = {
            'row_counter': 0,
            'file_path': path,
            'handle': {},
            'log_path': path_log,
            'status': 'running',
            'truncated': False
        }

        result_wrapper = ResultWrapper(api, notebook, snippet, ResultWrapperCallback(notebook['uuid'], meta, f_log))
        content_generator = data_export.DataAdapter(result_wrapper, max_rows=max_rows, store_data_type_in_header=True)  # TODO: Move PREFETCH_RESULT_COUNT to front end
        response = export_csvxls.create_generator(content_generator, file_format)

        for chunk in response:
            os.write(f, chunk)

        meta['row_counter'] = content_generator.row_counter
        meta['truncated'] = content_generator.is_truncated
        download_to_file.update_state(task_id=notebook['uuid'], state='AVAILABLE', meta=meta)
    finally:
        os.close(f)
        os.close(f_log)

    return meta
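
# Sketch only, prompted by the metadata TODO above: since `meta` holds the
# temp file paths, one option is a cleanup helper driven by the task's last
# reported state. Assumes the notebook uuid is the task id, as elsewhere in
# this module; AsyncResult is Celery's standard result handle.
def cleanup_export(task_id):
    from celery.result import AsyncResult

    meta = AsyncResult(task_id).info or {}
    for key in ('file_path', 'log_path'):
        path = meta.get(key)
        if path and os.path.exists(path):
            os.remove(path)
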