async def coro_show_image_mat(image_mat, text=None, title=None, cell_size: tuple = None,
                              block=False, image_name=None, cbs=None, task_id=None):
    """Display an image matrix via matplotlib from inside an asyncio task.

    Delegates actual rendering to `show_image_mat` (defined elsewhere in the
    project); if `block` is False, keeps the figure alive with a refresh loop
    until the backend raises (e.g. the window is closed).

    :param image_mat: image data accepted by `show_image_mat`
    :param text: optional text passed through to `show_image_mat`
    :param title: optional figure title passed through to `show_image_mat`
    :param cell_size: optional cell size tuple passed through to `show_image_mat`
    :param block: if True, `show_image_mat` blocks; if False, a refresh loop runs here
    :param image_name: used as callback arguments
    :param cbs: a tuple in order of (on_done, on_succeeded, on_failed, on_progress).
        only `on_done` is dispatched.
    :param task_id: accepted for the task framework's signature; not used here
    :return: no return value
    """
    # NOTE: matplot backend is single-threaded, and can only be accessed from its host thread.
    import matplotlib.pyplot as plt
    import asyncio
    from async_ import amend_blank_cbs

    on_done, on_succeeded, on_failed, on_progress = amend_blank_cbs(cbs)
    ret = {'image_name': image_name}
    try:
        fig = show_image_mat(image_mat, text=text, title=title,
                             cell_size=cell_size, block=block)
    except Exception as e:
        # Report the failure through on_done (error carried as the exception object).
        ret.update({'error': e})
        on_done(ret)
        return
    if not block:
        try:
            async def coro_pause(delay_s):
                # plt.pause() blocks this thread for delay_s; the short sleep
                # afterwards gives control to other tasks in the same loop.
                plt.pause(delay_s)
                await asyncio.sleep(0.1)

            while True:
                # IMPROVE: figure on_close() => manager.destroy(), but cannot hook to it.
                fig.show()
                await coro_pause(1)
        except Exception as e:  # for TKinter backend, a _tkinter.TclError
            # FIX: log tag previously read '[coro_show_image_mats]' (a different,
            # plural-named function) — corrected to this function's name.
            DEBUG(f'[coro_show_image_mat] show loop ended: {e}')
            plt.close(fig)
    on_done(ret)
async def coro_consume_files(abspath_or_list, cbs):
    """Decode the given file path(s) into a dataset and report via callbacks.

    :param abspath_or_list: one absolute file path, or a list of them
    :param cbs: a tuple in order of (on_done, on_succeeded, on_failed, on_progress).
        only `on_done` is dispatched.
    :return: the result dict (== this_task.set_result(result))
    """
    # nonlocal this_task
    # assert this_task is not None, '`this_task` should have been assigned before entering related coro.'
    # NOTE(review): neither alias is referenced directly below — presumably imported
    # for side effects / so TF is initialized before _process_files; confirm before removing.
    import modules.data.decode_tf as decode_tf
    import tensorflow as tf

    # FIX: log tag previously read '[coro_consume_inputs]' (a different function's
    # name, likely copy-pasted) — corrected to this function's name.
    DEBUG(f'[coro_consume_files]: {locals()}')
    on_done, on_succeeded, on_failed, on_progress = amend_blank_cbs(cbs)
    # Normalize the argument to a list of paths.
    filepaths = abspath_or_list if isinstance(abspath_or_list, list) else [abspath_or_list]
    result = {}  # data: tf.data.Dataset::{image_t}, error: optional(str)
    # from helpers.tf_helper import image_example
    # IMPROVE: try to use TFRecordDataset.from_tensors([tf_example])
    # `params_decode` comes from the enclosing scope (not visible in this chunk).
    data = DataManager._process_files(filepaths, **params_decode)
    result.update({'data': data})
    # # if show inputs
    # try:
    #     asynctask = async_show_image_mats(image_mats)
    #     result.update({'asynctask_id': asynctask.id})
    # except Exception as e:
    #     result.update({'error': e.__repr__()})
    on_done(result)
    # TODO: how to link to the next task (e.g. model.predict) so user can monitor process.
    return result  # == this_task.set_result(result)