import sys

# NOTE: TaskModel, long_task, import_annotations, create_socket,
# split_coco_labels and convert_to_coco come from this project's own
# modules; their exact import paths are not shown here.


def get(self):
    """ Dispatches a test Celery task and returns its id and state """
    task_model = TaskModel(group="test", name="Testing Celery")
    task_model.save()
    task = long_task.delay(20, task_model.id)
    return {'id': task.id, 'state': task.state}
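# A minimal sketch of how a caller might poll the task id returned above.
# Celery's AsyncResult resolves against the configured app; the helper name
# get_task_status is hypothetical, not part of this project.
from celery.result import AsyncResult


def get_task_status(task_id):
    """ Looks up the current state of a previously dispatched Celery task """
    result = AsyncResult(task_id)
    return {'id': task_id, 'state': result.state}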
def load_annotation_files(task_id, dataset_id, coco_json_strings, dataset_name):
    """ Task loading one or more JSON files, splitting each one if necessary
    and delegating the actual import to other workers """
    task = TaskModel.objects.get(id=task_id)
    max_json_string_size = 32000000  # upper bound per worker payload, in bytes

    task.update(status="PROGRESS")
    socket = create_socket()
    task.set_progress(0, socket=socket)

    task.info("===== Beginning Loading =====")
    total_files = len(coco_json_strings)

    for file_index, single_json_string in enumerate(coco_json_strings):
        task.info(f"===== Processing file no. {file_index} =====")

        task.info(f"Checking size of JSON string, max allowed size = {max_json_string_size}")
        json_string_size = sys.getsizeof(single_json_string)
        task.info(f"Current file size = {json_string_size}")

        if json_string_size > max_json_string_size:
            task.info("JSON string too large")
            task.info("===== Splitting JSON string =====")
            list_of_json_strings = split_coco_labels(
                single_json_string,
                max_byte_size=max_json_string_size,
                current_task=task
            )
        else:
            task.info("JSON string within size limit")
            list_of_json_strings = [single_json_string]

        task.info("===== Outsourcing import annotations tasks to other workers =====")
        for substring_index, json_substring in enumerate(list_of_json_strings):
            task.info(f"Current subfile size = {sys.getsizeof(json_substring)}")

            # One TaskModel per chunk, so every worker-side import is tracked
            # separately in the "Annotation Import" group.
            load_annotations_task = TaskModel(
                name=f"Import COCO format into {dataset_name}",
                dataset_id=dataset_id,
                group="Annotation Import"
            )
            load_annotations_task.save()

            task.info(f"Sending JSON subfile no. {substring_index} of file no. {file_index} to workers queue")
            import_annotations.delay(load_annotations_task.id, dataset_id, json_substring)

        task.set_progress((file_index + 1) * 100 / total_files, socket=socket)

    task.set_progress(100, socket=socket)
    task.info("===== Finished =====")
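# split_coco_labels is defined elsewhere in this project. The sketch below
# only illustrates one plausible splitting strategy under the same contract
# (JSON string in, list of JSON strings out, each chunk under max_byte_size);
# split_coco_json and its internals are assumptions, not the real helper.
# Re-serializing after every annotation keeps the sketch short but is
# quadratic; a production helper would likely track byte counts incrementally.
import json
import sys


def split_coco_json(coco_json_string, max_byte_size):
    """ Splits a COCO JSON string by partitioning its annotations list,
    duplicating the shared images/categories sections into every chunk """
    coco = json.loads(coco_json_string)
    annotations = coco.get("annotations", [])
    # Everything except annotations is carried into every chunk.
    base = {key: value for key, value in coco.items() if key != "annotations"}

    chunks, current = [], []
    for annotation in annotations:
        current.append(annotation)
        candidate = json.dumps({**base, "annotations": current})
        if sys.getsizeof(candidate) > max_byte_size and len(current) > 1:
            # The last annotation pushed the chunk over the limit,
            # so it starts the next chunk instead.
            chunks.append(json.dumps({**base, "annotations": current[:-1]}))
            current = [annotation]
    chunks.append(json.dumps({**base, "annotations": current}))
    return chunks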
def convert_dataset(task_id, dataset_id, coco_json, dataset_name):
    """ Task converting an uploaded dataset to COCO format, splitting the
    result if necessary and delegating the import to other workers """
    task = TaskModel.objects.get(id=task_id)
    max_json_string_size = 32000000  # upper bound per worker payload, in bytes

    task.update(status="PROGRESS")
    socket = create_socket()

    task.info("===== Beginning Conversion =====")
    task.set_progress(0, socket=socket)

    task.info('Trying to import your dataset...')
    coco_json, success = convert_to_coco(coco_json, task)
    if not success:
        task.info('Format not supported')
        task.set_progress(100, socket=socket)
        task.info("===== Finished =====")
        return

    task.set_progress(50, socket=socket)

    task.info(f"Checking size of JSON string, max allowed size = {max_json_string_size}")
    json_string_size = sys.getsizeof(coco_json)
    task.info(f"JSON string size = {json_string_size}")

    if json_string_size > max_json_string_size:
        task.info("JSON string too large")
        task.info("===== Splitting JSON string =====")
        list_of_json_strings = split_coco_labels(
            coco_json,
            max_byte_size=max_json_string_size,
            current_task=task
        )
    else:
        task.info("JSON string within size limit")
        list_of_json_strings = [coco_json]

    task.set_progress(75, socket=socket)

    task.info("===== Outsourcing import annotations tasks to other workers =====")
    for i, json_substring in enumerate(list_of_json_strings):
        load_annotations_task = TaskModel(
            name=f"Import COCO format into {dataset_name}",
            dataset_id=dataset_id,
            group="Annotation Import"
        )
        load_annotations_task.save()

        task.info(f"Sending JSON subfile to worker {i}")
        import_annotations.delay(load_annotations_task.id, dataset_id, json_substring)

    task.set_progress(100, socket=socket)
    task.info("===== Finished =====")
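# A short usage sketch: convert_dataset is dispatched the same way as the
# tasks above, with a tracking TaskModel created first. The function name
# start_dataset_conversion and its parameters are placeholders for whatever
# the calling endpoint has at hand, not part of this project.
def start_dataset_conversion(dataset, raw_json):
    """ Creates a tracking TaskModel and enqueues a conversion task """
    convert_task = TaskModel(
        name=f"Convert dataset {dataset.name} to COCO",
        dataset_id=dataset.id,
        group="Annotation Import"
    )
    convert_task.save()
    return convert_dataset.delay(convert_task.id, dataset.id, raw_json, dataset.name)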