def upload_study_me(file_path, is_segmentation_model, host, port):
    """Upload a DICOM study to an inference server and save the results.

    Reads every DICOM file found under ``file_path``, sends them as a
    single ``multipart/related`` POST to ``http://host:port/``, then
    decodes the multipart response.  For segmentation models the returned
    probability masks are rendered to images and dumped as
    ``output/output_masks_*.npy``.

    Args:
        file_path: Path handed to ``load_image_data()`` to enumerate images.
        is_segmentation_model: If truthy, request ``get-probability-mask``;
            otherwise ``get-bounding-box-2d``.
        host: Server host name or IP address.
        port: Server port as a string (it is concatenated into the URL).

    Raises:
        SystemExit: If the server replies with a non-200 status code.
    """
    file_dict = []
    headers = {'Content-Type': 'multipart/related; '}
    request_json = {
        'request': 'post',
        'route': '/',
        'inference_command': 'get-probability-mask'
        if is_segmentation_model else 'get-bounding-box-2d'
    }

    images = load_image_data(file_path)
    # images = sort_images(images)

    width = 0
    height = 0
    count = 0
    for image in images:
        try:
            dcm_file = pydicom.dcmread(image.path)
            # Take the study dimensions from the first readable slice.
            if width == 0 or height == 0:
                width = dcm_file.Columns
                height = dcm_file.Rows
            count += 1
            field = str(count)
            # Context manager closes the handle promptly (the original
            # leaked it via open(...).read()).
            with open(image.path, 'rb') as f:
                contents = f.read()
            filename = os.path.basename(os.path.normpath(image.path))
            file_dict.append((field, (filename, contents, 'application/dicom')))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; unreadable files are skipped best-effort.
            print('File {} is not a DICOM file'.format(image.path))
            continue

    print('Sending {} files...'.format(count))
    request_json['depth'] = count
    request_json['height'] = height
    request_json['width'] = width

    # The JSON request document must be the first part of the multipart body.
    file_dict.insert(
        0, ('request_json', ('request',
                             json.dumps(request_json).encode('utf-8'),
                             'text/json')))

    me = MultipartEncoder(fields=file_dict)
    boundary = me.content_type.split('boundary=')[1]
    headers['Content-Type'] = headers['Content-Type'] + 'boundary="{}"'.format(
        boundary)

    r = requests.post('http://' + host + ':' + port + '/',
                      data=me,
                      headers=headers)
    if r.status_code != 200:
        print("Got error status code ", r.status_code)
        # Raise SystemExit explicitly; `exit()` is a site-module helper
        # intended for interactive use only.
        raise SystemExit(1)

    multipart_data = decoder.MultipartDecoder.from_response(r)
    json_response = json.loads(multipart_data.parts[0].text)
    print("JSON response:", json_response)

    # Part 0 is the JSON response; the binary mask buffers follow it.
    mask_count = len(json_response["parts"])
    masks = [
        np.frombuffer(p.content, dtype=np.uint8)
        for p in multipart_data.parts[1:mask_count + 1]
    ]

    if is_segmentation_model:
        output_folder = 'output'
        if images[0].position is None:
            # We must sort the images by their instance UID based on the
            # order of the response:
            identifiers = [
                part['dicom_image']['SOPInstanceUID']
                for part in json_response["parts"]
            ]
            filtered_images = []
            for uid in identifiers:  # renamed from `id` (shadowed builtin)
                image = next(img for img in images if img.instanceUID == uid)
                filtered_images.append(image)
            test_inference_mask.generate_images_for_single_image_masks(
                filtered_images, masks, output_folder)
        else:
            test_inference_mask.generate_images_with_masks(
                images, masks, output_folder)
        print("Segmentation mask images generated in folder: {}".format(
            output_folder))

    print("Saving output masks to files 'output/output_masks_*.npy'")
    # Robustness: the hard-coded output folder may not exist yet.
    os.makedirs('output', exist_ok=True)
    for index, mask in enumerate(masks):
        mask.tofile('output/output_masks_{}.npy'.format(index + 1))
def _select_inference_command(model_type, images, override_inference_command):
    """Map the model type (and study geometry) to the server inference command.

    Preserves the original behavior: the informational print happens for the
    recognized model types even when an override is supplied, and the
    override, if truthy, wins.
    """
    if model_type == BOUNDING_BOX:
        print("Performing bounding box prediction")
        command = 'get-bounding-box-2d'
    elif model_type == SEGMENTATION_MODEL:
        if images[0].position is None:
            # No spatial information available. Perform 2D segmentation.
            print("Performing 2D mask segmentation")
            command = 'get-probability-mask-2D'
        else:
            print("Performing 3D mask segmentation")
            command = 'get-probability-mask-3D'
    else:
        command = 'other'
    return override_inference_command if override_inference_command else command


def upload_study_me(file_path, model_type, host, port, output_folder,
                    attachments, override_inference_command=None,
                    send_study_size=False):
    """Upload a DICOM study (plus optional attachments) to an inference server.

    Sends everything as one ``multipart/related`` POST to
    ``http://host:port/``, decodes the multipart response, renders masks or
    bounding boxes into ``output_folder`` depending on ``model_type``, and
    always writes the raw JSON response to ``output_folder/response.json``.

    Args:
        file_path: Path handed to ``load_image_data()`` to enumerate images.
        model_type: One of ``BOUNDING_BOX`` / ``SEGMENTATION_MODEL`` / other.
        host: Server host name or IP address.
        port: Server port as a string (it is concatenated into the URL).
        output_folder: Directory results are written into (created if absent).
        attachments: Iterable of extra file paths sent as binary parts.
        override_inference_command: If truthy, used verbatim as the
            inference command instead of the one derived from ``model_type``.
        send_study_size: When True, include depth/height/width in the request.

    Raises:
        SystemExit: If the server replies with a non-200 status code.
        AssertionError: If a segmentation response does not contain one
            binary buffer per ``parts`` object.
    """
    file_dict = []
    headers = {'Content-Type': 'multipart/related; '}

    images = load_image_data(file_path)
    images = sort_images(images)

    inference_command = _select_inference_command(
        model_type, images, override_inference_command)

    request_json = {
        'request': 'post',
        'route': '/',
        'inference_command': inference_command
    }

    count = 0
    width = 0
    height = 0
    # Attachments go first, as generic binary parts.
    for att in attachments:
        count += 1
        # Context manager closes the handle promptly (the original leaked
        # it via open(...).read()).
        with open(att, 'rb') as f:
            contents = f.read()
        filename = os.path.basename(os.path.normpath(att))
        file_dict.append((str(count),
                          (filename, contents, 'application/octet-stream')))

    for image in images:
        try:
            dcm_file = pydicom.dcmread(image.path)
            # Take the study dimensions from the first readable slice.
            if width == 0 or height == 0:
                width = dcm_file.Columns
                height = dcm_file.Rows
            count += 1
            with open(image.path, 'rb') as f:
                contents = f.read()
            filename = os.path.basename(os.path.normpath(image.path))
            file_dict.append((str(count),
                              (filename, contents, 'application/dicom')))
        except Exception:
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed; unreadable files are skipped.
            print('File {} is not a DICOM file'.format(image.path))
            continue

    print('Sending {} files...'.format(len(images)))
    if send_study_size:
        # NOTE(review): `count` includes the attachment parts, so `depth`
        # is slices + attachments when attachments are present — confirm
        # against the server contract.
        request_json['depth'] = count
        request_json['height'] = height
        request_json['width'] = width

    # The JSON request document must be the first part of the multipart body.
    file_dict.insert(
        0, ('request_json', ('request',
                             json.dumps(request_json).encode('utf-8'),
                             'text/json')))

    me = MultipartEncoder(fields=file_dict)
    boundary = me.content_type.split('boundary=')[1]
    headers['Content-Type'] = headers['Content-Type'] + 'boundary="{}"'.format(
        boundary)

    r = requests.post('http://' + host + ':' + port + '/',
                      data=me,
                      headers=headers)
    if r.status_code != 200:
        print("Got error status code ", r.status_code)
        # Raise SystemExit explicitly; `exit()` is a site-module helper
        # intended for interactive use only.
        raise SystemExit(1)

    multipart_data = decoder.MultipartDecoder.from_response(r)
    json_response = json.loads(multipart_data.parts[0].text)
    print("JSON response:", json_response)

    # Robustness: make sure the destination exists before any writes.
    os.makedirs(output_folder, exist_ok=True)

    if model_type == SEGMENTATION_MODEL:
        mask_count = len(json_response["parts"])
        # One binary part per object in 'parts'; the two additional
        # multipart objects are the JSON response and request:response
        # digests. Explicit check instead of `assert` (which -O strips);
        # the raised type is unchanged for any caller that catches it.
        if mask_count != len(multipart_data.parts) - 2:
            raise AssertionError(
                "The server must return one binary buffer for each object in "
                "`parts`. Got {} buffers and {} 'parts' objects".format(
                    len(multipart_data.parts) - 2, mask_count))
        masks = [
            np.frombuffer(p.content, dtype=np.uint8)
            for p in multipart_data.parts[1:mask_count + 1]
        ]
        if images[0].position is None:
            # We must sort the images by their instance UID based on the
            # order of the response:
            identifiers = [
                part['dicom_image']['SOPInstanceUID']
                for part in json_response["parts"]
            ]
            filtered_images = []
            for uid in identifiers:  # renamed from `id` (shadowed builtin)
                image = next(img for img in images if img.instanceUID == uid)
                filtered_images.append(image)
            test_inference_mask.generate_images_for_single_image_masks(
                filtered_images, masks, json_response, output_folder)
        else:
            test_inference_mask.generate_images_with_masks(
                images, masks, json_response, output_folder)
        print("Segmentation mask images generated in folder: {}".format(
            output_folder))
        print("Saving output masks to files '{}/output_masks_*.npy'".format(
            output_folder))
        for index, mask in enumerate(masks):
            mask.tofile('{}/output_masks_{}.npy'.format(
                output_folder, index + 1))
    elif model_type == BOUNDING_BOX:
        boxes = json_response['bounding_boxes_2d']
        test_inference_boxes.generate_images_with_boxes(
            images, boxes, output_folder)

    # Always persist the raw JSON response alongside the rendered output.
    with open(os.path.join(output_folder, 'response.json'), 'w') as outfile:
        json.dump(json_response, outfile)