def post_process(model_input, model_output, b_apply_k_means=True):
    """Assemble the model's three output patches into one segmentation map
    and build a colormap overlay for display.

    The side patches are concatenated and resized to 500x256; the center
    patch is resized to 250x256 and pasted over columns 125:375. The map is
    min-max normalized, binarized at 0.8, masked, optionally refined with
    k-means, and scaled to uint8.

    Returns a tuple (binary_mask_uint8, overlay_image).
    """
    center_patch = model_output[1]
    stitched = np.concatenate((model_output[0], model_output[2]), 1)
    center_patch = cv2.resize(center_patch, (250, 256))
    stitched = cv2.resize(stitched, (500, 256))
    stitched[:, 125:375] = center_patch

    # Min-max normalize the stitched map before thresholding.
    stitched = (stitched - stitched.min()) / (stitched.max() - stitched.min())

    img = model_input['scaled']
    mask = get_mask(model_input['subject'], model_input['n_slice'])

    # Binarize at 0.8 and keep only the in-mask region.
    stitched[stitched >= 0.8] = 1.0
    stitched[stitched < 0.8] = 0.0
    stitched = stitched * mask

    if b_apply_k_means:
        stitched = apply_k_means(denoise_image(img), stitched)

    stitched = np.array(stitched * 255, dtype=np.uint8)

    # Build the display overlay: JET-colormapped mask blended onto the
    # (3-channel replicated) input image.
    display = np.array(stitched, dtype=np.uint8)
    img = np.stack((model_input['scaled'],) * 3, axis=-1)
    display = cv2.applyColorMap(display, cv2.COLORMAP_JET)
    fin = cv2.addWeighted(display, 0.6, img, 0.4, 0)
    return (stitched, fin)
def prepare_training_data(info):
    """Write per-subject, per-slice, per-strip training samples to ./train.

    For every slice dict that has a ground truth ('gt'), each of the three
    250-pixel-wide vertical strips (stride 125) produces one JSON file:
      'x': [cake_stack over slices j-1..j+1, cake for slice j]
      'y': the ground-truth strip resized to 125x128.

    Strips whose neighbouring slices lack the required keys are skipped.
    """
    for i in range(len(info)):
        # NOTE(review): this bound reproduces the original
        # len(info[i][1:]) - 1 == len(info[i]) - 2; presumably it skips
        # slices without a valid next neighbour — confirm intent.
        for j in range(len(info[i][1:len(info[i])]) - 1):
            if 'gt' not in info[i][j]:
                continue
            for l in range(3):
                layer_mask = get_mask(i + 1, j)[:, l * 125:l * 125 + 250]
                try:
                    # NOTE(review): for j == 0, range(j-1, j+2) makes the
                    # "previous" slice wrap to index -1 (the last slice) —
                    # preserved as-is; verify this is intended.
                    cake_stack = np.stack([
                        prepare_cakes(
                            info[i][k]['denoised_normalized']
                            [:, l * 125:l * 125 + 250] * layer_mask)
                        for k in range(j - 1, j + 2)
                    ], axis=2)
                    cake = prepare_cakes(
                        info[i][j]['denoised_normalized']
                        [:, l * 125:l * 125 + 250] * layer_mask)
                    train_info = {
                        'x': [cake_stack.tolist(), cake.tolist()],
                        'y': cv2.resize(
                            info[i][j]['gt'][:, l * 125:l * 125 + 250],
                            (125, 128)).tolist()
                    }
                    with open(f'./train/s_{i+1:02}-{j:02}-{l}', 'w') as f:
                        json.dump(train_info, f)
                # Bug fix: was a bare `except:`, which also swallows
                # KeyboardInterrupt/SystemExit. Neighbouring slices may
                # legitimately lack 'denoised_normalized' (failed
                # preprocessing) — skip just that strip.
                except Exception:
                    continue
def prepare_input(n_subject, n_slice):
    """Build the model input for one subject/slice.

    Loads slices n_slice-1 .. n_slice+1 from the subject's .mat file,
    scales and masks them, then denoises/normalizes three overlapping
    250-pixel-wide strips (stride 125) per slice.

    Returns a dict with keys 'X' (network input), 'scaled' (scaled center
    slice), 'subject' and 'n_slice'.
    """
    mat = scipy.io.loadmat(f'2015_BOE_Chiu/Subject_{n_subject:02}.mat')
    imgs = mat['images'][:, :, n_slice - 1: n_slice + 2]

    slice_mask = get_mask(n_subject, n_slice)
    scaled_imgs = [scale_image(imgs[:, :, idx]) * slice_mask
                   for idx in range(imgs.shape[2])]

    # Three overlapping strips, each denoised then normalized, for every
    # loaded slice.
    denoised_imgs = [
        [normalize_img(denoise_image(scaled[:, strip * 125:strip * 125 + 250]))
         for scaled in scaled_imgs]
        for strip in range(3)
    ]

    return {
        'X': prepare_X(denoised_imgs),
        'scaled': scale_image(imgs[:, :, 1]),
        'subject': n_subject,
        'n_slice': n_slice,
    }
def extract_data(path='./data', b_write_to_disk=False):
    """Parse Subject_01..Subject_10 .mat files into per-slice info dicts.

    Each slice dict carries subject/slice indices, the raw image, scaled
    and denoised variants, and — when BOTH graders annotated fluid — a
    combined ground truth under 'gt'. Slices whose preprocessing fails are
    still appended (possibly partial) so list indices stay aligned with
    slice numbers.

    Returns a list (one entry per subject) of lists of per-slice dicts.
    """
    info = []
    for i in range(1, 11):
        subject_info = []
        mat = scipy.io.loadmat(f'{path}/Subject_{i:02}.mat')
        images = mat['images']
        grader1 = mat['manualFluid1']
        # Bug fix: previously re-read 'manualFluid1', so both "graders"
        # were identical and generate_ground_truth(mf1, mf2) never saw a
        # second opinion. Load the second grader's annotations.
        grader2 = mat['manualFluid2']
        for j in range(len(images[0][0])):
            info_dict = {}
            mf1 = np.nan_to_num(scale_image(grader1[:, :, j]), nan=0.0)
            mf2 = np.nan_to_num(scale_image(grader2[:, :, j]), nan=0.0)
            newimg = scale_image(images[:, :, j])
            try:
                denoised = denoise_image(newimg) * get_mask(i, j)
                info_dict['i'] = i
                info_dict['j'] = j
                info_dict['img'] = images[:, :, j]
                info_dict['scaled_image'] = newimg
                info_dict['denoised_normalized'] = normalize_img(denoised)
                info_dict['denoised'] = denoised
                # A slice counts as annotated only if both graders marked
                # at least one fluid pixel.
                if np.count_nonzero(mf1) and np.count_nonzero(mf2):
                    info_dict['gt'] = generate_ground_truth(mf1, mf2)
                    if b_write_to_disk:
                        output_to_disk(info_dict)
            # Bug fix: was a bare `except: pass` (also removed a leftover
            # debug stub `if j == 32: a = 1`). Best-effort per-slice
            # handling is preserved, but only Exception is caught so
            # KeyboardInterrupt/SystemExit still propagate.
            except Exception:
                pass
            subject_info.append(info_dict)
        info.append(subject_info)
        print(f'Parsed subject {i:02}')
    return info