class CXRApi(Resource):
    unet = Unet(trained=True, model_name=unet_model).cuda()
    chexnet = ChexNet(trained=True, model_name=chexnet_model).cuda()
    heatmap_generator = HeatmapGenerator(chexnet, mode='cam')
    unet.eval()
    chexnet.eval()

    def post(self):
        image_name = request.json.get('image_name')
        print(image_name)
        image = Image.open(f'frontend/public/images/uploads/{image_name}').convert('RGB')

        # run through net
        (t, l, b, r), mask = self.unet.segment(image)
        cropped_image = image.crop((l, t, r, b))
        prob = self.chexnet.predict(cropped_image)

        # save segmentation result
        blended = blend_segmentation(image, mask)
        cv2.rectangle(blended, (l, t), (r, b), (255, 0, 0), 5)
        plt.imsave(f'frontend/public/images/results/segment/{image_name}', blended)

        # save cam result
        w, h = cropped_image.size
        heatmap, _ = self.heatmap_generator.from_prob(prob, w, h)
        p_l, p_t = l, t
        p_r, p_b = 1024 - r, 1024 - b
        heatmap = np.pad(heatmap, ((p_t, p_b), (p_l, p_r)), mode='linear_ramp', end_values=0)
        heatmap = ((heatmap - heatmap.min()) * (1 / (heatmap.max() - heatmap.min())) * 255).astype(np.uint8)
        cam = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET) * 0.4 + np.array(image)
        cv2.imwrite(f'frontend/public/images/results/cam/{image_name}', cam)

        # top-10 disease
        idx = np.argsort(-prob)
        top_prob = prob[idx[:10]]
        top_prob = map(lambda x: f'{x:.3}', top_prob)
        top_disease = DISEASES[idx[:10]]
        prediction = dict(zip(top_disease, top_prob))

        result = {'result': prediction, 'image_name': image_name}
        print(result)
        return result
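For reference, a minimal sketch of how CXRApi might be registered with Flask-RESTful and called from a client. The /api/cxr route, host, port, and the example file name are assumptions, not taken from the original code.

# Hypothetical wiring -- route, host, port, and file name are assumptions.
from flask import Flask
from flask_restful import Api

app = Flask(__name__)
api = Api(app)
api.add_resource(CXRApi, '/api/cxr')

if __name__ == '__main__':
    app.run(host='0.0.0.0', port=5000)

# Example client call, assuming the image was already uploaded to
# frontend/public/images/uploads/:
#   import requests
#   r = requests.post('http://localhost:5000/api/cxr',
#                     json={'image_name': 'patient_0001.png'})
#   print(r.json())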
plt.title('ROC for: ' + modelName + "-" + class_names[i])
print("ROC for: " + modelName + "-" + class_names[i] + " ones- %0.2f" % roc_auc)
# print("ROC for: " + modelName + "-" + class_names[i] + " zeros- %0.2f" % roc_auc2)
plt.plot(fpr, tpr, label='U-ones: AUC = %0.2f' % roc_auc)
# plt.plot(fpr2, tpr2, label='U-zeros: AUC = %0.2f' % roc_auc2)
# plt.plot(fpr3, tpr3, label='AUC = %0.2f' % roc_auc3)
plt.legend(loc='lower right')
# plt.plot([0, 1], [0, 1], 'r--')
# plt.xlim([0, 1])
# plt.ylim([0, 1])
plt.ylabel('True Positive Rate')
plt.xlabel('False Positive Rate')
plt.savefig('ROC_' + modelName + "_" + class_names[i] + ".png")

fig_size = plt.rcParams["figure.figsize"]
fig_size[0] = 30
fig_size[1] = 10
plt.rcParams["figure.figsize"] = fig_size
plt.savefig("ROC1345.png", dpi=1000)
plt.show()

# Generate heatmap
pathInputImage = 'view1_frontal.jpg'
pathOutputImage = 'heatmap_view1_frontal' + modelName + '.png'
pathModel = onesModeltoTest

h = HeatmapGenerator(pathModel, nnClassCount, imgtransCrop)
h.generate(pathInputImage, pathOutputImage, imgtransCrop)
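The plotting above assumes fpr, tpr, and roc_auc have already been computed for class i. A minimal sketch of that step with scikit-learn follows; the array names, shapes, and random stand-in data are assumptions, not the original script's variables.

import numpy as np
from sklearn.metrics import roc_curve, auc

# Stand-in arrays; in the real script these come from the model's outputs.
ground_truth = np.random.randint(0, 2, size=(100, 14))  # binary labels per class
predictions = np.random.rand(100, 14)                   # predicted probabilities

i = 0  # class index
fpr, tpr, _ = roc_curve(ground_truth[:, i], predictions[:, i])
roc_auc = auc(fpr, tpr)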
plt.title("%s: AUC = %0.3f" % (class_name, auc)) plt.plot(fpr, tpr, label="%s: AUC = %0.3f" % (class_name, auc)) plt.xlabel("Specificity") plt.ylabel("Sensitivity") plt.savefig("plots/%s_roc.png" % class_name) plt.clf() if args.data_stats: train_dataset = CheXpertTorchDataset("CheXpert-v1.0-small/train.csv", None) valid_dataset = CheXpertTorchDataset("CheXpert-v1.0-small/valid.csv", None) class_names = [ "No Finding", "Enlarged Cardiom.", "Cardiomegaly", "Lung Opacity", "Lung Lesion", "Edema", "Consolidation", "Pneumonia", "Atelectasis", "Pneumothorax", "Pleural Effusion", "Pleural Other", "Fracture", "Support Devices" ] label_stats(train_dataset.labels, class_names, 'Train Label Stats') label_stats(valid_dataset.labels, class_names, 'Test Label Stats') if args.heat_maps: hmg = HeatmapGenerator(model.model, device) index = 1 for src_image_path in args.heat_maps: patient_id = src_image_path.split('/')[2] hmg.generate(src_image_path, "heat_map/%s_1.png" % patient_id) index += 1
class AttentionDataset(Dataset):
    def __init__(self):
        df = pd.read_csv('../csv/Data_Entry_Clean.csv')
        self.image_names = df['Image Index'].values
        self.tfm = transforms.Compose([
            transforms.Resize(224),
            transforms.ToTensor(),
            transforms.Normalize(IMAGENET_MEAN, IMAGENET_STD)
        ])

    def __getitem__(self, i):
        image = Image.open(PATH/IMAGE_DN/self.image_names[i]).convert('RGB')
        return self.tfm(image), self.image_names[i]

    def __len__(self):
        return len(self.image_names)


dataset = AttentionDataset()
dataloader = DataLoader(dataset, 16)  # fastai dataloader
g = HeatmapGenerator()

for images, image_names in tqdm(iter(dataloader)):
    heatmaps, _ = g.generate(images)
    for i in range(len(image_names)):  # last batch may hold fewer than 16 images
        heatmap = to_np(heatmaps[i])
        heatmap = cv2.resize(heatmap, (1024, 1024))
        crop_attention(heatmap, image_names[i])
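crop_attention is used above but not defined here. One rough sketch of what such a helper could do is given below: keep the strongest attention region and crop the original image around it. The threshold, output directory, and reuse of PATH/IMAGE_DN are assumptions.

import numpy as np
from PIL import Image

def crop_attention(heatmap, image_name, threshold=0.7, out_dir='crops'):
    # Keep pixels whose attention is within the top (1 - threshold) of the heatmap's range.
    mask = heatmap >= heatmap.min() + threshold * (heatmap.max() - heatmap.min())
    ys, xs = np.where(mask)
    if len(xs) == 0:
        # nothing above threshold; fall back to the full image
        t, l, b, r = 0, 0, heatmap.shape[0], heatmap.shape[1]
    else:
        t, l, b, r = ys.min(), xs.min(), ys.max(), xs.max()
    # Reuses the same PATH/IMAGE_DN constants as AttentionDataset; out_dir must exist.
    image = Image.open(PATH/IMAGE_DN/image_name).convert('RGB')
    image.crop((l, t, r, b)).save(f'{out_dir}/{image_name}')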