def main(trc, tec, vac, cls, weight, testset=None, to_reload=None, test=None):
    """Train and/or test an Inception model on pre-tiled image data.

    Args:
        trc: number of training tiles.
        tec: number of test tiles.
        vac: number of validation tiles.
        cls: number of classes.
        weight: class weights handed to the model constructor.
        testset: optional table describing the test samples (forwarded to
            ``inference``).
        to_reload: meta-graph name of a previously saved model to restore.
        test: if truthy, restore the model and run inference only.

    Relies on module-level configuration: ``bs``, ``ep``, ``md``, ``dirr``,
    ``pdmd``, ``INPUT_DIM``, ``HYPERPARAMS``, ``LOG_DIR``, ``METAGRAPH_DIR``,
    ``cnn5`` and ``tfreloader``.
    """

    def _test_model(m):
        # Inference needs at least one full batch of test images.
        if tec >= bs:
            THE = tfreloader('test', 1, bs, cls, trc, tec, vac)
            m.inference(THE, dirr, testset=testset, pmd=pdmd)
        else:
            print("Not enough testing images!")

    def _train_and_test(m, itt):
        # Loaders are built before the size check (matching the original
        # behavior); training is skipped when the splits are too small.
        HE = tfreloader('train', ep, bs, cls, trc, tec, vac)
        VHE = tfreloader('validation', ep * 100, bs, cls, trc, tec, vac)
        if trc <= 2 * bs or vac <= bs:
            print("Not enough training/validation images!")
        else:
            m.train(HE, VHE, trc, bs, pmd=pdmd, dirr=dirr, max_iter=itt,
                    save=True, outdir=METAGRAPH_DIR)
            _test_model(m)

    if test:  # restore for testing only
        m = cnn5.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload,
                           log_dir=LOG_DIR, meta_dir=LOG_DIR, model=md,
                           weights=weight)
        print("Loaded! Ready for test!")
        _test_model(m)
    elif to_reload:  # restore for further training and testing
        m = cnn5.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=to_reload,
                           log_dir=LOG_DIR, meta_dir=LOG_DIR, model=md,
                           weights=weight)
        print("Loaded! Restart training.")
        _train_and_test(m, int(trc * ep / bs))
    else:  # train a new model from scratch, then test
        m = cnn5.INCEPTION(INPUT_DIM, HYPERPARAMS, log_dir=LOG_DIR,
                           model=md, weights=weight)
        print("Start a new training!")
        # NOTE(review): the fresh-training branch historically used
        # int(trc*ep/bs)+1 while the restore branch used int(trc*ep/bs);
        # preserved as-is.
        _train_and_test(m, int(trc * ep / bs) + 1)
def test(bs, cls, to_reload, LOG_DIR, METAGRAPH_DIR):
    """Restore a saved Inception model and run inference only.

    Args:
        bs: batch size.
        cls: number of classes handed to the data loader.
        to_reload: meta-graph name of the saved model to restore.
        LOG_DIR: directory holding the model's logs.
        METAGRAPH_DIR: directory holding the saved meta-graph.

    Uses module-level ``cnn``, ``tfreloader``, ``md``, ``meta_cutter`` and
    ``pdmd``.
    """
    # Input tensor shape: batch x 299 x 299 x RGB.
    input_dim = [bs, 299, 299, 3]
    # Fixed hyper-parameters for the restored graph.
    hyperparams = {
        "batch_size": bs,
        "dropout": 0.3,
        "learning_rate": 1E-4,
        "classes": 2,
        "sup": False,
    }
    model = cnn.INCEPTION(input_dim, hyperparams, meta_graph=to_reload,
                          log_dir=LOG_DIR, meta_dir=METAGRAPH_DIR, model=md)
    print("Loaded! Ready for test!")
    batches = tfreloader(bs, cls, None)
    model.inference(batches, meta_cutter, Not_Realtest=False, bs=bs, pmd=pdmd)
]) for idx, row in datapd.iterrows(): tile_ids = Sample_prep.paired_tile_ids_in( row['slide'], row['label'], row['path'], row['age'], row['BMI']) test_tiles = pd.concat([test_tiles, tile_ids]) test_tiles.to_csv(data_dir + '/te_sample.csv', header=True, index=False) tes = test_tiles tecc = len(tes['label']) if not os.path.isfile(data_dir + '/test.tfrecords'): loaders.loaderX(data_dir, 'test') m = cnn5.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=opt.modeltoload, log_dir=LOG_DIR, meta_dir=METAGRAPH_DIR, model=opt.mode) print("Loaded! Ready for test!") if tecc >= bs: datasets = data_input_fusion.DataSet(bs, tecc, ep=1, cls=2, mode='test', filename=data_dir + '/test.tfrecords') m.inference(datasets, opt.dirr, testset=tes, pmd=opt.pdmd) else: print("Not enough testing images!")
def main(imgfile, bs, cls, modeltoload, pdmd, md, img_dir, data_dir, out_dir,
         LOG_DIR, METAGRAPH_DIR):
    """Run tile-level inference on one whole-slide image and render overlays.

    Cuts the slide into tiles (cached on disk), runs the restored Inception
    model over them, aggregates per-tile predictions into a slide-level call,
    and writes a scaled original image, a binary mask, a class heat map and a
    heat-map/image overlay into ``out_dir``.

    Args:
        imgfile: slide file name inside ``img_dir``.
        bs: batch size.
        cls: number of classes.
        modeltoload: meta-graph name of the saved model to restore.
        pdmd: prediction mode; ``'immune'`` selects the 4-class score columns.
        md: model name, used as a prefix for the output files.
        img_dir, data_dir, out_dir: input slide / intermediate / output dirs.
        LOG_DIR, METAGRAPH_DIR: model log and meta-graph directories.
    """
    # Score-column names and their human-readable class labels.
    if pdmd == 'immune':
        pos_score = ['im1_score', 'im2_score', 'im3_score', 'im4_score']
        pos_ls = ['im1', 'im2', 'im3', 'im4']
    else:
        pos_score = ["POS_score", "NEG_score"]
        pos_ls = [pdmd, 'negative']
    level = 0
    ft = 2
    slide = OpenSlide(img_dir + imgfile)
    bounds_width = slide.level_dimensions[level][0]
    bounds_height = slide.level_dimensions[level][1]
    x = 0
    y = 0
    half_width_region = 49 * ft
    full_width_region = 299 * ft
    stepsize = (full_width_region - half_width_region)
    # Number of tile steps that fit across / down the slide.
    n_x = int((bounds_width - 1) / stepsize)
    n_y = int((bounds_height - 1) / stepsize)
    # Low-resolution snapshot of the slide used as the overlay background.
    lowres = slide.read_region(
        (x, y), level + 1, (int(n_x * stepsize / 4), int(n_y * stepsize / 4)))
    raw_img = np.array(lowres)[:, :, :3]
    # Cut tiles and build the TFRecord only when not already cached on disk.
    if not os.path.isfile(data_dir + '/level3/dict.csv'):
        cutter(img_dir + imgfile, data_dir)
    if not os.path.isfile(data_dir + '/test.tfrecords'):
        loaderX(data_dir)
    if not os.path.isfile(out_dir + '/' + md + '_Test.csv'):
        # input image dimension
        INPUT_DIM = [bs, 299, 299, 3]
        # hyper parameters
        HYPERPARAMS = {
            "batch_size": bs,
            "dropout": 0.5,
            "learning_rate": 1E-4,
            "classes": cls,
            "sup": False
        }
        m = cnn.INCEPTION(INPUT_DIM, HYPERPARAMS, meta_graph=modeltoload,
                          log_dir=LOG_DIR, meta_dir=METAGRAPH_DIR, model=md)
        print("Loaded! Ready for test!")
        HE = tfreloader(bs, cls, None)
        m.inference(HE, str(imgfile.split('.')[0]), realtest=True, pmd=pdmd,
                    prefix=md + '_Test')
    if not os.path.isfile(out_dir + '/' + md + '_Overlay.png'):
        slist = pd.read_csv(data_dir + '/te_sample.csv', header=0)
        # load dictionary of predictions on tiles
        teresult = pd.read_csv(out_dir + '/' + md + '_Test.csv', header=0)
        # join 2 dictionaries
        joined = pd.merge(slist, teresult, how='inner', on=['Num'])
        joined = joined.drop(columns=['Num'])
        tile_dict = pd.read_csv(data_dir + '/level1/dict.csv', header=0)
        tile_dict = tile_dict.rename(index=str, columns={"Loc": "L0path"})
        joined_dict = pd.merge(joined, tile_dict, how='inner', on=['L0path'])
        logits = joined_dict[pos_score]
        # Per-tile predicted class index; slide-level call is the mean index.
        prd_ls = np.asmatrix(logits).argmax(axis=1).astype('uint8')
        prd = int(np.mean(prd_ls))
        print(str(pos_ls[prd]) + '!')
        print("Prediction score = " + str(logits.iloc[:, prd].mean().round(5)))
        joined_dict['predict_index'] = prd_ls
        # save joined dictionary
        joined_dict.to_csv(out_dir + '/' + md + '_finaldict.csv', index=False)
        # output heat map of pos and neg.
        # initialize a graph and for each RGB channel
        opt = np.full((n_x, n_y), 0)
        hm_R = np.full((n_x, n_y), 0)
        hm_G = np.full((n_x, n_y), 0)
        hm_B = np.full((n_x, n_y), 0)
        # Class index -> (R, G, B). Positive (class 0) is labeled red in the
        # output heat map; unknown indices leave the cell black.
        palette = {
            0: (228, 26, 28),
            1: (55, 126, 184),
            2: (77, 175, 74),
            3: (255, 255, 51),
        }
        for index, row in joined_dict.iterrows():
            xi = int(row["X_pos"])
            yi = int(row["Y_pos"])
            opt[xi, yi] = 255
            color = palette.get(int(row['predict_index']))
            if color is not None:
                hm_R[xi, yi], hm_G[xi, yi], hm_B[xi, yi] = color
        # Expand each tile cell to a 50x50 pixel square.
        opt = opt.repeat(50, axis=0).repeat(50, axis=1)
        # small-scaled original image
        ori_img = cv2.resize(raw_img, (np.shape(opt)[0], np.shape(opt)[1]))
        ori_img = ori_img[:np.shape(opt)[1], :np.shape(opt)[0], :3]
        # Swap R and B (RGB -> BGR) for cv2.imwrite. BUGFIX: the original
        # kept a numpy *view* of channel 0, so assigning channel 0 mutated
        # the view and channel 2 was written back unchanged — both channels
        # ended up equal. .copy() makes the swap actually exchange them.
        tq = ori_img[:, :, 0].copy()
        ori_img[:, :, 0] = ori_img[:, :, 2]
        ori_img[:, :, 2] = tq
        cv2.imwrite(out_dir + '/Original_scaled.png', ori_img)
        # binary output image (tiles that received any prediction)
        topt = np.transpose(opt)
        opt = np.full((np.shape(topt)[0], np.shape(topt)[1], 3), 0)
        opt[:, :, 0] = topt
        opt[:, :, 1] = topt
        opt[:, :, 2] = topt
        # NOTE(review): opt cells are already 255, so opt * 255 saturates;
        # presumably relies on imwrite clamping — confirm intended.
        cv2.imwrite(out_dir + '/Mask.png', opt * 255)
        # output heatmap
        hm_R = np.transpose(hm_R)
        hm_G = np.transpose(hm_G)
        hm_B = np.transpose(hm_B)
        hm_R = hm_R.repeat(50, axis=0).repeat(50, axis=1)
        hm_G = hm_G.repeat(50, axis=0).repeat(50, axis=1)
        hm_B = hm_B.repeat(50, axis=0).repeat(50, axis=1)
        hm = np.dstack([hm_B, hm_G, hm_R])
        cv2.imwrite(out_dir + '/' + md + '_HM.png', hm)
        # superimpose heatmap on scaled original image
        overlay = ori_img * 0.5 + hm * 0.5
        cv2.imwrite(out_dir + '/' + md + '_Overlay.png', overlay)