def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s" % data_root)
    data_provider = ultrasound_util.DataProvider(data_root + "/*.tif",
                                                 a_min=0,
                                                 a_max=210)
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost="dice_coefficient",
    )
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, norm_grads=True, optimizer="adam")
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
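# To eyeball the segmentation, the repo's own helpers (used by several of the
# launchers below) can render input, label, and prediction side by side; a
# sketch to append at the end of launch():
#
#     img = util.combine_img_prediction(x_test, y_test, prediction)
#     util.save_image(img, "prediction.jpg")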
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s" % data_root)
    data_provider = DataProvider(600, glob.glob(data_root + "/*"))
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001),
    )
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    data_provider = DataProvider(572, data_root)
    data, label = data_provider(1)
    weights = None  # (1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001, class_weights=weights),
    )
    path = output_path if restore else util.create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))
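# The commented-out formula above sketches inverse-frequency class weights for
# a one-hot `label` batch. To enable it (an assumption: labels are one-hot and
# the 1/3 factor generalizes to 1/n_class), replace `weights = None` with:
#
#     weights = (1 / data_provider.n_class) / (
#         label.sum(axis=(0, 1, 2)) / data.size)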
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s" % data_root)
    data_provider = DataProvider(600, glob.glob(data_root + "/*"))
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        add_regularizers=True,
        # filter_size=5
    )
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))

    # import numpy as np
    # np.save("prediction", prediction[0, ..., 1])

    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "prediction.jpg")
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    generator = Generator(572, data_root)
    data, label = generator(1)
    weights = None  # (1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    net = unet.Unet(
        channels=generator.channels,
        n_class=generator.n_class,
        layers=layers,
        features_root=features_root,
        add_regularizers=True,
        class_weights=weights,
        # filter_size=5
    )
    path = output_path if restore else create_training_path(output_path)
    # trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(generator, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))

    # import numpy as np
    # np.save("prediction", prediction[0, ..., 1])

    img = util.combine_img_prediction(data, label, prediction)
    util.save_image(img, "prediction.jpg")
def main():
    dp = DataProvider(batchSize=BATCH_SIZE, validationSize=VALIDATION_SIZE)
    dp.readData()
    print("DONE READING DATA")

    # calculate number of iterations per epoch
    iters = dp.getTrainSize() // BATCH_SIZE

    # unet
    net = unet.Unet(channels=1, n_class=2, layers=3,
                    features_root=16, cost="cross_entropy", cost_kwargs={})

    # # trainer
    # options = {"momentum": 0.2, "learning_rate": 0.2, "decay_rate": 0.95}
    # trainer = unet.Trainer(net, optimizer="momentum", plotter=plot, opt_kwargs=options)

    # # train model
    # path = trainer.train(dp, OUTPUT_PATH, training_iters=iters, epochs=EPOCHS,
    #                      dropout=DROPOUT_KEEP_PROB, display_step=DISPLAY_STEP, restore=restore)

    # training is commented out above; restore a previously trained model instead
    path = os.getcwd() + "/retinaModel/model.cpkt"

    x_test, y_test = dp.getTestData(3, crop=False)
    prediction = net.predict(path, x_test)

    # # sanity check
    # fig, ax = plt.subplots(3, 3)
    # ax[0][0].imshow(x_test[0, :, :, 0], cmap=plt.cm.gray)
    # ax[0][1].imshow(y_test[0, :, :, 1], cmap=plt.cm.gray)
    # ax[0][2].imshow(np.argmax(prediction[0, ...], axis=2), cmap=plt.cm.gray)
    # ax[1][0].imshow(x_test[1, :, :, 0], cmap=plt.cm.gray)
    # ax[1][1].imshow(y_test[1, :, :, 1], cmap=plt.cm.gray)
    # ax[1][2].imshow(np.argmax(prediction[1, ...], axis=2), cmap=plt.cm.gray)
    # ax[2][0].imshow(x_test[2, :, :, 0], cmap=plt.cm.gray)
    # ax[2][1].imshow(y_test[2, :, :, 1], cmap=plt.cm.gray)
    # ax[2][2].imshow(np.argmax(prediction[2, ...], axis=2), cmap=plt.cm.gray)
    # plt.show()

    # save test result as image; create the output directory if needed
    if not os.path.lexists(OUTPUT_PATH):
        os.makedirs(OUTPUT_PATH)
    sampleSize = 3
    img = util.combine_img_prediction(x_test[0:sampleSize, ...],
                                      y_test[0:sampleSize, ...],
                                      prediction[0:sampleSize, ...])
    util.save_image(img, "%s/%s.jpg" % (os.getcwd() + "/" + "testResults", "testSample"))

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
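# The commented-out sanity check above collapses the softmax output to a
# per-pixel class map with argmax. As a standalone sketch (assuming numpy is
# imported as np, as the commented block implies):
#
#     pred_mask = np.argmax(prediction, axis=3)  # shape (batch, ny, nx), values in {0, 1}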
def launch(data_root, roidictfile, output_path, training_iters, epochs, restore,
           layers, features_root, val=None):
    with open(roidictfile) as fh:
        roidict = yaml.safe_load(fh)

    val_data_provider = None
    if val:
        val_data_provider = ImageDataProvider(val, roidict)

    data_provider = ImageDataProvider(data_root, roidict)
    data, label = data_provider(1)

    # make sure the labels are not flat
    assert np.any(
        np.asarray([label[-1, ..., nn].var() for nn in range(label.shape[-1])]) > 0)

    weights = None  # (1/3) / (label.sum(axis=2).sum(axis=1).sum(axis=0) / data.size)
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        cost_kwargs=dict(regularizer=0.001, class_weights=weights),
    )
    path = output_path if restore else create_training_path(output_path)
    trainer = unet.Trainer(net, optimizer="adam", opt_kwargs=dict(beta1=0.91))
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore,
                         val_data_provider=val_data_provider)

    prediction = net.predict(path, data)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))))
def launch(data_root, output_path, training_iters, epochs, restore, layers, features_root):
    print("Using data from: %s" % data_root)
    if not os.path.exists(data_root):
        raise IOError("Kaggle Ultrasound Dataset not found")

    data_provider = DataProvider(search_path=data_root + "/*.tif",
                                 mean=100,
                                 std=56)
    net = unet.Unet(
        channels=data_provider.channels,
        n_class=data_provider.n_class,
        layers=layers,
        features_root=features_root,
        # cost="dice_coefficient",
    )
    path = output_path if restore else util.create_training_path(output_path)
    trainer = unet.Trainer(net, batch_size=1, norm_grads=False, optimizer="adam")
    path = trainer.train(data_provider, path,
                         training_iters=training_iters,
                         epochs=epochs,
                         dropout=0.5,
                         display_step=2,
                         restore=restore)

    x_test, y_test = data_provider(1)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
nx = 572
ny = 572
training_iters = 20
epochs = 100
dropout = 0.75  # Dropout, probability to keep units
display_step = 2
restore = False

generator = image_gen.get_image_gen_rgb(nx, ny, cnt=20)

net = unet.Unet(channels=generator.channels,
                n_class=generator.n_class,
                layers=3,
                features_root=16)

trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
path = trainer.train(generator, "./unet_trained",
                     training_iters=training_iters,
                     epochs=epochs,
                     dropout=dropout,
                     display_step=display_step,
                     restore=restore)

x_test, y_test = generator(4)
prediction = net.predict(path, x_test)

print("Testing error rate: {:.2f}%".format(
    unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))

import numpy as np
np.savetxt("prediction.txt", prediction[..., 1].reshape(-1, prediction.shape[2]))

img = util.combine_img_prediction(x_test, y_test, prediction)
util.save_image(img, "prediction.jpg")
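# The text dump above flattens channel 1 of the prediction to 2-D. A sketch of
# restoring it later (assumes the batch size used for the dump, here 4):
#
#     flat = np.loadtxt("prediction.txt")
#     restored = flat.reshape(4, -1, flat.shape[1])  # (batch, ny, nx)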
test_data_provider = image_util.ImageDataProvider(
    search_path=test_dataset_search_pattern,
    data_suffix='.tif',
    mask_suffix='_mask.tif')
x_tests, y_tests = test_data_provider(30)  # 30 images.

#idx = 3
##x_img = Image.fromarray(np.uint8(x_tests[idx] * 255).reshape(x_tests[idx].shape[0], x_tests[idx].shape[1]), mode='L')
#x_img = Image.fromarray(np.uint8(x_tests[idx, :, :, 0] * 255).reshape(x_tests[idx, :, :, 0].shape[0], x_tests[idx, :, :, 0].shape[1]), mode='L')
#x_img.show()
#y_img = Image.fromarray(np.uint8(y_tests[idx, :, :, 0] * 255).reshape(y_tests[idx, :, :, 0].shape[0], y_tests[idx, :, :, 0].shape[1]))
##y_img = Image.fromarray(np.uint8(y_tests[idx, :, :, 1] * 255).reshape(y_tests[idx, :, :, 1].shape[0], y_tests[idx, :, :, 1].shape[1]))
#y_img.show()

predictions = net.predict(model_filepath, x_tests)

print("Error rate = %f" % unet.error_rate(
    predictions, util.crop_to_shape(y_tests, predictions.shape)))

indexes = range(x_tests.shape[0])
for idx in indexes:
    print("\tProcessing %d-th test image..." % idx)
    x_test = x_tests[idx].reshape(1, x_tests[idx].shape[0], x_tests[idx].shape[1], x_tests[idx].shape[2])
    y_test = y_tests[idx].reshape(1, y_tests[idx].shape[0], y_tests[idx].shape[1], y_tests[idx].shape[2])
    prediction = predictions[idx].reshape(1, predictions[idx].shape[0], predictions[idx].shape[1], predictions[idx].shape[2])
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, prediction_dir_path + "/prediction" + str(idx) + ".jpg")
# note: data, label, mask, test_x, path, and data_provider come from earlier, elided code
'''
cv2.imshow('label', label[0, ..., 1])
cv2.waitKey(0)
cv2.destroyAllWindows()
'''

prediction = net.predict('3.30/model.cpkt', data)
print(prediction.shape)

cv2.imshow('label', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()

pred1 = prediction[0, :, :, :]
#pred2 = prediction[1, :, :, :]
#pred3 = prediction[2, :, :, :]

print(unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape)))

#img = util.combine_img_prediction(data, label, prediction)
#util.save_image(img, "prediction.jpg")

'''
prediction
'''
data, label = data_provider(1)
prediction = net.predict(path, test_x)
mask = prediction[0, :, :, :]
print(label[0, :, :, :])
print(mask)
cv2.imshow('mask', mask)
cv2.waitKey(0)
cv2.destroyAllWindows()
def TestData(net, Test_Path, Train_Path, padSize):
    TestImageNum = 7

    Trained_Model_Path = Train_Path + 'model/model.cpkt'
    TestResults_Path = Test_Path + 'results/'

    if not os.path.isdir(TestResults_Path):
        os.makedirs(TestResults_Path)

    AllImage_logical = np.zeros((1924, 1924))
    AllImage = np.zeros((1924, 1924))

    trainer = unet.Trainer(net)
    TestData = image_util.ImageDataProvider(Test_Path + '*.tif', shuffle_data=False)

    L = len(TestData.data_files)
    DiceCoefficient = np.zeros(L)
    LogLoss = np.zeros(L)

    # BB_Cord = np.zeros(L,3)
    BB_Cord = np.zeros((L, 2))

    # parse the tile coordinates out of each file name
    aa = TestData.data_files
    for BB_ind in range(L):
        bb = aa[BB_ind]
        d = bb.find('/img')
        cc = bb[d:len(bb) - 4]
        dd = cc.split('_')
        # imageName = int(dd[0])
        xdim = int(dd[1])
        ydim = int(dd[2])
        # BB_Cord[BB_ind, :] = [xdim, ydim, imageName]
        BB_Cord[BB_ind, :] = [xdim, ydim]

    Data, Label = TestData(L)

    szD = Data.shape
    szL = Label.shape

    data = np.zeros((1, szD[1], szD[2], szD[3]))
    label = np.zeros((1, szL[1], szL[2], szL[3]))

    shiftFlag = 0

    for BB_ind in range(L):
        data[0, :, :, :] = Data[BB_ind, :, :, :].copy()
        label[0, :, :, :] = Label[BB_ind, :, :, :].copy()

        if shiftFlag == 1:
            shiftX = 0
            shiftY = 0
            data = np.roll(data, [0, shiftX, shiftY, 0])
            label = np.roll(label, [0, shiftX, shiftY, 0])

        prediction = net.predict(Trained_Model_Path, data)
        PredictedSeg = prediction[0, ..., 1] > 0.2

        # stitch the 148x148 tile back into the full image
        # ix, iy, ImgNum = BB_Cord[BB_ind, :]
        ix, iy = BB_Cord[BB_ind, :]
        ix = int(148 * ix)
        iy = int(148 * iy)
        # AllImage[148*ix:148*(ix+1), 148*iy:148*(iy+1), ImgNum] = prediction[0, ..., 1]
        # AllImage_logical[148*ix:148*(ix+1), 148*iy:148*(iy+1), ImgNum] = PredictedSeg
        AllImage[ix:148 + ix, iy:148 + iy] = prediction[0, ..., 1]
        AllImage_logical[ix:148 + ix, iy:148 + iy] = PredictedSeg

        # unet.error_rate(prediction, util.crop_to_shape(label, prediction.shape))
        sz = label.shape
        A = padSize // 2  # 20 is for zero padding done for input
        imgCombined = util.combine_img_prediction(data, label, prediction)
        DiceCoefficient[BB_ind] = DiceCoefficientCalculator(
            PredictedSeg, label[0, A:sz[1] - A, A:sz[2] - A, 1])
        util.save_image(imgCombined,
                        TestResults_Path + "prediction_slice" + str(BB_Cord[BB_ind]) + ".jpg")

        Loss = unet.error_rate(prediction, label[:, A:sz[1] - A, A:sz[2] - A, :])
        LogLoss[BB_ind] = np.log10(Loss + eps)  # eps: small constant defined elsewhere, guards log10(0)

    np.savetxt(TestResults_Path + 'DiceCoefficient.txt', DiceCoefficient)
    np.savetxt(TestResults_Path + 'LogLoss.txt', LogLoss)

    im = Image.fromarray(np.uint8(AllImage))
    msk = Image.fromarray(np.uint8(AllImage_logical))
    im.save(TestResults_Path + 'PredictionSeg_' + str(TestImageNum) + '.tif')
    msk.save(TestResults_Path + 'PredictionSeg_' + str(TestImageNum) + '_Logical.tif')

    return AllImage, AllImage_logical
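# DiceCoefficientCalculator is called above but not shown in this snippet.
# A minimal sketch consistent with its usage (binary masks in, scalar Dice out):
def DiceCoefficientCalculator(pred, target):
    # Dice = 2 * |A intersect B| / (|A| + |B|) for binary masks
    pred = np.asarray(pred, dtype=bool)
    target = np.asarray(target, dtype=bool)
    denom = pred.sum() + target.sum()
    if denom == 0:
        return 1.0  # both masks empty: treat as perfect overlap
    return 2.0 * np.logical_and(pred, target).sum() / denom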
if __name__ == '__main__':
    np.random.seed(98765)

    generator = image_gen.GrayScaleDataProvider(nx=572, ny=572, cnt=20, rectangles=False)

    net = unet.Unet(channels=generator.channels,
                    n_class=generator.n_class,
                    layers=3,
                    features_root=16)

    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(generator, "./unet_trained",
                         training_iters=32,
                         epochs=5,
                         dropout=0.75,  # probability to keep units
                         display_step=2)

    x_test, y_test = generator(4)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
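    # Optionally render input, ground truth, and prediction side by side with
    # the repo helpers used by the other launchers (a sketch; the file name is
    # illustrative):
    img = util.combine_img_prediction(x_test, y_test, prediction)
    util.save_image(img, "toy_prediction.jpg")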
numTestImg = len(t1ce_images)

testdata = np.zeros((numTestImg, 240, 240))
##testdata = np.expand_dims(testdata, 0)
##testdata[0] = numTestImg
testlabels = np.zeros((numTestImg, 240, 240))
##testlabels = np.expand_dims(testlabels, 0)
##testlabels[0] = numTestImg
print(np.shape(testdata), "WOOOO")
print(np.shape(testlabels))

counter = 0
for f in os.listdir("C:\\Users\\Magnus\\Documents\\GitHub\\DD2424\\tf_unet\\test\\"):
    if f[-11:-7] == "t1ce":
        testdata[counter] = np.load("C:\\Users\\Magnus\\Documents\\GitHub\\DD2424\\tf_unet\\test\\" + f)
        try:
            s = "C:\\Users\\Magnus\\Documents\\GitHub\\DD2424\\tf_unet\\test\\" + f
            s = s.replace("t1ce", "seg")  # str.replace returns a new string
            testlabels[counter] = np.load(s)
        except FileNotFoundError:
            testlabels[counter] = np.zeros((240, 240))
        counter += 1

testdata = np.expand_dims(testdata, 3)
print(np.shape(testdata))
testlabels = np.expand_dims(testlabels, 3)

prediction = net.predict(path, testdata)
print("Error_rate: ", unet.error_rate(prediction, util.crop_to_shape(testlabels, prediction.shape)))

img = util.combine_img_prediction(np.expand_dims(testdata[5000], axis=0),
                                  np.expand_dims(testlabels[5000], axis=0),
                                  np.expand_dims(prediction[5000], axis=0))
util.save_image(img, "prediction.jpg")
'''
from __future__ import print_function, division, absolute_import, unicode_literals

import numpy as np

from tf_unet import image_gen
from tf_unet import unet
from tf_unet import util


if __name__ == '__main__':
    np.random.seed(98765)

    generator = image_gen.GrayScaleDataProvider(nx=572, ny=572, cnt=20, rectangles=False)

    net = unet.Unet(channels=generator.channels,
                    n_class=generator.n_class,
                    layers=3,
                    features_root=16)

    trainer = unet.Trainer(net, optimizer="momentum", opt_kwargs=dict(momentum=0.2))
    path = trainer.train(generator, "./unet_trained",
                         training_iters=32,
                         epochs=5,
                         dropout=0.75,  # probability to keep units
                         display_step=2)

    x_test, y_test = generator(4)
    prediction = net.predict(path, x_test)

    print("Testing error rate: {:.2f}%".format(
        unet.error_rate(prediction, util.crop_to_shape(y_test, prediction.shape))))
data_provider = image_util.ImageDataProvider('data/train/*.tif')

# setup and training
net = unet.Unet(layers=3, features_root=64, channels=1, n_class=2)
# trainer = unet.Trainer(net)
# path = trainer.train(data_provider, output_path='train_output', training_iters=32, epochs=100)
# exit()

# verification
val_data_provider = image_util.ImageDataProvider('data/val/*.tif')
val_images, val_masks = val_data_provider(5)
prediction = net.predict('train_output/model.ckpt', val_images)

# map the float masks to {255, 0} for writing as images:
# values below 0.5 become 255 (white), values in [0.5, 1) become 0 (black)
mask_sample = val_masks[0, :, :, 0]
print(mask_sample[int(len(mask_sample) / 2)])
mask_sample[mask_sample < 0.5] = 255
mask_sample[mask_sample < 1] = 0
print(mask_sample[int(len(mask_sample) / 2)])

prediction_sample = prediction[0, :, :, 0]
print(prediction_sample[int(len(prediction_sample) / 2)])
prediction_sample[prediction_sample < 0.5] = 255
prediction_sample[prediction_sample < 1] = 0
print(prediction_sample[int(len(prediction_sample) / 2)])

cv2.imwrite('mask.png', mask_sample)
cv2.imwrite('prediction.jpg', prediction_sample)

print(
    unet.error_rate(prediction, util.crop_to_shape(val_masks, prediction.shape)))
# img = util.combine_img_prediction(val_images, val_masks, prediction)
# util.save_image(img, "prediction.jpg")
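# A terser near-equivalent of the two-step thresholding above (a sketch,
# assuming numpy is imported as np), for a float map p in [0, 1):
#
#     binary = np.where(p < 0.5, 255, 0).astype(np.uint8)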