def _resize(self, im):
    size = P().model_input_size
    if im.shape[0] == size:
        return im
    return cv2.resize(im, (size, size), interpolation=cv2.INTER_CUBIC)
def pre_process():
    # Augment every cut mitosis patch: save the original, its horizontal
    # mirror, and rotated copies of both.
    dirTrain2 = QDir(P().saveCutMitosDir)
    imagesFilter = ['*.png', '*.tif', '*.bmp']
    infoList = list(dirTrain2.entryInfoList(imagesFilter))

    i = 0
    for fileInfo in infoList:
        i += 1
        imagePath = fileInfo.absoluteFilePath()
        basenameExt = os.path.basename(imagePath)
        baseName, _ = os.path.splitext(basenameExt)
        savePath = P().saveMitosisPreProcessed

        imbase = cv2.imread(imagePath)
        imXMirror = cv2.flip(imbase, 1)

        cv2.imwrite(savePath + fileInfo.fileName(), imbase)
        cv2.imwrite(savePath + baseName + '-mirror.png', imXMirror)
        rotateImageAndSave(imbase, baseName, savePath)
        rotateImageAndSave(imXMirror, baseName + '-mirror', savePath)
        print('%d / %d' % (i, len(infoList)))
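# rotateImageAndSave is defined elsewhere in the project; a minimal sketch of
# what it is assumed to do (write 90/180/270 degree rotations of a patch next
# to the original) is shown below. The suffixes and exact angles used by the
# real helper may differ.
import cv2

def rotate_image_and_save_sketch(im, base_name, save_path):
    rotations = {'-rot90': cv2.ROTATE_90_CLOCKWISE,
                 '-rot180': cv2.ROTATE_180,
                 '-rot270': cv2.ROTATE_90_COUNTERCLOCKWISE}
    for suffix, flag in rotations.items():
        cv2.imwrite(save_path + base_name + suffix + '.png', cv2.rotate(im, flag))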
def __init__(self, file_list):
    self.save_candidates_dir_path = P().saveCutCandidatesDir
    self.save_mitosis_dir_path = P().saveCutMitosDir
    self.file_list = file_list
    self.candidates_json_save_path = P().candidatesTrainingJsonPath
    self.img_with_keypoints_save_path = P().saveCandidatesWholeImgDir
    self.candidates_save_as_tar = True
    self.mitosis_save_as_tar = False
    self.write_img_to_disk = True
    self.bsave_img_keypoints = False
    self.extract_windows = P().candidates_size
def train_model(ratio, use_all):
    selection = True
    if use_all:
        selection = False
    elif ratio <= 0:
        raise ValueError('ratio must be greater than 0')

    train = ld.dataset(P().saveCutCandidatesDir + 'candidates.tar',
                       P().saveMitosisPreProcessed)
    xe, ye = train.get_training_sample(ratio=ratio, selection=selection)

    model = create_fel_res()
    # model = create_simple_model()
    # model = create_squeeze_net()
    # bagging_classifier = Bagging(estimator_func=create_simple_model,
    #                              n_estimators=31,
    #                              max_samples=0.1,
    #                              bootstrap=True)

    class_weight = _get_class_weights(ye)
    epochs = 40  # overrides P().model_epoch
    batch_size = 128

    test = ld.dataset(P().saveTestCandidates + 'candidates.tar', P().saveTestMitos)
    xval, yval = test.get_training_sample(shuffle=False, selection=False)
    yval_cat = np_utils.to_categorical(yval)
    del train, test  # saves around 500 MB of RAM

    end_callback = End_training_callback()
    # model.fit(xe, target, epochs=epochs, verbose=2,
    #           class_weight=class_weight,
    #           validation_data=(xval, yval_cat),
    #           batch_size=128,
    #           callbacks=[end_callback])

    train_metric, val_metric, test_metrics = _do_train(model, (xe, ye),
                                                       (xval, yval),
                                                       epochs, batch_size)
    if sys.platform == 'win32':
        print_plots(model.metrics_names, train_metric, val_metric, test_metrics)
    else:
        dump_metrics_2_file(train_metric, val_metric, test_metrics)

    # bagging_classifier.fit(xe, ye, epochs,
    #                        batch_size=batch_size,
    #                        callbacks=[end_callback],
    #                        class_weight=class_weight,
    #                        validation_data=(xval, yval_cat))
    # save_bagging_model(bagging_classifier)
    save_model(model, 'model1')
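# _get_class_weights is defined elsewhere in the module; a minimal sketch of
# what it is assumed to compute (weights inversely proportional to class
# frequency, keyed by class index as Keras expects) is shown below. The
# project's actual helper may differ.
import numpy as np

def _get_class_weights_sketch(labels):
    labels = np.asarray(labels).astype(int).ravel()
    counts = np.bincount(labels)
    total = labels.size
    # weight_c = total / (n_classes * count_c), the usual "balanced" scheme
    return {c: total / (len(counts) * counts[c]) for c in range(len(counts))}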
def getInputDim():
    import keras.backend as K

    img_width = P().model_input_size
    img_height = P().model_input_size
    if K.image_data_format() == 'channels_first':
        dim = (3, img_width, img_height)
    else:
        dim = (img_width, img_height, 3)
    return dim
def extract_candidates(args):
    """
    Set the params for extracting candidates from the specified folder.
    The candidates are split into training and testing sets and saved in
    their corresponding folders.

    :param args: namespace containing the parameters entered by the user
    """
    if args.custom_folder is None:
        folder_path = P().normHeStainDir
    else:
        folder_path = args.custom_folder
    if not os.path.isdir(folder_path):
        raise FileNotFoundError('The path does not exist')

    # split the files into training and testing
    filters = ['*.bmp', '*.png', '*.jpg']
    file_list = utils.listFiles(folder_path, filters)
    # train_list = file_list[0:len(file_list) - args.number_test_img]
    # test_list = file_list[-args.number_test_img:]

    # pick a random sample of files for validation and keep the rest
    # for training
    selection_index = random.sample(range(len(file_list)), k=args.number_test_img)
    test_list = [file_list[i] for i in selection_index]
    for i in sorted(selection_index, reverse=True):
        del file_list[i]

    train_params = cs.Candidates_extractor_params(file_list)
    test_params = cs.Candidates_extractor_params(test_list)
    if args.dont_save:
        train_params.write_img_to_disk = False
        test_params.write_img_to_disk = False
    if args.save_img_keypoint:
        train_params.bsave_img_keypoints = True
        test_params.bsave_img_keypoints = True

    # params specific to the testing split
    test_params.save_candidates_dir_path = P().saveTestCandidates
    test_params.save_mitosis_dir_path = P().saveTestMitos
    test_params.candidates_json_save_path = P().candidatesTestJsonPath

    train_extractor = cs.Candidates_extractor(train_params)
    test_extractor = cs.Candidates_extractor(test_params)
    train_extractor.extract()
    if args.number_test_img > 0:
        test_extractor.extract()
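# A minimal usage sketch, assuming the command-line parser exposes these
# attribute names (they match what extract_candidates reads above); the
# project's real argument parser may define them differently.
import argparse

args = argparse.Namespace(custom_folder=None,    # None falls back to P().normHeStainDir
                          number_test_img=5,     # HPF images held out for testing
                          dont_save=False,
                          save_img_keypoint=False)
extract_candidates(args)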
def test_model():
    import time

    model = load_model("b_model")
    # xt, yt = load_test_data()
    # fscore, prec, res_round, res = _do_test(model, xt, yt)
    # idx = res < 0.2
    # res_round = np.copy(res)
    # res_round[idx] = 0
    # res_round[np.logical_not(idx)] = 1
    # metrics.print_conf_matrix(yt, res_round)
    # print('fscore: {}'.format(fscore))
    # print('precision: {}'.format(prec))
    # write_test_output(yt, res)
    # plot_roc(yt, res)
    # plot_precision_recall(yt, res)

    test_json_path = P().candidatesTestJsonPath
    with open(test_json_path) as file:
        json_string = file.read()
    cand_dict = json.loads(json_string)

    tester = MitosTester(cand_dict, model)
    t0 = time.time()
    tester.evaluate_all()
    t1 = time.time()
    print(t1 - t0)
    K.clear_session()
def _extract_test_candidate(self, im, point_list, base_name):
    from mitos_extract_anotations.ImCutter import No_save_ImCutter

    imcutter = No_save_ImCutter(im)
    test_candidate_list = []
    save_dir = P().saveTestCandidates
    sufix_num = 0
    for p in point_list:
        point = (p['row'], p['col'])
        candidate_im = imcutter.cut(point[1], point[0])
        candidate_im = self.normalize(candidate_im)

        # is_mitos() returns True (1) if the point is close to a mitotic cell,
        # but in the model label 0 stands for mitosis, so the result is negated
        label = int(not self.verificator.is_mitos(point))
        candidate = Testing_candidate(im=candidate_im,
                                      pos=point,
                                      label=label,
                                      base_im_name=base_name)
        test_candidate_list.append(candidate)
        # if label == 1:
        #     save_path = '{}{}-{}.png'.format(save_dir, base_name, sufix_num)
        # else:
        #     save_path = '{}{}-{}-mitosis.png'.format(save_dir, base_name, sufix_num)
        # cv2.imwrite(save_path, candidate_im)
        # sufix_num += 1
    return test_candidate_list
def train_model(ratio, use_all):
    selection = True
    if use_all:
        selection = False
    elif ratio <= 0:
        raise ValueError('ratio must be greater than 0')

    train = ld.dataset(P().saveCutCandidatesDir + 'candidates.tar',
                       P().saveMitosisPreProcessed)
    xe, ye = train.get_training_sample(ratio=ratio, selection=selection)
    xt, yt = load_test_data()

    # from mitosCalsification.crossval import _load_json_data
    # filters = ['*.bmp', '*.png', '*.jpg']
    # train_file_info = listFiles(P().basedir + 'normalizado/fullStainTrain', filters)
    # test_file_info = listFiles(P().basedir + 'normalizado/fullStainTest', filters)
    # mitosis_anotations, candidates_anotations = _load_json_data()
    # from mitosCalsification.loadDataset import extract_anotations
    # xe, ye, xt, yt = extract_anotations(train_file_info,
    #                                     test_file_info,
    #                                     mitosis_anotations,
    #                                     candidates_anotations)

    # model = create_fel_res()
    model = create_simple_model()
    # model = create_squeeze_net()
    # model = create_simple2()
    # model = load_model('b_model')  # best model previously trained

    clasificator = MitosisClasificatorTrainer(model, (xe, ye), (xt, yt),
                                              epochs=100, batch_size=128)
    generator = ImageDataGenerator(rotation_range=44,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   shear_range=0.21,
                                   zoom_range=0.3,
                                   fill_mode='wrap',
                                   horizontal_flip=True,
                                   vertical_flip=True)
    # clasificator.generator = generator

    clasificator.train()
    clasificator.plot_metrics_to_disk()
    save_model(model, 'model1')
    K.clear_session()
def _load_json_data():
    # load the annotation files and combine them into a single dictionary
    with open(P().candidatesTrainingJsonPath) as f:
        json_string = f.read()
    candidates_dict = json.loads(json_string)

    with open(P().candidatesTestJsonPath) as f:
        json_string = f.read()
    test_candidates_dict = json.loads(json_string)
    candidates_dict.update(test_candidates_dict)

    with open(P().mitosAnotationJsonPath) as f:
        json_string = f.read()
    mitosis_anotations = json.loads(json_string)

    return mitosis_anotations, candidates_dict
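# The annotation JSON files are assumed to map each HPF base name to a list of
# points with 'row' and 'col' keys, which is the shape the rest of the code
# indexes into (p['row'] / p['col']); the coordinate values below are purely
# illustrative.
example_anotations = {
    "A04_02": [{"row": 812, "col": 1430},
               {"row": 355, "col": 901}],
}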
def __init__(self):
    with open(P().mitosAnotationJsonPath) as file:
        self.jsonDict = json.loads(file.read())
    self.verificated_mitos = 0
    self.base_name = None
    self.not_found_points = []
def _map_to_testing_candidates(self):
    for base_name in sorted(self.json_dict):
        point_list = self.json_dict[base_name]
        self.verificator.set_base_name(base_name)

        im_path = P().normHeStainDir + base_name + '.bmp'
        im = cv2.imread(im_path)
        cand_list = self._extract_test_candidate(im, point_list, base_name)
        self.testing_candidates_list.extend(cand_list)
        self.not_detected += len(self.verificator.not_found_points)
def print_detections_images(self, y_pred, base_name_list):
    mitosis_anotations_path = P().mitosAnotationJsonPath
    with open(mitosis_anotations_path) as file:
        json_string = file.read()
    mitosis_anotations = json.loads(json_string)

    detection_dict = self._map_detections_to_dict(y_pred)
    for base_name in base_name_list:
        image_path = P().basedir + 'normalizado/testHeStain/' + base_name + '.bmp'
        image = cv2.imread(image_path)
        self._print_mitosis(image, mitosis_anotations[base_name])

        save_im_path = P().basedir + 'resultado/' + base_name + '.png'
        detection_list = detection_dict[base_name]
        self._print_detected(image, detection_list)
        cv2.imwrite(save_im_path, image)
def __init__(self, anotations_dict, model):
    self.anotations_dict = anotations_dict
    self.verificator = MitosVerification()
    self.HPF_dirpath = P().basedir + 'normalizado/testHeStain/'
    self.not_detected = 0
    self.not_detected_points = {}
    self.testing_candidates_list = []
    self.model = model
    self.is_dataset_loaded = False
    self.predicted_labels = []
    self.base_name_list = []
def __init__(self, json_dict, HPF_dirpath=P().normHeStainDir):
    self.json_dict = json_dict
    self.verificator = MitosVerification()
    self.HPF_dirpath = HPF_dirpath
    self.not_detected = 0
    self.not_detected_points = {}
    self.testing_candidates_list = []
    self._pos = 0
    self._labels = []
    self._predicted_labels = []
    self._map_to_testing_candidates()
def crossval(n_fold=10):
    if os.path.exists('res.txt'):
        os.remove('res.txt')

    # create a list of all High Power Field image paths
    filters = ['*.bmp', '*.png', '*.jpg']
    train_file_list = listFiles(P().basedir + 'normalizado/heStain/', filters)
    test_file_list = listFiles(P().basedir + 'normalizado/testHeStain', filters)
    # train_file_list = listFiles(P().basedir + 'normalizado/fullStainTrain/', filters)
    # test_file_list = listFiles(P().basedir + 'normalizado/fullStainTest', filters)
    file_list = train_file_list
    file_list.extend(test_file_list)
    file_list = np.asarray(file_list)

    mitosis_anotations, candidates_anotations = _load_json_data()

    i = 1
    kfold = KFold(n_splits=n_fold, shuffle=True)
    for train_index, test_index in kfold.split(file_list):
        print('iteration: {}/{}'.format(i, n_fold))
        i += 1
        train_im_fileInfo = file_list[train_index]
        test_im_fileInfo = file_list[test_index]
        x_train, y_train, x_test, y_test = extract_anotations(train_im_fileInfo,
                                                              test_im_fileInfo,
                                                              mitosis_anotations,
                                                              candidates_anotations)

        model = create_simple_model()
        trainer = MitosisClasificatorTrainer(model, (x_train, y_train),
                                             (x_test, y_test), epochs=40)
        trainer.train()

        best_score = trainer.best_score
        with open('res.txt', 'a') as file:
            file.write('{}\n'.format(best_score))
def extract_anotation_single_image(image, point_list, invert_dim=False):
    imcutter = No_save_ImCutter(image, cut_size=81)
    cutted_im_list = []
    for p in point_list:
        if invert_dim:
            cutted_im = imcutter.cut(p['col'], p['row'])
        else:
            cutted_im = imcutter.cut(p['row'], p['col'])

        size = P().model_input_size
        cutted_im = cv2.resize(cutted_im, (size, size),
                               interpolation=cv2.INTER_CUBIC)
        cutted_im = _preprocess(cutted_im)
        cutted_im_list.append(cutted_im)
    return cutted_im_list
def print_res_to_img(self):
    im = cv2.imread('C:/Users/felipe/mitos dataset/normalizado/A04_02.bmp')
    base_name = 'A04_02'
    i = 0
    while base_name == 'A04_02':
        candidate = self.testing_candidates_list[i]
        i += 1
        base_name = candidate.base_im_name
        pos = candidate.pos
        prediction = candidate.predicted_label
        label = candidate.label

        if label == 0:
            if prediction == 0:
                color = (255, 0, 0)  # blue: mitosis correctly detected
            else:
                color = (0, 255, 0)  # green: mitosis missed by the model
            cv2.circle(im, pos, 25, color, thickness=2)
        elif prediction == 0 and label == 1:
            # red: false positive detection
            cv2.circle(im, pos, 25, (0, 0, 255), thickness=2)

    base_dir = P().basedir
    save_path = base_dir + 'test/print/A04_02.jpg'
    cv2.imwrite(save_path, im)
        from mitosCalsification.metrics import print_conf_matrix
        print_conf_matrix(self._labels, self._predicted_labels)

    def get_candidates(self):
        candidates = []
        for cand in self.testing_candidates_list:
            candidates.append(cand.im)
        return np.asarray(candidates)

    def normalize(self, im):
        im = np.asarray(im, np.float32)
        im /= 255
        return im


if __name__ == "__main__":
    from common.Params import Params as P

    test_json_path = P().basedir + 'anotations/test_cand.json'
    with open(test_json_path) as file:
        json_string = file.read()
    cand_dict = json.loads(json_string)

    mte = Mitos_test_evaluator(cand_dict, P().basedir + 'normalizado/heStain/')
    for c in mte:
        j = 0
    print(mte.not_detected)
    print(mte.not_detected_points)
    i = 0
        if len(self._predicted_labels) == 0:
            raise ValueError('No predicted labels available')

        from mitosCalsification.metrics import print_conf_matrix
        print_conf_matrix(self._labels, self._predicted_labels)

    def get_candidates(self):
        candidates = []
        for cand in self.testing_candidates_list:
            candidates.append(cand.im)
        return np.asarray(candidates)

    def normalize(self, im):
        im = np.asarray(im, np.float32)
        im /= 255
        return im


if __name__ == "__main__":
    from common.Params import Params as P

    test_json_path = P().candidatesTestJsonPath
    with open(test_json_path) as file:
        json_string = file.read()
    cand_dict = json.loads(json_string)

    mte = Mitos_test_evaluator(cand_dict)
    for c in mte:
        j = 0
    print(mte.not_detected)
    i = 0
def createBlobDetector():
    params = P().blobDetectorParams
    return cv2.SimpleBlobDetector_create(params)
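# P().blobDetectorParams is assumed to be a pre-configured
# cv2.SimpleBlobDetector_Params object; a minimal sketch of how such an object
# can be built (the threshold and area values here are illustrative, not the
# project's actual settings):
import cv2

def make_blob_detector_params_sketch():
    params = cv2.SimpleBlobDetector_Params()
    params.minThreshold = 10
    params.maxThreshold = 200
    params.filterByArea = True
    params.minArea = 30              # discard very small blobs
    params.filterByCircularity = False
    params.filterByConvexity = False
    return params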
        cv2.imwrite(save_path, im_with_keypoints)

    def finish(self):
        self.candidate_cutter.close_tar()
        if self.mitos_cutter is not None:
            self.mitos_cutter.close_tar()

        json_string = json.dumps(self.candidates_dict, sort_keys=True, indent=4)
        with open(self.params.candidates_json_save_path, 'w') as file:
            file.write(json_string)
        print('Total candidates: {}'.format(self.candidates_count))


if __name__ == "__main__":
    filter = ['*.bmp', '*.png', '*.jpg']
    file_list = utils.listFiles(P().normHeStainDir, filter)
    train_list = file_list[0:30]
    test_list = file_list[-5:]

    p = Candidates_extractor_params(train_list)
    c = Candidates_extractor(p)
    c.extract()

    # extract the testing dataset
    param = Candidates_extractor_params(test_list)
    param.save_candidates_dir_path = P().saveTestCandidates
    param.save_mitosis_dir_path = P().saveTestMitos
    c = Candidates_extractor(param)
    c.extract()
from common import utils
from mitos_extract_anotations import candidateSelection as cs
from common.Params import Params as P

if __name__ == '__main__':
    filter = ['*.bmp', '*.png', '*.jpg']
    file_list = utils.listFiles(P().basedir + 'normalizado/testHeStain', filter)

    params = cs.Candidates_extractor_params(file_list)
    params.candidates_json_save_path = P().basedir + 'anotations/test_cand.json'
    params.save_candidates_dir_path = P().basedir + 'test/no-mitosis/'
    params.save_mitosis_dir_path = P().basedir + 'test/mitosis/'
    params.bsave_img_keypoints = True
    params.bappend_mitosis_to_json = True

    cutter = cs.Candidates_extractor(params)
    cutter.extract()
def _map_detections_to_dict(self, y_pred):
    detection_dict = {}
    for pred, candidate in zip(y_pred, self.testing_candidates_list):
        if candidate.base_im_name not in detection_dict:
            detection_dict[candidate.base_im_name] = []
        # a predicted label of 0 means the candidate was classified as mitosis
        if pred == 0:
            detection_dict[candidate.base_im_name].append(candidate.pos)
    return detection_dict


if __name__ == '__main__':
    from common.utils import listFiles

    json_path = P().basedir + 'anotations/test_cand.json'
    with open(json_path) as file:
        string = file.read()
    cand_dict = json.loads(string)

    tester = MitosTester(cand_dict, None)
    tester._load_dataset()
    not_detected = tester.not_detected
    print(not_detected)

    i = 0
    for cand in tester.testing_candidates_list:
        if cand.label == 0:
            i += 1
    print(i)
        for model in self.estimators:
            res = model.predict(x, verbose=0)
            res = np.argmax(res, 1)[0]
            proba[res] += 1
        return self._mayority_vote(proba)

    def _mayority_vote(self, prediction_matrix):
        matrix_shape = prediction_matrix.shape
        # a 1-D vector holds the vote counts for a single sample
        if len(matrix_shape) == 1:
            return np.argmax(prediction_matrix)
        else:
            return np.argmax(prediction_matrix, axis=1)


if __name__ == '__main__':
    from mitosCalsification import loadDataset as ld
    from common.Params import Params as P

    train = ld.dataset(P().saveCutCandidatesDir + 'candidates.tar',
                       P().saveMitosisPreProcessed)
    xe, ye = train.get_training_sample(selection=False)
    del train

    model = Sequential()
    model.add(Dense(10, activation='relu', input_shape=(2, )))
    model.add(Dense(1, activation='softmax'))
    model.compile(optimizer='adam', loss='mse')

    bag = Bagging(model)
    bag.fit(xe, ye)
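# A small worked example of the majority-vote step: each row holds the vote
# counts gathered from the estimators for one sample, and the winning class is
# the argmax along the class axis.
import numpy as np

votes = np.array([[5, 2],    # sample 0: 5 votes for class 0, 2 for class 1
                  [1, 6]])   # sample 1: 1 vote for class 0, 6 for class 1
print(np.argmax(votes, axis=1))  # -> [0 1]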
if __name__ == '__main__':
    test = ld.dataset(P().saveTestCandidates + 'candidates.tar', P().saveTestMitos)
    xt, yt = test.get_training_sample(shuffle=False, selection=False)
    yt_cat = np_utils.to_categorical(yt)

    model = load_model('model3')
    res = model.predict_classes(xt)
    cat_res = np_utils.to_categorical(res)

    fscore = K.eval(metrics.mitos_fscore(K.variable(yt_cat), K.variable(cat_res)))
    metrics.print_conf_matrix(yt, res)
    print('fscore: {}'.format(fscore))
    write_test_output(yt, res)