class NucleiConfig(Config):
    """Configuration for training on the nuclei dataset.

    Derives from the base Config class and overrides values specific
    to the nuclei dataset.
    """
    # Give the configuration a recognizable name
    # NAME = "Nuclei"
    NAME = UNETSettings().network_info["net_description"]

    # Train on 1 GPU and 8 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 8 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 8

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + nucleus

    # Use small images for faster training. Set the limits of the small side
    # and the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 256
    IMAGE_MAX_DIM = 256

    # Use smaller anchors because our images and objects are small
    RPN_ANCHOR_SCALES = (8, 16, 32, 64, 128)  # anchor side in pixels

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 32

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # Use small validation steps since the epoch is small
    VALIDATION_STEPS = 5

    DETECTION_MIN_CONFIDENCE = 0.85  # before: 0.7
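
# A minimal usage sketch, assuming the matterport Mask R-CNN ``Config`` base
# class: its __init__ derives BATCH_SIZE = GPU_COUNT * IMAGES_PER_GPU and
# IMAGE_SHAPE from the *_DIM settings, and display() prints all values.
def _example_show_nuclei_config():
    """Illustrative only; not called anywhere in this module."""
    config = NucleiConfig()
    config.display()  # expect BATCH_SIZE == 8 and IMAGE_SHAPE == [256 256 3]
    return config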
def load(self, phase='train'):
    # Load settings
    settings = UNETSettings()

    # Select the dataset implementation
    print("Load dataset ...")
    dataset_name = settings.network_info["dataset"]
    if dataset_name == 'tisquant':
        dataset = TisquantDatasetNew()  # dataset = TisquantDataset()
    elif dataset_name == 'artificialNuclei':
        dataset = ArtificialNucleiDataset()
    elif dataset_name == 'artificialNucleiNotConverted':
        dataset = ArtificialNucleiDatasetNotConverted()
    elif dataset_name == 'mergeTisquantArtificialNotConverted':
        datasets = []
        dataset1 = TisquantDatasetNew()
        dataset1.load_data(mode=1)
        dataset2 = ArtificialNucleiDatasetNotConverted()
        dataset2.load_data(mode=1)
        datasets.append(dataset1)
        datasets.append(dataset2)
        dataset = MergedDataset(datasets)
    elif dataset_name == 'mergeTisquantArtificial':
        datasets = []
        dataset1 = TisquantDatasetNew()
        dataset1.load_data(mode=1)
        dataset2 = ArtificialNucleiDataset()
        dataset2.load_data(mode=1)
        datasets.append(dataset1)
        datasets.append(dataset2)
        dataset = MergedDataset(datasets)
    else:
        print('Dataset not valid')
        sys.exit("Error")

    # Load data: mode=1 for training, mode=2 for validation/test
    if phase == 'train':
        dataset.load_data(mode=1)
    else:
        dataset.load_data(mode=2)
    dataset.prepare()
    return dataset
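
# Hedged usage sketch: ``load`` is a method, so the owning class (not shown
# in this fragment) must be instantiated first; ``provider`` stands in for
# any such instance. mode=1 selects training data, mode=2 validation/test.
def _example_load_both_phases(provider):
    dataset_train = provider.load(phase='train')  # load_data(mode=1)
    dataset_val = provider.load(phase='val')      # load_data(mode=2)
    return dataset_train, dataset_val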
                W = All_D
                w[i, 0] = W.astype(np.float32)
            return x, y, w
        else:
            return x, y

    def batch_iterator(batch_size, k_samples, shuffle):
        # return h5BatchIterator(batch_size=batch_size, prepare=prepare,
        #                        k_samples=k_samples, shuffle=shuffle)
        return BatchIterator(batch_size=batch_size, prepare=prepare,
                             k_samples=k_samples, shuffle=shuffle)

    return batch_iterator

# prepare training strategy
train_strategy = get_binary_segmentation_TrainingStrategy(
    batch_size=2,
    max_epochs=int(UNETSettings().network_info["max_epochs"]),
    # samples_per_epoch=250,
    patience=50,  # before: 20
    ini_learning_rate=0.001,
    # L2=None,
    use_weights=use_weights,
    # refinement_strategy=RefinementStrategy(n_refinement_steps=6),
    valid_batch_iter=get_batch_iterator(),
    train_batch_iter=get_segmentation_crop_flip_batch_iterator(
        flip_left_right=True,
        flip_up_down=True,
        rotate=None,  # before: 30, 45
        use_weights=use_weights,
        crop_size=None))  # before: INPUT_SHAPE[1:]
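
# Hedged sketch of the contract implied above: the ``prepare`` closure
# returns (x, y, w) when use_weights is set and (x, y) otherwise, so any
# caller consuming its output should handle both arities.
def _example_unpack_prepare_output(out):
    """``out`` is whatever the prepare closure returned; illustrative only."""
    if len(out) == 3:
        x, y, w = out   # with per-sample weights
    else:
        x, y = out      # without weights
        w = None
    return x, y, w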
class ArtificialNucleiDataset(utils_for_datasets.Dataset):
    img_prefix = 'Img_'
    img_postfix = '-outputs.png'
    mask_prefix = 'Mask_'
    mask_postfix = '.tif'
    settings = UNETSettings()

    def load_data(self, width=256, height=256, ids=None, mode=1):
        # Load settings
        self.image_path = []
        self.mask_path = []
        self.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        train_cnt = 0
        val_cnt = 0
        print("Loading train data ...")
        if self.settings.network_info["traintestmode"] == 'train':
            for i in self.settings.network_info["dataset_dirs_train"].split(';'):
                img_range = self.setImagePaths(folders=[i + "/images"])
                self.setMaskPaths(folders=[i + "/masks"], img_range=img_range)
            print("Checking train path ...")
            self.checkPath()
            print("Loading val data ...")
            train_cnt = len(self.image_path)
            for i in self.settings.network_info["dataset_dirs_val"].split(';'):
                img_range = self.setImagePaths(folders=[i + "/images"])
                self.setMaskPaths(folders=[i + "/masks"], img_range=img_range)
            print("Checking val path ...")
            self.checkPath()
            val_cnt += len(self.image_path) - train_cnt
            # ids = np.arange(len(self.image_path))
            ids_train = np.arange(0, train_cnt)
            ids_val = np.arange(train_cnt, train_cnt + val_cnt)
            self.train_cnt = train_cnt
            self.val_cnt = val_cnt
            np.random.shuffle(ids_train)
            np.random.shuffle(ids_val)
            self.ids = np.concatenate((ids_train, ids_val), axis=0)
        else:
            for i in self.settings.network_info["dataset_dirs_test"].split(';'):
                img_range = self.setImagePaths(folders=[i + "/images"])
                self.setMaskPaths(folders=[i + "/masks"], img_range=img_range)
            print("Checking test path ...")
            self.checkPath()
            self.ids = np.arange(0, len(self.image_path))
        for i in self.ids:
            self.add_image("ArtificialNuclei", image_id=i, path=None,
                           width=width, height=height)
        return ids

    def checkPath(self):
        # Drop image/mask pairs whose image file does not exist on disk
        to_delete = []
        for index, i in tqdm(enumerate(self.image_path)):
            if not os.path.exists(i):
                to_delete.append(index)
        to_delete.sort(reverse=True)  # delete back-to-front to keep indices valid
        for i in to_delete:
            del self.image_path[i]
            del self.mask_path[i]

    def load_image(self, image_id):
        info = self.image_info[image_id]
        img_final = cv2.imread(self.image_path[self.ids[image_id]])
        try:
            img_final = img_final[:, :, 0]
        except (IndexError, TypeError):
            pass  # image is already single-channel
        # return img_final / 255.0
        if self.settings.network_info["netinfo"] == 'maskrcnn':
            # Mask R-CNN needs an RGB image
            img_new = np.zeros((img_final.shape[0], img_final.shape[1], 3))
            img_new[:, :, 0] = img_new[:, :, 1] = img_new[:, :, 2] = img_final
            img_final = img_new
        return img_final

    def setImagePaths(self, folders=()):
        for folder in folders:
            file_pattern = os.path.join(
                folder, self.img_prefix + "*" + self.img_postfix)  # "Img_*-outputs.png"
            print(file_pattern)
            img_files = glob.glob(file_pattern)
            img_files.sort()
            img_range = range(0, len(img_files))
            for i in img_range:
                self.image_path.append(
                    os.path.join(folder,
                                 self.img_prefix + str(i) + self.img_postfix))
            # for i in img_files:
            #     self.image_path.append(i)
        return img_range

    def setMaskPaths(self, folders=(), img_range=None):
        for folder in folders:
            file_pattern = os.path.join(
                folder, self.mask_prefix + "*" + self.mask_postfix)  # "Mask_*.tif"
            print(file_pattern)
            img_files = glob.glob(file_pattern)
            img_files.sort()
            # for i in range(0, len(img_files)):
            for i in img_range:
                self.mask_path.append(
                    os.path.join(folder,
                                 self.mask_prefix + str(i) + self.mask_postfix))
    def image_reference(self, image_id):
        """Return the reference data of the image."""
        info = self.image_info[image_id]
        # before: checked "shapes", a leftover from the Mask R-CNN shapes sample
        if info["source"] == "ArtificialNuclei":
            return info
        else:
            return super(ArtificialNucleiDataset, self).image_reference(image_id)

    def load_mask(self, image_id):
        """Generate instance masks for the nuclei of the given image ID."""
        info = self.image_info[image_id]
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        # Count the labels that actually occur in the mask
        count = 0
        for i in range(1, int(mask.max()) + 1):
            if (mask == i).sum() > 0:
                count = count + 1
        # One binary layer per labelled instance. Before: count + 1 layers
        # ("one more for background"), which mismatched len(class_ids) and
        # left an all-zero trailing layer.
        mask_new = np.zeros([info['height'], info['width'], count],
                            dtype=np.uint8)
        running = 0
        for i in np.unique(mask):
            if (i > 0) and ((mask == i).sum() > 0):
                mask_new[:, :, running] = (mask == i)
                running = running + 1
        # Map class names to class IDs (single foreground class)
        class_ids = np.ones(count)
        return mask_new, class_ids.astype(np.int32)

    def load_mask_one_layer(self, image_id, relabel=False):
        mask = tifffile.imread(self.mask_path[self.ids[image_id]])
        if mask.ndim > 2:
            mask = mask[:, :, 0]
        if relabel:
            # Re-number the labels consecutively starting at 1
            mask_tmp = np.zeros((mask.shape[0], mask.shape[1]))
            running = 1
            for i in np.unique(mask):
                if i > 0:
                    mask_tmp = mask_tmp + running * (mask == i)
                    running = running + 1
            mask = mask_tmp.astype(float)  # before: np.float (deprecated)
        return mask

    def pre_process_img(self, img, color):
        """Preprocess image."""
        if color == 'gray':  # before: 'is', which compares identity, not equality
            img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        elif color == 'rgb':
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32)
        img /= 255.0
        return img

    def split_train_test(self, width=256, height=256):
        dataset_train = ArtificialNucleiDataset()
        dataset_test = ArtificialNucleiDataset()
        dataset_train.image_path = []
        dataset_train.mask_path = []
        dataset_train.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        dataset_test.image_path = []
        dataset_test.mask_path = []
        dataset_test.add_class("ArtificialNuclei", 1, 'ArtificialNuclei')
        self.ids = []
        dataset_train.image_path.extend(self.image_path[0:self.train_cnt])
        dataset_train.mask_path.extend(self.mask_path[0:self.train_cnt])
        dataset_train.train_cnt = len(self.image_path)
        dataset_test.image_path.extend(self.image_path[self.train_cnt:])
        dataset_test.mask_path.extend(self.mask_path[self.train_cnt:])
        dataset_test.train_cnt = len(self.image_path) - self.train_cnt
        ids_train = np.arange(0, self.train_cnt)
        ids_val = np.arange(0, self.val_cnt)
        np.random.shuffle(ids_train)
        np.random.shuffle(ids_val)
        dataset_train.ids = ids_train
        dataset_test.ids = ids_val
        for i in dataset_train.ids:
            dataset_train.add_image("ArtificialNuclei", image_id=i, path=None,
                                    width=width, height=height)
        for i in dataset_test.ids:
            dataset_test.add_image("ArtificialNuclei", image_id=i, path=None,
                                   width=width, height=height)
        dataset_train.prepare()
        dataset_test.prepare()
        return dataset_train, dataset_test
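
# Hedged usage sketch for the dataset class above; assumes the prepare()
# method inherited from utils_for_datasets.Dataset (as in the matterport
# Mask R-CNN Dataset base) and dataset directories configured via UNETSettings.
def _example_use_artificial_nuclei():
    dataset = ArtificialNucleiDataset()
    dataset.load_data(width=256, height=256, mode=1)
    dataset.prepare()
    img = dataset.load_image(0)               # HxW, or HxWx3 for maskrcnn
    masks, class_ids = dataset.load_mask(0)   # HxWxN instance layers
    train_set, val_set = dataset.split_train_test(width=256, height=256)
    return train_set, val_set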
if __name__ == '__main__':
    """ main """
    # add argument parser
    parser = argparse.ArgumentParser(description='Train model.')
    # raw string: "models\unet..." would be an invalid \u escape in Python 3
    parser.add_argument('--model', help='select model to train.',
                        default=r"models\unet1_augment.py")
    parser.add_argument('--datadescription', help='select data set.',
                        default="256x256_TisQuantTrainingData_Evaluation1")
    args = parser.parse_args()

    # Load settings
    settings = UNETSettings()

    # Load dataset
    print("Load dataset ...")
    dataset_name = settings.network_info["dataset"]
    if dataset_name == 'tisquant':
        dataset = TisquantDatasetNew()  # dataset = TisquantDataset()
    elif dataset_name == 'artificialNuclei':
        dataset = ArtificialNucleiDataset()
    elif dataset_name == 'artificialNucleiNotConverted':
        dataset = ArtificialNucleiDatasetNotConverted()
    elif dataset_name == 'mergeTisquantArtificialNotConverted':
        datasets = []
        dataset1 = TisquantDatasetNew()
        dataset1.load_data(mode=1)
default="models\unet1_augment.py") parser.add_argument('--datadescription', help='select data set.', default="256x256_TisQuantTrainingData_Evaluation1") parser.add_argument('--testdata', help='select data set.', default="256x256_TisQuantTestData_Evaluation1") parser.add_argument( '--path_to_img', help='select data set.', default=r"G:\FORSCHUNG\LAB4\Daria Lazic\Deep_Learning\GD2_META_rwf") parser.add_argument('--rwf', default="1") parser.add_argument('--mode', default=3) args = parser.parse_args() settings = UNETSettings() # Load Dataset print("Load dataset ...") if settings.network_info["dataset"] == 'tisquant': # args.dataset dataset = TisquantDatasetNew() elif UNETSettings().network_info["dataset"] == 'sampleInference': dataset = SampleInference() else: print('Dataset not valid') sys.exit("Error") val_idx = dataset.load_data(mode=2) dataset.prepare() # get the model
class InferenceConfig(NucleiConfig):
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

inference_config = InferenceConfig()

"""
# Training dataset
dataset_test = TisquantDataset()
dataset_test.load_data(width=inference_config.IMAGE_SHAPE[0],
                       height=inference_config.IMAGE_SHAPE[1], mode=2)
dataset_test.prepare()
"""

# Load dataset
print("Load dataset ...")
if UNETSettings().network_info["dataset"] == 'tisquant':  # args.dataset
    dataset_test = TisquantDatasetNew()
elif UNETSettings().network_info["dataset"] == 'sampleInference':
    dataset_test = SampleInference()
else:
    print('Dataset not valid')
    sys.exit("Error")
dataset_test.load_data(mode=1)
dataset_test.prepare()

for image_id in dataset_test.image_ids:
    print(str(image_id) + "\n")

# Recreate the model in inference mode
model = modellib.MaskRCNN(mode="inference", config=inference_config,
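
# Hedged sketch of the typical matterport Mask R-CNN inference loop that
# would follow the (truncated) model construction above: model.load_weights
# and model.detect are part of that library's API, while ``weights_path``
# is a hypothetical placeholder for the trained-weights file.
def _example_detect_nuclei(model, dataset_test, weights_path):
    model.load_weights(weights_path, by_name=True)
    image = dataset_test.load_image(dataset_test.image_ids[0])
    results = model.detect([image], verbose=0)
    r = results[0]  # dict with 'rois', 'class_ids', 'scores', 'masks'
    return r['masks']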
""" # Training dataset dataset_train = TisquantDataset() val_idx = dataset_train.load_data(width=config.IMAGE_SHAPE[0], height=config.IMAGE_SHAPE[1]) dataset_train.prepare() # Validation dataset dataset_val = TisquantDataset() dataset_val.load_data(width=config.IMAGE_SHAPE[0], height=config.IMAGE_SHAPE[1],ids=val_idx,mode=2) dataset_val.prepare() """ # Load Dataset print("Load dataset ...") if UNETSettings().network_info["dataset"] == 'tisquant': #args.dataset dataset = TisquantDatasetNew() elif UNETSettings().network_info["dataset"] == 'artificialNuclei': dataset = ArtificialNucleiDataset() elif UNETSettings().network_info["dataset"] == 'artificialNucleiNotConverted': dataset = ArtificialNucleiDatasetNotConverted() elif UNETSettings().network_info["dataset"] == 'mergeTisquantArtificial': datasets = [] dataset1 = TisquantDatasetNew() dataset1.load_data(mode=1) dataset2 = ArtificialNucleiDataset() dataset2.load_data(mode=1) datasets.append(dataset1) datasets.append(dataset2) dataset = MergedDataset(datasets) else:
def getResultsPath(self):
    settings = UNETSettings()
    return settings.network_info["results_folder"]

def getID(self):
    settings = UNETSettings()
    return settings.network_info["net_description"]
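
# Hedged sketch combining the two accessors above; assumes they are methods
# of the same (unshown) class, here passed in as ``obj``. The file name
# "_predictions.h5" is a hypothetical example, not taken from this code base.
def _example_results_file(obj):
    import os
    return os.path.join(obj.getResultsPath(), obj.getID() + "_predictions.h5")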