def ret(input_img):
    """Inpaint the green-square region of *input_img* using the model.

    Images in which no green square is detected are returned unchanged.
    The result keeps the same leading batch dimension as the input.
    """
    # Nothing to repair when no green square is present.
    if not cutter.is_green(input_img):
        return input_img
    top, bottom, left, right = cutter.find_square_coords(input_img)
    work = input_img.copy()
    # Model predicts a full image; only the square region is kept.
    predicted = np.squeeze(self.model.predict(work), 0)
    work = np.squeeze(work, 0)
    work[top:bottom, left:right] = predicted[top:bottom, left:right]
    return np.expand_dims(work, 0)
def __init__(self,img_cols,img_rows):
    """
    Initializes the autoencoder.

    :param img_cols: image width in pixels (original data is ~576 wide)
    :param img_rows: image height in pixels (original data is ~720 tall)
    """
    self.set_training_info()
    # NOTE(review): training settings are injected into module globals;
    # names such as `threshold` (read on the next line) and `mask`/`epochs`
    # (read by other methods) come from self.info — confirm this coupling
    # before refactoring.
    globals().update(self.info)
    self.threshold=threshold  # best loss seen so far; from self.info via globals()
    self.img_cols = img_cols#256 # Original is ~576
    self.img_rows = img_rows#256 # Original is ~720
    self.channels = 3 # RGB
    self.img_shape=(self.img_cols,self.img_rows,self.channels)
    # Locate the green square once on a sample image; the coordinates
    # are cached for reuse during training/inference.
    dummy=plotload.load_one_img(self.img_shape, dest='med/green',extra_dim=True)
    self.dims =cutter.find_square_coords(dummy)
    self.model=None       # set by a later model-building/loading step
    self.pretrained=False # flipped when pretrained weights are loaded
def mask_green_corner(imgs, val=0):
    """
    Mask the bottom area where the green square is.

    Takes a batch of images shaped (num_images, rows, cols, channels),
    overwrites the square region located by ``cutter.find_square_coords``
    with ``val``, and keeps a copy of the original pixels from that region.

    :param imgs: batch of images, shape (N, rows, cols, channels)
    :param val: the value that the mask is filled with
    :return: (the masked imgs, the removed parts, (y1, y2, x1, x2))
    """
    # Fix: the old docstring documented `mask_width`/`mask_height`
    # parameters that do not exist; the region comes from the cutter.
    img_num, _, _, img_channels = imgs.shape
    y1, y2, x1, x2 = cutter.find_square_coords(imgs)
    # np.empty defaults to float64; kept deliberately so callers that
    # expect float missing parts (regardless of input dtype) still work.
    missing_parts = np.empty((img_num, y2 - y1, x2 - x1, img_channels))
    missing_parts[:] = imgs[:, y1:y2, x1:x2, :]
    # Vectorized replacement of the per-image copy loop: copy the whole
    # batch once, then blank the square region in every image.
    masked_imgs = imgs.copy()
    masked_imgs[:, y1:y2, x1:x2, :] = val
    return masked_imgs, missing_parts, (y1, y2, x1, x2)
def __init__(self, img_cols, img_rows):
    """
    Initializes the autoencoder.

    :param img_cols: image width in pixels (original data is ~576 wide)
    :param img_rows: image height in pixels (original data is ~720 tall)
    """
    self.set_training_info()
    # NOTE(review): training settings are injected into module globals;
    # `threshold` and `mask` (read below) come from self.info — confirm
    # this coupling before refactoring.
    globals().update(self.info)
    self.threshold = threshold  # best loss seen so far; from self.info via globals()
    self.img_cols = img_cols # Original is ~576
    self.img_rows = img_rows # Original is ~720
    self.channels = 3 # RGB
    self.img_shape = (self.img_cols, self.img_rows, self.channels)
    # NOTE(review): in the collapsed source the extent of this `if` is
    # ambiguous; assumed to cover only the dummy load and self.dims —
    # confirm against the original formatting.
    if not mask:
        # Locate the green square once on a sample batch; coords cached.
        dummy = plotload.load_polyp_batch(self.img_shape, 20, data_type='med/stool-inclusions', crop=False)
        self.dims = cutter.find_square_coords(dummy)
    self.combined = None      # set by a later model-building step
    self.discriminator = None # set by a later model-building step
    self.generator = None     # set by a later model-building step
    self.pretrained = False   # flipped when pretrained weights are loaded
def train_model(self):
    """
    Trainer: Trains the loaded autoencoder model.

    Training settings (``epochs``, ``batch_size``, ``mask``, ...) are read
    from module globals populated by ``set_training_info``.  Every batch
    whose loss beats the best seen so far is checkpointed, and the final
    model is saved once training ends.

    :raises NotImplementedError: when ``mask`` is nonzero — only the
        green-square corruption mode is implemented.
    """
    if self.info is None:
        print("Warning no info found, prompting for info")
        self.set_training_info()
        globals().update(self.info)
    if self.model is None:
        print("Error: no model loaded")
        return
    if self.pretrained:
        print("Warning: model has pretrained weights")
    from tqdm import tqdm
    # Square coordinates are constant across batches; compute them once.
    y1, y2, x1, x2 = cutter.find_square_coords(
        plotload.load_polyp_batch(self.img_shape, batch_size))
    for epoch in tqdm(range(epochs)):
        X_train = plotload.load_polyp_batch(
            self.img_shape, batch_size, data_type='med/none')
        if mask == 0:
            Y_train, X_train = cutter.add_green_suare(X_train)
        else:
            # Bug fix: the original printed a warning here and fell
            # through, crashing with NameError because Y_train was
            # never assigned. Fail fast and explicitly instead.
            raise NotImplementedError("masked training is not yet implemented")
        cur_loss = self.model.train_on_batch(X_train, Y_train)
        if epoch % 10 == 0:
            self.save_img(epoch)
        if cur_loss < self.threshold:
            # New best loss: checkpoint model and weights.
            print(cur_loss)
            self.threshold = cur_loss
            self.model.save(f"models/AE-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if mask==0 else 'n'}.h5")
            self.model.save_weights(f"models/AE-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if mask==0 else 'n'}-w.h5")
    self.model.save(f"models/AE-{self.img_shape[0]}-{self.img_shape[1]}-{'c' if mask==0 else 'n'}-fin.h5")