import os

import numpy as np
import SimpleITK as sitk
from skimage.io import imread, imsave  # imread/imsave assumed to come from skimage.io
from skimage.measure import label, find_contours
from skimage.draw import polygon
from skimage.morphology import disk, erosion, dilation

# printProgressBar, getBackgroundMask, imageWithOverlay and coreSTAPLE are
# project-local helpers assumed to be defined elsewhere in the repository.


def remap(directory):
    train_dir = os.path.join(directory, 'train')
    maps_dir = [os.path.join(directory, 'Maps%d_T' % i) for i in range(1, 7)]  # Original maps
    maps_out = [os.path.join(directory, 'Remaps%d_T' % i) for i in range(1, 7)]  # Remaps

    # Create "Remaps" directories if they don't exist yet
    for m in maps_out:
        if (not os.path.exists(m)):
            os.mkdir(m)

    images = [f for f in os.listdir(train_dir)]
    printProgressBar(0, len(images))
    for idi, im in enumerate(images):
        for idm, m in enumerate(maps_dir):
            f_out = os.path.join(maps_out[idm], '%s_classimg_nonconvex.png' % im.replace('.jpg', ''))
            if os.path.isfile(f_out):
                continue  # this map is already remapped; move on to the next annotator
            fname = os.path.join(m, '%s_classimg_nonconvex.png' % im.replace('.jpg', ''))
            if (os.path.isfile(fname)):
                anno = imread(fname)
                anno[anno > 5] = 1  # fold out-of-range labels back to class 1
                imsave(f_out, anno)
        printProgressBar(idi, len(images))

def generate_STAPLE(directory, maps, keep_annotations, map_out):
    train_dir = os.path.join(directory, 'train')
    m_out = os.path.join(directory, map_out)
    if (not os.path.exists(m_out)):
        os.mkdir(m_out)

    # Get all the cores from the training set
    f_train = os.listdir(train_dir)
    cores = [f.split('.')[0] for f in f_train]

    reader = sitk.ImageFileReader()
    msf = sitk.MultiLabelSTAPLEImageFilter()

    printProgressBar(0, len(cores) - 1)
    for i, core in enumerate(cores):
        fname_out = os.path.join(m_out, '%s.png' % core)
        if os.path.isfile(fname_out):
            continue  # skip if it's already been done

        # Keep only the annotations flagged for this core
        maps_ = []
        for idm, m in enumerate(maps):
            if (keep_annotations[i, idm]):
                maps_ += [m]

        im_ = coreSTAPLE(directory, core, maps_, msf, reader)
        if (im_ is None):  # 'is None' avoids an ambiguous element-wise comparison
            print("EXCLUDE: ", core)
            continue
        imsave(fname_out, im_)
        printProgressBar(i, len(cores) - 1)

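# Usage sketch (illustrative only): remap the six annotator maps, then fuse them with
# multi-label STAPLE. The 'Remaps%d_T' names follow remap() above; the output directory
# name and the choice to keep every annotation for every core are assumptions.
def _example_staple_fusion(directory):
    remap(directory)
    maps = ['Remaps%d_T' % i for i in range(1, 7)]
    n_cores = len(os.listdir(os.path.join(directory, 'train')))
    keep = np.ones((n_cores, len(maps)), dtype=bool)  # keep every annotation for every core
    generate_STAPLE(directory, maps, keep, 'Maps_STAPLE')
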
def get_ssp_epstein(pxPerGrades, saveAs=None):
    sspg = np.ones((pxPerGrades.shape[0], pxPerGrades.shape[1])).astype('int')
    printProgressBar(0, pxPerGrades.shape[0])
    for idi in range(pxPerGrades.shape[0]):
        for idm in range(pxPerGrades.shape[1]):
            if (pxPerGrades[idi, idm].sum() > 0):
                h = pxPerGrades[idi, idm][1:]
                if (h.sum() == 0):
                    sspg[idi, idm] = 0
                else:
                    s = np.argsort(h)[::-1]
                    if (h[s][1] > 0):  # >=2 types of glands present:
                        p1 = s[0] + 1
                        p2 = s[1] + 1
                        if (p1 + p2 <= 6):
                            sspg[idi, idm] = 1
                        elif (p1 == 3 and p2 == 4):
                            sspg[idi, idm] = 2
                        elif (p1 == 4 and p2 == 3):
                            sspg[idi, idm] = 3
                        elif (p1 == 5 or p2 == 5):
                            sspg[idi, idm] = 5
                        else:
                            print("error!", idi, idm)
                    else:  # only 1 type
                        p1 = s[0] + 1
                        if (p1 <= 3):
                            sspg[idi, idm] = 1
                        elif (p1 == 4):
                            sspg[idi, idm] = 4
                        elif (p1 == 5):
                            sspg[idi, idm] = 5
                        else:
                            print("error!", idi, idm)
        printProgressBar(idi + 1, pxPerGrades.shape[0])
    if (saveAs is not None):
        np.save(saveAs, sspg)
    return sspg

def compute_pxPerGrades(directory, maps, saveAs=None):
    train_dir = os.path.join(directory, 'train')
    images = [f for f in os.listdir(train_dir)]
    maps_dir = [os.path.join(directory, m) for m in maps]

    # pxPerGrades_all[image, map, grade] = number of pixels of that grade in the annotation
    pxPerGrades_all = np.zeros((len(images), len(maps), 6))
    for idm, m in enumerate(maps_dir):
        printProgressBar(0, len(images))
        for idi, im in enumerate(images):
            # Fused maps are stored as '<core>.png', annotator maps as '<core>_classimg_nonconvex.png'
            fname = os.path.join(m, '%s.png' % im.replace('.jpg', ''))
            if (not os.path.isfile(fname)):
                fname = os.path.join(m, '%s_classimg_nonconvex.png' % im.replace('.jpg', ''))
            if (not os.path.isfile(fname)):
                continue
            anno = imread(fname)
            for v in range(6):
                pxPerGrades_all[idi, idm, v] = (anno == v).sum()
            printProgressBar(idi + 1, len(images))
    if saveAs is not None:
        np.save(saveAs, pxPerGrades_all)
    return pxPerGrades_all

def process(sess, rgb, X, Y_seg, tile_size, bgDetection=True, verbose=False):
    if (verbose):
        print("Processing RGB image")

    # Background detection
    if (verbose):
        print("Background detection.")
    bg_mask = np.ones((rgb.shape[0], rgb.shape[1])).astype('bool')
    if (bgDetection):
        bg_mask = getBackgroundMask(rgb)  # 1 = foreground, 0 = background

    overlap = 2
    if (verbose):
        print("Tiling")
    im = rgb / 255. - 0.5  # Offset image
    imshape = im.shape
    # Number of tile rows/columns needed to cover the image with a factor-2 overlap
    nr, nc = (overlap * np.ceil((imshape[0] - 1) / tile_size),
              overlap * np.ceil((imshape[1] - 1) / tile_size))
    # Top-left corner coordinates of every tile
    yr = (np.arange(0, nr) * ((imshape[0] - 1 - tile_size) // (nr - 1))).astype('int')
    xr = (np.arange(0, nc) * ((imshape[1] - 1 - tile_size) // (nc - 1))).astype('int')
    mesh = np.meshgrid(yr, xr)
    tiles = zip(mesh[0].flatten(), mesh[1].flatten())

    im_pred = np.zeros(imshape[:2]).astype('float')
    if (verbose):
        print("Prediction")
        printProgressBar(0, len(mesh[0].flatten()))
    for idt, t in enumerate(tiles):
        batch_X = [im[t[0]:t[0] + tile_size, t[1]:t[1] + tile_size]]
        sm = Y_seg.eval(session=sess, feed_dict={X: batch_X})[:, :, :, 0]
        # Keep the maximum prediction where tiles overlap
        im_pred[t[0]:t[0] + tile_size, t[1]:t[1] + tile_size] = np.maximum(
            im_pred[t[0]:t[0] + tile_size, t[1]:t[1] + tile_size], sm[0, :, :])
        if (verbose):
            printProgressBar(idt + 1, len(mesh[0].flatten()))

    mask_pred = im_pred <= 0.5  # mask_pred will be 0=artefact, 1=no artefact
    mask_out = mask_pred * bg_mask  # 1=normal tissue, 0 = background or artefact
    im_out = imageWithOverlay(rgb, mask_out)  # blend2Images(rgb, mask_out)
    return im_pred, im_out, bg_mask

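# Usage sketch (illustrative, not from this file): run the tiled segmentation on one RGB
# image with a TensorFlow 1.x-style session (use tf.compat.v1 on TF 2.x). The checkpoint
# path and the tensor names 'X:0' / 'Y_seg:0' are hypothetical.
def _example_process(rgb, tile_size=256):
    import tensorflow as tf
    with tf.Session() as sess:
        saver = tf.train.import_meta_graph('model.ckpt.meta')  # hypothetical checkpoint
        saver.restore(sess, 'model.ckpt')
        graph = tf.get_default_graph()
        X = graph.get_tensor_by_name('X:0')          # assumed name of the input placeholder
        Y_seg = graph.get_tensor_by_name('Y_seg:0')  # assumed name of the segmentation output
        return process(sess, rgb, X, Y_seg, tile_size, bgDetection=True, verbose=True)
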
def get_ssp(pxPerGrades, saveAs=None):
    ssp = np.zeros((pxPerGrades.shape[0], pxPerGrades.shape[1])).astype('int')
    printProgressBar(0, pxPerGrades.shape[0])
    for idi in range(pxPerGrades.shape[0]):
        for idm in range(pxPerGrades.shape[1]):
            if (pxPerGrades[idi, idm].sum() > 0):
                h = pxPerGrades[idi, idm][1:]
                if (h.sum() == 0):
                    ssp[idi, idm] = 0
                else:
                    s = np.argsort(h)[::-1]
                    if (h[s][1] > 0):  # 2 types of glands present:
                        ssp[idi, idm] = (s[:2] + 1).sum()
                    else:  # only 1 type
                        ssp[idi, idm] = (s[0] + 1) * 2
        printProgressBar(idi + 1, pxPerGrades.shape[0])
    if (saveAs is not None):
        np.save(saveAs, ssp)
    return ssp

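# Usage sketch (hedged): chain the per-pixel grade histograms into the two summary
# scores above. The map names and .npy file names are illustrative assumptions.
def _example_grade_summary(directory):
    maps = ['Maps%d_T' % i for i in range(1, 7)]
    px = compute_pxPerGrades(directory, maps, saveAs='pxPerGrades.npy')
    ssp = get_ssp(px, saveAs='ssp.npy')            # sum of the two dominant non-background classes
    sspg = get_ssp_epstein(px, saveAs='sspg.npy')  # grade-group-style label (0-5)
    return ssp, sspg
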
def generate_vote(directory, maps, keep_annotations, weights, map_out):
    train_dir = os.path.join(directory, 'train')
    maps_dir = [os.path.join(directory, m) for m in maps]
    images = [f for f in os.listdir(train_dir)]
    m_out = os.path.join(directory, map_out)
    if (not os.path.exists(m_out)):
        os.mkdir(m_out)

    printProgressBar(0, len(images) - 1)
    for idi, im in enumerate(images):
        # Load the annotations kept for this image
        annos = []
        for idm, m in enumerate(maps_dir):
            if (keep_annotations[idi, idm]):
                annos += [imread(os.path.join(m, '%s_classimg_nonconvex.png' % im.replace('.jpg', '')))]
        annos = np.array(annos)

        # Annotator weights: uniform for majority voting, otherwise taken from 'weights'
        if (weights == 'majority'):
            ws = np.ones(keep_annotations[idi].sum())
        else:
            ws = weights[keep_annotations[idi]]
        ws /= ws.sum()

        # Accumulate the weighted votes per class, then keep the winning class per pixel
        votes = np.zeros((annos.shape[1], annos.shape[2], 6))
        for v in range(6):
            for idr, w in enumerate(ws):
                mask = annos[idr, :, :] == v
                votes[mask, v] += w
        final_v = np.argmax(votes, axis=2)
        imsave(os.path.join(m_out, '%s.png' % im.replace('.jpg', '')), final_v)
        printProgressBar(idi, len(images) - 1)

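# Usage sketch (hedged): fuse the remapped annotations by majority vote and by a
# weighted vote. The output directory names and the weight values are assumptions;
# 'keep_annotations' is a boolean (n_images, n_annotators) array as above.
def _example_votes(directory, keep_annotations):
    maps = ['Remaps%d_T' % i for i in range(1, 7)]
    generate_vote(directory, maps, keep_annotations, 'majority', 'Maps_MajorityVote')
    w = np.array([1., 1., 2., 1., 1., 2.])  # hypothetical per-annotator reliability weights
    generate_vote(directory, maps, keep_annotations, w, 'Maps_WeightedVote')
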
def SNOWGenerator(input_dir,
                  output_dir,
                  pRemove,
                  defStd,
                  simplificationFactor,
                  boundingBoxes=False,
                  doLabelAugmentation=False,
                  verbose=False):
    Yfiles = os.listdir(input_dir)
    total = len(Yfiles)
    if (verbose):
        print("Generating SNOW annotations for %d files in %s" % (total, input_dir))
        print("Noise %%: %d" % (pRemove * 100))
        print("Std of erosion/dilation: %d" % defStd)
        print("Simplification factor: %d" % simplificationFactor)
        if (boundingBoxes):
            print("Using bounding boxes as output.")
        if (doLabelAugmentation):
            print("Adding label augmentation.")

    nLabelsIn = 0
    nLabelsOut = 0
    for f in Yfiles:
        Y = imread(os.path.join(input_dir, f))
        Yout = np.zeros_like(Y)

        # Check if annotations are already labelled or if it's a mask:
        labels = np.unique(Y[Y > 0])
        if (len(labels) == 1):
            Y = label(Y > 0)
            labels = np.unique(Y[Y > 0])
        nLabels = len(labels)
        if (verbose):
            print("Processing: %s" % f)
            if (nLabels > 0):
                printProgressBar(0, nLabels)
        nLabelsIn += nLabels

        # SNOW generation:
        # Draw which objects will be removed from the image
        toRemove = np.random.random(nLabels) < pRemove
        # Draw random deformation parameters:
        if (defStd > 0):
            diskRadii = np.random.normal(0, defStd, labels.max() + 1).astype('int')
        # If label augmentation: prepare additional outputs:
        if (doLabelAugmentation):
            Yout_p5 = np.copy(Yout)
            Yout_m5 = np.copy(Yout)
            disk5 = disk(5)

        # To be able to get the complete contour of all objects, we first zero-pad the label
        # image (keeping Y's dtype so label values above 255 are not truncated):
        Ypadded = np.zeros((Y.shape[0] + 2, Y.shape[1] + 2)).astype(Y.dtype)
        Ypadded[1:-1, 1:-1] = Y

        newLabel = 0  # We will re-label from 0 the resulting annotation.
        for idl, lab in enumerate(labels):
            if (toRemove[idl]):  # Ignore removed objects so they won't be added to Yout
                continue
            # Select current object
            Yobj = (Ypadded == lab).astype('uint8')
            # Deforming objects
            if (defStd > 0):
                if (diskRadii[lab] < 0):
                    Yobj = erosion(Yobj, disk(abs(diskRadii[lab])))
                elif (diskRadii[lab] > 0):
                    Yobj = dilation(Yobj, disk(abs(diskRadii[lab])))
            # Check if we completely removed the object in the process...
            if (Yobj.sum() == 0):
                continue
            # Find and simplify the contour.
            if (simplificationFactor > 1):
                cont = find_contours(Yobj, 0)[0]
                sCont = cont[::simplificationFactor]
                sContours = np.vstack([sCont, cont[0]])  # Close contour
                rr, cc = polygon(sContours[:, 0], sContours[:, 1], Yobj.shape)
                # Replace object with the simplified polygon
                Yobj[Yobj > 0] = 0
                Yobj[rr, cc] = 1
            # Replace with bounding boxes
            if (boundingBoxes):
                # Compute boundaries
                rows = np.any(Yobj, axis=1)
                cols = np.any(Yobj, axis=0)
                rmin, rmax = np.nonzero(rows)[0][[0, -1]]
                cmin, cmax = np.nonzero(cols)[0][[0, -1]]
                # Replace object
                Yobj[Yobj > 0] = 0
                Yobj[rmin:rmax + 1, cmin:cmax + 1] = 1
            # Add to output array with new label after de-padding
            newLabel += 1
            Yout += Yobj[1:-1, 1:-1] * newLabel
            # Label Augmentation
            if (doLabelAugmentation):
                Yobj_p5 = dilation(Yobj, disk5)
                Yobj_m5 = erosion(Yobj, disk5)
                Yout_p5 += Yobj_p5[1:-1, 1:-1] * newLabel
                Yout_m5 += Yobj_m5[1:-1, 1:-1] * newLabel
            if (verbose):
                printProgressBar(idl + 1, nLabels)
        nLabelsOut += Yout.max()

        # Save output file(s)
        imsave(os.path.join(output_dir, f), Yout)
        if (doLabelAugmentation):
            ext = f.rsplit('.')[-1]
            imsave(os.path.join(output_dir, f.replace('.%s' % ext, '-p5.%s' % ext)), Yout_p5)
            imsave(os.path.join(output_dir, f.replace('.%s' % ext, '-m5.%s' % ext)), Yout_m5)
    if (verbose):
        print("In: %d -> Out: %d" % (nLabelsIn, nLabelsOut))

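# Usage sketch (hedged): generate SNOW-corrupted annotations from a folder of clean
# label masks. Directory names and parameter values are illustrative assumptions.
def _example_snow(clean_dir='Y_clean', snow_dir='Y_snow'):
    if (not os.path.exists(snow_dir)):
        os.mkdir(snow_dir)
    SNOWGenerator(clean_dir, snow_dir,
                  pRemove=0.5,              # randomly drop about half of the objects
                  defStd=5,                 # erosion/dilation radii drawn from N(0, 5)
                  simplificationFactor=10,  # keep one contour point in ten
                  boundingBoxes=False,
                  doLabelAugmentation=False,
                  verbose=True)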