Beispiel #1
0
    def predict(self):
        """Apply the trained classifier to every image in ``self.imageFolder``.

        For each image, saves the raw classifier prediction as
        ``<name>_classifier<ext>`` and the watershed-refined mask as
        ``<name>_watershed<ext>`` inside the ``result_segmentation``
        subfolder (created if missing).
        """
        #############################################
        # apply classifiers and save images
        #############################################

        result_folder = os.path.join(self.imageFolder, 'result_segmentation')
        if not os.path.exists(result_folder):
            os.mkdir(result_folder)

        flist_in = ioDT.get_image_list(self.imageFolder)
        flist_in.sort()

        for f_in in flist_in:

            print('#' * 20 + '\nLoading', f_in, '...')
            img = imread(f_in)
            # Normalize layout to (channel, rows, cols): promote 2D images to
            # one channel, and move a trailing channel axis to the front
            # (assumes the smallest axis is the channel axis -- TODO confirm).
            if len(img.shape) == 2:
                img = np.expand_dims(img, 0)
            if img.shape[-1] == np.min(img.shape):
                img = np.moveaxis(img, -1, 0)
            img = img[0]  # only the first channel is segmented

            print('Predicting image...')
            pred, prob = predict.predict_image(
                img,
                self.classifier,
                self.scaler,
                sigmas=self.params['sigmas'],
                new_shape_scale=self.params['down_shape'],
                feature_mode=self.params['feature_mode'],
                deep=self.deepModel.isChecked())

            # remove objects at the border
            negative = ndi.binary_fill_holes(pred == 0)
            mask_pred = (pred == 1) * negative
            # rescale the edge probability map (class 2) to full uint16 range
            edge_prob = ((2**16 - 1) * prob[2]).astype(np.uint16)
            mask_pred = mask_pred.astype(np.uint8)

            # save the raw classifier prediction
            parent, filename = os.path.split(f_in)
            filename, file_extension = os.path.splitext(filename)
            new_name = os.path.join(parent, 'result_segmentation',
                                    filename + '_classifier' + file_extension)
            imsave(new_name, pred, check_contrast=False)

            # perform watershed
            mask_final = predict.make_watershed(
                mask_pred,
                edge_prob,
                new_shape_scale=self.params['down_shape'])

            # save final mask
            parent, filename = os.path.split(f_in)
            filename, file_extension = os.path.splitext(filename)
            new_name = os.path.join(parent, 'result_segmentation',
                                    filename + '_watershed' + file_extension)
            imsave(new_name, mask_final, check_contrast=False)

        print('All images done!')
def create_GT_mask(model_folder):
    """Ensure every trainingset image in *model_folder* has a ``_GT`` mask.

    Skips the folder with a printed warning when the model folder or its
    ``trainingset`` subfolder is missing, or when the trainingset is empty.
    For each image lacking a ``<name>_GT<ext>`` companion file, opens a
    manual-annotation window (``makeManualMask``) so the user can draw it.
    """
    ### check that model and trainingset exist
    if not os.path.exists(model_folder):
        print('Warning!')
        print(model_folder,':')
        print('Model folder not created! Skipping this subfolder.')
        return

    trainingset_folder = os.path.join(model_folder,'trainingset')
    if not os.path.exists(trainingset_folder):
        print('Warning!')
        print(model_folder,':')
        print('Trainingset images not found! Skipping this subfolder.')
        return

    ### load trainingset images and previously generated ground truth
    flist_in = io.get_image_list(trainingset_folder, string_filter='_GT', mode_filter='exclude')
    flist_in.sort()
    flist_gt = io.get_image_list(trainingset_folder, string_filter='_GT', mode_filter='include')
    flist_gt.sort()

    ### if no trainingset images in the folder, skip this subfolder
    if len(flist_in) == 0:
        print('\n\nWarning, no trainingset!','Selected "'+model_folder+'" but no trainingset *data* detected. Transfer some images in the "trainingset" folder.')
        return

    ### if there are more trainingset than ground truth, prompt user to make mask
    if len(flist_in)!=len(flist_gt):
        print('\n\nWarning, trainingset incomplete!','Selected "'+model_folder+'" but not all masks have been created.\nPlease provide manually annotated masks.')

        for f in flist_in:
            fn,ext = os.path.splitext(f)
            mask_name = fn+'_GT'+ext

            if not os.path.exists(mask_name):
                # reuse an existing Qt application instance if one is running,
                # otherwise create one for the annotation dialog
                if not PyQt5.QtWidgets.QApplication.instance():
                    app = PyQt5.QtWidgets.QApplication(sys.argv)
                else:
                    app = PyQt5.QtWidgets.QApplication.instance()
                m = makeManualMask(f,subfolder='',fn=fn+'_GT'+ext,wsize = (2000,2000))
                m.show()
                app.exec_()
Beispiel #3
0
 def __init__(self, imageFolder, parent=None, start=None, stop=None):
     """Set up the inspection window over the images in *imageFolder*.

     Parameters
     ----------
     imageFolder : str
         Folder containing the images to inspect.
     parent : QWidget or None
         Parent widget forwarded to the Qt base class.
     start, stop : int or None
         Range of images to show; ``None`` means the full list.
     """
     super(inspectionWindow_20max, self).__init__(parent)
     self.imageFolder = imageFolder
     self.folder, self.cond = os.path.split(self.imageFolder)
     self.flist_in = io.get_image_list(self.imageFolder)
     self.n_imgs = len(self.flist_in)
     # BUGFIX: the declared defaults start=None/stop=None made
     # ``self.stop - self.start`` raise TypeError; default to the full range.
     self.start = 0 if start is None else start
     self.stop = self.n_imgs if stop is None else stop
     self.n_shown_max = self.stop - self.start
     self.make()
# Feature-extraction configuration for classifier training.
feature_type = 'daisy'  # 'daisy' or 'ilastik'
deep = False  # True: deep learning with Multi Layer Perceptrons; False: Logistic regression

###############################################################################

if __name__ == '__main__':
    
    for model_folder in model_folders:
        print('-------------'+model_folder+'------------')

        training_folder = os.path.join(model_folder, 'trainingset')

        ### load images
        flist_in = ioDT.get_image_list(
                                                  training_folder, 
                                                  string_filter='_GT', 
                                                  mode_filter='exclude'
                                                  )
        img_train = []
        for f in flist_in:
            img = imread(f)
            if len(img.shape)==2:
                img = np.expand_dims(img,0)
            if img.shape[-1] == np.min(img.shape):
                img = np.moveaxis(img, -1, 0)
            img_train.append( img[0] )

        ## load ground truth
        flist_gt = ioDT.get_image_list(
                                                training_folder, 
                                                string_filter='_GT', 
Beispiel #5
0
def generate_overview_finalMask(input_folder,
                                chosen,
                                saveFig=True,
                                downshape=1,
                                autoclose=False):
    """Build a grid overview of the chosen images with their final masks.

    Parameters
    ----------
    input_folder : str
        Folder with the raw images and a ``result_segmentation`` subfolder
        containing the ``*_finalMask.tif`` files.
    chosen : sequence of bool
        One flag per image in the folder; only truthy entries are shown.
    saveFig : bool
        If True, save the figure as ``<cond>_finalMasks.png`` inside
        ``result_segmentation``.
    downshape : int
        Subsampling step applied to images and masks for display.
    autoclose : bool
        If True, display the figure for 10 seconds and then close it.

    Returns
    -------
    matplotlib.figure.Figure
        The generated overview figure.
    """
    print('### Generating recap image at', input_folder)

    flist_in = io.get_image_list(input_folder)
    flist_in = [flist_in[i] for i in range(len(flist_in)) if chosen[i]]
    segment_folder = os.path.join(input_folder, 'result_segmentation')
    flist_ma = io.get_image_list(segment_folder, '_finalMask.tif', 'include')

    n_img = len(flist_in)
    ncols = 5
    nrows = (n_img - 1) // 5 + 1

    fig, ax = plt.subplots(figsize=(3 * ncols, 3 * nrows),
                           nrows=nrows,
                           ncols=ncols)
    ax = ax.flatten()

    ### load images and masks
    imgs = [0. for i in range(n_img)]
    masks = [0. for i in range(n_img)]
    for i in tqdm.tqdm(range(n_img)):
        img = imread(flist_in[i]).astype(float)
        # normalize to (channel, rows, cols); the smallest axis is assumed
        # to be the channel axis -- TODO confirm
        if len(img.shape) == 2:
            img = np.expand_dims(img, 0)
        if img.shape[-1] == np.min(img.shape):
            img = np.moveaxis(img, -1, 0)
        imgs[i] = img[0, ::downshape, ::downshape]
        masks[i] = imread(flist_ma[i])[::downshape, ::downshape].astype(float)

    ### plotting
    for i in tqdm.tqdm(range(n_img)):

        _, filename = os.path.split(flist_in[i])
        filename, _ = os.path.splitext(filename)

        # BUGFIX: contrast limits were previously computed from the
        # loop-leftover ``img`` (the *last* loaded image) instead of the
        # image actually being displayed.
        ax[i].imshow(imgs[i],
                     'gray',
                     interpolation='none',
                     vmin=np.percentile(imgs[i], 1.),
                     vmax=np.percentile(imgs[i], 99.))
        cmap = mpl.colors.LinearSegmentedColormap.from_list(
            'my_cmap', ['black', 'aqua'], 256)
        ax[i].imshow(masks[i], cmap=cmap, interpolation='none', alpha=.3)

        ax[i].set_title(("\n".join(wrap(filename, 20))), fontsize=6)

    # hide all axes and drop the unused trailing subplots
    for a in ax:
        a.axis('off')
    for j in range(i + 1, len(ax)):
        ax[j].remove()

    if autoclose:
        plt.pause(10)
        plt.close()

    if saveFig:
        print('### Saving image...')
        # save figure next to the masks, named after the condition folder
        _, cond = os.path.split(input_folder)
        fig.savefig(os.path.join(input_folder, 'result_segmentation',
                                 cond + '_finalMasks.png'),
                    dpi=300)
        print('### Done saving!')

    return fig
Beispiel #6
0
    def computeMaskForAll(self):
        """Compute and save the final smoothened mask for every image.

        The mask source per image is ``self.chosen_masks[i]``: 'w'
        (watershed), 'c' (classifier), 'm' (manual) or 'i' (ignore).  When
        smoothing yields an empty mask, the smoothing (and, for 'c',
        thinning) parameters are progressively reduced; if that still fails
        the user is asked to draw a manual mask.  Segmentation parameters
        are re-saved after every image, final masks are written as
        ``*_finalMask`` files, and a recap figure is generated at the end.
        """
        self.read_segmentation_params()
        save_folder = os.path.join(self.imageFolder, 'result_segmentation')
        folder, cond = os.path.split(self.imageFolder)

        #############################################
        # clean masks previously generated
        #############################################

        flist_to_remove = io.get_image_list(save_folder, '_finalMask',
                                            'include')
        for f in flist_to_remove:
            os.remove(f)
        segm_params = os.path.join(save_folder, 'segmentation_params.csv')
        if os.path.exists(segm_params):
            os.remove(segm_params)
        morpho_file = os.path.join(save_folder, cond + '_morpho_params.json')
        if os.path.exists(morpho_file):
            os.remove(morpho_file)

        #############################################
        # save parameters used to make segmentation
        #############################################

        ioSeg.save_segmentation_params(
            save_folder, [os.path.split(fin)[-1]
                          for fin in self.flist_in], self.chosen_masks,
            self.down_shapes, self.thinnings, self.smoothings)

        #############################################
        # generate final mask
        #############################################

        print('### Generating the smoothened masks.')
        for i in tqdm.tqdm(range(self.n_imgs)):
            folder, filename = os.path.split(self.flist_in[i])
            filename, extension = os.path.splitext(filename)

            if self.chosen_masks[i] == 'w':
                _rawmask = imread(
                    os.path.join(self.imageFolder, 'result_segmentation',
                                 filename + '_watershed' + extension))
                mask = segment.smooth_mask(_rawmask,
                                           mode='watershed',
                                           down_shape=self.down_shapes[i],
                                           smooth_order=self.smoothings[i])
                while (np.sum(mask) == 0) & (self.smoothings[i] > 5):
                    print('Mask failed...')
                    # if mask is zero, try smoothing less
                    self.smoothings[i] -= 2
                    print('Trying with: smoothing', self.smoothings[i])
                    mask = segment.smooth_mask(_rawmask,
                                               mode='watershed',
                                               down_shape=self.down_shapes[i],
                                               smooth_order=self.smoothings[i])

            elif self.chosen_masks[i] == 'c':
                _rawmask = imread(
                    os.path.join(self.imageFolder, 'result_segmentation',
                                 filename + '_classifier' + extension))
                mask = segment.smooth_mask(_rawmask,
                                           mode='classifier',
                                           down_shape=self.down_shapes[i],
                                           smooth_order=self.smoothings[i],
                                           thin_order=self.thinnings[i])
                while (np.sum(mask) == 0) & (self.smoothings[i] >
                                             5) & (self.thinnings[i] > 1):
                    print('Mask failed...')
                    # if mask is zero, try smoothing and thinning less
                    self.smoothings[i] -= 2
                    self.thinnings[i] -= 1
                    print('Trying with: smoothing', self.smoothings[i],
                          ' thinnings', self.thinnings[i])
                    mask = segment.smooth_mask(_rawmask,
                                               mode='classifier',
                                               down_shape=self.down_shapes[i],
                                               smooth_order=self.smoothings[i],
                                               thin_order=self.thinnings[i])

            elif self.chosen_masks[i] == 'm':
                # ask the user to draw a mask if none was generated before
                if not os.path.exists(
                        os.path.join(self.imageFolder, 'result_segmentation',
                                     filename + '_manual' + extension)):
                    self.m = GUIs.manualmask.makeManualMask(self.flist_in[i])
                    self.m.show()
                    self.m.exec()
                else:
                    print('A previously generated manual mask exists!')
                _rawmask = imread(
                    os.path.join(self.imageFolder, 'result_segmentation',
                                 filename + '_manual' + extension))
                mask = segment.smooth_mask(_rawmask,
                                           mode='manual',
                                           down_shape=self.down_shapes[i],
                                           smooth_order=self.smoothings[i])
                while (np.sum(mask) == 0) & (self.smoothings[i] > 5):
                    print('Mask failed...')
                    # if mask is zero, try smoothing less
                    # BUGFIX: the smoothing parameter was decremented twice
                    # per retry here (duplicated copy-paste line), unlike the
                    # 'w' branch; decrement once per retry.
                    self.smoothings[i] -= 2
                    print('Trying with: smoothing', self.smoothings[i])
                    mask = segment.smooth_mask(_rawmask,
                                               mode='manual',
                                               down_shape=self.down_shapes[i],
                                               smooth_order=self.smoothings[i])
            elif self.chosen_masks[i] == 'i':
                continue
            else:
                # unknown selector: skip instead of crashing below on an
                # unbound ``mask``
                print('Unknown mask choice for', filename, '- skipping.')
                continue

            if np.sum(mask) == 0:
                # every automatic attempt failed: fall back to a manual mask
                QMessageBox.warning(
                    self, 'Warning, no trainingset!',
                    'The method selected didn\'t generate a valid mask. Please input the mask manually.'
                )

                self.chosen_masks[i] = 'm'
                ioSeg.save_segmentation_params(
                    save_folder,
                    [os.path.split(fin)[-1]
                     for fin in self.flist_in], self.chosen_masks,
                    self.down_shapes, self.thinnings, self.smoothings)
                if not os.path.exists(
                        os.path.join(self.imageFolder, 'result_segmentation',
                                     filename + '_manual' + extension)):
                    self.m = GUIs.manualmask.makeManualMask(self.flist_in[i])
                    self.m.show()
                    self.m.exec()
                else:
                    print('A previously generated manual mask exists!')
                _rawmask = imread(
                    os.path.join(self.imageFolder, 'result_segmentation',
                                 filename + '_manual' + extension))
                mask = segment.smooth_mask(_rawmask,
                                           mode='manual',
                                           down_shape=self.down_shapes[i],
                                           smooth_order=self.smoothings[i])

            # persist possibly-updated parameters after every image
            ioSeg.save_segmentation_params(
                save_folder, [os.path.split(fin)[-1]
                              for fin in self.flist_in], self.chosen_masks,
                self.down_shapes, self.thinnings, self.smoothings)

            # save final mask
            new_name = os.path.join(folder, 'result_segmentation',
                                    filename + '_finalMask' + extension)
            imsave(new_name, mask)

        print('### Done computing masks!')

        #############################################
        # generate recap
        #############################################

        w = overview.generate_overview_finalMask(
            self.imageFolder,
            chosen=[c != 'i' for c in self.chosen_masks],
            saveFig=True,
            downshape=3)
        w.show()
Beispiel #7
0
    def trainModel(self, archBox):
        """Train the pixel classifier from the model folder's trainingset.

        Loads all trainingset images (first channel only) and their ``_GT``
        ground-truth masks, generates the feature training set, trains the
        classifier (MLP when the deep-model checkbox is set, otherwise the
        non-deep model), and saves model, scaler and parameters to
        ``self.modelFolder``.

        Parameters
        ----------
        archBox : unused in this method -- presumably the architecture
            selection widget; verify against caller.
        """
        self.read_and_check_params()

        #############################################
        # load images to be used as training set
        #############################################
        training_folder = os.path.join(self.modelFolder, 'trainingset')
        flist_in = ioDT.get_image_list(training_folder,
                                       string_filter='_GT',
                                       mode_filter='exclude')
        img_train = []
        for f in flist_in:
            img = imread(f)
            # normalize to (channel, rows, cols); the smallest axis is
            # assumed to be the channel axis -- TODO confirm
            if len(img.shape) == 2:
                img = np.expand_dims(img, 0)
            if img.shape[-1] == np.min(img.shape):
                img = np.moveaxis(img, -1, 0)
            img_train.append(img[0])
        # img_train = np.array(img_train)

        flist_gt = ioDT.get_image_list(training_folder,
                                       string_filter='_GT',
                                       mode_filter='include')
        gt_train = [imread(f) for f in flist_gt]
        gt_train = [g.astype(int) for g in gt_train]

        print('##### Training set:')
        for i, f in enumerate(zip(flist_in, flist_gt)):
            print(i + 1, '\t',
                  os.path.split(f[0])[-1], '\t',
                  os.path.split(f[1])[-1])

        #############################################
        # compute features and generate training set and weights
        #############################################

        print('##### Generating training set...')
        X, Y, w, self.scaler = train.generate_training_set(
            img_train, [g.astype(np.uint8) for g in gt_train],
            sigmas=self.params['sigmas'],
            down_shape=self.params['down_shape'],
            edge_size=self.params['edge_size'],
            fraction=self.params['fraction'],
            feature_mode=self.params['feature_mode'],
            bias=self.params['bias'])

        #############################################
        # Train the model
        #############################################

        print('##### Training model...')
        start = time.time()
        self.classifier = train.train_classifier(
            X, Y, w, deep=self.deepModel.isChecked(), hidden=(350, 50))
        print('Models trained in %.3f seconds.' % (time.time() - start))
        # print('classes_: ', self.classifier.classes_)
        # print('coef_: ', self.classifier.coef_)

        #############################################
        # Save the model
        #############################################

        ioML.save_model(self.modelFolder,
                        self.classifier,
                        self.scaler,
                        sigmas=self.params['sigmas'],
                        down_shape=self.params['down_shape'],
                        edge_size=self.params['edge_size'],
                        fraction=self.params['fraction'],
                        feature_mode=self.params['feature_mode'],
                        bias=self.params['bias'],
                        deep=self.deepModel.isChecked())
        print('##### Model saved!')
        self.predictButton.setEnabled(True)
Beispiel #8
0
    def selectModelFolder(self):
        """Ask the user for a model folder, validate it and load the model.

        Validates that the folder contains a ``trainingset`` subfolder; if
        ground-truth masks are missing, opens a manual-annotation dialog per
        missing mask.  If a trained model (``scaler.pkl``) exists it is
        loaded and the prediction buttons are enabled; otherwise the user is
        told to train first.  On any validation failure ``self.modelFolder``
        is reset to the sentinel '-'.
        """
        self.modelFolder = QFileDialog.getExistingDirectory(
            self, "Select Input Folder of Model")

        # BUGFIX/robustness: getExistingDirectory returns '' when the user
        # cancels the dialog; bail out instead of treating '' as a folder.
        if not self.modelFolder:
            self.modelFolder = '-'
            return

        # check if a trainingset is present
        # a trainingset needs to exist for every model, even if the model is already trained.
        trainingset_folder = os.path.join(self.modelFolder, 'trainingset')
        if os.path.exists(trainingset_folder):
            flist_in = ioDT.get_image_list(trainingset_folder,
                                           string_filter='_GT',
                                           mode_filter='exclude')
            flist_in.sort()
            flist_gt = ioDT.get_image_list(trainingset_folder,
                                           string_filter='_GT',
                                           mode_filter='include')
            flist_gt.sort()

            if len(flist_in) == 0:
                QMessageBox.warning(
                    self, 'Warning, no trainingset!',
                    'Selected "' + self.modelFolder +
                    '" but no trainingset *data* detected. Transfer some images in the "trainingset" folder.'
                )
                self.modelFolder = '-'
                return
            if len(flist_in) != len(flist_gt):
                QMessageBox.warning(
                    self, 'Warning, trainingset incomplete!',
                    'Selected "' + self.modelFolder +
                    '" but not all masks have been created.\nPlease provide manually annotated masks.'
                )
                # open a manual-annotation dialog for every missing mask
                for f in flist_in:
                    fn, ext = os.path.splitext(f)
                    mask_name = fn + '_GT' + ext
                    if not os.path.exists(mask_name):
                        m = manualmask.makeManualMask(f,
                                                      subfolder='',
                                                      fn=fn + '_GT' + ext)
                        # m.setModal(True)
                        m.show()
                        m.exec()
                # self.modelFolder = '-'
                # return
        else:
            QMessageBox.warning(
                self, 'Warning, no trainingset!', 'Selected "' +
                self.modelFolder + '" but no "trainingset" folder detected.')
            self.modelFolder = '-'
            return
        # check if the model is already trained.
        # if not, only allow training button
        model_file = os.path.join(self.modelFolder, 'scaler.pkl')
        if not os.path.exists(model_file):
            QMessageBox.warning(
                self, 'Warning, train model!',
                'Train the model before loading!\nSetting default parameters...'
            )
        else:
            self.loadModel()
            if self.classifier is None:
                return
            self.predictButton.setEnabled(True)
            self.recapButton.setEnabled(True)
            self.inspectButton.setEnabled(True)

        self.modelFolderSpace.setText(self.modelFolder)
        self.set_params()
        self.sigmasSpace.setEnabled(True)
        self.down_shapeSpace.setEnabled(True)
        self.edge_sizeSpace.setEnabled(True)
        self.fractionSpace.setEnabled(True)
        self.biasSpace.setEnabled(True)
        self.feature_modeSpace.setEnabled(True)
        self.trainButton.setEnabled(True)
Beispiel #9
0
def generate_overview(input_folder,
                      saveFig=True,
                      fileName='',
                      start=None,
                      stop=None,
                      downshape=1):
    """Build a grid overview of images overlaid with classifier + watershed.

    Parameters
    ----------
    input_folder : str
        Folder with the raw images and a ``result_segmentation`` subfolder
        containing ``*_classifier.tif`` and ``*_watershed.tif`` files.
    saveFig : bool
        If True, save the figure (default name
        ``<cond>_recap_classifier.png`` inside ``result_segmentation``).
    fileName : str
        Explicit output path for the figure; '' means use the default name.
    start, stop : int or None
        Slice of the image list to display; ``None`` means full range.
    downshape : int
        Subsampling step applied to images and masks for display.

    Returns
    -------
    matplotlib.figure.Figure
        The generated overview figure.
    """
    print('Generating recap image at', input_folder)

    flist_in = io.get_image_list(input_folder)
    segment_folder = os.path.join(input_folder, 'result_segmentation')
    flist_ws = io.get_image_list(segment_folder, '_watershed.tif', 'include')
    flist_cl = io.get_image_list(segment_folder, '_classifier.tif', 'include')

    # idiom fix: identity check against None instead of '=='
    if start is None:
        start = 0
    if stop is None:
        stop = len(flist_in)
    flist_in = flist_in[start:stop]
    flist_ws = flist_ws[start:stop]
    flist_cl = flist_cl[start:stop]

    n_img = len(flist_in)
    ncols = 5
    nrows = (n_img - 1) // 5 + 1

    ### load images, classifier predictions and watershed masks
    imgs = [0. for i in range(n_img)]
    classifiers = [0. for i in range(n_img)]
    watersheds = [0. for i in range(n_img)]
    for i in tqdm.tqdm(range(n_img)):
        img = imread(flist_in[i]).astype(float)
        # normalize to (channel, rows, cols); the smallest axis is assumed
        # to be the channel axis -- TODO confirm
        if img.ndim == 2:
            img = np.expand_dims(img, 0)
        if img.shape[-1] == np.min(img.shape):
            img = np.moveaxis(img, -1, 0)
        imgs[i] = img[0, ::downshape, ::downshape]
        classifiers[i] = imread(
            flist_cl[i])[::downshape, ::downshape].astype(float)
        watersheds[i] = imread(
            flist_ws[i])[::downshape, ::downshape].astype(float)

    ### plotting
    fig, ax = plt.subplots(figsize=(3 * ncols, 3 * nrows),
                           nrows=nrows,
                           ncols=ncols)
    ax = ax.flatten()

    for i in tqdm.tqdm(range(n_img)):

        _, filename = os.path.split(flist_in[i])
        filename, _ = os.path.splitext(filename)

        # BUGFIX: contrast limits were previously computed from the
        # loop-leftover ``img`` (the *last* loaded image) instead of the
        # image actually being displayed.
        ax[i].imshow(imgs[i],
                     'gray',
                     interpolation='none',
                     vmin=np.percentile(imgs[i], 1.),
                     vmax=np.percentile(imgs[i], 99.))
        cmap = mpl.colors.LinearSegmentedColormap.from_list(
            'my_cmap', ['black', 'red'], 256)
        ax[i].imshow(classifiers[i], cmap=cmap, interpolation='none', alpha=.4)
        cmap = mpl.colors.LinearSegmentedColormap.from_list(
            'my_cmap', ['black', 'aqua'], 256)
        ax[i].imshow(watersheds[i], cmap=cmap, interpolation='none', alpha=.3)

        ax[i].set_title("\n".join(wrap(filename, 20)), fontsize=8)

    # hide all axes and drop the unused trailing subplots
    for a in ax:
        a.axis('off')
    for j in range(i + 1, len(ax)):
        ax[j].remove()

    if saveFig:
        print('Saving image...')
        # save figure
        _, cond = os.path.split(input_folder)
        print(fileName)
        if fileName == '':
            fileName = os.path.join(input_folder, 'result_segmentation',
                                    cond + '_recap_classifier.png')
        fig.savefig(fileName, dpi=300)
        print('Done saving!')

    return fig
Beispiel #10
0
def parsing_images(image_folder,
                   mask_folder,
                   identifier_string,
                   objects_at_border=False):
    """Crop every labeled object out of each image/mask pair and save it.

    Parameters
    ----------
    image_folder : str
        Folder containing the raw images (files *without*
        ``identifier_string`` in their name).
    mask_folder : str
        Folder containing the mask images (files *with*
        ``identifier_string`` in their name).
    identifier_string : str
        Suffix that distinguishes mask files from image files (e.g. '_GT').
    objects_at_border : bool
        If False, objects touching the image border are saved into a
        separate ``objects_at_image_border`` subfolder instead.

    Exits via ``sys.exit`` when an image has no matching mask.
    """
    # make directories if not already present (makedirs creates the parent
    # 'splitObjects' folder together with 'result_segmentation')
    images_output_dir = os.path.join(image_folder, 'splitObjects')
    masks_output_dir = os.path.join(images_output_dir, 'result_segmentation')
    os.makedirs(masks_output_dir, exist_ok=True)

    # read images and promote greyscale/channel-last layouts to
    # (channel, rows, cols)
    flist_in = io.get_image_list(image_folder,
                                 string_filter=identifier_string,
                                 mode_filter='exclude')
    img_to_crop = []
    for f in flist_in:
        img = imread(f)
        if img.ndim == 2:
            img = np.expand_dims(img, 0)
        if img.shape[-1] == np.min(img.shape):
            img = np.moveaxis(img, -1, 0)
        img_to_crop.append(img)

    # read masks/groundtruth
    flist_mask = io.get_image_list(mask_folder,
                                   string_filter=identifier_string,
                                   mode_filter='include')

    # check that number of masks = number of images, otherwise, find missing mask
    if len(flist_in) != len(flist_mask):
        for f_in in flist_in:
            parent, filename = os.path.split(f_in)
            filename, file_extension = os.path.splitext(filename)
            # BUGFIX: the expected mask lives in mask_folder (where
            # flist_mask was listed), not in image_folder -- the old check
            # could never match when the two folders differ.
            mask_name = os.path.join(
                mask_folder, filename + identifier_string + file_extension)
            if mask_name not in flist_mask:
                print('\"' + mask_name + '\" not found!')
                sys.exit(
                    'Please check that mask is present for every image in input folder!'
                )

    # read and convert masks
    mask_to_crop = [imread(f) for f in flist_mask]
    mask_to_crop = [g.astype(int) for g in mask_to_crop]

    for i in range(len(mask_to_crop)):
        region_counter = 0

        # label mask
        labeled_mask, num_features = label(mask_to_crop[i], return_num=True)

        # for naming of cropped regions
        parent, filename = os.path.split(flist_in[i])
        filename, file_extension = os.path.splitext(filename)

        for region in measure.regionprops(labeled_mask):

            # compute coordinates of regions (half-open bbox from skimage)
            [min_row, min_col, max_row, max_col] = region.bbox
            # exclude objects at edge if required
            if not objects_at_border:
                # BUGFIX: the column extent must be tested against
                # shape[1]; the original compared max_row twice.
                if min_row == 0 or min_col == 0 or \
                        max_row == labeled_mask.shape[0] or \
                        max_col == labeled_mask.shape[1]:
                    # leave cropped objects_at_border in a different folder
                    border_objects_output_dir = os.path.join(
                        images_output_dir, 'objects_at_image_border')
                    if not os.path.isdir(border_objects_output_dir):
                        os.mkdir(border_objects_output_dir)
                    cropped_mask = mask_to_crop[i][min_row:max_row,
                                                   min_col:max_col]
                    cropped_img = img_to_crop[i][:, min_row:max_row,
                                                 min_col:max_col]
                    # save cropped regions
                    img_new_name = os.path.join(
                        border_objects_output_dir, filename +
                        "_cropped%02d" % region_counter + file_extension)
                    mask_new_name = os.path.join(
                        border_objects_output_dir,
                        filename + "_cropped%02d_finalMask" % region_counter +
                        file_extension)
                    imsave(mask_new_name, cropped_mask.astype(np.uint8))
                    imsave(img_new_name, cropped_img)
                    region_counter += 1
                    continue

            # crop images and masks based on coordinates of regions in mask
            cropped_mask = mask_to_crop[i][min_row:max_row, min_col:max_col]
            cropped_img = img_to_crop[i][:, min_row:max_row, min_col:max_col]
            # save cropped regions
            img_new_name = os.path.join(
                images_output_dir,
                filename + "_cropped%02d" % region_counter + file_extension)
            mask_new_name = os.path.join(
                masks_output_dir, filename +
                "_cropped%02d_finalMask" % region_counter + file_extension)
            imsave(mask_new_name, cropped_mask.astype(np.uint8))
            imsave(img_new_name, cropped_img)
            region_counter += 1

    # save parameters
    flist_cropped_images = io.get_image_list(images_output_dir)
    filenames = [os.path.split(fin)[1] for fin in flist_cropped_images]
    chosen_mask = 'user input'
    down_shape = 0.5
    thinning = smoothing = 'N.A.'
    ioSeg.save_segmentation_params(masks_output_dir, filenames, chosen_mask,
                                   down_shape, thinning, smoothing)
    print('Done!')
    return
Beispiel #11
0
# Folders to process: every folder name that is neither a model folder nor
# explicitly excluded, resolved relative to parent_folder.
image_folders = [g for g in folder_names if not g in model_folders_name + exclude_folder]
image_folders = [os.path.join(parent_folder, i) for i in image_folders]

###############################################################################

if __name__ == '__main__':
    app = PyQt5.QtWidgets.QApplication(sys.argv)

    for image_folder in image_folders:

        ### compute parent folder as absolute path
        image_folder = os.path.abspath(image_folder)
    
        print('\n-------------'+image_folder+'------------\n')

        flist_in = ioDT.get_image_list(image_folder)
        n_imgs = len( flist_in )
        if os.path.exists(os.path.join(image_folder,'result_segmentation','segmentation_params.csv')):
            flist_in, chosen_masks, down_shapes, thinnings, smoothings = ioSeg.load_segmentation_params( os.path.join(image_folder,'result_segmentation') )
            flist_in = [os.path.join(image_folder,i) for i in flist_in]
        else:
            chosen_masks = ['w' for i in range(n_imgs)]
            down_shapes = [0.50 for i in range(n_imgs)]
            thinnings = [10 for i in range(n_imgs)]
            smoothings = [25 for i in range(n_imgs)]

        save_folder = os.path.join(image_folder, 'result_segmentation')
        ioSeg.save_segmentation_params(  save_folder, 
                                                        [os.path.split(fin)[-1] for fin in flist_in],
                                                        chosen_masks,
                                                        down_shapes, 
Beispiel #12
0
def compute_morphological_info(input_folder,
                               compute_meshgrid=False,
                               compute_locoefa=True):
    '''Compute morphological measurements for every segmented image in a folder.

    Masks are read from the ``result_segmentation`` subfolder of
    ``input_folder``; images whose chosen mask is flagged ``'i'`` (ignore)
    in the saved segmentation parameters are skipped.

    Parameters
    ----------
    input_folder : str
        Folder containing the raw images.
    compute_meshgrid : bool, optional
        Forwarded to ``computemorphology.compute_morphological_info``.
    compute_locoefa : bool, optional
        Forwarded to ``computemorphology.compute_morphological_info``.

    Returns
    -------
    pandas.DataFrame
        One row of morphological measurements per processed image.
    '''

    print('### Computing morphology of images in:', input_folder)

    flist_all = io.get_image_list(input_folder)
    masks_folder = os.path.join(input_folder, 'result_segmentation')
    _, chosen_mask, down_shape, _, _ = ioSeg.load_segmentation_params(
        masks_folder)
    # drop images flagged 'i' (ignore) in the segmentation params
    flist_in = [
        flist_all[i] for i in range(len(flist_all)) if chosen_mask[i] != 'i'
    ]
    flist_ma = io.get_image_list(masks_folder,
                                 string_filter='_finalMask.tif',
                                 mode_filter='include')

    # number of mask/image pairs to measure
    N_img = len(flist_in)

    # use at most 80% of the available cores, but always at least one
    N_cores = np.clip(int(0.8 * multiprocessing.cpu_count()), 1, None)

    try:
        # Parallel path. 'istarmap' is a project-side monkey-patch of
        # multiprocessing.Pool (a lazy starmap) -- TODO confirm it is
        # installed before this is called. The context manager guarantees
        # the worker processes are terminated even on error (the previous
        # version never closed the pool).
        with multiprocessing.Pool(N_cores) as pool:
            data_list = list(
                tqdm.tqdm(pool.istarmap(
                    computemorphology.compute_morphological_info,
                    zip(repeat(None), flist_in, flist_ma, down_shape,
                        repeat(compute_meshgrid), repeat(compute_locoefa))),
                          total=N_img))

        # DataFrame.append was removed in pandas 2.0; building the frame
        # from the collected rows is the supported equivalent (assumes each
        # row is a dict or Series -- TODO confirm return type of the worker).
        df = pd.DataFrame(data_list)

    except ValueError:
        # if anything goes wrong, fall back to sequential processing
        rows = []
        for i in tqdm.tqdm(range(N_img)):
            f_in, f_ma = flist_in[i], flist_ma[i]
            mask = imread(f_ma)

            # compute one row of measurements for this image/mask pair
            rows.append(
                computemorphology.compute_morphological_info(
                    mask,
                    f_in,
                    f_ma,
                    down_shape[i],
                    compute_meshgrid,
                    compute_locoefa=compute_locoefa))

        df = pd.DataFrame(rows)

    return df