Example #1
0
    def run(self, input_image, tile_index_z):
        """Write a powers-of-two tile pyramid of ``input_image`` to disk.

        The image is zero-padded to the grid returned by ``self.round``,
        then repeatedly subsampled by 2 and cut into fixed-size tiles,
        each saved via ``m.save_tif8``. Level ``w`` starts at 0 (full
        resolution) and increases as resolution halves.

        Args:
            input_image: 2-D array (y, x) to tile.
            tile_index_z: z index (slice number) recorded in tile paths.
        """
        in_shape = input_image.shape
        # Zero-pad up to the tiling grid; self.round presumably returns a
        # shape compatible with the tile size — TODO confirm.
        pad_shape = self.round(in_shape)
        original_image = np.zeros(pad_shape, dtype=input_image.dtype)
        original_image[:in_shape[0], :in_shape[1]] = input_image

        (original_image_num_pixels_y,
         original_image_num_pixels_x) = original_image.shape

        current_image_num_pixels_y = original_image_num_pixels_y
        current_image_num_pixels_x = original_image_num_pixels_x
        self.tile_index_z = tile_index_z
        self.tile_index_w = 0
        images_stride = 1

        print('Image size (x, y): ', original_image_num_pixels_x,
              original_image_num_pixels_y)

        # Emit pyramid levels until the whole image fits within half a tile.
        while (current_image_num_pixels_y > self.tile_num_pixels_y / 2 or
               current_image_num_pixels_x > self.tile_num_pixels_x / 2):

            current_tile_image_path = self.output_tile_image_path + self.tile_path_wz.format(
                self.tile_index_w, self.tile_index_z)
            m.mkdir_safe(current_tile_image_path)

            # Downsample by plain subsampling (every images_stride-th pixel).
            current_image = original_image[::images_stride, ::images_stride]

            # Integer ceiling division: tiles needed to cover this level.
            num_tiles_y = -(-current_image_num_pixels_y // self.tile_num_pixels_y)
            num_tiles_x = -(-current_image_num_pixels_x // self.tile_num_pixels_x)

            print('Scale: ', images_stride)
            print('Number of panels (x, y): ', num_tiles_x, num_tiles_y)

            for iy in range(num_tiles_y):
                for ix in range(num_tiles_x):

                    current_tile_image_name = self.output_tile_image_path + self.tile_images_filename_wzyx.format(
                        self.tile_index_w, self.tile_index_z, iy, ix)

                    y = iy * self.tile_num_pixels_y
                    x = ix * self.tile_num_pixels_x

                    # Edge tiles can be smaller than the tile size; pad them
                    # with zeros ("fringe") up to the full tile shape.
                    tmp = current_image[y:y + self.tile_num_pixels_y,
                                        x:x + self.tile_num_pixels_x]
                    tile_with_fringe = np.zeros(
                        (self.tile_num_pixels_y, self.tile_num_pixels_x),
                        self.images_dtype)
                    tile_with_fringe[0:tmp.shape[0], 0:tmp.shape[1]] = tmp
                    m.save_tif8(tile_with_fringe, current_tile_image_name)

            # Ceiling-halve so the counters track the actual shape of the
            # next subsampled array, ceil(original / stride), using integer
            # arithmetic (the previous '/ 2' produced floats on Python 3).
            current_image_num_pixels_y = (current_image_num_pixels_y + 1) // 2
            current_image_num_pixels_x = (current_image_num_pixels_x + 1) // 2
            self.tile_index_w += 1
            images_stride *= 2
Example #2
0
    def run(self, input_ids, tile_index_z):
        """Write a powers-of-two tile pyramid of the segmentation id image
        and update per-id bookkeeping.

        The id image is zero-padded to the grid returned by ``self.round``,
        its id histogram is folded into ``self.id_counts``, and it is then
        repeatedly subsampled by 2 and cut into fixed-size uint32 tiles
        saved via ``m.save_hdf5``. Existing tiles are backed up (renamed
        with a trailing '_') so the write can be undone, and every
        (id, w, z, iy, ix) occurrence is appended to ``self.id_tile_list``.

        Args:
            input_ids: 2-D integer array (y, x) of segmentation ids.
            tile_index_z: z index (slice number) recorded in tile paths.
        """
        in_shape = input_ids.shape
        pad_shape = self.round(in_shape)
        original_ids = np.zeros(pad_shape, dtype=input_ids.dtype)
        original_ids[:in_shape[0], :in_shape[1]] = input_ids

        (original_ids_num_pixels_y,
         original_ids_num_pixels_x) = original_ids.shape

        # Fold this slice's id histogram into the global per-id counts,
        # growing self.id_counts if a new maximum id appears.
        current_ids_counts = np.bincount(original_ids.ravel())
        current_ids_counts_ids = np.nonzero(current_ids_counts)[0]
        current_max = np.max(current_ids_counts_ids)
        self.tile_index_z = tile_index_z

        if self.id_max < current_max:
            self.id_max = current_max
            self.id_counts.resize(self.id_max + 1)

        self.id_counts[current_ids_counts_ids] = self.id_counts[
            current_ids_counts_ids] + np.int64(
                current_ids_counts[current_ids_counts_ids])

        current_ids_num_pixels_y = original_ids_num_pixels_y
        current_ids_num_pixels_x = original_ids_num_pixels_x
        self.tile_index_w = 0
        ids_stride = 1

        # Emit pyramid levels until the whole image fits within half a tile.
        while (current_ids_num_pixels_y > self.tile_num_pixels_y / 2 or
               current_ids_num_pixels_x > self.tile_num_pixels_x / 2):

            current_tile_ids_path = self.output_tile_ids_path + self.tile_path_wz.format(
                self.tile_index_w, self.tile_index_z)
            m.mkdir_safe(current_tile_ids_path)

            # Downsample by plain subsampling (every ids_stride-th pixel).
            current_ids = original_ids[::ids_stride, ::ids_stride]

            # Integer ceiling division: tiles needed to cover this level.
            num_tiles_y = -(-current_ids_num_pixels_y // self.tile_num_pixels_y)
            num_tiles_x = -(-current_ids_num_pixels_x // self.tile_num_pixels_x)

            print('Scale: ', ids_stride)
            print('Number of panels (x, y): ', num_tiles_x, num_tiles_y)

            for iy in range(num_tiles_y):
                for ix in range(num_tiles_x):

                    y = iy * self.tile_num_pixels_y
                    x = ix * self.tile_num_pixels_x

                    current_tile_ids_name = self.output_tile_ids_path + self.tile_ids_filename_wzyx.format(
                        self.tile_index_w, self.tile_index_z, iy, ix)

                    # Zero-pad edge tiles up to the full tile shape.
                    tile_ids = np.zeros(
                        (self.tile_num_pixels_y, self.tile_num_pixels_x),
                        np.uint32)
                    tile_ids_non_padded = current_ids[
                        y:y + self.tile_num_pixels_y,
                        x:x + self.tile_num_pixels_x]
                    tile_ids[0:tile_ids_non_padded.shape[0],
                             0:tile_ids_non_padded.shape[1]] = tile_ids_non_padded

                    if os.path.isfile(current_tile_ids_name):
                        # Back up the existing tile so the edit can be undone.
                        shutil.move(current_tile_ids_name,
                                    current_tile_ids_name + '_')
                        # BUGFIX: extend() iterated the path string and added
                        # it character-by-character; append() records the
                        # whole path as one undo entry.
                        self.u_info.ids_files_undo.append(current_tile_ids_name)
                    m.save_hdf5(current_tile_ids_name,
                                self.u_info.tile_var_name, tile_ids)

                    # Record which ids appear in this tile.
                    for unique_tile_id in np.unique(tile_ids):
                        self.id_tile_list.append(
                            (unique_tile_id, self.tile_index_w,
                             self.tile_index_z, iy, ix))

            # Ceiling-halve so the counters track the actual shape of the
            # next subsampled array, ceil(original / stride), using integer
            # arithmetic (the previous '/ 2' produced floats on Python 3).
            current_ids_num_pixels_y = (current_ids_num_pixels_y + 1) // 2
            current_ids_num_pixels_x = (current_ids_num_pixels_x + 1) // 2
            self.tile_index_w += 1
            ids_stride *= 2
Example #3
0
    def _Run(self, parent, params, comm_title):
        """Prepare inputs for FFN inference and launch the external runner.

        Steps: optionally remove previous seg-0_0_0 results (asking the
        user first), stack the target images into a "grayscale_inf.h5"
        file, write an "inference_params.pbtxt" configuration, then call
        the run-inference command via ``s.call``.

        Args:
            parent: Qt parent widget; also provides
                ``u_info.exec_run_inference`` (the command line to run).
            params: dict of dialog parameters (folder paths, model files,
                checkpoint interval, 'Sparse Z' flag, ...).
            comm_title: title string printed around the external command.

        Returns:
            True on success, False on failure or user cancellation.
        """
        ##
        ## Remove previous results.
        ##
        removal_file1 = os.path.join(params['Output Inference Folder'], '0', '0', 'seg-0_0_0.npz')
        removal_file2 = os.path.join(params['Output Inference Folder'], '0', '0', 'seg-0_0_0.prob')

        if os.path.isfile(removal_file1) or os.path.isfile(removal_file2):
            reply = QMessageBox.question(
                parent, 'FFN',
                'seg-0_0_0 files were found at the Output Inference Folder. Remove them?',
                QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
            if reply == QMessageBox.Yes:
                with contextlib.suppress(FileNotFoundError):
                    os.remove(removal_file1)
                with contextlib.suppress(FileNotFoundError):
                    os.remove(removal_file2)
                print('seg-0_0_0 files were removed.')
            else:
                print('FFN inference was canceled.')
                # Was a bare 'return' (None); False keeps the return type
                # consistent and is equally falsy for callers.
                return False

        ##
        ## h5 file (target image file) generation.
        ##
        target_image_file_h5 = os.path.join(params['FFN File Folder'], "grayscale_inf.h5")

        try:
            target_image_files = m.ObtainImageFiles(params['Target Image Folder'])
            images = [cv2.imread(i, cv2.IMREAD_GRAYSCALE) for i in target_image_files]
            images = np.array(images)
            image_z = images.shape[0]
            image_y = images.shape[1]
            image_x = images.shape[2]
            image_mean = np.mean(images).astype(np.int16)
            image_std = np.std(images).astype(np.int16)
            print('x: {}, y: {}, z: {}'.format(image_x, image_y, image_z))

            with h5py.File(target_image_file_h5, 'w') as f:
                f.create_dataset('raw', data=images, compression='gzip')
            print('"grayscale_inf.h5" file (target inference image) was generated.')
        except Exception as e:
            # Narrowed from a bare 'except:' (which also swallowed
            # KeyboardInterrupt/SystemExit) and report the actual error.
            print("Error: Target Image h5 was not generated.")
            print(e)
            return False

        ##
        ## Inference configuration file generation
        ##
        request = {}
        request['image'] = {"hdf5": "{}@raw".format(target_image_file_h5).replace('\\', '/') }
        request['image_mean'] = image_mean
        request['image_stddev'] = image_std
        request['checkpoint_interval'] = int(params['Checkpoint Interval'])
        request['seed_policy'] = "PolicyPeaks"
        request['model_checkpoint_path'] = params['Tensorflow Model Files'].replace('\\', '/')
        request['model_name'] = "convstack_3d.ConvStack3DFFNModel"

        # 'Sparse Z' selects an anisotropic model (smaller fov/deltas in z).
        if params['Sparse Z'] != Qt.Unchecked:
            request['model_args'] = "{\\\"depth\\\": 9, \\\"fov_size\\\": [33, 33, 17], \\\"deltas\\\": [8, 8, 4]}"
        else:
            request['model_args'] = "{\\\"depth\\\": 12, \\\"fov_size\\\": [33, 33, 33], \\\"deltas\\\": [8, 8, 8]}"

        request['segmentation_output_dir'] = params['Output Inference Folder'].replace('\\', '/')
        inference_options = {}
        inference_options['init_activation'] = 0.95
        inference_options['pad_value'] = 0.05
        inference_options['move_threshold'] = 0.9
        inference_options['min_boundary_dist'] = {"x": 1, "y": 1, "z": 1}
        inference_options['segment_threshold'] = 0.6
        inference_options['min_segment_size'] = 1000
        request['inference_options'] = inference_options

        config_file = os.path.join(params['FFN File Folder'], "inference_params.pbtxt")
        with open(config_file, "w", encoding='utf-8') as f:
            self.write_call(f, request, "")

        print('Configuration file was saved at :')
        print(config_file)
        print('\n')
        ##
        ## Inference start (run_inference is not used directly because of
        ## its argument-parsing problem; spawn the command instead).
        ##
        m.mkdir_safe(os.path.join(params['Output Inference Folder'], '0', '0'))

        comm_inference = parent.u_info.exec_run_inference
        comm_inference = comm_inference.split(' ')

        # Renamed from 'params' which shadowed the params dict argument.
        # np.str(...) was a deprecated alias removed in NumPy 1.20; the
        # builtin str() is the correct call.
        size_args = ['--image_size_x', str(image_x),
                     '--image_size_y', str(image_y),
                     '--image_size_z', str(image_z),
                     '--parameter_file', config_file
                     ]

        comm_inference += size_args

        print(comm_title)
        print('\n')
        s.call(comm_inference)
        print(comm_title, 'was finished.')
        return True