def _Run(self, parent, params, comm_title):
    """Prepare FFN training data and run compute_partitions / build_coordinates.

    Stacks the training images into "grayscale_maps.h5" (dataset 'raw') and the
    ground-truth label images into "groundtruth.h5" (dataset 'stack', int32) in
    params['FFN File Folder'], then executes the two external FFN commands.

    Returns True on success, False when command execution fails.
    """
    # Shell-style command line for FFN's compute_partitions (split() before run).
    comm_compute_partition = parent.u_info.exec_compute_partition + ' ' \
        + ' --input_volume ' + os.path.join(params['FFN File Folder'], "groundtruth.h5@stack") + ' ' \
        + ' --output_volume ' + os.path.join(params['FFN File Folder'], "af.h5@af") + ' ' \
        + ' --thresholds 0.025,0.05,0.075,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9 ' \
        + ' --lom_radius 24,24,24 ' \
        + ' --min_size 10000 '
    # Shell-style command line for FFN's build_coordinates.
    comm_build_coordinates = parent.u_info.exec_build_coordinates + ' ' \
        + ' --partition_volumes validation1@' + os.path.join(params['FFN File Folder'], "af.h5@af") + ' ' \
        + ' --coordinate_output ' + os.path.join(params['FFN File Folder'], "tf_record_file") + ' ' \
        + ' --margin 24,24,24 '

    # Stack training images (grayscale) into grayscale_maps.h5.
    training_image_files = m.ObtainImageFiles(params['Training Image Folder'])
    images = np.array([cv2.imread(i, cv2.IMREAD_GRAYSCALE) for i in training_image_files])
    with h5py.File(os.path.join(params['FFN File Folder'], "grayscale_maps.h5"), 'w') as f:
        f.create_dataset('raw', data=images, compression='gzip')
    print('"grayscale_maps.h5" file (training image) was generated.')

    # Stack ground-truth label images (read unchanged via -1) into groundtruth.h5 as int32.
    ground_truth_files = m.ObtainImageFiles(params['Ground Truth Folder'])
    images = np.array([cv2.imread(i, -1) for i in ground_truth_files]).astype(np.int32)
    with h5py.File(os.path.join(params['FFN File Folder'], "groundtruth.h5"), 'w') as f:
        f.create_dataset('stack', data=images, compression='gzip')
    print('"groundtruth.h5" file (ground truth) was generated.')

    try:
        print(comm_title)
        print('Start compute_partitions.')
        print(comm_compute_partition)
        s.run(comm_compute_partition.split())
        print('Start build_coordinates.')
        print(comm_build_coordinates)
        s.run(comm_build_coordinates.split())
        print(comm_title, 'was finished.')
    except Exception as e:
        # Was a bare `except:` — that also swallowed KeyboardInterrupt/SystemExit
        # and hid the real failure. Narrowed and the cause is now reported.
        print("Error: ", comm_title, " was not executed.")
        print(e)
        return False
    return True
def on_clicked(self, index):
    """Repopulate the thumbnail list with the images of the tree item just clicked."""
    selected_dir = self.dirModel.fileInfo(index).absoluteFilePath()
    # NOTE(review): original carried a "Problem: JPEG" marker here — the set of
    # formats shown depends on what m.ObtainImageFiles accepts; confirm JPEG handling.
    image_files = m.ObtainImageFiles(selected_dir)
    self.listview.setModel(_MyListModel(image_files, self.parent))
def __init__(self, parent, title, init_path):
    """Build a folder-browsing dialog: directory tree on the left, image thumbnails on the right."""
    self.w = QDialog(parent)
    self.parent = parent
    self.left = 300
    self.top = 300
    self.width = 600
    self.height = 400
    self.title = title

    # Directory-only tree view rooted at the filesystem root.
    self.dirModel = QFileSystemModel()
    self.dirModel.setRootPath(init_path)
    self.dirModel.setFilter(QDir.NoDotAndDotDot | QDir.AllDirs)
    self.treeview = QTreeView()
    self.treeview.setModel(self.dirModel)
    self.treeview.setRootIndex(self.dirModel.index(""))
    self.treeview.clicked.connect(self.on_clicked)

    # Keep only the first header section (name column) visible.
    tree_header = self.treeview.header()
    for column in range(1, tree_header.count()):
        tree_header.setSectionHidden(column, True)

    # Focus the tree on the initial path and sync dependent state.
    self.treeview.setCurrentIndex(self.dirModel.index(init_path))
    self.current_row_changed()

    # Thumbnail list seeded with the images found in the initial path.
    # NOTE(review): original carried a "Problem: JPEG" marker here — confirm
    # JPEG handling in m.ObtainImageFiles.
    self.listview = QListView()
    self.listview.setViewMode(QListView.IconMode)
    self.listview.setIconSize(QSize(192, 192))
    self.listview.setModel(_MyListModel(m.ObtainImageFiles(init_path), self.parent))

    # Tree and list side by side; Open/Cancel buttons underneath.
    self.sub_layout = QHBoxLayout()
    self.sub_layout.addWidget(self.treeview)
    self.sub_layout.addWidget(self.listview)
    self.buttonBox = QDialogButtonBox(QDialogButtonBox.Open | QDialogButtonBox.Cancel)
    self.buttonBox.accepted.connect(self.accept)
    self.buttonBox.rejected.connect(self.reject)
    self.main_layout = QVBoxLayout()
    self.main_layout.addLayout(self.sub_layout)
    self.main_layout.addWidget(self.buttonBox)

    self.w.setGeometry(self.left, self.top, self.width, self.height)
    self.w.setWindowTitle(self.title)
    self.w.setWindowIcon(QIcon(os.path.join(icon_dir, 'Mojo2_16.png')))
    self.w.setLayout(self.main_layout)
def ObtainTarget(self):
    """Return the list of image files in the folder chosen as 'Target Folder'."""
    # Read the current bottom-table parameter values.
    settings = self.ObtainParamsBottomTable(self.obj_args, self.args)
    target_dir = settings['Target Folder']
    # "Free from filelock": earlier open_files4lock handling was removed;
    # the folder is read directly.
    return m.ObtainImageFiles(target_dir)
def _Run(self, parent, params, comm_title):
    """Tile images to standard sizes, run external inference, and reassemble segmentations.

    Pads each input image with mirrored fringes up to a multiple of the unit tile
    size, splits it into tiles under a temporary folder, runs the translate
    executable (pix2pix-style --mode predict) on the tiles, then merges/crops the
    inferred tiles back to the original size and saves them in the requested
    PNG/TIFF format.

    Returns True on success, False on invalid input or configuration.
    """
    input_files = m.ObtainImageFiles(params['Image Folder'])
    if len(input_files) == 0:
        print('No images in the Image Folder.')
        return False

    # Probe the first image for size/dtype; all images are assumed to share it
    # (TODO confirm — mixed sizes would break tiling below).
    im = m.imread(input_files[0], cv2.IMREAD_UNCHANGED)
    root, ext_image = os.path.splitext(os.path.basename(input_files[0]))
    print('')
    print('Target file to check color type : ', input_files[0])
    print('Image dimensions : ', im.shape)
    print('Image filetype : ', im.dtype)
    image_size_x = im.shape[1]
    image_size_y = im.shape[0]
    if (image_size_x <= 256 or image_size_y <= 256):
        print('Image size is too small.')
        return False

    # Per-thread temporary folders (suffix = last 6 digits of the thread id).
    tmpdir_standardized = os.path.join(
        params['Output Segmentation Folder (Empty)'],
        "standardized" + str(threading.get_ident()).zfill(6)[-6:])
    if os.path.exists(tmpdir_standardized):
        shutil.rmtree(tmpdir_standardized)
    os.mkdir(tmpdir_standardized)

    tmpdir_output = os.path.join(
        params['Output Segmentation Folder (Empty)'],
        "output" + str(threading.get_ident()).zfill(6)[-6:])
    if os.path.exists(tmpdir_output):
        shutil.rmtree(tmpdir_output)
    os.mkdir(tmpdir_output)

    ## Candidate unit sizes allowed by the 'Maximal unit image size' setting.
    max_image_size = params['Maximal unit image size']
    if max_image_size == '512':
        std_sizes = np.array([512])
    elif max_image_size == '1024':
        std_sizes = np.array([512, 1024])
    elif max_image_size == '2048':
        std_sizes = np.array([512, 1024, 2048])
    else:
        print('Internal error at Maximal unit image size.')
        return False

    # Choose the unit tile size and tile counts per axis.
    # FIX: np.int was removed in NumPy 1.24 — use the builtin int().
    max_std_size = np.max(std_sizes)
    if image_size_x > max_std_size:
        unit_image_size_x = max_std_size
        num_tiles_x = int(np.ceil(float(image_size_x) / max_std_size))
    else:
        unit_image_size_x = np.min(std_sizes[std_sizes >= image_size_x])
        num_tiles_x = 1
    if image_size_y > max_std_size:
        unit_image_size_y = max_std_size
        num_tiles_y = int(np.ceil(float(image_size_y) / max_std_size))
    else:
        unit_image_size_y = np.min(std_sizes[std_sizes >= image_size_y])
        num_tiles_y = 1

    # Padded (tiled) size and the mirrored-fringe widths needed to reach it.
    converted_size_x = unit_image_size_x * num_tiles_x
    converted_size_y = unit_image_size_y * num_tiles_y
    fringe_size_x = converted_size_x - image_size_x
    fringe_size_y = converted_size_y - image_size_y

    output_files = []
    print('Image standardization: ')
    for input_file in input_files:
        im_col = m.imread(input_file)
        im_col = self._ChangeIntoColor(im_col)
        filename = os.path.basename(input_file)
        print(filename + ' ')
        # Normalize the extension to .png (translate tool emits PNG tiles).
        for ext in [
                '.TIF', '.tif', '.TIFF', '.tiff', '.PNG', '.jpg', '.jpeg',
                '.JPG', '.JPEG'
        ]:
            filename = filename.replace(ext, '.png')
        output_files.append(filename)
        # add fringe X (mirror left-right; flipcode > 0)
        im_fringe_x = cv2.flip(im_col, 1)
        im_fringe_x = im_fringe_x[:, 0:fringe_size_x]
        converted_image = cv2.hconcat([im_col, im_fringe_x])
        # add fringe Y (mirror top-bottom; flipcode = 0)
        im_fringe_y = cv2.flip(converted_image, 0)
        im_fringe_y = im_fringe_y[0:fringe_size_y, :]
        converted_image = cv2.vconcat([converted_image, im_fringe_y])
        # Save either whole or as ixx_iyy_ tiles.
        if (num_tiles_x == 1) and (num_tiles_y == 1):
            converted_filename = os.path.join(tmpdir_standardized, filename)
            m.imwrite(converted_filename, converted_image)
        else:
            for iy in range(num_tiles_y):
                for ix in range(num_tiles_x):
                    y0 = iy * unit_image_size_y
                    y1 = y0 + unit_image_size_y
                    x0 = ix * unit_image_size_x
                    x1 = x0 + unit_image_size_x
                    current_tile = converted_image[y0:y1, x0:x1]
                    converted_filename = str(ix).zfill(3)[-3:] + '_' + str(
                        iy).zfill(3)[-3:] + '_' + filename
                    converted_filename = os.path.join(
                        tmpdir_standardized, converted_filename)
                    m.imwrite(converted_filename, current_tile)

    # Complete
    print('')
    print('Images were split and changed into RGB 8bit, and stored in ',
          tmpdir_standardized)
    print('')

    # Build and run the external inference command.
    tmp = ['--mode' , 'predict' , \
           '--save_freq' , '0' , \
           '--input_dir' , tmpdir_standardized, \
           '--output_dir' , tmpdir_output, \
           '--checkpoint' , params['Model Folder'], \
           '--image_height', str(unit_image_size_y), \
           '--image_width' , str(unit_image_size_x)]
    comm = parent.u_info.exec_translate[:]
    comm.extend(tmp)
    print('')
    print(' '.join(comm))
    print('')
    print('Start inference.')
    print('')
    m.UnlockFolder(parent.u_info, params['Output Segmentation Folder (Empty)'])  # Only for shared folder/file
    s.run(comm)
    print('')

    print('Segmentation reconstruction: ')
    for output_file in output_files:
        if (num_tiles_x == 1) and (num_tiles_y == 1):
            ## Single tile: just load it (fringes removed below by cropping).
            filename = os.path.join(tmpdir_output, output_file)
            inferred_segmentation = m.imread(filename,
                                             flags=cv2.IMREAD_GRAYSCALE,
                                             dtype='uint8')
        else:
            ## Merge split tiles back into one canvas.
            inferred_segmentation = np.zeros(
                (converted_size_y, converted_size_x), dtype='uint8')
            for iy in range(num_tiles_y):
                for ix in range(num_tiles_x):
                    y0 = iy * unit_image_size_y
                    y1 = y0 + unit_image_size_y
                    x0 = ix * unit_image_size_x
                    x1 = x0 + unit_image_size_x
                    current_tile_filename = str(ix).zfill(
                        3)[-3:] + '_' + str(iy).zfill(
                            3)[-3:] + '_' + output_file
                    current_tile_filename = os.path.join(
                        tmpdir_output, current_tile_filename)
                    current_tile = m.imread(current_tile_filename,
                                            flags=cv2.IMREAD_GRAYSCALE,
                                            dtype='uint8')
                    inferred_segmentation[y0:y1, x0:x1] = current_tile[:, :]
        # Crop away the mirrored fringes to restore the original size.
        inferred_segmentation = inferred_segmentation[0:image_size_y,
                                                      0:image_size_x]
        print('inferred_segmentation: ', inferred_segmentation.shape,
              inferred_segmentation.dtype)

        ## Save in the requested format.
        filename_base = os.path.splitext(os.path.basename(output_file))[0]
        filename_base = os.path.join(
            params['Output Segmentation Folder (Empty)'], filename_base)
        filetype = params['Output Filetype']
        if filetype == '8-bit gray scale PNG':
            filename = filename_base + '.png'
            m.save_png8(inferred_segmentation, filename)
        elif filetype == '8-bit gray scale TIFF (Uncompressed)':
            filename = filename_base + '.tif'
            m.save_tif8(inferred_segmentation, filename, compression=1)
        elif filetype == '8-bit gray scale TIFF (Compressed)':
            filename = filename_base + '.tif'
            m.save_tif8(inferred_segmentation, filename)
        else:
            # FIX: typo "Internel" corrected in the user-facing message.
            print('Internal error: bad filetype.')
        print(filename)

    # Remove the temporary folders.
    if os.path.exists(tmpdir_standardized):
        shutil.rmtree(tmpdir_standardized)
    if os.path.exists(tmpdir_output):
        shutil.rmtree(tmpdir_output)

    parent.parent.ExecuteCloseFileFolder(params['Output Segmentation Folder (Empty)'])
    parent.parent.OpenFolder(params['Output Segmentation Folder (Empty)'])
    print('')
    print('Finish inference.')
    print('')
    return True
def _Run(self, parent, params, comm_title):
    """Prepare FFN training data and run compute_partitions / build_coordinates (argv-list variant).

    Stacks training images into "grayscale_maps.h5" (dataset 'raw') and
    ground-truth labels into "groundtruth.h5" (dataset 'stack', int32) in
    params['Empty Folder for FFNs'], then runs the two external FFN commands
    as argv lists (no shell splitting).

    Returns True on completion.
    """
    print('')
    ffn_folder = params['Empty Folder for FFNs']

    # compute_partitions argv.
    tmp = [ \
        '--input_volume' , os.path.join(ffn_folder, "groundtruth.h5@stack"), \
        '--output_volume' , os.path.join(ffn_folder, "af.h5@af"), \
        '--thresholds' , '0.025,0.05,0.075,0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9', \
        '--lom_radius' , '24,24,24', \
        '--min_size' , '10000']
    comm_compute_partition = parent.u_info.exec_compute_partition[:]
    comm_compute_partition.extend(tmp)

    # build_coordinates argv.
    # FIX: the margin value was '24,24,24 ' with a trailing space — in argv-list
    # form the space is passed verbatim to the program (the split()-based variant
    # of this command strips it).
    tmp = [ \
        '--partition_volumes' , 'validation1@' + os.path.join(ffn_folder, "af.h5@af"), \
        '--coordinate_output' , os.path.join(ffn_folder, "tf_record_file"), \
        '--margin' , '24,24,24']
    comm_build_coordinates = parent.u_info.exec_build_coordinates[:]
    comm_build_coordinates.extend(tmp)

    # Stack training images (grayscale) into grayscale_maps.h5.
    training_image_files = m.ObtainImageFiles(params['Training Image Folder'])
    images = [m.imread(i, cv2.IMREAD_GRAYSCALE) for i in training_image_files]
    images = np.array(images)
    with h5py.File(os.path.join(ffn_folder, "grayscale_maps.h5"), 'w') as f:
        f.create_dataset('raw', data=images, compression='gzip')
    print('"grayscale_maps.h5" file (training image) was generated.')

    # Stack ground-truth labels (read unchanged) into groundtruth.h5 as int32.
    ground_truth_files = m.ObtainImageFiles(params['Ground Truth Folder'])
    images = [m.imread(i, cv2.IMREAD_UNCHANGED) for i in ground_truth_files]
    images = np.array(images).astype(np.int32)
    with h5py.File(os.path.join(ffn_folder, "groundtruth.h5"), 'w') as f:
        f.create_dataset('stack', data=images, compression='gzip')
    print('"groundtruth.h5" file (ground truth) was generated.')

    print(comm_title)
    print('Start compute_partitions.')
    print(' '.join(comm_compute_partition))
    print('')
    s.run(comm_compute_partition)
    print('')
    print('Start build_coordinates.')
    print(' '.join(comm_build_coordinates))
    print('')
    s.run(comm_build_coordinates)
    print('')
    print(comm_title, 'is finished.')
    print('')
    parent.parent.ExecuteCloseFileFolder(ffn_folder)
    parent.parent.OpenFolder(ffn_folder)
    return True
def _Run(self, parent, params, comm_title):
    """Run FFN inference on a target image stack.

    Optionally removes previous seg-0_0_0 results (after user confirmation),
    builds "grayscale_inf.h5" from the target images, writes the pbtxt
    inference configuration, and launches the run_inference executable.

    Returns True on success, False on cancellation or failure.
    """
    ## Remove previous results (ask first).
    removal_file1 = os.path.join(params['Output Inference Folder'], '0', '0', 'seg-0_0_0.npz')
    removal_file2 = os.path.join(params['Output Inference Folder'], '0', '0', 'seg-0_0_0.prob')
    if os.path.isfile(removal_file1) or os.path.isfile(removal_file2):
        reply = QMessageBox.question(
            parent, 'FFN',
            'seg-0_0_0 files were found at the Output Inference Folder. Remove them?',
            QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
        if reply == QMessageBox.Yes:
            with contextlib.suppress(FileNotFoundError):
                os.remove(removal_file1)
            with contextlib.suppress(FileNotFoundError):
                os.remove(removal_file2)
            print('seg-0_0_0 files were removed.')
        else:
            print('FFN inference was canceled.')
            # FIX: was a bare `return` (None); False keeps the boolean
            # contract consistent with the other exits.
            return False

    ## h5 file (target image file) generation.
    target_image_file_h5 = os.path.join(params['FFN File Folder'], "grayscale_inf.h5")
    try:
        target_image_files = m.ObtainImageFiles(params['Target Image Folder'])
        images = [cv2.imread(i, cv2.IMREAD_GRAYSCALE) for i in target_image_files]
        images = np.array(images)
        image_z = images.shape[0]
        image_y = images.shape[1]
        image_x = images.shape[2]
        image_mean = np.mean(images).astype(np.int16)
        image_std = np.std(images).astype(np.int16)
        print('x: {}, y: {}, z: {}'.format(image_x, image_y, image_z))
        with h5py.File(target_image_file_h5, 'w') as f:
            f.create_dataset('raw', data=images, compression='gzip')
        print('"grayscale_inf.h5" file (target inference image) was generated.')
    except Exception as e:
        # FIX: was a bare `except:` — report the actual cause instead of
        # silently masking it (and stop swallowing KeyboardInterrupt).
        print("Error: Target Image h5 was not generated.")
        print(e)
        return False

    ## Inference configuration file generation.
    request = {}
    request['image'] = {"hdf5": "{}@raw".format(target_image_file_h5).replace('\\', '/')}
    request['image_mean'] = image_mean
    request['image_stddev'] = image_std
    request['checkpoint_interval'] = int(params['Checkpoint Interval'])
    request['seed_policy'] = "PolicyPeaks"
    request['model_checkpoint_path'] = params['Tensorflow Model Files'].replace('\\', '/')
    request['model_name'] = "convstack_3d.ConvStack3DFFNModel"
    # Escaped-quote JSON strings are written verbatim into the pbtxt.
    if params['Sparse Z'] != Qt.Unchecked:
        request['model_args'] = "{\\\"depth\\\": 9, \\\"fov_size\\\": [33, 33, 17], \\\"deltas\\\": [8, 8, 4]}"
    else:
        request['model_args'] = "{\\\"depth\\\": 12, \\\"fov_size\\\": [33, 33, 33], \\\"deltas\\\": [8, 8, 8]}"
    request['segmentation_output_dir'] = params['Output Inference Folder'].replace('\\', '/')
    inference_options = {}
    inference_options['init_activation'] = 0.95
    inference_options['pad_value'] = 0.05
    inference_options['move_threshold'] = 0.9
    inference_options['min_boundary_dist'] = {"x": 1, "y": 1, "z": 1}
    inference_options['segment_threshold'] = 0.6
    inference_options['min_segment_size'] = 1000
    request['inference_options'] = inference_options

    config_file = os.path.join(params['FFN File Folder'], "inference_params.pbtxt")
    with open(config_file, "w", encoding='utf-8') as f:
        self.write_call(f, request, "")
    print('Configuration file was saved at :')
    print(config_file)
    print('\n')

    ## Inference start (run_inference is invoked directly because of an
    ## argument-parsing problem with the library entry point).
    m.mkdir_safe(os.path.join(params['Output Inference Folder'], '0', '0'))

    comm_inference = parent.u_info.exec_run_inference.split(' ')
    # FIX: np.str was removed in NumPy 1.24 — use the builtin str().
    # Also renamed the local list: it previously shadowed the `params` dict.
    size_args = ['--image_size_x', str(image_x),
                 '--image_size_y', str(image_y),
                 '--image_size_z', str(image_z),
                 '--parameter_file', config_file]
    comm_inference += size_args

    print(comm_title)
    print('\n')
    s.call(comm_inference)
    print(comm_title, 'was finished.')
    return True