def all_img_files_valid(stack, prep_id='None', version='Ntb', resol='thumbnail'):
    all_files_valid = True
    corrupted_files = []
    filenames_list = DataManager.load_sorted_filenames(stack)[0].keys()
    for img_name in filenames_list:
        img_fp = DataManager.get_image_filepath(stack=stack, resol=resol,
                                                version=version, fn=img_name)
        try:
            img = cv2.imread(img_fp)
            # If the length of the image's shape is zero, the file is corrupted
            # (cv2.imread returns None on failure and np.shape(None) is ()).
            if len(np.shape(img)) == 0:
                all_files_valid = False
                corrupted_files.append(img_name)
                continue
        except Exception as e:
            all_files_valid = False
            print(e)
            corrupted_files.append(img_name)
    return all_files_valid, corrupted_files
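# A hedged usage sketch of the checker above (the stack name 'DEMO998' is a
# made-up placeholder; any configured stack would do): run it before a
# pipeline step and report whatever failed to load.
if __name__ == '__main__':
    import sys
    valid, corrupted = all_img_files_valid('DEMO998', version='Ntb', resol='thumbnail')
    if not valid:
        sys.stderr.write('Corrupted images: %s\n' % ', '.join(corrupted))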
def get_fp(section, prep_id=1, resol='thumbnail', version='auto'):
    # 'stack' and 'stain' are module-level globals in this script.
    if version == 'auto':
        version = stain_to_metainfo[stain]['img_version_1']
    return DataManager.get_image_filepath(stack=stack, section=section,
                                          prep_id=prep_id, resol=resol,
                                          version=version)
def update_transformed_images_feeder(self):
    section_filenames = self.get_sorted_filenames(valid_only=self.show_valid_only)
    transformed_image_filenames = []
    for i in range(1, len(section_filenames)):
        fp = DataManager.load_image_filepath_warped_to_adjacent_section(
            stack=self.stack, moving_fn=section_filenames[i],
            fixed_fn=section_filenames[i - 1])
        transformed_image_filenames.append(fp)
    self.transformed_images_feeder.set_images(labels=section_filenames[1:],
                                              filenames=transformed_image_filenames,
                                              resolution=self.tb_res,
                                              load_with_cv2=True)
def align_using_elastix(self):
    selected_elastix_parameter_name = str(self.alignment_ui.comboBox_parameters.currentText())
    param_fn = os.path.join(UTILITY_DIR, 'preprocess', 'parameters',
                            'Parameters_' + selected_elastix_parameter_name + '.txt')

    curr_fn = self.curr_gscene.active_section
    prev_fn = self.prev_gscene.active_section

    out_dir = os.path.join(self.stack_data_dir, self.stack + '_custom_transforms',
                           curr_fn + '_to_' + prev_fn)

    curr_fp = DataManager.get_image_filepath_v2(stack=self.stack, prep_id=None,
                                                fn=curr_fn, resol=self.tb_res,
                                                version=self.tb_version)
    prev_fp = DataManager.get_image_filepath_v2(stack=self.stack, prep_id=None,
                                                fn=prev_fn, resol=self.tb_res,
                                                version=self.tb_version)
    # curr_fp = os.path.join(RAW_DATA_DIR, self.stack, curr_fn + '.' + self.tb_fmt)
    # prev_fp = os.path.join(RAW_DATA_DIR, self.stack, prev_fn + '.' + self.tb_fmt)

    execute_command('rm -rf %(out_dir)s; mkdir -p %(out_dir)s; '
                    'elastix -f %(fixed_fn)s -m %(moving_fn)s -out %(out_dir)s -p %(param_fn)s' %
                    dict(param_fn=param_fn, out_dir=out_dir,
                         fixed_fn=prev_fp, moving_fn=curr_fp))

    # section_filenames = self.get_sorted_filenames(valid_only=self.show_valid_only)
    self.update_transformed_images_feeder()
def load_crop(self):
    """Load the crop box saved for this stack."""
    self.set_show_option('aligned')
    cropbox_fp = DataManager.get_cropbox_filename(stack=self.stack, anchor_fn=self.anchor_fn)
    with open(cropbox_fp, 'r') as f:
        ul_x, lr_x, ul_y, lr_y, first_section, last_section = map(int, f.readline().split())
    self.first_section = self.section_to_filename[first_section]
    self.last_section = self.section_to_filename[last_section]
    self.sorted_sections_gscene.set_box(ul_x, lr_x, ul_y, lr_y)
    print(ul_x, lr_x, ul_y, lr_y, self.first_section, self.last_section)
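# For reference, the cropbox file read above (and written by save_crop further
# down) is a single line of six space-separated integers:
#
#     ul_x lr_x ul_y lr_y first_section last_section
#
# e.g. "520 1840 310 1190 92 368" (values made up for illustration).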
def all_adaptive_intensity_floatHistograms_present(stack):
    all_files_present = True
    missing_files = []
    for fn, sec in DataManager.metadata_cache['filenames_to_sections'][stack].items():
        img_fp = DataManager.get_intensity_normalization_result_filepath(
            what='float_histogram_png', stack=stack, fn=fn, section=sec)
        if not os.path.isfile(img_fp):
            all_files_present = False
            missing_files.append(img_fp)
    return all_files_present, missing_files
def save_sorted_filenames(self):
    sorted_filenames = self.get_sorted_filenames(valid_only=False)
    out_sorted_image_names_fp = DataManager.get_sorted_filenames_filename(stack=self.stack)
    with open(out_sorted_image_names_fp, 'w') as f:
        for i, fn in enumerate(sorted_filenames):
            f.write('%s %03d\n' % (fn, i + 1))  # index starts from 1
    upload_to_s3(out_sorted_image_names_fp)
    sys.stderr.write('Sorted filename list saved.\n')
    self.statusBar().showMessage('Sorted filename list saved.')
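# The resulting sorted-filenames file has one "<image_name> <section_index>"
# pair per line, with the index zero-padded to three digits and starting at 1,
# e.g. (image names invented for illustration):
#
#     MD000-N1-2000.01.01-00.00.00_MD000_1_0001 001
#     MD000-N1-2000.01.01-00.00.00_MD000_1_0002 002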
def edit_transform(self):
    sys.stderr.write('Loading Edit Transform GUI...\n')
    self.statusBar().showMessage('Loading Edit Transform GUI...')

    self.alignment_ui = Ui_AlignmentGui()
    self.alignment_gui = QDialog(self)
    self.alignment_gui.setWindowTitle("Edit transform between adjacent sections")
    self.alignment_ui.setupUi(self.alignment_gui)

    self.alignment_ui.button_anchor.clicked.connect(self.add_anchor_pair_clicked)
    self.alignment_ui.button_align.clicked.connect(self.align_using_elastix)
    self.alignment_ui.button_compute.clicked.connect(self.compute_custom_transform)

    param_fps = os.listdir(DataManager.get_elastix_parameters_dir())
    all_parameter_setting_names = ['_'.join(pf[:-4].split('_')[1:]) for pf in param_fps]
    self.alignment_ui.comboBox_parameters.addItems(all_parameter_setting_names)

    section_filenames = self.get_sorted_filenames(valid_only=self.show_valid_only)

    # Initialize gscene "current"
    self.curr_gscene = SimpleGraphicsScene4(id='current', gview=self.alignment_ui.curr_gview)
    self.alignment_ui.curr_gview.setScene(self.curr_gscene)
    self.curr_gscene.set_data_feeder(self.ordered_images_feeder)
    self.curr_gscene.set_active_i(1)
    self.curr_gscene.active_image_updated.connect(self.current_section_image_changed)
    self.curr_gscene.anchor_point_added.connect(self.anchor_point_added)

    # Initialize gscene "previous"
    self.prev_gscene = SimpleGraphicsScene4(id='previous', gview=self.alignment_ui.prev_gview)
    self.alignment_ui.prev_gview.setScene(self.prev_gscene)
    self.prev_gscene.set_data_feeder(self.ordered_images_feeder)
    self.prev_gscene.set_active_i(0)
    self.prev_gscene.active_image_updated.connect(self.previous_section_image_changed)
    self.prev_gscene.anchor_point_added.connect(self.anchor_point_added)

    # Initialize gscene "overlay"
    self.overlay_gscene = MultiplePixmapsGraphicsScene(id='overlay',
                                                       pixmap_labels=['moving', 'fixed'],
                                                       gview=self.alignment_ui.aligned_gview)
    self.alignment_ui.aligned_gview.setScene(self.overlay_gscene)
    self.transformed_images_feeder = ImageDataFeeder_v2('transformed image feeder',
                                                        stack=self.stack,
                                                        sections=section_filenames,
                                                        resolution=self.tb_res)
    self.update_transformed_images_feeder()
    self.overlay_gscene.set_data_feeder(self.transformed_images_feeder, 'moving')
    self.overlay_gscene.set_data_feeder(self.ordered_images_feeder, 'fixed')
    self.overlay_gscene.set_active_indices({'moving': 1, 'fixed': 0})
    self.overlay_gscene.set_opacity('moving', .3)
    self.overlay_gscene.set_opacity('fixed', .3)
    self.overlay_gscene.active_image_updated.connect(self.overlay_image_changed)

    self.alignment_gui.show()
def apply_custom_transform(self):
    # section_filenames = self.get_sorted_filenames(valid_only=self.show_valid_only)
    # curr_section_fn = section_filenames[self.valid_section_indices[self.curr_gscene.active_i]-1]
    # prev_section_fn = section_filenames[self.valid_section_indices[self.prev_gscene.active_i]-1]
    curr_section_fn = self.curr_gscene.active_section
    prev_section_fn = self.prev_gscene.active_section

    custom_tf_fn = os.path.join(self.stack_data_dir, self.stack + '_custom_transforms',
                                curr_section_fn + '_to_' + prev_section_fn,
                                curr_section_fn + '_to_' + prev_section_fn + '_customTransform.txt')
    with open(custom_tf_fn, 'r') as f:
        t11, t12, t13, t21, t22, t23 = map(float, f.readline().split())

    prev_fp = DataManager.get_image_filepath_v2(stack=self.stack, prep_id=None,
                                                fn=prev_section_fn, resol=self.tb_res,
                                                version=self.tb_version)
    curr_fp = DataManager.get_image_filepath_v2(stack=self.stack, prep_id=None,
                                                fn=curr_section_fn, resol=self.tb_res,
                                                version=self.tb_version)

    prev_img_w, prev_img_h = identify_shape(prev_fp)
    output_image_fp = os.path.join(self.stack_data_dir,
                                   '%(stack)s_custom_transforms/%(curr_fn)s_to_%(prev_fn)s/%(curr_fn)s_alignedTo_%(prev_fn)s.tif' %
                                   dict(stack=self.stack, curr_fn=curr_section_fn,
                                        prev_fn=prev_section_fn))

    execute_command("convert %(curr_fp)s -virtual-pixel background +distort AffineProjection "
                    "'%(sx)f,%(rx)f,%(ry)f,%(sy)f,%(tx)f,%(ty)f' -crop %(w)sx%(h)s%(x)s%(y)s\\! "
                    "-flatten -compress lzw %(output_fp)s" %
                    dict(curr_fp=curr_fp, output_fp=output_image_fp, tb_fmt=self.tb_fmt,
                         sx=t11, sy=t22, rx=t21, ry=t12, tx=t13, ty=t23,
                         w=prev_img_w, h=prev_img_h, x='+0', y='+0',
                         raw_data_dir=RAW_DATA_DIR))
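# Note on the ImageMagick call above: '+distort AffineProjection' takes its six
# parameters in the order sx,rx,ry,sy,tx,ty, i.e. the forward mapping
# x' = sx*x + ry*y + tx, y' = rx*x + sy*y + ty. A minimal sketch (assuming, as
# the code implies, that the customTransform file stores the 2x3 row-major
# matrix [[t11,t12,t13],[t21,t22,t23]]; the numbers here are invented):
if __name__ == '__main__':
    import numpy as np
    t11, t12, t13, t21, t22, t23 = 1.0, 0.0, 12.5, 0.0, 1.0, -3.0  # a pure shift
    T = np.array([[t11, t12, t13],
                  [t21, t22, t23]])
    # The same six numbers in ImageMagick's sx,rx,ry,sy,tx,ty order:
    im_args = '%f,%f,%f,%f,%f,%f' % (t11, t21, t12, t22, t13, t23)
    x, y = 100.0, 40.0
    print(T.dot([x, y, 1.0]))  # forward-mapped point, matching the distortion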
def all_autoSubmasks_present(stack):
    all_files_present = True
    missing_files = []
    for fn, sec in DataManager.metadata_cache['filenames_to_sections'][stack].items():
        img_fp = DataManager.get_auto_submask_filepath(stack=stack, what='submask',
                                                       submask_ind=0, fn=fn, sec=sec)
        if not os.path.isfile(img_fp):
            all_files_present = False
            missing_files.append(img_fp)
    return all_files_present, missing_files
def save_crop(self):
    ul_pos = self.sorted_sections_gscene.corners['ul'].scenePos()
    lr_pos = self.sorted_sections_gscene.corners['lr'].scenePos()
    ul_x = int(ul_pos.x())
    ul_y = int(ul_pos.y())
    lr_x = int(lr_pos.x())
    lr_y = int(lr_pos.y())

    # If not set yet (the box is still at its default position), do nothing.
    if ul_x == 100 and ul_y == 100 and lr_x == 200 and lr_y == 200:
        return

    cropbox_fp = DataManager.get_cropbox_filename(stack=self.stack, anchor_fn=self.anchor_fn)
    filename_to_section = invert_section_to_filename_mapping(self.section_to_filename)
    with open(cropbox_fp, 'w') as f:
        f.write('%d %d %d %d %d %d' % (ul_x, lr_x, ul_y, lr_y,
                                       filename_to_section[self.first_section],
                                       filename_to_section[self.last_section]))
    upload_to_s3(cropbox_fp)
def revert_to_prev_step(stack, target_step):
    progress_dict = {}
    passed_target_step = False
    for step in ordered_pipeline_steps:
        # Mark every step before `target_step` as completed; mark
        # `target_step` itself and everything after it as incomplete.
        if passed_target_step:
            progress_dict[step] = False
        elif step == target_step:
            progress_dict[step] = False
            passed_target_step = True
        else:
            progress_dict[step] = True

    # Save PROGRESS ini
    progress_ini_to_save = {'DEFAULT': progress_dict}

    # Get filepath and save ini
    fp = DataManager.get_brain_info_progress(stack)
    save_dict_as_ini(progress_ini_to_save, fp)
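# A hedged sketch of the effect: if ordered_pipeline_steps were
# ['setup', 'align', 'crop'] (step names invented for illustration),
# revert_to_prev_step(stack, 'align') would save
# {'setup': True, 'align': False, 'crop': False},
# so the pipeline resumes from 'align'.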
# Parse the structure list if one was given on the command line (JSON, or a
# plain string on decode failure); otherwise fall back to all known structures.
if args.structure_list is not None:
    try:
        structure_list = json.loads(args.structure_list)
    except Exception:
        structure_list = args.structure_list
else:
    structure_list = all_known_structures
# structure_list = ['Amb', 'SNR', '7N', '5N', '7n', 'LRt', 'Sp5C', 'SNC', 'VLL', 'SC', 'IC']
print(structure_list)

atlas_spec = dict(name='atlasV7', vol_type='score', resolution='10.0um')
atlas_structures_wrt_canonicalAtlasSpace_atlasResol = \
    DataManager.load_original_volume_all_known_structures_v3(atlas_spec,
                                                             in_bbox_wrt='canonicalAtlasSpace',
                                                             out_bbox_wrt='canonicalAtlasSpace')
# atlas_structures_wrt_canonicalAtlasSpace_atlasResol has two elements:
#   [0] the probability volumes loaded from the atlas
#   [1] the (X, Y, Z) offset

# For computing score maps.
batch_size = 256
model_dir_name = 'inception-bn-blue'
model_name = 'inception-bn-blue'
# Loading the mxnet model causes warnings!
model, mean_img = load_mxnet_model(model_dir_name=model_dir_name,
                                   model_name=model_name,
                                   num_gpus=1,
                                   batch_size=batch_size)
print('\n\n\n\n')
def get_prep5_limits_from_prep1_thumbnail_masks(stack,
                                                max_distance_to_scan_from_midpoint=25,
                                                plot_progression=False):
    prep_id = 1
    version = 'mask'
    resol = 'thumbnail'

    sec_to_fn_dict = DataManager.load_sorted_filenames(stack=stack)[1]
    midpoint = int(np.mean(list(DataManager.load_sorted_filenames(stack=stack)[1].keys())))
    max_distance = max_distance_to_scan_from_midpoint

    # Only keep sections within max_distance of the midpoint, dropping
    # placeholders. Iterate over a copy of the keys since entries are deleted.
    for i in list(sec_to_fn_dict.keys()):
        try:
            if i not in range(midpoint - max_distance, midpoint + max_distance):
                del sec_to_fn_dict[i]
            if sec_to_fn_dict[i] == 'Placeholder':
                del sec_to_fn_dict[i]
        except KeyError:
            pass

    # Get dimensions of the first image in the list (will be the same for all)
    img_fp = DataManager.get_image_filepath(
        stack=stack, resol=resol, version=version,
        fn=sec_to_fn_dict[list(sec_to_fn_dict.keys())[0]])
    height, width, channels = cv2.imread(img_fp).shape
    height_d16 = height // 16
    width_d16 = width // 16

    curr_rostral_lim_d16 = width_d16
    curr_caudal_lim_d16 = 0
    curr_dorsal_lim_d16 = height_d16
    curr_ventral_lim_d16 = 0

    for img_name in sec_to_fn_dict.values():
        # Get the image filepath and then load the image, downsampling an
        # additional 16x for speed.
        img_fp = DataManager.get_image_filepath(stack=stack, resol=resol,
                                                version=version, fn=img_name)
        img_thumbnail_mask_down16 = cv2.imread(img_fp)[::16, ::16]

        # Update rostral lim: scan columns left to right until tissue is found.
        for col_i in range(curr_rostral_lim_d16):
            col = img_thumbnail_mask_down16[:, col_i]
            contains_tissue = np.array(col).any()
            if contains_tissue:
                curr_rostral_lim_d16 = min(curr_rostral_lim_d16, col_i)
                break
        # Update caudal lim: scan columns from right of image to left.
        for col_i in reversed(range(curr_caudal_lim_d16, width_d16)):
            col = img_thumbnail_mask_down16[:, col_i]
            contains_tissue = np.array(col).any()
            if contains_tissue:
                curr_caudal_lim_d16 = max(curr_caudal_lim_d16, col_i)
                break
        # Update dorsal lim: scan rows top to bottom.
        for row_i in range(curr_dorsal_lim_d16):
            row = img_thumbnail_mask_down16[row_i, :]
            contains_tissue = np.array(row).any()
            if contains_tissue:
                curr_dorsal_lim_d16 = min(curr_dorsal_lim_d16, row_i)
                break
        # Update ventral lim: scan rows from bottom of image to top.
        for row_i in reversed(range(curr_ventral_lim_d16, height_d16)):
            row = img_thumbnail_mask_down16[row_i, :]
            contains_tissue = np.array(row).any()
            if contains_tissue:
                curr_ventral_lim_d16 = max(curr_ventral_lim_d16, row_i)
                break

        if plot_progression:
            plt.imshow(img_thumbnail_mask_down16)
            plt.scatter([curr_rostral_lim_d16, curr_rostral_lim_d16,
                         curr_caudal_lim_d16, curr_caudal_lim_d16],
                        [curr_dorsal_lim_d16, curr_ventral_lim_d16,
                         curr_dorsal_lim_d16, curr_ventral_lim_d16], c='r')
            plt.show()

    # Make the boundary slightly larger
    final_rostral_lim = (curr_rostral_lim_d16 - 1.5) * 16
    final_caudal_lim = (curr_caudal_lim_d16 + 1.5) * 16
    final_dorsal_lim = (curr_dorsal_lim_d16 - 1.5) * 16
    final_ventral_lim = (curr_ventral_lim_d16 + 1.5) * 16
    # If the boundary goes past the image, reset to the min/max value
    final_rostral_lim = max(final_rostral_lim, 0)
    final_caudal_lim = min(final_caudal_lim, width)
    final_dorsal_lim = max(final_dorsal_lim, 0)
    final_ventral_lim = min(final_ventral_lim, height)

    print('rostral:', final_rostral_lim)
    print('caudal:', final_caudal_lim)
    print('dorsal:', final_dorsal_lim)
    print('ventral:', final_ventral_lim)
    return final_rostral_lim, final_caudal_lim, final_dorsal_lim, final_ventral_lim
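# A minimal, self-contained sketch of the scanning idea used above (pure numpy,
# no pipeline dependencies, synthetic data): downsample a binary mask 16x by
# slicing, then find the first column containing any foreground.
if __name__ == '__main__':
    import numpy as np
    mask = np.zeros((480, 640), dtype=np.uint8)
    mask[128:352, 192:448] = 255   # a synthetic blob of "tissue"
    mask_d16 = mask[::16, ::16]    # cheap 16x downsample, as in the function
    rostral_d16 = next(c for c in range(mask_d16.shape[1]) if mask_d16[:, c].any())
    print(rostral_d16 * 16)        # 192, the blob's left edge at full resolution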
def get_img(section, prep_id='None', resol='thumbnail', version='NtbNormalized'):
    # 'stack' is a module-level global in this script.
    return DataManager.load_image(stack=stack, section=section, prep_id=prep_id,
                                  resol=resol, version=version)
type=str, help="User-corrected pairwise transform output dir") parser.add_argument("--anchor_img_name", type=str, help="Anchor image name") parser.add_argument("--out", type=str, help="csv, composed transforms for each image to anchor") args = parser.parse_args() input_spec = load_ini(args.input_spec) image_name_list = input_spec['image_name_list'] if image_name_list == 'all': #image_name_list = DataManager.load_sorted_filenames(stack=stack)[0].keys() image_name_list = map( lambda x: x[0], sorted(DataManager.load_sorted_filenames( stack=input_spec['stack'])[0].items(), key=lambda x: x[1])) #op = load_ini(os.path.join(DATA_ROOTDIR, 'CSHL_data_processed', input_spec['stack'], 'operation_configs', args.op + '.ini')) op = load_ini( os.path.join(DATA_ROOTDIR, 'CSHL_data_processed', input_spec['stack'], 'operation_configs', args.op + '.ini')) assert op['type'] == 'warp', "Op type must be warp." assert op['base_prep_id'] == input_spec[ 'prep_id'], "Op requires %s, but input has prep %s." % ( op['base_prep_id'], input_spec['prep_id']) elastix_output_dir = op['elastix_output_dir'] custom_output_dir = op['custom_output_dir'] toanchor_transforms_fp = op['transforms_csv'] anchor_img_name = op['anchor_image_name']
parser.add_argument("-H", "--height", type=int, help="Height") args = parser.parse_args() input_spec = load_ini(args.input_spec) stack = input_spec['stack'] prep_id = input_spec['prep_id'] if prep_id == 'None': prep_id = None resol = input_spec['resol'] version = input_spec['version'] if version == 'None': version = None image_name_list = input_spec['image_name_list'] if image_name_list == 'all': image_name_list = DataManager.load_sorted_filenames(stack=stack)[0].keys() from PIL import Image Image.MAX_IMAGE_PIXELS = 1000000000000 for img_name in image_name_list: t = time.time() in_fp = DataManager.get_image_filepath(stack=stack, resol=resol, version=version, fn=img_name) out_fp = DataManager.get_image_filepath(stack=stack, resol=args.out_resol, version=version, fn=img_name) create_parent_dir_if_not_exists(out_fp) #img = imread(in_fp, plugin='pil')
                    default=1)
args = parser.parse_args()

input_spec = load_ini(args.input_spec)
stack = input_spec['stack']
prep_id = input_spec['prep_id']
if prep_id == 'None':
    prep_id = None
resol = input_spec['resol']
version = input_spec['version']
if version == 'None':
    version = None
image_name_list = input_spec['image_name_list']
if image_name_list == 'all':
    image_name_list = DataManager.load_sorted_filenames(stack=stack)[0].keys()

create_if_not_exists(DataManager.get_image_dir(stack=stack, resol=resol,
                                               version=args.out_version))

if args.channel == -1:
    run_distributed5(
        'convert \"%(in_fp)s\" -set colorspace Gray -separate -average \"%(out_fp)s\"',
        kwargs_list=[{'in_fp': DataManager.get_image_filepath(stack=stack,
                                                              prep_id=prep_id,
                                                              resol=resol,
                                                              version=version,
from utilities.sqlcontroller import SqlController

parser = argparse.ArgumentParser(
    formatter_class=argparse.RawDescriptionHelpFormatter,
    description='Converts image format to tiff, extracts different channels')
parser.add_argument("stack", type=str, help="The name of the stack")
parser.add_argument("stain", type=str, help="Either \'NTB\' or \'Thionin\'.")
args = parser.parse_args()
stack = args.stack
stain = args.stain

# Quality check on sorted_filenames.txt: make sure the
# ROOT_DIR/CSHL_data_processed/STACK/STACK_raw/SLICE_raw.tif files all exist,
# otherwise we can't continue.
sorted_fns = DataManager.get_fn_list_from_sorted_filenames(stack)
for fn in sorted_fns:
    fp_tif = os.path.join(DataManager.get_images_root_folder(stack),
                          stack + '_raw', fn + '_raw.tif')
    fp_tif_generic = os.path.join(DataManager.get_images_root_folder(stack),
                                  stack + '_raw', '<FILENAME>_raw.tif')
    # fp_tif = ROOT_DIR+'CSHL_data_processed/'+stack+'/'+stack+'_raw/'+fn+'_raw.tif'
    if not os.path.isfile(fp_tif):
        print('')
        print('_________________________________________________________________________________')
        print('_________________________________________________________________________________')
        print(
def convert_operation_to_arr(op, resol, inverse=False, return_str=False, stack=None):
    """
    If op is warp, return {image_name: (3,3)-array}.
    If op is crop, return {image_name: (x,y,w,h)}.
    """
    assert 'type' in op, "Operation spec must provide type."

    if op['type'] == 'warp':
        tf_csv = op['transforms_csv']
        assert tf_csv.endswith('.csv'), "transforms_csv is not a csv file."
        # print(tf_csv)
        transforms_to_anchor = csv_to_dict(tf_csv)
        transforms_resol = op['resolution']
        # Only the translation column of the 2x3 transform needs rescaling
        # when converting between resolutions.
        transforms_scale_factor = DataManager.convert_resolution_string_to_um(
            stack=stack, resolution=transforms_resol) / \
            DataManager.sqlController.convert_resolution_string_to_um(
                stack=stack, resolution=resol)
        tf_mat_mult_factor = np.array([[1, 1, transforms_scale_factor],
                                       [1, 1, transforms_scale_factor]])

        if inverse:
            transforms_to_anchor = {
                img_name: convert_2d_transform_forms(
                    np.linalg.inv(np.reshape(tf, (3, 3)))[:2] * tf_mat_mult_factor,
                    out_form='str')
                for img_name, tf in transforms_to_anchor.items()}
        else:
            transforms_to_anchor = {
                img_name: convert_2d_transform_forms(
                    np.reshape(tf, (3, 3))[:2] * tf_mat_mult_factor,
                    out_form='str')
                for img_name, tf in transforms_to_anchor.items()}
        return transforms_to_anchor

    elif op['type'] == 'crop':
        cropbox_resol = op['resolution']
        # `image_name_list` is a module-level global here.
        if 'cropboxes_csv' in op:  # each image has a different cropbox
            cropboxes_all = csv_to_dict(op['cropboxes_csv'])
            cropboxes = {}
            for img_name in image_name_list:
                arr_xxyy = convert_cropbox_fmt(data=cropboxes_all[img_name],
                                               in_fmt='arr_xywh', out_fmt='arr_xxyy')
                if inverse:
                    arr_xxyy = np.array([-arr_xxyy[0], arr_xxyy[1],
                                         -arr_xxyy[2], arr_xxyy[3]])
                cropboxes[img_name] = convert_cropbox_fmt(
                    data=arr_xxyy, in_fmt='arr_xxyy',
                    out_fmt='str_xywh' if return_str else 'arr_xywh',
                    in_resol=cropbox_resol, out_resol=resol, stack=stack)
        else:  # a single cropbox for all images
            arr_xxyy = convert_cropbox_fmt(data=op, in_fmt='dict', out_fmt='arr_xxyy',
                                           in_resol=cropbox_resol, out_resol=resol,
                                           stack=stack)
            if inverse:
                arr_xxyy = np.array([-arr_xxyy[0], arr_xxyy[1],
                                     -arr_xxyy[2], arr_xxyy[3]])
            cropbox = convert_cropbox_fmt(data=arr_xxyy, in_fmt='arr_xxyy',
                                          out_fmt='str_xywh' if return_str else 'arr_xywh',
                                          stack=stack)
            cropboxes = {img_name: cropbox for img_name in image_name_list}
        return cropboxes

    elif op['type'] == 'rotate':
        return {img_name: op['how'] for img_name in image_name_list}

    else:
        raise Exception("Operation type specified by ini must be either warp, crop or rotate.")
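# A minimal numpy sketch of the rescaling trick above: multiplying a 2x3 affine
# elementwise by [[1,1,s],[1,1,s]] rescales only the translation column, which
# is what a resolution change requires (the linear part is dimensionless).
# Values are invented for illustration.
if __name__ == '__main__':
    import numpy as np
    tf = np.array([[0.99, 0.02, 130.0],
                   [-0.02, 0.99, -42.0]])  # transform expressed in thumbnail pixels
    s = 32.0                               # e.g. thumbnail pixels 32x larger than raw
    tf_raw = tf * np.array([[1, 1, s], [1, 1, s]])
    print(tf_raw)                          # linear terms unchanged, shifts scaled by 32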
def update_sorted_sections_gscene_from_sorted_filenames(self):
    if not hasattr(self, 'currently_showing'):
        self.currently_showing = 'original'

    if not hasattr(self, 'anchor_fn'):
        anchor_fp = DataManager.get_anchor_filename_filename(self.stack)
        if os.path.exists(anchor_fp):
            with open(anchor_fp) as f:
                self.set_anchor(f.readline().strip())
        else:
            # No anchor saved yet: pick the largest section as the anchor.
            imageNames_to_load = self.get_sorted_filenames(valid_only=self.show_valid_only)
            shapes = [identify_shape(DataManager.get_image_filepath_v2(
                          stack=self.stack, fn=fn, prep_id=None,
                          version=self.tb_version, resol=self.tb_res))
                      for fn in imageNames_to_load]
            largest_idx = np.argmax([h * w for h, w in shapes])
            print('largest section is ', imageNames_to_load[largest_idx])
            self.set_anchor(imageNames_to_load[largest_idx])
            print(imageNames_to_load[largest_idx])

    if self.currently_showing == 'original':
        imageNames_to_load = self.get_sorted_filenames(valid_only=self.show_valid_only)

        if not hasattr(self, 'ordered_images_feeder') or self.ordered_images_feeder is None:
            self.ordered_images_feeder = ImageDataFeeder_v2('ordered image feeder',
                                                            stack=self.stack,
                                                            sections=imageNames_to_load,
                                                            resolution=self.tb_res,
                                                            use_thread=False,
                                                            auto_load=False)
            self.ordered_images_feeder.set_images(
                labels=imageNames_to_load,
                filenames=[DataManager.get_image_filepath_v2(stack=self.stack, fn=fn,
                                                             prep_id=None,
                                                             version=self.tb_version,
                                                             resol=self.tb_res)
                           for fn in imageNames_to_load],
                resolution=self.tb_res, load_with_cv2=False)
            self.ordered_images_feeder.set_images(labels=['Placeholder'],
                                                  filenames=[self.placeholder_qimage],
                                                  resolution=self.tb_res,
                                                  load_with_cv2=False)
        else:
            self.ordered_images_feeder.set_sections(imageNames_to_load)

        self.sorted_sections_gscene.set_data_feeder(self.ordered_images_feeder)
        if self.sorted_sections_gscene.active_i is not None:
            active_i = self.sorted_sections_gscene.active_i
        else:
            active_i = 1
        self.sorted_sections_gscene.set_active_i(active_i)

    elif self.currently_showing == 'aligned':
        imageNames_to_load = self.get_sorted_filenames(valid_only=self.show_valid_only)

        if not hasattr(self, 'aligned_images_feeder') or self.aligned_images_feeder is None:
            self.aligned_images_feeder = ImageDataFeeder_v2('aligned image feeder',
                                                            stack=self.stack,
                                                            sections=imageNames_to_load,
                                                            resolution=self.tb_res,
                                                            use_thread=False,
                                                            auto_load=False)
            self.aligned_images_feeder.set_images(
                labels=imageNames_to_load,
                filenames=[DataManager.get_image_filepath_v2(stack=self.stack, fn=fn,
                                                             prep_id='alignedPadded',
                                                             version=self.tb_version,
                                                             resol=self.tb_res)
                           for fn in imageNames_to_load],
                resolution=self.tb_res, load_with_cv2=False)
            self.aligned_images_feeder.set_images(labels=['Placeholder'],
                                                  filenames=[self.placeholder_qimage],
                                                  resolution=self.tb_res,
                                                  load_with_cv2=False)
        else:
            self.aligned_images_feeder.set_sections(imageNames_to_load)

        self.sorted_sections_gscene.set_data_feeder(self.aligned_images_feeder)
        if self.sorted_sections_gscene.active_i is not None:
            active_i = self.sorted_sections_gscene.active_i
        else:
            active_i = 1
        self.sorted_sections_gscene.set_active_i(active_i)
                    default=7)
args = parser.parse_args()

input_spec = load_ini(args.input_spec)
stack = input_spec['stack']
prep_id = input_spec['prep_id']
if prep_id == 'None':
    prep_id = None
# resol = input_spec['resol']
version = input_spec['version']
if version == 'None':
    version = None
image_name_list = input_spec['image_name_list']
if image_name_list == 'all':
    image_name_list = DataManager.load_sorted_filenames(stack=stack)[0].keys()

# stack = args.brain_name
# if hasattr(args, 'section') and args.section is not None:
#     sections = [args.section]
# else:
#     sections = metadata_cache['valid_sections'][stack]

win_id = args.win_id
# version = args.version

batch_size = 256
model_dir_name = 'inception-bn-blue'
model_name = 'inception-bn-blue'

model, mean_img = load_mxnet_model(model_dir_name=model_dir_name,
                                   model_name=model_name,
def button_grid_push(self, button):
    """
    Callback for the "grid" buttons. The grid buttons have a one-to-one
    correspondence with the steps in the pipeline; completing each step
    moves you on to the next one.
    """
    # User creates initial masks
    if button == self.b_1:
        subprocess.call(['python', 'mask_editing_tool.py', self.stack,
                         stain_to_metainfo[self.stain.lower()]['img_version_1']])
        # Mark this step as complete if these two files were created
        fp_to_check_1 = os.path.join(DataManager.get_images_root_folder(self.stack),
                                     self.stack + '_prep1_thumbnail_initSnakeContours.pkl')
        fp_to_check_2 = os.path.join(DataManager.get_images_root_folder(self.stack),
                                     self.stack + '_prep1_thumbnail_anchorInitSnakeContours.pkl')
        if os.path.exists(fp_to_check_1) and os.path.exists(fp_to_check_2):
            set_step_completed_in_progress_ini(self.stack, '3-1_mask_initial_contours')

    # Automatic mask generation
    elif button == self.b_2:
        try:
            QMessageBox.about(self, "Popup Message",
                              "This operation is expected to take at least 6 hours.")
            subprocess.call(['python', 'a_script_preprocess_3.py', self.stack, self.stain])
            set_step_completed_in_progress_ini(self.stack, '3-2_mask_scripts_1')
        except Exception as e:
            sys.stderr.write(str(e))

    # Correct auto-generated masks
    elif button == self.b_3:
        try:
            subprocess.call(['python', 'mask_editing_tool.py', self.stack,
                             stain_to_metainfo[self.stain.lower()]['img_version_1']])
            # Mark this step as complete if all mask files exist
            if all_img_files_present(self.stack, prep_id=1, version='mask',
                                     resol='thumbnail'):
                set_step_completed_in_progress_ini(self.stack, '3-3_mask_correct_contours')
        except Exception as e:
            sys.stderr.write(str(e))

    # Run automatic scripts
    elif button == self.b_4:
        try:
            QMessageBox.about(self, "Popup Message",
                              "This operation is expected to take at least 12 hours.")
            # Run script 4
            subprocess.call(['python', 'a_script_preprocess_4.py', self.stack, self.stain])
            # Run script 5 (automatically finds the bounds first)
            rostral_lim, caudal_lim, dorsal_lim, ventral_lim = \
                get_prep5_limits_from_prep1_thumbnail_masks(self.stack)
            command = ['python', 'a_script_preprocess_5.py', self.stack, self.stain,
                       '-l', str(rostral_lim), str(caudal_lim),
                       str(dorsal_lim), str(ventral_lim)]
            subprocess.call(command)

            progress = get_pipeline_status(self.stack)
            if progress != 'a_script_preprocess_4' and progress != 'a_script_preprocess_5':
                set_step_completed_in_progress_ini(self.stack, '3-4_mask_scripts_2')
        except Exception as e:
            sys.stderr.write(str(e))

    self.updateFields()
def get_text_of_pipeline_status(stack, stain):
    text = ""
    all_correct = True
    stain = stain.lower()
    for script_name in script_list:
        for image_set, contents in necessary_files_by_script[stain][script_name].items():
            if type(contents) == str:
                if contents == 'setup_files':
                    all_files_present, missing_files = all_setupFiles_present(stack)
                    # Setup always gives a weird error
                    if len(missing_files) > 0:
                        text += 'Please run ' + script_name + ' and then continue to the next script.'
                        break
                elif contents == 'autoSubmasks':
                    all_files_present, missing_files = all_autoSubmasks_present(stack)
                elif contents == 'floatHistogram':
                    all_files_present, missing_files = \
                        all_adaptive_intensity_floatHistograms_present(stack)
                elif contents == 'atlas_wrt_canonicalAtlasSpace_subject_wrt_wholebrain_atlasResol':
                    all_files_present, missing_files = check_for_file(contents, stack)
                elif contents == 'registered_atlas_structures_wrt_wholebrainXYcropped_xysecTwoCorners':
                    all_files_present, missing_files = check_for_file(contents, stack)
                elif contents == 'classifier_setup_files':
                    all_files_present, missing_files = all_setupFiles_present_classifier(stack)
            else:
                prep_id = contents['prep_id']
                version = contents['version']
                resol = contents['resol']
                if version == 'None':
                    version = None
                all_files_present, missing_files = all_img_files_present(
                    stack, prep_id=prep_id, version=version, resol=resol)

            if all_files_present:
                # text += script_name + " " + image_set + " has been run successfully!\n"
                pass
            else:
                num_missing_files = len(missing_files)
                num_total_files = len(DataManager.load_sorted_filenames(stack)[0].keys())
                if num_missing_files == num_total_files:
                    text += script_name + " is the next script you need to run.\n"
                    all_correct = False
                    break

                # Derive the image directory from the first missing file.
                fn = missing_files[0]
                img_fp = DataManager.get_image_filepath(stack=stack, resol=resol,
                                                        version=version, fn=fn)
                img_root_fp = img_fp[0:img_fp.rfind('/') + 1]

                # text += "\n" + script_name + " " + image_set + " is missing files:\n\n"
                text += "\n" + script_name + " did not run properly and has missing files:\n"
                text += "(" + str(num_missing_files) + " missing out of " + \
                        str(num_total_files) + ")\n\n"
                text += "Missing Directory: " + img_root_fp + "\n\n"
                text += "Missing Files:\n"
                missing_files.sort()
                for fn in missing_files:
                    text += fn + "\n"
                all_correct = False
                break
        if not all_correct:
            break
        elif all_correct:
            if script_name == 'a_script_processing':
                text += script_name + " runs the brain through the classifiers and fits the atlas to the images. Ready to run!"
            else:
                text += script_name + " has been run successfully!\n\n"
    return text, script_name
toanchor_transforms_fp = op['transforms_csv']
# anchor_img_name = op['anchor_image_name']
anchor_img_name = image_name_list[0]
base_prep_id = op['base_prep_id']

#################################################

anchor_idx = image_name_list.index(anchor_img_name)

# Pairwise transform of each section to its previous section.
transformation_to_previous_sec = {}
for i in range(1, len(image_name_list)):
    transformation_to_previous_sec[i] = DataManager.load_consecutive_section_transform(
        moving_fn=image_name_list[i], fixed_fn=image_name_list[i - 1],
        stack=input_spec['stack'])
    # transformation_to_previous_sec[i] = DataManager.load_consecutive_section_transform(moving_fn=image_name_list[i], fixed_fn=image_name_list[i-1], elastix_output_dir=elastix_output_dir, custom_output_dir=custom_output_dir)

transformation_to_anchor_sec = {}

# Converts every transformation
for moving_idx in range(len(image_name_list)):
    if moving_idx == anchor_idx:
        # transformation_to_anchor_sec[moving_idx] = np.eye(3)
        transformation_to_anchor_sec[image_name_list[moving_idx]] = np.eye(3)
    elif moving_idx < anchor_idx:
        T_composed = np.eye(3)
        for i in range(anchor_idx, moving_idx, -1):
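# A minimal sketch of the composition idea the (truncated) loop above is
# building, using invented 3x3 translation matrices: chaining pairwise
# section-to-previous-section transforms yields each section's transform to
# the anchor. This illustrates the matrix chaining only, not the script's
# exact composition code.
if __name__ == '__main__':
    import numpy as np

    def shift(tx, ty):
        return np.array([[1.0, 0.0, tx], [0.0, 1.0, ty], [0.0, 0.0, 1.0]])

    T_1_to_0 = shift(3.0, 0.0)   # maps section 1 into section 0's frame
    T_2_to_1 = shift(0.0, -2.0)  # maps section 2 into section 1's frame
    T_2_to_0 = T_1_to_0.dot(T_2_to_1)
    print(T_2_to_0[:2, 2])       # combined shift: (3, -2)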
        op_params_str)

    # The sequential_dispatcher argument cannot be too long, so we must limit
    # the number of images processed each time.
    batch_size = 100
    for batch_id in range(0, len(image_name_list), batch_size):
        # print '_____________________________________________'
        # print batch_id
        # print '_____________________________________________'

        # Removes stderr and stdout
        run_distributed('python %(script)s --input_fp \"%%(input_fp)s\" '
                        '--output_fp \"%%(output_fp)s\" %%(ops_str)s --pad_color %%(pad_color)s' %
                        {'script': os.path.join(os.getcwd(), 'warp_crop_v3.py')},
                        kwargs_list=[{'ops_str': ops_str_all_images[img_name],
                                      'input_fp': DataManager.get_image_filepath_v2(
                                          stack=stack, fn=img_name, prep_id=prep_id,
                                          version=version, resol=resol),
                                      'output_fp': DataManager.get_image_filepath_v2(
                                          stack=stack, fn=img_name, prep_id=out_prep_id,
                                          version=version, resol=resol),
                                      'pad_color': pad_color}
                                     for img_name in image_name_list[batch_id:batch_id + batch_size]],
                        argument_type='single', jobs_per_node=args.njobs, local_only=True)

elif args.op is not None:  # Usage 1
    op_str = ''
    for op_type, op_params in args.op:  # args.op is a list
        # Revert the leading minus sign hack.
        if op_params.startswith('^'):