def run_spiness_prediction(max_n_jobs_gpu: Optional[int] = None,
                           max_n_jobs: Optional[int] = None):
    """
    Will store semantic spine labels inside
    ``ssv.label_dict('vertex')['spiness']``.

    Todo:
        * run rendering chunk-wise instead of on-the-fly and then perform
          prediction chunk-wise as well, adopt from spiness step.

    Args:
        max_n_jobs_gpu: Number of parallel GPU jobs. Used for the inference.
        max_n_jobs: Number of parallel CPU jobs. Used for the mapping step.
    """
    if max_n_jobs is None:
        max_n_jobs = global_params.NCORE_TOTAL * 2
    if max_n_jobs_gpu is None:
        max_n_jobs_gpu = global_params.NGPU_TOTAL * 2
    log = initialize_logging('spine_identification',
                             global_params.config.working_dir + '/logs/',
                             overwrite=False)
    ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)

    # run semantic spine segmentation on multi views
    sd = ssd.get_segmentationdataset("sv")
    # chunk them
    multi_params = chunkify(sd.so_dir_paths, max_n_jobs_gpu)
    # set model properties
    model_kwargs = dict(src=global_params.config.mpath_spiness,
                        multi_gpu=False)
    so_kwargs = dict(working_dir=global_params.config.working_dir)
    pred_kwargs = dict(pred_key=global_params.semseg2mesh_spines['semseg_key'])
    multi_params = [[par, model_kwargs, so_kwargs, pred_kwargs]
                    for par in multi_params]
    log.info('Starting spine prediction.')
    qu.QSUB_script(multi_params, "predict_spiness_chunked", log=log,
                   n_max_co_processes=global_params.NGPU_TOTAL,
                   n_cores=global_params.NCORES_PER_NODE //
                           global_params.NGPUS_PER_NODE,
                   suffix="", additional_flags="--gres=gpu:1",
                   remove_jobfolder=True)
    log.info('Finished spine prediction.')

    # map semantic spine segmentation of multi views on SSV mesh
    # TODO: CURRENTLY HIGH MEMORY CONSUMPTION
    if not ssd.mapping_dict_exists:
        raise ValueError('Mapping dict does not exist.')
    multi_params = np.array(ssd.ssv_ids, dtype=np.uint)
    nb_svs_per_ssv = np.array([len(ssd.mapping_dict[ssv_id])
                               for ssv_id in ssd.ssv_ids])
    # sort ssv ids according to their number of SVs (descending)
    multi_params = multi_params[np.argsort(nb_svs_per_ssv)[::-1]]
    multi_params = chunkify(multi_params, max_n_jobs)
    # add ssd parameters
    kwargs_semseg2mesh = global_params.semseg2mesh_spines
    kwargs_semsegforcoords = global_params.semseg2coords_spines
    multi_params = [(ssv_ids, ssd.version, ssd.version_dict, ssd.working_dir,
                     kwargs_semseg2mesh, kwargs_semsegforcoords)
                    for ssv_ids in multi_params]
    log.info('Starting mapping of spine predictions to neurite surfaces.')
    qu.QSUB_script(multi_params, "map_spiness",
                   n_max_co_processes=global_params.NCORE_TOTAL, n_cores=4,
                   suffix="", additional_flags="", remove_jobfolder=True,
                   log=log)
    log.info('Finished spine mapping.')
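
# The mapping step above distributes work by sorting the SSV IDs by their SV
# count in descending order before chunking, so that the most expensive cells
# are scheduled first rather than piling up at the end of the queue. Below is
# a minimal, self-contained sketch of one way such weight-aware chunking can
# be done. The greedy `chunkify_balanced` helper is hypothetical and only
# illustrates the intent; SyConn's own `chunkify` may simply split the
# descending-sorted array into contiguous pieces.
import numpy as np


def chunkify_balanced(ids, weights, n_chunks):
    """Distribute `ids` over `n_chunks` sublists such that the summed
    `weights` per chunk are roughly equal (greedy LPT heuristic)."""
    order = np.argsort(weights)[::-1]  # heaviest items first
    chunks = [[] for _ in range(n_chunks)]
    loads = np.zeros(n_chunks)
    for ix in order:
        target = int(np.argmin(loads))  # currently lightest chunk
        chunks[target].append(ids[ix])
        loads[target] += weights[ix]
    return chunks


# Example: six SSVs with very different SV counts, split into two jobs.
# chunkify_balanced(np.array([10, 11, 12, 13, 14, 15]),
#                   np.array([900, 5, 10, 850, 20, 15]), 2)
# -> [[10], [13, 14, 15, 12, 11]]  (summed loads: 900 vs. 900)
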
def run_axoness_prediction(max_n_jobs_gpu: Optional[int] = None,
                           e3: bool = True):
    """
    Run the axoness inference based on the ``img2scalar`` CMN. See
    :func:`~run_semsegaxoness_prediction` for the semantic segmentation model.

    Args:
        max_n_jobs_gpu: Number of parallel jobs.
        e3: If True, use elektronn3 models.

    Notes:
        Requires :func:`~run_create_neuron_ssd`, :func:`~run_neuron_rendering`
        and :func:`~syconn.exec.skeleton.run_skeleton_generation`.
    """
    log = initialize_logging('axon_prediction',
                             global_params.config.working_dir + '/logs/',
                             overwrite=False)
    if max_n_jobs_gpu is None:
        max_n_jobs_gpu = global_params.config.ngpu_total * 2
    # here because all qsub jobs will start a script referring to
    # 'global_params.config.working_dir'
    ssd = SuperSegmentationDataset(working_dir=global_params.config.working_dir)
    sd = ssd.get_segmentationdataset("sv")
    # chunk them
    multi_params = chunkify(sd.so_dir_paths, max_n_jobs_gpu)
    pred_key = "axoness_probas"  # leave this fixed because it is used all over
    # get model properties
    log.info('Performing axon prediction of neuron views. Labels will be '
             'stored on SV level in the attribute dict with key '
             '"{}"'.format(pred_key))
    if e3 is True:
        model_kwargs = 'get_axoness_model_e3'
    else:
        m = get_axoness_model()
        model_kwargs = dict(model_path=m._path,
                            normalize_data=m.normalize_data,
                            imposed_batch_size=m.imposed_batch_size,
                            nb_labels=m.nb_labels,
                            channels_to_load=m.channels_to_load)
    # all other kwargs like obj_type='sv' and version are the current SV
    # SegmentationDataset by default
    so_kwargs = dict(working_dir=global_params.config.working_dir)
    # for axoness views set woglia to True (because glia were removed
    # beforehand) and raw_only to False
    pred_kwargs = dict(woglia=True, pred_key=pred_key, verbose=False,
                       raw_only=False)
    multi_params = [[par, model_kwargs, so_kwargs, pred_kwargs]
                    for par in multi_params]
    if e3 is True:
        # TODO: using two GPUs on a single node seems to be error-prone
        #  -> wb13 froze when processing example_cube=2
        n_cores = (global_params.config['ncores_per_node'] //
                   global_params.config['ngpus_per_node'])
        _ = qu.QSUB_script(multi_params, "predict_sv_views_chunked_e3",
                           log=log,
                           n_max_co_processes=global_params.config.ngpu_total,
                           n_cores=n_cores, suffix="_axoness",
                           additional_flags="--gres=gpu:1",
                           remove_jobfolder=True)
    else:
        for par in multi_params:
            mk = par[1]
            # SLURM is GPU aware, no need for random assignments.
            mk["init_gpu"] = 0  # np.random.rand(0, 2)
        _ = qu.QSUB_script(multi_params, "predict_sv_views_chunked", log=log,
                           n_max_co_processes=global_params.config.ngpu_total // 2,
                           n_cores=global_params.config['ncores_per_node'],
                           suffix="_axoness",
                           additional_flags="--gres=gpu:1",
                           remove_jobfolder=True)
    log.info('Finished axon prediction. Now checking for missing predictions.')
    res = find_missing_sv_attributes_in_ssv(
        ssd, pred_key, n_cores=global_params.config['ncores_per_node'])
    if len(res) > 0:
        log.error("Attribute '{}' missing for following"
                  " SVs:\n{}".format(pred_key, res))
    else:
        log.info('Success.')
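
# A minimal sketch of the consistency check performed at the end of
# `run_axoness_prediction`, assuming each SV exposes its attributes as a
# plain dict. The `attr_dicts` mapping and the `missing_sv_attributes` helper
# are hypothetical illustrations; `find_missing_sv_attributes_in_ssv` is the
# real, parallelised variant that scans the SuperSegmentationDataset.
def missing_sv_attributes(attr_dicts, pred_key):
    """Return the IDs of all SVs whose attribute dict lacks `pred_key`."""
    return [sv_id for sv_id, ad in attr_dicts.items() if pred_key not in ad]


# Example: SV 2 has no prediction stored yet, so it is reported as missing.
# missing_sv_attributes({1: {"axoness_probas": [0.1, 0.8, 0.1]}, 2: {}},
#                       "axoness_probas")
# -> [2]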