def multi_repeat(n, funcs):
    """Run every function in funcs n times, in parallel via SCOOP when available."""
    if USE_SCOOP:
        fs = [futures.submit(func) for _ in range(n) for func in funcs]
        futures.wait(fs)
        return [f.result() for f in fs]
    else:
        return [func() for _ in range(n) for func in funcs]
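# Usage sketch for multi_repeat (hypothetical helpers): each listed function is
# executed n times, through SCOOP workers when USE_SCOOP is true, serially
# otherwise. `roll_a` and `roll_b` are illustrative stand-ins, defined at module
# level because SCOOP needs picklable callables.
def roll_a():
    return 1

def roll_b():
    return 2

def multi_repeat_example():
    # returns [1, 2, 1, 2, 1, 2]: the function list is repeated n=3 times
    return multi_repeat(3, [roll_a, roll_b])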
def main():
    task = futures.submit(func0, 20)
    # You can wait for a result before continuing the computation
    futures.wait([task], return_when=futures.ALL_COMPLETED)
    result = task.result()
    print(result)
    return result
def preselect(sample,
              library,
              method='MI',
              number=10,
              mask=None,
              use_nl=False,
              flip=False,
              step=None,
              lib_add_n=0):
    '''Calculate the requested similarity function and return the top
    `number` entries from the library'''
    results = []
    column = 0

    # TODO: use multiple modalities for preselection?
    if use_nl:
        column = 4 + lib_add_n

    for (i, j) in enumerate(library):
        results.append(
            futures.submit(calculate_similarity, sample,
                           MriDataset(scan=j[column]),
                           method=method, mask=mask, flip=flip, step=step))
    futures.wait(results, return_when=futures.ALL_COMPLETED)

    val = [(j.result(), library[i]) for (i, j) in enumerate(results)]
    val_sorted = sorted(val, key=lambda s: s[0])

    return [i[1] for i in val_sorted[0:number]]
def funcDone():
    f = futures.submit(func4, 100)
    futures.wait((f,))
    done = f.done()
    if done is not True:
        return done
    res = f.result()
    done = f.done()
    return done
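# Companion sketch for funcDone: done() may be False right after submit(), but
# is guaranteed True once futures.wait() has returned (the concurrent.futures
# semantics that SCOOP mirrors).
def funcDoneExample():
    f = futures.submit(func4, 100)
    early = f.done()          # may still be False: the worker can be busy
    futures.wait((f,))
    assert f.done() is True   # after waiting, the future is complete
    return early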
def main(n):
    # creates a tree of workers: a worker goes down a level and spawns
    # other workers, who in turn spawn other workers
    task = futures.submit(func0, n)
    # You can wait for a result before continuing the computation
    futures.wait([task], return_when=futures.ALL_COMPLETED)
    result = task.result()
    print(result)
    return result
def main():
    path = 'tasks'
    task = futures.submit(get_data, path)
    futures.wait([task], return_when=futures.ALL_COMPLETED)
    result = task.result()
    sorted_result = sorted(result, key=result.__getitem__, reverse=True)
    for key in sorted_result:
        print("%s: %s" % (key, result[key]))
    return result
def funcCallback():
    f = futures.submit(func4, 100)

    def callBack(future):
        future.was_callbacked = True

    f.add_done_callback(callBack)
    # SCOOP keeps the registered callbacks in the future's `callback` list
    if len(f.callback) == 0:
        return False
    futures.wait((f,))
    try:
        return f.was_callbacked
    except AttributeError:
        return False
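# Sketch of the callback mechanism exercised by funcCallback: the registered
# callback fires once the future completes and receives the future itself.
# `func4` is the same helper used above; the callback runs locally, so a nested
# function is fine here.
def funcCallbackExample():
    def report(future):
        print("finished with result: {}".format(future.result()))

    f = futures.submit(func4, 100)
    f.add_done_callback(report)
    futures.wait((f,))
    return f.result()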
def main():
    try:
        os.remove("scf_results.txt")
    except OSError:
        pass

    # res = run(5, mixings, SCFOpt)
    # print([str(mix) + " " + str(iter) for mix, iter in zip(reversed(mixings[-len(res):]), res)])

    fseq = [futures.submit(run, i, mixings, SCFOpt) for i in range(nrandom)]
    not_done = ["dummy"]
    while not_done:
        done, not_done = futures.wait(fseq, None, "FIRST_COMPLETED")
        for i in done:
            with open('scf_results.txt', 'a') as f:
                line = [str(mix) + " " + str(it)
                        for mix, it in zip(reversed(mixings[-len(i.result()):]),
                                           i.result())]
                print('\n'.join(line), file=f)
            fseq.remove(i)
def preselect(sample,
              library,
              method='MI',
              number=10,
              mask=None,
              use_nl=False,
              flip=False,
              step=None,
              lib_add_n=0,
              groups=None):
    '''Calculate the requested similarity function and return the top
    `number` entries from the library'''
    results = []
    column = 2  # skip over grading and group

    # TODO: use multiple modalities for preselection?
    if use_nl:
        column = 6 + lib_add_n

    for (i, j) in enumerate(library):
        results.append(
            futures.submit(calculate_similarity, sample,
                           MriDataset(scan=j[column]),
                           method=method, mask=mask, flip=flip, step=step))
    futures.wait(results, return_when=futures.ALL_COMPLETED)

    val = [(j.result(), int(library[i][0]), library[i])
           for (i, j) in enumerate(results)]

    if groups is None:
        val_sorted = sorted(val, key=lambda s: s[0])
        return [i[2] for i in val_sorted[0:number]]
    else:
        s_number = number // groups  # integer share per group
        res = []

        for i in range(groups):
            val_sorted = sorted([v for v in val if v[1] == i],
                                key=lambda s: s[0])
            res.extend(val_sorted[0:s_number])

        return [i[2] for i in res]
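# Hypothetical call sketch for the grouped variant above: each library row is
# expected to look like (group, grading, scan_path, ...), matching column=2
# (or 6+lib_add_n when use_nl=True). With number=4 and groups=2, the two most
# similar entries are kept per group:
#
#   top = preselect(sample, library, method='MI', number=4, groups=2)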
def resample_split_segmentations(input, output, xfm=None, like=None, order=4,
                                 invert_transform=False, symmetric=False):
    '''Resample individual segmentations, using parallel execution'''
    results = []
    base = input.seg.rsplit('.mnc', 1)[0]

    for (i, j) in input.seg_split.items():
        if i not in output.seg_split:
            output.seg_split[i] = '{}_{:03d}.mnc'.format(base, i)
        results.append(futures.submit(
            resample_file, j, output.seg_split[i],
            xfm=xfm, like=like, order=order, invert_transform=invert_transform))

    if symmetric:
        base = input.seg_f.rsplit('.mnc', 1)[0]
        for (i, j) in input.seg_f_split.items():
            if i not in output.seg_f_split:
                # bugfix: the original assigned to output.seg_split here
                output.seg_f_split[i] = '{}_{:03d}.mnc'.format(base, i)
            results.append(futures.submit(
                resample_file, j, output.seg_f_split[i],
                xfm=xfm, like=like, order=order, invert_transform=invert_transform))

    futures.wait(results, return_when=futures.ALL_COMPLETED)
def regress(samples,
            initial_model=None,
            initial_int_model=None,
            initial_def_model=None,
            output_int_model=None,
            output_def_model=None,
            output_residuals_int=None,
            output_residuals_def=None,
            prefix='.',
            options={}):
    """Perform iterative model creation"""
    try:
        # make sure all input scans have parameters
        N_int = None
        N_def = None

        int_design_matrix = []
        def_design_matrix = []
        nomask = False

        for s in samples:
            if N_int is None:
                N_int = len(s.par_int)
            elif N_int != len(s.par_int):
                raise mincError(
                    "Sample {} has an inconsistent number of int parameters: {} expected {}"
                    .format(repr(s), len(s), N_int))

            if N_def is None:
                N_def = len(s.par_def)
            elif N_def != len(s.par_def):
                raise mincError(
                    "Sample {} has an inconsistent number of def parameters: {} expected {}"
                    .format(repr(s), len(s), N_def))

            int_design_matrix.append(s.par_int)
            def_design_matrix.append(s.par_def)

            if s.mask is None:
                nomask = True

        #print("Intensity design matrix=\n{}".format(repr(int_design_matrix)))
        #print("Velocity design matrix=\n{}".format(repr(def_design_matrix)))

        ref_model = None
        # current estimate of template
        if initial_model is not None:
            current_int_model = initial_model
            current_def_model = None
            ref_model = initial_model.scan
        else:
            current_int_model = initial_int_model
            current_def_model = initial_def_model
            ref_model = initial_int_model.volume[0]

        transforms = []
        full_transforms = []

        protocol = options.get('protocol',
                               [{'iter': 4, 'level': 32, 'blur_int': None, 'blur_def': None},
                                {'iter': 4, 'level': 16, 'blur_int': None, 'blur_def': None}])

        cleanup = options.get('cleanup', False)
        cleanup_intermediate = options.get('cleanup_intermediate', False)
        parameters = options.get('parameters', None)
        refine = options.get('refine', False)
        qc = options.get('qc', False)
        downsample = options.get('downsample', None)
        start_level = options.get('start_level', None)
        debug = options.get('debug', False)
        debias = options.get('debias', True)
        nl_mode = options.get('nl_mode', 'animal')

        if parameters is None:
            pass  # TODO: make sensible parameters?

        int_models = []
        def_models = []
        int_residuals = []
        def_residuals = []

        int_residual = None
        def_residual = None

        prev_def_estimate = None

        # go through all the iterations
        it = 0
        residuals = []

        for (i, p) in enumerate(protocol):
            blur_int_model = p.get('blur_int', None)
            blur_def_model = p.get('blur_def', None)
            for j in range(1, p['iter'] + 1):
                it += 1
                _start_level = None
                if it == 1:
                    _start_level = start_level

                # this will be a model for next iteration actually
                it_prefix = prefix + os.sep + str(it)
                if not os.path.exists(it_prefix):
                    os.makedirs(it_prefix)

                next_int_model = MriDatasetRegress(prefix=prefix, name='model_int',
                                                   iter=it, N=N_int, nomask=nomask)
                next_def_model = MriDatasetRegress(prefix=prefix, name='model_def',
                                                   iter=it, N=N_def, nomask=True)
                print("next_int_model={}".format(
                    next_int_model.volume[0].rsplit('_0.mnc', 1)[0] + '_RMS.mnc'))

                int_residual = MriDataset(
                    prefix=prefix,
                    scan=next_int_model.volume[0].rsplit('_0.mnc', 1)[0] + '_RMS.mnc')
                def_residual = MriDataset(
                    prefix=prefix,
                    scan=next_def_model.volume[0].rsplit('_0.mnc', 1)[0] + '_RMS.mnc')

                # skip over existing models here!
                if not next_int_model.exists() or \
                   not next_def_model.exists() or \
                   not int_residual.exists() or \
                   not def_residual.exists():

                    int_estimate = []
                    def_estimate = []
                    r = []

                    # 1. for each sample generate current approximation
                    # 2. perform non-linear registration between each sample
                    #    and the sample-specific approximation
                    # 3. update transformation
                    # 1+2+3 - all together
                    for (i, s) in enumerate(samples):
                        sample_def = MriTransform(name=s.name, prefix=it_prefix, iter=it)
                        sample_int = MriDataset(name=s.name, prefix=it_prefix, iter=it)

                        previous_def = None
                        if refine and it > 1:
                            previous_def = prev_def_estimate[i]

                        r.append(
                            futures.submit(non_linear_register_step_regress_std,
                                           s,
                                           current_int_model,
                                           current_def_model,
                                           None,
                                           sample_def,
                                           parameters=parameters,
                                           level=p['level'],
                                           start_level=_start_level,
                                           work_dir=prefix,
                                           downsample=downsample,
                                           debug=debug,
                                           previous_def=previous_def,
                                           nl_mode=nl_mode))
                        def_estimate.append(sample_def)
                        #int_estimate.append(sample_int)

                    # wait for jobs to finish
                    futures.wait(r, return_when=futures.ALL_COMPLETED)
                    avg_inv_transform = None

                    if debias:
                        # here all the transforms should exist
                        avg_inv_transform = MriTransform(name='avg_inv',
                                                         prefix=it_prefix, iter=it)
                        # 2. average all transformations
                        average_transforms(def_estimate, avg_inv_transform,
                                           symmetric=False, invert=True, nl=True)

                    corr = []
                    corr_transforms = []
                    corr_samples = []

                    # 3. concatenate correction and resample
                    for (i, s) in enumerate(samples):
                        c = MriDataset(prefix=it_prefix, iter=it, name=s.name)
                        x = MriTransform(name=s.name + '_corr', prefix=it_prefix, iter=it)

                        corr.append(
                            futures.submit(concat_resample_nl,
                                           s, def_estimate[i], avg_inv_transform,
                                           c, x, current_int_model, p['level'],
                                           symmetric=False, qc=qc,
                                           invert_transform=True))
                        corr_transforms.append(x)
                        corr_samples.append(c)

                    futures.wait(corr, return_when=futures.ALL_COMPLETED)

                    # 4. perform regression and create new estimate
                    # 5. calculate residuals
                    # 4+5
                    result = futures.submit(voxel_regression,
                                            int_design_matrix, def_design_matrix,
                                            corr_samples, corr_transforms,
                                            next_int_model, next_def_model,
                                            int_residual, def_residual,
                                            blur_int_model=blur_int_model,
                                            blur_def_model=blur_def_model,
                                            qc=qc)
                    futures.wait([result], return_when=futures.ALL_COMPLETED)

                    # 6. cleanup
                    if cleanup:
                        print("Cleaning up iteration: {}".format(it))
                        for i in def_estimate:
                            i.cleanup()
                        for i in corr_samples:
                            i.cleanup()
                        if prev_def_estimate is not None:
                            for i in prev_def_estimate:
                                i.cleanup()
                        avg_inv_transform.cleanup()
                else:
                    # files were there, reuse them
                    print("Iteration {} already performed, skipping".format(it))
                    corr_transforms = []
                    # this is a hack right now
                    for (i, s) in enumerate(samples):
                        x = MriTransform(name=s.name + '_corr', prefix=it_prefix, iter=it)
                        corr_transforms.append(x)

                int_models.append(current_int_model)
                def_models.append(current_def_model)
                int_residuals.append(int_residual)
                def_residuals.append(def_residual)

                current_int_model = next_int_model
                current_def_model = next_def_model

                result = futures.submit(average_stats_regression,
                                        current_int_model, current_def_model,
                                        int_residual, def_residual)
                residuals.append(result)

                regression_results = {
                    'int_model': current_int_model,
                    'def_model': current_def_model,
                    'int_residuals': int_residual.scan,
                    'def_residuals': def_residual.scan,
                }
                with open(prefix + os.sep + 'results_{:03d}.json'.format(it), 'w') as f:
                    json.dump(regression_results, f, indent=1, cls=MRIEncoder)

                # save for next iteration
                # TODO: regularize?
                prev_def_estimate = corr_transforms  # have to use adjusted def estimate

        # copy output to the destination
        futures.wait(residuals, return_when=futures.ALL_COMPLETED)
        with open(prefix + os.sep + 'stats.txt', 'w') as f:
            for s in residuals:
                f.write("{}\n".format(s.result()))

        with open(prefix + os.sep + 'results_final.json', 'w') as f:
            json.dump(regression_results, f, indent=1, cls=MRIEncoder)

        if cleanup_intermediate:
            for i in range(len(int_models) - 1):
                int_models[i].cleanup()
                def_models[i].cleanup()
                int_residuals[i].cleanup()
                def_residuals[i].cleanup()
            # delete unneeded models
            #shutil.rmtree(prefix + os.sep + 'reg')

        return regression_results
    except mincError as e:
        print("Exception in regress:{}".format(str(e)))
        traceback.print_exc(file=sys.stdout)
        raise
    except:
        print("Exception in regress:{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stdout)
        raise
def generate_ldd_average(samples,
                         initial_model=None,
                         output_model=None,
                         output_model_sd=None,
                         prefix='.',
                         options={}):
    """Perform iterative model creation"""
    try:
        #print(repr(options))
        # use first sample as initial model
        if not initial_model:
            initial_model = samples[0]

        # current estimate of template
        current_model = initial_model
        current_model_sd = None

        transforms = []
        corr = []
        bias_fields = []
        corr_transforms = []
        sd = []
        corr_samples = []

        protocol = options.get('protocol',
                               [{'iter': 4, 'level': 32},
                                {'iter': 4, 'level': 16}])

        cleanup = options.get('cleanup', False)
        symmetric = options.get('symmetric', False)
        parameters = options.get('parameters', None)
        refine = options.get('refine', True)
        qc = options.get('qc', False)
        downsample = options.get('downsample', None)

        models = []
        models_sd = []

        if symmetric:
            flipdir = prefix + os.sep + 'flip'
            if not os.path.exists(flipdir):
                os.makedirs(flipdir)

            flip_all = []
            # generate flipped versions of all scans
            for (i, s) in enumerate(samples):
                s.scan_f = prefix + os.sep + 'flip' + os.sep + os.path.basename(s.scan)
                if s.mask is not None:
                    s.mask_f = prefix + os.sep + 'flip' + os.sep + 'mask_' \
                        + os.path.basename(s.scan)
                flip_all.append(futures.submit(generate_flip_sample, s))

            futures.wait(flip_all, return_when=futures.ALL_COMPLETED)

        # go through all the iterations
        it = 0
        for (i, p) in enumerate(protocol):
            for j in range(1, p['iter'] + 1):
                it += 1

                # this will be a model for next iteration actually
                # 1. register all subjects to current template
                next_model = MriDataset(prefix=prefix, iter=it, name='avg')
                next_model_sd = MriDataset(prefix=prefix, iter=it, name='sd')
                transforms = []

                it_prefix = prefix + os.sep + str(it)
                if not os.path.exists(it_prefix):
                    os.makedirs(it_prefix)

                inv_transforms = []
                fwd_transforms = []

                for (i, s) in enumerate(samples):
                    sample_xfm = LDDMriTransform(name=s.name, prefix=it_prefix, iter=it)

                    prev_transform = None
                    prev_bias_field = None

                    if it > 1 and refine:
                        prev_transform = corr_transforms[i]

                    transforms.append(
                        futures.submit(non_linear_register_step_ldd,
                                       s,
                                       current_model,
                                       sample_xfm,
                                       init_vel=prev_transform,
                                       symmetric=symmetric,
                                       parameters=parameters,
                                       level=p['level'],
                                       work_dir=prefix,
                                       downsample=downsample))
                    fwd_transforms.append(sample_xfm)

                # wait for jobs to finish
                futures.wait(transforms, return_when=futures.ALL_COMPLETED)

                if cleanup and it > 1:
                    # remove information from previous iteration
                    for s in corr_samples:
                        s.cleanup()
                    for x in corr_transforms:
                        x.cleanup()

                # here all the transforms should exist
                avg_inv_transform = LDDMriTransform(name='avg_inv',
                                                    prefix=it_prefix, iter=it)

                # 2. average all transformations
                average_transforms_ldd(fwd_transforms, avg_inv_transform,
                                       symmetric=symmetric, invert=True)

                corr = []
                corr_transforms = []
                corr_samples = []

                # 3. concatenate correction and resample
                for (i, s) in enumerate(samples):
                    c = MriDataset(prefix=it_prefix, iter=it, name=s.name)
                    x = LDDMriTransform(name=s.name + '_corr', prefix=it_prefix, iter=it)

                    corr.append(
                        futures.submit(concat_resample_ldd,
                                       s, fwd_transforms[i], avg_inv_transform,
                                       c, x, current_model.scan,
                                       symmetric=symmetric, qc=qc))
                    corr_transforms.append(x)
                    corr_samples.append(c)

                futures.wait(corr, return_when=futures.ALL_COMPLETED)

                # 4. average resampled samples to create new estimate
                result = futures.submit(average_samples, corr_samples,
                                        next_model, next_model_sd,
                                        symmetric=symmetric)
                futures.wait([result], return_when=futures.ALL_COMPLETED)

                if cleanup:
                    for s in fwd_transforms:
                        s.cleanup()

                if cleanup and it > 1:
                    # remove previous template estimate
                    models.append(next_model)
                    models_sd.append(next_model_sd)

                current_model = next_model
                current_model_sd = next_model_sd

                result = futures.submit(average_stats, next_model, next_model_sd)
                sd.append(result)

        # copy output to the destination
        futures.wait(sd, return_when=futures.ALL_COMPLETED)
        with open(prefix + os.sep + 'stats.txt', 'w') as f:
            for s in sd:
                f.write("{}\n".format(s.result()))

        results = {
            'model': current_model,
            'model_sd': current_model_sd,
            'vel': corr_transforms,
            'biascorr': None,
            'scan': corr_samples,
            'symmetric': symmetric,
        }

        with open(prefix + os.sep + 'results.json', 'w') as f:
            json.dump(results, f, indent=1, cls=LDDMRIEncoder)

        if cleanup:
            # delete unneeded models
            for m in models:
                m.cleanup()
            for m in models_sd:
                m.cleanup()

        return results
    except mincError as e:
        print("Exception in generate_ldd_average:{}".format(str(e)))
        traceback.print_exc(file=sys.stdout)
        raise
    except:
        print("Exception in generate_ldd_average:{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stdout)
        raise
def launchPipeline(options):
    '''
    INPUT: options are the parsed information from the command line

    TASKS
    - Read the patients list
    - Create a pickle file for each patient to store all the image information
    - Run the pipeline on each pickle file
    '''

    _opts = {}

    if options.json is not None:
        with open(options.json, 'r') as f:
            _opts = json.load(f)
        # populate options if empty:
        if 'modelname' in _opts:
            options.modelname = _opts['modelname']
        if 'modeldir' in _opts:
            options.modeldir = _opts['modeldir']
        #if 'temporalregu' in _opts:
        #    options.temporalregu = _opts['temporalregu']
        options.temporalregu = False  # VF: not implemented in the public release
        if 'skullreg' in _opts:
            options.skullreg = _opts['skullreg']
        if 'large_atrophy' in _opts:
            options.large_atrophy = _opts['large_atrophy']
        if 'manual' in _opts:
            options.manual = _opts['manual']
        if 'mask_n3' in _opts:
            options.mask_n3 = _opts['mask_n3']
        if 'n4' in _opts:
            options.n4 = _opts['n4']
        if 'les' in _opts:
            options.les = _opts['les']
        if 'dobiascorr' in _opts:
            options.dobiascorr = _opts['dobiascorr']
        if 'geo' in _opts:
            options.geo = _opts['geo']
        if 'dodbm' in _opts:
            options.dodbm = _opts['dodbm']
        if 'lngcls' in _opts:
            options.lngcls = _opts['lngcls']
        if 'donl' in _opts:
            options.donl = _opts['donl']
        if 'denoise' in _opts:
            options.denoise = _opts['denoise']
        if 'vbm_options' in _opts:
            options.vbm_blur = _opts['vbm_options'].get('vbm_blur', 4.0)
            options.vbm_res = _opts['vbm_options'].get('vbm_res', 2)
            options.vbm_nl = _opts['vbm_options'].get('vbm_nl', None)
            options.dovbm = True
        if 'linreg' in _opts:
            options.linreg = _opts['linreg']
        if 'add' in _opts:
            options.add = _opts['add']
        if 'rigid' in _opts:
            options.rigid = _opts['rigid']
        # TODO: add more options

    # patients dictionary
    patients = {}

    # load additional steps and store them inside the option structure if they
    # are strings; otherwise assume they are already loaded properly
    _add = []
    for i, j in enumerate(options.add):
        try:
            _par = j
            if isinstance(j, six.string_types):
                with open(j, 'r') as f:
                    _par = json.load(f)
            _add.append(_par)
        except:
            print("Error loading JSON:{}\n{}".format(j, sys.exc_info()[0]),
                  file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            exit(1)

    options.add = _add

    # create output dir
    mkdir(options.output)
    options.output = os.path.abspath(options.output) + os.sep  # always use abs paths for sge

    if options.workdir is not None:
        options.workdir = os.path.abspath(options.workdir) + os.sep  # always use abs paths for sge
        if not os.path.exists(options.workdir):
            os.makedirs(options.workdir)

    # open the list and process each patient
    with open(options.list) as p:
        for line in p:
            # remove the trailing '\n' from the line
            sp = line[:-1].split(',')

            size = len(sp)  # depending on the number of items, not all information was given
            if size < 3:
                print(' -- Line error: ' + str(len(sp)))
                print(' - Minimum format is : id,visit,t1')
                continue

            id = sp[0]     # set id
            visit = sp[1]  # set visit

            # ## Add patient id if not found
            if id not in patients:  # search key in the dictionary
                patients[id] = LngPatient(id)  # create new LngPatient

                if size > 6:
                    patients[id].sex = sp[6]

                patients[id].pipeline_version = version
                patients[id].geo_corr = options.geo

                # create patient's dir
                patients[id].patientdir = options.output + os.sep + id + os.sep
                mkdir(patients[id].patientdir)

                if options.workdir is None:
                    patients[id].workdir = patients[id].patientdir + os.sep + 'tmp' + os.sep
                    mkdir(patients[id].workdir)
                else:
                    patients[id].workdir = options.workdir

                if options.manual is not None:
                    patients[id].manualdir = options.manual + os.sep + id + os.sep
                else:
                    patients[id].manualdir = None

                # create the pickle name
                # this file saves all the names and processing information
                patients[id].pickle = patients[id].patientdir + id + '.pickle'

                if os.path.exists(patients[id].pickle):
                    print(' -- PICKLE already exists!! ')
                    print('    TODO: compare options, now skipping!! ')
                    continue

                # file storing the output of the processing
                patients[id].logfile = patients[id].patientdir + id + '.log'

                # file storing only the command lines employed
                patients[id].cmdfile = patients[id].patientdir + id + '.commands'

                # model information
                patients[id].modeldir = options.modeldir
                patients[id].modelname = options.modelname
                patients[id].beastdir = options.beastdir

                # PIPELINE OPTIONS
                patients[id].denoise = options.denoise
                # patients[id].beastresolution = options.beastres
                patients[id].mask_n3 = options.mask_n3
                patients[id].n4 = options.n4
                patients[id].donl = options.donl
                patients[id].dolngcls = options.dolngcls
                patients[id].dodbm = options.dodbm
                patients[id].dovbm = options.dovbm
                patients[id].deface = options.deface
                patients[id].mri3T = options.mri3T
                patients[id].fast = options.fast
                patients[id].temporalregu = options.temporalregu
                patients[id].skullreg = options.skullreg
                patients[id].large_atrophy = options.large_atrophy
                patients[id].dobiascorr = options.dobiascorr
                patients[id].linreg = options.linreg
                patients[id].rigid = options.rigid
                patients[id].add = options.add
                patients[id].vbm_options = {
                    'vbm_fwhm': options.vbm_blur,
                    'vbm_resolution': options.vbm_res,
                    'vbm_nl_level': options.vbm_nl,
                    'vbm_nl_method': 'minctracc',
                }

                #if options.sym == True:
                #    patients[id].nl_method = 'bestsym1stepnlreg.pl'
            # end of creating a patient

            # ## Add timepoint to the patient
            if visit in patients[id]:
                raise IplError(' -- ERROR : Timepoint ' + visit
                               + ' repeated in patient ' + id)

            print(' - ' + id + '::' + visit)
            patients[id][visit] = TP(visit)  # create a timepoint for patient[id]

            # create visit's dir
            patients[id][visit].tpdir = patients[id].patientdir + visit + os.sep
            patients[id][visit].qc_title = id + '_' + visit

            # read the available information depending on the number of arguments
            # VF: check existence of file
            if not os.path.exists(sp[2]):
                raise IplError('-- ERROR : Patient %s Timepoint %s missing file:%s '
                               % (id, visit, sp[2]))
            patients[id][visit].native['t1'] = sp[2]

            if size > 3 and len(sp[3]) > 0:
                # VF: check existence of file
                if not os.path.exists(sp[3]):
                    raise IplError('-- ERROR : Patient %s Timepoint %s missing file:%s '
                                   % (id, visit, sp[3]))
                patients[id][visit].native['t2'] = sp[3]

            if size > 4 and len(sp[4]) > 0:
                # VF: check existence of file
                if not os.path.exists(sp[4]):
                    raise IplError('-- ERROR : Patient %s Timepoint %s missing file:%s '
                                   % (id, visit, sp[4]))
                patients[id][visit].native['pd'] = sp[4]

            if size > 5:
                patients[id][visit].age = sp[5]

            if size > 6 and len(sp[6]) > 0:
                patients[id].sex = sp[6]

            if size > 7 and len(sp[7]) > 0:
                # VF: check existence of file
                if not os.path.exists(sp[7]):
                    raise IplError('-- ERROR : Patient %s Timepoint %s missing file:%s '
                                   % (id, visit, sp[7]))
                patients[id][visit].geo['t1'] = sp[7]

            if size > 8 and len(sp[8]) > 0:
                if not os.path.exists(sp[8]):  # bugfix: the original re-checked sp[7] here
                    raise IplError('-- ERROR : Patient %s Timepoint %s missing file:%s '
                                   % (id, visit, sp[8]))
                patients[id][visit].geo['t2'] = sp[8]

            if size > 9 and options.les and len(sp[9]) > 0:
                if not os.path.exists(sp[9]):
                    raise IplError('-- ERROR : Patient %s Timepoint %s missing file:%s '
                                   % (id, visit, sp[9]))
                patients[id][visit].native['t2les'] = sp[9]

            # end of adding timepoint
            print('{} - {}'.format(id, visit))

    # store patients in the pickle

    if options.pe is None:
        # use SCOOP to run all subjects in parallel
        pickles = []

        for (id, i) in patients.items():
            # write the pickle file
            if not os.path.exists(i.pickle):
                i.write(i.pickle)
            pickles.append(i.pickle)

        jobs = [futures.submit(runPipeline, i) for i in pickles]
        # assume workdir is properly set in pickle...
        futures.wait(jobs, return_when=futures.ALL_COMPLETED)
        print('All subjects finished:%d' % len(jobs))
    else:
        # use SGE to submit one job per subject, using the required peslots
        pickles = []

        for (id, i) in patients.items():
            # write the pickle file
            slots = options.peslots
            # don't submit jobs with too many requested slots when only a
            # limited number of timepoints is available
            if len(i) < slots:
                slots = len(i)
            if slots < 1:
                slots = 1

            if not os.path.exists(i.pickle):
                i.write(i.pickle)

            # tell python to use the SCOOP module to run the program
            comm = ['unset PE_HOSTFILE']  # HACK: keep SCOOP from relying on the PE setting, to prevent it from SSHing
            comm.extend(['export ITK_GLOBAL_DEFAULT_NUMBER_OF_THREADS={}'.format(slots)])
            comm.extend(['export OMP_NUM_THREADS={}'.format(slots)])
            comm.extend(['export OMP_DYNAMIC=TRUE'])
            comm.extend(['python -m scoop -n {} {} -p {}'.format(
                str(slots), os.path.abspath(sys.argv[0]), i.pickle)])

            qsub_pe(comm, options.pe, options.peslots,
                    name='LNG_{}'.format(str(id)),
                    logfile=i.patientdir + os.sep + str(id) + ".sge.log",
                    queue=options.queue)
def generate_nonlinear_average(samples,
                               initial_model=None,
                               output_model=None,
                               output_model_sd=None,
                               prefix='.',
                               options={},
                               skip=0,
                               stop_early=100000):
    """Perform iterative model creation"""

    # use first sample as initial model
    if not initial_model:
        initial_model = samples[0]

    # current estimate of template
    current_model = initial_model
    current_model_sd = None

    transforms = []
    corr = []
    corr_transforms = []
    sd = []
    corr_samples = []

    protocol = options.get('protocol',
                           [{'iter': 4, 'level': 32},
                            {'iter': 4, 'level': 32}])

    cleanup = options.get('cleanup', False)
    symmetric = options.get('symmetric', False)
    parameters = options.get('parameters', None)
    refine = options.get('refine', True)
    qc = options.get('qc', False)
    downsample_ = options.get('downsample', None)
    use_dd = options.get('use_dd', False)
    use_ants = options.get('use_ants', False)
    use_elastix = options.get('use_elastix', False)
    start_level = options.get('start_level', None)
    use_median = options.get('median', False)

    models = []
    models_sd = []

    if symmetric:
        flipdir = prefix + os.sep + 'flip'
        if not os.path.exists(flipdir):
            os.makedirs(flipdir)

        flip_all = []
        # generate flipped versions of all scans
        for (i, s) in enumerate(samples):
            _s_name = os.path.basename(s.scan).rsplit('.gz', 1)[0]
            s.scan_f = prefix + os.sep + 'flip' + os.sep + _s_name

            if s.mask is not None:
                s.mask_f = prefix + os.sep + 'flip' + os.sep + 'mask_' + _s_name

            flip_all.append(futures.submit(generate_flip_sample, s))

        futures.wait(flip_all, return_when=futures.ALL_COMPLETED)

    # go through all the iterations
    it = 0
    for (i, p) in enumerate(protocol):
        downsample = p.get('downsample', downsample_)
        for j in range(1, p['iter'] + 1):
            it += 1
            if it > stop_early:
                break

            # this will be a model for next iteration actually
            # 1. register all subjects to current template
            next_model = MriDataset(prefix=prefix, iter=it, name='avg')
            next_model_sd = MriDataset(prefix=prefix, iter=it, name='sd')
            transforms = []

            it_prefix = prefix + os.sep + str(it)
            if not os.path.exists(it_prefix):
                os.makedirs(it_prefix)

            inv_transforms = []
            fwd_transforms = []

            start = None
            if it == 1:
                start = start_level

            for (i, s) in enumerate(samples):
                sample_xfm = MriTransform(name=s.name, prefix=it_prefix, iter=it)
                sample_inv_xfm = MriTransform(name=s.name + '_inv',
                                              prefix=it_prefix, iter=it)

                prev_transform = None

                if it > 1:
                    if refine:
                        prev_transform = corr_transforms[i]
                    else:
                        start = start_level  # TWEAK?

                if it > skip and it < stop_early:
                    if use_dd:
                        transforms.append(
                            futures.submit(dd_register_step,
                                           s, current_model, sample_xfm,
                                           output_invert=sample_inv_xfm,
                                           init_xfm=prev_transform,
                                           symmetric=symmetric,
                                           parameters=parameters,
                                           level=p['level'],
                                           start=start,
                                           work_dir=prefix,
                                           downsample=downsample))
                    elif use_ants:
                        transforms.append(
                            futures.submit(ants_register_step,
                                           s, current_model, sample_xfm,
                                           output_invert=sample_inv_xfm,
                                           init_xfm=prev_transform,
                                           symmetric=symmetric,
                                           parameters=parameters,
                                           level=p['level'],
                                           start=start,
                                           work_dir=prefix,
                                           downsample=downsample))
                    elif use_elastix:
                        transforms.append(
                            futures.submit(elastix_register_step,
                                           s, current_model, sample_xfm,
                                           output_invert=sample_inv_xfm,
                                           init_xfm=prev_transform,
                                           symmetric=symmetric,
                                           parameters=parameters,
                                           level=p['level'],
                                           start=start,
                                           work_dir=prefix,
                                           downsample=downsample))
                    else:
                        transforms.append(
                            futures.submit(non_linear_register_step,
                                           s, current_model, sample_xfm,
                                           output_invert=sample_inv_xfm,
                                           init_xfm=prev_transform,
                                           symmetric=symmetric,
                                           parameters=parameters,
                                           level=p['level'],
                                           start=start,
                                           work_dir=prefix,
                                           downsample=downsample))

                inv_transforms.append(sample_inv_xfm)
                fwd_transforms.append(sample_xfm)

            # wait for jobs to finish
            if it > skip and it < stop_early:
                futures.wait(transforms, return_when=futures.ALL_COMPLETED)

            if cleanup and it > 1:
                # remove information from previous iteration
                for s in corr_samples:
                    s.cleanup(verbose=True)
                for x in corr_transforms:
                    x.cleanup(verbose=True)

            # here all the transforms should exist
            avg_inv_transform = MriTransform(name='avg_inv', prefix=it_prefix, iter=it)

            # 2. average all transformations
            if it > skip and it < stop_early:
                result = futures.submit(average_transforms, inv_transforms,
                                        avg_inv_transform, nl=True,
                                        symmetric=symmetric)
                futures.wait([result], return_when=futures.ALL_COMPLETED)

            corr = []
            corr_transforms = []
            corr_samples = []

            # 3. concatenate correction and resample
            for (i, s) in enumerate(samples):
                c = MriDataset(prefix=it_prefix, iter=it, name=s.name)
                x = MriTransform(name=s.name + '_corr', prefix=it_prefix, iter=it)

                if it > skip and it < stop_early:
                    corr.append(futures.submit(concat_resample_nl,
                                               s, fwd_transforms[i], avg_inv_transform,
                                               c, x, current_model,
                                               level=p['level'],
                                               symmetric=symmetric, qc=qc))
                corr_transforms.append(x)
                corr_samples.append(c)

            if it > skip and it < stop_early:
                futures.wait(corr, return_when=futures.ALL_COMPLETED)

            # cleanup transforms
            if cleanup:
                for x in inv_transforms:
                    x.cleanup()
                for x in fwd_transforms:
                    x.cleanup()
                avg_inv_transform.cleanup()

            # 4. average resampled samples to create new estimate
            if it > skip and it < stop_early:
                result = futures.submit(average_samples, corr_samples,
                                        next_model, next_model_sd,
                                        symmetric=symmetric, symmetrize=symmetric,
                                        median=use_median)
                futures.wait([result], return_when=futures.ALL_COMPLETED)

            if cleanup and it > 1:
                # remove previous template estimate
                models.append(next_model)
                models_sd.append(next_model_sd)

            current_model = next_model
            current_model_sd = next_model_sd

            if it > skip and it < stop_early:
                result = futures.submit(average_stats, next_model, next_model_sd)
                sd.append(result)

    # copy output to the destination
    futures.wait(sd, return_when=futures.ALL_COMPLETED)
    with open(prefix + os.sep + 'stats.txt', 'w') as f:
        for s in sd:
            f.write("{}\n".format(s.result()))

    results = {
        'model': current_model,
        'model_sd': current_model_sd,
        'xfm': corr_transforms,
        'biascorr': None,
        'scan': corr_samples,
        'symmetric': symmetric,
        'samples': samples,
    }

    with open(prefix + os.sep + 'results.json', 'w') as f:
        json.dump(results, f, indent=1, cls=MRIEncoder)

    if cleanup and stop_early == 100000:
        # keep the final model
        models.pop()
        models_sd.pop()

        # delete unneeded models
        for m in models:
            m.cleanup()
        for m in models_sd:
            m.cleanup()

    return results
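# Hypothetical invocation sketch for generate_nonlinear_average: `scans` would
# be a list of MriDataset objects; the option keys shown are the ones the
# function actually reads.
#
#   model = generate_nonlinear_average(
#       scans, prefix='tmp_nl',
#       options={'protocol': [{'iter': 4, 'level': 16}],
#                'cleanup': True, 'refine': True, 'symmetric': False})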
def funcWait(timeout):
    fs = [futures.submit(func4, i) for i in range(1000)]
    done, not_done = futures.wait(fs, timeout=timeout)
    return done, not_done
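# Companion sketch for funcWait: with timeout=0 wait() returns immediately and
# unfinished futures land in not_done; with timeout=None it blocks until all
# futures complete (the concurrent.futures semantics that SCOOP mirrors).
def funcWaitExample():
    done_now, pending = funcWait(0)       # likely a mix of finished/unfinished
    done_all, leftover = funcWait(None)   # blocks; leftover should be empty
    return len(done_now), len(pending), len(leftover)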
def funcKeywords(n, **kwargs):
    task = futures.submit(funcWithKW, n, **kwargs)
    futures.wait([task], return_when=futures.ALL_COMPLETED)
    result = task.result()
    return result
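# Usage sketch: submit() forwards keyword arguments to the callable, so
# funcWithKW receives them unchanged on the worker. The `scale` keyword here is
# purely illustrative.
def funcKeywordsExample():
    return funcKeywords(10, scale=2)  # equivalent to funcWithKW(10, scale=2)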
def mainSimple(n):
    task = futures.submit(func3, n)
    futures.wait([task], return_when=futures.ALL_COMPLETED)
    result = task.result()
    return result
def full_cv_fusion_segment(validation_library,
                           segmentation_library,
                           output,
                           segmentation_parameters,
                           cv_iterations,
                           cv_exclude,
                           ec_parameters=None,
                           debug=False,
                           ec_variant='ec',
                           fuse_variant='fuse',
                           cv_variant='cv',
                           regularize_variant='gc',
                           cleanup=False,
                           ext=False,
                           cv_iter=None):
    if cv_iter is not None:
        raise NotImplementedError("Running a single CV iteration is not implemented!")

    validation_library_idx = range(len(validation_library))
    # randomly exclude samples, repeat
    results = []
    if not os.path.exists(output):
        try:
            os.makedirs(output)
        except:
            pass  # assume directory was created by a competing process

    modalities = segmentation_library.modalities - 1

    for i in range(cv_iterations):
        # TODO: save this list in a file
        rem_list = []
        ran_file = output + os.sep + ('random_{}_{}.json'.format(cv_variant, i))

        if not os.path.exists(ran_file):
            rem_list = random.sample(validation_library_idx, cv_exclude)
            with open(ran_file, 'w') as f:
                json.dump(rem_list, f)
        else:
            with open(ran_file, 'r') as f:
                rem_list = json.load(f)

        # list of subjects
        rem_items = [validation_library[j] for j in rem_list]

        rem_n = [os.path.basename(j[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0]
                 for j in rem_items]
        rem_lib = []
        val_lib = []

        for j in rem_n:
            rem_lib.extend([k for (k, t) in enumerate(segmentation_library.library)
                            if t[0].find(j) >= 0])
            val_lib.extend([k for (k, t) in enumerate(validation_library)
                            if t[0].find(j) >= 0])

        if debug:
            print(repr(rem_lib))
        rem_lib = set(rem_lib)
        val_lib = set(val_lib)

        # prepare the exclusion list
        experiment_segmentation_library = copy.deepcopy(segmentation_library)
        experiment_segmentation_library.library = \
            [k for j, k in enumerate(segmentation_library.library) if j not in rem_lib]

        _validation_library = \
            [k for j, k in enumerate(validation_library) if j not in val_lib]

        for j, k in enumerate(rem_items):
            output_experiment = output + os.sep + ('{}_{}_{}'.format(i, rem_n[j], cv_variant))
            work_dir = output + os.sep + ('work_{}_{}_{}'.format(i, rem_n[j], fuse_variant))

            validation_sample = k[0]
            validation_segment = k[1]

            presegment = None
            shift = 2

            if ext:
                presegment = k[2]
                shift = 3

            results.append(
                futures.submit(run_segmentation_experiment,
                               validation_sample,
                               validation_segment,
                               experiment_segmentation_library,
                               output_experiment,
                               segmentation_parameters=segmentation_parameters,
                               debug=debug,
                               work_dir=work_dir,
                               ec_parameters=ec_parameters,
                               ec_variant=ec_variant,
                               fuse_variant=fuse_variant,
                               regularize_variant=regularize_variant,
                               add=k[shift:shift + modalities],
                               cleanup=cleanup,
                               presegment=presegment,
                               train_list=_validation_library))

    futures.wait(results, return_when=futures.ALL_COMPLETED)

    stat_results = [i.result()[0] for i in results]
    output_results = [i.result()[1] for i in results]

    return (stat_results, output_results)
def loo_cv_fusion_segment(validation_library,
                          segmentation_library,
                          output,
                          segmentation_parameters,
                          ec_parameters=None,
                          debug=False,
                          ec_variant='ec',
                          fuse_variant='fuse',
                          cv_variant='cv',
                          regularize_variant='gc',
                          cleanup=False,
                          ext=False,
                          cv_iter=None):
    '''Run a leave-one-out cross-validation experiment'''
    # for each of N subjects run segmentation and compare
    # right now run LOOCV
    if not os.path.exists(output):
        try:
            os.makedirs(output)
        except:
            pass  # assume directory was created by a competing process

    results = []
    results_json = []

    modalities = segmentation_library.get('modalities', 1) - 1

    print("cv_iter={}".format(repr(cv_iter)))

    for (i, j) in enumerate(validation_library):
        n = os.path.basename(j[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0]
        output_experiment = output + os.sep + n + '_' + cv_variant

        validation_sample = j[0]
        validation_segment = j[1]

        presegment = None
        add = []

        if ext:
            presegment = j[2]
            add = j[3:3 + modalities]
        else:
            add = j[2:2 + modalities]

        # remove training sample (?)
        _validation_library = validation_library[0:i]
        _validation_library.extend(validation_library[i + 1:len(validation_library)])

        experiment_segmentation_library = copy.deepcopy(segmentation_library)

        # remove sample
        experiment_segmentation_library.library = \
            [_i for _i in segmentation_library.library if _i[0].find(n) < 0]

        if (cv_iter is None) or (i == cv_iter):
            results.append(
                futures.submit(run_segmentation_experiment,
                               validation_sample,
                               validation_segment,
                               experiment_segmentation_library,
                               output_experiment,
                               segmentation_parameters=segmentation_parameters,
                               debug=debug,
                               work_dir=output + os.sep + 'work_' + n + '_' + fuse_variant,
                               ec_parameters=ec_parameters,
                               ec_variant=ec_variant,
                               fuse_variant=fuse_variant,
                               regularize_variant=regularize_variant,
                               add=add,
                               cleanup=cleanup,
                               presegment=presegment,
                               train_list=_validation_library))
        else:
            results_json.append((output_experiment + '_stats.json',
                                 output_experiment + '_out.json'))

    print("Waiting for {} jobs".format(len(results)))
    futures.wait(results, return_when=futures.ALL_COMPLETED)

    stat_results = []
    output_results = []

    if cv_iter is None:
        stat_results = [_i.result()[0] for _i in results]
        output_results = [_i.result()[1] for _i in results]
    elif cv_iter == -1:
        # TODO: load from json files
        for _i in results_json:
            if os.path.exists(_i[0]) and os.path.exists(_i[1]):  # VF: a hack
                with open(_i[0], 'r') as _f:
                    stat_results.append(json.load(_f))
                with open(_i[1], 'r') as _f:
                    output_results.append(json.load(_f))
            else:
                if not os.path.exists(_i[0]):
                    print("Warning: missing file:{}".format(_i[0]))
                if not os.path.exists(_i[1]):
                    print("Warning: missing file:{}".format(_i[1]))

    return (stat_results, output_results)
                    model='ref.mnc', mask='mask.mnc')  # end of truncated j0 submission

j1 = futures.submit(gm.generate_linear_model_csv, 'subjects1.lst',
                    work_prefix='tmp_lsq9',
                    options={'symmetric': False,
                             'reg_type': '-lsq9',
                             'objective': '-xcorr',
                             'iterations': 4,
                             'refine': True},
                    model='ref.mnc', mask='mask.mnc')

j2 = futures.submit(gm.generate_linear_model_csv, 'subjects2.lst',
                    work_prefix='tmp_lsq12',
                    options={'symmetric': False,
                             'reg_type': '-lsq12',
                             'objective': '-xcorr',
                             'iterations': 4,
                             'refine': True},
                    model='ref.mnc', mask='mask.mnc')

futures.wait([j0, j1, j2], return_when=futures.ALL_COMPLETED)
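# Note: the scoop.futures API expects the script to be launched through SCOOP,
# e.g. `python -m scoop model_building.py` (script name illustrative), so the
# three submissions above can actually run in parallel across workers.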
def generate_linear_average(samples,
                            initial_model=None,
                            output_model=None,
                            output_model_sd=None,
                            prefix='.',
                            options={}):
    """Perform iterative model creation"""

    # use first sample as initial model
    if not initial_model:
        initial_model = samples[0]

    # current estimate of template
    current_model = initial_model
    current_model_sd = None

    transforms = []
    corr = []
    bias_fields = []
    corr_transforms = []
    corr_samples = []
    sd = []

    iterations = options.get('iterations', 4)
    cleanup = options.get('cleanup', False)
    symmetric = options.get('symmetric', False)
    reg_type = options.get('reg_type', '-lsq12')
    objective = options.get('objective', '-xcorr')
    linreg = options.get('linreg', None)
    refine = options.get('refine', False)
    biascorr = options.get('biascorr', False)
    biasdist = options.get('biasdist', 100)  # default for 1.5T
    qc = options.get('qc', False)
    downsample = options.get('downsample', None)
    use_n4 = options.get('N4', False)
    use_median = options.get('median', False)

    models = []
    models_sd = []
    models_bias = []

    if symmetric:
        flipdir = prefix + os.sep + 'flip'
        if not os.path.exists(flipdir):
            os.makedirs(flipdir)

        flip_all = []
        # generate flipped versions of all scans
        for (i, s) in enumerate(samples):
            _s_name = os.path.basename(s.scan).rsplit('.gz', 1)[0]
            s.scan_f = prefix + os.sep + 'flip' + os.sep + _s_name
            if s.mask is not None:
                s.mask_f = prefix + os.sep + 'flip' + os.sep + 'mask_' + _s_name
            flip_all.append(futures.submit(generate_flip_sample, s))

        futures.wait(flip_all, return_when=futures.ALL_COMPLETED)

    # go through all the iterations
    for it in range(1, iterations + 1):
        # this will be a model for next iteration actually
        # 1. register all subjects to current template
        next_model = MriDataset(prefix=prefix, iter=it, name='avg')
        next_model_sd = MriDataset(prefix=prefix, iter=it, name='sd')
        next_model_bias = MriDataset(prefix=prefix, iter=it, name='bias')
        transforms = []

        it_prefix = prefix + os.sep + str(it)
        if not os.path.exists(it_prefix):
            os.makedirs(it_prefix)

        inv_transforms = []
        fwd_transforms = []

        for (i, s) in enumerate(samples):
            sample_xfm = MriTransform(name=s.name, prefix=it_prefix,
                                      iter=it, linear=True)
            sample_inv_xfm = MriTransform(name=s.name + '_inv', prefix=it_prefix,
                                          iter=it, linear=True)

            prev_transform = None
            prev_bias_field = None

            if it > 1 and refine:
                prev_transform = corr_transforms[i]

            if it > 1 and biascorr:
                prev_bias_field = bias_fields[i]

            transforms.append(
                futures.submit(linear_register_step,
                               s,
                               current_model,
                               sample_xfm,
                               output_invert=sample_inv_xfm,
                               init_xfm=prev_transform,
                               symmetric=symmetric,
                               reg_type=reg_type,
                               objective=objective,
                               linreg=linreg,
                               work_dir=prefix,
                               bias=prev_bias_field,
                               downsample=downsample))
            inv_transforms.append(sample_inv_xfm)
            fwd_transforms.append(sample_xfm)

        # wait for jobs to finish
        futures.wait(transforms, return_when=futures.ALL_COMPLETED)

        # remove information from previous iteration
        if cleanup and it > 1:
            for s in corr_samples:
                s.cleanup(verbose=True)
            for x in corr_transforms:
                x.cleanup(verbose=True)

        # here all the transforms should exist
        avg_inv_transform = MriTransform(name='avg_inv', prefix=it_prefix,
                                         iter=it, linear=True)

        # 2. average all transformations
        # TODO: maybe make median transforms?
        result = futures.submit(average_transforms, inv_transforms,
                                avg_inv_transform, nl=False, symmetric=symmetric)
        futures.wait([result], return_when=futures.ALL_COMPLETED)

        corr = []
        corr_transforms = []
        corr_samples = []

        # 3. concatenate correction and resample
        for (i, s) in enumerate(samples):
            prev_bias_field = None
            if it > 1 and biascorr:
                prev_bias_field = bias_fields[i]

            c = MriDataset(prefix=it_prefix, iter=it, name=s.name)
            x = MriTransform(name=s.name + '_corr', prefix=it_prefix,
                             iter=it, linear=True)

            corr.append(
                futures.submit(concat_resample,
                               s, fwd_transforms[i], avg_inv_transform,
                               c, x, current_model,
                               symmetric=symmetric, qc=qc, bias=prev_bias_field))
            corr_transforms.append(x)
            corr_samples.append(c)

        futures.wait(corr, return_when=futures.ALL_COMPLETED)

        # cleanup transforms
        if cleanup:
            for x in inv_transforms:
                x.cleanup()
            for x in fwd_transforms:
                x.cleanup()
            avg_inv_transform.cleanup()

        # 4. average resampled samples to create new estimate
        result = futures.submit(average_samples, corr_samples,
                                next_model, next_model_sd,
                                symmetric=symmetric, symmetrize=symmetric,
                                median=use_median)

        if cleanup:
            # remove previous template estimate
            models.append(next_model)
            models_sd.append(next_model_sd)

        futures.wait([result], return_when=futures.ALL_COMPLETED)

        if biascorr:
            biascorr_results = []
            new_bias_fields = []

            for (i, s) in enumerate(samples):
                prev_bias_field = None
                if it > 1:
                    prev_bias_field = bias_fields[i]

                c = corr_samples[i]
                x = corr_transforms[i]
                b = MriDataset(prefix=it_prefix, iter=it, name='bias_' + s.name)

                biascorr_results.append(
                    futures.submit(calculate_diff_bias_field, c, next_model, b,
                                   symmetric=symmetric, distance=biasdist, n4=use_n4))
                new_bias_fields.append(b)

            futures.wait(biascorr_results, return_when=futures.ALL_COMPLETED)

            result = futures.submit(average_bias_fields, new_bias_fields,
                                    next_model_bias, symmetric=symmetric)
            futures.wait([result], return_when=futures.ALL_COMPLETED)

            biascorr_results = []
            new_corr_bias_fields = []

            for (i, s) in enumerate(samples):
                prev_bias_field = None
                if it > 1:
                    prev_bias_field = bias_fields[i]

                c = corr_samples[i]
                x = corr_transforms[i]
                b = new_bias_fields[i]
                out = MriDataset(prefix=it_prefix, iter=it, name='c_bias_' + s.name)

                biascorr_results.append(
                    futures.submit(resample_and_correct_bias, b, x,
                                   next_model_bias, out,
                                   previous=prev_bias_field, symmetric=symmetric))
                new_corr_bias_fields.append(out)

            futures.wait(biascorr_results, return_when=futures.ALL_COMPLETED)

        # swap bias fields
        if biascorr:
            bias_fields = new_bias_fields

        current_model = next_model
        current_model_sd = next_model_sd

        sd.append(futures.submit(average_stats, next_model, next_model_sd))

    # copy output to the destination
    futures.wait(sd, return_when=futures.ALL_COMPLETED)
    with open(prefix + os.sep + 'stats.txt', 'w') as f:
        for s in sd:
            f.write("{}\n".format(s.result()))

    if cleanup:
        # keep the final model
        models.pop()
        models_sd.pop()

        # delete unneeded models
        for m in models:
            m.cleanup()
        for m in models_sd:
            m.cleanup()

    results = {
        'model': current_model,
        'model_sd': current_model_sd,
        'xfm': corr_transforms,
        'biascorr': bias_fields,
        'scan': corr_samples,
        'symmetric': symmetric,
    }

    with open(prefix + os.sep + 'results.json', 'w') as f:
        json.dump(results, f, indent=1, cls=MRIEncoder)

    return results
def my_wait_first(fs):
    return futures.wait(fs, return_when=futures.FIRST_COMPLETED)
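# Usage sketch built on my_wait_first: drain futures as they complete, which
# mirrors the FIRST_COMPLETED polling loop used by the scf main() above.
def my_drain(fs):
    results = []
    pending = list(fs)
    while pending:
        done, not_done = my_wait_first(pending)
        results.extend(f.result() for f in done)
        pending = list(not_done)
    return results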
def fusion_segment(input_scan,
                   library_description,
                   output_segment,
                   input_mask=None,
                   parameters={},
                   exclude=[],
                   work_dir=None,
                   debug=False,
                   ec_variant=None,
                   fuse_variant=None,
                   regularize_variant=None,
                   add=[],
                   cleanup=False,
                   cleanup_xfm=False,
                   presegment=None,
                   preprocess_only=False):
    """Apply fusion segmentation"""
    try:
        if debug:
            print("Segmentation parameters:")
            print(repr(parameters))
            print("presegment={}".format(repr(presegment)))

        out_variant = ''
        if fuse_variant is not None:
            out_variant += fuse_variant

        if regularize_variant is not None:
            out_variant += '_' + regularize_variant

        if ec_variant is not None:
            out_variant += '_' + ec_variant

        if work_dir is None:
            work_dir = output_segment + os.sep + 'work_segment'

        if not os.path.exists(work_dir):
            os.makedirs(work_dir)

        work_lib_dir = work_dir + os.sep + 'library'
        work_lib_dir_f = work_dir + os.sep + 'library_f'

        if not os.path.exists(work_lib_dir):
            os.makedirs(work_lib_dir)

        if not os.path.exists(work_lib_dir_f):
            os.makedirs(work_lib_dir_f)

        library_nl_samples_avail = library_description['nl_samples_avail']
        library_modalities = library_description.get('modalities', 1) - 1

        # perform symmetric segmentation
        segment_symmetric = parameters.get('segment_symmetric', False)

        # read filter parameters
        pre_filters = parameters.get('pre_filters', None)
        post_filters = parameters.get('post_filters', parameters.get('filters', None))

        # if linear registration should be performed
        do_initial_register = parameters.get('initial_register',
                                             parameters.get('linear_register', {}))

        if do_initial_register is not None and isinstance(do_initial_register, dict):
            initial_register = do_initial_register
            do_initial_register = True
        else:
            initial_register = {}

        initial_reg_type = parameters.get(
            'initial_register_type',
            parameters.get('linear_register_type',
                           initial_register.get('type', '-lsq12')))

        initial_reg_ants = parameters.get('initial_register_ants',
                                          parameters.get('linear_register_ants', False))

        initial_reg_options = parameters.get('initial_register_options',
                                             initial_register.get('options', None))

        initial_reg_downsample = parameters.get('initial_register_downsample',
                                                initial_register.get('downsample', None))

        initial_reg_use_mask = parameters.get('initial_register_use_mask',
                                              initial_register.get('use_mask', False))

        initial_reg_objective = initial_register.get('objective', '-xcorr')

        # perform local linear registration
        do_initial_local_register = parameters.get(
            'initial_local_register', parameters.get('local_linear_register', {}))

        if do_initial_local_register is not None \
           and isinstance(do_initial_local_register, dict):
            initial_local_register = do_initial_local_register
            do_initial_local_register = True
        else:
            initial_local_register = {}

        local_reg_type = parameters.get('local_register_type',
                                        initial_local_register.get('type', '-lsq12'))

        local_reg_ants = parameters.get('local_register_ants', False)

        local_reg_opts = parameters.get('local_register_options',
                                        initial_local_register.get('options', None))

        local_reg_bbox = parameters.get('local_register_bbox',
                                        initial_local_register.get('bbox', False))

        local_reg_downsample = parameters.get('local_register_downsample',
                                              initial_local_register.get('downsample', None))

        local_reg_use_mask = parameters.get('local_register_use_mask',
                                            initial_local_register.get('use_mask', True))

        local_reg_objective = initial_local_register.get('objective', '-xcorr')

        # if non-linear registration should be performed for library creation
        do_nonlinear_register = parameters.get('non_linear_register', False)

        # generate the segmentation library (needed for label fusion, not needed
        # for single-atlas-based or external tools)
        generate_library = parameters.get('generate_library', True)

        # if non-linear registration should be performed pairwise
        do_pairwise = parameters.get('non_linear_pairwise', False)

        # if pairwise registration should be performed using ANTS
        do_pairwise_ants = parameters.get('non_linear_pairwise_ants', True)
        pairwise_register_type = parameters.get('non_linear_pairwise_type', None)
        if pairwise_register_type is None:
            if do_pairwise_ants:
                pairwise_register_type = 'ants'

        library_preselect = parameters.get('library_preselect', 10)
        library_preselect_step = parameters.get('library_preselect_step', None)
        library_preselect_method = parameters.get('library_preselect_method', 'MI')

        # if non-linear registration should be performed with ANTS
        do_nonlinear_register_ants = parameters.get('non_linear_register_ants', False)
        nlreg_level = parameters.get('non_linear_register_level', 2)
        nlreg_start = parameters.get('non_linear_register_start', 16)
        nlreg_options = parameters.get('non_linear_register_options', None)
        nlreg_downsample = parameters.get('non_linear_register_downsample', None)

        nonlinear_register_type = parameters.get('non_linear_register_type', None)
        if nonlinear_register_type is None:
            if do_nonlinear_register_ants:
                nonlinear_register_type = 'ants'

        pairwise_level = parameters.get('pairwise_level', 2)
        pairwise_start = parameters.get('pairwise_start', 16)
        pairwise_options = parameters.get('pairwise_options', None)

        fuse_options = parameters.get('fuse_options', None)

        resample_order = parameters.get('resample_order', 2)
        resample_baa = parameters.get('resample_baa', True)

        # error correction parameters
        ec_options = parameters.get('ec_options', None)

        # QC image parameters
        qc_options = parameters.get('qc_options', None)

        # special case for training error correction:
        # assume the input scan is already pre-processed
        run_in_bbox = parameters.get('run_in_bbox', False)

        # mask output
        mask_output = parameters.get('mask_output', True)

        classes_number = library_description['classes_number']
        seg_datatype = library_description['seg_datatype']
        gco_energy = library_description['gco_energy']

        output_info = {}

        input_sample = MriDataset(scan=input_scan, seg=presegment,
                                  mask=input_mask, protect=True, add=add)

        sample = input_sample

        # get parameters
        model = MriDataset(scan=library_description['model'],
                           mask=library_description['model_mask'],
                           add=library_description.get('model_add', []))

        local_model = MriDataset(
            scan=library_description['local_model'],
            mask=library_description['local_model_mask'],
            scan_f=library_description.get('local_model_flip', None),
            mask_f=library_description.get('local_model_mask_flip', None),
            seg=library_description.get('local_model_seg', None),
            seg_f=library_description.get('local_model_seg_flip', None),
            add=library_description.get('local_model_add', []),
            add_f=library_description.get('local_model_add_flip', []),
        )

        library = library_description['library']

        sample_modalities = len(add)

        print("\n\n")
        print("Sample modalities:{}".format(sample_modalities))
        print("\n\n")

        # apply the same steps as used in library creation to perform segmentation:

        # global
        initial_xfm = None
        nonlinear_xfm = None
        bbox_sample = None
        nl_sample = None
        bbox_linear_xfm = None

        flipdir = work_dir + os.sep + 'flip'

        sample_filtered = MriDataset(prefix=work_dir, name='flt_' + sample.name,
                                     add_n=sample_modalities)

        # QC file
        # TODO: allow for alternative location, extension
        #sample_qc = work_dir + os.sep + 'qc_' + sample.name + '_' + out_variant + '.jpg'
        sample_qc = output_segment + '_qc.jpg'

        if run_in_bbox:
            segment_symmetric = False  # that would depend?
            do_initial_register = False
            do_initial_local_register = False
            # assume the filter was already applied!
            pre_filters = None
            post_filters = None

        if pre_filters is not None:
            apply_filter(sample.scan, sample_filtered.scan, pre_filters,
                         model=model.scan, model_mask=model.mask)

            if sample.mask is not None:
                shutil.copyfile(sample.mask, sample_filtered.mask)

            for i, j in enumerate(sample.add):
                shutil.copyfile(sample.add[i], sample_filtered.add[i])

            sample = sample_filtered
        else:
            sample_filtered = None

        output_info['sample_filtered'] = sample_filtered

        if segment_symmetric:
            # need to flip the inputs
            if not os.path.exists(flipdir):
                os.makedirs(flipdir)

            sample.scan_f = flipdir + os.sep + os.path.basename(sample.scan)
            sample.add_f = ['' for (i, j) in enumerate(sample.add)]

            for (i, j) in enumerate(sample.add):
                sample.add_f[i] = flipdir + os.sep + os.path.basename(sample.add[i])

            if sample.mask is not None:
                sample.mask_f = flipdir + os.sep + 'mask_' + os.path.basename(sample.scan)
            else:
                sample.mask_f = None

            generate_flip_sample(sample)

        if presegment is None:
            sample.seg = None
            sample.seg_f = None

        if do_initial_register is not None:
            initial_xfm = MriTransform(prefix=work_dir, name='init_' + sample.name)

            if initial_reg_type == 'elx' or initial_reg_type == 'elastix':
                elastix_registration(sample, model, initial_xfm,
                                     symmetric=segment_symmetric,
                                     parameters=initial_reg_options,
                                     nl=False,
                                     use_mask=initial_reg_use_mask,
                                     downsample=initial_reg_downsample)
            elif initial_reg_type == 'ants' or initial_reg_ants:
                linear_registration(sample, model, initial_xfm,
                                    symmetric=segment_symmetric,
                                    reg_type=initial_reg_type,
                                    linreg=initial_reg_options,
                                    ants=True,
                                    use_mask=initial_reg_use_mask,
                                    downsample=initial_reg_downsample)
            else:
                linear_registration(sample, model, initial_xfm,
                                    symmetric=segment_symmetric,
                                    reg_type=initial_reg_type,
                                    linreg=initial_reg_options,
                                    downsample=initial_reg_downsample,
                                    use_mask=initial_reg_use_mask,
                                    objective=initial_reg_objective)

            output_info['initial_xfm'] = initial_xfm

        # local
        bbox_sample = MriDataset(prefix=work_dir, name='bbox_init_' + sample.name,
                                 add_n=sample_modalities)
        # a hack to have a sample mask
        bbox_sample_mask = MriDataset(prefix=work_dir, name='bbox_init_' + sample.name)

        if do_initial_local_register:
            bbox_linear_xfm = MriTransform(prefix=work_dir, name='bbox_init_' + sample.name)

            if local_reg_type == 'elx' or local_reg_type == 'elastix':
                elastix_registration(sample, local_model, bbox_linear_xfm,
                                     symmetric=segment_symmetric,
                                     init_xfm=initial_xfm,
                                     resample_order=resample_order,
                                     parameters=local_reg_opts,
                                     bbox=local_reg_bbox,
                                     use_mask=local_reg_use_mask,
                                     downsample=local_reg_downsample)
            elif local_reg_type == 'ants' or local_reg_ants:
                linear_registration(sample, local_model, bbox_linear_xfm,
                                    init_xfm=initial_xfm,
                                    symmetric=segment_symmetric,
                                    reg_type=local_reg_type,
                                    linreg=local_reg_opts,
                                    resample_order=resample_order,
                                    ants=True,
                                    close=True,
                                    bbox=local_reg_bbox,
                                    use_mask=local_reg_use_mask,
                                    downsample=local_reg_downsample)
            else:
                linear_registration(sample, local_model, bbox_linear_xfm,
                                    init_xfm=initial_xfm,
                                    symmetric=segment_symmetric,
                                    reg_type=local_reg_type,
                                    linreg=local_reg_opts,
                                    resample_order=resample_order,
                                    close=True,
                                    bbox=local_reg_bbox,
                                    use_mask=local_reg_use_mask,
                                    objective=local_reg_objective,
                                    downsample=local_reg_downsample)
        else:
            bbox_linear_xfm = initial_xfm

        output_info['bbox_initial_xfm'] = bbox_linear_xfm

        bbox_sample.mask = None
        bbox_sample.mask_f = None

        if sample.seg is None:
            bbox_sample.seg = None
            bbox_sample.seg_f = None

        warp_sample(sample, local_model, bbox_sample,
                    transform=bbox_linear_xfm,
                    symmetric=segment_symmetric,
                    symmetric_flip=segment_symmetric,  # need to flip symmetric dataset
                    resample_order=resample_order,
                    filters=post_filters)

        if sample.seg is not None:
            _lut = None
            _flip_lut = None

            if not run_in_bbox:
                # assume that labels are already renamed
                _lut = invert_lut(library_description.get("map", None))
                _flip_lut = invert_lut(library_description.get("flip_map", None))

            warp_rename_seg(sample, local_model, bbox_sample,
                            transform=bbox_linear_xfm,
                            symmetric=segment_symmetric,
                            symmetric_flip=segment_symmetric,
                            lut=_lut,
                            flip_lut=_flip_lut,
                            resample_order=resample_order,
                            resample_baa=resample_baa)

        output_info['bbox_sample'] = bbox_sample

        if preprocess_only:
            if cleanup:
                shutil.rmtree(work_lib_dir)
                shutil.rmtree(work_lib_dir_f)
                if os.path.exists(flipdir):
                    shutil.rmtree(flipdir)
                if pre_filters is not None:
                    sample_filtered.cleanup()
            return (None, output_info)

        # 3. run non-linear registration if needed
        # TODO: skip if sample presegmented
        if do_nonlinear_register:
            nl_sample = MriDataset(prefix=work_dir, name='nl_' + sample.name,
                                   add_n=sample_modalities)
            nonlinear_xfm = MriTransform(prefix=work_dir, name='nl_' + sample.name)

            if nonlinear_register_type == 'elx' or nonlinear_register_type == 'elastix':
                elastix_registration(bbox_sample, local_model, nonlinear_xfm,
                                     symmetric=segment_symmetric,
                                     level=nlreg_level,
                                     start_level=nlreg_start,
                                     parameters=nlreg_options,
                                     nl=True,
                                     downsample=nlreg_downsample)
            elif nonlinear_register_type == 'ants' or do_nonlinear_register_ants:
                non_linear_registration(bbox_sample, local_model, nonlinear_xfm,
                                        symmetric=segment_symmetric,
                                        level=nlreg_level,
                                        start_level=nlreg_start,
                                        parameters=nlreg_options,
                                        ants=True,
                                        downsample=nlreg_downsample)
            else:
                non_linear_registration(bbox_sample, local_model, nonlinear_xfm,
                                        symmetric=segment_symmetric,
                                        level=nlreg_level,
                                        start_level=nlreg_start,
                                        parameters=nlreg_options,
                                        ants=False,
                                        downsample=nlreg_downsample)

            print("\n\n\nWarping the sample!:{}\n\n\n".format(bbox_sample))

            nl_sample.seg = None
            nl_sample.seg_f = None
            nl_sample.mask = None
            nl_sample.mask_f = None

            warp_sample(bbox_sample, local_model, nl_sample,
                        transform=nonlinear_xfm,
                        symmetric=segment_symmetric,
                        resample_order=resample_order,
                        filters=post_filters)

            warp_model_mask(local_model, bbox_sample_mask,
                            transform=nonlinear_xfm,
                            symmetric=segment_symmetric,
                            resample_order=resample_order)

            bbox_sample.mask = bbox_sample_mask.mask
            bbox_sample.mask_f = bbox_sample_mask.mask_f

            output_info['bbox_sample'] = bbox_sample
            output_info['nl_sample'] = nl_sample
        else:
            nl_sample = bbox_sample
            # use mask from the model directly?
            bbox_sample.mask = local_model.mask
            bbox_sample.mask_f = local_model.mask

        output_info['nonlinear_xfm'] = nonlinear_xfm

        if generate_library:
            # remove excluded samples. TODO: use regular expressions for matching?
            selected_library = [i for i in library if i[0] not in exclude]
            selected_library_f = []

            if segment_symmetric:  # fill up with all entries
                selected_library_f = copy.deepcopy(selected_library)

            # library pre-selection if needed
            # TODO: skip if sample presegmented
            if library_preselect > 0 and library_preselect < len(selected_library):
                loaded = False
                loaded_f = False

                if os.path.exists(work_lib_dir + os.sep + 'sel_library.json'):
                    with open(work_lib_dir + os.sep + 'sel_library.json', 'r') as f:
                        selected_library = json.load(f)
                    loaded = True

                if segment_symmetric \
                   and os.path.exists(work_lib_dir_f + os.sep + 'sel_library.json'):
                    with open(work_lib_dir_f + os.sep + 'sel_library.json', 'r') as f:
                        selected_library_f = json.load(f)
                    loaded_f = True

                if do_nonlinear_register:
                    if not loaded:
                        selected_library = preselect(nl_sample,
                                                     selected_library,
                                                     method=library_preselect_method,
                                                     number=library_preselect,
                                                     use_nl=library_nl_samples_avail,
                                                     step=library_preselect_step,
                                                     lib_add_n=library_modalities)
                    if segment_symmetric:
                        if not loaded_f:
                            selected_library_f = preselect(nl_sample,
                                                           selected_library_f,
                                                           method=library_preselect_method,
                                                           number=library_preselect,
                                                           use_nl=library_nl_samples_avail,
                                                           flip=True,
                                                           step=library_preselect_step,
                                                           lib_add_n=library_modalities)
                else:
                    if not loaded:
                        selected_library = preselect(bbox_sample,
                                                     selected_library,
                                                     method=library_preselect_method,
                                                     number=library_preselect,
                                                     use_nl=False,
                                                     step=library_preselect_step,
                                                     lib_add_n=library_modalities)
                    if segment_symmetric:
                        if not loaded_f:
                            selected_library_f = preselect(bbox_sample,
                                                           selected_library_f,
                                                           method=library_preselect_method,
                                                           number=library_preselect,
                                                           use_nl=False,
                                                           flip=True,
                                                           step=library_preselect_step,
                                                           lib_add_n=library_modalities)

                if not loaded:
                    with open(work_lib_dir + os.sep + 'sel_library.json', 'w') as f:
                        json.dump(selected_library, f)

                if not loaded_f:
                    if segment_symmetric:
                        with open(work_lib_dir_f + os.sep + 'sel_library.json', 'w') as f:
                            json.dump(selected_library_f, f)

            output_info['selected_library'] = selected_library
            if segment_symmetric:
                output_info['selected_library_f'] = selected_library_f

            selected_library_scan = []
            selected_library_xfm = []
            selected_library_warped2 = []
            selected_library_xfm2 = []

            selected_library_scan_f = []
            selected_library_xfm_f = []
            selected_library_warped_f = []
            selected_library_warped2_f = []
            selected_library_xfm2_f = []

            for (i, j) in enumerate(selected_library):
                d = MriDataset(scan=j[0], seg=j[1], add=j[2:2 + library_modalities])

                selected_library_scan.append(d)

                selected_library_warped2.append(
                    MriDataset(name=d.name, prefix=work_lib_dir,
                               add_n=sample_modalities))
                selected_library_xfm2.append(
                    MriTransform(name=d.name, prefix=work_lib_dir))

                if library_nl_samples_avail:
                    selected_library_xfm.append(
                        MriTransform(xfm=j[2 + library_modalities],
                                     xfm_inv=j[3 + library_modalities]))

            output_info['selected_library_warped2'] = selected_library_warped2
            output_info['selected_library_xfm2'] = selected_library_xfm2
            if library_nl_samples_avail:
                output_info['selected_library_xfm'] = selected_library_xfm

            if segment_symmetric:
                for (i, j) in enumerate(selected_library_f):
                    d = MriDataset(scan=j[0], seg=j[1], add=j[2:2 + library_modalities])
                    selected_library_scan_f.append(d)
                    selected_library_warped2_f.append(
                        MriDataset(name=d.name, prefix=work_lib_dir_f,
                                   add_n=sample_modalities))
                    selected_library_xfm2_f.append(
                        MriTransform(name=d.name, prefix=work_lib_dir_f))

                    if library_nl_samples_avail:
                        selected_library_xfm_f.append(
                            MriTransform(xfm=j[2 + library_modalities],
                                         xfm_inv=j[3 + library_modalities]))

                output_info['selected_library_warped2_f'] = selected_library_warped2_f
                output_info['selected_library_xfm2_f'] = selected_library_xfm2_f
                if library_nl_samples_avail:
                    output_info['selected_library_xfm_f'] = selected_library_xfm_f

            # nonlinear registration to template or individual
            if do_pairwise:  # right now, ignore precomputed transformations
                results = []
                if debug:
                    print("Performing pairwise registration")

                for (i, j) in enumerate(selected_library):
                    # TODO: make clever usage of precomputed transform if available
                    if pairwise_register_type == 'elx' or pairwise_register_type == 'elastix':
                        results.append(
                            futures.submit(elastix_registration,
                                           bbox_sample,
                                           selected_library_scan[i],
                                           selected_library_xfm2[i],
                                           level=pairwise_level,
                                           start_level=pairwise_start,
                                           parameters=pairwise_options,
                                           nl=True,
                                           output_inv_target=selected_library_warped2[i],
                                           warp_seg=True,
                                           resample_order=resample_order,
                                           resample_baa=resample_baa))
                    elif pairwise_register_type == 'ants' or do_pairwise_ants:
                        results.append(
                            futures.submit(non_linear_registration,
                                           bbox_sample,
                                           selected_library_scan[i],
                                           selected_library_xfm2[i],
                                           level=pairwise_level,
                                           start_level=pairwise_start,
                                           parameters=pairwise_options,
                                           ants=True,
                                           output_inv_target=selected_library_warped2[i],
                                           warp_seg=True,
                                           resample_order=resample_order,
                                           resample_baa=resample_baa))
                    else:
                        results.append(
                            futures.submit(non_linear_registration,
                                           bbox_sample,
                                           selected_library_scan[i],
                                           selected_library_xfm2[i],
                                           level=pairwise_level,
                                           start_level=pairwise_start,
                                           parameters=pairwise_options,
                                           ants=False,
                                           output_inv_target=selected_library_warped2[i],
                                           warp_seg=True,
                                           resample_order=resample_order,
                                           resample_baa=resample_baa))

                if segment_symmetric:
                    for (i, j) in enumerate(selected_library_f):
                        # TODO: make clever usage of precomputed transform if available
                        if pairwise_register_type == 'elx' or pairwise_register_type == 'elastix':
                            results.append(
                                futures.submit(elastix_registration,
                                               bbox_sample,
                                               selected_library_scan_f[i],
                                               selected_library_xfm2_f[i],
                                               level=pairwise_level,
                                               start_level=pairwise_start,
                                               parameters=pairwise_options,
                                               nl=True,
                                               output_inv_target=selected_library_warped2_f[i],
                                               warp_seg=True,
                                               flip=True,
                                               resample_order=resample_order,
                                               resample_baa=resample_baa))
                        elif pairwise_register_type == 'ants' or do_pairwise_ants:
                            results.append(
                                futures.submit(non_linear_registration,
                                               bbox_sample,
                                               selected_library_scan_f[i],
                                               selected_library_xfm2_f[i],
                                               level=pairwise_level,
                                               start_level=pairwise_start,
                                               parameters=pairwise_options,
                                               ants=True,
                                               output_inv_target=selected_library_warped2_f[i],
                                               warp_seg=True,
                                               flip=True,
                                               resample_order=resample_order,
                                               resample_baa=resample_baa))
                        else:
                            results.append(
                                futures.submit(non_linear_registration,
                                               bbox_sample,
                                               selected_library_scan_f[i],
                                               selected_library_xfm2_f[i],
                                               level=pairwise_level,
                                               start_level=pairwise_start,
                                               parameters=pairwise_options,
                                               ants=False,
                                               output_inv_target=selected_library_warped2_f[i],
                                               warp_seg=True,
                                               flip=True,
                                               resample_order=resample_order,
                                               resample_baa=resample_baa))

                # TODO: do we really need to wait for the result here?
futures.wait(results, return_when=futures.ALL_COMPLETED) else: results = [] for (i, j) in enumerate(selected_library): lib_xfm = None if library_nl_samples_avail: lib_xfm = selected_library_xfm[i] results.append( futures.submit(concat_resample, selected_library_scan[i], lib_xfm, nonlinear_xfm, selected_library_warped2[i], resample_order=resample_order, resample_baa=resample_baa)) if segment_symmetric: for (i, j) in enumerate(selected_library_f): lib_xfm = None if library_nl_samples_avail: lib_xfm = selected_library_xfm_f[i] results.append( futures.submit(concat_resample, selected_library_scan_f[i], lib_xfm, nonlinear_xfm, selected_library_warped2_f[i], resample_order=resample_order, resample_baa=resample_baa, flip=True)) # TODO: do we really need to wait for result here? futures.wait(results, return_when=futures.ALL_COMPLETED) else: # no library generated selected_library = [] selected_library_f = [] selected_library_warped2 = [] selected_library_warped2_f = [] results = [] sample_seg = MriDataset(name='bbox_seg_' + sample.name + out_variant, prefix=work_dir) sample_seg.mask = None sample_seg.mask_f = None results.append( futures.submit(fuse_segmentations, bbox_sample, sample_seg, selected_library_warped2, flip=False, classes_number=classes_number, fuse_options=fuse_options, gco_energy=gco_energy, ec_options=ec_options, model=local_model, debug=debug, ec_variant=ec_variant, fuse_variant=fuse_variant, regularize_variant=regularize_variant)) if segment_symmetric: results.append( futures.submit(fuse_segmentations, bbox_sample, sample_seg, selected_library_warped2_f, flip=True, classes_number=classes_number, fuse_options=fuse_options, gco_energy=gco_energy, ec_options=ec_options, model=local_model, debug=debug, ec_variant=ec_variant, fuse_variant=fuse_variant, regularize_variant=regularize_variant)) futures.wait(results, return_when=futures.ALL_COMPLETED) output_info['fuse'] = results[0].result() if segment_symmetric: output_info['fuse_f'] = results[1].result() if qc_options: # generate QC images output_info['qc'] = generate_qc_image( sample_seg, bbox_sample, sample_qc, options=qc_options, model=local_model, symmetric=segment_symmetric, labels=library_description['classes_number']) # cleanup if need if cleanup: shutil.rmtree(work_lib_dir) shutil.rmtree(work_lib_dir_f) if os.path.exists(flipdir): shutil.rmtree(flipdir) if nl_sample is not None: nl_sample.cleanup() if pre_filters is not None: sample_filtered.cleanup() if cleanup_xfm: # TODO: remove more xfms(?) 
if nonlinear_xfm is not None: nonlinear_xfm.cleanup() if not run_in_bbox: # TODO: apply error correction here # rename labels to final results sample_seg_native = MriDataset(name='seg_' + sample.name + out_variant, prefix=work_dir) sample_seg_native2 = MriDataset(name='seg2_' + sample.name + out_variant, prefix=work_dir) warp_rename_seg( sample_seg, input_sample, sample_seg_native, transform=bbox_linear_xfm, invert_transform=True, lut=library_description['map'], symmetric=segment_symmetric, symmetric_flip=segment_symmetric, use_flipped= segment_symmetric, # needed to flip .seg_f back to right orientation flip_lut=library_description['flip_map'], resample_baa=resample_baa, resample_order=resample_order, datatype=seg_datatype) output_info['sample_seg_native'] = sample_seg_native output_info['used_labels'] = make_segmented_label_list( library_description, symmetric=segment_symmetric) _output_segment = output_segment + '_seg.mnc' if segment_symmetric: join_left_right(sample_seg_native, sample_seg_native2.seg, datatype=seg_datatype) else: sample_seg_native2 = sample_seg_native #shutil.copyfile(sample_seg_native.seg, output_segment+'_seg.mnc') if mask_output and input_mask is not None: # with mincTools() as minc: minc.calc([sample_seg_native2.seg, input_mask], 'A[1]>0.5?A[0]:0', _output_segment, labels=True) else: shutil.copyfile(sample_seg_native2.seg, _output_segment) output_info['output_segment'] = _output_segment output_info['output_volumes'] = seg_to_volumes( _output_segment, output_segment + '_vol.json', label_map=library_description.get('label_map', None)) output_info['output_volumes_json'] = output_segment + '_vol.json' # TODO: cleanup more here (?) return (_output_segment, output_info) else: # special case, needed to train error correction return (sample_seg.seg, output_info) except mincError as e: print("Exception in fusion_segment:{}".format(str(e))) traceback.print_exc(file=sys.stdout) raise except: print("Exception in fusion_segment:{}".format(sys.exc_info()[0])) traceback.print_exc(file=sys.stdout) raise
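# --- illustrative sketch (not part of the pipeline) ----------------------
# The masking step above relies on the minccalc expression 'A[1]>0.5?A[0]:0'
# to zero out segmentation labels that fall outside the input mask. A minimal
# numpy equivalent, assuming the volumes are already loaded as arrays (the
# pipeline itself operates on MINC files via mincTools):
import numpy as np

def mask_labels(seg, mask):
    """Keep labels where the mask is on (>0.5), set everything else to 0."""
    return np.where(mask > 0.5, seg, 0)

seg = np.array([[1, 2], [3, 4]])
mask = np.array([[1, 0], [1, 1]])
print(mask_labels(seg, mask))  # [[1 0] [3 4]] -- label 2 falls outside the mask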
def main(): options = parse_options() pipeline_parameters = default_pipeline_options pipeline_info = {} modalities = options.modalities.split(',') try: if options.options is not None: try: with open(options.options, 'r') as f: pipeline_parameters = json.load(f) except: print("Error reading:{}".format(options.options)) raise if (options.csv is not None) or (options.load is not None): inputs = [] if options.load is not None: inputs = load_pipeline_output(options.load) else: with open(options.csv, 'r') as csvfile: reader = csv.reader(csvfile, delimiter=',', quoting=csv.QUOTE_NONE) for row in reader: if len(row) >= 3: data_name = '{}_{}'.format(row[0], row[1]) t1w = MriScan(name=data_name, scan=row[2], modality='t1w', mask=None) t2w = None pdw = None corr_t1w = None corr_t2w = None age = None sex = None add = [] for l, ll in enumerate(modalities): if len(row) > (3 + l) and row[3 + l] != '': add.append( MriScan(name=data_name, scan=row[3 + l], modality=ll, mask=None)) if len(row) > (4 + len(modalities)) and row[ (4 + len(modalities))] != '': age = float(row[(4 + len(modalities))]) if len(row) > (5 + len(modalities)) and row[ (5 + len(modalities))] != '': sex = float(row[(5 + len(modalities))]) if len(row) > (6 + len(modalities)) and row[ (6 + len(modalities))] != '': corr_t1w = MriTransform( None, 'corr_t1w', xfm=row[(6 + len(modalities))]) # corr_t1w if len(row) > (7 + len(modalities)) and row[ (7 + len(modalities))] != '': corr_t2w = MriTransform( None, 'corr_t2w', xfm=row[(7 + len(modalities))]) # corr_t1w line = { 'subject': row[0], 'visit': row[1], # MRI 't1w': t1w, # demographic info 'age': age, 'sex': sex, # distortion correction 'corr_t1w': corr_t1w, 'corr_t2w': corr_t2w, # timepoint specific model 'model_name': None, 'model_dir': None, } # if len(add) > 0: line['add'] = add inputs.append(line) else: print("Error, unexpected line format:{}".format( repr(row))) raise Exception() pipeline_parameters['debug'] = options.debug if options.debug: print(repr(inputs)) run_pipeline = [] # only needed for parallel execution from scoop import futures, shared for (i, s) in enumerate(inputs): output_dir = options.output + os.sep + s[ 'subject'] + os.sep + s['visit'] manual_dir = None if options.manual is not None: manual_dir = options.manual + os.sep + s[ 'subject'] + os.sep + s['visit'] run_pipeline.append( futures.submit(standard_pipeline, s, output_dir, options=pipeline_parameters, work_dir=output_dir, manual_dir=manual_dir)) # # wait for all to finish # futures.wait(run_pipeline, return_when=futures.ALL_COMPLETED) for j, i in enumerate(run_pipeline): inputs[j]['output'] = i.result() save_pipeline_output(inputs, options.output + os.sep + 'summary.json') elif options.scans is not None and \ options.subject is not None and \ options.visit is not None: # run on a single subject data_name = '{}_{}'.format(options.subject, options.visit) pipeline_parameters['debug'] = options.debug output_dir = options.output + os.sep + options.subject + os.sep + options.visit manual_dir = None if options.manual is not None: manual_dir = options.manual + os.sep + options.subject + os.sep + options.visit add = [] for l, ll in enumerate(modalities): if len(options.scans) > (l + 1): add.append( MriScan(name=data_name, scan=options.scans[(l + 1)], modality=ll, mask=None)) if len(add) == 0: add = None info = { 'subject': options.subject, 'visit': options.visit, 't1w': MriScan(name=data_name, scan=options.scans[0], modality='t1w', mask=None), 'add': add } if options.corr is not None: info['corr_t1w'] = MriTransform(None, 
'corr_t1w', xfm=options.corr[0]) if len(options.corr) > 1: info['corr_t2w'] = MriTransform(None, 'corr_t2w', xfm=options.corr[1]) ret = standard_pipeline(info, output_dir, options=pipeline_parameters, work_dir=output_dir, manual_dir=manual_dir) # TODO: check whether a summary file already exists? #save_pipeline_output([info],options.output+os.sep+'summary.json') else: print("Refusing to run without input data, see --help") exit(1) except: print("Exception :{}".format(sys.exc_info()[0])) traceback.print_exc(file=sys.stdout) raise
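# --- illustrative sketch (not part of the pipeline) ----------------------
# main() fans out one standard_pipeline job per (subject, visit) row and only
# harvests results after futures.wait(). The same pattern with the standard
# library's concurrent.futures standing in for scoop.futures (aliased as cf
# to avoid shadowing scoop's futures used elsewhere in this file);
# process_one is a hypothetical stand-in for standard_pipeline:
from concurrent import futures as cf

def process_one(subject, visit):
    return '{}_{}'.format(subject, visit)  # placeholder for the real work

def run_all(inputs):
    with cf.ProcessPoolExecutor() as ex:
        jobs = [ex.submit(process_one, s['subject'], s['visit']) for s in inputs]
        cf.wait(jobs, return_when=cf.ALL_COMPLETED)
        for s, j in zip(inputs, jobs):
            s['output'] = j.result()        # re-raises here if the job failed
    return inputs

# run_all([{'subject': 'S01', 'visit': 'V1'}, {'subject': 'S02', 'visit': 'V1'}])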
# ps_mask, gal_mask = read_dpc_masks(freq, NSIDE)
# smooth_combine_config = dict(fwhm=np.radians(1.), degraded_nside=128, smooth_mask=ps_mask, spectra_mask=gal_mask)
# chtags = [""]
# if freq == 70:
#     chtags += ["18_23", "19_22", "20_21"]
# for chtag in chtags:
#     surveydiff(freq, chtag, survs, pol='IQU', smooth_combine_config=smooth_combine_config, mapreader=mapreader, output_folder="dx9/surveydiff/", read_masks=read_dpc_masks)

#print "SURVDIFF, CH"
#survs = [1, 2, 3, 4, 5]
#freqs = [70]
#for freq in freqs:
#    ps_mask, gal_mask = read_dpc_masks(freq, NSIDE)
#    smooth_combine_config = dict(fwhm=np.radians(1.), degraded_nside=128, smooth_mask=ps_mask, spectra_mask=gal_mask)
#    chtags = chlist(freq)
#    for chtag in chtags:
#        surveydiff(freq, chtag, survs, pol='I', smooth_combine_config=smooth_combine_config, mapreader=mapreader, output_folder="dx9/surveydiff/", read_masks=read_dpc_masks)

#print "CHDIFF"
#survs = [1]
#freqs = [30, 44, 70]
#
#for freq in freqs:
#    ps_mask, gal_mask = read_dpc_masks(freq, NSIDE)
#    smooth_combine_config = dict(fwhm=np.radians(1.), degraded_nside=128, smooth_mask=ps_mask, spectra_mask=gal_mask)
#    for surv in survs:
#        chdiff(freq, ["LFI%d" % h for h in HORNS[freq]], surv, pol='I', smooth_combine_config=smooth_combine_config, mapreader=mapreader, output_folder="dx9/chdiff/", read_masks=read_dpc_masks)

print("Wait for %d tasks to complete" % len(tasks))
futures.wait(tasks)
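# --- illustrative sketch (not part of the pipeline) ----------------------
# The fragment above batches many survey-difference jobs into `tasks` and
# blocks once on futures.wait(tasks). Note that wait() by itself does not
# surface failures: each future's .result() must still be inspected. A
# stand-in using concurrent.futures (job() is hypothetical):
from concurrent import futures as cf

def job(i):
    if i == 2:
        raise ValueError("task %d failed" % i)
    return i * i

with cf.ThreadPoolExecutor() as ex:
    tasks = [ex.submit(job, i) for i in range(4)]
    cf.wait(tasks)               # blocks until done; exceptions stay inside the futures
    for t in tasks:
        try:
            print(t.result())    # prints 0, 1, 9 -- and reports one failure
        except ValueError as e:
            print("failed:", e)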
def fusion_grading(input_scan, library_description, output_segment, input_mask=None, parameters={}, exclude=[], work_dir=None, debug=False, ec_variant=None, fuse_variant=None, regularize_variant=None, add=[], cleanup=False, cleanup_xfm=False, exclude_re=None): """Apply fusion segmentation""" if debug: print("Segmentation parameters:") print(repr(parameters)) out_variant = '' if fuse_variant is not None: out_variant += fuse_variant if regularize_variant is not None: out_variant += '_' + regularize_variant if ec_variant is not None: out_variant += '_' + ec_variant if work_dir is None: work_dir = output_segment + os.sep + 'work_segment' if not os.path.exists(work_dir): os.makedirs(work_dir) work_lib_dir = work_dir + os.sep + 'library' work_lib_dir_f = work_dir + os.sep + 'library_f' if not os.path.exists(work_lib_dir): os.makedirs(work_lib_dir) if not os.path.exists(work_lib_dir_f): os.makedirs(work_lib_dir_f) library_nl_samples_avail = library_description['nl_samples_avail'] library_modalities = library_description.get('modalities', 1) - 1 # perform symmetric segmentation segment_symmetric = parameters.get('segment_symmetric', False) # read filter paramters pre_filters = parameters.get('pre_filters', None) post_filters = parameters.get('post_filters', parameters.get('filters', None)) # perform local linear registration do_initial_register = parameters.get('initial_register', parameters.get('linear_register', {})) if do_initial_register is not None and isinstance(do_initial_register, dict): initial_register = do_initial_register do_initial_register = True else: initial_register = {} inital_reg_type = parameters.get( 'initial_register_type', parameters.get('linear_register_type', initial_register.get('type', '-lsq12'))) inital_reg_ants = parameters.get( 'initial_register_ants', parameters.get('linear_register_ants', False)) inital_reg_options = parameters.get('initial_register_options', initial_register.get('options', None)) inital_reg_downsample = parameters.get( 'initial_register_downsample', initial_register.get('downsample', None)) inital_reg_use_mask = parameters.get( 'initial_register_use_mask', initial_register.get('use_mask', False)) initial_reg_objective = initial_register.get('objective', '-xcorr') # perform local linear registration do_initial_local_register = parameters.get( 'initial_local_register', parameters.get('local_linear_register', {})) if do_initial_local_register is not None and isinstance( do_initial_local_register, dict): initial_local_register = do_initial_local_register do_initial_local_register = True else: initial_local_register = {} local_reg_type = parameters.get( 'local_register_type', initial_local_register.get('type', '-lsq12')) local_reg_ants = parameters.get('local_register_ants', False) local_reg_opts = parameters.get( 'local_register_options', initial_local_register.get('options', None)) local_reg_bbox = parameters.get('local_register_bbox', initial_local_register.get('bbox', False)) local_reg_downsample = parameters.get( 'local_register_downsample', initial_local_register.get('downsample', None)) local_reg_use_mask = parameters.get( 'local_register_use_mask', initial_local_register.get('use_mask', True)) local_reg_objective = initial_local_register.get('objective', '-xcorr') # if non-linear registraiton should be performed for library creation do_nonlinear_register = parameters.get('non_linear_register', False) # if non-linear registraiton should be performed with ANTS do_nonlinear_register_ants = parameters.get('non_linear_register_ants', False) 
nonlinear_register_type = parameters.get('non_linear_register_type', None) if nonlinear_register_type is None: if do_nonlinear_register_ants: nonlinear_register_type = 'ants' # if non-linear registraiton should be performed pairwise do_pairwise = parameters.get('non_linear_pairwise', False) # if pairwise registration should be performed using ANTS do_pairwise_ants = parameters.get('non_linear_pairwise_ants', True) pairwise_register_type = parameters.get('non_linear_pairwise_type', None) if pairwise_register_type is None: if do_pairwise_ants: pairwise_register_type = 'ants' # should we use ANTs library_preselect = parameters.get('library_preselect', 10) library_preselect_step = parameters.get('library_preselect_step', None) library_preselect_method = parameters.get('library_preselect_method', 'MI') nlreg_level = parameters.get('non_linear_register_level', 2) nlreg_start = parameters.get('non_linear_register_start', 16) nlreg_options = parameters.get('non_linear_register_options', None) nlreg_downsample = parameters.get('non_linear_register_downsample', None) pairwise_level = parameters.get('pairwise_level', 2) pairwise_start = parameters.get('pairwise_start', 16) pairwise_options = parameters.get('pairwise_options', None) fuse_options = parameters.get('fuse_options', None) resample_order = parameters.get('resample_order', 2) label_resample_order = parameters.get('label_resample_order', resample_order) resample_baa = parameters.get('resample_baa', True) use_median = parameters.get('use_median', False) # QC image paramters qc_options = parameters.get('qc_options', None) # special case for training error correction, assume input scan is already pre-processed run_in_bbox = parameters.get('run_in_bbox', False) classes_number = library_description['classes_number'] groups = library_description['groups'] seg_datatype = 'byte' output_info = {} sample = MriDataset(scan=input_scan, seg=None, mask=input_mask, protect=True, add=add) # get parameters model = MriDataset(scan=library_description['model'], mask=library_description['model_mask'], add=library_description.get('model_add', [])) local_model = MriDataset( scan=library_description['local_model'], mask=library_description['local_model_mask'], scan_f=library_description.get('local_model_flip', None), mask_f=library_description.get('local_model_mask_flip', None), seg=library_description.get('local_model_seg', None), seg_f=library_description.get('local_model_seg_flip', None), add=library_description.get('local_model_add', []), add_f=library_description.get('local_model_add_flip', []), ) library = library_description['library'] sample_modalities = len(add) print("\n\n") print("Sample modalities:{}".format(sample_modalities)) print("\n\n") # apply the same steps as used in library creation to perform segmentation: # global initial_xfm = None nonlinear_xfm = None bbox_sample = None nl_sample = None bbox_linear_xfm = None sample_filtered = MriDataset(prefix=work_dir, name='flt_' + sample.name, add_n=sample_modalities) # QC file # TODO: allow for alternative location, extension sample_qc = work_dir + os.sep + 'qc_' + sample.name + '_' + out_variant + '.jpg' if run_in_bbox: segment_symmetric = False do_initial_register = False do_initial_local_register = False # assume filter already applied! 
pre_filters = None post_filters = None if segment_symmetric: # need to flip the inputs flipdir = work_dir + os.sep + 'flip' if not os.path.exists(flipdir): os.makedirs(flipdir) sample.scan_f = flipdir + os.sep + os.path.basename(sample.scan) sample.add_f = ['' for (i, j) in enumerate(sample.add)] for (i, j) in enumerate(sample.add): sample.add_f[i] = flipdir + os.sep + os.path.basename( sample.add[i]) if sample.mask is not None: sample.mask_f = flipdir + os.sep + 'mask_' + os.path.basename( sample.scan) generate_flip_sample(sample) if pre_filters is not None: apply_filter(sample.scan, sample_filtered.scan, pre_filters, model=model.scan, model_mask=model.mask) if sample.mask is not None: shutil.copyfile(sample.mask, sample_filtered.mask) for i, j in enumerate(sample.add): shutil.copyfile(sample.add[i], sample_filtered.add[i]) sample = sample_filtered else: sample_filtered = None output_info['sample_filtered'] = sample_filtered if do_initial_register: initial_xfm = MriTransform(prefix=work_dir, name='init_' + sample.name) if inital_reg_type == 'elx' or inital_reg_type == 'elastix': elastix_registration(sample, model, initial_xfm, symmetric=segment_symmetric, parameters=inital_reg_options, nl=False, downsample=inital_reg_downsample) elif inital_reg_type == 'ants' or inital_reg_ants: linear_registration(sample, model, initial_xfm, symmetric=segment_symmetric, reg_type=inital_reg_type, linreg=inital_reg_options, ants=True, downsample=inital_reg_downsample) else: linear_registration(sample, model, initial_xfm, symmetric=segment_symmetric, reg_type=inital_reg_type, linreg=inital_reg_options, downsample=inital_reg_downsample, objective=initial_reg_objective) output_info['initial_xfm'] = initial_xfm # local bbox_sample = MriDataset(prefix=work_dir, name='bbox_init_' + sample.name, add_n=sample_modalities) if do_initial_local_register: bbox_linear_xfm = MriTransform(prefix=work_dir, name='bbox_init_' + sample.name) if local_reg_type == 'elx' or local_reg_type == 'elastix': elastix_registration(sample, local_model, bbox_linear_xfm, symmetric=segment_symmetric, init_xfm=initial_xfm, resample_order=resample_order, parameters=local_reg_opts, bbox=local_reg_bbox, downsample=local_reg_downsample) elif local_reg_type == 'ants' or local_reg_ants: linear_registration(sample, local_model, bbox_linear_xfm, init_xfm=initial_xfm, symmetric=segment_symmetric, reg_type=local_reg_type, linreg=local_reg_opts, resample_order=resample_order, ants=True, close=True, bbox=local_reg_bbox, downsample=local_reg_downsample) else: linear_registration(sample, local_model, bbox_linear_xfm, init_xfm=initial_xfm, symmetric=segment_symmetric, reg_type=local_reg_type, linreg=local_reg_opts, resample_order=resample_order, close=True, bbox=local_reg_bbox, downsample=local_reg_downsample, objective=local_reg_objective) else: bbox_linear_xfm = initial_xfm output_info['bbox_initial_xfm'] = bbox_linear_xfm bbox_sample.mask = None bbox_sample.seg = None bbox_sample.seg_f = None warp_sample( sample, local_model, bbox_sample, transform=bbox_linear_xfm, symmetric=segment_symmetric, symmetric_flip=segment_symmetric, # need to flip symmetric dataset resample_order=resample_order, filters=post_filters, ) output_info['bbox_sample'] = bbox_sample # TODO: run local intensity normalization # 3. 
run non-linear registration if needed if do_nonlinear_register: nl_sample = MriDataset(prefix=work_dir, name='nl_' + sample.name, add_n=sample_modalities) nonlinear_xfm = MriTransform(prefix=work_dir, name='nl_' + sample.name) if nonlinear_register_type == 'elx' or nonlinear_register_type == 'elastix': elastix_registration(bbox_sample, local_model, nonlinear_xfm, symmetric=segment_symmetric, level=nlreg_level, start_level=nlreg_start, parameters=nlreg_options, nl=True, downsample=nlreg_downsample) elif nonlinear_register_type == 'ants' or do_nonlinear_register_ants: non_linear_registration(bbox_sample, local_model, nonlinear_xfm, symmetric=segment_symmetric, level=nlreg_level, start_level=nlreg_start, parameters=nlreg_options, ants=True, downsample=nlreg_downsample) else: non_linear_registration(bbox_sample, local_model, nonlinear_xfm, symmetric=segment_symmetric, level=nlreg_level, start_level=nlreg_start, parameters=nlreg_options, ants=False, downsample=nlreg_downsample) print("\n\n\nWarping the sample!:{}\n\n\n".format(bbox_sample)) nl_sample.seg = None nl_sample.seg_f = None nl_sample.mask = None warp_sample(bbox_sample, local_model, nl_sample, transform=nonlinear_xfm, symmetric=segment_symmetric, resample_order=resample_order) output_info['nl_sample'] = nl_sample else: nl_sample = bbox_sample output_info['nonlinear_xfm'] = nonlinear_xfm if exclude_re is not None: _exclude_re = re.compile(exclude_re) selected_library = [ i for i in library if not _exclude_re.match(i[2]) and i[2] not in exclude ] else: selected_library = [i for i in library if i[2] not in exclude] selected_library_f = [] if segment_symmetric: # fill up with all entries selected_library_f = selected_library # library pre-selection if needed # we need balanced number of samples for each group if library_preselect > 0 and library_preselect < len(selected_library): loaded = False loaded_f = False if os.path.exists(work_lib_dir + os.sep + 'sel_library.json'): with open(work_lib_dir + os.sep + 'sel_library.json', 'r') as f: selected_library = json.load(f) loaded = True if segment_symmetric and os.path.exists(work_lib_dir_f + os.sep + 'sel_library.json'): with open(work_lib_dir_f + os.sep + 'sel_library.json', 'r') as f: selected_library_f = json.load(f) loaded_f = True if do_nonlinear_register: if not loaded: selected_library = preselect(nl_sample, selected_library, method=library_preselect_method, number=library_preselect, use_nl=library_nl_samples_avail, step=library_preselect_step, lib_add_n=library_modalities, groups=groups) if segment_symmetric: if not loaded_f: selected_library_f = preselect( nl_sample, selected_library, method=library_preselect_method, number=library_preselect, use_nl=library_nl_samples_avail, flip=True, step=library_preselect_step, lib_add_n=library_modalities, groups=groups) else: if not loaded: selected_library = preselect(bbox_sample, selected_library, method=library_preselect_method, number=library_preselect, use_nl=False, step=library_preselect_step, lib_add_n=library_modalities, groups=groups) if segment_symmetric: if not loaded_f: selected_library_f = preselect( bbox_sample, selected_library, method=library_preselect_method, number=library_preselect, use_nl=False, flip=True, step=library_preselect_step, lib_add_n=library_modalities, groups=groups) if not loaded: with open(work_lib_dir + os.sep + 'sel_library.json', 'w') as f: json.dump(selected_library, f) if not loaded_f: if segment_symmetric: with open(work_lib_dir_f + os.sep + 'sel_library.json', 'w') as f: json.dump(selected_library_f, f) 
output_info['selected_library'] = selected_library if segment_symmetric: output_info['selected_library_f'] = selected_library_f selected_library_scan = [] selected_library_xfm = [] selected_library_warped2 = [] selected_library_xfm2 = [] selected_library_scan_f = [] selected_library_xfm_f = [] selected_library_warped_f = [] selected_library_warped2_f = [] selected_library_xfm2_f = [] for (i, j) in enumerate(selected_library): d = MriDataset(scan=j[2], seg=j[3], add=j[4:4 + library_modalities], group=int(j[0]), grading=float(j[1])) selected_library_scan.append(d) selected_library_warped2.append( MriDataset(name=d.name, prefix=work_lib_dir, add_n=sample_modalities, group=int(j[0]), grading=float(j[1]))) selected_library_xfm2.append( MriTransform(name=d.name, prefix=work_lib_dir)) if library_nl_samples_avail: selected_library_xfm.append( MriTransform(xfm=j[4 + library_modalities], xfm_inv=j[5 + library_modalities])) output_info['selected_library_warped2'] = selected_library_warped2 output_info['selected_library_xfm2'] = selected_library_xfm2 if library_nl_samples_avail: output_info['selected_library_xfm'] = selected_library_xfm if segment_symmetric: for (i, j) in enumerate(selected_library_f): d = MriDataset(scan=j[2], seg=j[3], add=j[4:4 + library_modalities], group=int(j[0]), grading=float(j[1])) selected_library_scan_f.append(d) selected_library_warped2_f.append( MriDataset(name=d.name, prefix=work_lib_dir_f, add_n=sample_modalities)) selected_library_xfm2_f.append( MriTransform(name=d.name, prefix=work_lib_dir_f)) if library_nl_samples_avail: selected_library_xfm_f.append( MriTransform(xfm=j[4 + library_modalities], xfm_inv=j[5 + library_modalities])) output_info['selected_library_warped2_f'] = selected_library_warped2_f output_info['selected_library_xfm2_f'] = selected_library_xfm2_f if library_nl_samples_avail: output_info['selected_library_xfm_f'] = selected_library_xfm_f # nonlinear registration to template or individual if do_pairwise: # Right now ignore precomputed transformations results = [] if debug: print("Performing pairwise registration") for (i, j) in enumerate(selected_library): # TODO: make clever usage of precomputed transform if available if pairwise_register_type == 'elx' or pairwise_register_type == 'elastix': results.append( futures.submit( elastix_registration, bbox_sample, selected_library_scan[i], selected_library_xfm2[i], level=pairwise_level, start_level=pairwise_start, parameters=pairwise_options, nl=True, output_inv_target=selected_library_warped2[i], warp_seg=True, resample_order=resample_order, resample_baa=resample_baa)) elif pairwise_register_type == 'ants' or do_pairwise_ants: results.append( futures.submit( non_linear_registration, bbox_sample, selected_library_scan[i], selected_library_xfm2[i], level=pairwise_level, start_level=pairwise_start, parameters=pairwise_options, ants=True, output_inv_target=selected_library_warped2[i], warp_seg=True, resample_order=resample_order, resample_baa=resample_baa)) else: results.append( futures.submit( non_linear_registration, bbox_sample, selected_library_scan[i], selected_library_xfm2[i], level=pairwise_level, start_level=pairwise_start, parameters=pairwise_options, ants=False, output_inv_target=selected_library_warped2[i], warp_seg=True, resample_order=resample_order, resample_baa=resample_baa)) if segment_symmetric: for (i, j) in enumerate(selected_library_f): # TODO: make clever usage of precomputed transform if available if pairwise_register_type == 'elx' or pairwise_register_type == 'elastix': results.append( 
futures.submit( elastix_registration, bbox_sample, selected_library_scan_f[i], selected_library_xfm2_f[i], level=pairwise_level, start_level=pairwise_start, parameters=pairwise_options, nl=True, output_inv_target=selected_library_warped2_f[i], warp_seg=True, flip=True, resample_order=resample_order, resample_baa=resample_baa)) elif pairwise_register_type == 'ants' or do_pairwise_ants: results.append( futures.submit( non_linear_registration, bbox_sample, selected_library_scan_f[i], selected_library_xfm2_f[i], level=pairwise_level, start_level=pairwise_start, parameters=pairwise_options, ants=True, output_inv_target=selected_library_warped2_f[i], warp_seg=True, flip=True, resample_order=resample_order, resample_baa=resample_baa)) else: results.append( futures.submit( non_linear_registration, bbox_sample, selected_library_scan_f[i], selected_library_xfm2_f[i], level=pairwise_level, start_level=pairwise_start, parameters=pairwise_options, ants=False, output_inv_target=selected_library_warped2_f[i], warp_seg=True, flip=True, resample_order=resample_order, resample_baa=resample_baa)) # TODO: do we really need to wait for result here? futures.wait(results, return_when=futures.ALL_COMPLETED) else: results = [] for (i, j) in enumerate(selected_library): lib_xfm = None if library_nl_samples_avail: lib_xfm = selected_library_xfm[i] results.append( futures.submit(concat_resample, selected_library_scan[i], lib_xfm, nonlinear_xfm, selected_library_warped2[i], resample_order=resample_order, label_resample_order=label_resample_order, resample_baa=resample_baa)) if segment_symmetric: for (i, j) in enumerate(selected_library_f): lib_xfm = None if library_nl_samples_avail: lib_xfm = selected_library_xfm_f[i] results.append( futures.submit(concat_resample, selected_library_scan_f[i], lib_xfm, nonlinear_xfm, selected_library_warped2_f[i], resample_order=resample_order, label_resample_order=label_resample_order, resample_baa=resample_baa, flip=True)) # TODO: do we really need to wait for result here? 
futures.wait(results, return_when=futures.ALL_COMPLETED) results = [] sample_seg = MriDataset(name='bbox_seg_' + sample.name + out_variant, prefix=work_dir) sample_grad = MriDataset(name='bbox_grad_' + sample.name + out_variant, prefix=work_dir) results.append( futures.submit(fuse_grading, bbox_sample, sample_seg, selected_library_warped2, flip=False, classes_number=classes_number, fuse_options=fuse_options, model=local_model, debug=debug, fuse_variant=fuse_variant, groups=groups)) if segment_symmetric: results.append( futures.submit(fuse_grading, bbox_sample, sample_seg, selected_library_warped2_f, flip=True, classes_number=classes_number, fuse_options=fuse_options, model=local_model, debug=debug, fuse_variant=fuse_variant, groups=groups)) futures.wait(results, return_when=futures.ALL_COMPLETED) output_info['fuse'] = results[0].result() if segment_symmetric: output_info['fuse_f'] = results[1].result() if qc_options: # generate QC images output_info['qc'] = generate_qc_image( sample_seg, bbox_sample, sample_qc, options=qc_options, model=local_model, symmetric=segment_symmetric, labels=library_description['classes_number']) # cleanup if need if cleanup: shutil.rmtree(work_lib_dir) shutil.rmtree(work_lib_dir_f) if nl_sample is not None: nl_sample.cleanup() if cleanup_xfm: if nonlinear_xfm is not None: nonlinear_xfm.cleanup() if not run_in_bbox: # TODO: apply error correction here # rename labels to final results sample_seg_native = MriDataset(name='seg_' + sample.name + out_variant, prefix=work_dir) warp_rename_seg( sample_seg, sample, sample_seg_native, transform=bbox_linear_xfm, invert_transform=True, lut=library_description['map'], symmetric=segment_symmetric, symmetric_flip=segment_symmetric, use_flipped= segment_symmetric, # needed to flip .seg_f back to right orientation flip_lut=library_description['flip_map'], resample_baa=resample_baa, resample_order=label_resample_order, datatype=seg_datatype) warp_sample( sample_seg, sample, sample_seg_native, transform=bbox_linear_xfm, invert_transform=True, symmetric=segment_symmetric, symmetric_flip=segment_symmetric, # need to flip symmetric dataset resample_order=resample_order) output_info['sample_seg_native'] = sample_seg_native if segment_symmetric: join_left_right(sample_seg_native, output_segment + '_seg.mnc', output_segment + '_grad.mnc', datatype=seg_datatype) else: shutil.copyfile(sample_seg_native.seg, output_segment + '_seg.mnc') shutil.copyfile(sample_seg_native.scan, output_segment + '_grad.mnc') output_info['output_segment'] = output_segment + '_seg.mnc' output_info['output_grading'] = output_segment + '_grad.mnc' volumes = seg_to_volumes_grad(output_segment + '_seg.mnc', output_segment + '_vol.json', label_map=library_description.get( 'label_map', None), grad=output_segment + '_grad.mnc', median=use_median) output_info['output_volumes'] = volumes output_info['output_volumes_json'] = output_segment + '_vol.json' # TODO: cleanup more here (?) return (output_segment + '_seg.mnc', output_segment + '_grad.mnc', volumes, output_info) else: # special case, needed to train error correction TODO: remove? volumes = seg_to_volumes_grad(sample_seg.seg, output_segment + '_vol.json', grad=sample_seg.scan, median=use_median) return (sample_seg.seg, sample_seg.scan, volumes, output_info)
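# --- illustrative sketch (not part of the pipeline) ----------------------
# fusion_grading ends by converting the fused label volume and grading map
# into per-structure numbers via seg_to_volumes_grad. What such a helper
# computes, in plain numpy (function name and the voxel_volume argument are
# assumptions for illustration, not the pipeline's API):
import numpy as np

def volumes_and_grading(seg, grad, voxel_volume=1.0, median=False):
    """Per-label voxel volume plus mean (or median) grading inside each label."""
    reduce_fn = np.median if median else np.mean
    out = {}
    for label in np.unique(seg):
        if label == 0:                        # skip background
            continue
        m = seg == label
        out[int(label)] = {'volume': float(m.sum()) * voxel_volume,
                           'grading': float(reduce_fn(grad[m]))}
    return out

# volumes_and_grading(np.array([0, 1, 1, 2, 2, 2]),
#                     np.array([0., .2, .4, .9, 1., .8]))
# -> {1: {'volume': 2.0, 'grading': ~0.3}, 2: {'volume': 3.0, 'grading': 0.9}}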
def runPipeline(pickle, workdir=None):
    '''Run the pipeline: process the selected pickle file'''
    # TODO: make VBM options part of initialization parameters
    patient = None
    try:
        if not os.path.exists(pickle):
            raise IplError(' -- Pickle does not exist: ' + pickle)
        # Read patient
        patient = LngPatient.read(pickle)
        if version != patient.pipeline_version:
            print(' #### NOT THE SAME PIPELINE VERSION!! ')
            raise IplError(' - Change the pipeline version or restart all processing')
        setFilenames(patient)
        print("Processing:")
        patient.printself()
        if workdir is not None:
            patient.workdir = workdir
        # prepare qc folder
        tps = sorted(patient.keys())
        jobs = []
        for tp in tps:
            jobs.append(futures.submit(runTimePoint_FirstStage, tp, patient))
        futures.wait(jobs, return_when=futures.ALL_COMPLETED)
        print('First stage finished!')
        patient.write(patient.pickle)  # copy new images into the pickle
        jobs = []
        if len(tps) == 1:
            for tp in tps:
                runTimePoint_SecondStage(tp, patient, patient.vbm_options)
        else:
            # create longitudinal template
            # ############################
            # it creates a new stx space (stx2) by registering the linear template to the atlas;
            # all images are aligned using this new template and the bias correction used in the template creation
            pipeline_linearlngtemplate(patient)
            for tp in tps:
                jobs.append(futures.submit(runSkullStripping, tp, patient))
            # wait for all jobs to finish
            futures.wait(jobs, return_when=futures.ALL_COMPLETED)
            # using the stx2 space, we build the non-linear template
            # ################################################
            pipeline_lngtemplate(patient)
            # non-linear registration of the template to the atlas
            # ##########################
            pipeline_atlasregistration(patient)
            if len(patient.add) > 0:
                pipeline_run_add(patient)
            # Concatenate xfm files for each timepoint.
            # run per-timepoint tissue classification
            jobs = []
            for tp in tps:
                jobs.append(futures.submit(runTimePoint_ThirdStage, tp, patient))
            futures.wait(jobs, return_when=futures.ALL_COMPLETED)
            # longitudinal classification
            # ############################
            if patient.dolngcls:
                pipeline_lng_classification(patient)
            else:
                print(' -- Skipping Lng classification')
            jobs = []
            for tp in tps:
                jobs.append(futures.submit(runTimePoint_FourthStage, tp, patient, patient.vbm_options))
            futures.wait(jobs, return_when=futures.ALL_COMPLETED)
        patient.write(patient.pickle)  # copy new images into the pickle
        return patient.id
    except mincError as e:
        print("Exception in runPipeline:{}".format(repr(e)))
        traceback.print_exc(file=sys.stdout)
        raise
    except:
        print("Exception in runPipeline:{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stdout)
        raise
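# --- illustrative sketch (not part of the pipeline) ----------------------
# runPipeline round-trips the patient state through a pickle file and refuses
# to continue when the stored pipeline version differs from the current one.
# The bare mechanics of that guard; SimplePatient and PIPELINE_VERSION are
# illustrative stand-ins for LngPatient and the module-level `version`, and
# the pickle module is aliased to avoid clashing with the `pickle` argument
# above:
import os
import pickle as pkl
import tempfile

PIPELINE_VERSION = '1.0'

class SimplePatient(object):
    def __init__(self, pid):
        self.id = pid
        self.pipeline_version = PIPELINE_VERSION

    def write(self, path):
        with open(path, 'wb') as f:
            pkl.dump(self, f)

    @staticmethod
    def read(path):
        with open(path, 'rb') as f:
            return pkl.load(f)

_p = os.path.join(tempfile.gettempdir(), 'subj01.pickle')
SimplePatient('subj01').write(_p)
patient = SimplePatient.read(_p)
if patient.pipeline_version != PIPELINE_VERSION:
    raise RuntimeError(' - Change the pipeline version or restart all processing')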
def cv_fusion_segment(cv_parameters,
                      segmentation_library,
                      output,
                      segmentation_parameters,
                      ec_parameters=None,
                      debug=False,
                      cleanup=False,
                      ext=False,
                      extlib=None,
                      cv_iter=None):
    '''Run a cross-validation experiment: exclude N subjects at a time,
    run segmentation on them and compare with the reference.
    Currently supports LOOCV and random CV.'''
    # TODO: implement more realistic, random schemes
    validation_library = cv_parameters['validation_library']
    # maximum number of iterations
    cv_iterations = cv_parameters.get('iterations', -1)
    # number of samples to exclude
    cv_exclude = cv_parameters.get('cv', 1)
    # used to distinguish different versions of error correction
    ec_variant = cv_parameters.get('ec_variant', 'ec')
    # used to distinguish different versions of label fusion
    fuse_variant = cv_parameters.get('fuse_variant', 'fuse')
    # used to distinguish different versions of cross-validation
    cv_variant = cv_parameters.get('cv_variant', 'cv')
    # different version of label regularization
    regularize_variant = cv_parameters.get('regularize_variant', 'gc')

    cv_output = output + os.sep + cv_variant + '_stats.json'
    res_output = output + os.sep + cv_variant + '_res.json'

    if extlib is not None:
        validation_library = extlib

    if not isinstance(validation_library, list):
        with open(validation_library, 'r') as f:
            validation_library = list(csv.reader(f))

    if cv_iter is not None:
        cv_iter = int(cv_iter)

    stat_results = None
    output_results = None

    if ext:
        # TODO: move pre-processing here?
        # pre-process presegmented scans here!
        # we only need to re-create left-right flipped segmentation
        pass

    if cv_iterations == -1 and cv_exclude == 1:
        # simple LOO cross-validation
        (stat_results, output_results) = loo_cv_fusion_segment(
            validation_library,
            segmentation_library,
            output,
            segmentation_parameters,
            ec_parameters=ec_parameters,
            debug=debug,
            cleanup=cleanup,
            ec_variant=ec_variant,
            fuse_variant=fuse_variant,
            cv_variant=cv_variant,
            regularize_variant=regularize_variant,
            ext=ext,
            cv_iter=cv_iter)
    else:
        # arbitrary number of iterations
        (stat_results, output_results) = full_cv_fusion_segment(
            validation_library,
            segmentation_library,
            output,
            segmentation_parameters,
            cv_iterations,
            cv_exclude,
            ec_parameters=ec_parameters,
            debug=debug,
            cleanup=cleanup,
            ec_variant=ec_variant,
            fuse_variant=fuse_variant,
            cv_variant=cv_variant,
            regularize_variant=regularize_variant,
            ext=ext,
            cv_iter=cv_iter)

    # average error maps
    if cv_iter is None or cv_iter == -1:
        results = []
        output_results_all = {'results': output_results}
        output_results_all['cv_stats'] = cv_output
        output_results_all['error_maps'] = {}
        all_error_maps = []

        for (i, j) in enumerate(output_results[0]['error_maps']):
            out_avg = output + os.sep + cv_variant + '_error_{:03d}.mnc'.format(i)
            output_results_all['error_maps'][i] = out_avg
            all_error_maps.append(out_avg)
            maps = [k['error_maps'][i] for k in output_results]
            results.append(futures.submit(average_error_maps, maps, out_avg))

        futures.wait(results, return_when=futures.ALL_COMPLETED)

        output_results_all['max_error'] = output + os.sep + cv_variant + '_max_error.mnc'
        max_error_maps(all_error_maps, output_results_all['max_error'])

        with open(cv_output, 'w') as f:
            json.dump(stat_results, f, indent=1, cls=LIBEncoder)
        with open(res_output, 'w') as f:
            json.dump(output_results_all, f, indent=1, cls=LIBEncoder)

        return stat_results
    else:
        # we assume that results will be available later
        return None
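# --- illustrative sketch (not part of the pipeline) ----------------------
# cv_fusion_segment picks between two schemes: cv == 1 with iterations == -1
# means leave-one-out, anything else means `iterations` random draws of `cv`
# held-out subjects. The fold generation in miniature (how
# loo_cv_fusion_segment/full_cv_fusion_segment enumerate folds internally
# may differ):
import random

def cv_folds(n_samples, cv=1, iterations=-1, seed=42):
    if iterations == -1 and cv == 1:
        return [[i] for i in range(n_samples)]   # leave-one-out: one fold per sample
    rng = random.Random(seed)                    # seeded for reproducible draws
    return [rng.sample(range(n_samples), cv) for _ in range(iterations)]

# cv_folds(4)                      -> [[0], [1], [2], [3]]
# cv_folds(10, cv=2, iterations=3) -> three random pairs of held-out indices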
def train_ec_loo(segmentation_library, segmentation_parameters=None, ec_parameters=None, debug=False, fuse_variant='fuse', regularize_variant='gc', ec_variant='ec', cleanup=False, ext=False, train_list=None): '''Train error correction using leave-one-out cross-validation''' # for each N subjects run segmentation and compare try: ec_variant = ec_parameters.get('variant', ec_variant) work_dir = ec_parameters.get( 'work_dir', segmentation_library.prefix + os.sep + fuse_variant) ec_output = ec_parameters.get( 'output', work_dir + os.sep + ec_variant + '.pickle') ec_border_mask = ec_parameters.get('border_mask', True) ec_border_mask_width = ec_parameters.get('border_mask_width', 3) ec_antialias_labels = ec_parameters.get('antialias_labels', True) ec_blur_labels = ec_parameters.get('blur_labels', 1.0) ec_expit_labels = ec_parameters.get('expit_labels', 1.0) ec_normalize_labels = ec_parameters.get('normalize_labels', True) ec_use_raw = ec_parameters.get('use_raw', False) ec_split = ec_parameters.get('split', None) ec_train_rounds = ec_parameters.get('train_rounds', -1) ec_train_cv = ec_parameters.get('train_cv', 1) ec_sample_pick_strategy = ec_parameters.get('train_pick', 'random') ec_max_samples = ec_parameters.get('max_samples', -1) modalities = ec_parameters.get('train_modalities', segmentation_library.modalities) - 1 print("\n\n") print("EC modalities:{}".format(modalities)) print("train_list={}".format(repr(train_list))) print("ext={}".format(repr(ext))) print("\n\n") try: if not os.path.exists(work_dir): os.makedirs(work_dir) except: pass if (train_list is not None) and not isinstance(train_list, list): print(repr(train_list)) with open(train_list, 'r') as f: train_list = list(csv.reader(f)) if not os.path.exists(work_dir): os.makedirs(work_dir) # setup parameters to stop early local_model_mask = segmentation_library.local_model_mask # disable EC options if present segmentation_parameters['ec_options'] = None ec_train = [] ec_train_file = work_dir + os.sep + 'train_ec_' + ec_variant + '.json' #ec_train_library = segmentation_library.library ec_work_dirs = [] if not os.path.exists(ec_train_file): results = [] _train_list = [] # if we have pre-segmented scans, then we should pre-process training library again (!) 
# and train on pre-segmented scans
if ext and train_list: results2 = [] for (i, j) in enumerate(train_list): n = os.path.basename(j[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0] output_pre_seg = work_dir + os.sep + 'pre_' + n ec_work_dir = work_dir + os.sep + 'work_pre_' + n #TODO: find out how to select appropriate segmentation train_sample = j[0] train_segment = j[1] train_add = [] train_presegment = None train_presegment = j[2] train_add = j[3:3 + modalities] experiment_segmentation_library = copy.deepcopy( segmentation_library) print("Running pre-processing on {} - {}".format( train_sample, train_presegment)) results2.append( futures.submit(fusion_segment, train_sample, experiment_segmentation_library, work_dir + os.sep + n, parameters=segmentation_parameters, debug=True, work_dir=ec_work_dir, ec_variant='noec', fuse_variant=fuse_variant, regularize_variant=regularize_variant, add=train_add, cleanup=cleanup, presegment=train_presegment, preprocess_only=True)) ### print("waiting for {} jobs".format(len(results2))) futures.wait(results2, return_when=futures.ALL_COMPLETED) print("Finished!") #train_list=range() # now pre-fill training library with freshly pre-processed samples for (_i, _j) in enumerate(results2): print("{} - done ".format( _j.result()[1]['bbox_sample'].seg)) # raise("Not FINISHED!") sample_id = os.path.basename(train_list[_i][0]).rsplit( '.gz', 1)[0].rsplit('.mnc', 1)[0] # include into the training list train_list_i = [ i for i, j in enumerate(segmentation_library.library) if j[0].find(sample_id) >= 0 ] # the output should be either one or two samples, if the symmetrized version is used if len(train_list_i) == 1: # we have a single match! match = segmentation_library.library[train_list_i[0]] train = match[0:2] train.append(_j.result()[1]['bbox_sample'].seg) train.extend(match[2:len(match)]) _train_list.append(train) elif len(train_list_i) == 2: # we have left and right samples # we assume that straight is first and flipped is second match = segmentation_library.library[train_list_i[0]] train = match[0:2] train.append(_j.result()[1]['bbox_sample'].seg) train.extend(match[2:len(match)]) _train_list.append(train) # flipped version match = segmentation_library.library[train_list_i[1]] train = match[0:2] train.append(_j.result()[1]['bbox_sample'].seg_f) train.extend(match[2:len(match)]) _train_list.append(train) else: raise Exception("Unexpected number of matches encountered!")
else: _train_list = segmentation_library.library segmentation_parameters['run_in_bbox'] = True if ec_train_cv == 1: print("_train_list={}".format(repr(_train_list))) if ec_train_rounds > 0 and ec_train_rounds < len(_train_list): if ec_sample_pick_strategy == 'random' and ec_max_samples > 0: ec_train_library = random.sample( _train_list, ec_max_samples) else: ec_train_library = _train_list[0:ec_max_samples] else: ec_train_library = _train_list for (_i, _j) in enumerate(ec_train_library): n = os.path.basename(_j[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0] output_loo_seg = work_dir + os.sep + n ec_work_dir = work_dir + os.sep + 'work_ec_' + n #TODO: find out how to select appropriate segmentation train_sample = _j[0] train_segment = _j[1] train_add = [] train_presegment = None print(train_sample) if ext: train_presegment = _j[2] train_add = _j[3:3 + modalities] else: train_add = _j[2:2 + modalities] experiment_segmentation_library = copy.deepcopy( segmentation_library) # remove sample experiment_segmentation_library.library = [ i for i in segmentation_library.library if i[0].find(n) < 0 ] results.append( futures.submit(fusion_segment, train_sample, experiment_segmentation_library, work_dir + os.sep + n, parameters=segmentation_parameters, debug=debug, work_dir=ec_work_dir, ec_variant='noec', fuse_variant=fuse_variant, regularize_variant=regularize_variant, add=train_add, cleanup=cleanup, presegment=train_presegment)) ec_work_dirs.append(ec_work_dir) else: validation_library_idx = range(len(_train_list)) ec_train_library = [] for i in range(ec_train_rounds): ran_file = work_dir + os.sep + ('random_{}_{}.json'.format( ec_variant, i)) if not os.path.exists(ran_file): rem_list = random.sample(validation_library_idx, ec_train_cv) with open(ran_file, 'w') as f: json.dump(rem_list, f) else: with open(ran_file, 'r') as f: rem_list = json.load(f) # ec_sample_pick_strategy=='random' # list of subjects rem_items = [_train_list[j] for j in rem_list] rem_n = [ os.path.basename(j[0]).rsplit('.gz', 1)[0].rsplit('.mnc', 1)[0] for j in rem_items ] rem_lib = [] for j in rem_n: rem_lib.extend([ k for (k, t) in enumerate(_train_list) if t[0].find(j) >= 0 ]) if debug: print(repr(rem_lib)) rem_lib = set(rem_lib) #prepare exclusion list experiment_segmentation_library = copy.deepcopy( segmentation_library) experiment_segmentation_library.library = \ [k for j, k in enumerate(segmentation_library['library']) if j not in rem_lib] for j, k in enumerate(rem_items): output_experiment = work_dir + os.sep + '{}_{}_{}'.format( i, rem_n[j], 'ec') ec_work_dir = work_dir + os.sep + 'work_{}_{}_{}'.format( i, rem_n[j], fuse_variant) # ??? sample = [k[0], k[1]] presegment = None if ext: presegment = k[2] sample.extend(k[3:3 + modalities]) else: sample.extend(k[2:2 + modalities]) ec_train_library.append(sample) results.append( futures.submit( fusion_segment, k[0], experiment_segmentation_library, output_experiment, parameters=segmentation_parameters, debug=debug, work_dir=ec_work_dir, ec_variant='noec', fuse_variant=fuse_variant, regularize_variant=regularize_variant, add=k[2:2 + modalities], cleanup=cleanup, presegment=presegment)) ec_work_dirs.append(ec_work_dir) futures.wait(results, return_when=futures.ALL_COMPLETED) results2 = [] results3 = [] for (i, j) in enumerate(ec_train_library): train_sample = j[0] train_segment = j[1] train_add = j[2:2 + modalities] train_mask = local_model_mask auto_segment = results[i].result()[0] # TODO: use the subject-specific mask somehow? 
if ec_border_mask: train_mask = auto_segment.rsplit( '.mnc', 1)[0] + '_' + ec_variant + '_train_mask.mnc' results2.append( futures.submit(make_border_mask, auto_segment, train_mask, width=ec_border_mask_width, labels=experiment_segmentation_library[ 'classes_number'])) # need to split up multilabel segmentation for training if experiment_segmentation_library['classes_number'] > 2 and ( not ec_use_raw): print( "Splitting into individual files: class_number={} use_raw={}" .format( experiment_segmentation_library['classes_number'], ec_use_raw)) labels_prefix = auto_segment.rsplit('.mnc', 1)[0] results3.append( futures.submit( split_labels, auto_segment, experiment_segmentation_library.classes_number, labels_prefix, antialias=ec_antialias_labels, blur=ec_blur_labels, expit=ec_expit_labels, normalize=ec_normalize_labels)) ec_input = [train_sample] ec_input.extend(train_add) ec_input.extend([ '{}_{:02d}.mnc'.format(labels_prefix, i) for i in range(experiment_segmentation_library.classes_number) ]) ec_input.extend([auto_segment, train_mask, train_segment]) ec_train.append(ec_input) else: # binary label ec_input = [train_sample] ec_input.extend(train_add) ec_input.extend([ auto_segment, auto_segment, train_mask, train_segment ]) ec_train.append(ec_input) if ec_border_mask: futures.wait(results2, return_when=futures.ALL_COMPLETED) if experiment_segmentation_library.classes_number > 2: futures.wait(results3, return_when=futures.ALL_COMPLETED) # TODO run Error correction here with open(ec_train_file, 'w') as f: json.dump(ec_train, f, indent=1) else: with open(ec_train_file, 'r') as r: ec_train = json.load(r) if ec_split is None: if not os.path.exists(ec_output): errorCorrectionTrain( ec_train, ec_output, parameters=ec_parameters, debug=debug, multilabel=segmentation_library['classes_number']) else: results = [] for s in range(ec_split): out = ec_output.rsplit('.pickle', 1)[0] + '_' + str(s) + '.pickle' if not os.path.exists(out): results.append( futures.submit( errorCorrectionTrain, ec_train, out, parameters=ec_parameters, debug=debug, partition=ec_split, part=s, multilabel=segmentation_library['classes_number'])) futures.wait(results, return_when=futures.ALL_COMPLETED) # TODO: cleanup not-needed files here! if cleanup: for i in ec_work_dirs: shutil.rmtree(i) except mincError as e: print("Exception in train_ec_loo:{}".format(str(e))) traceback.print_exc(file=sys.stdout) raise except: print("Exception in train_ec_loo:{}".format(sys.exc_info()[0])) traceback.print_exc(file=sys.stdout) raise
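# --- illustrative sketch (not part of the pipeline) ----------------------
# train_ec_loo optionally restricts error-correction training to a band of
# voxels around label boundaries (make_border_mask with border_mask_width),
# since that is where fusion errors concentrate. The idea in numpy/scipy
# terms, assuming scipy.ndimage is available; the real helper works on MINC
# volumes instead of in-memory arrays:
import numpy as np
from scipy import ndimage

def border_mask(seg, width=3):
    """True within `width` voxels of any label boundary."""
    struct = ndimage.generate_binary_structure(seg.ndim, 1)
    # a voxel is on a boundary if any face-neighbor carries a different label
    edges = (ndimage.maximum_filter(seg, footprint=struct) !=
             ndimage.minimum_filter(seg, footprint=struct))
    if width > 1:
        edges = ndimage.binary_dilation(edges, struct, iterations=width - 1)
    return edges

# seg = np.zeros((9, 9), int); seg[3:6, 3:6] = 1
# border_mask(seg, width=1) marks the square's outline plus the ring around it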
def fuse_segmentations(sample, output, library, fuse_options={}, flip=False, classes_number=2, gco_energy=None, ec_options=None, model=None, debug=False, ec_variant='', fuse_variant='', regularize_variant='', work_dir=None): try: final_out_seg = output.seg scan = sample.scan add_scan = sample.add output_info = {} preseg = sample.seg if flip: scan = sample.scan_f add_scan = sample.add_f final_out_seg = output.seg_f preseg = sample.seg_f if not os.path.exists(final_out_seg): with mincTools(verbose=2) as m: if work_dir is None: work_dir = os.path.dirname(output.seg) dataset_name = sample.name if flip: dataset_name += '_f' out_seg_fuse = work_dir + os.sep + dataset_name + '_' + fuse_variant + '.mnc' out_prob_base = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_prob' out_dist = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_dist.mnc' out_seg_reg = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_' + regularize_variant + '.mnc' out_seg_ec = final_out_seg output_info['work_dir'] = work_dir output_info['dataset_name'] = work_dir if ec_options is None: # skip error-correction part out_seg_reg = out_seg_ec print("ec_options={}".format(repr(ec_options))) output_info['out_seg_reg'] = out_seg_reg output_info['out_seg_fuse'] = out_seg_fuse output_info['out_dist'] = out_dist probs = [ '{}_{:02d}.mnc'.format(out_prob_base, i) for i in range(classes_number) ] output_info['probs'] = probs if preseg is None: patch = 0 search = 0 threshold = 0 iterations = 0 gco_optimize = False nnls = False gco_diagonal = False label_norm = None ext_tool = None if fuse_options is not None: # get parameters patch = fuse_options.get('patch', 0) search = fuse_options.get('search', 0) threshold = fuse_options.get('threshold', 0.0) iterations = fuse_options.get('iter', 3) weights = fuse_options.get('weights', None) nnls = fuse_options.get('nnls', False) label_norm = fuse_options.get('label_norm', None) beta = fuse_options.get('beta', None) new_prog = fuse_options.get('new', True) ext_tool = fuse_options.get('ext', None) # graph-cut based segmentation gco_optimize = fuse_options.get('gco', False) gco_diagonal = fuse_options.get('gco_diagonal', False) gco_wlabel = fuse_options.get('gco_wlabel', 1.0) gco_wdata = fuse_options.get('gco_wdata', 1.0) gco_wintensity = fuse_options.get( 'gco_wintensity', 0.0) gco_epsilon = fuse_options.get('gco_epsilon', 1e-4) if label_norm is not None: print("Using label_norm:{}".format(repr(label_norm))) # need to create rough labeling and average segs = ['multiple_volume_similarity'] segs.extend([i.seg for i in library]) segs.extend( ['--majority', m.tmp('maj_seg.mnc'), '--bg']) m.execute(segs) scans = [i.scan for i in library] m.median(scans, m.tmp('median.mnc')) norm_order = label_norm.get('order', 3) norm_median = label_norm.get('median', True) n_scan = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_norm.mnc' if flip: n_scan = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_f_norm.mnc' hl.label_normalize(scan, m.tmp('maj_seg.mnc'), m.tmp('median.mnc'), m.tmp('maj_seg.mnc'), out=n_scan, order=norm_order, median=norm_median) scan = n_scan if ext_tool is not None: # will run external segmentation tool! 
                        # ext_tool is expected to be a format string
                        segs = ext_tool.format(sample=sample.scan,
                                               mask=sample.mask,
                                               output=out_seg_fuse,
                                               prob_base=out_prob_base,
                                               model_mas=model.mask,  # sic: templates reference {model_mas}
                                               model_atlas=model.seg)
                        outputs = [out_seg_fuse]
                        m.command(segs, inputs=[sample.scan], outputs=outputs)
                        pass  # TODO: finish this
                    elif patch == 0 and search == 0:
                        # perform simple majority voting
                        segs = ['multiple_volume_similarity']
                        segs.extend([i.seg for i in library])
                        segs.extend(['--majority', out_seg_fuse, '--bg'])
                        m.execute(segs)

                        # TODO: output fake probs?
                        if gco_energy is not None and gco_optimize:
                            # TODO: move these settings into parameters
                            split_labels(out_seg_fuse, classes_number, out_prob_base,
                                         antialias=True, blur=1.0,
                                         expit=1.0, normalize=True)
                    else:
                        # run patch-based label fusion
                        # create text file with the training library
                        train_lib = os.path.dirname(library[0].seg) + os.sep + sample.name + '.lst'
                        if flip:
                            train_lib = os.path.dirname(library[0].seg) + os.sep + sample.name + '_f.lst'

                        output_info['train_lib'] = train_lib

                        with open(train_lib, 'w') as f:
                            for i in library:
                                ss = [os.path.basename(i.scan)]
                                ss.extend([os.path.basename(j) for j in i.add])
                                ss.append(os.path.basename(i.seg))
                                f.write(",".join(ss))
                                f.write("\n")

                        outputs = []

                        if len(add_scan) > 0:
                            segs = ['itk_patch_morphology_mc', scan,
                                    '--train', train_lib,
                                    '--search', str(search),
                                    '--patch', str(patch),
                                    '--discrete', str(classes_number),
                                    '--adist', out_dist,
                                    '--prob', out_prob_base]
                            if weights is not None:
                                segs.extend(['--weights', weights])
                            segs.extend(add_scan)  # plug in additional modalities
                            segs.extend(['--output', out_seg_fuse])
                        else:
                            if nnls:
                                segs = ['itk_patch_segmentation', scan,
                                        '--train', train_lib,
                                        '--search', str(search),
                                        '--patch', str(patch),
                                        '--discrete', str(classes_number),
                                        '--iter', str(iterations),
                                        '--prob', out_prob_base,
                                        '--adist', out_dist,
                                        '--nnls',
                                        '--threshold', str(threshold)]
                            else:
                                if new_prog:
                                    segs = ['itk_patch_segmentation', '--exp']
                                else:
                                    segs = ['itk_patch_morphology']
                                segs.extend([scan,
                                             '--train', train_lib,
                                             '--search', str(search),
                                             '--patch', str(patch),
                                             '--discrete', str(classes_number),
                                             '--iter', str(iterations),
                                             '--prob', out_prob_base,
                                             '--adist', out_dist,
                                             '--threshold', str(threshold)])
                                if beta is not None:
                                    segs.extend(['--beta', str(beta)])
                            segs.append(out_seg_fuse)

                        outputs = [out_seg_fuse]
                        outputs.extend(probs)

                        if sample.mask is not None:
                            segs.extend(['--mask', sample.mask])

                        m.command(segs, inputs=[sample.scan], outputs=outputs)
                        print(' '.join(segs))

                    if gco_energy is not None and gco_optimize:
                        gco = ['gco_classify', '--cooc', gco_energy]
                        gco.extend(probs)
                        gco.extend([out_seg_reg,
                                    '--iter', '1000',
                                    '--wlabel', str(gco_wlabel),
                                    '--wdata', str(gco_wdata),
                                    '--epsilon', str(gco_epsilon)])
                        if gco_diagonal:
                            gco.append('--diagonal')
                        if gco_wintensity > 0.0:
                            gco.extend(['--intensity', scan,
                                        '--wintensity', str(gco_wintensity)])  # fixed NameError: was gco_wintensit
                        if sample.mask is not None:
                            gco.extend(['--mask', sample.mask])
                        m.command(gco, inputs=probs, outputs=[out_seg_reg])
                    else:
                        shutil.copyfile(out_seg_fuse, out_seg_reg)
                else:
                    # shutil.copyfile(preseg, out_seg_reg)
                    if ec_options is None:
                        shutil.copyfile(preseg, final_out_seg)
                        out_seg_reg = final_out_seg
                    else:
                        out_seg_reg = preseg

                    output_info['out_seg_reg'] = out_seg_reg
                    output_info['out_seg_fuse'] = out_seg_reg
                    output_info['out_dist'] = None
                    output_info['prob'] = None
                    # out_seg_reg = preseg

                if ec_options is not None:
                    # create ec mask
                    ec_border_mask = ec_options.get('border_mask', True)
                    ec_border_mask_width = ec_options.get('border_mask_width', 3)
                    ec_antialias_labels = ec_options.get('antialias_labels', True)
                    ec_blur_labels = ec_options.get('blur_labels', 1.0)
                    ec_expit_labels = ec_options.get('expit_labels', 1.0)
                    ec_normalize_labels = ec_options.get('normalize_labels', True)
                    ec_use_raw = ec_options.get('use_raw', False)
                    ec_split = ec_options.get('split', None)

                    train_mask = model.mask
                    ec_input_prefix = out_seg_reg.rsplit('.mnc', 1)[0] + '_' + ec_variant

                    if ec_border_mask:
                        train_mask = ec_input_prefix + '_train_mask.mnc'
                        make_border_mask(out_seg_reg, train_mask,
                                         width=ec_border_mask_width,
                                         labels=classes_number)

                    ec_input = [scan]
                    ec_input.extend(sample.add)

                    if classes_number > 2 and (not ec_use_raw):
                        split_labels(out_seg_reg, classes_number, ec_input_prefix,
                                     antialias=ec_antialias_labels,
                                     blur=ec_blur_labels,
                                     expit=ec_expit_labels,
                                     normalize=ec_normalize_labels)
                        ec_input.extend(['{}_{:02d}.mnc'.format(ec_input_prefix, i)
                                         for i in range(classes_number)])  # skip background feature?
                    else:
                        # use the auto segmentation itself as a feature
                        ec_input.append(out_seg_reg)

                    output_info['out_seg_ec'] = out_seg_ec

                    if ec_split is None:
                        if ec_variant is not None:
                            out_seg_ec_errors1 = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_' + regularize_variant + '_' + ec_variant + '_error1.mnc'
                            out_seg_ec_errors2 = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_' + regularize_variant + '_' + ec_variant + '_error2.mnc'
                            output_info['out_seg_ec_errors1'] = out_seg_ec_errors1
                            output_info['out_seg_ec_errors2'] = out_seg_ec_errors2

                        errorCorrectionApply(ec_input, out_seg_ec,
                                             input_mask=train_mask,
                                             parameters=ec_options,
                                             input_auto=out_seg_reg,
                                             debug=debug,
                                             multilabel=classes_number,
                                             debug_files=[out_seg_ec_errors1,
                                                          out_seg_ec_errors2])
                    else:
                        results = []
                        parts = []
                        for s in range(ec_split):
                            out = '{}_part_{:d}.mnc'.format(ec_input_prefix, s)
                            train_part = ec_options['training'].rsplit('.pickle', 1)[0] + '_' + str(s) + '.pickle'
                            ec_options_part = copy.deepcopy(ec_options)
                            ec_options_part['training'] = train_part

                            if ec_variant is not None:
                                out_seg_ec_errors1 = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_' + regularize_variant + '_' + ec_variant + '_error1_' + str(s) + '.mnc'
                                out_seg_ec_errors2 = work_dir + os.sep + dataset_name + '_' + fuse_variant + '_' + regularize_variant + '_' + ec_variant + '_error2_' + str(s) + '.mnc'

                            parts.append(out)
                            results.append(futures.submit(
                                errorCorrectionApply, ec_input, out,
                                input_mask=train_mask,
                                parameters=ec_options_part,
                                input_auto=out_seg_reg,
                                debug=debug,
                                partition=ec_split,
                                part=s,
                                multilabel=classes_number,
                                debug_files=[out_seg_ec_errors1,
                                             out_seg_ec_errors2]))
                        futures.wait(results, return_when=futures.ALL_COMPLETED)
                        merge_segmentations(parts, out_seg_ec, ec_split, ec_options)

        return output_info

    except mincError as e:
        print("Exception in fuse_segmentations:{}".format(str(e)))
        traceback.print_exc(file=sys.stdout)
        raise
    except:
        print("Exception in fuse_segmentations:{}".format(sys.exc_info()[0]))
        traceback.print_exc(file=sys.stdout)
        raise
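# fuse_options above is read entirely with .get(); a plausible example dict
# covering the patch-based branch (values are illustrative, not tuned):
example_fuse_options = {
    'patch': 2,          # patch radius; patch == search == 0 selects majority voting
    'search': 2,         # search radius
    'threshold': 0.0,
    'iter': 3,
    'nnls': False,       # non-negative least-squares patch weighting
    'new': True,         # use 'itk_patch_segmentation --exp' instead of itk_patch_morphology
    'gco': False,        # graph-cut regularization of the fused labels
    'gco_wlabel': 1.0,
    'gco_wdata': 1.0,
    'gco_epsilon': 1e-4,
}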
def main():
    options = parse_options()

    if options.source is not None and \
       options.library is not None and \
       options.output is not None:

        source_parameters = {}
        try:
            with open(options.source, 'r') as f:
                source_parameters = yaml.load(f, Loader=yaml.FullLoader)
        except:
            print("Error loading configuration:{} {}\n".format(
                options.source, sys.exc_info()[0]), file=sys.stderr)
            traceback.print_exc(file=sys.stderr)
            exit(1)

        library = SegLibrary(options.library)
        samples = source_parameters['library']
        build_symmetric = source_parameters.get('build_symmetric', False)

        # load csv file, unless samples were given as an inline list
        if not isinstance(samples, list):
            with open(samples, 'r') as f:
                samples = list(csv.reader(f))

        n_samples = len(samples)

        if not os.path.exists(options.output):
            os.makedirs(options.output)

        pca_int = None
        pca_grid = None

        if options.intpca is not None:
            pca_int = pca_lib(options.intpca)
        if options.gridpca is not None:
            pca_grid = pca_lib(options.gridpca)

        outputs = []
        print(repr(samples))
        # print(repr(pca_grid.lib))

        for i, j in enumerate(samples):
            # submit jobs to produce the augmented dataset
            outputs.append(futures.submit(
                gen_sample, library, options, source_parameters, j,
                idx=i, pca_grid=pca_grid, pca_int=pca_int))
            # flipped version
            if build_symmetric:
                outputs.append(futures.submit(
                    gen_sample, library, options, source_parameters, j,
                    idx=i, flip=True, pca_grid=pca_grid, pca_int=pca_int))

        futures.wait(outputs, return_when=futures.ALL_COMPLETED)

        # generate a new library for the augmented samples
        augmented_library = library
        # wipe all the samples
        augmented_library.library = []

        for j in outputs:
            for k in j.result():
                # remove the _scan_xxx.mnc part from the id, to indicate that
                # the augmented sample still comes from the original ID
                augmented_library.library.append(
                    LibEntry(k, relpath=options.output,
                             ent_id=os.path.basename(k[0]).rsplit('_', 2)[0]))

        # save new library description
        # save_library_info(augmented_library, options.output)
        print("Saving to {}".format(options.output))
        augmented_library.save(options.output)
    else:
        print("Run with --help")
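# The 'library' entry in the source YAML may be an inline list of rows or a
# path to a csv file; a sketch of the guard used in main() above, with a
# made-up two-column layout (scan, segmentation):
#
#   subj01_t1.mnc,subj01_seg.mnc
#   subj02_t1.mnc,subj02_seg.mnc
import csv

def load_samples(samples):
    # accept an inline list, or read rows from a csv file path
    if not isinstance(samples, list):
        with open(samples, 'r') as f:
            samples = list(csv.reader(f))
    return samples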
def generate_library(parameters, output, debug=False, cleanup=False):
    '''Actual generation of the segmentation library'''
    try:
        if debug:
            print(repr(parameters))

        # read parameters
        reference_model = parameters['reference_model']
        reference_mask = parameters.get('reference_mask', None)
        reference_model_add = parameters.get('reference_model_add', [])

        reference_local_model = parameters.get('reference_local_model', None)
        reference_local_mask = parameters.get('reference_local_mask', None)
        reference_local_model_flip = parameters.get('reference_local_model_flip', None)
        reference_local_mask_flip = parameters.get('reference_local_mask_flip', None)

        library = parameters['library']
        work_dir = parameters.get('workdir', output + os.sep + 'work')

        # should we build a symmetric model?
        build_symmetric = parameters.get('build_symmetric', False)
        # should we build a symmetric flipped model?
        build_symmetric_flip = parameters.get('build_symmetric_flip', False)

        # lookup table for renaming labels into a more compact representation
        build_remap = parameters.get('build_remap', {})
        # the same, applied when building the symmetrized library
        build_flip_remap = parameters.get('build_flip_remap', {})
        # the same, applied to undo flipping when building the symmetrized library
        build_unflip_remap = parameters.get('build_unflip_remap', {})

        if not build_unflip_remap and build_flip_remap and build_remap:
            build_unflip_remap = create_unflip_remap(build_remap, build_flip_remap)

        # label map
        label_map = parameters.get('label_map', None)
        classes_number = parameters.get('classes', 2)

        # filtering applied before / after the main stages of library creation
        pre_filters = parameters.get('pre_filters', None)
        post_filters = parameters.get('post_filters', parameters.get('filters', None))

        # resampling order for scans
        resample_order = parameters.get('resample_order', 2)
        # use boundary anti-aliasing filter when resampling labels
        resample_baa = parameters.get('resample_baa', True)

        # extend bounding box to reduce boundary effects
        extend_boundary = parameters.get('extend_boundary', 4)

        # mask morphology operations
        # dilate_mask = parameters.get('dilate_mask', 3)
        op_mask = parameters.get('op_mask', 'E[2] D[4]')

        # if linear registration should be performed
        do_initial_register = parameters.get('initial_register',
                                             parameters.get('linear_register', {}))

        if do_initial_register is not None and isinstance(do_initial_register, dict):
            initial_register = do_initial_register
            do_initial_register = True
        else:
            initial_register = {}

        initial_reg_type = parameters.get(
            'initial_register_type',
            parameters.get('linear_register_type',
                           initial_register.get('type', '-lsq12')))
        initial_reg_ants = parameters.get(
            'initial_register_ants',
            parameters.get('linear_register_ants', False))
        initial_reg_options = parameters.get(
            'initial_register_options',
            initial_register.get('options', None))
        initial_reg_downsample = parameters.get(
            'initial_register_downsample',
            initial_register.get('downsample', None))
        initial_reg_use_mask = parameters.get(
            'initial_register_use_mask',
            initial_register.get('use_mask', False))
        initial_reg_objective = initial_register.get('objective', '-xcorr')

        # perform local linear registration
        do_initial_local_register = parameters.get(
            'initial_local_register',
            parameters.get('local_linear_register', {}))
        if do_initial_local_register is not None and isinstance(do_initial_local_register, dict):
            initial_local_register = do_initial_local_register
            do_initial_local_register = True
        else:
            initial_local_register = {}

        local_reg_type = parameters.get(
            'local_register_type',
            initial_local_register.get('type', '-lsq12'))
        local_reg_ants = parameters.get('local_register_ants', False)
        local_reg_opts = parameters.get(
            'local_register_options',
            initial_local_register.get('options', None))
        local_reg_bbox = parameters.get(
            'local_register_bbox',
            initial_local_register.get('bbox', False))
        local_reg_downsample = parameters.get(
            'local_register_downsample',
            initial_local_register.get('downsample', None))
        local_reg_use_mask = parameters.get(
            'local_register_use_mask',
            initial_local_register.get('use_mask', True))
        local_reg_objective = initial_local_register.get('objective', '-xcorr')

        # if non-linear registration should be performed for library creation
        do_nonlinear_register = parameters.get('non_linear_register', False)
        # if non-linear registration should be performed with ANTs
        do_nonlinear_register_ants = parameters.get('non_linear_register_ants', False)
        nonlinear_register_type = parameters.get('non_linear_register_type', None)
        if nonlinear_register_type is None:
            if do_nonlinear_register_ants:
                nonlinear_register_type = 'ants'

        nlreg_level = parameters.get('non_linear_register_level', 2)
        nlreg_start = parameters.get('non_linear_register_start', 16)
        nlreg_options = parameters.get('non_linear_register_options', None)
        nlreg_downsample = parameters.get('non_linear_register_downsample', None)

        modalities = parameters.get('modalities', 1) - 1

        create_patch_norm_lib = parameters.get('create_patch_norm_lib', False)
        patch_norm_lib_pct = parameters.get('patch_norm_lib_pct', 0.1)
        patch_norm_lib_sub = parameters.get('patch_norm_lib_sub', 1)
        patch_norm_lib_patch = parameters.get('patch_norm_lib_patch', 2)  # 5x5x5 patches

        use_fake_masks = parameters.get('fake_mask', False)

        # prepare directories
        if not os.path.exists(output):
            os.makedirs(output)
        if not os.path.exists(work_dir):
            os.makedirs(work_dir)
        # 0. go over input samples, prepare variables
        input_samples = []
        filtered_samples = []
        lin_xfm = []
        lin_samples = []
        tmp_lin_samples = []
        bbox_lin_xfm = []
        final_samples = []
        warped_samples = []
        final_transforms = []
        tmp_log_samples = []

        patch_norm_db = output + os.sep + 'patch_norm.db'
        patch_norm_idx = output + os.sep + 'patch_norm.idx'

        # identity xfm
        identity_xfm = MriTransform(prefix=work_dir, name='identity')
        with mincTools() as m:
            m.param2xfm(identity_xfm.xfm)
            m.param2xfm(identity_xfm.xfm_f)

        # if library is not a list, assume it's a reference to a csv file
        if not isinstance(library, list):
            with open(library, 'r') as f:
                library = list(csv.reader(f))

        # setup files
        model = MriDataset(scan=reference_model, mask=reference_mask,
                           add=reference_model_add)

        for (j, i) in enumerate(library):
            scan = i[0]
            seg = i[1]
            mask = None
            add = i[2:modalities + 2]  # additional modalities

            if len(i) > modalities + 2:
                # assume that the extra file is a subject-specific mask
                mask = i[modalities + 2]
            elif use_fake_masks:
                # create mask from segmentation
                mask = work_dir + os.sep + 'fake_mask_' + os.path.basename(scan)
                create_fake_mask(seg, mask, op=op_mask)

            sample = MriDataset(scan=scan, seg=seg, mask=mask, protect=True, add=add)
            input_samples.append(sample)
            filtered_samples.append(MriDataset(prefix=work_dir, name='flt_' + sample.name, add_n=modalities))
            lin_xfm.append(MriTransform(prefix=work_dir, name='lin_' + sample.name))
            bbox_lin_xfm.append(MriTransform(prefix=work_dir, name='lin_bbox_' + sample.name))
            lin_samples.append(MriDataset(prefix=work_dir, name='lin_' + sample.name, add_n=modalities))
            tmp_lin_samples.append(MriDataset(prefix=work_dir, name='tmp_lin_' + sample.name, add_n=modalities))
            tmp_log_samples.append(MriDataset(prefix=work_dir, name='tmp_log_' + sample.name))
            final_samples.append(MriDataset(prefix=output, name=sample.name, add_n=modalities))
            warped_samples.append(MriDataset(prefix=output, name='nl_' + sample.name, add_n=modalities))
            final_transforms.append(MriTransform(prefix=output, name='nl_' + sample.name))

        # temp array
        results = []

        if pre_filters is not None:
            # apply pre-filtering before other stages
            filter_all = []
            for (j, i) in enumerate(input_samples):
                # a HACK? segmentation and mask are passed through unfiltered
                filtered_samples[j].seg = input_samples[j].seg
                filtered_samples[j].mask = input_samples[j].mask
                filter_all.append(futures.submit(
                    filter_sample, input_samples[j], filtered_samples[j],
                    pre_filters, model=model))
            futures.wait(filter_all, return_when=futures.ALL_COMPLETED)
        else:
            filtered_samples = input_samples

        if build_symmetric:
            # need to flip the inputs
            flipdir = work_dir + os.sep + 'flip'
            if not os.path.exists(flipdir):
                os.makedirs(flipdir)

            flip_all = []
            labels_datatype = 'short'  # TODO: determine optimal datatype here
            # if largest_label > 255: labels_datatype = 'short'

            for (j, i) in enumerate(filtered_samples):
                i.scan_f = flipdir + os.sep + os.path.basename(i.scan)
                i.add_f = []
                for k in range(len(i.add)):  # avoid shadowing the outer loop variable
                    i.add_f.append(flipdir + os.sep + os.path.basename(i.add[k]))
                if i.mask is not None:
                    i.mask_f = flipdir + os.sep + 'mask_' + os.path.basename(i.scan)
                else:
                    i.mask_f = None
                flip_all.append(futures.submit(
                    generate_flip_sample, i, labels_datatype=labels_datatype))
            futures.wait(flip_all, return_when=futures.ALL_COMPLETED)
        # 1. run global linear registration if needed
        if do_initial_register:
            for (j, i) in enumerate(filtered_samples):
                if initial_reg_type == 'elx' or initial_reg_type == 'elastix':
                    results.append(futures.submit(
                        elastix_registration, i, model, lin_xfm[j],
                        symmetric=build_symmetric,
                        parameters=initial_reg_options,
                        downsample=initial_reg_downsample,
                        use_mask=initial_reg_use_mask))
                elif initial_reg_type == 'ants' or initial_reg_ants:
                    results.append(futures.submit(
                        linear_registration, i, model, lin_xfm[j],
                        symmetric=build_symmetric,
                        linreg=initial_reg_options,
                        ants=True,
                        downsample=initial_reg_downsample,
                        use_mask=initial_reg_use_mask))
                else:
                    results.append(futures.submit(
                        linear_registration, i, model, lin_xfm[j],
                        symmetric=build_symmetric,
                        reg_type=initial_reg_type,
                        linreg=initial_reg_options,
                        downsample=initial_reg_downsample,
                        use_mask=initial_reg_use_mask,
                        objective=initial_reg_objective))

            # TODO: do we really need to wait for the results here?
            futures.wait(results, return_when=futures.ALL_COMPLETED)
            # TODO: determine if we need to resample input files here
            # lin_samples = input_samples

        # 2. for each part run linear registration, apply flip and do symmetric too
        # 3. perform local linear registration and local intensity normalization if needed
        # create a local reference model
        local_model = None
        local_model_ovl = None
        local_model_avg = None
        local_model_sd = None

        if reference_local_model is None:
            local_model = MriDataset(prefix=output, name='local_model', add_n=modalities)
            local_model_ovl = MriDataset(prefix=output, name='local_model_ovl')
            local_model_avg = MriDataset(prefix=output, name='local_model_avg', add_n=modalities)
            local_model_sd = MriDataset(prefix=output, name='local_model_sd', add_n=modalities)

            if not os.path.exists(local_model.scan):
                for (j, i) in enumerate(filtered_samples):
                    xfm = None
                    if do_initial_register:
                        xfm = lin_xfm[j]
                    results.append(futures.submit(
                        warp_rename_seg, i, model, tmp_lin_samples[j],
                        transform=xfm,
                        symmetric=build_symmetric,
                        symmetric_flip=build_symmetric,
                        lut=build_remap,
                        flip_lut=build_flip_remap,
                        resample_order=0,
                        resample_baa=False,
                        create_mask=use_fake_masks,
                        op_mask=op_mask))
                futures.wait(results, return_when=futures.ALL_COMPLETED)
                create_local_model(tmp_lin_samples, model, local_model,
                                   extend_boundary=extend_boundary, op=op_mask)

            if not os.path.exists(local_model.scan_f) and build_symmetric and build_symmetric_flip:
                create_local_model_flip(local_model, model,
                                        remap=build_unflip_remap, op=op_mask)
        else:
            local_model = MriDataset(scan=reference_local_model,
                                     mask=reference_local_mask)
            local_model.scan_f = reference_local_model_flip
            local_model.mask_f = reference_local_mask_flip

        if do_initial_local_register:
            for (j, i) in enumerate(filtered_samples):
                init_xfm = None
                if do_initial_register:
                    init_xfm = lin_xfm[j]

                if local_reg_type == 'elx' or local_reg_type == 'elastix':
                    results.append(futures.submit(
                        elastix_registration, i, local_model, bbox_lin_xfm[j],
                        init_xfm=init_xfm,
                        symmetric=build_symmetric,
                        parameters=local_reg_opts,
                        bbox=local_reg_bbox,
                        downsample=local_reg_downsample,
                        use_mask=local_reg_use_mask))
                elif local_reg_type == 'ants' or local_reg_ants:
                    results.append(futures.submit(
                        linear_registration, i, local_model, bbox_lin_xfm[j],
                        init_xfm=init_xfm,
                        symmetric=build_symmetric,
                        reg_type=local_reg_type,
                        linreg=local_reg_opts,
                        close=True,
                        ants=True,
                        bbox=local_reg_bbox,
                        downsample=local_reg_downsample,
                        use_mask=local_reg_use_mask))
                else:
                    if not do_initial_register:
                        init_xfm = identity_xfm  # to avoid strange initialization errors
                    results.append(futures.submit(
                        linear_registration, i,
                        local_model, bbox_lin_xfm[j],
                        init_xfm=init_xfm,
                        symmetric=build_symmetric,
                        reg_type=local_reg_type,
                        linreg=local_reg_opts,
                        close=True,
                        bbox=local_reg_bbox,
                        downsample=local_reg_downsample,
                        use_mask=local_reg_use_mask,
                        objective=local_reg_objective))

            # TODO: do we really need to wait for the results here?
            futures.wait(results, return_when=futures.ALL_COMPLETED)
        else:
            bbox_lin_xfm = lin_xfm

        # create bbox samples
        results = []
        for (j, i) in enumerate(filtered_samples):
            xfm = None
            if i.mask is None:
                final_samples[j].mask = None
                final_samples[j].mask_f = None
            if do_initial_local_register or do_initial_register:
                xfm = bbox_lin_xfm[j]

            results.append(futures.submit(
                warp_rename_seg, i, local_model, final_samples[j],
                transform=xfm,
                symmetric=build_symmetric,
                symmetric_flip=build_symmetric,
                lut=build_remap,
                flip_lut=build_flip_remap,
                resample_order=resample_order,
                resample_baa=resample_baa,
                create_mask=use_fake_masks,
                op_mask=op_mask))
        futures.wait(results, return_when=futures.ALL_COMPLETED)

        results = []
        for (j, i) in enumerate(filtered_samples):
            xfm = None
            if do_initial_local_register or do_initial_register:
                xfm = bbox_lin_xfm[j]

            results.append(futures.submit(
                warp_sample, i, local_model, final_samples[j],
                transform=xfm,
                symmetric=build_symmetric,
                symmetric_flip=build_symmetric,
                resample_order=resample_order,
                filters=post_filters))
        futures.wait(results, return_when=futures.ALL_COMPLETED)

        if create_patch_norm_lib:
            create_patch_norm_db(final_samples, patch_norm_db, patch_norm_idx,
                                 pct=patch_norm_lib_pct,
                                 sub=patch_norm_lib_sub,
                                 patch=patch_norm_lib_patch)

        results = []
        if do_nonlinear_register:
            for (j, i) in enumerate(final_samples):
                # TODO: decide what to do with the mask
                i.mask = None

                if nonlinear_register_type == 'elx' or nonlinear_register_type == 'elastix':
                    results.append(futures.submit(
                        elastix_registration, i, local_model, final_transforms[j],
                        symmetric=build_symmetric,
                        level=nlreg_level,
                        parameters=nlreg_options,
                        output_sample=warped_samples[j],
                        warp_seg=True,
                        resample_order=resample_order,
                        resample_baa=resample_baa,
                        nl=True,
                        downsample=nlreg_downsample))
                elif nonlinear_register_type == 'ants' or do_nonlinear_register_ants:
                    results.append(futures.submit(
                        non_linear_registration, i, local_model, final_transforms[j],
                        symmetric=build_symmetric,
                        level=nlreg_level,
                        parameters=nlreg_options,
                        output_sample=warped_samples[j],
                        warp_seg=True,
                        resample_order=resample_order,
                        resample_baa=resample_baa,
                        ants=True,
                        downsample=nlreg_downsample))
                else:
                    results.append(futures.submit(
                        non_linear_registration, i, local_model, final_transforms[j],
                        symmetric=build_symmetric,
                        level=nlreg_level,
                        parameters=nlreg_options,
                        output_sample=warped_samples[j],
                        warp_seg=True,
                        resample_order=resample_order,
                        resample_baa=resample_baa,
                        ants=False,
                        downsample=nlreg_downsample))
                final_samples[j].mask = None

            # TODO: do we really need to wait for the results here?
            futures.wait(results, return_when=futures.ALL_COMPLETED)

            with mincTools() as m:
                # a hack to replace the rough model with a new one
                if os.path.exists(local_model.seg):
                    os.unlink(local_model.seg)

                # create majority-voted model segmentation, for ANIMAL segmentation if needed
                segs = ['multiple_volume_similarity']
                segs.extend([i.seg for i in warped_samples])
                if build_symmetric:
                    segs.extend([i.seg_f for i in warped_samples])
                segs.extend(['--majority', local_model.seg, '--bg',
                             '--overlap', local_model_ovl.scan])
                m.command(segs, inputs=[],
                          outputs=[local_model.seg, local_model_ovl.scan])

                avg = ['mincaverage', '-float']
                avg.extend([i.scan for i in warped_samples])
                if build_symmetric:
                    avg.extend([i.scan_f for i in warped_samples])
                avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan])
                m.command(avg, inputs=[],
                          outputs=[local_model_avg.scan, local_model_sd.scan])

                for i in range(modalities):
                    avg = ['mincaverage', '-float']
                    avg.extend([j.add[i] for j in warped_samples])
                    if build_symmetric:
                        avg.extend([j.add_f[i] for j in warped_samples])
                    avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i]])
                    m.command(avg, inputs=[],
                              outputs=[local_model_avg.add[i], local_model_sd.add[i]])
        else:
            with mincTools() as m:
                # a hack to replace the rough model with a new one
                if os.path.exists(local_model.seg):
                    os.unlink(local_model.seg)

                # create majority-voted model segmentation, for ANIMAL segmentation if needed
                segs = ['multiple_volume_similarity']
                segs.extend([i.seg for i in final_samples])
                if build_symmetric:
                    segs.extend([i.seg_f for i in final_samples])
                segs.extend(['--majority', local_model.seg, '--bg',
                             '--overlap', local_model_ovl.scan])
                m.command(segs, inputs=[],
                          outputs=[local_model.seg, local_model_ovl.scan])

                avg = ['mincaverage', '-float']
                avg.extend([i.scan for i in final_samples])
                if build_symmetric:
                    avg.extend([i.scan_f for i in final_samples])
                avg.extend([local_model_avg.scan, '-sdfile', local_model_sd.scan])
                m.command(avg, inputs=[],
                          outputs=[local_model_avg.scan, local_model_sd.scan])

                for i in range(modalities):
                    avg = ['mincaverage', '-float']
                    avg.extend([j.add[i] for j in final_samples])
                    if build_symmetric:
                        avg.extend([j.add_f[i] for j in final_samples])
                    avg.extend([local_model_avg.add[i], '-sdfile', local_model_sd.add[i]])
                    m.command(avg, inputs=[],
                              outputs=[local_model_avg.add[i], local_model_sd.add[i]])

        # number of classes including bg
        # classes_number = 2
        # 6. create training library description
        # with mincTools() as m:
        #     classes_number = int(m.execute_w_output(
        #         ['mincstats', '-q', '-max', local_model.seg]).rstrip("\n")) + 1

        library_description = {}

        # library models
        library_description['model'] = model.scan
        library_description['model_mask'] = model.mask
        library_description['model_add'] = model.add

        library_description['local_model'] = local_model.scan
        library_description['local_model_add'] = local_model.add
        library_description['local_model_mask'] = local_model.mask
        library_description['local_model_seg'] = local_model.seg
        library_description['local_model_avg'] = local_model_avg.scan
        library_description['local_model_ovl'] = local_model_ovl.scan
        library_description['local_model_sd'] = local_model_sd.scan

        # library parameters
        library_description['map'] = inv_dict(dict(build_remap))
        library_description['classes_number'] = classes_number
        library_description['nl_samples_avail'] = do_nonlinear_register
        library_description['modalities'] = modalities + 1

        largest_label = max(library_description['map'].values(), key=lambda p: int(p))
        library_description['seg_datatype'] = 'short'
        if largest_label <= 255:
            library_description['seg_datatype'] = 'byte'

        library_description['gco_energy'] = output + os.sep + 'gco_energy.csv'
        estimate_gco_energy(final_samples, library_description['gco_energy'],
                            classes=classes_number)
        library_description['label_map'] = label_map

        if build_symmetric and build_symmetric_flip:
            library_description['local_model_flip'] = local_model.scan_f
            library_description['local_model_add_flip'] = local_model.add_f
            library_description['local_model_mask_flip'] = local_model.mask_f
            library_description['local_model_seg_flip'] = local_model.seg_f
            library_description['flip_map'] = inv_dict(dict(build_flip_remap))
        else:
            library_description['local_model_flip'] = None
            library_description['local_model_add_flip'] = []
            library_description['local_model_mask_flip'] = None
            library_description['local_model_seg_flip'] = None  # for consistency with the flipped branch
            library_description['flip_map'] = {}

        library_description['library'] = []
        for (j, i) in enumerate(final_samples):
            ss = [i.scan, i.seg]
            ss.extend(i.add)
            if do_nonlinear_register:
                ss.extend([final_transforms[j].xfm, final_transforms[j].xfm_inv,
                           warped_samples[j].scan, warped_samples[j].seg])
            library_description['library'].append(ss)

            if build_symmetric:
                ss = [i.scan_f, i.seg_f]
                ss.extend(i.add_f)
                if do_nonlinear_register:
                    ss.extend([final_transforms[j].xfm_f, final_transforms[j].xfm_f_inv,
                               warped_samples[j].scan_f, warped_samples[j].seg_f])
                library_description['library'].append(ss)

        save_library_info(library_description, output)

        # cleanup
        if cleanup:
            shutil.rmtree(work_dir)

    except mincError as e:
        print("Exception in generate_library:{}".format(str(e)), file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        raise
    except:
        print("Exception in generate_library:{}".format(sys.exc_info()[0]), file=sys.stderr)
        traceback.print_exc(file=sys.stderr)
        raise
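# A minimal parameter set for generate_library(), using only keys the function
# reads above; file names are placeholders and values are illustrative:
example_parameters = {
    'reference_model': 'model/model_t1.mnc',
    'reference_mask': 'model/model_mask.mnc',
    'library': 'library.csv',       # or an inline list of [scan, seg, ...] rows
    'classes': 3,                   # number of classes, including background
    'build_symmetric': False,
    'non_linear_register': True,
    'non_linear_register_level': 2,
    'resample_order': 2,
    'resample_baa': True,
    'modalities': 1,
}
# generate_library(example_parameters, 'out_library', debug=True)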
def multi_repeat(n, funcs):
    fs = [futures.submit(func) for func in funcs for _ in range(n)]
    futures.wait(fs)
    return [f.result() for f in fs]
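# Example use of multi_repeat with two hypothetical callables: results come
# back grouped by callable (n results of `roll`, then n of `flip`).
# Under SCOOP the script would be launched as: python -m scoop script.py
import random

def roll():
    return random.randint(1, 6)

def flip():
    return random.choice(['H', 'T'])

# multi_repeat(3, [roll, flip])  # e.g. [4, 2, 6, 'H', 'T', 'H']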
# (fragment: tail of a loop submitting pairwise registrations over (i, k);
#  the enclosing for-loops and the first futures.submit( are not shown)
                    generate_xfm_direct_ANTS_CC, i, k,
                    mri[i], mri[k], mask[i], mask[k],
                    seg[i], seg[k],
                    output + os.sep + 'C_{:02d}_{:02d}'.format(i, k)))

        if not os.path.exists(output + os.sep + 'D_{:02d}_{:02d}_map.xfm'.format(i, k)):
            rr.append(futures.submit(
                generate_xfm_direct_ANTS_MI, i, k,
                mri[i], mri[k], mask[i], mask[k],
                seg[i], seg[k],
                output + os.sep + 'D_{:02d}_{:02d}'.format(i, k)))

        if not os.path.exists(output + os.sep + 'E_{:02d}_{:02d}_map.xfm'.format(i, k)):
            rr.append(futures.submit(
                generate_xfm_direct_elastix_cc, i, k,
                mri[i], mri[k], mask[i], mask[k],
                seg[i], seg[k],
                output + os.sep + 'E_{:02d}_{:02d}'.format(i, k)))

        if not os.path.exists(output + os.sep + 'F_{:02d}_{:02d}_map.xfm'.format(i, k)):
            rr.append(futures.submit(
                generate_xfm_direct_elastix_mi, i, k,
                mri[i], mri[k], mask[i], mask[k],
                seg[i], seg[k],
                output + os.sep + 'F_{:02d}_{:02d}'.format(i, k)))

    futures.wait(rr, return_when=futures.ALL_COMPLETED)

# kate: space-indent on; indent-width 4; indent-mode python; replace-tabs on; word-wrap-column 80; show-tabs on
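# A condensed sketch of the pairwise fan-out above, for a single registration
# flavour: skip pairs whose output map already exists, submit the rest, then
# wait. `register` is a hypothetical stand-in for the generate_xfm_direct_*
# helpers and the 'X_' prefix is arbitrary.
import os
from scoop import futures

def submit_pairwise(register, mri, output):
    rr = []
    for i in range(len(mri)):
        for k in range(len(mri)):
            if i == k:
                continue
            base = output + os.sep + 'X_{:02d}_{:02d}'.format(i, k)
            if not os.path.exists(base + '_map.xfm'):
                rr.append(futures.submit(register, i, k, mri[i], mri[k], base))
    futures.wait(rr, return_when=futures.ALL_COMPLETED)
    return rr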