def _run_interface(self, runtime):
    """Assemble two ITK transform files into one composite transform.

    Two plain affine inputs (.mat/.txt) yield composite_affine.mat in the
    CWD; if either input is an HDF5 warp (.h5) the result is
    composite_warp.h5. The assembled file path is stored in out_file.
    """
    affine_exts = ['.mat', '.txt']
    # Extensions of the two input transforms decide the output format.
    ext_1 = splitext(self.inputs.transform_1)[1]
    ext_2 = splitext(self.inputs.transform_2)[1]
    if ext_1 in affine_exts and ext_2 in affine_exts:
        self.inputs.out_file = os.path.join(os.getcwd(), 'composite_affine.mat')
    elif '.h5' in (ext_1, ext_2):
        self.inputs.out_file = os.path.join(os.getcwd(), 'composite_warp.h5')
    assemble_args = [
        self.inputs.out_file,
        self.inputs.transform_1,
        self.inputs.transform_2,
    ]
    cmd("CompositeTransformUtil --assemble " + ' '.join(assemble_args))
    return runtime
def _run_interface(self, runtime):
    """Binarize a PET volume with a two-stage adaptive threshold.

    Stage 1: each slice along axis 0 gets a threshold of
    slice_factor * its max value, and the mean of its supra-threshold
    voxels is computed. Stage 2: the mean of those slice means, scaled
    by total_factor, is the final binarization threshold. The binary
    mask is written to out_file.
    """
    if not isdefined(self.inputs.out_file):
        base = os.path.basename(self.inputs.in_file)
        split = splitext(base)
        self.inputs.out_file = os.getcwd(
        ) + os.sep + split[0] + self._suffix + split[1]

    # Load PET 3D volume (modern nibabel API: get_shape()/get_data()/
    # get_affine() were deprecated and removed in nibabel >= 3).
    infile = nib.load(self.inputs.in_file)
    zmax = infile.shape[2]
    data = np.asanyarray(infile.dataobj)

    # Get max slice values and multiply by slice_factor (0.25 by default).
    # NOTE(review): the maxima are taken over axes (1, 2), i.e. indexed
    # along axis 0, but zmax is shape[2]; these only line up when the
    # first and third dimensions match — confirm the intended axis.
    slice_thresholds = np.amax(data, axis=(1, 2)) * self.inputs.slice_factor

    # Mean of supra-threshold voxels per slice; a slice with no voxel
    # above its threshold yields nan and is dropped below.
    slice_mean_f = lambda t, d, i: float(np.mean(d[i, d[i, :, :] > t[i]]))
    slice_mean = np.array(
        [slice_mean_f(slice_thresholds, data, i) for i in range(zmax)])
    slice_mean = slice_mean[~np.isnan(slice_mean)]

    # Overall mean of the thresholded slice means, scaled to the final
    # binarization threshold.
    overall_mean = np.mean(slice_mean)
    threshold = overall_mean * self.inputs.total_factor

    # Apply threshold in place and write the binary mask.
    idx = data >= threshold
    data[idx] = 1
    data[~idx] = 0
    outfile = nib.Nifti1Image(data, infile.affine)
    # BUG FIX: nibabel images expose to_filename(), not to_file(); the
    # original call would raise AttributeError.
    outfile.to_filename(self.inputs.out_file)
    return runtime
def _run_interface(self, runtime):
    """Apply up to three optional spatial transforms to input_image.

    Defined transforms are passed to ANTs ApplyTransforms in order
    1 -> 2 -> 3 with their matching per-transform inversion flags;
    output_image is set to <cwd>/<input stem>_trans<ext>, which is where
    ApplyTransforms writes its default output.
    """
    transforms = []
    invert_transform_flags = []
    # Collect whichever of the three optional transforms are defined,
    # keeping order and pairing each with its invert flag (replaces the
    # original copy-pasted per-transform blocks).
    for i in (1, 2, 3):
        transform = getattr(self.inputs, 'transform_%d' % i)
        if isdefined(transform):
            transforms.append(transform)
            invert_transform_flags.append(getattr(self.inputs, 'invert_%d' % i))

    cmd = ApplyTransforms()
    cmd.inputs.transforms = transforms
    cmd.inputs.invert_transform_flags = invert_transform_flags
    cmd.inputs.reference_image = self.inputs.reference_image
    cmd.inputs.input_image = self.inputs.input_image
    cmd.inputs.interpolation = self.inputs.interpolation
    cmd.run()

    # Record ApplyTransforms' default output path as this node's output.
    split = splitext(os.path.basename(self.inputs.input_image))
    self.inputs.output_image = os.getcwd(
    ) + os.sep + split[0] + '_trans' + split[1]
    return runtime
def _run_interface(self, runtime):
    """Extract TACs with Turku's img2dft, then rewrite the .dft file's
    frame-time columns from the BIDS JSON PET header.
    """
    # img2dft cannot read gzipped volumes; check_gz provides a
    # decompressed copy when needed — TODO confirm against helper.
    in_file = check_gz(self.inputs.in_file)
    mask_file = check_gz(self.inputs.mask_file)
    img2dft = img2dftCommand()
    img2dft.inputs.in_file = in_file
    img2dft.inputs.mask_file = mask_file
    img2dft.inputs.out_file = os.getcwd() + os.sep + splitext(
        os.path.basename(self.inputs.in_file))[0] + '.dft'
    img2dft.run()
    print(img2dft.cmdline)
    header = json.load(open(self.inputs.pet_header_json, 'r'))
    frame_times = header["Time"]["FrameTimes"]["Values"]
    # Time unit conversion variables. Time should be in seconds
    # NOTE(review): c0 scales frame start times, c1 frame end times, and
    # the factors (s -> /60, h -> *60) convert to *minutes* — the header
    # row is also relabelled 'sec' -> 'min' below, which contradicts the
    # comment above; confirm the intended target unit.
    c0 = c1 = 1.
    if header["Time"]["FrameTimes"]["Units"][0] == 's':
        c0 = 1. / 60
    elif header["Time"]["FrameTimes"]["Units"][0] == 'h':
        c0 = 60.
    if header["Time"]["FrameTimes"]["Units"][1] == 's':
        c1 = 1 / 60.
    elif header["Time"]["FrameTimes"]["Units"][1] == 'h':
        c1 = 60.
    # line_counter stays -1 until the 'Times' header row is seen; after
    # that it indexes the current frame (data) row.
    line_counter = -1
    newlines = ''
    with open(img2dft.inputs.out_file, 'r') as f:
        for line in f.readlines():
            print(line)
            if 'Times' in line:
                line_counter = 0
                # Relabel the time unit in the header row.
                line = line.replace('sec', 'min')
                newlines += line
                continue
            if line_counter >= 0:
                # Data row: overwrite the first two tab-separated columns
                # (frame start/end) with converted JSON frame times. The
                # last split element keeps its trailing newline, so the
                # join preserves line structure.
                line_split = line.split('\t')
                line_split[0] = frame_times[line_counter][0] * c0
                line_split[1] = frame_times[line_counter][1] * c1
                line_split_str = [str(i) for i in line_split]
                newlines += '\t'.join(line_split_str)
                line_counter += 1
            else:
                # Rows before the 'Times' header are copied unchanged.
                newlines += line
    print(newlines)
    # Rewrite the .dft in place with the corrected frame times.
    with open(img2dft.inputs.out_file, 'w') as f:
        f.write(newlines)
    #convert_time = tacunitCommand()
    #convert_time.inputs.in_file = img2dft.inputs.out_file
    #convert_time.inputs.xconv="min"
    #convert_time.run()
    self.inputs.out_file = img2dft.inputs.out_file
    return runtime
def _create_output_file(self, fn, space): basefn = os.path.basename(fn) if not '_space-' in basefn: basefn_split = splitext(basefn) return basefn_split[0] + '_space-' + space + basefn_split[1] else: return '_'.join([ f if not 'space-' in f else 'space-' + space for f in basefn.split('_') ])
def mnc2vol(niftifile):
    """Convert a volume file into a .raw/.header pair in the CWD.

    Exits with status 1 (after a warning) when the input is missing.
    """
    if not os.path.exists(niftifile):
        print('Warning: could not find file', niftifile)
        exit(1)
    datatype = nib.load(niftifile).get_data().dtype
    # Output files share the input's stem, written into the CWD.
    basename = os.path.join(os.getcwd(),
                            splitext(os.path.basename(niftifile))[0])
    minc2volume.make_raw(niftifile, datatype, basename + '.raw')
    minc2volume.make_header(niftifile, datatype, basename + '.header')
def _gen_output(self, basefile):
    """Build <cwd>/<stem><suffix><ext> from *basefile*.

    The node's suffix is forced to start with '_' and the result is
    guaranteed to carry a '.gz' extension.
    """
    root, ext = splitext(ntpath.basename(basefile))
    suffix = self._suffix if self._suffix[0] == '_' else '_' + self._suffix
    out_fn = os.path.join(os.getcwd(), root + suffix + ext)
    if '.gz' not in ext:
        out_fn += '.gz'
    return out_fn
def create_alt_template(template, beast_dir, clobber=False):
    """Resample *template* and its brain mask onto the grid of the first
    .mnc volume found in *beast_dir*.

    Existing outputs are reused unless *clobber* is set. Returns the
    path of the resampled template.

    BUG FIX: the original used a Python 2 print statement
    ('print rsl.cmdline'), a SyntaxError under Python 3; the rest of the
    file uses the print() function.
    """
    template_rsl = splitext(template)[0] + '_rsl.mnc'
    mask = splitext(template)[0] + '_mask.mnc'
    mask_rsl_fn = splitext(template)[0] + '_rsl_mask.mnc'
    # Only the first *.mnc file serves as the target grid (break below).
    for f in glob.glob(beast_dir + os.sep + "*mnc"):
        if not os.path.exists(template_rsl) or clobber:
            rsl = minc.Resample()
            rsl.inputs.input_file = template
            rsl.inputs.output_file = template_rsl
            rsl.inputs.like = f
            print(rsl.cmdline)
            rsl.run()
        if not os.path.exists(mask_rsl_fn) or clobber:
            mask_rsl = minc.Resample()
            mask_rsl.inputs.input_file = mask
            mask_rsl.inputs.output_file = mask_rsl_fn
            mask_rsl.inputs.like = f
            mask_rsl.run()
            print(mask_rsl.cmdline)
        break
    return template_rsl
def _run_interface(self, runtime):
    """Run the wrapped quantification command, decompressing any gzipped
    input files and downgrading a .nii.gz output path to .nii.

    BUG FIX: the original used Python 2 print statements
    ('print command', 'print "Out file", ...'), SyntaxErrors under
    Python 3; the rest of the file uses the print() function.
    """
    quantNode = self._quantCommand()
    quantNode.inputs = self.inputs
    init_command = quantNode.cmdline
    modified_command = []
    self.inputs.out_file = quantNode.inputs.out_file
    for f in init_command.split(' '):
        if os.path.exists(f):
            # Existing input file: decompress if gzipped.
            f = check_gz(f)
        elif f == quantNode.inputs.out_file and splitext(
                f)[1] == '.nii.gz':
            # Output path (doesn't exist yet): the quant tool cannot
            # write .nii.gz directly, so request .nii instead.
            f = splitext(f)[0] + '.nii'
            self.inputs.out_file = f
        modified_command.append(f)
    print(modified_command)
    command = ' '.join(modified_command)
    print(command)
    cmd(command)
    print("Out file", self.inputs.out_file)
    return runtime
def main(filename, datatype):
    """Convert *filename* into .header/.raw files next to the input.

    Parameters:
        filename -- path to the input volume.
        datatype -- datatype string forwarded to make_header/make_raw.
    """
    if not os.path.isfile(filename):
        console_error("File {} does not exist.".format(filename), 1)
    check_minc_tools_installed()
    # BUG FIX: splitext() returns a (root, ext) tuple; the original bound
    # the whole tuple, producing names like "dir/('vol', '.mnc').header".
    basename = splitext(os.path.basename(filename))[0]
    dirname = os.path.dirname(filename)
    # os.path.join also fixes the bare-filename case, where an empty
    # dirname previously yielded an absolute path like "/vol.header".
    headername = os.path.join(dirname, "{}.header".format(basename))
    rawname = os.path.join(dirname, "{}.raw".format(basename))
    console_log("Processing file: {}".format(filename))
    console_log("Creating header file: {}".format(headername))
    make_header(filename, datatype, headername)
    console_log("Creating raw data file: {}".format(rawname))
    make_raw(filename, datatype, rawname)
def _run_interface(self, runtime):
    """Write a Turku-style .sif (scan information file) for the PET image.

    The header line carries the current date/time, frame count, and the
    tracer isotope; each subsequent line holds a frame's start time, end
    time, and the frame's total image activity in both count columns.
    """
    img_fn = self.inputs.pet
    json_fn = self.inputs.pet_header_json
    out_fn = splitext(img_fn)[0] + '.sif'
    # np.asanyarray(dataobj) replaces the deprecated get_data().
    img = np.asanyarray(nib.load(img_fn).dataobj)
    d = json.load(open(json_fn))
    nframes = len(d["Time"]["FrameTimes"]["Values"])
    # BUG FIX: the original format string used %m (month) where minutes
    # (%M) were intended, producing e.g. "13:04:27" with the month as
    # the middle field.
    date_string = datetime.datetime.now().strftime("%d/%m/%Y %H:%M:%S")
    lines = date_string + " " + str(
        nframes) + " 4 1 sub " + d['Info']['Tracer']['Isotope'][0] + "\n"
    for i, vals in enumerate(d["Time"]["FrameTimes"]["Values"]):
        frame_total = str(np.sum(img[:, :, :, i]))
        # NOTE(review): both count columns receive the same frame total —
        # presumably prompts/randoms are not distinguished here; confirm.
        lines += "{}\t{}\t{}\t{}\n".format(vals[0], vals[1], frame_total,
                                           frame_total)
    with open(out_fn, 'w') as f:
        f.write(lines)
    self.inputs.out_file = out_fn
    print('Creating SIF')
    return runtime
def _gen_output(self, in_file):
    """Return <cwd>/<stem>_int.csv for the given input file."""
    stem = splitext(os.path.basename(in_file))[0]
    return os.path.join(os.getcwd(), stem + "_int.csv")
def _gen_output_vol(self, basefile):
    """Return <cwd>/<stem>_normalized<ext> for the given volume."""
    stem, ext = splitext(ntpath.basename(basefile))
    return os.path.join(os.getcwd(), stem + "_normalized" + ext)
def _gen_output(self, basefile, _suffix):
    """Return <cwd>/<stem><_suffix><ext> built from *basefile*."""
    stem, ext = splitext(ntpath.basename(basefile))
    return os.path.join(os.getcwd(), stem + _suffix + ext)
def _gen_output(self, basefile):
    """Return <cwd>/<stem> with this node's file type as the extension."""
    stem = splitext(ntpath.basename(basefile))[0]
    return os.path.join(os.getcwd(), stem + self._file_type)
def _gen_outputs(self, fn):
    """Return <cwd>/<stem>_4d<ext> for *fn* (directory part dropped)."""
    stem, ext = splitext(fn)
    return os.path.join(os.getcwd(), os.path.basename(stem) + "_4d" + ext)
def _run_interface(self, runtime):
    """Prepare a label/atlas volume — keep selected labels, optionally
    binarize, erode, and brain-mask it — then resample it into the
    analysis space with up to three transforms and copy the result to
    out_file.
    """
    #1. load label image
    img = nib.load(self.inputs.label_img)
    label_img = img.get_data()
    print("1", np.sum(label_img))
    print(self.inputs.labels)
    if self.inputs.labels != [] :
        _labels =[ int(i) for i in self.inputs.labels ]
    #else :
    #    _labels = np.unique(label_img)
    #2. Remove labels not specified by user, if any have been provided
    if self.inputs.labels != [] :
        labels_to_remove =[ i for i in np.unique(label_img) if int(i) not in _labels ]
        for i in labels_to_remove :
            label_img[ label_img == i ] = 0
        print(np.unique(label_img))
    print("2", np.sum(label_img))
    #3. concatenate all labels to 1
    if self.inputs.ones_only :
        label_img[label_img != 0 ] = 1
    #4. erode all labels
    # Each non-background label is eroded independently so neighbouring
    # regions do not merge; eroded masks are summed back together.
    label_img_eroded=np.zeros(label_img.shape)
    if self.inputs.erode_times != 0 :
        for i in np.unique(label_img) :
            if i != 0 :
                temp=np.zeros(label_img.shape)
                temp[ label_img == i ] = 1
                temp = binary_erosion(temp, iterations=self.inputs.erode_times)
                label_img_eroded += temp
        # NOTE(review): layout reconstructed from a collapsed source;
        # this assignment is assumed to sit inside the erode_times branch
        # (outside it, erode_times == 0 would blank all labels) — confirm.
        label_img=label_img_eroded
    #5. Optionally restrict the labels to the brain mask.
    if self.inputs.brain_only :
        brain_mask = nib.load(self.inputs.brain_mask).get_data()
        label_img *= brain_mask
    # Intermediate volume written to the CWD for the transform step.
    tmp_label_img = nib.Nifti1Image(label_img, img.get_affine())
    tmp_label_img.to_filename("tmp_label_img.nii")
    #6. Apply transformation
    transformLabels = APPIANApplyTransforms()
    transformLabels.inputs.input_image ="tmp_label_img.nii"
    transformLabels.inputs.reference_image = self.inputs.like_file
    transformLabels.inputs.transform_1 = self.inputs.transform_1
    transformLabels.inputs.transform_2 = self.inputs.transform_2
    transformLabels.inputs.transform_3 = self.inputs.transform_3
    transformLabels.inputs.invert_1 = self.inputs.invert_1
    transformLabels.inputs.invert_2 = self.inputs.invert_2
    transformLabels.inputs.invert_3 = self.inputs.invert_3
    # Nearest-neighbour interpolation keeps label values integral.
    transformLabels.inputs.interpolation = 'NearestNeighbor'
    transformLabels.run()
    output_image = transformLabels._list_outputs()['output_image']
    print(transformLabels._list_outputs() )
    #7. Copy to output
    if not isdefined(self.inputs.out_file):
        self.inputs.out_file = self._gen_output(self.inputs.label_img,
                                                self._suffix+self.inputs.analysis_space)
    print(output_image, self.inputs.out_file)
    if '.gz' in splitext(self.inputs.out_file)[1] :
        print('Gzip')
        gz(output_image, self.inputs.out_file)
        # NOTE(review): these loads appear to be readability sanity
        # checks on both files; their results are discarded.
        nib.load(output_image)
        nib.load(self.inputs.out_file)
    else :
        print('Copy')
        shutil.copy(output_image, self.inputs.out_file)
    return runtime