def clean_ct(CT_original, CT_clean):
    # Label connected components of everything brighter than -700 HU,
    # keep only component 1, and set the rest to air (-1024 HU).
    ct_ori, imageInfo = nrrd.read(CT_original)
    ct_cl, nr_objects = ndimage.label(ct_ori > -700)
    ct_ori[ct_cl != 1] = -1024
    nrrd.write(CT_clean, np.squeeze(ct_ori))
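A minimal usage sketch, assuming the imports below and hypothetical file names:

import numpy as np
import nrrd
from scipy import ndimage

# Hypothetical paths: keep the largest connected component above -700 HU;
# everything else becomes air (-1024 HU) in the cleaned output.
clean_ct('ct_original.nrrd', 'ct_cleaned.nrrd')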
Example 2
def cropObj(imagefile, labelfile, labels=None):
    if labels is None:
        print('no labels to crop to')
    else:
        print('Loading image %s...' % imagefile)
        data1, header1 = nrrd.read(imagefile)
        print('Loading mask %s...' % labelfile)
        data2, header2 = nrrd.read(labelfile)

        print('Cropping to objects with label(s) %s' % str(labels))

        # Build a mask that is False wherever a requested label occurs.
        mask = np.ones(np.shape(data1), dtype=bool)
        for i in labels:
            try:
                print('Cropping to object ' + str(i))
                mask[data2 == int(i)] = False
            except Exception:
                print('---')

        # Zero out everything outside the requested objects.
        data1[mask] = 0

        v = np.max(data1)
        print("Saving result over " + imagefile)
        header1['encoding'] = 'gzip'
        if v > 256:
            header1['type'] = 'uint16'
            nrrd.write(imagefile, np.uint16(data1), options=header1)
        else:
            header1['type'] = 'uint8'
            nrrd.write(imagefile, np.uint8(data1), options=header1)
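A hedged usage sketch (file names and label values are hypothetical; imports as above):

# Keep only voxels labelled 2 or 5 in labels.nrrd; image.nrrd is zeroed
# elsewhere and overwritten in place.
cropObj('image.nrrd', 'labels.nrrd', labels=[2, 5])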
Example 3
def cutObj(imagefile, labelfile, labels=None):
    if labels is None:
        print('no labels to be cut')
    else:
        print('Loading image %s...' % imagefile)
        data1, header1 = nrrd.read(imagefile)
        print('Loading mask %s...' % labelfile)
        data2, header2 = nrrd.read(labelfile)

        print('Cutting objects with label(s) %s' % str(labels))

        # Zero out voxels belonging to each requested label.
        for i in labels:
            try:
                print('Cutting object ' + str(i))
                data1[data2 == int(i)] = np.uint8(0)
            except Exception:
                print('---')

        v = np.max(data1)
        print("Saving result over " + imagefile)
        header1['encoding'] = 'gzip'
        if v > 256:
            header1['type'] = 'uint16'
            nrrd.write(imagefile, np.uint16(data1), options=header1)
        else:
            header1['type'] = 'uint8'
            nrrd.write(imagefile, np.uint8(data1), options=header1)
Example 4
def convert(in_filename, out_filename=None, spacings=None):
    A = loadmat(in_filename, struct_as_record=False)

    # struct
    S = A['Save_data'][0, 0]
    # volume
    V = S.P

    # output filename
    if out_filename is None:
        out_filename = os.path.splitext(in_filename)[0] + '.nrrd'

    logger.debug('Output filename: %s', out_filename)
    logger.debug('Writing NRRD file.')

    # NRRD options
    options = {}
    if spacings is None:
        # Derive per-axis spacing from the bounding box and volume shape.
        xs = float((S.xmax - S.xmin) / V.shape[0])
        ys = float((S.ymax - S.ymin) / V.shape[1])
        zs = float((S.zmax - S.zmin) / V.shape[2])
        options['spacings'] = [xs, ys, zs]
    else:
        # spacings arrives as a string such as '[1.0, 1.0, 2.0]'.
        options['spacings'] = eval(spacings)

    logger.debug('Setting spacings to: %s', options['spacings'])

    nrrd.write(out_filename, V, options)
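A minimal usage sketch, assuming the imports below and a hypothetical .mat file containing a Save_data struct:

import os
import logging
import nrrd
from scipy.io import loadmat

logger = logging.getLogger(__name__)

# Spacings are derived from the struct's bounding box when not supplied.
convert('reconstruction.mat')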
Example 5
def align(floatingImage, xform='default', imageOUT='default', template=template, settings=''):
    if 'default' in xform: xform = floatingImage.replace('.nrrd', '_warp.xform')
    if 'default' in imageOUT: imageOUT = floatingImage.replace('.nrrd', '_aligned.nrrd')
    if settings is None or 'None' in settings: settings = ''
    if os.path.exists(xform) and os.path.exists(template) and os.path.exists(floatingImage):
        if os.path.exists(imageOUT):
            print('removing old alignment %s' % imageOUT)
            os.remove(imageOUT)
        print('nice %sreformatx %s -o %s --floating %s %s %s' % (
            cmtkdir, settings, imageOUT, floatingImage, template, xform))
        r = subprocess.call("nice %sreformatx %s -o '%s' --floating '%s' '%s' '%s'" % (
            cmtkdir, settings, imageOUT, floatingImage, template, xform), shell=True)
        try:
            # Re-save the warped image gzip-encoded with the template's header.
            data1, header1 = nrrd.read(template)
            data1, header2 = nrrd.read(imageOUT)
            header1['encoding'] = 'gzip'
            if header1['space directions'] == ['none', 'none', 'none']:
                header1.pop("space directions", None)
            if os.path.exists(imageOUT):
                os.remove(imageOUT)
            nrrd.write(imageOUT, data1, options=header1)
            os.chmod(imageOUT, stat.S_IRWXU | stat.S_IRWXG | stat.S_IRWXO)
        except Exception:
            pass
    else:
        r = 99
    return imageOUT, r
Example 6
def labelObj(imagefile, labelfile, t=20, ms=1000, sl=np.ones((3, 3, 3))):
    print('Loading image %s...' % imagefile)
    data1, header1 = nrrd.read(imagefile)
    header1.pop("endian", None)

    print('Labeling objects with any voxel intensity above %s' % str(t))

    data1[data1 < t] = 0

    print('identifying distinct objects...')
    labels, features = ndimage.label(data1, structure=sl)
    print(str(features) + ' distinct objects found')
    # One histogram bin per label 1..features.
    val, lab = np.histogram(labels, bins=range(1, features + 2))
    print('Removing any objects with a volume of less than ' + str(ms) + ' voxels.')
    print('New label(s):')
    data = np.zeros(np.shape(data1))
    v = 1
    for i in range(0, features):
        if val[i] > ms:
            data[labels == lab[i]] = v
            print(str(v) + ' = ' + str(lab[i]))
            v = v + 1
    print(str(v - 1) + ' distinct objects still indexed')

    print("Saving result to " + labelfile)
    header1['encoding'] = 'gzip'
    if v > 256:
        header1['type'] = 'uint16'
        nrrd.write(labelfile, np.uint16(data), options=header1)
    else:
        header1['type'] = 'uint8'
        nrrd.write(labelfile, np.uint8(data), options=header1)
    return np.uint8(np.unique(data))
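A hedged usage sketch (hypothetical file names; assumes the numpy/scipy/pynrrd imports shown earlier):

# Threshold stack.nrrd at intensity 20, drop objects under 1000 voxels,
# and write the compacted label map; the remaining label values are returned.
new_labels = labelObj('stack.nrrd', 'stack_labels.nrrd', t=20, ms=1000)
print(new_labels)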
Example 7
def extractAndSaveBaselineToNRRD(nrrdfilename, baselinenrrdfilename):

    nrrdData, bvalue, gradientDirections, baselineIndex, _ = readHARDI(nrrdfilename)

    # The baseline is the volume acquired without diffusion weighting.
    baseline = nrrdData[:, :, :, baselineIndex]

    # save to nrrd, reusing the source header trimmed down to 3-D
    _, options = nrrd.read(nrrdfilename)

    options['keyvaluepairs'] = []

    if 'centerings' in options:
        options['centerings'] = [options['centerings'][0], options['centerings'][1], options['centerings'][2]]

    options['dimension'] = 3

    options['kinds'] = ['space', 'space', 'space']

    if 'thicknesses' in options:
        options['thicknesses'] = []

    if 'space directions' in options:
        options['space directions'] = [options['space directions'][0], options['space directions'][1], options['space directions'][2]]
        options['thicknesses'] = [abs(options['space directions'][0][0]), abs(options['space directions'][1][1]), abs(options['space directions'][2][2])]

    options['sizes'] = list(baseline.shape)
    nrrd.write(baselinenrrdfilename, baseline, options)
Example 8
def write_and_read_back_with_encoding(self, encoding):
    output_filename = os.path.join(self.temp_write_dir, "testfile_%s.nrrd" % encoding)
    nrrd.write(output_filename, self.data_input, {u'encoding': encoding})
    # Read back the same file
    data, header = nrrd.read(output_filename)
    self.assertEqual(self.expected_data, data.tobytes(order='F'))
    self.assertEqual(header['encoding'], encoding)
def compute_ct_mask_similarity(input_labelmap_filename, input_ctfilename,
                               output_maskfilename, dilation_value):

    cropped_data_temp, imageInfo = nrrd.read(input_labelmap_filename)
    above_zero = cropped_data_temp > 0
    belowthresh = cropped_data_temp < 17000  # fat is 17944

    # threshold slice to contain only all pec data
    cropped_data_temp[above_zero & belowthresh] = 1
    cropped_data_temp[cropped_data_temp > 1] = 0

    # dilate
    if dilation_value > 0:
        cropped_data_temp = np.array(ndimage.binary_dilation(
            cropped_data_temp, iterations=dilation_value)).astype(int)  # 10 is the last functional value
    cropped_data = np.squeeze(cropped_data_temp)
    print(np.shape(cropped_data))

    # remove lung tissue
    ct_data_temp, info = nrrd.read(input_ctfilename)
    ct_data = np.squeeze(ct_data_temp)
    print(np.shape(ct_data))
    lung_indeces = np.where(ct_data < (-1022))
    cropped_data[lung_indeces] = 0

    nrrd.write(output_maskfilename, np.squeeze(cropped_data))
def compute_ct_mask_similarity_withlabel(input_ct_volume, input_labelmap_filename,
                                         output_ct_filename, output_maskfilename=None):

    cropped_data_temp, imageInfo = nrrd.read(input_labelmap_filename)
    above_zero = cropped_data_temp > 0
    belowthresh = cropped_data_temp < 17000  # fat is 17944

    # threshold slice to contain only all pec data
    cropped_data_temp[above_zero & belowthresh] = 1
    cropped_data_temp[cropped_data_temp > 1] = 0

    # dilate
    cropped_data_temp = np.array(ndimage.binary_dilation(
        cropped_data_temp, iterations=20)).astype(int)
    cropped_data = np.squeeze(cropped_data_temp)

    # now find the bounding box and copy the CT data inside it
    b = np.where(cropped_data > 0)
    cropped_data[:] = -1024
    cropped_data[min(b[0]):max(b[0]) + 1, min(b[1]):max(b[1]) + 1] = \
        input_ct_volume[min(b[0]):max(b[0]) + 1, min(b[1]):max(b[1]) + 1]

    # label all -1024 as 0, because resampling messes that up
    if output_maskfilename is not None:
        sim_mask = np.zeros_like(cropped_data)
        sim_mask[cropped_data > (-1024)] = 1
        nrrd.write(output_maskfilename, np.squeeze(sim_mask))

    cropped_data[cropped_data < (-1023)] = 0
    nrrd.write(output_ct_filename, np.squeeze(cropped_data))
def register_2d_ct(moving_CT_original, fixed_CT_original, output_transfo_name):

    """
    Function that registers a moving 2D ct image to a fixed image, and saves
    the resulting transformation in a .tfm file

    moving_CT_original : input moving CT image

    fixed_CT_original : input fixed ct image

    output_transfo_name : filename of the output transformation to be saved.

    (Mask files will be saved in the same directory as the input files)

    """

    toolsPaths = ['CIP_PATH']
    path = dict()
    for path_name in toolsPaths:
        path[path_name] = os.environ.get(path_name, False)
        if path[path_name] is False:
            print(path_name + " environment variable is not set")
            exit()

    # Remove cruft from images before performing registration
    moving_CT = moving_CT_original.split('.')[0] + "_cleaned." + \
        moving_CT_original.split('.')[1]

    fixed_CT = fixed_CT_original.split('.')[0] + "_cleaned." + \
        fixed_CT_original.split('.')[1]

    input_moving_mask_rigid = '_'.join(moving_CT_original.split("_")[0:-1]) + "_pecsSubqFatClosedSlice." + \
        moving_CT_original.split('.')[1]

    moving_mask_rigid = '_'.join(moving_CT_original.split("_")[0:-1]) + "_pecsSubqFatClosedSlice_thresholded." + \
        moving_CT_original.split('.')[1]

    mask, imageInfo = nrrd.read(input_moving_mask_rigid)
    above_zero = mask > 0
    belowthresh = mask < 17000  # fat is 17944

    # threshold slice to contain only all pec data
    mask[above_zero & belowthresh] = 1
    mask[mask > 1] = 0

    nrrd.write(moving_mask_rigid, np.squeeze(mask))

    clean_ct(moving_CT_original, moving_CT)
    clean_ct(fixed_CT_original, fixed_CT)

    registerCall = os.path.join(path['CIP_PATH'], "RegisterCT2D")
    register_call = registerCall + " -m " + moving_CT + " -f " + \
        fixed_CT + " --outputTransform " + output_transfo_name + \
        " --isIntensity --movingLabelmapFileName " + moving_mask_rigid

    print(register_call)
    os.system(register_call)
Example 12
def save_nrrd(data, nrrd_filename):

    space = '3D-right-handed'

    x = np.array(data['x'])
    y = np.array(data['y'])
    z = np.array(data['z'])

    # set origin
    x0 = x.min()
    y0 = y.min()
    z0 = z.min()

    space_orig = np.array([x0, y0, z0]).astype('float32')

    # set spacing (assumes a uniform grid along each axis)
    del_x = np.diff(x)[0]
    del_y = np.diff(y)[0]
    del_z = np.diff(z)[0]

    spacings = np.array([del_x, del_y, del_z]).astype('float32')

    options = {'type': 'f4', 'space': space, 'encoding': 'raw',
               'space origin': space_orig, 'spacings': spacings}

    print("saving density to %s \n" % nrrd_filename)
    nrrd.write(nrrd_filename, np.array(data['rho']).astype('float32'), options)
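A hedged usage sketch (the dictionary layout is inferred from the function body; the values are synthetic):

import numpy as np
import nrrd

# Hypothetical density field on a uniform 32^3 grid.
grid = np.linspace(0.0, 1.0, 32)
data = {'x': grid, 'y': grid, 'z': grid,
        'rho': np.random.rand(32, 32, 32)}
save_nrrd(data, 'density.nrrd')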
def compute_simple_mask(input_mask_name, output_mask_name):
    """
    generates a mask with some dilation of the input mask
    """

    mask_data, options = nrrd.read(input_mask_name)
    print("reading mask name " + input_mask_name)
    #mask_data = np.array(ndimage.binary_dilation(mask_data, iterations = 10)).astype(int)
    nrrd.write(output_mask_name, mask_data)
def test_sitk_nrrd_read(tmpdir_factory, size):
    
    path = tmpdir_factory.mktemp('nrrd_io_test').join('dummy.nrrd')
    array = np.random.rand(*size)
    
    nrrd.write(str(path), array)

    obt_image, obt_info = su.read_ndarray_with_sitk(path)

    assert(np.allclose( obt_image, array ))
Example 15
def write(self, outFileName, data):
    if self.verbose:
        print("nrrdFileHandler: type(data)", type(data), "len(data)", len(data))

    # Flip and reorder axes to match the NRRD orientation convention used here.
    outData = data.astype(int)
    outData = outData[::-1, :, :]
    outData = outData[:, :, ::-1]
    outData = outData.swapaxes(0, 2).swapaxes(0, 1)
    if not str(outFileName).endswith('.nrrd'):
        outFileName = str(outFileName) + ".nrrd"
    nrrd.write(outFileName, outData)
Example 16
def check_and_write(base_dir, structure_id, fn):
    '''A many_structure_masks callback that writes the mask to a nrrd file
    if the file does not already exist.
    '''

    mask_path = os.path.join(base_dir,
                             'structure_{0}.nrrd'.format(structure_id))

    if not os.path.exists(mask_path):
        nrrd.write(mask_path, fn())

    return structure_id
Example 17
    def batch_preprocess(self, input_folder, output_folder, padding=20):
        """Pad all images in the input folder
        """
        input_files = glob.glob(input_folder + '/*')
        for input_path in input_files:
            subject_name = re.search(self.KEY_WORD_FILE, input_path).group()
            output_path = output_folder + '/' + subject_name

            data, options = nrrd.read(input_path)
            data, options = self.pad_upper(data, options, padding)
            data, options = self.filter_background_to_air(data, options)

            print('write ' + output_path)
            nrrd.write(output_path, data, options)  # too slow in Python
def saveTips(tipsPos, casePath, spacing, patchsize):
    '''
    Save the tips
    '''
    tipPath = getTipsPath(casePath, spacing)
    vol = nrrd.read(casePath + '/case.nrrd')[0]
    for i, tipPos in enumerate(tipsPos):
        x, y, z = tipPos
        xmin, ymin, zmin = np.array(patchsize)//2
        xmin = xmin//spacing[0]
        ymin = ymin//spacing[1]
        zmin = zmin//spacing[2]
        tip = vol[x-xmin:x+xmin, y-ymin:y+ymin, z-zmin:z+zmin]
        createDir(tipPath)
        nrrd.write(tipPath + '/tip-%d.nrrd'%i, tip)
Example 19
    def dicom_to_nrrd(self, dicom_root_dir, nrrd_files_dir):
        """Transfer dicom volumn into nrrd format
        0. Uncompress the dicom image
        1. Load each dicom images in the dicom_files_dir
        2. Save the load image into numpy.array format (rows, columns, depth)
        3. Write the numpy.array out as a nrrd file
        """
        TEMP_FILE = '/Users/chunwei/Downloads/_TEMP'
        SYSTEM_COMMAND = 'gdcmconv -w {0} {1}'

        for i, subject_folder in enumerate(glob.glob(dicom_root_dir + '/*')):
            nrrd_file = nrrd_files_dir + '/'\
                + re.search(self.KEY_WORD_FLODER, subject_folder).group()\
                + '_%02d.nrrd' % (i + 1)
            print('Processing ' + nrrd_file)

            if not os.path.exists(nrrd_files_dir):
                os.makedirs(nrrd_files_dir)

            data_3d = None

            dicom_files = glob.glob(subject_folder + '/*')
            for j, dicom_file in enumerate(dicom_files):
                # prompt
                ratio = 100 * float(j)/float(len(dicom_files))
                sys.stdout.write('\r%d%%' % ratio)
                sys.stdout.flush()

                # uncompress the dicom image
                command = SYSTEM_COMMAND.format(dicom_file, TEMP_FILE)
                call(command.split(), shell=False)

                # concatenate dicom image layer by layer
                ds = dicom.read_file(TEMP_FILE)
                data = ds.pixel_array
                data_3d = self.concatenate_layers(data_3d, data)  # bottom up

            # get nrrd options
            options = self.load_dicom_options(TEMP_FILE, len(dicom_files))

            # transpose the data
            data_3d = numpy.swapaxes(data_3d, 0, 1)
            data_3d = data_3d[:, :, ::-1]

            # write the stack files in nrrd format
            nrrd.write(nrrd_file, data_3d, options)

            print()
Example 20
    def write_itksnap_labels(self, annotation_path, label_path, **kwargs):
        '''Generate a label file (nrrd) and a label_description file (csv) for use with ITKSnap

        Parameters
        ----------
        annotation_path : str
            write generated label file here
        label_path : str
            write generated label_description file here
        **kwargs : 
            will be passed to self.export_itksnap_labels

        '''

        annotation, labels = self.export_itksnap_labels(**kwargs)
        nrrd.write(annotation_path, annotation, header={'spacings': self.resolution})
        labels.to_csv(label_path, sep=' ', index=False, header=False, quoting=csv.QUOTE_NONNUMERIC)
def saveNoTips(numberOfSamples, casePath, spacing, patchsize):
    '''
    Save the random cubes
    '''
    vol = nrrd.read(casePath + '/case.nrrd')[0]
    tipPath = getNoTipsPath(casePath, spacing)
    for i in range(numberOfSamples):
        xmin, ymin, zmin = np.array(patchsize)//2
        xmin = xmin//spacing[0]
        ymin = ymin//spacing[1]
        zmin = zmin//spacing[2]
        x = np.random.randint(xmin,vol.shape[0]-xmin)
        y = np.random.randint(ymin,vol.shape[1]-ymin)
        z = np.random.randint(zmin,vol.shape[2]-zmin)
        
        tip = vol[x-xmin:x+xmin, y-ymin:y+ymin, z-zmin:z+zmin]
        createDir(tipPath)
        nrrd.write(tipPath + '/notip-%d.nrrd'%i, tip)
def main():
    if(len(sys.argv) < 3):
        return

    data_directory = sys.argv[1]

    # integer value in the voxels are brain region identifier
    brain_region_file = sys.argv[2]
    print "\nLoading brain regions ("+brain_region_file+")..."
    region_voxels, region_metadata = nrrd.read(data_directory + brain_region_file)
    print(region_metadata)

    # 3D Matrix dimensions
    X_MAX = region_metadata['sizes'][0]
    Y_MAX = region_metadata['sizes'][1]
    Z_MAX = region_metadata['sizes'][2]

    # Create output matrices
    brain_region_id = 1
    filtered_brain_regions = np.zeros((X_MAX, Y_MAX, Z_MAX))
    out_br_filename = data_directory + str(brain_region_id) + '_brain_regions.nrrd'

    voxel_count=0
    exported_voxel_count=0
    exported_counter = collections.Counter()
    all_counter = collections.Counter()
    print "\nBuilding NRRD files filtered for brain region '"+ str(brain_region_id) +"' and children..."
    for x in range(0, X_MAX):
        # print "X: " + str(x) + "/" + str(X_MAX) + " - #voxels: " + "{:,}".format(exported_voxel_count) + " / " + "{:,}".format(voxel_count)
        # print "Voxels exported by region", exported_counter
        # print "Brain regions: ", all_counter
        for y in range(0, Y_MAX):
            for z in range(0, Z_MAX):
                region_id = region_voxels[x, y, z]
                # all_counter[ str(region_id) ] += 1
                # voxel_count += 1
                if( region_id == brain_region_id ):
                    # exported_counter[ str(region_id) ] += 1
                    # exported_voxel_count += 1

                    filtered_brain_regions[x, y, z] = region_voxels[x, y, z]

    # Write nrrd files
    nrrd.write(out_br_filename, filtered_brain_regions)
Example 23
def saveNoTips(tipsPos, numberOfSamples, casePath, spacing, patchsize):
    '''
    Pick suitable region
    '''
    print('open: ', casePath)
    vol = nrrd.read(casePath + '/case.nrrd')[0]
    r1, s1, t1 = vol.shape
    # pick approximate region
    pick = vol[r1//3:r1//3*2, s1//3:s1//3*2+20, t1//2+15:t1//3*2+20]
    r2, s2, t2 = pick.shape
    print('vol shape:', vol.shape)
    print('pick shape:', pick.shape)
    '''
    Find voiding region of tips
    '''
    region = []
    ban = 9
    for tip in tipsPos:
        x, y, z = tip
        for i in range(ban):
            for j in range(ban):
                region.append([x-r1//3+i-ban//2, y-s1//3+j-ban//2])
    print('region:', np.shape(region))
    '''
    Save the random cubes
    '''
    notipPath = getNoTipsPath(casePath, spacing)
    for i in range(numberOfSamples):
        xmin, ymin, zmin = np.array(patchsize)//2
        xmin = xmin//spacing[0]
        ymin = ymin//spacing[1]
        zmin = zmin//spacing[2]
        x = np.random.randint(xmin,r2-xmin)
        y = np.random.randint(ymin,s2-ymin)
        z = np.random.randint(zmin,t2-zmin)
        # exclude region where needles locate
        while [x, y] in region:
            x = np.random.randint(xmin,r2-xmin)
            y = np.random.randint(ymin,s2-ymin)
            print('find contradiction!')
        notip = pick[x-xmin:x+xmin, y-ymin:y+ymin, z-zmin:z+zmin]
        createDir(notipPath)
        nrrd.write(notipPath + '/notip-%d.nrrd'%i, notip)
Example 24
def save_to_nrrd( input_h5_fname, clusters, nrrd_fname ):
    cluster_medoids = list(np.unique(clusters))
    n_clusters = len(cluster_medoids)
    h5_data = h5py.File( input_h5_fname, mode='r')

    stepXY = h5_data['stepXY'][()][0]
    stepZ = h5_data['stepZ'][()][0]

    voxel_names = h5_data['positionKeys'][()].strip(',').split(',')
    nrrd_locs = []
    nrrd_values = []
    for i,vn in enumerate(voxel_names):
        x,y,z = util.voxel_name_to_idx(vn)
        r = util.h5_coord_to_nrrd(x, stepXY)
        s = util.h5_coord_to_nrrd(y, stepXY)
        t = util.h5_coord_to_nrrd(z, stepZ)
        nrrd_locs.append( (r,s,t) )
        medoid = clusters[i]
        cluster_id = cluster_medoids.index(medoid) + 1
        assert cluster_id != 0 # must be found
        nrrd_values.append( cluster_id )
    nrrd_locs = np.array(nrrd_locs)
    nrrd_shape = np.max( nrrd_locs, axis=0) + 1
    nrrd_data = np.zeros( nrrd_shape )
    for loc,val in zip(nrrd_locs, nrrd_values):
        r,s,t=loc
        nrrd_data[r,s,t] = val
    assert np.sum(np.isnan(nrrd_data)) == 0
    
    assert np.max(nrrd_data) <= 255
    nrrd_data = nrrd_data.astype(np.uint8)


    outdir = os.path.dirname( nrrd_fname )
    try:
        os.makedirs(outdir)
    except OSError as err:
        if err.errno!=errno.EEXIST:
            raise

    nrrd.write(nrrd_fname, nrrd_data)
Example 25
def fixNRRDfile(nrrdfilename, encoding='gzip'):
    # data not saved by slicer ITK (from Clement) would have NAN in the thickness
    # note: slicer save number of directions as the first dimension
    
    # save to nrrd
    nrrdData, options = nrrd.read(nrrdfilename)
        
    if options['kinds'][0] == 'list' or options['kinds'][0] == 'vector': # as saved by slicer
        nrrdData = np.transpose(nrrdData, (1,2,3,0))
        options['kinds'] = ['space','space','space','list']
        
        if type(options['space directions'][0]) is str:
            options['space directions'] = [options['space directions'][1], options['space directions'][2], options['space directions'][3], 'none']
        else:
            options['space directions'] = [options['space directions'][0], options['space directions'][1], options['space directions'][2], 'none']
    
    options['thicknesses'] = [abs(options['space directions'][0][0]), abs(options['space directions'][1][1]), abs(options['space directions'][2][2]), 'NaN']
    options['sizes']       = list(nrrdData.shape)
    
    options['encoding'] = encoding
    nrrd.write( nrrdfilename, nrrdData, options)
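A hedged usage sketch (the file name is hypothetical; the call rewrites the file in place):

# Move the gradient axis of a Slicer-saved DWI volume to the end and
# re-save the file gzip-encoded.
fixNRRDfile('dwi_from_slicer.nrrd', encoding='gzip')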
Example 26
def write_xtk_nrrd(volume, nrrd_out):
    """The write_xtk_nrrd method writes numpy arrays as IEV-ready NRRD files. It also works on memory mapped arrays.

    IEV works using the X Toolkit (XTK), which is quite particular about the NRRD files it displays. This method ensures
    that the NRRD headers are written appropriately, using nrrd.py by Maarten Everts
    (https://github.com/mhe/pynrrd/blob/master/nrrd.py)

    :param volume: a numpy array in memory, or a memory mapped numpy array
    :param nrrd_out: a file path to which the NRRD file is written
    :raises IOError: unable to write file to disk
    """

    try:
        options = {"encoding": "gzip",
                   "space": "left-posterior-superior",
                   "space directions": [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
                   "kinds": ["domain", "domain", "domain"],
                   "space origin": [0, 0, 0]}
        nrrd.write(nrrd_out, volume, options)
    except IOError as e:
        print "Failure writing .nrrd file: {}".format(e)
def main():
    nrrd_path = sys.argv[1]
    mask_path = sys.argv[2]
    output_path = sys.argv[3]

    if not os.path.exists(output_path):
        os.makedirs(output_path)

    for filename in sorted(os.listdir(mask_path)):
        print('Processing ' + filename)
        voxel, meta = nrrd.read(nrrd_path)
        #voxel = voxel.astype(float)

        mask_voxel, mask_meta = nrrd.read(mask_path + filename)
        nan_mask = mask_voxel == 0
        #voxel[nan_mask] = 0
        #voxel[nan_mask] = np.nan
        voxel[:,nan_mask] = 0

        #voxel[:,np.isfinite(mask_voxel) == False] = 'nan'
        output_filename = output_path + filename
        nrrd.write(output_filename, voxel, meta)
def test_get_annotation_volume(rsp, fn_temp_dir, rsp_version, resolution):

    eye = np.eye(100)
    path = os.path.join(fn_temp_dir, rsp_version, 'annotation_{0}.nrrd'.format(resolution))

    rsp.api.retrieve_file_over_http = lambda a, b: nrrd.write(b, eye)
    obtained, _ = rsp.get_annotation_volume()

    rsp.api.retrieve_file_over_http = mock.MagicMock()
    rsp.get_annotation_volume()

    rsp.api.retrieve_file_over_http.assert_not_called()
    assert( np.allclose(obtained, eye) ) 
    assert( os.path.exists(path) )
Example 29
def save_to_nrrd_xyz(input_h5_fname, clusters, xyz, nrrd_fname):
    cluster_medoids = list(np.unique(clusters))
    h5_data = h5py.File(input_h5_fname, mode='r')

    stepXY = h5_data['stepXY'][()][0]
    stepZ = h5_data['stepZ'][()][0]

    nrrd_locs = []
    nrrd_values = []
    for medoid, (x, y, z) in zip(clusters, xyz):
        r = util.h5_coord_to_nrrd(x, stepXY)
        s = util.h5_coord_to_nrrd(y, stepXY)
        t = util.h5_coord_to_nrrd(z, stepZ)
        nrrd_locs.append( (r,s,t) )
        cluster_id = cluster_medoids.index(medoid) + 1
        assert cluster_id != 0 # must be found
        nrrd_values.append( cluster_id )
    nrrd_locs = np.array(nrrd_locs)
    nrrd_shape = np.max( nrrd_locs, axis=0) + 1
    nrrd_data = np.zeros( nrrd_shape )
    for loc,val in zip(nrrd_locs, nrrd_values):
        r,s,t=loc
        nrrd_data[r,s,t] = val
    assert np.sum(np.isnan(nrrd_data)) == 0

    assert np.max(nrrd_data) <= 255
    nrrd_data = nrrd_data.astype(np.uint8)


    outdir = os.path.dirname( nrrd_fname )
    try:
        os.makedirs(outdir)
    except OSError as err:
        if err.errno!=errno.EEXIST:
            raise

    nrrd.write(nrrd_fname, nrrd_data)
Example 30
    def get_structure_mask(self, structure_id, file_name=None, annotation_file_name=None):
        """
        Read a 3D numpy array shaped like the annotation volume that has non-zero values where
        voxels belong to a particular structure.  This will take care of identifying substructures.

        Parameters
        ----------

        structure_id: int
            ID of a structure.

        file_name: string
            File name to store the structure mask.  If it already exists,
            it will be read from this file.  If file_name is None, the
            file_name will be pulled out of the manifest.  Default is None.

        annotation_file_name: string
            File name to store the annotation volume.  If it already exists,
            it will be read from this file.  If file_name is None, the
            file_name will be pulled out of the manifest.  Default is None.
        """

        file_name = self.get_cache_path(file_name, self.STRUCTURE_MASK_KEY, structure_id)

        if os.path.exists(file_name):
            return nrrd.read(file_name)
        else:
            ont = self.get_ontology()
            structure_ids = ont.get_descendant_ids([structure_id])
            annotation, _ = self.get_annotation_volume(annotation_file_name)
            mask = self.make_structure_mask(structure_ids, annotation)

            if self.cache:
                Manifest.safe_make_parent_dirs(file_name)
                nrrd.write(file_name, mask)

            return mask, None
Example 31
edge_p_coordinate = s2.get_edge(data_p)  # collect the edge-point coordinates

edge = np.zeros([512, 512], dtype='int')
for e in edge_p_coordinate:
    edge[e[0], e[1]] = 1

pelvic_fat = []
fat_fig = np.zeros([512, 512], dtype='int')

for f in fat_p_coordinate:
    label = 0
    fat = FatPart1(f[0], f[1], bone_p, edge_p_coordinate)  # class Fat
    [dis2Edge, nearestDot2Edge] = fat.__dot2edge__(edge_p_coordinate)
    if dis2Edge > 40:
        fat_fig[f[0], f[1]] = 1
    elif 25 < dis2Edge <= 40:
        neardot = fat.neardot(fat.x, fat.y,
                              nearestDot2Edge)  # points whose distance to the fat-to-edge line is below d
        nearFatRate = fat.nearfatrate(neardot,
                                      fat_p_coordinate)  # fat fraction in the epsilon-neighbourhood of that line
        if nearFatRate < 0.35:
            fat_fig[f[0], f[1]] = 1

    fat_part2[:, :, p] = np.transpose(fat_fig)

    imageio.imwrite(
        './result/fat/pic/part2-1/pic%d.jpg' % p,
        cv2.resize(fat_fig, (512, 512), interpolation=cv2.INTER_CUBIC))

nrrd.write('./result/fat/part1.nrrd', fat_part2)
Example 32
    return result_mask, (str(sigma), str(alpha))


def parse_args():
    parser = argparse.ArgumentParser(description='Parameters setting')
    parser.add_argument('path', type=str)
    parser.add_argument('--alpha', type=float, default=1e-2)
    parser.add_argument('--sigma', type=float, default=40)
    parser.add_argument('--seg_filename', default='tmp')
    args = parser.parse_args()

    return args


if __name__ == '__main__':

    file_dir = '../nrrd/cropped/npy/'
    result_dir = '../results/med_graphcut_res/'
    args = parse_args()

    img = np.load(file_dir + args.path)
    print(img.shape, img.max(), img.min())

    segmentation, ph = med_graphcut(img, args.alpha, args.sigma)
    segmentation = segmentation.astype(int) * 255

    print('save the result')
    seg_filename = args.seg_filename  #+'_%f_%f'%(args.alpha, args.sigma)
    np.save(result_dir + 'npy/' + seg_filename + '.npy', segmentation)
    nrrd.write(result_dir + seg_filename + '.nrrd',
               segmentation.astype(float))
Example 33




if __name__ == '__main__':
    # directory of original nrrd files
    data_dir = "D:/skull-nrrd"
    data_list = glob('{}/*.nrrd'.format(data_dir))
    # directory to save the cleaned nrrd file
    save_dir = "D:/skull-nrrd/cleaned/"

    for i in range(len(data_list)):
        print('current data to clean:', data_list[i])
        # read nrrd file. data: skull volume. header: nrrd header
        data, header = nrrd.read(data_list[i])
        # Get all the connected components in data
        labels_out = cc3d.connected_components(data.astype('int32'))
        # select the index of the second largest connected component
        # in the data (the largest connected component is the background).
        skull_label = skull_id(labels_out)
        # keep only the second largest connected component (and remove other components)
        skull = (labels_out == skull_label).astype(int)
        # file name of the cleaned skull
        filename = save_dir + data_list[i][-10:-5] + '.nrrd'
        print('writing the cleaned skull to nrrd...')
        nrrd.write(filename, skull, header)
        print('writing done...')

Example 34
norm_const = np.float64(1 / ((2 * np.pi)**(3 / 2) * sigma**3))
kernel = norm_const * np.exp(-(xx**2 + yy**2 + zz**2) / (2 * sigma**2))

# plot the kernel
for z in range(kernel.shape[2]):
    plt.figure()
    plt.imshow(kernel[:, :, z])

# Make some sample data
data = np.zeros((51, 51, 51))
data[26, 26, 0] = 1.
data[5, 5, 26] = 1.
data[4, 5, 26] = 1.

# Convolve the sample data with the kernel
filtered = signal.convolve(data, kernel, mode="same")

# plot the result
for z in range(filtered.shape[2]):
    plt.figure()
    plt.imshow(filtered[:, :, z])

# Count the number of Gaussian kernels by integrating over the volume
total_num = np.sum(filtered)

# Round up the integration
total_num = np.ceil(total_num)

# Save the volume and view it with Fiji
nrrd.write('filtered.nrrd', filtered)
                             
#                            print('Exporting the spheroid')
#                            spheroid_export_path = os.path.join(path_to_colocalization_results, cultivation_period, 'C2'+export_suffix+'.nrrd')
#                            nrrd.write(spheroid_export_path, spheroid_new.astype(np.uint8), colocalization_header)
#                            
#                            print('Exporting the colocalization channel')
#                            colocalization_channel_export_path = os.path.join(path_to_colocalization_results,  cultivation_period, 'C1'+export_suffix+'.nrrd')
#                            nrrd.write(colocalization_channel_export_path, colocalization_restored.astype(np.uint8), colocalization_header)
#                            
#                            print('Exporting the segmentation')
#                            segmentation_export_path = os.path.join(path_to_colocalization_results,  cultivation_period, 'C2'+export_suffix+'_seg.nrrd')
#                            nrrd.write(segmentation_export_path, segmentation_thresholded.astype(np.uint16), colocalization_header)
                            
                            print('Exporting the colocalized segmentation')
                            colocalized_segmentation_export_path = os.path.join(path_to_colocalization_results,  cultivation_period, 'C2'+export_suffix+'_seg_colocalized.nrrd')
                            nrrd.write(colocalized_segmentation_export_path, colocalization_segmentation.astype(np.uint16), coloc_header)
                            
                            print('Exporting the colocalization statistics')
                            colocalization_table_export_path = os.path.join(path_to_colocalization_results,  cultivation_period, export_suffix+'_colocalization_stats.txt')
                            with open(colocalization_table_export_path,'w') as file:
                                for item in result_table:
                                    line = "%s \t %s \t %s \t %s \t %s\n" %(item[0], item[1], item[2], item[3], item[4])
                                    file.write(line)
                            
                            # Log the number of cells in a table
                            #spheroid_title = res_dir.split(os.path.sep)[2] + '->' + spheroid_name
                            cultivation_period = res_dir.split(os.path.sep)[2]
                            table.append([cultivation_period, spheroid_name, num_of_cells_ground_truth, num_of_cells_predicted, abs_diff, perc_diff, num_of_colocalized_cells, perc_num_of_colocalized_cells])

with open(os.path.join(path_to_colocalization_results, 'cell_numbers_dataset1_fiji_segmentations_filtered.txt'),'w') as file:
    for item in table:
Example 36
    all_acts = []
    for k in range(5):
        acts = (U[:, k].unsqueeze(1) *
                alpha.unsqueeze(0).cuda()).t() + mean_hhat
        all_acts.append(acts.unsqueeze(-1).unsqueeze(-1))
    all_acts_cat = torch.cat(all_acts, 0)

    tweaks = mdl.decoder(Variable(all_acts_cat))

    im = ut.collate_images_rectangular(tweaks.data, 50, 10, L1=456, L2=320)

    plt.figure(figsize=(30, 20), dpi=120)
    plt.imshow(np.flipud(im), interpolation='None')
    plt.savefig('mouse_results/tweak_images_v1.png', format='png')
    nrrd.write(shared_path + 'tweak_images.nrrd',
               tweaks.data.cpu().squeeze().permute(1, 2, 0).numpy())

else:
    alpha = torch.randn(100) * 10

    Ncomps = 30
    plt.figure(figsize=(24, 16), dpi=120)

    all_acts = []
    all_tweaks = []
    all_real_comps = []
    for k in range(Ncomps):
        print(k)
        acts = (U[:, k].unsqueeze(1) *
                alpha.unsqueeze(0).cuda()).t() + mean_hhat
        outs = nn.parallel.data_parallel(
Example 37
import os
import nrrd
import numpy as np
import nibabel as nib
import cv2

path = '../../data'

images = []
folders = []

for subdir, dirs, files in os.walk(path):
    for file1 in files:
        if 'img' in file1:
            images.append(subdir + '/' + file1)
            folders.append(subdir)


for i in range(len(images)):

    # if i == 1:
    #     break

    print('==> Iteration: ', i + 1)

    image, options = nrrd.read(images[i])
    blurred = cv2.GaussianBlur(image, (5, 5), 0)

    # save the Gaussian-blurred volume
    nrrd.write(folders[i] + '/gaussianBlurred.nrrd', blurred)
import numpy as np
import nrrd

test_image, header = nrrd.read('test_data/testbild.nrrd')
test_image_stack = np.zeros(shape=(512,512,4), dtype='uint8')

for i in range(4):
    test_image_stack[:,:,i] = test_image
    
nrrd.write('test_image_stack.nrrd', test_image_stack)
    roi_label_box1 = get_roi_box(kidney_label_array[0:256, :, :])
    roi_label_box2 = get_roi_box(kidney_label_array[256:, :, :])
    roi_label_box = np.concatenate((roi_label_box1, roi_label_box2), axis=0)

    roi_label = np.zeros(roi_label_box.shape, dtype=np.int16)
    # threshold
    roi_label[(data_array < 980) * (data_array > 880) *
              (roi_label_box > 0)] = 1

    # max connect area
    roi_label = np.concatenate((delete_useless_area(
        roi_label[0:256, :, :]), delete_useless_area(roi_label[256:, :, :])),
                               axis=0)

    roi_label = roi_label.astype(bool)
    # fill hole
    for height in range(0, roi_label.shape[2]):
        if np.sum(roi_label[:, :, height]) > 0:
            roi_label[:, :, height] = \
                morphology.remove_small_holes(roi_label[:, :, height], min_size=230, connectivity=1)

    # remove small objects
    for height in range(0, roi_label.shape[2]):
        if np.sum(roi_label[:, :, height]) > 0:
            roi_label[:, :, height] = \
                morphology.remove_small_objects(roi_label[:, :, height], min_size=230, connectivity=1)

    # save
    roi_label = roi_label.astype(np.uint8)
    nrrd.write(roi_save_path + data_names[i], roi_label, _)
Example 40
#3. Clamp mass outside range
mass2_im[mass2_im < -15] = 0
mass2_im[mass2_im > 15] = 0
#1. Isolate lung
mass2_im[wl_mask_im == 0] = 1000
#mass2_im[np.logical_and(mass2_im<-20,mass2_im>20)]=0
#3. Create gain/loss mask
mass2_mask = np.zeros(mass2_im.shape)
#Mass gain: Label 2
mass2_mask[mass2_im > 3] = 2
#Mass loss: label 1
mass2_mask[mass2_im < -3] = 1
mass2_mask[mass2_im == 1000] = 0

print "Saving residual mask map"
nrrd.write(mass_file, mass2_im, insp_header)
nrrd.write(mass_mask_file, mass2_mask, insp_header)

exit

resmass = list()
for th in mass_th:
    mass_mask = np.logical_and(mass_im < th, insp_im > -950)
    tmplist = list()
    for ii, rr in enumerate(regions):
        region_mask = np.logical_and(insp_mask_im >= rr[0],
                                     insp_mask_im <= rr[1])
        tmplist.append(100 * mass_mask[region_mask].sum() /
                       float(region_mask.sum()))
        print "Residual mask % " + str(
            rr[0]) + " for th " + str(th) + " is " + str(tmplist[ii])
Example 41
# phi += base_grid

# # Scale to [-1,1]
# phi_min = phi.min(axis=(1,2,3), keepdims=True)
# phi_max = phi.max(axis=(1,2,3), keepdims=True)
# phi = (phi-phi_min) * 2 / (phi_max-phi_min) -1

sz = phi.shape[1]
theta = torch.tensor([[[0, 0, 1, 0], [0, 1, 0, 0], [1, 0, 0, 0]]],
                     dtype=torch.float32)
base_grid = F.affine_grid(theta,
                          torch.Size((1, 1, sz, sz, sz)),
                          align_corners=True)
# base_out = base_grid.numpy()[0,:,:,:,:]
# nrrd.write(file_path+'base_grid_torch.nrrd', base_out, phi_head)

# base_grid = np.meshgrid(np.linspace(-1,1,phi.shape[1]), np.linspace(-1,1,phi.shape[2]), np.linspace(-1,1,phi.shape[3]), indexing='ij')
# base_grid = np.asarray(base_grid)
# base_grid = np.transpose(base_grid, (1,2,3,0))
# nrrd.write(file_path+'base_grid_numpy.nrrd', base_grid, phi_head)
# base_grid = np.expand_dims(base_grid, axis=0)

phi = phi * 2 / phi.shape[1]
phi = torch.from_numpy(phi).float()
phi += base_grid

warped = F.grid_sample(move, phi)
warped_img = warped.numpy()[0, 0, :, :, :]

nrrd.write(file_path + 'out_torch.nrrd', warped_img)
Example 42
d = 20

#prepare(r'Images/Carb.nhdr', r'Images/Carb2.nhdr')
img_source, image_header = nrrd.read(r'source.nhdr')
img_target, image_header = nrrd.read(r'target.nhdr')
offset = int(img_target.shape[0] / 2 - 3 * d / 4)

sls = [(slice(None), 0, slice(None)), (slice(None), -1, slice(None)),
       (slice(None), slice(None), 0), (slice(None), slice(None), -1)]

for sl in sls:
    img_boundary = img_target[sl]
    img_boundary_source = img_source[sl]
    img_mask = np.zeros(img_boundary_source.shape)
    img_mask[1:-1, 1:-1] = 255
    boundary_res = poissonblending.blend(img_boundary,
                                         img_boundary_source,
                                         img_mask,
                                         offset=(offset, 0))
    #boundary_res[boundary_res>=0.5] = 1
    #boundary_res[boundary_res<0.5] = 0
    img_ret = PIL.Image.fromarray(np.uint8(boundary_res))
    img_ret.save('./testimages/bound' + str(sls.index(sl)) + '.png')
    img_target[sl] = boundary_res

img_mask = np.zeros(img_source.shape)
img_mask[1:-1, 1:-1, 1:-1] = 255
data = poisson3d.blend(img_target, img_source, img_mask, offset=(offset, 0, 0))
options = {'encoding': 'raw'}
nrrd.write(r'result.nhdr', data, options=options)
Example 43
def cropping(image, mask, prefix=None, size=[86, 86, 86]):

    print('\nStarting raw data and mask cropping...')
    imagePath, imageFilename, imageExt = split_filename(image)
    if prefix is None:
        imageOutname = os.path.join(imagePath,
                                    imageFilename + '_cropped') + imageExt
    else:
        imageOutname = os.path.join(imagePath, prefix + '_cropped') + imageExt

    _, maskFilename, maskExt = split_filename(mask)
    maskOutname = os.path.join(imagePath, maskFilename + '_cropped') + maskExt

    maskData, maskHD = nrrd.read(mask)
    imageData, imageHD = nrrd.read(image)

    x, y, z = np.where(maskData == 1)
    x_size = np.max(x) - np.min(x)
    y_size = np.max(y) - np.min(y)
    z_size = np.max(z) - np.min(z)
    maskMax = np.max(maskData)
    maskMin = np.min(maskData)
    if maskMax > 1 and maskMin < 0:
        print('This image {} is probably not a mask, as it is not binary. '
              'It will be ignored. Please check if it is true.'.format(mask))
        imageOutname = None
        maskOutname = None
    else:
        if size:
            offset_x = (size[0] - x_size) / 2
            offset_y = (size[1] - y_size) / 2
            offset_z = (size[2] - z_size) / 2
            if offset_x < 0 or offset_y < 0 or offset_z < 0:
                raise Exception('Size too small, please increase.')

            if offset_x.is_integer():
                new_x = [np.min(x) - offset_x, np.max(x) + offset_x]
            else:
                new_x = [
                    np.min(x) - (offset_x - 0.5),
                    np.max(x) + (offset_x + 0.5)
                ]
            if offset_y.is_integer():
                new_y = [np.min(y) - offset_y, np.max(y) + offset_y]
            else:
                new_y = [
                    np.min(y) - (offset_y - 0.5),
                    np.max(y) + (offset_y + 0.5)
                ]
            if offset_z.is_integer():
                new_z = [np.min(z) - offset_z, np.max(z) + offset_z]
            else:
                new_z = [
                    np.min(z) - (offset_z - 0.5),
                    np.max(z) + (offset_z + 0.5)
                ]
            new_x = [int(x) for x in new_x]
            new_y = [int(x) for x in new_y]
            new_z = [int(x) for x in new_z]
        else:
            new_x = [np.min(x) - 20, np.max(x) + 20]
            new_y = [np.min(y) - 20, np.max(y) + 20]
            new_z = [np.min(z) - 20, np.max(z) + 20]
        croppedMask = maskData[new_x[0]:new_x[1], new_y[0]:new_y[1],
                               new_z[0]:new_z[1]]
        maskHD['sizes'] = np.array(croppedMask.shape)

        croppedImage = imageData[new_x[0]:new_x[1], new_y[0]:new_y[1],
                                 new_z[0]:new_z[1]]
        imageHD['sizes'] = np.array(croppedImage.shape)

        nrrd.write(imageOutname, croppedImage, header=imageHD)
        nrrd.write(maskOutname, croppedMask, header=maskHD)
    print('Cropping done!\n')
    return imageOutname, maskOutname
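A hedged usage sketch (paths are hypothetical; split_filename is the snippet's own helper):

# Crop subject01.nrrd and its binary mask to an 86^3 box centred on the
# mask, writing *_cropped files next to the input image.
img_out, mask_out = cropping('subject01.nrrd', 'subject01_mask.nrrd',
                             size=[86, 86, 86])
print(img_out, mask_out)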
import os
import nrrd
import numpy as np

path = '/home/davidhaberl/PycharmProjects/MasterThesis/Data/Head-Neck-PET-CT/resampled-3D-suv-pet-and-masks-prim-npy'

dst_path = '/home/davidhaberl/PycharmProjects/MasterThesis/Data/Head-Neck-PET-CT/resampled-3D-suv-pet-and-masks-prim-nrrd'

for cohort in sorted(os.listdir(path)):
    # print(cohort)

    # Create directory for each cohort
    os.makedirs(os.path.join(dst_path, cohort), exist_ok=True)

    for patient in sorted(os.listdir(os.path.join(path, cohort))):
        # print(patient)

        # Create directory for each patient
        os.makedirs(os.path.join(dst_path, cohort, patient), exist_ok=True)

        for file in sorted(os.listdir(os.path.join(path, cohort, patient))):

            item = np.load(os.path.join(path, cohort, patient, file))

            # Write to nrrd files
            print('Saving as nrrd file...')
            file_name = file.split('.')
            file_name = file_name[0] + '.nrrd'
            nrrd.write(os.path.join(dst_path, cohort, patient, file_name),
                       item)
Example 45
from skimage.morphology import ball, dilation
from time import time
import numpy as np
import nrrd
from cuda import dilation_mod

mask_path = '/data2/home/zhouxiangyong/Tmp/dilation.nrrd'
mask_cuda_path = '/data2/home/zhouxiangyong/Tmp/dilation_cuda.nrrd'

# mask = np.zeros((3, 3, 3), dtype=np.uint8)
# mask[1, 1, 1] = 1
mask = np.zeros((555, 555, 555), dtype=np.uint8)
mask[2, 2, 2] = 1

print("start skimage dilation")
start_t = time()
selem = ball(1)
mask_sk = dilation(mask, selem).astype(np.uint8)
end_t = time()
print("skimage time: {}".format(end_t - start_t))

print("start cuda dilation")
start_t = time()
mask_cuda = dilation_mod.dilation(mask.astype(bool)).astype(np.uint8)
end_t = time()
print("cuda time: {}".format(end_t - start_t))

nrrd.write(mask_path, mask_sk)
nrrd.write(mask_cuda_path, mask_cuda)
Example 46
    def test_invalid_index_order(self):
        output_filename = os.path.join(self.temp_write_dir,
                                       'testfile_invalid_index_order.nrrd')

        with self.assertRaisesRegex(nrrd.NRRDError, 'Invalid index order'):
            nrrd.write(output_filename, np.zeros((3, 9)), index_order=None)
    print('-------------')
    # Spheroid type level
    spheroid_dir = os.path.join(path_to_data, subdir1)
    spheroid_files = get_files_in_directory(spheroid_dir)
    for spheroid_file in spheroid_files:
        if spheroid_file.endswith('.nrrd'):
            print('Processing files:')
            spheroid_name = os.path.splitext(spheroid_file)[0]
            spheroid_file = os.path.join(spheroid_dir, spheroid_file)
            print('Current Spheroid: ', os.path.abspath(spheroid_file))
            subdirs2 = get_immediate_subdirectories(spheroid_dir)
            for subdir2 in subdirs2:
                res_dir = os.path.abspath(os.path.join(spheroid_dir, subdir2))
                files = get_files_in_directory(res_dir)
                for file in files:
                    if spheroid_name + '-centroids' in file and file.endswith('.nrrd'):
                        spheroid_file = os.path.abspath(spheroid_file)
                        centroids_file = os.path.join(os.path.abspath(spheroid_dir), subdir2, file)
                        print('Corresponding Centroids: ', centroids_file)
                        centroids, centroids_header = nrrd.read(centroids_file) # XYZ
                        gauss_centroids = impro.convolve_with_gauss(centroids, 50, 6)
                        nrrd_gauss_centroids_file = os.path.join(res_dir, spheroid_name+'-gauss_centroids.nrrd')
                        nrrd.write(nrrd_gauss_centroids_file, data=gauss_centroids, header=centroids_header, index_order='F')
                        # Log the number of cells, min and max in a table
                        spheroid_title = res_dir.split(os.path.sep)[2] + '->' + spheroid_name
                        table.append([spheroid_title, np.sum(gauss_centroids), np.min(gauss_centroids), np.max(gauss_centroids)])
                        
with open('gauss_cell_numbers_dataset2_mathematica_segmentations_filtered.txt','w') as file:
    for item in table:
        line = "%s \t %s \t %s \t %s\n" %(item[0], item[1], item[2], item[3])
        file.write(line)
Example 48
def run_generation():

    print("Reading:", FLAGS.img)
    img, head = nrrd.read(FLAGS.img)
    img = img.astype(float)

    imgmask = None
    if FLAGS.mask is not None:
        print("Reading:", FLAGS.mask)
        imgmask, headmask = nrrd.read(FLAGS.mask)

    imgsize = head["sizes"]
    splitimage = []
    splitimage_index = []

    for i in range(16, imgsize[0] - 16, 32):
        for j in range(16, imgsize[1] - 16, 32):
            for k in range(16, imgsize[2] - 16, 32):
                extract = True
                if imgmask is not None:
                    if imgmask[i][j][k] != 0:
                        extract = True
                    else:
                        extract = False

                if extract:
                    splitimage_index.append([i, j, k])
                    neigh = get_neighborhood(img, [i, j, k], 16)
                    splitimage.append(neigh)

    splitimage_index = np.array(splitimage_index)
    splitimage = np.array(splitimage).astype(float)
    splitimage = splitimage.reshape([
        splitimage.shape[0], splitimage.shape[1], splitimage.shape[2],
        splitimage.shape[3], 1
    ])
    batch_size = splitimage.shape[0]

    # construct the graph
    with tf.Graph().as_default():

        size = np.array([33, 33, 33, 1])

        # read the images and labels to encode for the generator network 'fake'
        fake_x = tf.placeholder(
            tf.float32, shape=[batch_size, size[0], size[1], size[2], size[3]])

        keep_prob = 1

        ps_device = "/gpu:0"
        w_device = "/gpu:0"
        # run the generator network on the 'fake' input images (encode/decode)
        with tf.variable_scope("generator") as scope:
            gen_x = nn.generator(fake_x,
                                 size,
                                 keep_prob,
                                 batch_size,
                                 ps_device=ps_device,
                                 w_device=w_device)

        # init to setup the initial values of the weights
        #init_op = tf.group(tf.initialize_all_variables(), tf.initialize_local_variables())

        init_op = tf.group(tf.global_variables_initializer(),
                           tf.local_variables_initializer())

        # create the session
        with tf.Session() as sess:

            sess.run(init_op)
            # setup a saver for saving checkpoints
            # setup the coordinato and threadsr.  Used for multiple threads to read data.
            # Not strictly required since we don't have a lot of data but typically
            # using multiple threads to read data improves performance
            start_training_time = time.time()

            generated = sess.run([gen_x],
                                 feed_dict={fake_x: splitimage
                                            })  # Update the discriminator
            # shut down the threads gracefully
            sess.close()
            end_training_time = time.time()

            generated = np.array(generated).reshape(splitimage.shape)

            for index, neigh in zip(splitimage_index, generated):
                for i in range(-16, 16):
                    for j in range(-16, 16):
                        for k in range(-16, 16):
                            img[i + index[0]][j + index[1]][
                                k + index[2]] = neigh[i + 16][j + 16][k + 16]

            print("Writing:", FLAGS.out)
            nrrd.write(FLAGS.out, img, head)
# print(type(clines))
# print(clines[0].shape)
# print(type(cline_ext))
# print(cline_ext.shape)
#
# print(cline.shape)
# print(cline)

print("for cline")
for idx in range(len_cline):
    x, y, z = tuple(int(elem) for elem in cline[idx])
    print("idx: {0}, x: {1}, y: {2}, z: {3}".format(idx, x, y, z))
    new_mask_base[x, y, z] = 1

skel_base = dilation(new_mask_base)

print("for cline_ext")
for idx in range(len_cline_longer):
    x, y, z = tuple(int(elem) for elem in cline_longer[idx])
    print("idx: {0}, x: {1}, y: {2}, z: {3}".format(idx, x, y, z))
    new_mask_ext[x, y, z] = 1

skel_ext = dilation(new_mask_ext)

skel_both = skel_base + skel_ext

# nrrd.write(CLINE_BASE_PATH, skel_base)
# nrrd.write(CLINE_EXT_PATH, skel_ext)
nrrd.write(CLINE_BOTH_PATH, skel_both)
import numpy as np
import nrrd
import os
from os import listdir
from os.path import join

files = sorted(listdir('../images'))

# Little script for changing type of nrrd files
for f in files:
    if f.endswith('netseg.nrrd'):
        netseg, opts = nrrd.read(join('../images', f))
        netseg = netseg.astype(np.float32)
        _OPTS = {
            'space': 'RAS',
            'space directions': [(1, 0, 0), (0, 1, 0), (0, 0, 3)]
        }
        nrrd.write(join('../images', f), netseg, options=_OPTS)
    if f.endswith('uncmcvar.nrrd'):
        uncmcvar, opts = nrrd.read(join('../images', f))
        uncmcvar = uncmcvar.astype(np.float32)
        _OPTS = {
            'space': 'RAS',
            'space directions': [(1, 0, 0), (0, 1, 0), (0, 0, 3)]
        }
        nrrd.write(join('../images', f), uncmcvar, options=_OPTS)
Example 51
import pydicom
import tifffile
import numpy as np
import nrrd
import pandas as pd


data_root = 'E:/to_path/'
save_root = 'E:/process/'

_, head = nrrd.read('D:/liver_CT/liver_label_CT/A_001_liver.nrrd')
datainformation = pd.read_csv('D:/1.CSV')

for i in range(243):
    data_name = datainformation.iloc[i]['DataName'][:-4]
    data = tifffile.imread(data_root + data_name + '.tif')
    data = data.transpose(2, 1, 0)

    if datainformation.iloc[i]['num'] > 1:
        data = data[:, :, :datainformation.iloc[i]['z']]

    head['space directions'][0, 0] = float(datainformation.iloc[i]['PixelSpacing'])
    head['space directions'][1, 1] = float(datainformation.iloc[i]['PixelSpacing'])
    head['space directions'][2, 2] = float(datainformation.iloc[i]['SliceThickness'])

    nrrd.write(save_root + data_name + '.nrrd', data, head)

        coilprofile = np.array(coilprofile)

        # load
        # RootSumSquaresCoil = np.sqrt(np.sum( coilprofile**2, axis=0 ))

        # solve LSQR problem
        S = coilprofile.reshape(coilprofile.shape[0],
                                np.prod(coilprofile.shape[1:])).T
        b = targetMask  #*RootSumSquaresCoil
        print S.shape
        print b.ravel().shape
        w = np.linalg.lstsq(S, b.ravel())
        print w
        print len(w)
        print w[0].shape

        # save weights
        np.savez(virtual_coilweigths_filename, w)

        # compute the virtual coil as the weighted sum of the coil profiles
        virtual_coil = np.zeros(coilprofile.shape[1:])
        for ii in range(coilprofile.shape[0]):
            virtual_coil += w[0][ii] * coilprofile[ii, :, :, :]

        nrrd.write(virtual_coil_filename, virtual_coil)
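
        # For reference: the accumulation loop above is one tensor contraction
        # over the coil axis (a sketch, assuming coilprofile has shape
        # (ncoils, x, y, z)):
        #   virtual_coil = np.tensordot(w[0], coilprofile, axes=(0, 0))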

Example no. 53
def single_prediction(path=img_path):
    image = np.asarray(Image.open(path).convert('L'), dtype=np.uint8)
    #image = ImgReader.read_directory(path, "png")

    # Add 1 additional axis for future Radiomics processing
    image = image[..., np.newaxis]
    label = np.ones(shape=image.shape)

    # Declare the destination of the data
    folder = "data/nrrd/" + folderName
    create_directory(folder)
    name_image = folderName + "_image_1.nrrd"
    name_label = folderName + "_label_1.nrrd"
    image_path_to = os.getcwd() + "/data/nrrd/" + folderName + sl + name_image
    label_path_to = os.getcwd() + "/data/nrrd/" + folderName + sl + name_label

    # Save the PNG-image as NRRD
    nrrd.write(image_path_to, image)
    nrrd.write(label_path_to, label)

    # Instantiate the extractor
    extractor = featureextractor.RadiomicsFeatureExtractor()

    extractor.disableAllFeatures()
    extractor.enableFeatureClassByName('firstorder')
    extractor.enableFeatureClassByName('glcm')
    extractor.enableFeatureClassByName('glrlm')
    extractor.enableFeatureClassByName('ngtdm')
    extractor.enableFeatureClassByName('gldm')
    extractor.enableFeatureClassByName('glszm')

    #print("Extraction parameters:\n\t", extractor.settings)
    #print("Enabled filters:\n\t", extractor._enabledImagetypes)
    #print("Enabled features:\n\t", extractor._enabledFeatures)

    # result -> ordered dict
    result = extractor.execute(image_path_to, label_path_to)
    toBeDeleted = [
        'diagnostics_Image-original_Dimensionality',
        'diagnostics_Versions_PyRadiomics', 'diagnostics_Versions_Numpy',
        'diagnostics_Versions_SimpleITK', 'diagnostics_Versions_PyWavelet',
        'diagnostics_Versions_Python', 'diagnostics_Configuration_Settings',
        'diagnostics_Configuration_EnabledImageTypes',
        'diagnostics_Image-original_Hash',
        'diagnostics_Image-original_Spacing',
        'diagnostics_Image-original_Size', 'diagnostics_Mask-original_Hash',
        'diagnostics_Mask-original_Spacing', 'diagnostics_Mask-original_Size',
        'diagnostics_Mask-original_BoundingBox',
        'diagnostics_Mask-original_VoxelNum',
        'diagnostics_Mask-original_VolumeNum',
        'diagnostics_Mask-original_CenterOfMassIndex',
        'diagnostics_Mask-original_CenterOfMass'
    ]
    for feature in toBeDeleted:
        del result[feature]

    # Have a look at the current data
    '''
    for key,val in result.items():
        print(key,":",val)
    '''

    df = pd.DataFrame(result, index=[0])
    df.to_csv(os.getcwd() + '/data/result/' + 'single.csv',
              sep=";",
              float_format=None)

    data = pd.read_csv("data/result/single.csv", sep=";")
    data = data.iloc[0:, 1:data.shape[1]]

    # load the model from disk
    model_name = 'Multi-layer Perceptron'
    accuracy = [77, 83, 90, 86, 84, 85, 85]
    label_ukr = [[
        "norm", "autoimmune hepatitis", "hepatitis B", "hepatitis C",
        "Wilson disease", "cystic fibrosis"
    ], ['NOT Wilson disease', 'Wilson disease'],
                 ['NOT hepatitis B', 'hepatitis B'],
                 ['NOT hepatitis C', 'hepatitis C'],
                 ['NOT autoimmune hepatitis', 'autoimmune hepatitis'],
                 ['pathology', 'norm'],
                 ['NOT cystic fibrosis', 'cystic fibrosis']]

    kind_ukr = [
        "all diseases", "Wilson disease VS all", "hepatitis B VS all",
        "hepatitis C VS all", "autoimmune hepatitis VS all",
        "norm VS pathologies", "cystic fibrosis VS all"
    ]
    poolParam = [
        "diagnosis_code", "iswls", "ishpb", "ishpc", "isauh", "isnorm", "iscf"
    ]
    text = "Classifier: <b>{0}</b><br><br>\n\n".format(model_name)

    models = [0, 5]
    for number in models:
        filename = 'data/result/model/' + model_name + ' ' + poolParam[
            number] + '.sav'
        file = open(filename, 'rb')
        loaded = pickle.load(file)
        print("Model <" + model_name + " " + poolParam[number] +
              "> was loaded")

        # Test the classifier
        y_pred = int(loaded.predict(data))
        part = 'Accuracy: <b>{1}</b>%<br>\nClassification: [{0}]<br>\n[RESULTS]: <b>{2}</b><br><br>\n\n'.format(
            kind_ukr[number], accuracy[number], label_ukr[number][y_pred])
        text = text + part
    text = '<font size="2"> {0} </font>'.format(text)
    print(text)
    return text


# single_prediction()
import numpy as np
import torch
import torch.nn.functional as F
import nrrd

phi, head = nrrd.read(phi_img)  # [x, y, z, channel]; phi_img, move, file_path are defined elsewhere in the original script
phi = np.expand_dims(phi, axis=0)
print(phi.shape)
#phi = np.expand_dims(phi, axis=0) # [batch, channel, x, y, z]
#phi = np.transpose(phi, (0,2,3,4,1))  # [batch, x, y, z, channel]

# Add a base grid to deformable field
grid = phi.shape[1:-1]
#base_grid = np.array(np.meshgrid(*[range(x) for x in grid], indexing='ij'))
base_grid = np.array(
    np.meshgrid(*[np.linspace(-1, 1, x) for x in grid], indexing='ij'))
base_grid = np.ascontiguousarray(np.moveaxis(base_grid, 0, -1))
#base_grid = np.asarray(base_grid)  # [channel, y, x, z]
#base_grid = np.transpose(base_grid, (2,1,3,0))  # [x, y, z, channel], bug, check base_grid shape
#base_grid = np.expand_dims(base_grid, axis=0)  # [batch, x, y, z, channel]
phi *= 2. / phi.shape[1]  # voxel displacements -> normalized [-1, 1] units (assumes a cubic volume)
phi += base_grid

# Scale to [-1,1]
#phi_min = phi.min(axis=(1,2,3), keepdims=True)
#phi_max = phi.max(axis=(1,2,3), keepdims=True)
#phi = (phi-phi_min) * 2 / (phi_max-phi_min) -1

phi = torch.from_numpy(phi).float()

warped = F.grid_sample(move, phi)  # `move`: [batch, channel, x, y, z] tensor defined earlier
warped_img = warped.numpy()[0, 0, :, :, :]

name = file_path + 'out_greg.nrrd'
nrrd.write(name, warped_img)
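
For reference, F.grid_sample expects sampling locations in [-1, 1] with the last grid channel ordered (x, y, z); a self-contained sketch (hypothetical shapes) showing that sampling at the base grid alone reproduces the input:

import torch
import torch.nn.functional as F

D, H, W = 8, 8, 8
move = torch.rand(1, 1, D, H, W)
axes = [torch.linspace(-1, 1, s) for s in (D, H, W)]
zz, yy, xx = torch.meshgrid(*axes, indexing='ij')
identity = torch.stack((xx, yy, zz), dim=-1).unsqueeze(0)  # [1, D, H, W, 3], (x, y, z) last
warped = F.grid_sample(move, identity, align_corners=True)
print(torch.allclose(warped, move, atol=1e-5))  # True: the identity warp returns the input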
Example no. 55
def snake(label, cluster, label_of_interest, N, propScaling):

    # Set the distance
    distance = sitk.SignedMaurerDistanceMapImageFilter()
    distance.InsideIsPositiveOff()
    distance.UseImageSpacingOn()

    # set the seed from the label and make the initial image from the seed
    seedImage = sitk.GetImageFromArray(label.astype(np.int16), isVector=False)

    initialImage = sitk.BinaryThreshold(distance.Execute(seedImage), -1000, 10)
    initialImage = sitk.Cast(initialImage, sitk.sitkFloat32) * -1 + 0.5

    # Setting up the feature image

    # determine which cluster has the maximum overlap with the label
    c_scores = []
    for k in range(np.amax(cluster) + 1):  # include the highest cluster label
        overlay = np.logical_and(label == label_of_interest, cluster == k)
        c_scores.append(np.sum(label[overlay]))

    c_val = np.argmax(c_scores)

    # emphasise the chosen cluster so it dominates the feature image
    cluster[cluster == c_val] = 8

    #cluster[cluster != label_of_interest] = 0
    #cluster[cluster == label_of_interest] = 10

    # turn cluster into sitk image and blur it
    featureImage = sitk.GetImageFromArray(cluster.astype(np.int16),
                                          isVector=False)

    gradientMagnitude = sitk.GradientMagnitudeRecursiveGaussianImageFilter(
    )  # feature image needs blurring for some reason
    gradientMagnitude.SetSigma(0.05)  # sigma is the blurring factor

    featureImage = sitk.BoundedReciprocal(
        gradientMagnitude.Execute(featureImage))
    # featureImage = sitk.InvertIntensity(featureImage, 1)
    featureImage = sitk.Cast(featureImage, sitk.sitkFloat32)

    test_feat_arr = sitk.GetArrayFromImage(featureImage)
    nrrd.write(
        'C:/Users/u5823099/Anaconda3/Lib/site-packages/lama/tests/test_data/reg_fixer_data/inverted_masks/2086980_feat_arr.nrrd',
        test_feat_arr)

    # sitk.WriteImage(featureImage, 'C:/Users/u5823099/Anaconda3/Lib/site-packages/lama/tests/test_data/reg_fixer_data/inverted_masks/2086980_feature.nrrd')
    # perform the active contour
    geodesicActiveContour = sitk.GeodesicActiveContourLevelSetImageFilter()
    geodesicActiveContour.SetPropagationScaling(propScaling)
    geodesicActiveContour.SetCurvatureScaling(0.15)
    geodesicActiveContour.SetAdvectionScaling(0.15)
    geodesicActiveContour.SetMaximumRMSError(0.01)
    geodesicActiveContour.SetNumberOfIterations(N)

    levelset = geodesicActiveContour.Execute(initialImage, featureImage)

    # convert the output of the active contour (i.e. the levelset) to a binary image
    bi_image = sitk.BinaryThreshold(levelset, -1000, 0)

    return bi_image
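
A hypothetical call, assuming `label` and `cluster` are integer numpy volumes of the same shape, 1 is the label of interest, and the iteration count and propagation scaling are placeholder values:

bi = snake(label, cluster, label_of_interest=1, N=300, propScaling=2.0)
seg = sitk.GetArrayFromImage(bi).astype(np.uint8)  # back to numpy for saving
nrrd.write('snake_seg.nrrd', seg)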
for dim0 in range(patches.shape[0]):
    for dim1 in range(patches.shape[1]):
        for dim2 in range(patches.shape[2]):
            # Extract patch
            patch = patches[dim0, dim1, dim2]
            # Calculate the position to place this patch in the reconstructed volume
            start_dim0 = dim0*stride_slices
            end_dim0 = start_dim0 + patch_slices
            start_dim1 = dim1*stride_rows
            end_dim1 = start_dim1 + patch_rows
            start_dim2 = dim2*stride_cols
            end_dim2 = start_dim2 + patch_cols
            # accumulate the patch, then halve to blend overlaps
            # (see the count-based averaging sketch below)
            reconstructed[start_dim0:end_dim0, start_dim1:end_dim1, start_dim2:end_dim2] += patch
            reconstructed[start_dim0:end_dim0, start_dim1:end_dim1, start_dim2:end_dim2] /= 2
            
            #plt.figure()
            #plt.imshow(patch[:,:,1])
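
Halving after every addition only approximates an average when at most two patches cover a voxel; a count-based mean is more robust. A minimal sketch with the same variables, assuming `reconstructed` starts zeroed (the impro helper below may already do this internally):

weight = np.zeros_like(reconstructed)  # number of patches covering each voxel
for dim0 in range(patches.shape[0]):
    for dim1 in range(patches.shape[1]):
        for dim2 in range(patches.shape[2]):
            s0, s1, s2 = dim0 * stride_slices, dim1 * stride_rows, dim2 * stride_cols
            reconstructed[s0:s0 + patch_slices, s1:s1 + patch_rows, s2:s2 + patch_cols] += patches[dim0, dim1, dim2]
            weight[s0:s0 + patch_slices, s1:s1 + patch_rows, s2:s2 + patch_cols] += 1
reconstructed /= np.maximum(weight, 1)  # average without dividing by zero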


reconstructed = impro.restore_volume_from_overlapped_patches(patches, (127,127,127), (stride_slices, stride_rows, stride_cols))
            
nrrd.write('test.nrrd', np.transpose(reconstructed, axes=(2,1,0)))
Example no. 57
    def main(self):
        self.out = local.path(self.out)
        if self.out.exists():
            if self.overwrite:
                self.out.delete()
            else:
                logging.error(
                    "{} exists, use '--force' to overwrite it".format(
                        self.out))
                sys.exit(1)
        outxfms = self.out.dirname / self.out.stem + '-xfms.tgz'
        with TemporaryDirectory() as tmpdir, local.cwd(tmpdir):
            tmpdir = local.path(tmpdir)

            # fileinput() caused trouble reading data file in python 3, so switching to nrrd
            # if the hdr has 'nan' in space origin, the following will take care of that
            img = nrrd.read(self.dwi)
            dwi = img[0]
            hdr = img[1]

            hdr_out = hdr.copy()
            hdr_out['space origin'] = hdr_out['space origin'][0:3]

            nrrd.write('dwijoined.nhdr',
                       dwi,
                       header=hdr_out,
                       compression_level=1)

            # we want to use this hdr to write a new .nhdr file with corresponding data file
            # so delete old data file from the hdr
            if 'data file' in hdr_out.keys():
                del hdr_out['data file']
            elif 'datafile' in hdr_out.keys():
                del hdr_out['datafile']

            if 'content' in hdr_out.keys():
                del hdr_out['content']

            logging.info('Dice the DWI')

            # Since fslmerge works along the 3rd axis only, dicing also has to be along that axis
            # So, use `unu permute` to reorient the volumes to be stacked along 3rd axis only
            # Include this issue in the tutorial
            (unu['convert', '-t', 'int16', '-i', 'dwijoined.nhdr']
             | unu['dice', '-a', '3', '-o', 'Diffusion-G'])()
            vols = tmpdir.glob('Diffusion-G*.nrrd')
            vols.sort()

            logging.info('Extract the B0')
            bse_py('-i', 'dwijoined.nhdr', '-o', 'b0.nrrd')
            ConvertBetweenFileFormats('b0.nrrd', 'b0.nii.gz', 'short')

            logging.info('Register each volume to the B0')

            # use the following multi-processed loop
            pool = Pool(int(self.nproc))
            res = pool.map_async(_Register_vol, vols)
            volsRegistered = res.get()
            pool.close()
            pool.join()

            # or use the following for loop
            # volsRegistered = []
            # for vol in vols:
            #     volnii = vol.with_suffix('.nii.gz')
            #     ConvertBetweenFileFormats(vol, volnii, 'short')
            #     logging.info('Run FSL flirt affine registration')
            #     flirt('-interp' ,'sinc'
            #           ,'-sincwidth' ,'7'
            #           ,'-sincwindow' ,'blackman'
            #           ,'-in', volnii
            #           ,'-ref', 'b0.nii.gz'
            #           ,'-nosearch'
            #           ,'-o', volnii
            #           ,'-omat', volnii.with_suffix('.txt', depth=2)
            #           ,'-paddingsize', '1')
            #     volsRegistered.append(volnii)

            fslmerge('-t', 'EddyCorrect-DWI', volsRegistered)
            transforms = tmpdir.glob('Diffusion-G*.txt')
            transforms.sort()

            # nibabel loading can be avoided by setting 'data file' = EddyCorrect-DWI.nii.gz
            # and 'byteskip' = -1
            # Tashrif updated Pynrrd package to properly handle that
            new_dwi = nib.load('EddyCorrect-DWI.nii.gz').get_data()

            logging.info('Extract the rotations and realign the gradients')

            space = hdr_out['space'].lower()
            if space.startswith('left'):  # e.g. 'left-posterior-superior' (LPS -> RAS flip)
                spctoras = np.matrix([[-1, 0, 0], [0, -1, 0], [0, 0, 1]])
            else:
                spctoras = np.matrix([[1, 0, 0], [0, 1, 0], [0, 0, 1]])
            mf = np.matrix(hdr['measurement frame'])

            # Transforms are in RAS so need to do inv(MF)*inv(SPC2RAS)*ROTATION*SPC2RAS*MF*GRADIENT
            mfras = mf.I * spctoras.I
            rasmf = spctoras * mf
            for (i, t) in enumerate(transforms):

                gDir = [
                    float(num) for num in hdr_out['DWMRI_gradient_' +
                                                  '{:04}'.format(i)].split(' ')
                    if num
                ]

                logging.info('Apply ' + t)
                tra = np.loadtxt(t)
                # removes the translation
                aff = np.matrix(tra[0:3, 0:3])
                # computes the finite strain of aff to get the rotation
                rot = aff * aff.T
                # compute the square root of rot
                [el, ev] = np.linalg.eig(rot)
                eL = np.identity(3) * np.sqrt(el)
                sq = ev * eL * ev.I
                # finally the rotation is defined as
                rot = sq.I * aff
                newdir = np.dot(mfras * rot * rasmf, gDir)

                hdr_out['DWMRI_gradient_' + '{:04}'.format(i)] = ('   ').join(
                    str(x) for x in newdir.tolist()[0])

            tar('cvzf', outxfms, transforms)

            nrrd.write(self.out, new_dwi, header=hdr_out, compression_level=1)

            if self.debug:
                tmpdir.copy(
                    join(dirname(self.out), "eddy-debug-" + str(getpid())))
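
For reference, the rotation step above is the finite-strain method: the rotation part of an affine A is R = (A A^T)^(-1/2) A. A self-contained sketch with a hypothetical affine (rotation composed with anisotropic scaling):

import numpy as np
from scipy.linalg import sqrtm

theta = 0.1
Rz = np.array([[np.cos(theta), -np.sin(theta), 0],
               [np.sin(theta),  np.cos(theta), 0],
               [0, 0, 1]])
A = Rz @ np.diag([1.1, 0.9, 1.0])               # scaling followed by rotation
R = np.real(np.linalg.inv(sqrtm(A @ A.T))) @ A  # finite-strain rotation
print(np.allclose(R, Rz))                       # True: the scaling is stripped off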
Example no. 58
def gen_stats(gt_path, recon_path, noisy_path, name):
    gt_list = gen_file_list(gt_path)
    recon_list = gen_file_list(recon_path)
    noisy_list = gen_file_list(noisy_path)

    _mse_array = []
    _psnr_array = []
    _ssim_array = []
    _ms_ssim_array = []
    current_best = 0
    current_worst = 1

    for (gt, recon, noisy) in zip(gt_list, recon_list, noisy_list):
        X = load_data(gt, gt_path)
        Y = load_data(recon, recon_path)
        Z = load_data(noisy, noisy_path)

        X = np.array(X)
        Y = np.array(Y)
        Z = np.array(Z)
        X = np.squeeze(X)
        Y = np.squeeze(Y)
        Z = np.squeeze(Z)

        for i in range(X.shape[0]):
            _mse_array.append(_MSE(X[i], Y[i]))
            _psnr_array.append(PSNR(X[i], Y[i]))
            _ssim_array.append(SSIM(X[i], Y[i]))
            # fig, ax = plt.subplots(1, 2, figsize=(12, 6))
            # ax[0].imshow(X[i], cmap='gray')
            # ax[1].imshow(Y[i], cmap='gray')
            # plt.show()

            x = X[i, :, :, np.newaxis]
            y = Y[i, :, :, np.newaxis]
            _ms_ssim = get_value(tf.image.ssim_multiscale(x, y, 2))
            _ms_ssim_array.append(_ms_ssim)

            if (_ms_ssim > current_best):
                current_best = _ms_ssim
                best_x = X[i]
                best_y = Y[i]
                best_z = Z[i]
            if (_ms_ssim < current_worst):
                current_worst = _ms_ssim
                worst_x = X[i]
                worst_y = Y[i]
                worst_z = Z[i]
    nrrd.write(recon_path + '/best_gt_{}.nrrd'.format(name),
               best_x,
               compression_level=1,
               index_order='C')
    nrrd.write(recon_path + '/best_recon_{}.nrrd'.format(name),
               best_y,
               compression_level=1,
               index_order='C')
    nrrd.write(recon_path + '/best_noisy_{}.nrrd'.format(name),
               best_z,
               compression_level=1,
               index_order='C')

    nrrd.write(recon_path + '/worst_gt_{}.nrrd'.format(name),
               worst_x,
               compression_level=1,
               index_order='C')
    nrrd.write(recon_path + '/worst_recon_{}.nrrd'.format(name),
               worst_y,
               compression_level=1,
               index_order='C')
    nrrd.write(recon_path + '/worst_noisy_{}.nrrd'.format(name),
               worst_z,
               compression_level=1,
               index_order='C')

    stats = np.empty((6, 4))
    stats[:, 0] = calc_stats(_mse_array)
    stats[:, 1] = calc_stats(_psnr_array)
    stats[:, 2] = calc_stats(_ssim_array)
    stats[:, 3] = calc_stats(_ms_ssim_array)

    np.savetxt(recon_path + '/nvidia.csv', stats, delimiter=',')
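
The MSE/PSNR/SSIM helpers used above are defined elsewhere; a minimal PSNR sketch for reference, assuming inputs scaled to a known data range:

import numpy as np

def psnr(x, y, data_range=1.0):
    # peak signal-to-noise ratio in dB
    mse = np.mean((np.asarray(x, np.float64) - np.asarray(y, np.float64)) ** 2)
    return np.inf if mse == 0 else 10 * np.log10(data_range ** 2 / mse)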
Example no. 59
# Make isotropic voxels. Distinction needed, so that
# 48h->untreated_3 and 72h->untreated_1 have the same z-size as
# the corresponding OpenSegSPIM data

resampled_image = impro.make_image_isotropic(image, interpolator, 0)

#print(resampled_image.GetOrigin())
#print(resampled_image.GetDirection())
#print(resampled_image.GetSpacing())

# Get a numpy array from the resampled SimpleITK image
np_image = sitk.GetArrayFromImage(resampled_image)

# Transpose the numpy array from ZYX back to XYZ
np_image = np.transpose(np_image, axes=[2, 1, 0])  # XYZ
np_image = np_image.astype('uint8')

new_spacing = resampled_image.GetSpacing()
header = {"spacings": [new_spacing[0], new_spacing[1], new_spacing[2]],
          "dimension": np_image.ndim,
          "type": "uchar",
          "sizes": [resampled_image.GetWidth(), resampled_image.GetHeight(),
                    resampled_image.GetDepth()],
          "units": ['"microns"', '"microns"', '"microns"']}
name = os.path.splitext(filename)[0]
new_filename = os.path.join(data_dir, name)
new_filename = new_filename + '_8_bit' + '.nrrd'
nrrd.write(new_filename, data=np_image, header=header, index_order='F')
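
As a quick sanity check, the file can be read back with the matching index order (a sketch; index_order requires pynrrd >= 0.4):

img, hdr = nrrd.read(new_filename, index_order='F')
assert img.shape == np_image.shape  # still XYZ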
Example no. 60
#!/Library/Frameworks/Python.framework/Versions/2.7/bin/python

import os
import sys
import subprocess
import numpy as np
import nrrd

# directory of non-binary mask files, given as the first command-line argument
nonBinaryPath = sys.argv[1]
#destPath = r'BinaryMasks'
#os.makedirs(destPath)
files = os.listdir(nonBinaryPath)
for file in files:
    infilename, extname = os.path.splitext(file)
    #print "Extenstion: ",extname
    if extname == '.nrrd':
        print "Reading file", infilename, extname
        readPath = os.path.join(nonBinaryPath, file)
        readData, options = nrrd.read(readPath)
        binaryPath = infilename + '-bin' + extname
        binaryData = np.where(readData > 0, 1, readData)
        nrrd.write(binaryPath, binaryData, options)

print "Done"