Example #1
 def __init__(self, file, csv_file, nuclei_channel=1):
     self.raw_im = np.squeeze(czi.imread(file))[nuclei_channel]
     self.labeled_spots_im = np.squeeze(czi.imread(file))[0]
     self.mask_1 = cv2.threshold(
         self.raw_im,
         skimage.filters.threshold_otsu(self.raw_im) + 152, 1,
         cv2.THRESH_BINARY)[1]
     self.nuclei_image = skimage.exposure.equalize_hist(
         morphology.white_tophat(
             np.squeeze(czi.imread(file))[nuclei_channel], square(70)),
         mask=self.mask_1)
     self.corrected_ni = np.multiply(
         np.divide(self.nuclei_image, np.amax(self.nuclei_image)), 255)
     self.otsu = skimage.filters.threshold_otsu(self.corrected_ni)
     self.dtype = str(self.nuclei_image.dtype)
     self.nuclei_mask = cv2.threshold(self.corrected_ni, self.otsu, 1,
                                      cv2.THRESH_BINARY)[1]
     self.nuclei_mask_dense = cv2.threshold(self.corrected_ni,
                                            self.otsu + 80, 1,
                                            cv2.THRESH_BINARY)[1]
     self.nuc_chan = nuclei_channel
     self.input_shape = self.nuclei_image.shape
     self.filename = file
     self.csv_file = csv_file
     self.spatial_df = pd.read_csv(csv_file)
     print('csv: \n', self.csv_file)
     print('image: \n', self.filename)
Example #2
def adaptive_imread(fname: str) -> np.ndarray:
    """ Opens images depending on filetype. """
    if fname.endswith(EXTENSIONS[0]):
        return czifile.imread(fname)
    if any(fname.endswith(i) for i in EXTENSIONS[1:]):
        return skimage.io.imread(fname)
    raise ValueError(f"File {fname} not of type {EXTENSIONS}")
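# Usage sketch (not from the original source). EXTENSIONS is assumed to be a tuple
# whose first entry is the czi suffix, since adaptive_imread only relies on that order;
# czifile, skimage.io and numpy are assumed to be imported by the surrounding module.
EXTENSIONS = (".czi", ".tif", ".tiff", ".png")   # assumed definition

img = adaptive_imread("sample_stack.czi")        # hypothetical path
print(img.shape, img.dtype)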
Example #3
def test(row):
    #Select current czi
    file = row
    #Read in current czi and select channels
    image = czi.imread(folder + '/' + file)
    image1 = image[0,0,0,0,:,:,:,0]
    image2 = image[0,0,1,0,:,:,:,0] #changes to work with new images
    #Create new baseline subtraction images
    image1filtered = np.where(image1>wmbls1,image1-wmbls1,0)
    image2filtered = np.where(image2>wmbls2,image2-wmbls2,0)
    del image
    #Perform channel 1 and then channel 2 spots detection 
    for i in range (1,3):
        if i==1:
            image_name = image1filtered
            image_name_raw = image1
            image_other_raw = image2
        
        elif i==2:
            image_name = image2filtered
            image_name_raw = image2
            image_other_raw =image1
        file_name = file[0:-4] #re.search(name_start, file)
        name = file_name#.group(1)
        #nameint = int(name[1:4])
        file_name = (name + 'channel_' + str(i))#(file_name.group(1) + 'channel_' + str(i))
        spots_detection(image_name,image_name_raw,file_name,image_other_raw,out_name,path)
Example #4
    def read_image_c3(file_name):
        ''' Read in the image from the file list for channel 3'''
        # read in the image
        if type(file_name) == str:
            if file_name[-3:] == 'tif':
                img = io.imread(file_name)
            elif file_name[-3:] == 'czi':
                img = czifile.imread(file_name)
                img = img[0,0,2,:,0,:,:,0]
            else:
                raise Exception("Not a valid string")

        # if the image is a timelapse reset the time slider max
        if len(img.shape) > 2:
            time_pt.max = img.shape[0] - 1

        # set min and maximum values on the sliders based on the image
        img_min = np.min(img)
        img_max = np.max(img)

        inten_min_c3.value = img_min
        inten_max_c3.value = img_max
        inten_min_c3.min = img_min
        inten_min_c3.max = img_max
        inten_max_c3.min = img_min
        inten_max_c3.max = img_max

        return img, img_min, img_max
Example #5
    def test_jpegxr_16b_read_block(self):
        """
        Read a block of jpegxr compressed 16 bit image.

        Reads a block from a slide and compares it
        with a block read by czifile package.
        Small deviations can be explained by a different
        order of tile processing: the czi file contains
        overlapping tiles, and rasters can differ slightly
        in the overlapped areas (depending on tile order).
        """
        # Image to test
        image_path = get_test_image_path("czi", "jxr-16bit-4chnls.czi")
        slide = slideio.open_slide(image_path, "CZI")
        self.assertTrue(slide is not None)
        scene = slide.get_scene(0)
        self.assertTrue(scene is not None)
        image_raster = scene.read_block()
        reference_raster = np.transpose(
            czi.imread(image_path)[0, 0, 0, :, :, :, 0], (1, 2, 0))
        image_raster = image_raster[:, :, 0]
        reference_raster = reference_raster[:, :, 0]
        # calculate square normed errors
        scores_sq = cv.matchTemplate(reference_raster.astype(np.float32),
                                     image_raster.astype(np.float32),
                                     cv.TM_SQDIFF_NORMED)[0][0]
        # call structural difference score
        scores_cf = cv.matchTemplate(reference_raster.astype(np.float32),
                                     image_raster.astype(np.float32),
                                     cv.TM_CCOEFF_NORMED)[0][0]
        self.assertLess(0.99, scores_cf)
        self.assertLess(scores_sq, 0.002)
Example #6
def load_czi_image(filename: str) -> np.ndarray:
    """Load the czi image, and remove useless dimensions if needed.

    :param filename: path to the file to load.
    :return: the image array.
    """
    image = czifile.imread(filename)
    return np.squeeze(image)
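# For reference (not from the original source): czifile.imread usually returns several
# singleton axes, which is exactly what the squeeze above removes. The shapes in the
# comments are illustrative only; the path is hypothetical.
raw = czifile.imread("example.czi")
print(raw.shape)                              # e.g. (1, 1, 3, 1, 1376, 1104, 1)
print(load_czi_image("example.czi").shape)    # e.g. (3, 1376, 1104) after squeezing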
Example #7
def convert_to_png(image):
    img = imread(image)
    img = img.transpose((2, 3, 1, 0, 4))
    img = img.squeeze()

    greenblue_ch = np.stack(
        (np.zeros(img.shape[0:2], dtype='uint8'),
         img[:, :, 0] / img[:, :, 0].max(), img[:, :, 1] / img[:, :, 1].max()),
        axis=2)

    plt.imsave(values["-IN-"][:-4] + 'gb.png', greenblue_ch)
Example #8
    def __init__(self, fname, sigma):
        self.sigma = sigma  # save aspect for later
        im = czifile.imread(fname)
        im = np.squeeze(im)  # drop singleton (size-1) dimensions

        smooth_im = []
        for i in range(len(im[:, 1, 1, 1])):
            smooth_im.append(
                gaussian(im[i, :, :, :], sigma, multichannel=False))
        smooth_im = np.asarray(smooth_im)

        self.smoothed_im = smooth_im
Example #9
def predict(path):
    try:
        model = load_model('tam4_model.h5')
        img = imread(path)
        img = img.reshape(1038, 1388, 1)
        img = resize(img, (224, 224))
        label = argmax(model.predict(img.reshape(-1, 224, 224, 1)))
        print(path, '->', LABELS[label])
    except Exception as e:
        print(path, '->', e)

    sleep(20)
Example #10
def read_image(image_path, rgb_order):
    #return image shape (n_channels, x, y, z)
    image = czifile.imread(image_path)
    image = image.squeeze()
    image = image.transpose((0, 2, 3, 1))
    uint16_max = 65535

    image_r = ((np.expand_dims(image[rgb_order.index('r')], axis=0) /
                uint16_max)).astype(np.float32)
    image_g = ((np.expand_dims(image[rgb_order.index('g')], axis=0) /
                uint16_max)).astype(np.float32)
    image_b = np.zeros(
        (1, image.shape[1], image.shape[2], image.shape[3])).astype(np.float32)
    image = np.concatenate((image_r, image_g, image_b), axis=0)

    return image
Example #11
    def getSerie(self, serie_number):
        '''
        Get the defined image stack from the file,
        rearrange into a more friendly format
        compatible with the rest of yaggie
        Converts lightsheet data from 16 bit to 8 bit
        '''
        if self.type == 'Stack':
            raise Exception('CZI file already defined as a stack, if you want to read a series use cziReader.getSeries(Serie_number)')
        self.type = 'Series'

        serie = imread(self.directory)[serie_number,0,0,0,0].astype(np.uint8)
        self.dimensions = serie.shape
        #Returns as:     [c, t, z, x, y, ?]

        return serie
Example #12
def import_images(files, sharp=False, sharp_channel=0, czi=False, order=None):
    """Imports files from list of files.

    Args:
        files: Image files. For czi a single filename. For others a list of all channels.
        sharp (bool): If the sharpest slice should be used.
        sharp_channel (int): Channel to determine the sharpest slice on.
        czi (bool): If the image is .czi.
        order (list): Order of channels.

    Returns:
        img (nd.array): Image array of shape (z, x, y)
    """
    if czi:
        img_import = czifile.imread(files)
        # File shape – (1, 3, 30, 2213, 2752, 1)
        if sharp:
            img_sharp = sharp(img_import[0, sharp_channel, :, :, :, 0])
            img_import_stack = [
                img_import[0, l, img_sharp, :, :, 0]
                for l in range(img_import.shape[1])
            ]
        else:
            # Max project
            img_import_stack = [
                np.max(img_import[0, l, :, :, :, 0], axis=0)
                for l in range(img_import.shape[1])
            ]
    else:
        img_import = [io.imread(files[i]) for i in range(len(files))]
        if sharp:
            img_sharp = sharp(img_import[sharp_channel])
            img_import_stack = [
                img_import[i][img_sharp] for i in range(len(img_import))
            ]
        else:
            # Max project
            img_import_stack = [
                np.max(img_import[l], axis=0) for l in range(len(img_import))
            ]
    # Combine all channels
    img = np.stack(img_import_stack, axis=0)
    if order:
        img = [img[i] for i in order]
    return img
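# Usage sketch (not from the original source; file names are hypothetical).
# Note: with sharp=True the function also calls sharp() as a helper, so the flag
# presumably shadows a module-level function of the same name; this sketch only
# exercises the max-projection path.
img = import_images("cells.czi", czi=True)              # one czi file, all channels
img_reordered = import_images(["ch0.tif", "ch1.tif"],   # one tif per channel
                              order=[1, 0])             # returned as a reordered list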
Example #13
	def import_data_fn(self):
		deltat= 1000/float(self.text_1)
		data_array = czi_fn.imread(str(filename))
		scanObject(filename,par_obj,[deltat,float(self.text_2)/1000000],data_array,0,0);
		win_obj.bleachCorr1 = False
		win_obj.bleachCorr2 = False
		win_obj.DeltatEdit.setText(str(deltat));
		win_obj.label.generateList()
		
		self.win_obj.image_status_text.showMessage("Correlating carpet: File " +str(self.win_obj.file_import.file_index+1)+' of '+str(self.win_obj.file_import.file_list.__len__()))
		self.win_obj.app.processEvents()

		if win_obj.last_in_list == False:
			print( 'moving to next file')
			win_obj.file_import.load_next_file()
		else:
			print ('finished with all files')
			win_obj.file_import.post_initial_import()
Example #14
    def getStack(self):
        '''
        Get the  image stack from the file,
        rearrange into a more friendly format
        more compatible with the rest of yaggie
        Converts lightsheet data from 16 bit to 8 bit
        '''

        if self.type == 'Series':
            raise Exception('CZI file already defined as a series, if you want to read a stack use cziReader.getStack()')
        self.type = 'Stack'

        #Original dimensions:     [image_no?, ?, c, t, z?, y, x, z?]        
        #Returns as:             [c, t, z, x, y, ?]
        stack            = imread(self.directory)[0][0].astype(np.uint8)
        self.dimensions = stack.shape

        return stack
Example #15
def loadData(brain):
    print 'loading data...'
    # order = [c,t,x,y,z]
    fileName = brain.fileName
    scaledFileName = os.path.join(brain.dataDir,'scaledData.h5')
    if os.path.exists(scaledFileName):
        brain.data = filing.readH5(scaledFileName,hierarchy='DS1')
    else:
        if fileName[-3:] == 'lsm':
            im = tifffile.imread(fileName).astype(n.uint16)
            dimx,dimy,dimz,dimt,dimc = 3,4,1,0,2
            im = misc.sortAxes(im,[dimc,dimt,dimx,dimy,dimz])
        if fileName[-3:] == '.h5':
            im = filing.readH5Image(fileName).astype(n.uint16)
        if fileName[-3:] == 'czi':
            import czifile
            #pdb.set_trace()
            im = czifile.imread(fileName)
            if len(im.shape) == 7:
                im = im[0,:,:,:,:,:,0]
            elif len(im.shape) == 11:
                im = im[0,0,0,0,0,:,:,:,:,:,0]
            else: raise(Exception('look at czi file shape'))
            brain.spacing = zf.getStackInfoFromCZI(fileName)['spacing']
            #pdb.set_trace()
        #pdb.set_trace()
        if (brain.scaleFactors[0] == 1) and ((brain.scaleFactors[1] == 1) and (brain.scaleFactors[2] == 1)):
            print 'no scaling'
        else:
            print 'scaling data by factors %s' %brain.scaleFactors
            scaledData = []
            for ic in range(im.shape[0]):
                tmpC = []
                #for it in range(im.shape[1]):
                # print 'only taking 30!'
                for it in range(im.shape[1]):
                    tmpC.append(sitk.gafi(beads.scaleStack(1/brain.scaleFactors[::-1],sitk.gifa(im[ic][it]))))
                scaledData.append(n.array(tmpC))
            im = n.array(scaledData)
        brain.data = n.array(im)
        filing.arrayToH5Raw(brain.data,scaledFileName)
    brain.dimc,brain.dimt,brain.dimx,brain.dimy,brain.dimz = brain.data.shape
    return
Example #16
    def __init__(self, raw_data_fname, chs_spts_nucs):

        a      =  czifile.CziFile(raw_data_fname)                                                                                   # read info about pixel size
        b      =  a.metadata()

        start           =  b.find("ScalingZ")
        end             =  b[start + 9:].find("ScalingZ")
        self.pix_sizeZ  =  np.round(float(b[start + 9:start + 7 + end]) * 1000000, decimals=4) 

        start           =  b.find("ScalingX")
        end             =  b[start + 9:].find("ScalingX")
        self.pix_sizeX  =  np.round(float(b[start + 9:start + 7 + end]) * 1000000, decimals=4) 

        filedata  =  np.squeeze(czifile.imread(raw_data_fname))

        if chs_spts_nucs[0] != -1:
            self.spts_a  =  filedata[chs_spts_nucs[0], :, :, :]
            for z in range(self.spts_a.shape[0]):                          # both are rotated and mirrored to match ImageJ standards
                self.spts_a[z, :, :]  =  np.rot90(self.spts_a[z, :, ::-1])


        if chs_spts_nucs[1] != -1:
            self.spts_b    =  filedata[chs_spts_nucs[1], :, :, :]
            for z in range(self.spts_b.shape[0]):                          # both are rotated and mirrored to match ImageJ standards
                self.spts_b[z, :, :]  =  np.rot90(self.spts_b[z, :, ::-1])

        if chs_spts_nucs[2] != -1:
            self.nucs    =  filedata[chs_spts_nucs[2], :, :, :]
            for z in range(self.nucs.shape[0]):                          # both are rotated and mirrored to match ImageJ standards
                self.nucs[z, :, :]  =  np.rot90(self.nucs[z, :, ::-1])

            self.nucs_mip  =  np.zeros(self.nucs.shape[1:])                 # nuclei mip image
            for x in range(self.nucs.shape[1]):
                self.nucs_mip[x, :]    =  filedata[chs_spts_nucs[2], :, x, :].max(0)
                
        else:                                                               # if nuclei channel is not asked, it will be a zeros matrix with the proper shape
            if chs_spts_nucs[0] != -1:
                self.nucs_mip  =  np.zeros(self.spts_a.shape[1:])
            elif chs_spts_nucs[1] != -1:
                self.nucs_mip  =  np.zeros(self.spts_b.shape[1:])
Example #17
def build_rawdata(homedir):
    imgname = '/net/fileserver-nfs/stornext/snfs2/projects/myersspimdata/Mauricio/for_coleman/ph3_labels_trainingdata_07_10_2018/trainingdata_ph3labels_hspph2bandbactinlap2bline_fish6_justph3andh2b.czi'
    img = czifile.imread(imgname)
    img = img[0, 0, 0, 0, 0, :, 0, :, :, :, 0]  # CZYX
    img = perm(img, "CZYX", "ZYXC")
    shrink = 8
    sh = np.array(img.shape)[:-1] // shrink
    ss = patchmaker.se2slices(sh, (shrink - 1) * sh)
    img = img[ss]
    img = norm(img)

    # img2 = img[ss]
    # img2 = img2 / img2.max(axis=(0,1,2), keepdims=True)

    sig = 20
    wid = 40

    def f(x):
        return np.exp(-(x * x).sum() / (2 * sig**2))

    kern = math_utils.build_kernel_nd(wid, 3, f)
    kern = kern[::5]  ## anisotropic kernel matches img
    kern = kern / kern.sum()

    img[..., 0] = fftconvolve(img[..., 0], kern, mode='same')
    # img = img / img.mean(axis=(0,1,2), keepdims=True)

    r = 1  ## xy downsampling factor
    imgsem = {
        'axes': "ZYXC",
        'ph3': 0,
        'h2b': 1,
        'n_channels': 2,
        'r': r
    }  ## image semantics

    res = dict()
    res['img'] = img[:, ::r, ::r]
    res['imgsem'] = imgsem
    return res
Example #18
def interactive_plot(channel_check, R,G,B, img, t,intensity_minimum, intensity_maximum, c,inverted_check):
    '''
    Uses the RGB values and intensity values to display an RGB image of the given image
    '''
    
    # check if the channel is included in the overlay
    if channel_check:
        # make the colormaps based on the RGB values
        cmp, icmp = make_colormap((R,G,B),'COLOR',False)

        # determine whether the colormap is inverted
        if inverted_check:
            cmp = icmp
        # Read in the image based on the filename 
        if img[-3:] == 'tif':
            img = io.imread(img)
        elif img[-3:] == 'czi':
            img = czifile.imread(img)
            img = img[0,0,c,:,0,:,:,0]

        # Determine if it's a timelapse and display the image
        if len(img.shape) > 2:
            # if it's a timelapse only plot the relevant time point
            fig = plt.figure(figsize=(5,5))
            plt.imshow(img[t], cmap = cmp, vmin = intensity_minimum, vmax = intensity_maximum)
            plt.axis('off')
        else:
            fig = plt.figure(figsize=(5,5))
            plt.imshow(img, cmap = cmp, vmin = intensity_minimum, vmax = intensity_maximum)
            plt.axis('off')
    else:
        # if channel is not selected display a placeholder
        fig = plt.figure(figsize=(5,5))
        plt.imshow(np.zeros((100,100)), cmap='Greys')
        plt.text(40,50,'NO IMAGE')
        plt.axis('off')

    return img
Example #19
filename_list = []
for el in li:
    if el[-3:] == 'czi':
        filename_list.append(el)

print(filename_list)

#for filename in list_of_files:
#filename = 'data/12-10-18 ACTUAL 24hr Con 3-Scene-1.czi'
for i, filename in enumerate(filename_list):
    # Next we're going to load the data using the czifile module we imported...

    print('processing file {}, this is file {} of {}'.format(
        filename, i + 1, len(filename_list)))
    filepath = 'data/{}'.format(filename)
    mydata = czifile.imread(filepath)

    # Here we loaded the data and assigned it to a new variable called mydata
    # NB: variable names are up to us! I tend to try to stick to "sensible"
    # variable names that will help me understand what the variable refers to,
    # but had I wanted to, I could have called the variable "banana", or "ds2832"
    # There are some rules about variable names, such as not using a function name,
    # and they have to start with a letter, but more on that as you learn more
    # python.

    # Now because of the way czifile works (I found this out while writing
    # this script!) the data is loaded with more dimensions than we actually need.
    # To see this, we can "print" (i.e. write) the shape attribute of mydata to
    # the terminal:

    print("The data we loaded has shape")
Example #20
def czi_array(path):
    '''Takes in the path of a czi file and returns an array of the file and its shape as a tuple'''
    array = czifile.imread(path)
    return (array, array.shape)
Example #21
name_start ='scan(.*)_Out'


#Set WM baseline subtraction value if using
wmbls1 = 588 #458
wmbls2 = 311 #280

#Define radius for weighted centroid calculation
radius =5
data_storage = []
gaussian = 1.76
cutoff = 0.7
variable = 10 # 10=peak13-mean,14-quantile. User sets which variable to use in finding connected voxels.
quantileP = .75

image = czi.imread(folder + '/' + file)


#Output
outputfolder = 'Y:/Active/2019 Airyscan/11-23-19old c57 section 5 right/AS6/Python_Out'
outfolder = "R5_C70_peakintensity_G1_76"
path = os.path.join(outputfolder, outfolder)



if not os.path.exists(path):
    os.mkdir(path)
    
out_name = (outfolder)

##User needs to check lines 366 and 367 to ensure channels are pulled correctly.
Example #22
import czifile

img = czifile.imread('../Osteosarcoma_01.czi')
print(img.shape)

img1 = img[0, 0, :, :, :, 0]
print(img1.shape)

# Next, let us extract each channel image.
img2 = img1[0, :, :]  # First channel, Red
img3 = img1[1, :, :]  # Second channel, Green
img4 = img1[2, :, :]  # Third channel, Blue DAPI

from matplotlib import pyplot as plt

fig = plt.figure(figsize=(10, 10))

ax1 = fig.add_subplot(2, 2, 1)
ax1.imshow(img2, cmap='hot')
ax1.title.set_text('1st channel')
# plt.savefig('redish1.jpg')

ax2 = fig.add_subplot(2, 2, 2)
ax2.imshow(img3, cmap='hot')
ax2.title.set_text('2nd channel')

ax3 = fig.add_subplot(2, 2, 3)
ax3.imshow(img4, cmap='hot')
ax3.title.set_text('3rd channel')
plt.savefig('redish.jpg')
plt.show()
Example #23
def overlay_interactive(inverted_check,t,
                        img1_check, img2_check, img3_check,
                        img1,img2,img3,
                        R1,G1,B1,R2,G2,B2,R3,G3,B3,
                        img1_min,img2_min,img3_min,
                        img1_max,img2_max,img3_max):
    '''
    Creates the overlay from the (up to) 3 channels
    '''

    file_list, cmap_list, icmap_list, img_min_list, img_max_list = [], [], [], [], []
    if img1_check:
        if img1[-3:] == 'tif':
            image1 = io.imread(img1)
        elif img1[-3:] == 'czi':
            image1 = czifile.imread(img1)
            image1 = image1[0,0,0,:,0,:,:,0]
        if len(image1.shape) > 2:
            image1 = image1[t]
        file_list.append(image1)
        cmp1, icmp1 = make_colormap((R1,G1,B1),'COLOR',False)
        cmap_list.append(cmp1)
        icmap_list.append(icmp1)
        img_min_list.append(img1_min)
        img_max_list.append(img1_max)
    if img2_check:
        if img2[-3:] == 'tif':
            image2 = io.imread(img2)
        elif img2[-3:] == 'czi':
            image2 = czifile.imread(img2)
            image2 = image2[0,0,1,:,0,:,:,0]
        if len(image2.shape) > 2:
            image2 = image2[t]
        
        file_list.append(image2)
        cmp2, icmp2 = make_colormap((R2,G2,B2),'COLOR',False)
        cmap_list.append(cmp2)
        icmap_list.append(icmp2)
        img_min_list.append(img2_min)
        img_max_list.append(img2_max)
    if img3_check:
        if img3[-3:] == 'tif':
            image3 = io.imread(img3)
        elif img3[-3:] == 'czi':
            image3 = czifile.imread(img3)
            image3 = image3[0,0,2,:,0,:,:,0]
        if len(image3.shape) > 2:
            image3 = image3[t]
        
        file_list.append(image3)
        cmp3, icmp3 = make_colormap((R3,G3,B3),'COLOR',False)
        cmap_list.append(cmp3)
        icmap_list.append(icmp3)
        img_min_list.append(img3_min)
        img_max_list.append(img3_max)
    
    if len(file_list) == 0:
        fig = plt.figure(figsize=(5,5))
        plt.imshow(np.zeros((100,100)), cmap='Greys')
        plt.text(40,50,'SELECT A \nCHANNEL')
        plt.axis('off')
    else:
        # create an empty list to hold the channel data
        channels = []
        channels_mapped = []

        if inverted_check:
            image_list = image_min_max(file_list, icmap_list, img_min_list, img_max_list)
        else:
            image_list = image_min_max(file_list, cmap_list, img_min_list, img_max_list)

        # for each channel, normalize image and get min and max intensities
        for channel_data in image_list:
            # add it to the channel list
            channels.append(channel_data)
            # normalize the image
            channel_norm = normalize_image(channel_data[0], channel_data[2], channel_data[3])
            # map the normalized image to the colormap indicated
            channel_mapped = gray2rgb(channel_norm, channel_data[1])
            # add this mapped image to our channel_list variable
            channels_mapped.append(channel_mapped)

        # overlay images
        if inverted_check:
            overlay = image_inverted_overlay(channels_mapped)
        else:
            overlay = image_overlay(channels_mapped)

        if len(overlay.shape) == 3:
            # display the figure
            fig = plt.figure(figsize=(5,5))
            plt.imshow(overlay)
            plt.axis('off')
        else:
            fig = plt.figure(figsize=(5,5))
            plt.imshow(overlay[t])
            plt.axis('off')
Example #24
def load_czi_file_as_stack(filename):
    return czifile.imread(filename)
Example #25
def image_min_max(images, cmaps, img_min=0, img_max=0):
    '''Rescale an image to a given minimum and maximum intensity'''

    # if no img_min given set up a list of the appropriate length to fill later
    if img_min == 0:
        img_min = np.zeros(len(cmaps))

    # if no img_max given set up a list of the appropriate length to fill later
    if img_max == 0:
        img_max = np.zeros(len(cmaps))

    # generate a dictionary of default colormaps
    cmap_dict = make_colormaps()
    for count, colormap in enumerate(cmaps):
        # if the colormap is a string pull it from the dictionary
        if type(colormap) == str:
            # is the string in the default dictionary?
            if colormap in cmap_dict.keys():
                cmaps[count] = cmap_dict[colormap]
            else:
                raise Exception('Not a recognized colormap')

    # create an empty list to hold our image details
    image_list = []

    # loop through each channel provided
    for count, image in enumerate(images):
        # if filenames were given
        if isinstance(image, str):
            # if it's a string check if it's a czi or tif file
            if image[-3:] == 'tif':
                # read in tif file
                imstack = io.imread(image)
                #check if a minimum intensity was defined - if not set to image minimum
                if img_min[count] == 0.0:
                    img_min[count] = np.min(imstack)
                elif img_min[count] <= 1:
                    # if a value less than 1 is defined use as a percentage
                    if len(imstack.shape) == 2:
                        image_intensities = sorted(imstack.ravel())
                    else:
                        image_intensities = sorted(
                            np.mean(imstack, axis=0).ravel())
                    N_pixels = len(image_intensities)
                    img_min[count] = int(image_intensities[int(img_min[count] *
                                                               N_pixels)])

                #check if a maximum intensity was defined - if not set to image maximum
                if img_max[count] == 0.0:
                    img_max[count] = np.max(imstack)
                elif img_max[count] <= 1:
                    # if a value less than 1 is defined use as a percentage
                    if len(imstack.shape) == 2:
                        image_intensities = sorted(imstack.ravel())
                    else:
                        image_intensities = sorted(
                            np.mean(imstack, axis=0).ravel())
                    N_pixels = len(image_intensities)
                    img_max[count] = int(image_intensities[int(img_max[count] *
                                                               N_pixels)])

                image_list.append(
                    (imstack, cmaps[count], img_min[count], img_max[count]))

            elif image[-3:] == 'czi':
                imstack = czifile.imread(image)
                n_channels = imstack.shape[2]
                n_timepoints = imstack.shape[3]

                for channel in np.arange(0, n_channels):
                    if n_timepoints == 1:
                        channel_stack = imstack[0, 0, channel, 0, 0, :, :, 0]
                    else:
                        channel_stack = imstack[0, 0, channel, :, 0, :, :, 0]
                    if img_min[channel] == 0.0:
                        img_min[channel] = np.min(channel_stack)
                    elif img_min[channel] <= 1:
                        if len(channel_stack.shape) == 2:
                            image_intensities = sorted(channel_stack.ravel())
                        else:
                            image_intensities = sorted(
                                np.mean(channel_stack, axis=0).ravel())
                        N_pixels = len(image_intensities)
                        img_min[channel] = int(image_intensities[int(
                            img_min[channel] * N_pixels)])
                    if img_max[channel] == 0.0:
                        img_max[channel] = np.max(channel_stack)
                    elif img_max[channel] <= 1:
                        if len(channel_stack.shape) == 2:
                            image_intensities = sorted(channel_stack.ravel())
                        else:
                            image_intensities = sorted(
                                np.mean(channel_stack, axis=0).ravel())
                        N_pixels = len(image_intensities)
                        img_max[channel] = int(image_intensities[int(
                            img_max[channel] * N_pixels)])
                    image_list.append((channel_stack, cmaps[channel],
                                       img_min[channel], img_max[channel]))

        else:
            # if it's an array already
            imstack = image
            # if the minimum isn't defined set it equal to the image minimum
            if img_min[count] == 0.0:
                img_min[count] = np.min(image)
            elif img_min[count] <= 1:
                # if the minimum is defined as less than 1 use it as a percentage
                image_intensities = sorted(image.ravel())
                N_pixels = len(image_intensities)
                img_min[count] = int(image_intensities[int(img_min[count] *
                                                           N_pixels)])

            # if the maximum isn't defined set it equal to the image maximum
            if img_max[count] == 0.0:
                img_max[count] = np.max(image)
            elif img_max[count] <= 1:
                # if the minimum is defined as less than 1 use it as a percentage
                image_intensities = sorted(image.ravel())
                N_pixels = len(image_intensities)
                img_max[count] = int(image_intensities[int(img_max[count] *
                                                           N_pixels)])

            image_list.append(
                (imstack, cmaps[count], img_min[count], img_max[count]))

    return image_list
Example #26
#Time series images
img2 = tifffile.imread("images/Scratch_Assay_400_289_8bit.tif")
"""
####################################################################################
#reading czi files
# pip install czifile 
# to import the package you need to use import czifile
# https://pypi.org/project/czifile/
"""

################################

import czifile

img = czifile.imread('images/Osteosarcoma_01.czi')
#img = czifile.imread('images/Scratch_Assay_400_289.czi')

print(img.shape)  #7 dimensions
#Time series, scenes, channels, y, x, z, RGB
#In this example (Osteosarcoma) we have 1 time series, 1 scene, and 3 channels;
#each channel is a grey image of size 1376 x 1104

#Let us extract only relevant pixels, all channels in x and y
img1 = img[0, 0, :, :, :, 0]
print(img1.shape)

#Next, let us extract each channel image.
img2 = img1[0, :, :]  #First channel, Red
img3 = img1[1, :, :]  #Second channel, Green
img4 = img1[2, :, :]  #Third channel, Blue DAPI
Example #27
def read_czi(filename,
             trim=False,
             swapaxes=True,
             return_metadata=False,
             metadata_only=False):
    """Read a czi file into an ndarray
    
    Args:
        filename: string
            Path to czi file
        trim: bool
            If true, remove last frame if it contains blank slices
        swapaxes: bool
            If true, switches first two axes to produce a stack order ctzxy
        return_metadata: bool
            If true, returns a tuple of stack, first distance, z interval
        metadata_only: bool
            If true, just return the metadata features starting_positions
            and z_interval. Stack is returned as None.
            
    Returns:
        stack: ndarray
            Image stack in dimensions [t,c,z,x,y] (no swap) or 
            [c,t,z,x,y] (swapped)
        starting_positions: list of floats
            List of the position, in microns, of the first slice in the Z
            stack of each file, taken from czi file metadata.
        z_interval: float
            Size of Z slice, in microns, taken from czi metadata
    """
    def frame_incomplete(stack3d):
        """Determine if frame is incomplete."""
        for slice in stack3d:
            # If only value in slice is 0, frame is incomplete.
            if ((np.min(slice) == 0) & (np.max(slice) == 0)):
                return True
        return False

    if not metadata_only:
        stack = czifile.imread(filename)
        stack = np.squeeze(stack)
        # Trim off last frame if incomplete.
        if trim:
            if frame_incomplete(stack[-1, 0]):
                stack = stack[:-1]
        if (swapaxes):
            stack = np.swapaxes(stack, 0, 1)
    else:
        stack = None

    if return_metadata:
        handle = czifile.CziFile(filename)
        metadata = handle.metadata()
        root = ET.fromstring(metadata)
        # Pull first distance and z interval, convert to microns.
        first_dist = float(
            root.findall('.//ZStackSetup')[0][8][0][0].text) * 1e6
        #last_dist = root.findall('.//ZStackSetup')[0][9][0][0].text
        z_interval = float(
            root.findall('.//ZStackSetup')[0][10][0][0].text) * 1e6
        handle.close()
        return stack, first_dist, z_interval
    else:
        return stack
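# Usage sketch for read_czi (not from the original source; the path is hypothetical).
# With return_metadata=True the function also parses the ZStackSetup metadata.
stack, first_dist, z_interval = read_czi("embryo_stack.czi",
                                         trim=True,
                                         return_metadata=True)
print(stack.shape)              # [c, t, z, x, y] since swapaxes defaults to True
print(first_dist, z_interval)   # both in microns, parsed from the czi metadata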
Example #28
for ibaseDir,baseDir in enumerate(baseDirs):
    baseDir = baseDirs[ibaseDir]
    if yCropLists is not None: yCropList = yCropLists[ibaseDir]
    else: yCropList = None
    correctionBaseDir = correctionBaseDirs[ibaseDir]
    for fileIndex in range(len(fusionFileNames)):
        if not separateFileMultiView:
            fusionFileName = os.path.join(baseDir,fusionFileNames[fileIndex])
            # check whether already fused (only checking for the registration channel file!)
            tmpFinalFile = os.path.join(os.path.dirname(fusionFileName),
                                     outFilePattern %(os.path.basename(fusionFileName.split('.')[0]),registrationChannel))
            if os.path.exists(tmpFinalFile): continue

            # load files
            print 'loading file %s...' %fusionFileName
            inarray = czifile.imread(fusionFileName)
            print 'finished loading'
            infoDict = zf.getStackInfoFromCZI(fusionFileName)
        else:
            fusionFileName = os.path.join(baseDir,fusionFileNames[fileIndex][0])
            tmpFinalFile = os.path.join(os.path.dirname(fusionFileName),
                                     outFilePattern %(os.path.basename(fusionFileName.split('.')[0]),registrationChannel))
            if os.path.exists(tmpFinalFile): continue

            readPaths = [os.path.join(baseDir,fusionFileNames[fileIndex][i]) for i in range(len(fusionFileNames[fileIndex]))]

            # load files
            print 'loading file %s...' %fusionFileName
            inarrays = []
            shapes = []
            for ifile in range(len(fusionFileNames[fileIndex])):
Example #29
import czifile
import numpy as np
image = czifile.imread("200923PJ_1900680MIB20x_GDYSB_RWGA_DAPI_1.czi")
Example #30
def CZIMetadatatoDictionaries(InputDirectory, CziName):
    czi = czifile.CziFile(InputDirectory + str(CziName))
    czi_array = czifile.imread(InputDirectory +
                               str(CziName))  #read the czi file.
    czi_array = czi_array.squeeze()  #take out the dimensions that are not important
    #print(czi_array.shape)

    ####### Extract the metadata
    metadata = czi.metadata  #reading the metadata from CZI
    root = ET.fromstring(metadata)  #loading metadata into XML object
    ##### Build a dictionary from the channel metadata: only entries with an Id are kept, using the dye name as the key and the channel number as the value
    ChannelDictionary = {}
    for neighbor in root.iter('Channel'):
        TempDict = {}
        TempDict = neighbor.attrib
        if 'Id' in TempDict:  #for the metadata lines that start with ID
            #print(TempDict) #test
            Search = r"(\w+):(\d)"  #separate the channel:1 into two parts .. only keep the number
            Result = re.search(Search, TempDict['Id'])
            Search2 = r"(\w+)-(.+)"
            Result2 = re.search(Search2, TempDict['Name'])
            ChannelDictionary[Result2.group(1)] = Result.group(2)  #store the channel number (0-based) under the dye name
    #print(ChannelDictionary)

    ####### pull out the channels and make stacks
    if "AF405" in ChannelDictionary.keys():
        AF405index = ChannelDictionary["AF405"]
        AF405Stack = czi_array[int(AF405index), ...]
    else:
        print("AF405 is not in this file")
        AF405Stack = 'empty'

    if "AF488" in ChannelDictionary.keys():
        AF488index = ChannelDictionary["AF488"]
        AF488Stack = czi_array[int(AF488index), ...]

    else:
        print("AF488 is not in this file")
        AF488Stack = 'empty'

    if "AF647" in ChannelDictionary.keys():
        AF647index = ChannelDictionary["AF647"]
        AF647Stack = czi_array[int(AF647index), ...]
    else:
        print("AF647 is not in this file")
        AF647Stack = 'empty'

    if "AF546" in ChannelDictionary.keys():
        AF546index = ChannelDictionary["AF546"]
        AF546Stack = czi_array[int(AF546index), ...]
    elif "At550" in ChannelDictionary.keys():
        AF546index = ChannelDictionary["At550"]
        AF546Stack = czi_array[int(AF546index), ...]
    else:
        print("AF546 is not in this file")
        AF546Stack = 'empty'

    return (AF405Stack, AF488Stack, AF647Stack, AF546Stack)
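# Usage sketch (hypothetical directory and file name; not part of the original code).
# Each returned item is either a numpy stack for that dye or the string 'empty'.
af405, af488, af647, af546 = CZIMetadatatoDictionaries("data/", "section1.czi")
if not isinstance(af405, str):
    print("AF405 stack shape:", af405.shape)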
Example #31
#
tifff_3dimg = tifffile.imread("images/3d_image.tif")
tifff_time_series = tifffile.imread("images/time_series.tif")

####################################################################################
#reading czi files
# pip install czifile
# to import the package you need to use import czifile
# https://pypi.org/project/czifile/

################################

import czifile
from matplotlib import pyplot as plt
img = czifile.imread('images/multi_channel_z_stack_time_series.czi')

print(img.shape)  #7 dimensions
#Scenes, Time series, channels, z, y, x, RGB
#In this example we have 1 scene, 11 time points, 2 channels, 23 z slices, and 587x587 grey pixels

#Let us extract a single z slice for both channels:
#scene=0, time=5, z=11, all y and x, RGB=0
img1_ch0 = img[0, 5, 0, 11, :, :, 0]
img1_ch1 = img[0, 5, 1, 11, :, :, 0]

fig = plt.figure(figsize=(12, 12))
ax1 = fig.add_subplot(2, 2, 1)
ax1.imshow(img1_ch0, cmap='cubehelix')
ax1.title.set_text('1st channel')
ax2 = fig.add_subplot(2, 2, 2)
Example #32
def imread_czi(filename):
    return czifile.imread(filename)