Code Example #1
def load(self, path):
    # Open the ND2 file and yield each frame as a 2-D (y, x) array.
    img = nd2.ND2Reader(path)
    img.bundle_axes = 'yx'
    # Pin the field of view (point) to index 1.
    img.default_coords['v'] = 1
    # Iterate over the time axis when looping over the reader.
    img.iter_axes = 't'
    self.img_set = img
    # Area of one pixel in square microns.
    self.pixes_size = self.img_set.metadata['pixel_microns'] ** 2
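
A minimal standalone sketch of how these reader settings interact (assuming `import nd2reader as nd2`, which the snippet's `nd2.ND2Reader` call implies, and a hypothetical multipoint time-lapse file):

import nd2reader as nd2

# Hypothetical file used only for illustration.
reader = nd2.ND2Reader('example_timelapse.nd2')

reader.bundle_axes = 'yx'        # each yielded frame is a 2-D (y, x) array
reader.default_coords['v'] = 1   # pin the field of view that is not iterated
reader.iter_axes = 't'           # len(reader) == number of timepoints

pixel_area_um2 = reader.metadata['pixel_microns'] ** 2
for t, frame in enumerate(reader):
    print(t, frame.shape, 'pixel area (um^2):', pixel_area_um2)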
Code Example #2
def file_open(self):
    # getOpenFileName returns (path, selected_filter); name[0] is the path.
    name = QFileDialog.getOpenFileName(
        self,
        'Open File',
        filter="ND2 files (*.nd2);;TIFF (*.tif);;All files (*)")
    self.frame = nd2.ND2Reader(name[0])
    # sizes maps each axis present in the file to its length.
    self.dims = self.frame.sizes
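
For context, a small sketch (outside any GUI, with a hypothetical file path) of what the `sizes` dictionary typically contains and how missing axes can be handled:

import nd2reader as nd2

reader = nd2.ND2Reader('example.nd2')   # hypothetical path

# sizes only lists the axes that actually exist in the file, so use .get()
# with a default when an axis (e.g. 'z' or 'v') may be absent.
dims = reader.sizes                     # e.g. {'x': 1024, 'y': 1022, 'c': 2, 't': 50}
n_timepoints = dims.get('t', 1)
n_channels = dims.get('c', 1)
n_z = dims.get('z', 1)
print(dims, n_timepoints, n_channels, n_z)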
Code Example #3
    def _initND2(self):
        self.reader = nd2reader.ND2Reader(self.filename)
        self.positionCount = float(len(self.reader.metadata['fields_of_view']))
        self.timepointCount = float(len(self.reader.metadata['frames']))
        self.zLevelCount = float(len(self.reader.metadata['z_levels']))
        self.channels = self.reader.metadata['channels']
        self.micronsPerPixel = self.reader.metadata['pixel_microns']
        # Loop durations and intervals are reported in milliseconds; convert to seconds.
        self.experimentDurationSeconds = float(
            self.reader.metadata['experiment']['loops'][0]['duration'] / 1000)
        self.timepointIntervalSeconds = float(
            self.reader.metadata['experiment']['loops'][0]['sampling_interval']
            / 1000)
        self.height = float(self.reader.metadata['height'])
        self.width = float(self.reader.metadata['width'])

        # Position (point) names are not exposed by the public metadata dict,
        # so read them from the parser's raw metadata blob.
        self.position_names = []
        points = self.reader.parser._raw_metadata.image_metadata[
            b'SLxExperiment'][b'ppNextLevelEx'][b''][b'uLoopPars'][b'Points'][
                b'']
        for point in points:
            self.position_names.append(point[b'dPosName'].decode('UTF-8'))
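
The `_raw_metadata` path used above is undocumented and varies between files; a hedged sketch of a fallback wrapper (the byte-string keys are taken from the snippet, everything else is illustrative):

import nd2reader

def read_position_names(reader):
    """Return point names from raw metadata, or generic names as a fallback."""
    try:
        points = reader.parser._raw_metadata.image_metadata[
            b'SLxExperiment'][b'ppNextLevelEx'][b''][b'uLoopPars'][b'Points'][b'']
        return [p[b'dPosName'].decode('UTF-8') for p in points]
    except (KeyError, TypeError, AttributeError):
        # Fall back to "Point 1", "Point 2", ... based on the number of
        # fields of view reported in the public metadata.
        return ['Point {}'.format(i + 1)
                for i in range(len(reader.metadata['fields_of_view']))]

# Usage (hypothetical path):
# names = read_position_names(nd2reader.ND2Reader('multipoint.nd2'))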
Code Example #4
# Segmentation size parameters:
nuclear_local_thresholding_size = 701
min_size_nuclear = 15000
max_size_nuclear = 130000
min_size_cell = 50000
max_size_cell = 1000000

# List the data directory and drop the first entry (e.g. a hidden file);
# note that os.listdir() order is arbitrary, so this is fragile.
dirs = [x for x in os.listdir(data_path)]
dirs.pop(0)
num_dir = len(dirs)

for s in range(len(dirs)):

    # Strip the '.nd2' extension from the file name.
    name = dirs[s][:-4]
    print("### DATA PROCESSING: %s (%d / %d)" % (name, s + 1, num_dir))

    imgs = nd2.ND2Reader('%s/%s.nd2' % (data_path, name))

    # 0: SG channel
    # 1: cell boundary channel
    # 2: nuclei channel

    # identify SG
    print("### SG IDENTIFICATION ...")
    # thresholding, min_size_sg and max_size_sg are defined outside this excerpt.
    sg = find_organelle(imgs[0], thresholding, 3000, 200, min_size_sg,
                        max_size_sg)

    # identify cell
    print("### CELL SEGMENTATION ...")
    # The original excerpt is truncated here; the remaining arguments are
    # assumed to be the cell-size bounds defined above.
    cell, cell_ft = find_cell(imgs[0], imgs[1], imgs[2],
                              nuclear_local_thresholding_size,
                              min_size_nuclear, max_size_nuclear,
                              min_size_cell, max_size_cell)
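
The loop above indexes `imgs[0]`..`imgs[2]` and treats them as channel planes; a hedged sketch of reading those channel planes explicitly via `iter_axes` (the helper name and path handling are assumptions, not part of the original script):

import os
import numpy as np
import nd2reader as nd2

def read_channels(path):
    """Return one 2-D array per channel for the default timepoint/FOV (sketch)."""
    reader = nd2.ND2Reader(path)
    reader.bundle_axes = 'yx'
    reader.iter_axes = 'c'            # iterate over channels only
    return [np.asarray(plane) for plane in reader]

# Hypothetical usage mirroring the loop above:
# sg_img, boundary_img, nuclei_img = read_channels(os.path.join(data_path, name + '.nd2'))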
Code Example #5
def cell_segmentation(nd2_path, coor_path, saveImages=False):
    #This is the meat of our script:

    #Getting current time:
    now = datetime.now()

    date = now.strftime("%Y-%m-%d %H%M%S")

    masksFolderPath = r"Masks ({})".format(date)

    try:
        os.mkdir(masksFolderPath)
    except FileExistsError:
        pass  # Folder already exists

    macroFolderPath = r"Macros ({})".format(date)
    try:
        os.mkdir(macroFolderPath)
    except FileExistsError:
        pass  # Folder already exists

    #Loading in the nd2 file
    nd2_file = nd2.ND2Reader(nd2_path)

    #Setting the FITC channel and finding out how many points there are in
    # the nd2 file:
    try:
        nd2_file.default_coords['c'] = 1
    except Exception:
        # If there is only one channel (the FITC channel), there is no 'c' axis.
        pass

    try:
        # Defining the number of points for iteration:
        num_points = nd2_file.sizes['v']
    except KeyError:
        # If there is no 'v' axis, fall back to a single point.
        num_points = 1

    # Defining the number of z_stacks
    num_stacks = nd2_file.sizes['z']  # YK

    # One z-stack (a list of 2-D frame arrays) will be stored per point:
    z_stacks = []

    # We will iterate over the number of points taken
    for i in range(0, num_points):

        if num_points != 1:
            # For every loop, we will change the point.
            nd2_file.default_coords['v'] = i

        # Temporary list to store z-stack images post extraction
        z_stack_post = []

        # We will iterate over the number of #YK stacks
        for j in range(0, num_stacks):
            z_stack_post.append(np.asarray(nd2_file[j]))

        z_stacks.append(z_stack_post)

    z_stacks = normalizeZStacks(z_stacks)

    #If the user wants to save the z-stacks as images:
    if saveImages:
        org_images = z_stacks.copy()

    #Applying Median Filter:
    z_stacks = applyMedianFilter(z_stacks)

    #Applying threshold:
    z_stacks = applyTresholding(z_stacks)

    # Contains [BlobsListsPerImage[blobs[Blob2D]]]
    blobStacks = []  # YK

    # Minimum area size
    minAreaSize = 5000

    #Image index counts start at 1:
    imageIncInd = 1

    # We iterate like we've seen for the other sections. We loop through each
    # z-stack and then we iterate through each image.

    for zInd in range(0, len(z_stacks)):
        z_stack_th = z_stacks[zInd]

        # BlobImage lists i.e. all images' areas
        bStack = BlobStack(zstackInd=zInd)

        # iterating through each image. We use our median filtered image
        # to draw contours on.
        imageInd = 0

        for image_th in z_stack_th:
            # List of Blob2D i.e. areas in an image
            blobImage = BlobImage(incInd=imageIncInd)

            # Finding contours with function:
            contour, hierarchy = cv2.findContours(image_th, cv2.RETR_LIST,
                                                  cv2.CHAIN_APPROX_NONE)
            for c in contour:
                area = cv2.contourArea(c)

                # Keep only contours with an area larger than minAreaSize.
                if area > minAreaSize:
                    # Calculating the centroid of each object
                    M = cv2.moments(c)
                    cX = int(M['m10'] / M['m00'])
                    cY = int(M['m01'] / M['m00'])

                    blobImage.add(
                        Blob2D(imageInd=imageInd,
                               imageIncInd=imageIncInd,
                               centroid=(cX, cY),
                               contour=c,
                               areaMembers=getAreaMembers(image=image_th,
                                                          contour=c),
                               radius=getFarthest(centroid=(cX, cY),
                                                  contour=c),
                               area=area,
                               zstackInd=zInd))
            imageInd += 1
            imageIncInd += 1

            # Finally storing all our data
            bStack.add(blobImage)  # YK

        blobStacks.append(bStack)

    # membership testing
    # Per stack
    for bStack in blobStacks:
        bImages = bStack.getBlobImages()
        for bImage in bImages:
            # lists grouped per images
            blobs = bImage.getBlobs()

            # blobs on a given image
            for bInd in range(0, len(blobs)):
                blob = blobs[bInd]
                masses = bStack.getMasses()
                if not isAMember(blob, masses):
                    mId = len(masses) + 1
                    m = Mass(mId=mId, zstackInd=bStack.zstackInd)
                    m.add(blob)
                    bStack.addMass(mass=m)
        for m in bStack.getMasses():
            if not m.isSphere():
                continue
            m.setAlone(isAlone(masse=m, bImages=bImages))

    # The masses are available on the bStack object
    print('\n')
    print("Finding target masses in each z-stack...")

    coordinates = pd.read_excel(
        coor_path,
        sheet_name='Recorded Data',
        index_col=0,
        usecols=['Index', 'X Coord [µm]', 'Y Coord [µm]', 'Ti ZDrive [µm]'])

    for bsInd in tqdm(range(0, len(blobStacks))):
        bStack = blobStacks[bsInd]
        print('z-stack {0} has {1} masses'.format(bsInd + 1,
                                                  bStack.numOfMasses()))
        for m in bStack.getMasses():

            if m.isAlone():  # m.isSphere()
                biggestBlob = m.getBiggestRadiusBlob()

                if m.isSphere() and biggestBlob.isSoliditaryElongation(
                ) and not (biggestBlob.isOnEdge()):
                    mask = biggestBlob.areaMembers

                    cv2.drawContours(mask, biggestBlob.contour, -1,
                                     (255, 0, 0), 3)

                    imagePath = join(
                        masksFolderPath,
                        'z{0}_image_{1}.png'.format(biggestBlob.zstackInd + 1,
                                                    biggestBlob.imageInd + 1))

                    cv2.imwrite(imagePath, mask.astype('uint8'))

                    x = coordinates.loc[biggestBlob.imageIncInd,
                                        'X Coord [µm]']
                    y = coordinates.loc[biggestBlob.imageIncInd,
                                        'Y Coord [µm]']
                    z = coordinates.loc[biggestBlob.imageIncInd,
                                        'Ti ZDrive [µm]']

                    generateMacroFile(macroFolder=macroFolderPath,
                                      zInd=biggestBlob.zstackInd + 1,
                                      iInd=biggestBlob.imageInd + 1,
                                      nd2FilePath=nd2_path,
                                      imagePath=imagePath,
                                      x=x,
                                      y=y,
                                      z=z)

                # send this info to the microscope
                # shoot the laser at it

    #         # for  accessing contained data
    #         for k, b in m.getBlobDict().items():

    #Saving images if user wants to:

    if saveImages:
        print('\n')
        print("Saving Images...")

        savedImg_path = r"Contours ({})".format(date)
        try:
            os.mkdir(savedImg_path)
        except FileExistsError:
            pass  # Folder already exists

        # Initializing lists to store cell data (e.g. circularity, solidity, elongation):
        z_r = []
        b_r = []
        i_r = []
        cir = []
        sol = []
        elong = []
        area = []
        j = 1

        for bsInd, z in zip(range(0, len(blobStacks)), org_images):
            bStack = blobStacks[bsInd]
            temp_zstack = z.copy()
            temp_zstack = [
                cv2.cvtColor(i, cv2.COLOR_GRAY2RGB) for i in temp_zstack
            ]

            for m, blob_index in zip(bStack.getMasses(),
                                     range(1,
                                           bStack.numOfMasses() + 1)):
                biggestBlob = m.getBiggestRadiusBlob()

                for key, val in m.getBlobDict().items():

                    #Storing your data:
                    data = val.calculation()
                    z_r.append(j)
                    i_r.append(val.imageInd + 1)
                    b_r.append(blob_index)
                    area.append(data[0])
                    cir.append(data[1])
                    sol.append(data[2])
                    elong.append(data[3])

                    # Creating an ROI mask from contours in the local directory

                    img_index = val.imageInd
                    c = val.contour

                    # Limiting the area to test if a point is in the contour
                    font = cv2.FONT_HERSHEY_SIMPLEX
                    bottomLeftCornerOfText = val.centroid
                    fontScale = 1
                    fontColor = (55, 55, 55)
                    lineType = 2

                    if m.isSphere() and biggestBlob.isSoliditaryElongation(
                    ) and not (biggestBlob.isOnEdge()):

                        cv2.drawContours(temp_zstack[img_index], c, -1,
                                         (0, 255, 0), 3)

                        # Label the accepted contour with its blob index:
                        cv2.putText(temp_zstack[img_index], str(blob_index),
                                    bottomLeftCornerOfText, font, fontScale,
                                    fontColor, lineType)

                    else:

                        cv2.drawContours(temp_zstack[img_index], c, -1,
                                         (0, 0, 255), 3)

                        # Putting number for objects in images:
                        cv2.putText(temp_zstack[img_index], str(blob_index),
                                    bottomLeftCornerOfText, font, fontScale,
                                    fontColor, lineType)

            j += 1

            #Cell data:
            cell_data = pd.DataFrame({
                'b_ind': b_r,
                'z_ind': z_r,
                'area': area,
                'circularity': cir,
                'solidity': sol,
                'elongation': elong
            })

            cell_data.to_csv(join(savedImg_path, 'contour_data.csv'),
                             index=False)

            #Saving each z-stack
            for i, ind in zip(temp_zstack, range(1, len(temp_zstack) + 1)):
                cv2.imwrite(
                    join(savedImg_path, 'z{0}_image_{1}.png'.format(
                        (bsInd + 1), ind)), i)
            print("Contours has been saved in {}".format(savedImg_path))
Code Example #6
File: __init__.py  Project: knowledgevis/large_image
    def __init__(self, path, **kwargs):
        """
        Initialize the tile class.  See the base class for other available
        parameters.

        :param path: a filesystem path for the tile source.
        """
        super(ND2FileTileSource, self).__init__(path, **kwargs)

        self._largeImagePath = self._getLargeImagePath()

        self._pixelInfo = {}
        try:
            self._nd2 = nd2reader.ND2Reader(self._largeImagePath)
        except (UnicodeDecodeError, nd2reader.exceptions.InvalidVersionError,
                nd2reader.exceptions.EmptyFileError):
            raise TileSourceException('File cannot be opened via nd2reader.')
        self._logger = config.getConfig('logger')
        self._tileLock = threading.RLock()
        self._recentFrames = cachetools.LRUCache(maxsize=6)
        self.sizeX = self._nd2.metadata['width']
        self.sizeY = self._nd2.metadata['height']
        self.tileWidth = self.tileHeight = self._tileSize
        if self.sizeX <= self._singleTileThreshold and self.sizeY <= self._singleTileThreshold:
            self.tileWidth = self.sizeX
            self.tileHeight = self.sizeY
        self.levels = int(
            max(
                1,
                math.ceil(
                    math.log(
                        float(max(self.sizeX, self.sizeY)) / self.tileWidth) /
                    math.log(2)) + 1))
        # There is one file that throws a warning 'Z-levels details missing in
        # metadata'.  In this instance, there should be no z-levels.
        try:
            if (self._nd2.sizes.get('z')
                    and self._nd2.sizes.get('z') == self._nd2.sizes.get('v')
                    and not len(
                        self._nd2._parser._raw_metadata._parse_dimension(
                            r""".*?Z\((\d+)\).*?"""))
                    and self._nd2.sizes['v'] * self._nd2.sizes.get('t', 1)
                    == self._nd2.metadata.get('total_images_per_channel')):
                self._nd2._parser._raw_metadata._metadata_parsed[
                    'z_levels'] = []
                self._nd2.sizes['z'] = 1
        except Exception:
            pass
        frames = self._nd2.sizes.get('c', 1) * self._nd2.metadata.get(
            'total_images_per_channel', 0)
        self._framecount = frames if frames else None
        self._nd2.iter_axes = sorted(
            [a for a in self._nd2.axes if a not in {'x', 'y', 'v'}],
            reverse=True)
        if frames and len(self._nd2) != frames and 'v' in self._nd2.axes:
            self._nd2.iter_axes = ['v'] + self._nd2.iter_axes
        if 'c' in self._nd2.iter_axes and len(
                self._nd2.metadata.get('channels', [])):
            self._bandnames = {
                name.lower(): idx
                for idx, name in enumerate(self._nd2.metadata['channels'])
            }

        # self._nd2.metadata
        # {'channels': ['CY3', 'A594', 'CY5', 'DAPI'],
        #  'date': datetime.datetime(2019, 7, 21, 15, 13, 45),
        #  'events': [],
        #  'experiment': {'description': '',
        #                 'loops': [{'duration': 0,
        #                            'sampling_interval': 0.0,
        #                            'start': 0,
        #                            'stimulation': False}]},
        #  'fields_of_view': range(0, 2500),         # v
        #  'frames': [0],
        #  'height': 1022,
        #  'num_frames': 1,
        #  'pixel_microns': 0.219080212825376,
        #  'total_images_per_channel': 2500,
        #  'width': 1024,
        #  'z_coordinates': [1890.8000000000002,
        #                    1891.025,
        #                    1891.1750000000002,
        # ...
        #                    1905.2250000000001,
        #                    1905.125,
        #                    1905.1000000000001],
        #  'z_levels': range(0, 2500)}

        # self._nd2.axes   ['x', 'y', 'c', 't', 'z', 'v']
        # self._nd2.ndim   6
        # self._nd2.pixel_type   numpy.float64
        # self._nd2.sizes  {'x': 1024, 'y': 1022, 'c': 4, 't': 1, 'z': 2500, 'v': 2500}
        self._getND2Metadata()
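
For reference, the `levels` expression above is the standard tile-pyramid rule, ceil(log2(maxDimension / tileSize)) + 1 with a floor of 1; a small standalone check with assumed sizes:

import math

def pyramid_levels(size_x, size_y, tile_width):
    """Number of levels so that the smallest level fits in a single tile."""
    return int(max(1, math.ceil(
        math.log(float(max(size_x, size_y)) / tile_width) / math.log(2)) + 1))

# Assumed example: a 1024 x 1022 image with 256-pixel tiles needs
# ceil(log2(1024 / 256)) + 1 = 3 levels.
print(pyramid_levels(1024, 1022, 256))   # -> 3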