Example #1
0
 def apply_registration(self):
     """Crop the current image to the registered sub-region.

     The crop origin is the registration start shifted by the
     per-channel registration offset; the crop extent is the common
     registered image size.
     """
     source = self.meta_image.image
     origin = (ccore.Diff2D(*self.registration_start) -
               ccore.Diff2D(*self.channelRegistration))
     extent = ccore.Diff2D(*self.new_image_size)
     self.meta_image.set_image(ccore.subImage(source, origin, extent))
Example #2
0
    def normalize_image(self, plate_id=None):
        """Map the current image to the 8 bit range and store it back.

        With flat field correction enabled the image is first corrected
        with a background image (cropped to the global crop region when
        one is set); otherwise the linear transform is chosen by the
        input pixel type.
        """
        source = self.meta_image.image
        if self.bFlatfieldCorrection:
            self.logger.debug("* using flat field correction with image from %s"
                              % self.strBackgroundImagePath)
            background = self._load_flatfield_correction_image(plate_id)

            crop = MetaImage.get_crop_coordinates()
            if crop is not None:
                self.logger.debug("* applying cropping to background image")
                background = ccore.subImage(background,
                                            ccore.Diff2D(crop[0], crop[1]),
                                            ccore.Diff2D(crop[2], crop[3]))

            corrected = ccore.flatfieldCorrection(source, background, 0.0, True)
            result = ccore.linearTransform2(corrected, self.fNormalizeMin,
                                            self.fNormalizeMax, 0, 255, 0, 255)
        else:
            self.logger.debug("* not using flat field correction")
            if type(source) == ccore.UInt16Image:
                result = ccore.linearTransform3(source,
                                                int(self.fNormalizeMin),
                                                int(self.fNormalizeMax),
                                                0, 255, 0, 255)
            elif type(source) == ccore.Image:
                result = ccore.linearTransform2(source,
                                                int(self.fNormalizeMin),
                                                int(self.fNormalizeMax),
                                                0, 255, 0, 255)
            else:
                # unknown pixel type - pass the image through unchanged
                result = source

        self.meta_image.set_image(result)
Example #3
0
 def _cropped_image(self):
     """Return the raw image cropped to the class-level crop
     coordinates, computing and caching the result on first access.
     """
     if self._img_c is None:
         coords = MetaImage._crop_coordinates
         self._img_c = ccore.subImage(self._raw_image,
                                      ccore.Diff2D(coords[0], coords[1]),
                                      ccore.Diff2D(coords[2], coords[3]))
     return self._img_c
Example #4
0
 def _cropped_image(self):
     """Lazily build and cache the cropped view of the raw image."""
     if self._img_c is None:
         # crop region shared by all images of the run
         c = MetaImage._crop_coordinates
         offset = ccore.Diff2D(c[0], c[1])
         extent = ccore.Diff2D(c[2], c[3])
         self._img_c = ccore.subImage(self._raw_image, offset, extent)
     return self._img_c
Example #5
0
    def _on_update_image(self, image_rgb, info, filename):
        """Display *image_rgb* in the application-wide image dialog.

        The dialog, its aspect-ratio preserving graphics widget and the
        renderer combo box are created lazily on the first call (stored
        on ``qApp``); later calls only refresh the pixmap, window title
        and tool tip.

        :param image_rgb: ccore RGB image to display.
        :param info: text used as the dialog's window title.
        :param filename: path shown as the dialog's tool tip.
        """
        if self._show_image.isChecked():
            # FIXME:
            # NOTE(review): the width is cut down to a multiple of 4
            # pixels -- presumably to satisfy QImage's 32-bit scanline
            # alignment; confirm before changing.
            if image_rgb.width % 4 != 0:
                image_rgb = ccore.subImage(
                    image_rgb, ccore.Diff2D(0,0), ccore.Diff2D(image_rgb.width - \
                               (image_rgb.width % 4), image_rgb.height))
            qimage = numpy_to_qimage(image_rgb.toArray(copy=False))

            # first call: build the dialog and its child widgets
            if qApp._image_dialog is None:
                qApp._image_dialog = QFrame()
                ratio = qimage.height() / float(qimage.width())
                qApp._image_dialog.setGeometry(50, 50, 800, 800 * ratio)

                # allow closing the dialog with <Esc>
                shortcut = QShortcut(QKeySequence(Qt.Key_Escape),
                                     qApp._image_dialog)
                shortcut.activated.connect(self._on_esc_pressed)

                layout = QVBoxLayout(qApp._image_dialog)
                layout.setContentsMargins(0, 0, 0, 0)

                # image display widget that keeps the aspect ratio
                qApp._graphics = ImageRatioDisplay(qApp._image_dialog, ratio)
                qApp._graphics.setScaledContents(True)
                qApp._graphics.resize(800, 800 * ratio)
                qApp._graphics.setMinimumSize(QSize(100, 100))
                policy = QSizePolicy(QSizePolicy.Expanding,
                                     QSizePolicy.Expanding)
                policy.setHeightForWidth(True)
                qApp._graphics.setSizePolicy(policy)
                layout.addWidget(qApp._graphics)

                # bottom row holding the renderer selection combo box
                dummy = QFrame(qApp._image_dialog)
                dymmy_layout = QHBoxLayout(dummy)
                dymmy_layout.setContentsMargins(5, 5, 5, 5)

                qApp._image_combo = QComboBox(dummy)
                qApp._image_combo.setSizePolicy(
                    QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))
                self._set_display_renderer_info()

                dymmy_layout.addStretch()
                dymmy_layout.addWidget(qApp._image_combo)
                dymmy_layout.addStretch()
                layout.addWidget(dummy)
                layout.addStretch()

                qApp._image_dialog.show()
                qApp._image_dialog.raise_()
            #else:
            #    qApp._graphics_pixmap.setPixmap(QPixmap.fromImage(qimage))
            qApp._graphics.setPixmap(QPixmap.fromImage(qimage))
            qApp._image_dialog.setWindowTitle(info)
            qApp._image_dialog.setToolTip(filename)

            # re-raise the dialog in case the user closed it meanwhile
            if not qApp._image_dialog.isVisible():
                qApp._image_dialog.show()
                qApp._image_dialog.raise_()
Example #6
0
    def apply_registration(self):
        """Replace the current image by its registered sub-image."""
        raw = self.meta_image.image

        # ccore.subImage checks dimensions
        shift = (ccore.Diff2D(*self.registration_start) -
                 ccore.Diff2D(*self.channelRegistration))
        size = ccore.Diff2D(*self.new_image_size)
        self.meta_image.set_image(ccore.subImage(raw, shift, size))
Example #7
0
    def _on_update_image(self, image_rgb, info, filename):
        """Show *image_rgb* in the shared image dialog.

        Creates the dialog, graphics widget and renderer combo box
        lazily on first use (stored on ``qApp``); afterwards only the
        pixmap, window title and tool tip are refreshed.

        :param image_rgb: ccore RGB image to display.
        :param info: text used as the dialog's window title.
        :param filename: path shown as the dialog's tool tip.
        """
        if self._show_image.isChecked():
            # FIXME:
            # NOTE(review): width is reduced to a multiple of 4 pixels --
            # presumably for QImage's 32-bit scanline alignment; confirm
            # before changing.
            if image_rgb.width % 4 != 0:
                image_rgb = ccore.subImage(
                    image_rgb, ccore.Diff2D(0,0), ccore.Diff2D(image_rgb.width - \
                               (image_rgb.width % 4), image_rgb.height))
            qimage = numpy_to_qimage(image_rgb.toArray(copy=False))

            # first call: build the dialog and its child widgets
            if qApp._image_dialog is None:
                qApp._image_dialog = QFrame()
                ratio = qimage.height()/float(qimage.width())
                qApp._image_dialog.setGeometry(50, 50, 800, 800*ratio)

                # allow closing the dialog with <Esc>
                shortcut = QShortcut(QKeySequence(Qt.Key_Escape), qApp._image_dialog)
                shortcut.activated.connect(self._on_esc_pressed)

                layout = QVBoxLayout(qApp._image_dialog)
                layout.setContentsMargins(0,0,0,0)

                # image display widget that keeps the aspect ratio
                qApp._graphics = ImageRatioDisplay(qApp._image_dialog, ratio)
                qApp._graphics.setScaledContents(True)
                qApp._graphics.resize(800, 800*ratio)
                qApp._graphics.setMinimumSize(QSize(100,100))
                policy = QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
                policy.setHeightForWidth(True)
                qApp._graphics.setSizePolicy(policy)
                layout.addWidget(qApp._graphics)

                # bottom row holding the renderer selection combo box
                dummy = QFrame(qApp._image_dialog)
                dymmy_layout = QHBoxLayout(dummy)
                dymmy_layout.setContentsMargins(5,5,5,5)

                qApp._image_combo = QComboBox(dummy)
                qApp._image_combo.setSizePolicy(QSizePolicy(QSizePolicy.Expanding,
                                                            QSizePolicy.Fixed))
                self._set_display_renderer_info()

                dymmy_layout.addStretch()
                dymmy_layout.addWidget(qApp._image_combo)
                dymmy_layout.addStretch()
                layout.addWidget(dummy)
                layout.addStretch()

                qApp._image_dialog.show()
                qApp._image_dialog.raise_()
            #else:
            #    qApp._graphics_pixmap.setPixmap(QPixmap.fromImage(qimage))
            qApp._graphics.setPixmap(QPixmap.fromImage(qimage))
            qApp._image_dialog.setWindowTitle(info)
            qApp._image_dialog.setToolTip(filename)

            # re-raise the dialog in case the user closed it meanwhile
            if not qApp._image_dialog.isVisible():
                qApp._image_dialog.show()
                qApp._image_dialog.raise_()
Example #8
0
    def normalize_image(self, plate_id=None):
        """Normalize the current image to the 8 bit range and store it
        back on the meta image.

        With flat field correction enabled the image is corrected with a
        background image (cropped to the global crop region when one is
        set) before the linear transform; otherwise the transform is
        chosen by the input pixel type.

        :param plate_id: identifier of the plate whose flat field
            correction image is used (may be None).
        """
        # Leftover remote-debugging hook: attach to a running pydevd
        # (eclipse) server if one is available.  A failure here must not
        # break image processing; catch Exception instead of a bare
        # except so SystemExit/KeyboardInterrupt still propagate, and
        # report via the logger instead of a print statement.
        try:
            import pydevd
            pydevd.connected = True
            pydevd.settrace(suspend=False)
            self.logger.debug('Thread enabled interactive eclipse debuging...')
        except Exception:
            # pydevd not installed or no debug server listening
            pass
        img_in = self.meta_image.image
        if self.bFlatfieldCorrection:
            self.logger.debug(
                "* using flat field correction with image from %s" %
                self.strBackgroundImagePath)
            imgBackground = self._load_flatfield_correction_image(plate_id)

            crop_coordinates = MetaImage.get_crop_coordinates()
            if crop_coordinates is not None:
                self.logger.debug("* applying cropping to background image")
                imgBackground = ccore.subImage(
                    imgBackground,
                    ccore.Diff2D(crop_coordinates[0], crop_coordinates[1]),
                    ccore.Diff2D(crop_coordinates[2], crop_coordinates[3]))

            img_in = ccore.flatfieldCorrection(img_in, imgBackground, 0.0,
                                               True)
            img_out = ccore.linearTransform2(img_in, self.fNormalizeMin,
                                             self.fNormalizeMax, 0, 255, 0,
                                             255)
        else:
            self.logger.debug("* not using flat field correction")
            # isinstance instead of type comparison: the idiomatic type
            # check, robust against subclassing.
            if isinstance(img_in, ccore.UInt16Image):
                img_out = ccore.linearTransform3(img_in,
                                                 int(self.fNormalizeMin),
                                                 int(self.fNormalizeMax), 0,
                                                 255, 0, 255)
            elif isinstance(img_in, ccore.Image):
                img_out = ccore.linearTransform2(img_in,
                                                 int(self.fNormalizeMin),
                                                 int(self.fNormalizeMax), 0,
                                                 255, 0, 255)
            else:
                # unknown pixel type - pass the image through unchanged
                img_out = img_in

        self.meta_image.set_image(img_out)
Example #9
0
    def normalize_image(self, plate_id=None):
        """Normalize the current image to the 8 bit range and store it
        back on the meta image.

        With flat field correction enabled the image is corrected with a
        background image (cropped to the global crop region when one is
        set) before the linear transform; otherwise the transform is
        chosen by the input pixel type.

        :param plate_id: identifier of the plate whose flat field
            correction image is used (may be None).
        """
        # Leftover remote-debugging hook: attach to a running pydevd
        # (eclipse) server if one is available.  A failure here must not
        # break image processing; catch Exception instead of a bare
        # except so SystemExit/KeyboardInterrupt still propagate, and
        # report via the logger instead of a print statement.
        try:
            import pydevd
            pydevd.connected = True
            pydevd.settrace(suspend=False)
            self.logger.debug('Thread enabled interactive eclipse debuging...')
        except Exception:
            # pydevd not installed or no debug server listening
            pass
        img_in = self.meta_image.image
        if self.bFlatfieldCorrection:
            self.logger.debug("* using flat field correction with image from %s"
                              % self.strBackgroundImagePath)
            imgBackground = self._load_flatfield_correction_image(plate_id)

            crop_coordinates = MetaImage.get_crop_coordinates()
            if crop_coordinates is not None:
                self.logger.debug("* applying cropping to background image")
                imgBackground = ccore.subImage(
                    imgBackground,
                    ccore.Diff2D(crop_coordinates[0], crop_coordinates[1]),
                    ccore.Diff2D(crop_coordinates[2], crop_coordinates[3]))

            img_in = ccore.flatfieldCorrection(img_in, imgBackground, 0.0, True)
            img_out = ccore.linearTransform2(img_in, self.fNormalizeMin,
                                             self.fNormalizeMax, 0, 255, 0, 255)
        else:
            self.logger.debug("* not using flat field correction")
            # isinstance instead of type comparison: the idiomatic type
            # check, robust against subclassing.
            if isinstance(img_in, ccore.UInt16Image):
                img_out = ccore.linearTransform3(img_in, int(self.fNormalizeMin),
                                                 int(self.fNormalizeMax),
                                                 0, 255, 0, 255)
            elif isinstance(img_in, ccore.Image):
                img_out = ccore.linearTransform2(img_in, int(self.fNormalizeMin),
                                                 int(self.fNormalizeMax),
                                                 0, 255, 0, 255)
            else:
                # unknown pixel type - pass the image through unchanged
                img_out = img_in

        self.meta_image.set_image(img_out)
Example #10
0
    def normalize_image(self, plate_id=None):
        """Normalize the current image to the 8 bit range and store it
        back on the meta image.

        With flat field correction enabled the image is corrected with a
        background image (cropped to the global crop region when one is
        set) before the linear transform; otherwise the transform is
        chosen by the input pixel type.

        :param plate_id: identifier of the plate whose flat field
            correction image is used (may be None).
        """
        img_in = self.meta_image.image
        if self.bFlatfieldCorrection:
            self.logger.debug(
                "* using flat field correction with image from %s" %
                self.strBackgroundImagePath)
            imgBackground = self._load_flatfield_correction_image(plate_id)

            crop_coordinates = MetaImage.get_crop_coordinates()
            if crop_coordinates is not None:
                self.logger.debug("* applying cropping to background image")
                imgBackground = ccore.subImage(
                    imgBackground,
                    ccore.Diff2D(crop_coordinates[0], crop_coordinates[1]),
                    ccore.Diff2D(crop_coordinates[2], crop_coordinates[3]))

            img_in = ccore.flatfieldCorrection(img_in, imgBackground, 0.0,
                                               True)
            img_out = ccore.linearTransform2(img_in, self.fNormalizeMin,
                                             self.fNormalizeMax, 0, 255, 0,
                                             255)
        else:
            self.logger.debug("* not using flat field correction")
            # isinstance instead of type comparison: the idiomatic type
            # check, robust against subclassing.
            if isinstance(img_in, ccore.UInt16Image):
                img_out = ccore.linearTransform3(img_in,
                                                 int(self.fNormalizeMin),
                                                 int(self.fNormalizeMax), 0,
                                                 255, 0, 255)
            elif isinstance(img_in, ccore.Image):
                img_out = ccore.linearTransform2(img_in,
                                                 int(self.fNormalizeMin),
                                                 int(self.fNormalizeMax), 0,
                                                 255, 0, 255)
            else:
                # unknown pixel type - pass the image through unchanged
                img_out = img_in

        self.meta_image.set_image(img_out)
Example #11
0
    def __init__(self, eventselector, strPathIn, oP, strPathOut,
                 imageCompression="85",
                 imageSuffix=".jpg",
                 border=0,
                 writeSubdirs=True,
                 writeDescription=True,
                 size=None,
                 oneFilePerTrack=False):
        """Cut per-event image galleries out of the raw images.

        For every event reported by *eventselector* a sub-image per
        time point is cropped from the raw image (clipped at the image
        border and padded back to *size*), optionally masked by object
        label, and written below *strPathOut*.

        :param eventselector: provides ``bboxes()`` with the per-event
            time point / bounding box data.
        :param strPathIn: directory holding the raw input images.
        :param oP: position identifier used in file and directory names.
        :param strPathOut: base output directory for the galleries.
        :param imageCompression: compression setting forwarded when
            tracks are merged into one file per track.
        :param imageSuffix: file extension of the written images.
        :param border: extra border (pixels) around each bounding box.
        :param writeSubdirs: write one sub-directory per event.
        :param writeDescription: write a tab-separated description file
            per event.
        :param size: (width, height) every cut image is padded to.
        :param oneFilePerTrack: merge the images of each track into a
            single gallery image afterwards.
        """
        self._bHasImages = False
        dctTimePoints = {}

        # First pass: collect, per time point, the events/objects to be
        # cut, and write the per-event description files.
        for strStartId, lstTimeData in eventselector.bboxes( \
            size=size, border=border).iteritems():
            items = Tracker.split_nodeid(strStartId)
            iStartT, iObjId = items[:2]
            if len(items) == 3:
                branch_id = items[2]
            else:
                branch_id = 1

            if writeSubdirs:
                strPathOutEvent = os.path.join(strPathOut,
                                               self._format_name(oP, iStartT, iObjId, branch_id))
            else:
                strPathOutEvent = strPathOut
            makedirs(strPathOutEvent)

            if writeDescription:
                # open() instead of the deprecated/removed file() builtin
                oFile = open(os.path.join(strPathOutEvent,
                                          "_%s.txt" % self._format_name(oP, iStartT, iObjId, branch_id)), "w")
                lstData = ["Frame", "ObjId", "x1", "y1", "x2", "y2"]
                oFile.write("%s\n" % "\t".join(map(str, lstData)))

            for iCnt, (iT, tplBoundingBox, lstObjIds) in enumerate(lstTimeData):

                if writeDescription:
                    lstData = [iT, ';'.join(map(str, lstObjIds))] + list(tplBoundingBox)
                    oFile.write("%s\n" % "\t".join(map(str, lstData)))
                if iT not in dctTimePoints:
                    dctTimePoints[iT] = []
                dctTimePoints[iT].append((strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox))

            if writeDescription:
                oFile.close()

        # Second pass: load each raw image only once and cut every
        # gallery image that needs it.
        for idx, (iT, lstItems) in enumerate(dctTimePoints.iteritems()):

            imgXY = self._getImage(strPathIn, iT)

            for strStartId, lstObjIds, iCnt, strPathOutEvent, tplBoundingBox in lstItems:

                # clip the bounding box to the image boundaries
                x1, y1, x2, y2 = tplBoundingBox
                x1Corr = 0 if x1 < 0 else x1
                y1Corr = 0 if y1 < 0 else y1
                x2Corr = imgXY.width-1 if x2 >= imgXY.width else x2
                y2Corr = imgXY.height-1 if y2 >= imgXY.height else y2

                imgSub = ccore.subImage(imgXY,
                                        ccore.Diff2D(x1Corr, y1Corr),
                                        ccore.Diff2D(x2Corr-x1Corr+1, y2Corr-y1Corr+1))

                # box was clipped: pad the cut back to the requested size
                if (x1 < 0 or y1 < 0 or
                    x2 >= imgXY.width or y2 >= imgXY.height):
                    imgSub2 = self.IMAGE_CLASS(size[0], size[1])
                    ccore.copySubImage(imgSub, imgSub2, ccore.Diff2D(x1Corr-x1, y1Corr-y1))
                    imgSub = imgSub2

                assert imgSub.width == size[0]
                assert imgSub.width == x2-x1+1
                assert imgSub.height == size[1]
                assert imgSub.height == y2-y1+1

                if self.PROCESS_LABEL:
                    # keep only the pixels belonging to the event's objects
                    lstImages = []
                    for iObjId in lstObjIds:
                        lstImages.append(ccore.copyImageIfLabel(imgSub, imgSub, iObjId))
                    imgSub = ccore.projectImage(lstImages, ccore.ProjectionType.MaxProjection)

                strFilenameImage = os.path.join(strPathOutEvent, "P%s__T%05d%s" % (oP, iT, imageSuffix))
                ccore.writeImage(imgSub, strFilenameImage)

        if oneFilePerTrack and os.path.isdir(strPathOut):
            self.convertToOneFilePerTrack(strPathOut, imageCompression)
Example #12
0
    def cutTracks(self,
                  full_track_data,
                  img_container,
                  plate, pos,
                  lstTracks=None,
                  channels=None,
                  skip_done=False):
        """Write one gallery image per track: a grid with one column per
        time point and one row per channel, each cell a square cutout of
        side 2*self.width+1 centered on the tracked object.

        :param full_track_data: nested dict indexed by
            [plate][pos][trackId][channel][region] holding per-track
            feature vectors (tracking__center_x/_y and Frame are read).
        :param img_container: image container providing ``path``,
            ``dimension_lookup`` and ``meta_data.channels``.
        :param plate: plate identifier.
        :param pos: position identifier.
        :param lstTracks: track ids to cut; all tracks of the position
            when None.
        :param channels: channels to include; all channels of the
            container when None.
        :param skip_done: skip tracks whose output file already exists.
        """
        #imoutDir = self.oSettings.galleryDir
        #inDir = os.path.join(self.oSettings.rawImgDir, plate)

        #lstTracks.sort()
        #imgContainer = self.imageImporter(inDir)

        #channels = self.oSettings.plateChannelDict[plate].values()

        #filter(lambda x: x.split('__')[0] != 'feature', impdata['plate1_1_013']['00008']['T00181__O0031']['primary']['primary'].keys())
        #filter(lambda x: x.split('__')[0] != 'feature', impdata['plate1_1_013']['00008']['T00181__O0031']['primary']['primary'].keys())
        #['tracking__upperleft_x', 'tracking__center_x', 'tracking__lowerright_y', 'class__label', 'class__name', 'tracking__lowerright_x', 'tracking__center_y', 'class__probability', 'tracking__upperleft_y']

        if channels is None:
            channels = img_container.meta_data.channels

        imoutDir = os.path.join(self.baseOutDir, plate, pos)
        if not os.path.exists(imoutDir):
            print 'generating the folder %s' % imoutDir
            os.makedirs(imoutDir)

        if lstTracks is None:
            lstTracks = sorted(full_track_data[plate][pos].keys())

        for trackId in lstTracks:
            # (center_x, center_y, frame) per time point of the track,
            # taken from the tracking channel/region
            center_values = zip(full_track_data[plate][pos][trackId][self.track_channel][self.track_region]['tracking__center_x'],
                                full_track_data[plate][pos][trackId][self.track_channel][self.track_region]['tracking__center_y'],
                                full_track_data[plate][pos][trackId][self.track_channel][self.track_region]['Frame'])

            print 'cutting ', trackId
            imout_filename = os.path.join(imoutDir, 'Gallery--%s.png' % (trackId))

            if skip_done and os.path.isfile(imout_filename):
                continue

            # allocate output image
            imout = ccore.Image(len(center_values) * (2*self.width + 1), len(channels) * (2*self.width + 1))

            images = {}
            # x/y: upper-left corner of the current grid cell in imout
            x = 0
            for cx, cy, timepoint in center_values:
                y = 0
                for channel in channels :
                    #image_filename = os.path.join(inDir, imageInfo[pos][timepoint][channel]['path'],
                    #                              imageInfo[pos][timepoint][channel]['filename'])
                    image_filename = os.path.join(img_container.path,
                                                  img_container.dimension_lookup[pos][timepoint][channel][0])
                    #print image_filename
                    imin = ccore.readImageMito(image_filename)

                    # clip the square cutout to the input image boundaries
                    x_ul = cx - self.width if cx >= self.width else 0 # max(cx - width, 0)
                    #x_ul = max(cx - width, 0)
                    y_ul = cy - self.width if cy >= self.width else 0 # max(cy - width, 0)
                    #y_ul = max(cy - width, 0)
                    x_lr = cx + self.width if cx + self.width < imin.width else imin.width - 1
                    #x_lr = min(cx + width, imin.width-1)
                    y_lr = cy + self.width if cy + self.width < imin.height else imin.height - 1
                    #y_lr = min(cx + width, imin.height-1)
                    w_x = x_lr - x_ul
                    w_y = y_lr - y_ul

                    #print x_ul, y_ul, x_lr, y_lr, w_x, w_y

                    # cut the patch and paste it into the gallery grid
                    imsub = ccore.subImage(imin,
                                           ccore.Diff2D(int(x_ul), int(y_ul)),
                                           ccore.Diff2D(int(w_x), int(w_y)))
                    ccore.copySubImage(imsub, imout, ccore.Diff2D(x, y))
                    y += (2 * self.width + 1)
                x += (2 * self.width + 1)
            ccore.writeImage(imout, imout_filename)

        return