def initUI(self):
        self.setWindowTitle(self.title)
        self.setGeometry(self.left, self.top, self.width, self.height)

        grid = QGridLayout()

        self.probes = ('probeA', 'probeB', 'probeC', 'probeD', 'probeE',
                       'probeF')

        self.probe_images = [QLabel(i) for i in self.probes]

        im8 = Image.fromarray(np.ones((800, 100), dtype='uint8') * 230)
        imQt = QImage(ImageQt.ImageQt(im8))
        imQt = imQt.convertToFormat(QImage.Format_ARGB32)

        for i, image in enumerate(self.probe_images):
            image.setObjectName(self.probes[i])
            image.mousePressEvent = partial(self.clickedOnImage, image)
            image.setPixmap(QPixmap.fromImage(imQt))
            grid.addWidget(image, 0, i * 2)

        self.boundary_buttons = [BoundaryButtons(i, self) for i in self.probes]

        subgrid = QGridLayout()

        save_button = QPushButton('Save', self)
        save_button.setToolTip('Save values as CSV')
        save_button.clicked.connect(self.saveData)

        load_button = QPushButton('Load', self)
        load_button.setToolTip('Load volume data')
        load_button.clicked.connect(self.loadData)

        subgrid.addWidget(save_button, 2, 2)
        subgrid.addWidget(load_button, 2, 3)

        grid.addLayout(subgrid, 0, 13)

        self.current_directory = '/mnt/md0/data/opt/production'

        self.data_loaded = False

        self.selected_probe = None

        self.setLayout(grid)

        for buttons in self.boundary_buttons:
            buttons.createButtons()

        self.selected_probe = None
        self.selected_boundary = -1

        self.show()
Example #2
def mask_image(imgdata, imgtype='jpg', size=64):
    """Return a ``QPixmap`` from *imgdata* masked with a smooth circle.

    *imgdata* are the raw image bytes, *imgtype* denotes the image type.

    The returned image will have a size of *size* × *size* pixels.

    """
    # Load image and convert to 32-bit ARGB (adds an alpha channel):
    # image = QImage.fromData(imgdata, imgtype)
    h, w, _ = imgdata.shape
    image = QImage(imgdata, w, h, QImage.Format_RGB888).rgbSwapped()
    image = image.convertToFormat(QImage.Format_ARGB32)

    # Crop image to a square:
    imgsize = min(image.width(), image.height())
    rect = QRect(
        (image.width() - imgsize) // 2,
        (image.height() - imgsize) // 2,
        imgsize,
        imgsize,
    )
    image = image.copy(rect)

    # Create the output image with the same dimensions and an alpha channel
    # and make it completely transparent:
    out_img = QImage(imgsize, imgsize, QImage.Format_ARGB32)
    out_img.fill(Qt.transparent)

    # Create a texture brush and paint a circle with the original image onto
    # the output image:
    brush = QBrush(image)        # Create texture brush
    painter = QPainter(out_img)  # Paint the output image
    painter.setBrush(brush)      # Use the image texture brush
    painter.setPen(Qt.NoPen)     # Don't draw an outline
    painter.setRenderHint(QPainter.Antialiasing, True)  # Use AA
    painter.drawEllipse(0, 0, imgsize, imgsize)  # Actually draw the circle
    painter.end()                # We are done (segfault if you forget this)

    # Convert the image to a pixmap and rescale it.  Take pixel ratio into
    # account to get a sharp image on retina displays:
    # pr = QWindow().devicePixelRatio()
    pm = QPixmap.fromImage(out_img)
    # pm.setDevicePixelRatio(pr)
    # size *= pr
    # pm = pm.scaled(size, size, Qt.KeepAspectRatio, Qt.SmoothTransformation)

    return pm
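# A minimal usage sketch (not part of the source): this variant of mask_image()
# expects a BGR uint8 NumPy array (OpenCV-style) rather than raw encoded bytes,
# QPixmap needs a running QApplication, and the file name below is hypothetical.
import sys
import numpy as np
from PyQt5.QtWidgets import QApplication

app = QApplication(sys.argv)
frame = np.full((120, 160, 3), 200, dtype=np.uint8)  # dummy BGR image
avatar = mask_image(frame)                           # circular QPixmap
avatar.save('avatar.png')                            # hypothetical output path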
Example #3
class Channel(object):
    def __init__(self, filename=None, type=None):

        self.filename = filename  # path relative to the TagLab directory
        self.type = type  # RGB | DEM
        self.qimage = None  # cached QImage (to speed up visualization)
        self.float_map = None  # map of 32-bit floating point (e.g. to store high precision depth values)
        self.nodata = None  # invalid value

    def loadData(self):
        """
        Load the image data. The QImage is cached to speed up visualization.
        """

        if self.type == "RGB":
            self.qimage = QImage(self.filename)
            self.qimage = self.qimage.convertToFormat(QImage.Format_RGB32)

        # typically the depth map is stored in a 32-bit Tiff
        if self.type == "DEM":
            dem = rio.open(self.filename)
            self.float_map = dem.read(1).astype(np.float32)
            self.nodata = dem.nodata
            self.qimage = utils.floatmapToQImage(self.float_map, self.nodata)

        return self.qimage

    def save(self):
        return {"filename": self.filename, "type": self.type}
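# A minimal usage sketch (file names are hypothetical; the "DEM" branch
# additionally requires rasterio imported as `rio` and TagLab's utils module):
rgb_channel = Channel(filename="orthomosaic.png", type="RGB")
qimg = rgb_channel.loadData()   # cached QImage, converted to Format_RGB32
print(rgb_channel.save())       # {'filename': 'orthomosaic.png', 'type': 'RGB'}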
Example #4
 def _plat_get_blocks(self, block_count_per_side, orientation):
     image = QImage(str(self.path))
     image = image.convertToFormat(QImage.Format_RGB888)
     # MYSTERY TO SOLVE: For reasons I cannot explain, orientations 5 and 7 don't work for
     # duplicate scanning. The transforms seem to work fine (if I try to save the image after
     # the transform, we see that the image has been correctly flipped and rotated), but the
     # analysis part yields wrong blocks. I spent enough time with this feature, so I'll leave
     # it like that for now. (By the way, orientations 5 and 7 work fine under Cocoa.)
     if 2 <= orientation <= 8:
         t = QTransform()
         if orientation == 2:
             t.scale(-1, 1)
         elif orientation == 3:
             t.rotate(180)
         elif orientation == 4:
             t.scale(1, -1)
         elif orientation == 5:
             t.scale(-1, 1)
             t.rotate(90)
         elif orientation == 6:
             t.rotate(90)
         elif orientation == 7:
             t.scale(-1, 1)
             t.rotate(270)
         elif orientation == 8:
             t.rotate(270)
         image = image.transformed(t)
     return getblocks(image, block_count_per_side)
Example #5
    def refreshImage(self):

        if self.data_loaded:
            im = np.take(self.volume,
                 self.slider.value(),
                 axis=self.currentAxis)

            im = rotate(im, self.rotations[self.currentAxis], reshape=False)

            im = shift(im, [self.xshift[self.currentAxis], 
                           self.yshift[self.currentAxis]])

            im8 = Image.fromarray(im)            
        else:
            im8 = Image.fromarray(np.ones((512,512),dtype='uint8')*255)
            
        imQt = QImage(ImageQt.ImageQt(im8))
        imQt = imQt.convertToFormat(QImage.Format_RGB16)

        if self.data_loaded:

            if self.currentAxis == 0:
                self.drawVerticalLine(imQt, 48)
                self.drawVerticalLine(imQt, 209)
                self.drawHorizontalLine(imQt, 75)
                self.drawHorizontalLine(imQt, 182)
            elif self.currentAxis == 1:
                self.drawVerticalLine(imQt, 124)
            elif self.currentAxis == 2:
                self.drawVerticalLine(imQt, 175)

        pxmap = QPixmap.fromImage(imQt).scaledToWidth(IMG_WIDTH).scaledToHeight(IMG_WIDTH)

        self.image.setPixmap(pxmap)
    def refreshTemplate(self):

        im8 = Image.fromarray(self.template_volume[self.slider2.value(),:,:])            
        imQt = QImage(ImageQt.ImageQt(im8))
        imQt = imQt.convertToFormat(QImage.Format_RGB16)
            
        if True:
            for i in range(0,self.template_annotations.shape[0]):
                z = int(self.template_annotations[i,2])
                
                if z == self.slider2.value():
                    x = int(self.template_annotations[i,0])
                    y = int(self.template_annotations[i,1])
                    
                    if i == self.landmarkIndex:
                        color = QColor('magenta')
                    else:
                        color = QColor('pink')
                    
                    if x > -1 and y > -1:
                    
                        for j in range(x-15,x+15):
                            for k in range(y-15,y+15):
                                if pow(j-x,2) + pow(k-y,2) < 30:
                                    imQt.setPixelColor(j,k,color)

        pxmap = QPixmap.fromImage(imQt).scaledToWidth(IMG_WIDTH).scaledToHeight(IMG_WIDTH)
        self.template.setPixmap(pxmap)
Example #7
def white_outline(image: QImage, sigma=6, repeat=6) -> QImage:
    if image.format() != QImage.Format_ARGB32:
        image = image.convertToFormat(QImage.Format_ARGB32)

    bits = image.bits()
    bits.setsize(image.byteCount())

    shape = (image.width() * image.height())
    strides = (4,)

    alpha = numpy.ndarray(shape=shape, dtype=numpy.uint8,
                          buffer=bits, strides=strides, offset=3)
    color = numpy.ndarray(shape=shape, dtype=numpy.uint8)
    color.fill(255)

    alpha = alpha.reshape((image.width(), image.height()))
    alpha = alpha.astype(numpy.float64)
    alpha = gaussian_filter(alpha, sigma=sigma)
    alpha *= repeat
    numpy.clip(alpha, 0, 255, out=alpha)
    alpha = alpha.astype(numpy.uint8)
    alpha = alpha.reshape(shape)

    arr = numpy.dstack((color, color, color, alpha))

    # .copy() detaches the QImage from the temporary NumPy buffer, which would
    # otherwise be freed when this function returns
    return QImage(arr, image.width(), image.height(), QImage.Format_ARGB32).copy()
    def refreshImage(self):

        if self.data_loaded:
            im8 = Image.fromarray(self.volume[self.slider1.value(),:,:])            
        else:
            im8 = Image.fromarray(np.ones((1024,1024),dtype='uint8')*255)
            
        imQt = QImage(ImageQt.ImageQt(im8))
        imQt = imQt.convertToFormat(QImage.Format_RGB16)
            
        if self.data_loaded:
            for i in range(0,self.annotations.shape[0]):
                z = int(self.annotations[i,2])
                
                if z == self.slider1.value():
                    x = int(self.annotations[i,0])
                    y = int(self.annotations[i,1])
                    
                    if i == self.landmarkIndex:
                        color = QColor('magenta')
                    else:
                        color = QColor('pink')
                    
                    if x > -1 and y > -1:
                    
                        for j in range(x-15,x+15):
                            for k in range(y-15,y+15):
                                if pow(j-x,2) + pow(k-y,2) < 30:
                                    imQt.setPixelColor(j,k,color)

        pxmap = QPixmap.fromImage(imQt).scaledToWidth(IMG_WIDTH).scaledToHeight(IMG_WIDTH)
        self.image.setPixmap(pxmap)
def threshold(src_img: QImage, low: int, height: int, val) -> QImage:
    """
    :param src_img:
    :param low:
    :param height:
    :param val:
    :return:
    """
    gray_img = src_img.convertToFormat(QImage.Format_Grayscale8)
    res_img = QImage(src_img.width(), src_img.height(), QImage.Format_RGB888)

    # color = gray_img.pixelColor(364, 285)
    # print(color.red(), color.green(), color.blue())
    # color = gray_img.pixelColor(365, 285)
    # print(color.red(), color.green(), color.blue())
    # color = gray_img.pixelColor(365, 286)
    # print(color.red(), color.green(), color.blue())
    # color = gray_img.pixelColor(365, 287)
    # print(color.red(), color.green(), color.blue())
    # color = gray_img.pixelColor(366, 285)
    # print(color.red(), color.green(), color.blue())

    for y in range(0, src_img.height()):
        for x in range(0, src_img.width()):
            color = gray_img.pixelColor(x, y)
            if low < color.green() < height:
                res_img.setPixelColor(x, y, QColor(val, val, val))
            else:
                res_img.setPixelColor(x, y, QColor(0, 0, 0))

    return res_img
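# A brief usage sketch (the file name is hypothetical): pixels whose grayscale
# value falls strictly between the two bounds are painted with QColor(val, val, val);
# everything else becomes black.
src = QImage('input.png')
binary = threshold(src, low=100, height=200, val=255)
binary.save('binary.png')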
    def get_snapshot(self, bw=None, return_as_array=None):
        qimage = QImage(self.image)
        if return_as_array:
            qimage = qimage.convertToFormat(4)
            ptr = qimage.bits()
            ptr.setsize(qimage.byteCount())

            image_array = np.array(ptr).reshape(qimage.height(), qimage.width(), 4)
            if bw:
                return np.dot(image_array[..., :3], [0.299, 0.587, 0.114])
            else:
                return image_array
        else:
            if bw:
                return qimage.convertToFormat(QImage.Format_Mono)
            else:
                return qimage
Example #13
    def basefinished(self):
        if self.basereply.error() != QNetworkReply.NoError:
            return
        self.basepixmap = QPixmap()
        self.basepixmap.loadFromData(self.basereply.readAll())
        if self.basepixmap.size() != self.rect.size():
            self.basepixmap = self.basepixmap.scaled(self.rect.size(),
                                                     Qt.KeepAspectRatio,
                                                     Qt.SmoothTransformation)
        self.setPixmap(self.basepixmap)

        # make marker pixmap
        self.mkpixmap = QPixmap(self.basepixmap.size())
        self.mkpixmap.fill(Qt.transparent)
        br = QBrush(QColor(Config.dimcolor))
        painter = QPainter()
        painter.begin(self.mkpixmap)
        painter.fillRect(0, 0, self.mkpixmap.width(), self.mkpixmap.height(),
                         br)
        for marker in self.radar['markers']:
            pt = getPoint(marker["location"], self.point, self.zoom,
                          self.rect.width(), self.rect.height())
            mk2 = QImage()
            mkfile = 'teardrop'
            if 'image' in marker:
                mkfile = marker['image']
            if os.path.dirname(mkfile) == '':
                mkfile = os.path.join('markers', mkfile)
            if os.path.splitext(mkfile)[1] == '':
                mkfile += '.png'
            mk2.load(mkfile)
            if mk2.format() != QImage.Format_ARGB32:
                mk2 = mk2.convertToFormat(QImage.Format_ARGB32)
            mkh = 80  # self.rect.height() / 5
            if 'size' in marker:
                if marker['size'] == 'small':
                    mkh = 64
                if marker['size'] == 'mid':
                    mkh = 70
                if marker['size'] == 'tiny':
                    mkh = 40
            if 'color' in marker:
                c = QColor(marker['color'])
                (cr, cg, cb, ca) = c.getRgbF()
                for x in range(0, mk2.width()):
                    for y in range(0, mk2.height()):
                        (r, g, b, a) = QColor.fromRgba(mk2.pixel(x,
                                                                 y)).getRgbF()
                        r = r * cr
                        g = g * cg
                        b = b * cb
                        mk2.setPixel(x, y, QColor.fromRgbF(r, g, b, a).rgba())
            mk2 = mk2.scaledToHeight(mkh, 1)
            painter.drawImage(pt.x - mkh / 2, pt.y - mkh / 2, mk2)

        painter.end()

        self.wmk.setPixmap(self.mkpixmap)
Example #14
    def show_original_image(self):
        m = 6 * (self.set - 1) + self.id
        self.newpathdir = os.path.join(
            os.path.join(self.curr_dir_destination,
                         self.destination_directory), "Cropped")
        newpath = os.path.join(self.newpathdir,
                               os.path.basename(self.img_files[m]))

        flag = 0
        # The image has been cropped: delete the cropped file, restore the original path (from Img_files.txt) in the global array and Img_files_cropped.txt, and refresh the displayed label
        for i in range(len(self.img_files)):
            if self.img_files[i] == newpath:
                flag = 1
                if os.path.exists(newpath):
                    os.remove(newpath)

                # old path before crop (in source dir or destination dir) is written in img_files.txt
                with open("Img_files.txt", "r") as file:
                    img_files_old = file.read().splitlines()
                file.close()
                oldpath = img_files_old[m]
                self.img_files[i] = oldpath

                with open("Img_files_cropped.txt", "w") as file:
                    for j in range(len(self.img_files)):
                        file.write(self.img_files[j])
                        file.write("\n")
                file.close()

                # Replacing border with False, since crop image is removed
                self.img_files_border[m] = False

                # To show in the corresponding label, determine label id first
                k = m - (self.set - 1) * 6
                image = QImage(self.img_files[m])
                image = image.convertToFormat(
                    QImage.Format_ARGB8565_Premultiplied)

                p = QPainter(image)
                p.setCompositionMode(QPainter.CompositionMode_DestinationIn)
                p.fillRect(image.rect(), QColor(0, 0, 0, self.transparency))
                p.end()

                pixmap = QPixmap(image)
                w = int(self.labels[k].width() - 4.0)
                h = int(self.labels[k].height() - 4.0)

                smaller_pixmap = pixmap.scaled(w, h, Qt.KeepAspectRatio,
                                               Qt.FastTransformation)
                self.labels[k].setScaledContents(True)
                self.labels[k].setFrameShadow(QFrame.Plain)

                self.labels[k].setPixmap(smaller_pixmap)
                self.btn_img[k] = self.img_files[m]

        # The image has not been cropped, so no action is needed
        if flag == 0:
            pass
Example #15
class ColorThief(object):
    """Color thief main class."""
    def __init__(self, file):
        """Create one color thief for one image.

        :param file: A filename (string) or a file object. The file object
                     must implement `read()`, `seek()`, and `tell()` methods,
                     and be opened in binary mode.
        """
        self.image = QImage(file)
        if self.image.width() > 420:
            self.image = self.image.scaledToWidth(400, Qt.SmoothTransformation)

    def get_color(self, quality=10):
        """Get the dominant color.

        :param quality: quality settings, 1 is the highest quality, the bigger
                        the number, the faster a color will be returned but
                        the greater the likelihood that it will not be the
                        visually most dominant color
        :return tuple: (r, g, b)
        """
        palette = self.get_palette(5, quality)
        return palette[0]

    def get_palette(self, color_count=10, quality=10):
        """Build a color palette.  We are using the median cut algorithm to
        cluster similar colors.

        :param color_count: the size of the palette, max number of colors
        :param quality: quality settings, 1 is the highest quality, the bigger
                        the number, the faster the palette generation, but the
                        greater the likelihood that colors will be missed.
        :return list: a list of tuple in the form (r, g, b)
        """
        image = self.image.convertToFormat(QImage.Format_ARGB32_Premultiplied)
        pixels = list(
            map(
                lambda color:
                [color.red(),
                 color.green(),
                 color.blue(),
                 color.alpha()], (QColor(image.pixel(x, y))
                                  for x in range(image.width())
                                  for y in range(image.height()))))
        #         pixel_count = image.width() * image.height()
        valid_pixels = []
        for i in range(0, len(pixels), quality):
            r, g, b, a = pixels[i]
            # If pixel is mostly opaque and not white
            if a >= 125:
                if not (r > 250 and g > 250 and b > 250):
                    valid_pixels.append((r, g, b))

        # Send array to quantize function which clusters values
        # using median cut algorithm
        cmap = MMCQ.quantize(valid_pixels, color_count)
        return cmap.palette
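# A minimal usage sketch (the file name is hypothetical; get_palette() also
# relies on the MMCQ helper imported elsewhere in this module):
thief = ColorThief('photo.jpg')
dominant = thief.get_color(quality=10)      # dominant (r, g, b) tuple
palette = thief.get_palette(color_count=6)  # list of (r, g, b) tuples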
 def setBackgroundImage(self, q_img):
     pixmap = QPixmap.fromImage(
         q_img.convertToFormat(QImage.Format_ARGB32))
     self.setSceneRect(QRectF(0, 0, pixmap.width(), pixmap.height()))
     if self._bg_pixmap is None:
         self._bg_pixmap = self.addPixmap(pixmap)
         self._bg_pixmap.setZValue(0)
     else:
         self._bg_pixmap.setPixmap(pixmap)
Example #17
                def read_and_convert_using_qt_to_toif(handler, filename,
                                                      hs_cols, hs_rows):
                    img = QImage(filename)
                    if img.isNull():
                        handler.show_error(
                            _('Could not load the image {} -- unknown format or other error'
                              ).format(os.path.basename(filename)))
                        return
                    if (img.width(), img.height()) != (
                            hs_cols, hs_rows):  # do we need to scale it ?
                        img = img.scaled(
                            hs_cols, hs_rows, Qt.IgnoreAspectRatio,
                            Qt.SmoothTransformation
                        )  # force to our dest size. Note that IgnoreAspectRatio guarantees the right size. The other modes don't
                        if img.isNull() or (img.width(), img.height()) != (
                                hs_cols, hs_rows):
                            handler.show_error(
                                _("Could not scale image to {} x {} pixels").
                                format(hs_cols, hs_rows))
                            return
                    target_fmt = QImage.Format_RGB888
                    img = img.convertToFormat(
                        QImage.Format_Indexed8
                    ).convertToFormat(
                        target_fmt
                    )  # dither it down to 256 colors to reduce image complexity then back up to 24 bit for easy reading
                    if img.isNull():
                        handler.show_error(
                            _("Could not dither or re-render image"))
                        return

                    def qimg_to_toif(img, handler):
                        try:
                            import struct, zlib
                        except ImportError as e:
                            handler.show_error(
                                _("Could not convert image, a required library is missing: {}"
                                  ).format(e))
                            return
                        data, pixeldata = bytearray(), bytearray()
                        data += b'TOIf'
                        for y in range(img.width()):
                            for x in range(img.height()):
                                rgb = img.pixel(x, y)
                                r, g, b = qRed(rgb), qGreen(rgb), qBlue(rgb)
                                c = ((r & 0xF8) << 8) | ((g & 0xFC) << 3) | (
                                    (b & 0xF8) >> 3)
                                pixeldata += struct.pack(">H", c)
                        z = zlib.compressobj(level=9, wbits=10)
                        zdata = z.compress(bytes(pixeldata)) + z.flush()
                        zdata = zdata[2:-4]  # strip header and checksum
                        data += struct.pack("<HH", img.width(), img.height())
                        data += struct.pack("<I", len(zdata))
                        data += zdata
                        return bytes(data)

                    return qimg_to_toif(img, handler)
Example #18
    def imageAsArray(image: QImage):
        image = image.convertToFormat(QImage.Format_RGB32)
        width = image.width()
        height = image.height()

        ptr = image.constBits()
        ptr.setsize(height * width * 4)
        arr = np.frombuffer(ptr, np.uint8).reshape((height, width, 4))  # BGRA byte order (Format_RGB32, little-endian)
        arr = np.delete(arr, -1, axis=2)  # drop alpha -> BGR
        return np.fliplr(arr)
def pixmap2cv(pixmap):
    qImg = QImage(pixmap)
    qImg = qImg.convertToFormat(4)

    width = qImg.width()
    height = qImg.height()
    ptr = qImg.bits()
    ptr.setsize(qImg.byteCount())
    arr = np.array(ptr).reshape(height, width, 4)  #  Copies the data
    return arr
    def updateROIcanvas(self, wormCanvas, worm_index_roi, comboBox_ROI,
                        isDrawSkel):
        if not isinstance(self.frame_data, pd.DataFrame):
            #no trajectories data presented, nothing to do here
            wormCanvas.clear()
            return

        #update valid index for the comboBox
        comboBox_ROI.clear()
        comboBox_ROI.addItem(str(worm_index_roi))
        #add the indexes of the current frame into the roi combo box
        for ind in self.frame_data[self.worm_index_type].data:
            comboBox_ROI.addItem(str(ind))

        #extract individual worm ROI
        good = self.frame_data[self.worm_index_type] == worm_index_roi
        row_data = self.frame_data.loc[good].squeeze()

        if row_data.size == 0 or np.isnan(row_data['coord_x']) or np.isnan(
                row_data['coord_y']):
            #invalid data nothing to do here
            wormCanvas.clear()
            return

        worm_img, roi_corner = getWormROI(self.frame_img, row_data['coord_x'],
                                          row_data['coord_y'],
                                          row_data['roi_size'])
        #roi_corner = roi_corner+1

        roi_ori_size = worm_img.shape

        worm_img = np.ascontiguousarray(worm_img)
        worm_qimg = QImage(worm_img.data, worm_img.shape[1], worm_img.shape[0],
                           worm_img.strides[0], QImage.Format_Indexed8)
        worm_qimg = worm_qimg.convertToFormat(QImage.Format_RGB32,
                                              Qt.AutoColor)

        canvas_size = min(wormCanvas.height(), wormCanvas.width())
        worm_qimg = worm_qimg.scaled(canvas_size, canvas_size,
                                     Qt.KeepAspectRatio)

        if isDrawSkel:
            if row_data['has_skeleton'] == 1:
                self.drawSkel(worm_img,
                              worm_qimg,
                              row_data,
                              roi_corner=roi_corner)
            elif row_data['has_skeleton'] == 0:
                self.drawThreshMask(worm_img,
                                    worm_qimg,
                                    row_data,
                                    read_center=False)

        pixmap = QPixmap.fromImage(worm_qimg)
        wormCanvas.setPixmap(pixmap)
Example #21
    def set_colors(data: bin,
                   fg: QColor,
                   bg: QColor,
                   trans: QColor,
                   swap_fg_bg=False) -> bin:  # pylint: disable=too-many-locals
        """
        Burns foreground and background colors into a raster image, and returns
        the results as a PNG binary
        """
        image = QImage()
        image.loadFromData(data)
        if image.isNull():
            raise UnreadablePictureException(
                'Could not read embedded picture data')

        image = image.convertToFormat(QImage.Format_ARGB32)
        ucharptr = image.bits()
        ucharptr.setsize(image.bytesPerLine() * image.height())

        fg_rgba = qRgba(fg.red(), fg.green(), fg.blue(),
                        fg.alpha()) if fg and fg.isValid() else None
        bg_rgba = qRgba(bg.red(), bg.green(), bg.blue(),
                        bg.alpha()) if bg and bg.isValid() else None

        COLOR_TOLERANCE = 40

        fg_comp = 0
        bg_comp = 255

        for y in range(image.height()):
            start = y * image.width() * 4
            for x in range(image.width()):
                x_start = x * 4 + start
                rgba = struct.unpack('I', ucharptr[x_start:x_start + 4])[0]
                if trans and abs(qRed(rgba) - trans.red(
                )) < COLOR_TOLERANCE and abs(qGreen(rgba) - trans.green(
                )) < COLOR_TOLERANCE and abs(qBlue(rgba) -
                                             trans.blue()) < COLOR_TOLERANCE:
                    ucharptr[x_start:x_start + 4] = struct.pack(
                        'I', qRgba(0, 0, 0, 0))
                elif fg_rgba is not None and abs(
                        qRed(rgba) - fg_comp) < COLOR_TOLERANCE and abs(
                            qGreen(rgba) - fg_comp) < COLOR_TOLERANCE and abs(
                                qBlue(rgba) - fg_comp) < COLOR_TOLERANCE:
                    ucharptr[x_start:x_start + 4] = struct.pack('I', fg_rgba)
                elif bg_rgba is not None and abs(
                        qRed(rgba) - bg_comp) < COLOR_TOLERANCE and abs(
                            qGreen(rgba) - bg_comp) < COLOR_TOLERANCE and abs(
                                qBlue(rgba) - bg_comp) < COLOR_TOLERANCE:
                    ucharptr[x_start:x_start + 4] = struct.pack('I', bg_rgba)

        # convert to PNG
        png_data = QBuffer()
        image.save(png_data, "png")
        return png_data.data()
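# A minimal usage sketch (assumptions: the file name is hypothetical and
# set_colors is reachable as a staticmethod of its enclosing class, called
# PictureUtils here purely for illustration):
from PyQt5.QtGui import QColor

with open('picture.bmp', 'rb') as f:
    raw = f.read()
png_bytes = PictureUtils.set_colors(raw,
                                    fg=QColor(255, 0, 0),
                                    bg=QColor(255, 255, 255),
                                    trans=QColor(0, 255, 0))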
Example #22
    def toImage(self):
        t = time.time()

        tWAIT = time.time()
        self._arrayreq.wait()
        tWAIT = 1000.0 * (time.time() - tWAIT)

        tAR = time.time()
        a = self._arrayreq.getResult()
        tAR = 1000.0 * (time.time() - tAR)

        has_no_mask = not np.ma.is_masked(a)

        tImg = None
        if has_no_mask and _has_vigra and hasattr(
                vigra.colors, "gray2qimage_ARGB32Premultiplied"):
            if not a.flags.contiguous:
                a = a.copy()
            tImg = time.time()
            img = QImage(a.shape[1], a.shape[0],
                         QImage.Format_ARGB32_Premultiplied)
            tintColor = np.asarray([
                self._tintColor.redF(),
                self._tintColor.greenF(),
                self._tintColor.blueF()
            ],
                                   dtype=np.float32)
            normalize = np.asarray(self._normalize, dtype=np.float32)
            if normalize[0] > normalize[1]:
                normalize = np.array((0.0, 255.0)).astype(np.float32)
            vigra.colors.alphamodulated2qimage_ARGB32Premultiplied(
                a, byte_view(img), tintColor, normalize)
            tImg = 1000.0 * (time.time() - tImg)
        else:
            if has_no_mask:
                self.logger.warning("using unoptimized conversion functions")
            tImg = time.time()
            d = a[..., None].repeat(4, axis=-1)
            d[:, :, 0] = d[:, :, 0] * self._tintColor.redF()
            d[:, :, 1] = d[:, :, 1] * self._tintColor.greenF()
            d[:, :, 2] = d[:, :, 2] * self._tintColor.blueF()

            normalize = self._normalize
            img = array2qimage(d, normalize)
            img = img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
            tImg = 1000.0 * (time.time() - tImg)

        if self.logger.isEnabledFor(logging.DEBUG):
            tTOT = 1000.0 * (time.time() - t)
            self.logger.debug(
                "toImage (%dx%d, normalize=%r) took %f msec. (array req: %f, wait: %f, img: %f)"
                %
                (img.width(), img.height(), normalize, tTOT, tAR, tWAIT, tImg))

        return img
Example #23
def qimage2mat(qimage: QImage):
    incomingImage = qimage.convertToFormat(QImage.Format.Format_RGBA8888)

    width = incomingImage.width()
    height = incomingImage.height()

    ptr = incomingImage.bits()
    ptr.setsize(height * width * 4)
    arr = np.frombuffer(ptr, np.uint8).reshape((height, width, 4))

    return arr
Example #24
def scan_qr_from_image(image: QImage) -> Sequence[QrCodeResult]:
    """Might raise exception: MissingQrDetectionLib."""
    qr_reader = get_qr_reader()
    image_y800 = image.convertToFormat(QImage.Format_Grayscale8)
    res = qr_reader.read_qr_code(
        image_y800.constBits().__int__(), image_y800.byteCount(),
        image_y800.bytesPerLine(),
        image_y800.width(),
        image_y800.height()
    )
    return res
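# A minimal usage sketch (the file name is hypothetical; get_qr_reader() must
# find a supported QR detection library, otherwise MissingQrDetectionLib is
# raised):
results = scan_qr_from_image(QImage('screenshot.png'))
for result in results:
    print(result)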
Example #25
 def noise_image(self):
     img = self.original_img_pix.toImage()
     img.save('./tmp/1.jpg')
     std = self.std_input.value()
     noise.noise_image('./tmp/1.jpg', std)
     pxmap = QImage('./tmp/1.jpg')
     pxmap = pxmap.convertToFormat(QImage.Format_Grayscale8)
     pxmap = QPixmap.fromImage(pxmap)
     self.noising_image = pxmap
     self.original_img_lbl.setPixmap(
         pxmap.scaled(512, 512, QtCore.Qt.KeepAspectRatio))
Example #26
def get_grayscale(data, w, h, crop=True):
    table = []
    for i in range(256):
        table.append((255 << 24) | (i << 16) | (i << 8) | i)
    image = QImage(data, w, h, QImage.Format_Indexed8)
    image.setColorTable(table)
    image = image.convertToFormat(QImage.Format_ARGB32)

    if crop:
        image = image.copy(0, 0, w - 2, h - 2)
    return image
Example #27
def QImageTocvmat(img: QImage):
    img = img.convertToFormat(QImage.Format_RGBX8888)
    width = img.width()
    height = img.height()
    ptr = img.bits()
    ptr.setsize(height * width * 4)

    # copy the image data out of ptr so later changes to the QImage don't affect it
    ret = np.frombuffer(ptr, np.uint8).reshape((height, width, 4)).copy()
    ret.flags.writeable = False
    return ret
Example #28
    def import_label_map(self,
                         filename,
                         labels_info,
                         w_target,
                         h_target,
                         create_holes=False):
        """
        It imports a label map and create the corresponding blobs.
        The label map is rescaled such that it coincides with the reference map.
        """

        qimg_label_map = QImage(filename)
        qimg_label_map = qimg_label_map.convertToFormat(QImage.Format_RGB32)

        if w_target > 0 and h_target > 0:
            qimg_label_map = qimg_label_map.scaled(w_target, h_target,
                                                   Qt.IgnoreAspectRatio,
                                                   Qt.FastTransformation)

        label_map = utils.qimageToNumpyArray(qimg_label_map)
        label_map = label_map.astype(np.int32)

        # RGB -> label code association (ok, it is a dirty trick but it saves time..)
        label_coded = label_map[:, :, 0] + (label_map[:, :, 1] << 8) + (
            label_map[:, :, 2] << 16)

        labels = measure.label(label_coded, connectivity=1)

        too_much_small_area = 50
        region_big = None

        created_blobs = []
        for region in measure.regionprops(labels):
            if region.area > too_much_small_area:
                id = len(self.seg_blobs)
                blob = Blob(region, 0, 0, self.getFreeId())

                # assign class
                row = region.coords[0, 0]
                col = region.coords[0, 1]
                color = label_map[row, col]

                for label_name in labels_info.keys():
                    c = labels_info[label_name]
                    if c[0] == color[0] and c[1] == color[1] and c[2] == color[
                            2]:
                        blob.class_name = label_name
                        blob.class_color = c
                        break
                if create_holes or blob.class_name != 'Empty':
                    created_blobs.append(blob)

        return created_blobs
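# A small standalone check of the RGB -> label-code packing used above (an
# illustrative sketch, not part of the original class): every distinct RGB
# triple collapses into a distinct 24-bit integer, so measure.label() can treat
# each class color as a single value.
import numpy as np

label_map = np.array([[[255, 0, 0], [0, 255, 0]],
                      [[0, 255, 0], [0, 0, 255]]], dtype=np.int32)
label_coded = label_map[:, :, 0] + (label_map[:, :, 1] << 8) + (label_map[:, :, 2] << 16)
print(label_coded)  # [[     255    65280]
                    #  [   65280 16711680]]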
Example #29
    def load_graphics(self, path):
        graphics = []
        list_of_files = [f for f in listdir(path) if isfile(join(path, f))]

        for i in list_of_files:
            im = QImage(path + '/' + i)
            im = im.convertToFormat(QImage.Format_RGBA8888)
            ptr = im.bits()
            ptr.setsize(im.byteCount())
            graphics.append(ptr.asstring())

        return graphics
 def viewCam(self):
     # read image in BGR format
     ret, self.image = self.cap.read()
     self.image = imutils.resize(self.image, height=1024)
     # set image for snapshot
     self.imageSnap = self.image
     # convert image to RGB format
     self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
     # get image infos
     height, width, channel = self.image.shape
     step = channel * width
     qImg = QImage(self.image.data, width, height, step,
                   QImage.Format_RGB888)
     # show image in label_image
     if self.color == False:
         self.ui.label_camera.setPixmap(
             QPixmap.fromImage(
                 qImg.convertToFormat(QtGui.QImage.Format_Grayscale8)))
     else:
         self.ui.label_camera.setPixmap(
             QPixmap.fromImage(
                 qImg.convertToFormat(QtGui.QImage.Format_RGB888)))
Example #31
def _image_to_parallel_lines_( settings: Dict, image: QImage ) -> str:
	image: QImage = image.convertToFormat( QImage.Format_Grayscale8 )
	image = _scale_image( settings, image )

	pixel_box_size = settings[ 'pixel_box_size' ]
	direction = settings[ 'lines_direction' ]
	laser_pixel_size = settings[ 'laser_pixel_size' ]
	burn_speed = settings[ 'burn_speed' ]
	travel_speed = settings[ 'travel_speed' ]

	max_lines_per_pixel = (pixel_box_size / laser_pixel_size) * 0.5
	percentage_per_line = 1 / max_lines_per_pixel * 100

	if direction != 0:
		raise NotImplementedError()

	height = image.height()
	width = image.width()

	line_segments = [ ]

	for row_inv in range( height ):
		row = height - row_inv - 1
		line_segments.append( [ ] )
		y_base = row * pixel_box_size

		for col in range( width ):
			segments = [ ]
			lightness = image.pixelColor( col, row ).lightness()
			amount_to_blacken = (255 - lightness) / 2.55
			required_line_count = int( amount_to_blacken / percentage_per_line )
			for i in range( required_line_count ):
				y_point = y_base + (i + 1) / required_line_count * pixel_box_size
				segments.append( y_point )

			line_segments[ -1 ].append( segments )

	instructions = [ ]
	for row in line_segments:
		for col_i, col in enumerate( row ):
			for y_point in col:
				instructions.extend( [
					_CMD_laser_off,
					_move( col_i * pixel_box_size, y_point, travel_speed ),
					_CMD_laser_on,
					_move( (col_i + 1) * pixel_box_size, y_point, burn_speed ),
					_CMD_laser_off,
					'G4 P150'
				] )

	return '\n'.join( instructions )
Example #32
def qimage2cvmat(image: QImage,
                 image_format=QImage.Format_RGB32,
                 channel=4) -> np.ndarray:
    """Converts a QImage into an opencv MAT format"""

    incoming_image = image.convertToFormat(image_format)

    width = incoming_image.width()
    height = incoming_image.height()

    ptr = incoming_image.bits()
    ptr.setsize(incoming_image.byteCount())
    arr = np.array(ptr).reshape((height, width, channel))  # Copies the data
    return arr
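# A minimal usage sketch (the file name is hypothetical). With the default
# Format_RGB32, each pixel is stored as 0xffRRGGBB, so on little-endian machines
# the four channels of the resulting array come out in B, G, R, A order:
img = QImage('frame.png')
mat = qimage2cvmat(img)   # ndarray of shape (height, width, 4)
bgr = mat[:, :, :3]       # drop the alpha channel for an OpenCV-style BGR image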
Example #33
    def toImage(self):
        t = time.time()

        tWAIT = time.time()
        self._arrayreq.wait()
        tWAIT = 1000.0 * (time.time() - tWAIT)

        tAR = time.time()
        a = self._arrayreq.getResult()
        tAR = 1000.0 * (time.time() - tAR)

        has_no_mask = not np.ma.is_masked(a)

        tImg = None
        if has_no_mask and _has_vigra and hasattr(vigra.colors, "gray2qimage_ARGB32Premultiplied"):
            if not a.flags.contiguous:
                a = a.copy()
            tImg = time.time()
            img = QImage(a.shape[1], a.shape[0], QImage.Format_ARGB32_Premultiplied)
            tintColor = np.asarray(
                [self._tintColor.redF(), self._tintColor.greenF(), self._tintColor.blueF()], dtype=np.float32
            )
            normalize = np.asarray(self._normalize, dtype=np.float32)
            if normalize[0] > normalize[1]:
                normalize = np.array((0.0, 255.0)).astype(np.float32)
            vigra.colors.alphamodulated2qimage_ARGB32Premultiplied(a, byte_view(img), tintColor, normalize)
            tImg = 1000.0 * (time.time() - tImg)
        else:
            if has_no_mask:
                self.logger.warning("using unoptimized conversion functions")
            tImg = time.time()
            d = a[..., None].repeat(4, axis=-1)
            d[:, :, 0] = d[:, :, 0] * self._tintColor.redF()
            d[:, :, 1] = d[:, :, 1] * self._tintColor.greenF()
            d[:, :, 2] = d[:, :, 2] * self._tintColor.blueF()

            normalize = self._normalize
            img = array2qimage(d, normalize)
            img = img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
            tImg = 1000.0 * (time.time() - tImg)

        if self.logger.isEnabledFor(logging.DEBUG):
            tTOT = 1000.0 * (time.time() - t)
            self.logger.debug(
                "toImage (%dx%d, normalize=%r) took %f msec. (array req: %f, wait: %f, img: %f)"
                % (img.width(), img.height(), normalize, tTOT, tAR, tWAIT, tImg)
            )

        return img
	def updateROIcanvas(self, wormCanvas, worm_index_roi, comboBox_ROI, isDrawSkel):
		if not isinstance(self.frame_data, pd.DataFrame):
			#no trajectories data presented, nothing to do here
			wormCanvas.clear()
			return
		
		#update valid index for the comboBox
		comboBox_ROI.clear()
		comboBox_ROI.addItem(str(worm_index_roi))
		#add the indexes of the current frame into the roi combo box
		for ind in self.frame_data[self.worm_index_type].data:
			comboBox_ROI.addItem(str(ind))
		
		
		#extract individual worm ROI
		good = self.frame_data[self.worm_index_type] == worm_index_roi
		row_data = self.frame_data.loc[good].squeeze()

		if row_data.size == 0 or np.isnan(row_data['coord_x']) or np.isnan(row_data['coord_y']):
			#invalid data nothing to do here
			wormCanvas.clear()
			return
		
		worm_img, roi_corner = getWormROI(self.frame_img, row_data['coord_x'], row_data['coord_y'], row_data['roi_size'])
		#roi_corner = roi_corner+1
		
		roi_ori_size = worm_img.shape
		
		worm_img = np.ascontiguousarray(worm_img)
		worm_qimg = QImage(worm_img.data, worm_img.shape[1], worm_img.shape[0], worm_img.strides[0], QImage.Format_Indexed8)
		worm_qimg = worm_qimg.convertToFormat(QImage.Format_RGB32, Qt.AutoColor)

		canvas_size = min(wormCanvas.height(),wormCanvas.width())
		worm_qimg = worm_qimg.scaled(canvas_size,canvas_size, Qt.KeepAspectRatio)
		
		if isDrawSkel:
			if row_data['has_skeleton'] == 1:
				self.drawSkel(worm_img, worm_qimg, row_data, roi_corner = roi_corner)
			elif row_data['has_skeleton'] == 0:
				self.drawThreshMask(worm_img, worm_qimg, row_data, read_center=False)


		pixmap = QPixmap.fromImage(worm_qimg)
		wormCanvas.setPixmap(pixmap);
    def drawThreshMask(self, worm_img, worm_qimg, row_data, read_center = True):
        worm_mask = getWormMask(worm_img, row_data['threshold'])
        min_mask_area = row_data['area']/2
        if read_center:
            worm_cnt, _ = binaryMask2Contour(worm_mask, roi_center_x = row_data['coord_y'], roi_center_y = row_data['coord_x'], min_mask_area = min_mask_area)
        else:
            worm_cnt, _ = binaryMask2Contour(worm_mask, min_mask_area = min_mask_area)
        worm_mask = np.zeros_like(worm_mask)
        cv2.drawContours(worm_mask, [worm_cnt.astype(np.int32)], 0, 1, -1)

        worm_mask = QImage(worm_mask.data, worm_mask.shape[1], 
        worm_mask.shape[0], worm_mask.strides[0], QImage.Format_Indexed8)
        worm_mask = worm_mask.convertToFormat(QImage.Format_RGB32, Qt.AutoColor)
        worm_mask = worm_mask.scaled(worm_qimg.width(),worm_qimg.height(), Qt.KeepAspectRatio)
        worm_mask = QPixmap.fromImage(worm_mask)

        worm_mask = worm_mask.createMaskFromColor(Qt.black)
        p = QPainter(worm_qimg)
        p.setPen(QColor(0,204,102))
        p.drawPixmap(worm_qimg.rect(), worm_mask, worm_mask.rect())
        p.end()
Example #36
def drop_shadow(image: QImage) -> QImage:
    if image.format() != QImage.Format_ARGB32:
        image = image.convertToFormat(QImage.Format_ARGB32)

    bits = image.bits()
    bits.setsize(image.byteCount())

    shape = (image.width() * image.height())
    strides = (4,)

    alpha = numpy.ndarray(shape=shape, dtype=numpy.uint8,
                          buffer=bits, strides=strides, offset=3)
    color = numpy.ndarray(shape=shape, dtype=numpy.uint8)
    color.fill(0)

    alpha = alpha.reshape((image.width(), image.height()))
    alpha = gaussian_filter(alpha, sigma=10)
    alpha = alpha.reshape(shape)

    arr = numpy.dstack((color, color, color, alpha))

    # .copy() detaches the QImage from the temporary NumPy buffer, which would
    # otherwise be freed when this function returns
    return QImage(arr, image.width(), image.height(), QImage.Format_ARGB32).copy()
	def updateROIcanvas(self, wormCanvas, worm_index_roi, comboBox_ROI, isDrawSkel):
		if not isinstance(self.frame_data, pd.DataFrame):
			wormCanvas.clear()
			return
		
		#update valid index for the comboBox
		comboBox_ROI.clear()
		comboBox_ROI.addItem(str(worm_index_roi))
		
		for ind in self.frame_data[self.worm_index_str].data:
			comboBox_ROI.addItem(str(ind))
		
		
		#extract individual worm ROI
		good = self.frame_data[self.worm_index_str] == worm_index_roi
		roi_data = self.frame_data.loc[good].squeeze()

		if roi_data.size == 0:
			wormCanvas.clear()
			return

		if np.isnan(roi_data['coord_x']) or np.isnan(roi_data['coord_y']):
			return #invalid coordinate, nothing to do here

		worm_roi, roi_corner = getWormROI(self.original_image, roi_data['coord_x'], roi_data['coord_y'], roi_data['roi_size'])
		roi_corner = roi_corner+1
		#worm_roi, roi_corner = self.original_image, np.zeros(2)

		roi_ori_size = worm_roi.shape
		
		worm_roi = np.ascontiguousarray(worm_roi)
		#worm_roi = cv2.cvtColor(worm_img, cv2.COLOR_GRAY2RGB);

		worm_img = QImage(worm_roi.data, worm_roi.shape[1], worm_roi.shape[0], worm_roi.strides[0], QImage.Format_Indexed8)
		worm_img = worm_img.convertToFormat(QImage.Format_RGB32, Qt.AutoColor)

		
		canvas_size = min(wormCanvas.height(),wormCanvas.width())
		worm_img = worm_img.scaled(canvas_size,canvas_size, Qt.KeepAspectRatio)#, Qt.SmoothTransformation)
		
		
		if isDrawSkel:
			if roi_data['has_skeleton']==1:
				c_ratio_y = worm_img.width()/roi_ori_size[1];
				c_ratio_x = worm_img.height()/roi_ori_size[0];
				
				skel_id = int(roi_data['skeleton_id'])

				qPlg = {}
				
				for tt in ['skeleton', 'contour_side1', 'contour_side2']:
					dat = self.skel_dat[tt][skel_id];
					dat[:,0] = (dat[:,0]-roi_corner[0])*c_ratio_x
					dat[:,1] = (dat[:,1]-roi_corner[1])*c_ratio_y
					
					#dat = (self.skel_dat[tt][skel_id] - 0)*c_ratio
					qPlg[tt] = QPolygonF()
					for p in dat:
						qPlg[tt].append(QPointF(*p))
				
				if 'is_good_skel' in roi_data and roi_data['is_good_skel'] == 0:
					self.skel_colors = {'skeleton':(102, 0, 0 ), 
					'contour_side1':(102, 0, 0 ), 'contour_side2':(102, 0, 0 )}
				else:
					self.skel_colors = {'skeleton':(27, 158, 119 ), 
					'contour_side1':(217, 95, 2), 'contour_side2':(231, 41, 138)}

				pen = QPen()
				pen.setWidth(2)
				
				painter = QPainter()
				painter.begin(worm_img)
			
				for tt, color in self.skel_colors.items():
					pen.setColor(QColor(*color))
					painter.setPen(pen)
					painter.drawPolyline(qPlg[tt])
				
				pen.setColor(Qt.black)
				painter.setBrush(Qt.white)
				painter.setPen(pen)
			
				radius = 3#*c_ratio_x
				painter.drawEllipse(qPlg['skeleton'][0], radius, radius)

				painter.end()
			elif roi_data['has_skeleton']==0:
				worm_mask = getWormMask(worm_roi, roi_data['threshold'])
				worm_cnt, _ = binaryMask2Contour(worm_mask)
				worm_mask = np.zeros_like(worm_mask)
				cv2.drawContours(worm_mask, [worm_cnt.astype(np.int32)], 0, 1, -1)

				worm_mask = QImage(worm_mask.data, worm_mask.shape[1], 
					worm_mask.shape[0], worm_mask.strides[0], QImage.Format_Indexed8)
				worm_mask = worm_mask.convertToFormat(QImage.Format_RGB32, Qt.AutoColor)
				worm_mask = worm_mask.scaled(canvas_size,canvas_size, 
					Qt.KeepAspectRatio)#, Qt.SmoothTransformation)
				worm_mask = QPixmap.fromImage(worm_mask)

				worm_mask = worm_mask.createMaskFromColor(Qt.black)
				p = QPainter(worm_img)
				p.setPen(QColor(0,204,102))
				p.drawPixmap(worm_img.rect(), worm_mask, worm_mask.rect())
				p.end()

		
		pixmap = QPixmap.fromImage(worm_img)
		wormCanvas.setPixmap(pixmap);
Example #38
    def toImage(self):
        t = time.time()

        tWAIT = time.time()
        self._arrayreq.wait()
        tWAIT = 1000.0 * (time.time() - tWAIT)

        tAR = time.time()
        a = self._arrayreq.getResult()
        tAR = 1000.0 * (time.time() - tAR)

        assert a.ndim == 2, "GrayscaleImageRequest.toImage(): result has shape %r, which is not 2-D" % (a.shape,)

        normalize = self._normalize
        if not normalize:
            normalize = [0, 255]

        # FIXME: It is obviously wrong to truncate like this (right?)
        if a.dtype == np.uint64 or a.dtype == np.int64:
            warnings.warn("Truncating 64-bit pixels for display")
            if a.dtype == np.uint64:
                a = np.asanyarray(a, np.uint32)
            elif a.dtype == np.int64:
                a = np.asanyarray(a, np.int32)

        if a.dtype == np.bool_:
            a = a.view(np.uint8)

        has_no_mask = not np.ma.is_masked(a)

        #
        # new conversion
        #
        tImg = None
        if has_no_mask and _has_vigra and hasattr(vigra.colors, "gray2qimage_ARGB32Premultiplied"):
            if (
                not self._normalize or self._normalize[0] >= self._normalize[1] or self._normalize == [0, 0]
            ):  # FIXME: fix volumina conventions
                n = np.asarray([0, 255], dtype=np.float32)
            else:
                n = np.asarray(self._normalize, dtype=np.float32)
            tImg = time.time()
            img = QImage(a.shape[1], a.shape[0], QImage.Format_ARGB32_Premultiplied)
            if not a.flags["C_CONTIGUOUS"]:
                a = a.copy()
            vigra.colors.gray2qimage_ARGB32Premultiplied(a, byte_view(img), n)
            tImg = 1000.0 * (time.time() - tImg)
        else:
            if has_no_mask:
                self.logger.warning("using slow image creation function")
            tImg = time.time()
            if self._normalize:
                # clipping has been implemented in this commit,
                # but it is not yet available in the packages obtained via easy_install
                # http://www.informatik.uni-hamburg.de/~meine/hg/qimage2ndarray/diff/fcddc70a6dea/qimage2ndarray/__init__.py
                a = np.clip(a, *self._normalize)
            img = gray2qimage(a, self._normalize)
            img = img.convertToFormat(QImage.Format_ARGB32_Premultiplied)
            tImg = 1000.0 * (time.time() - tImg)

        if self.logger.isEnabledFor(logging.DEBUG):
            tTOT = 1000.0 * (time.time() - t)
            self.logger.debug(
                "toImage (%dx%d, normalize=%r) took %f msec. (array req: %f, wait: %f, img: %f)"
                % (img.width(), img.height(), normalize, tTOT, tAR, tWAIT, tImg)
            )

        return img
class HDF5videoViewer_GUI(QMainWindow):
    def __init__(self, ui = ''):
        super().__init__()
        
        # Set up the user interface from Designer.
        if not ui:
            self.ui = Ui_ImageViewer()
        else:
            self.ui = ui

        self.ui.setupUi(self)

        self.isPlay = False
        self.fid = -1
        self.image_group = -1
        self.videos_dir = ''
        #self.videos_dir =  r"/Volumes/behavgenom$/GeckoVideo/Results/20150521_1115/"
        #self.videos_dir =  os.path.expanduser("~") + os.sep + 'Downloads' + os.sep + 'wetransfer-cf3818' + os.sep
        
        #self.ui.imageCanvas.setFocusPolicy(Qt.ClickFocus)

        self.h5path = self.ui.comboBox_h5path.itemText(0)
        
        self.ui.pushButton_video.clicked.connect(self.getVideoFile)
        
        self.ui.playButton.clicked.connect(self.playVideo)
        self.ui.imageSlider.sliderPressed.connect(self.imSldPressed)
        self.ui.imageSlider.sliderReleased.connect(self.imSldReleased)
        
        self.ui.spinBox_frame.valueChanged.connect(self.updateFrameNumber)
        self.ui.doubleSpinBox_fps.valueChanged.connect(self.updateFPS)
        self.ui.spinBox_step.valueChanged.connect(self.updateFrameStep)
        
        self.ui.spinBox_step.valueChanged.connect(self.updateFrameStep)

        self.ui.comboBox_h5path.activated.connect(self.getImGroup)

        self.ui.pushButton_h5groups.clicked.connect(self.updateGroupNames)

        self.updateFPS()
        self.updateFrameStep()
        
        # SET UP RECURRING EVENTS
        self.timer = QTimer()
        self.timer.timeout.connect(self.getNextImage)
        
        
    #Scroller
    def imSldPressed(self):
        self.ui.imageSlider.setCursor(Qt.ClosedHandCursor)
    
    def imSldReleased(self):
        self.ui.imageSlider.setCursor(Qt.OpenHandCursor)
        if self.image_group != -1:
            self.frame_number = int(round((self.tot_frames-1)*self.ui.imageSlider.value()/100))
            self.ui.spinBox_frame.setValue(self.frame_number)
            #self.updateImage()
    
    #frame spin box
    def updateFrameNumber(self):
        self.frame_number = self.ui.spinBox_frame.value()
        progress = round(100*self.frame_number/self.tot_frames)
        if progress != self.ui.imageSlider.value():
            self.ui.imageSlider.setValue(progress)
        
        self.updateImage()

    #fps spin box
    def updateFPS(self):
        self.fps = self.ui.doubleSpinBox_fps.value()

    #frame steps spin box
    def updateFrameStep(self):
        self.frame_step = self.ui.spinBox_step.value()

    #Play Button
    def playVideo(self):
        if self.image_group == -1:
            return
        if not self.isPlay:
            self.startPlay()
        else:
            self.stopPlay()
    
    def startPlay(self):
        self.timer.start(round(1000/self.fps))
        self.isPlay = True
        self.ui.playButton.setText('Stop')
        self.ui.doubleSpinBox_fps.setEnabled(False)

    def stopPlay(self):
        self.timer.stop()
        self.isPlay = False
        self.ui.playButton.setText('Play')
        self.ui.doubleSpinBox_fps.setEnabled(True)

    #Function to get the new valid frame during video play
    def getNextImage(self):
        self.frame_number += self.frame_step
        if self.frame_number >= self.tot_frames:
            self.frame_number = self.tot_frames-1
            self.stopPlay()
        
        self.ui.spinBox_frame.setValue(self.frame_number)
        
    #update image: get the next frame_number, and resize it to fix in the GUI area
    def updateImage(self):
        if self.image_group == -1:
            return
        
        self.readImage()

        self.pixmap = QPixmap.fromImage(self.frame_qimg)
        self.ui.imageCanvas.setPixmap(self.pixmap);
    
    def readImage(self):
        self.label_height = self.ui.imageCanvas.height()
        self.label_width = self.ui.imageCanvas.width()

        self.frame_img = self.image_group[self.frame_number,:,:];
        
        #equalize and cast if it is not uint8
        if self.frame_img.dtype != np.uint8:
            top = np.max(self.frame_img)
            bot = np.min(self.frame_img)

            self.frame_img = (self.frame_img-bot)*255./(top-bot)
            self.frame_img = np.round(self.frame_img).astype(np.uint8)
            
        self.frame_qimg = QImage(self.frame_img.data, 
            self.image_width, self.image_height, self.frame_img.strides[0], QImage.Format_Indexed8)
        self.frame_qimg = self.frame_qimg.convertToFormat(QImage.Format_RGB32, Qt.AutoColor)
        self.frame_qimg = self.frame_qimg.scaled(self.label_width, self.label_height, Qt.KeepAspectRatio)
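
    # A minimal standalone sketch (hypothetical helper, not part of the original
    # class) of the numpy-to-QImage conversion done in readImage(); it assumes
    # the module's existing QImage and numpy imports and a 2D uint8 array. The
    # convertToFormat() call returns a copy in the new format, so the numpy
    # buffer does not need to outlive the returned image.
    @staticmethod
    def _grey_array_to_qimage_sketch(arr):
        arr = np.ascontiguousarray(arr, dtype=np.uint8)
        h, w = arr.shape
        qimg = QImage(arr.data, w, h, arr.strides[0], QImage.Format_Indexed8)
        return qimg.convertToFormat(QImage.Format_RGB32)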
        
    #file dialog to select the hdf5 video file
    def getVideoFile(self):
        vfilename, _ = QFileDialog.getOpenFileName(
            self, "Find HDF5 video file", self.videos_dir,
            "HDF5 files (*.hdf5);; All files (*)")
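        # getOpenFileName returns a (filename, selected_filter) tuple; the
        # filename is an empty string if the dialog was cancelled, which the
        # check below relies on.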

        if vfilename:
            if self.fid != -1:
                self.fid.close()
                self.ui.imageCanvas.clear()

            self.vfilename = vfilename
            self.updateVideoFile()
    
    def updateVideoFile(self):
        if not os.path.exists(self.vfilename):
            QMessageBox.critical(self, 'The hdf5 video file does not exist',
                    "The hdf5 video file does not exist. Please select a valid file.",
                    QMessageBox.Ok)
            return
        
        self.ui.lineEdit_video.setText(self.vfilename)
        self.videos_dir = self.vfilename.rpartition(os.sep)[0] + os.sep
        self.fid = tables.File(self.vfilename, 'r')
        
        self.updateImGroup()

    def updateGroupNames(self):
        valid_groups = []
        for group in self.fid.walk_groups("/"):
            for array in self.fid.list_nodes(group, classname='Array'):
                if array.ndim == 3:
                    valid_groups.append(array._v_pathname)
        
        if not valid_groups:
            QMessageBox.critical(self, '',
                    "No valid video groups were found. A valid group is a dataset with three dimensions and uint8 data type.",
                    QMessageBox.Ok)
            return

        self.ui.comboBox_h5path.clear()
        for kk in valid_groups:
            self.ui.comboBox_h5path.addItem(kk)
        self.getImGroup(0)
        self.updateImage()
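
    # A minimal standalone sketch (hypothetical helper, not part of the original
    # class) of the search performed in updateGroupNames(): walk an HDF5 file
    # with PyTables (the module's existing `tables` import) and collect the
    # paths of every 3-D array node.
    @staticmethod
    def _find_3d_arrays_sketch(hdf5_path):
        with tables.File(hdf5_path, 'r') as fid:
            return [node._v_pathname
                    for node in fid.walk_nodes('/', classname='Array')
                    if node.ndim == 3]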

    def getImGroup(self, index):
        self.h5path = self.ui.comboBox_h5path.itemText(index)
        self.updateImGroup()

    #read a valid groupset from the hdf5
    def updateImGroup(self):
        if self.fid == -1:
            return

        #self.h5path = self.ui.comboBox_h5path.text()
        if self.h5path not in self.fid:
            self.ui.imageCanvas.clear()
            self.image_group = -1
            QMessageBox.critical(self, 'The groupset path does not exist',
                    "The groupset path does not exist. You must specify a valid groupset path.",
                    QMessageBox.Ok)
            return

        self.image_group = self.fid.get_node(self.h5path)
        if len(self.image_group.shape) != 3:
            self.ui.imageCanvas.clear()
            self.image_group = -1
            QMessageBox.critical(self, 'Invalid groupset',
                    "Invalid groupset. The groupset must have three dimensions.",
                    QMessageBox.Ok)
            return

        self.tot_frames = self.image_group.shape[0]
        self.image_height = self.image_group.shape[1]
        self.image_width = self.image_group.shape[2]
            
        self.ui.spinBox_frame.setMaximum(self.tot_frames-1)

        self.frame_number = 0
        self.ui.spinBox_frame.setValue(self.frame_number)
        self.updateImage()


    def setFileName(self, filename):
        self.filename = filename
        self.ui.lineEdit.setText(filename)

    
    def resizeEvent(self, event):
        if self.fid != -1:
            self.updateImage()
    
    def keyPressEvent(self, event):
        key = event.key()
        
        #Double the frame step size (speed) when > or . is pressed
        if key == 46 or key == 62:
            self.frame_step *= 2
            self.ui.spinBox_step.setValue(self.frame_step)

        #Halve the frame step size (speed) when < or , is pressed
        elif key == 44 or key == 60:
            self.frame_step //= 2
            if self.frame_step < 1:
                self.frame_step = 1
            self.ui.spinBox_step.setValue(self.frame_step)

        #ignore the navigation keys below if no file is loaded
        elif self.fid == -1:
            return
            
        #Move backwards when the left arrow (or key code 39) is pressed
        elif key == Qt.Key_Left or key == 39:
            self.frame_number -= self.frame_step
            if self.frame_number<0:
                self.frame_number = 0
            self.ui.spinBox_frame.setValue(self.frame_number)
        
        #Move forward when the right arrow (or key code 92) is pressed
        elif key == Qt.Key_Right or key == 92:
            self.frame_number += self.frame_step
            if self.frame_number >= self.tot_frames:
                self.frame_number = self.tot_frames-1
            self.ui.spinBox_frame.setValue(self.frame_number)

        else:
            QMainWindow.keyPressEvent(self, event)
Exemple #40
0
from PyQt5.QtGui import QImage, QFont, QColor
from PyQt5.QtCore import Qt
import os, numpy as np

img = QImage(filename)
img = img.convertToFormat(QImage.Format_RGB888)
assert img.format() == QImage.Format_RGB888
assert img.width()*img.height()*3 == img.byteCount()
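# Note: QImage scanlines are 32-bit aligned, so the assertion above only holds
# when the row size width*3 is already a multiple of 4 bytes; otherwise
# byteCount() is larger than width*height*3.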

if inscription != "":
    img2 = QImage(img.width(), img.height(), QImage.Format_RGB888)

    from PyQt5.QtGui import QPainter
    qp = QPainter()
    try:
        qp.begin(img2) #different results than painting on img!
        qp.drawImage(0,0,img)
        qp.setPen(QColor(255,185,50))
        fx = 2
        fy = 20
        font = QFont("Arial", int(0.7*img.height()/fy))
        qp.setFont(font)
        mx = img.width() / fx
        my = img.height() / fy
        for x in range(fx):
            for y in range(fy):
                qp.drawText(int(x*mx), int(y*my), int(mx), int(my), Qt.AlignCenter, inscription)
    finally:
        qp.end()
    img = img2
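# The annotated image can then be saved with QImage.save() or shown in a widget
# via QPixmap.fromImage(img).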
	def updateImage(self):
		if self.image_group == -1:
			return

		self.ui.spinBox_frame.setValue(self.frame_number)
		
		self.label_height = self.ui.imageCanvas.height()
		self.label_width = self.ui.imageCanvas.width()

		self.original_image = self.image_group[self.frame_number]

		image = QImage(self.original_image.data, 
			self.image_width, self.image_height, self.original_image.strides[0], QImage.Format_Indexed8)
		image = image.convertToFormat(QImage.Format_RGB32, Qt.AutoColor)
		image = image.scaled(self.label_width, self.label_height, Qt.KeepAspectRatio)#, Qt.SmoothTransformation)
		
		
		if isinstance(self.trajectories_data, pd.DataFrame): 
			try:
				#self.frame_data = self.trajectories_data[self.trajectories_data['frame_number'] == self.frame_number]#self.trajectories_data.get_group(self.frame_number)
				self.frame_data = self.traj_time_grouped.get_group(self.frame_number)
				self.frame_data = self.frame_data[self.frame_data[self.worm_index_str] >= 0]
			except KeyError:
				self.frame_data = -1

			label_type = 'worm_label' if self.ui.comboBox_labelType.currentIndex() == 0 else 'auto_label'
			
			if isinstance(self.frame_data, pd.DataFrame) and \
			self.ui.checkBox_showLabel.isChecked() and label_type in self.frame_data:
				
				self.img_h_ratio = image.height()/self.image_height
				self.img_w_ratio = image.width()/self.image_width
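				# scale factors mapping original-image pixel coordinates onto
				# the resized pixmap (width ratio for x, height ratio for y)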
		
				painter = QPainter()
				painter.begin(image)
				for row_id, row_data in self.frame_data.iterrows():
					x = row_data['coord_x']*self.img_w_ratio
					y = row_data['coord_y']*self.img_h_ratio
					if not (x == x) or not (y == y): #skip if either coordinate is nan
						continue

					x = int(x)
					y = int(y)
					c = self.wlabC[int(row_data[label_type])]
					
					painter.setPen(c)
					painter.setFont(QFont('Decorative', 10))
					
					#painter.drawText(x, y, str(int(row_data['worm_index_joined'])))
					painter.drawText(x, y, str(int(row_data[self.worm_index_str])))

					bb = int(round(row_data['roi_size']*self.img_w_ratio))
					painter.drawRect(x - bb//2, y - bb//2, bb, bb)
				painter.end()
		
		
		pixmap = QPixmap.fromImage(image)
		self.ui.imageCanvas.setPixmap(pixmap)
		
		if self.ske_file_id != -1:
			self.updateROIcanvasN(1)
			self.updateROIcanvasN(2)
			
		progress = round(100*self.frame_number/self.tot_frames)
		if progress != self.ui.imageSlider.value():
			self.ui.imageSlider.setValue(progress)