Example #1
    def paintTeam(self, painter, team, xOffset):
        pickXOffset = xOffset
        banXOffset = xOffset

        for player in self.championSelect.getTeam(team):
            if player.champion:
                qim = ImageQt(player.champion.image.image)
                qim = qim.scaled(40, 40, Qt.KeepAspectRatio)

                painter.drawImage(QPoint(qim.width() * pickXOffset, 0), qim)

                pickXOffset += 1

            for champion in player.bannedChampions.values():
                qim = ImageQt(champion.image.image)
                qim = qim.scaled(40, 40, Qt.KeepAspectRatio)

                painter.drawImage(
                    QPoint(qim.width() * banXOffset, qim.height()), qim)

                painter.drawLine(qim.width() * banXOffset, qim.height(),
                                 qim.width() * (banXOffset + 1),
                                 qim.height() * 2)
                painter.drawLine(qim.width() * (banXOffset + 1), qim.height(),
                                 qim.width() * banXOffset,
                                 qim.height() * 2)

                banXOffset += 1

        return max(pickXOffset, banXOffset)
Example #2
 def __init__(self, m, c, p, path, parent=None):
     super(watcher, self).__init__(parent)
     im = PIL.Image.open(path)
     self.color = self.get_dominant_color(im)
     desktop = QtGui.QApplication.desktop()
     width = desktop.width()
     height = desktop.height()
     qim = ImageQt(im)
     
     self.fixLength = height - 150
     if qim.height() >= qim.width():
         _img = qim.scaled(self.fixLength, self.fixLength, Qt.Qt.KeepAspectRatio)
     else:
         _img = qim.scaled(qim.width() * self.fixLength / qim.height(),
                           qim.width() * self.fixLength / qim.height(),
                           Qt.Qt.KeepAspectRatio)
     self.setStyleSheet(("background-color: rgb"+str(self.color)+";"))
     self.setFixedSize(_img.width(), _img.height()+50)
     self.setWindowIcon(QtGui.QIcon("../pics/icon/icon.png"))
     self.move((width - self.width())/2, (height - self.height())/2-25)
     
     self.labelPhoto = QtGui.QLabel(self)
     self.labelPhoto.setPixmap(QtGui.QPixmap.fromImage(_img))
     self.labelPhoto.setGeometry(0, 0, _img.width(), _img.height())
     
     '''Display the photo name'''
     font = QtGui.QFont(u'微软雅黑', 15)  # Microsoft YaHei
     name = path.split('\\')[-1]
     self.labelName = QtGui.QLabel(u'● '+name, self)
     self.labelName.setGeometry(10, _img.height(), len(name)*17, 50)
     self.nameColor = (int(255-self.color[0]), int(255-self.color[1]), int(255-self.color[2]))        
     self.labelName.setStyleSheet(("color: rgb"+str(self.nameColor)+";"))
     self.labelName.setFont(font)
     self.setWindowTitle("xiaoyWatcher v1.0 "+name)
     
     '''Display the photo mood'''
     tipMood = [u'无', u'好', u'良', u'差']  # none / good / fair / poor
     self.labelmood = QtGui.QLabel(self)
     self.labelmood.setPixmap(QtGui.QPixmap('../pics/watcher/moodGOOD'+str(m)+'.png'))
     self.labelmood.setGeometry(_img.width()-100, _img.height()+13, 34, 26)
     self.labelmood.setToolTip(tipMood[m])
     
     '''Display the photo cloud marker'''
     tipCloud = [u'本地', u'云端']  # local / cloud
     if c == 0:
         pathC = '../pics/watcher/cloudNO.png'
     else:
         pathC = '../pics/watcher/cloudYES.png'
     self.labelcloud = QtGui.QLabel(self)
     self.labelcloud.setPixmap(QtGui.QPixmap(pathC))
     self.labelcloud.setGeometry(_img.width()-150, _img.height()+14, 34, 21)
     self.labelcloud.setToolTip(tipCloud[c])
     
     '''Display the photo comment'''
     self.comment = QtGui.QLabel(self)
     self.comment.setPixmap(QtGui.QPixmap('../pics/watcher/comment.png'))
     self.comment.setGeometry(_img.width()-50, _img.height()+12, 34, 29)
     if p:
         self.comment.setToolTip(u'评论:'+p)
     else:
         self.comment.setToolTip(u'尚未添加评论')
Example #3
    def saveYoloFormat(self, filename, shapes, imagePath, imageData, classList,
                            lineColor=None, fillColor=None, databaseSrc=None):
        imgFolderPath = os.path.dirname(imagePath)
        imgFolderName = os.path.split(imgFolderPath)[-1]
        imgFileName = os.path.basename(imagePath)
        #imgFileNameWithoutExt = os.path.splitext(imgFileName)[0]
        # Read from file path because self.imageData might be empty if saving to
        # Pascal format
        if imagePath.split('.')[-1] == "exr":
            image = loadImageWithOpenEXR(imagePath)
            image = PilImageQt(image)
        else:
            image = QImage()
            image.load(imagePath)
        imageShape = [image.height(), image.width(),
                      1 if image.isGrayscale() else 3]
        writer = YOLOWriter(imgFolderName, imgFileName,
                                 imageShape, localImgPath=imagePath)
        writer.verified = self.verified

        for shape in shapes:
            points = shape['points']
            label = shape['label']
            # Add Chris
            difficult = int(shape['difficult'])
            bndbox = LabelFile.convertPoints2BndBox(points)
            writer.addBndBox(bndbox[0], bndbox[1], bndbox[2], bndbox[3], label, difficult)

        writer.save(targetFile=filename, classList=classList)
        return
Example #4
def progress_fn(im):
    qim = ImageQt(im)
    pix = QPixmap.fromImage(qim)

    _Scene.clear()
    _Scene.setSceneRect(0, 0, qim.width(), qim.height())
    item = _Scene.addPixmap(pix)
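
A hedged, self-contained sketch of the context progress_fn needs: the module-level _Scene it references is a QGraphicsScene shown through a QGraphicsView. Every name here (the scene, the view, the sample image) is illustrative rather than taken from the original project, and it assumes a Pillow release whose ImageQt still supports PyQt5 (newer Pillow versions pair with PyQt6/PySide6).

import sys
from PIL import Image
from PIL.ImageQt import ImageQt
from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QApplication, QGraphicsScene, QGraphicsView

app = QApplication(sys.argv)
_Scene = QGraphicsScene()          # the module-level scene progress_fn draws into
view = QGraphicsView(_Scene)
view.show()

def progress_fn(im):
    # Convert the PIL image to a QPixmap and replace the scene contents.
    qim = ImageQt(im)
    pix = QPixmap.fromImage(qim)
    _Scene.clear()
    _Scene.setSceneRect(0, 0, qim.width(), qim.height())
    _Scene.addPixmap(pix)

progress_fn(Image.new('RGB', (320, 240), (40, 40, 40)))  # illustrative sample frame
sys.exit(app.exec_())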
Example #5
class FramePainter(QtGui.QPainter):
    '''
        A QPainter for a blank frame, which can be converted into a
        Pillow image with finalize()
    '''
    def __init__(self, width, height):
        image = BlankFrame(width, height)
        log.debug("Creating QImage from PIL image object")
        self.image = ImageQt(image)
        super().__init__(self.image)

    def setPen(self, penStyle):
        if type(penStyle) is tuple:
            super().setPen(PaintColor(*penStyle))
        else:
            super().setPen(penStyle)

    def finalize(self):
        log.verbose("Finalizing FramePainter")
        imBytes = self.image.bits().asstring(self.image.byteCount())
        frame = Image.frombytes('RGBA',
                                (self.image.width(), self.image.height()),
                                imBytes)
        self.end()
        return frame
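
FramePainter above wraps a draw-then-convert round trip; the following is a minimal stand-alone sketch of the same idea, assuming only Pillow (with PyQt5-capable ImageQt) and PyQt5. BlankFrame, PaintColor and log belong to the original project and are not used here, and the output path is illustrative. Note that on little-endian builds the raw ARGB32 bytes come back as BGRA, so the red/blue channels of the recovered frame may appear swapped, exactly as with the raw copy done in finalize().

from PIL import Image
from PIL.ImageQt import ImageQt
from PyQt5.QtGui import QGuiApplication, QPainter, QColor

app = QGuiApplication([])                     # needed before QPainter text/font use
blank = Image.new('RGBA', (640, 360), (0, 0, 0, 255))
qimage = ImageQt(blank)                       # QImage backed by the PIL pixel data

painter = QPainter(qimage)
painter.setPen(QColor(255, 255, 255))
painter.drawText(20, 40, "frame 0001")
painter.end()

# Same conversion as finalize(): raw bytes back into a Pillow image.
raw = qimage.bits().asstring(qimage.byteCount())
frame = Image.frombytes('RGBA', (qimage.width(), qimage.height()), raw)
frame.save('frame0001.png')                   # illustrative output path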
Example #6
 def thumbnail(self, videoFile):
     '''
     create a thumbnail from video file
     '''
     container = av.open(videoFile)
     index = 0
     for frame in container.decode(video=0):
         img = frame.to_image()
         index += 1
         if (index == 10):
             qim = ImageQt(img)
             ratio = qim.height() * 1.0 / qim.width()
             pix = QtGui.QPixmap.fromImage(qim)
             pix = pix.scaled(128, 128, Qt.KeepAspectRatio)
             print(type(pix))
             return pix, ratio
Example #7
    def set_texture(self, filename):
        # load image
        p = Path(filename)
        suffix = p.suffix[1:].upper()
        try:
            if p.is_file() and suffix in self._supported_images:
                pim = Image.open(filename)
                img = ImageQt(pim)
                self.info = f"{pim.format} - {pim.size} - {pim.mode} "
            else:
                ico = QFileIconProvider().icon(QFileInfo(filename))
                pix = ico.pixmap(256, 256)
                img = pix.toImage()
                self.info = "not an image "
        except UnidentifiedImageError as e:
            print("UnidentifiedImageError:\n", e, flush=True)
            return

        self._texture_size = img.width(), img.height()
        self.__update_scale(self.width(), self.height())

        # create texture
        self.makeCurrent()
        self._texture = QOpenGLTexture(QOpenGLTexture.Target2D)
        self._texture.create()
        self._texture.bind()
        self._texture.setMinMagFilters(QOpenGLTexture.LinearMipMapLinear,
                                       QOpenGLTexture.Linear)
        self._texture.setWrapMode(QOpenGLTexture.DirectionS,
                                  QOpenGLTexture.Repeat)
        self._texture.setWrapMode(QOpenGLTexture.DirectionT,
                                  QOpenGLTexture.Repeat)
        self._texture.setData(img)
        self._texture.release()

        # redraw
        self.update()
Example #8
class EMBLXrayImaging(QtGraphicsManager, AbstractCollect):
    def __init__(self, *args):
        QtGraphicsManager.__init__(self, *args)
        AbstractCollect.__init__(self, *args)

        self.ff_apply = False
        self.ff_ssim = None
        self.qimage = None
        self.qpixmap = None
        self.image_count = 0
        self.image_reading_thread = None
        self.ff_corrected_list = []
        self.config_dict = {}
        self.collect_omega_start = 0
        self.omega_start = 0
        self.omega_move_enabled = False
        self.reference_distance = None
        self.reference_angle = None
        self.motor_positions = None

        self.image_dimension = (0, 0)
        self.graphics_camera_frame = None
        self.image_polling = None
        self.repeat_image_play = None
        self.current_image_index = None
        self.mouse_hold = False
        self.mouse_coord = [0, 0]
        self.centering_started = 0

        self._previous_collect_status = None
        self._actual_collect_status = None
        self._failed = False
        self._number_of_images = 0
        self.printed_warnings = []
        self.printed_errors = []

        self.chan_collect_status = None
        self.chan_collect_frame = None
        self.chan_collect_error = None
        self.chan_camera_error = None
        self.chan_camera_warning = None
        self.chan_ff_ssim = None

        self.cmd_collect_compression = None
        self.cmd_collect_detector = None
        self.cmd_collect_directory = None
        self.cmd_collect_exposure_time = None
        self.cmd_collect_in_queue = None
        self.cmd_collect_num_images = None
        self.cmd_collect_overlap = None
        self.cmd_collect_range = None
        self.cmd_collect_scan_type = None
        self.cmd_collect_shutter = None
        self.cmd_collect_start_angle = None
        self.cmd_collect_template = None
        self.cmd_collect_start = None
        self.cmd_collect_abort = None

        self.cmd_collect_ff_num_images = None
        self.cmd_collect_ff_offset = None
        self.cmd_collect_ff_pre = None
        self.cmd_collect_ff_post = None

        self.cmd_camera_trigger = None
        self.cmd_camera_live_view = None
        self.cmd_camera_write_data = None
        self.cmd_camera_ff_ssim = None

        self.beam_focusing_hwobj = None

    def init(self):
        AbstractCollect.init(self)
        self.ready_event = gevent.event.Event()
        self.image_dimension = (2048, 2048)
        self.reference_distance = self.getProperty("reference_distance")
        self.reference_angle = self.getProperty("reference_angle")

        QtGraphicsManager.init(self)

        self.disconnect(HWR.beamline.sample_view.camera, "imageReceived",
                        self.camera_image_received)

        self.disconnect(
            HWR.beamline.diffractometer,
            "minidiffStateChanged",
            self.diffractometer_state_changed,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "centringStarted",
            self.diffractometer_centring_started,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "centringAccepted",
            self.create_centring_point,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "centringSuccessful",
            self.diffractometer_centring_successful,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "centringFailed",
            self.diffractometer_centring_failed,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "pixelsPerMmChanged",
            self.diffractometer_pixels_per_mm_changed,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "omegaReferenceChanged",
            self.diffractometer_omega_reference_changed,
        )
        self.disconnect(
            HWR.beamline.diffractometer,
            "minidiffPhaseChanged",
            self.diffractometer_phase_changed,
        )

        self.diffractometer_pixels_per_mm_changed((20., 20.))
        self.graphics_manager_hwobj = self.getObjectByRole("graphics_manager")

        self.graphics_scale_item.set_start_position(
            20, self.image_dimension[1] - 20)

        self.graphics_scale_item.set_custom_pen_color(Colors.BLUE)
        self.graphics_omega_reference_item.set_custom_pen_color(
            Colors.DARK_BLUE)
        self.graphics_measure_distance_item.set_custom_pen_color(
            Colors.DARK_BLUE)
        self.graphics_beam_item.hide()

        self.graphics_view.scene().measureItemChanged.connect(
            self.measure_item_changed)
        self.graphics_view.scene().setSceneRect(0, 0, self.image_dimension[0],
                                                self.image_dimension[1])

        self.qimage = QtImport.QImage()
        self.qpixmap = QtImport.QPixmap()

        self.chan_frame = self.get_channel_object("chanFrame")
        self.chan_frame.connectSignal("update", self.frame_changed)

        self.chan_ff_ssim = self.get_channel_object("chanFFSSIM")
        self.chan_ff_ssim.connectSignal("update", self.ff_ssim_changed)

        self.chan_collect_status = self.get_channel_object("collectStatus")
        #self._actual_collect_status = self.chan_collect_status.getValue()
        self.chan_collect_status.connectSignal("update",
                                               self.collect_status_update)

        self.chan_collect_frame = self.get_channel_object("chanFrameCount")
        self.chan_collect_frame.connectSignal("update",
                                              self.collect_frame_update)

        self.chan_collect_error = self.get_channel_object("collectError")
        self.chan_collect_error.connectSignal("update",
                                              self.collect_error_update)

        self.chan_camera_warning = self.get_channel_object("cameraWarning")
        self.chan_camera_warning.connectSignal("update",
                                               self.camera_warning_update)

        self.chan_camera_error = self.get_channel_object("cameraError")
        self.chan_camera_error.connectSignal("update",
                                             self.camera_error_update)

        self.cmd_collect_detector = self.get_command_object("collectDetector")
        self.cmd_collect_directory = self.get_command_object(
            "collectDirectory")
        self.cmd_collect_exposure_time = self.get_command_object(
            "collectExposureTime")
        self.cmd_collect_in_queue = self.get_command_object("collectInQueue")
        self.cmd_collect_num_images = self.get_command_object(
            "collectNumImages")
        self.cmd_collect_range = self.get_command_object("collectRange")
        self.cmd_collect_scan_type = self.get_command_object("collectScanType")
        self.cmd_collect_shutter = self.get_command_object("collectShutter")
        self.cmd_collect_shutterless = self.get_command_object(
            "collectShutterless")
        self.cmd_collect_start_angle = self.get_command_object(
            "collectStartAngle")
        self.cmd_collect_template = self.get_command_object("collectTemplate")

        self.cmd_collect_ff_num_images = self.get_command_object(
            "collectFFNumImages")
        self.cmd_collect_ff_offset = self.get_command_object("collectFFOffset")
        self.cmd_collect_ff_pre = self.get_command_object("collectFFPre")
        self.cmd_collect_ff_post = self.get_command_object("collectFFPost")

        self.cmd_camera_trigger = self.get_command_object("cameraTrigger")
        self.cmd_camera_live_view = self.get_command_object("cameraLiveView")
        self.cmd_camera_write_data = self.get_command_object("cameraWriteData")
        self.cmd_camera_ff_ssim = self.get_command_object("cameraFFSSIM")

        self.cmd_collect_start = self.get_command_object("collectStart")
        self.cmd_collect_abort = self.get_command_object("collectAbort")

        self.beam_focusing_hwobj = self.getObjectByRole("beam_focusing")

    def frame_changed(self, data):
        if self._collecting:
            jpgdata = StringIO(data)
            im = Image.open(jpgdata)
            self.qimage = ImageQt(im)
            self.graphics_camera_frame.setPixmap(
                self.qpixmap.fromImage(self.qimage, QtImport.Qt.MonoOnly))

    def ff_ssim_changed(self, value):
        if self._collecting:
            self.ff_ssim = list(value)
            self.ff_ssim.sort()

    def mouse_clicked(self, pos_x, pos_y, left_click):
        QtGraphicsManager.mouse_clicked(self, pos_x, pos_y, left_click)
        if self.centering_started:
            self.motor_positions["phi"] = self.omega_angle
            HWR.beamline.diffractometer.set_static_positions(
                self.motor_positions)
            HWR.beamline.diffractometer.image_clicked(pos_x, pos_y)
            self.centering_started -= 1
        if not self.centering_started:
            self.set_centring_state(False)

    def mouse_released(self, pos_x, pos_y):
        QtGraphicsManager.mouse_released(self, pos_x, pos_y)
        self.mouse_hold = False

    def mouse_moved(self, pos_x, pos_y):
        QtGraphicsManager.mouse_moved(self, pos_x, pos_y)
        if self.mouse_hold:
            if self.mouse_coord[0] - pos_x > 0:
                index = self.current_image_index + 1
            elif self.mouse_coord[0] - pos_x < 0:
                index = self.current_image_index - 1
            else:
                return

            if index < 0:
                index = self.image_count - 1
            elif index >= self.image_count:
                index = 0
            self.mouse_coord[0] = pos_x
            self.display_image(index)

    def measure_item_changed(self, measured_points, measured_pix_num):
        start_x = measured_points[0].x()
        start_y = measured_points[0].y()
        end_x = measured_points[1].x()
        end_y = measured_points[1].y()

        if self.image_reading_thread is None:
            im = np.array(self.qimage.bits()).reshape(self.qimage.width(),
                                                      self.qimage.height())
        else:
            im = self.image_reading_thread.get_raw_image(
                self.current_image_index)
        #im_slice = im[start_x:start_y,end_x,end_y]
        #print im_slice.size, im_slice
        x = np.linspace(start_x, end_x, measured_pix_num)
        y = np.linspace(start_y, end_y, measured_pix_num)

        zi = ndimage.map_coordinates(im, np.vstack((x, y)))

        self.emit("measureItemChanged", zi)

    def get_graphics_view(self):
        return self.graphics_view

    def set_repeate_image_play(self, value):
        self.repeat_image_play = value

    def set_graphics_scene_size(self, size, fixed):
        pass

    def stop_move_beam_mark(self):
        """Stops to move beam mark

        :emits: infoMsg as str
        """
        self.set_cursor_busy(False)
        self.in_move_beam_mark_state = False
        self.graphics_move_beam_mark_item.hide()
        self.graphics_view.graphics_scene.update()
        pos_x = self.graphics_move_beam_mark_item.end_coord[0]
        pos_y = self.graphics_move_beam_mark_item.end_coord[1]

        HWR.beamline.diffractometer.set_imaging_beam_position(pos_x, pos_y)
        logging.getLogger("GUI").info("Imaging beam position set to %d, %d" %
                                      (pos_x, pos_y))
        self.emit("infoMsg", "")

    def diffractometer_phi_motor_moved(self, position):
        """Method when phi motor changed. Updates omega reference by
           redrawing phi angle

        :param position: phi rotation value
        :type position: float
        """
        QtGraphicsManager.diffractometer_phi_motor_moved(self, position)
        #logging.getLogger("GUI").info(str(position))
        self.display_image_by_angle(position)

    def pre_execute(self, data_model):
        self._failed = False
        """
        if self.beam_focusing_hwobj.get_focus_mode() != "imaging":
            self._error_msg = "Beamline is not in Imaging mode"
            self.emit("collectFailed", self._error_msg)
            logging.getLogger("GUI").error("Imaging: Error during acquisition (%s)" % self._error_msg)
            self.ready_event.set()
            self._collecting = False
            self._failed = True
            return
        """

        self.emit("progressInit", ("Image acquisition", 100, False))
        self._collect_frame = 0
        self.printed_warnings = []
        self.printed_errors = []
        self.ff_ssim = None
        self.config_dict = {}

        path_template = data_model.acquisitions[0].path_template
        acq_params = data_model.acquisitions[0].acquisition_parameters
        im_params = data_model.xray_imaging_parameters

        self._number_of_images = acq_params.num_images

        if im_params.detector_distance is not None:
            logging.getLogger("GUI").warning(
                "Imaging: Setting detector distance to %d mm" %
                int(im_params.detector_distance))

            delta = (im_params.detector_distance -
                     self.reference_distance) * self.reference_angle
            for motor in self.beam_focusing_hwobj.get_focus_motors():
                if motor["motorName"] == "P14DetHor1":
                    target = motor["focusingModes"]["Imaging"] + delta
                    tine.set("/P14/P14DetTrans/P14detHor1", "Move.START",
                             target)
                elif motor["motorName"] == "P14DetHor2":
                    target = motor["focusingModes"]["Imaging"] + delta
                    tine.set("/P14/P14DetTrans/P14detHor2", "Move.START",
                             target)
            #TODO add later wait
            time.sleep(3)
            HWR.beamline.detector.distance.set_value(
                im_params.detector_distance, timeout=30)
            logging.getLogger("GUI").info("Imaging: Detector distance set")

        self.cmd_collect_detector("pco")
        self.cmd_collect_directory(str(path_template.directory))
        self.cmd_collect_template(str(path_template.get_image_file_name()))
        self.cmd_collect_scan_type("xrimg")

        self.cmd_collect_exposure_time(acq_params.exp_time)
        self.cmd_collect_num_images(acq_params.num_images)
        self.cmd_collect_start_angle(acq_params.osc_start)
        self.cmd_collect_range(acq_params.osc_range)
        self.cmd_collect_in_queue(acq_params.in_queue != False)
        shutter_name = HWR.beamline.detector.get_shutter_name()
        self.cmd_collect_shutter(shutter_name)

        self.cmd_collect_ff_num_images(im_params.ff_num_images)
        self.cmd_collect_ff_offset([
            im_params.sample_offset_a,
            im_params.sample_offset_b,
            im_params.sample_offset_c,
        ])
        self.cmd_collect_ff_pre(im_params.ff_pre)
        self.cmd_collect_ff_post(im_params.ff_post)

        if acq_params.osc_range == 0:
            self.cmd_camera_trigger(False)
        else:
            self.cmd_camera_trigger(True)
        self.cmd_camera_live_view(im_params.ff_apply)
        self.cmd_camera_write_data(im_params.camera_write_data)
        self.cmd_camera_ff_ssim(im_params.ff_ssim_enabled)

        self.set_osc_start(acq_params.osc_start)

        self.current_dc_parameters = qmo.to_collect_dict(
            data_model, HWR.beamline.session, qmo.Sample())[0]
        self.current_dc_parameters["status"] = "Running"
        self.current_dc_parameters["comments"] = ""

        self.motor_positions = HWR.beamline.diffractometer.get_positions()
        self.take_crystal_snapshots()

        self.store_data_collection_in_lims()

    def execute(self, data_model):
        if not self._failed:
            self._collecting = True
            self.ready_event.clear()
            gevent.spawn(self.cmd_collect_start)
            #self.cmd_collect_start()
            #if data_model.xray_imaging_parameters.camera_write_data:
            #    self.read_images_task = gevent.spawn(self.load_images, None, None, None, data_model)
            self.ready_event.wait()
            self.ready_event.clear()

    def post_execute(self, data_model):
        self.emit("progressStop", ())
        self._collecting = False

        acq_params = data_model.acquisitions[0].acquisition_parameters
        path_template = data_model.acquisitions[0].path_template

        filename_template = "%s_%d_%" + str(path_template.precision) + "d"
        config_filename = filename_template % (
            path_template.base_prefix, path_template.run_number,
            path_template.start_num) + ".json"
        config_file_path = os.path.join(path_template.directory,
                                        config_filename)
        archive_config_path = os.path.join(
            path_template.get_archive_directory(), config_filename)

        self.config_dict = {
            "motor_pos": self.motor_positions,
            "collect": self.current_dc_parameters,
            "imaging": data_model.xray_imaging_parameters.as_dict(),
            "ff_ssim": None
        }

        if data_model.xray_imaging_parameters.ff_pre:
            self.config_dict["ff_ssim"] = self.ff_ssim
        try:
            if not os.path.exists(path_template.get_archive_directory()):
                os.makedirs(path_template.get_archive_directory())
            with open(archive_config_path, 'w') as outfile:
                json.dump(self.config_dict, outfile)
            logging.getLogger("GUI").info(
                "Imaging: Acquisition parameters saved in %s" %
                archive_config_path)
        except:
            logging.getLogger("GUI").error(
                "Imaging: Unable to save acquisition parameters in %s" %
                archive_config_path)

        #self.current_dc_parameters["status"] = "Data collection successful"
        self.update_data_collection_in_lims()

        #Copy first and last image to ispyb
        if data_model.xray_imaging_parameters.camera_write_data:
            image_filename = filename_template % (
                path_template.base_prefix, path_template.run_number,
                path_template.start_num) + ".jpeg"
            image_filename = os.path.join(
                path_template.get_archive_directory(), image_filename)
            raw_image = read_image(path_template.get_image_path() %
                                   path_template.start_num,
                                   timeout=3)
            if raw_image is not None:
                misc.imsave(image_filename, raw_image)
                #Scale image from 2048x2048 to 256x256
                misc.imsave(image_filename.replace(".jpeg", ".thumb.jpeg"),
                            misc.imresize(raw_image, (256, 256)))
                self.store_image_in_lims(path_template.start_num)

                if acq_params.num_images > 1:
                    image_filename = filename_template % (
                        path_template.base_prefix, path_template.run_number,
                        acq_params.num_images - 1) + ".jpeg"
                    image_filename = os.path.join(
                        path_template.get_archive_directory(), image_filename)
                    raw_image = read_image(path_template.get_image_path() %
                                           acq_params.num_images,
                                           timeout=3)
                    if raw_image is not None:
                        misc.imsave(image_filename, raw_image)
                        misc.imsave(
                            image_filename.replace(".jpeg", ".thumb.jpeg"),
                            misc.imresize(raw_image, (256, 256)))
                        self.store_image_in_lims(acq_params.num_images -
                                                 path_template.start_num)

    @task
    def _take_crystal_snapshot(self, filename):
        """Saves crystal snapshot"""
        self.graphics_manager_hwobj.save_scene_snapshot(filename)

    def data_collection_hook(self):
        pass

    @task
    def move_motors(self, motor_position_dict):
        """Move to centred position"""
        if motor_position_dict:
            HWR.beamline.diffractometer.move_motors(motor_position_dict)

    def trigger_auto_processing(self, process_event, frame_number):
        pass

    def collect_status_update(self, status):
        """Status event that controls execution

        :param status: collection status
        :type status: string
        """
        if status != self._actual_collect_status:
            self._previous_collect_status = self._actual_collect_status
            self._actual_collect_status = status
            if self._collecting:
                if self._actual_collect_status == "error":
                    #self.emit("collectFailed", self._error_msg)
                    error_msg = "Error during the acquisition (%s)" % self._error_msg
                    logging.getLogger("GUI").error("Imaging: %s" % error_msg)
                    self.collection_failed(error_msg)
                if self._previous_collect_status is None:
                    if self._actual_collect_status == "busy":
                        self.print_log("GUI", "info",
                                       "Imaging: Preparing acquisition...")
                elif self._previous_collect_status == "busy":
                    if self._actual_collect_status == "collecting":
                        self.emit("collectStarted", (None, 1))
                        self.print_log("GUI", "info",
                                       "Imaging: Acquisition started")
                    elif self._actual_collect_status == "ready":
                        self.ready_event.set()
                        self._collecting = False
                elif self._previous_collect_status == "collecting":
                    if self._actual_collect_status == "ready":
                        self.ready_event.set()
                        self._collecting = False
                        #if self.ff_ssim is None:
                        #    self.ff_ssim_changed(self.chan_ff_ssim.getValue())
                        logging.getLogger("GUI").info(
                            "Imaging: Acquisition done")
                    elif self._actual_collect_status == "aborting":
                        self.print_log("HWR", "info", "Imaging: Aborting...")
                        self.ready_event.set()
                        self._collecting = False

    def collect_error_update(self, error_msg):
        """Collect error behaviour

        :param error_msg: error message
        :type error_msg: string
        """

        if self._collecting and len(error_msg) > 0:
            self._error_msg = error_msg.replace("\n", "")
            logging.getLogger("GUI").error(
                "Imaging: Error from detector server: %s" % error_msg)

    def collect_frame_update(self, frame):
        """Image frame update

        :param frame: frame num
        :type frame: int
        """
        if self._collecting:
            if self._collect_frame != frame:
                self._collect_frame = frame
                self.emit("progressStep",
                          (int(float(frame) / self._number_of_images * 100)))
                self.emit("collectImageTaken", frame)

    def camera_warning_update(self, warning_str):
        if self._collecting:
            if warning_str.endswith("\n"):
                warning_str = warning_str[:-1]
            if warning_str.startswith("\n"):
                warning_str = warning_str[1:]
            warning_list = warning_str.split("\n")

            for warning in warning_list:
                if warning and warning not in self.printed_warnings:
                    logging.getLogger("GUI").warning(
                        "Imaging: PCO camera warning: %s" % warning)
                    self.printed_warnings.append(warning)

    def camera_error_update(self, error_str):
        if self._collecting:
            if error_str.endswith("\n"):
                error_str = error_str[:-1]
            if error_str.startswith("\n"):
                error_str = error_str[1:]
            error_list = error_str.split("\n")

            for error in error_list:
                if error and error not in self.printed_errors:
                    logging.getLogger("GUI").error(
                        "Imaging: PCO camera error: %s" % error)
                    self.printed_errors.append(error)

    def set_ff_apply(self, state):
        self.ff_apply = state
        self.display_image(self.current_image_index)

    def display_image_by_angle(self, angle=None):
        if self.config_dict:
            if not angle:
                angle = self.omega_angle
            osc_seq = self.config_dict["collect"]["oscillation_sequence"][0]
            index = int(osc_seq["range"] * (angle - osc_seq["start"]))
            self.display_image(index)

    def display_image(self, index):
        if self.image_reading_thread is None:
            return

        #osc_seq = self.config_dict["collect"]["oscillation_sequence"][0]
        #angle = osc_seq["start"] + index * osc_seq["range"]
        #self.motor_positions["phi"] = angle
        #HWR.beamline.diffractometer.set_static_positions(self.motor_positions)
        #self.graphics_omega_reference_item.set_phi_position(angle)
        self.current_image_index = index

        raw_image = self.image_reading_thread.get_raw_image(index)

        if self.ff_apply:
            if self.ff_corrected_list[index] is None:
                corrected_im_min_max = self.image_reading_thread.get_corrected_im_min_max(
                )
                ff_image = self.image_reading_thread.get_ff_image(
                    index).astype(float)
                ff_corrected_image = np.divide(raw_image.astype(float),
                                               ff_image,
                                               out=np.ones_like(
                                                   raw_image.astype(float)),
                                               where=ff_image != 0)
                im = 255. * (ff_corrected_image - corrected_im_min_max[0]) / (
                    corrected_im_min_max[1] - corrected_im_min_max[0])
                self.ff_corrected_list[index] = im.astype(np.uint16)
            else:
                im = self.ff_corrected_list[index]
        else:
            raw_im_min_max = self.image_reading_thread.get_raw_im_min_max()
            im = 255. * (raw_image - raw_im_min_max[0]) / (raw_im_min_max[1] -
                                                           raw_im_min_max[0])

        if im is not None:
            self.qimage = QtImport.QImage(im.astype(np.uint8), im.shape[1],
                                          im.shape[0], im.shape[1],
                                          QtImport.QImage.Format_Indexed8)
            self.graphics_camera_frame.setPixmap(
                self.qpixmap.fromImage(self.qimage))
            self.emit("imageLoaded", index)

    def display_image_relative(self, relative_index):
        self.display_image(self.current_image_index + relative_index)

    def play_image_relative(self, relative_angle):
        self.play_images(0.04, relative_angle, False)

    def set_osc_start(self, osc_start):
        self.collect_omega_start = osc_start

    def set_omega_move_enabled(self, state):
        self.omega_move_enabled = state

    def get_ff_and_config_path(self, raw_image_path):
        directory = os.path.dirname(raw_image_path)
        filename = os.path.basename(raw_image_path)

        ff_path = os.path.join(directory, "ff_%s_00001.tiff" % filename[:-11])
        if not os.path.exists(ff_path):
            ff_path = None
        config_path = os.path.join(
            directory.replace("mnt/beegfs/P14", "data/ispyb/p14"),
            "%s_00001.json" % filename[:-11])
        if not os.path.exists(config_path):
            config_path = None

        return ff_path, config_path

    def load_images(self,
                    data_path=None,
                    flat_field_path=None,
                    config_path=None,
                    data_model=None,
                    load_all=True):
        ff_ssim = None
        raw_filename_list = []
        ff_filename_list = []
        self.config_dict = {}
        self.omega_start = HWR.beamline.diffractometer.get_omega_position()
        self.motor_positions = None
        self.image_reading_thread = None

        if not data_model:
            if data_path.endswith("tiff"):
                ext_len = 4
            else:
                ext_len = 3

            base_name_list = os.path.splitext(os.path.basename(data_path))
            prefix = base_name_list[0][:-(ext_len + 1)]

            # Reading config json --------------------------------------------
            if config_path is None:
                config_path = data_path[:-ext_len] + "json"
            if os.path.exists(config_path):
                with open(config_path) as f:
                    self.config_dict = json.load(f)
                ff_ssim = self.config_dict["ff_ssim"]
                self.motor_positions = deepcopy(self.config_dict["motor_pos"])
                HWR.beamline.diffractometer.set_static_positions(
                    self.motor_positions)
            else:
                logging.getLogger("user_level_log").error(
                    "Imaging: Unable to open config file %s" % config_path)

        if data_model:
            if data_model.xray_imaging_parameters.ff_pre:
                acq_params = data_model.acquisitions[0].acquisition_parameters
                path_template = data_model.acquisitions[0].path_template
                for index in range(
                        data_model.xray_imaging_parameters.ff_num_images):
                    ff_filename_list.append(
                        os.path.join(
                            path_template.directory, "ff_" +
                            path_template.get_image_file_name() % (index + 1)))
        elif os.path.exists(flat_field_path):

            base_name_list = os.path.splitext(os.path.basename(data_path))
            ff_prefix = base_name_list[0][:-(ext_len + 1)]
            os.chdir(os.path.dirname(flat_field_path))
            ff_filename_list = [
                os.path.join(os.path.dirname(flat_field_path), f)
                for f in os.listdir(os.path.dirname(flat_field_path))
                if f.startswith("ff_" + prefix)
            ]
            ff_filename_list.sort()

        # Reading raw images -------------------------------------------------
        if data_model:
            acq_params = data_model.acquisitions[0].acquisition_parameters
            path_template = data_model.acquisitions[0].path_template
            for index in range(acq_params.num_images):
                raw_filename_list.append(
                    os.path.join(
                        path_template.directory,
                        path_template.get_image_file_name() % (index + 1)))
        elif os.path.exists(data_path):
            os.chdir(os.path.dirname(data_path))
            raw_filename_list = sorted([
                os.path.join(os.path.dirname(data_path), f)
                for f in os.listdir(os.path.dirname(data_path))
                if f.startswith(prefix)
            ])
        else:
            acq_params = data_model.acquisitions[0].acquisition_parameters
            path_template = data_model.acquisitions[0].path_template
            for index in range(acq_params.num_images):
                raw_filename_list.append(
                    os.path.join(
                        path_template.directory,
                        path_template.get_image_file_name() % (index + 1)))

        self.image_count = len(raw_filename_list)
        if self.image_reading_thread is not None:
            self.image_reading_thread.set_stop()

        self.ff_corrected_list = [None] * self.image_count

        self.image_reading_thread = ImageReadingThread(raw_filename_list,
                                                       ff_filename_list,
                                                       ff_ssim)
        self.image_reading_thread.start()

        self.current_image_index = 0
        self.emit('imageInit', self.image_count)

        gevent.sleep(2)
        self.last_image_index = 0
        self.display_image_by_angle()

    def play_images(self, exp_time=0.04, relative_angle=None, repeat=True):
        self.image_polling = gevent.spawn(self.do_image_polling, exp_time,
                                          relative_angle, repeat)

    def do_image_polling(self, exp_time=0.04, relative_angle=0, repeat=False):
        self.repeat_image_play = repeat

        direction = 1
        if relative_angle < 0:
            direction = -1
        rotate_in_sec = 2
        step = int(self.image_count / (rotate_in_sec / exp_time))
        start_index = self.current_image_index
        index = 0

        while self.current_image_index < self.image_count:
            if relative_angle:
                if index >= abs(self.image_count / 360. * relative_angle):
                    break
            logging.getLogger("HWR").debug("display: " +
                                           str(self.current_image_index))
            self.display_image(self.current_image_index)
            self.current_image_index += direction * step
            if self.repeat_image_play and self.current_image_index >= self.image_count:
                self.current_image_index = 0
            gevent.sleep(exp_time)
            index += step
        if relative_angle:
            self.display_image(
                int(start_index + self.image_count / 360. * relative_angle))

    def stop_image_play(self):
        self.image_polling.kill()

    def stop_collect(self):
        self.cmd_collect_abort()

    def mouse_wheel_scrolled(self, delta):
        if self.image_reading_thread is None or \
           self.image_reading_thread.get_raw_image(self.current_image_index) is None:
            return

        if delta > 0:
            self.current_image_index -= 1
            if self.current_image_index < 0:
                self.current_image_index = self.image_count - 1
        else:
            self.current_image_index += 1
            if self.current_image_index == self.image_count:
                self.current_image_index = 0
        self.display_image(self.current_image_index)

    def start_centering(self):
        self.centering_started = 3
        self.set_centring_state(True)

        #osc_seq = self.config_dict["collect"]["oscillation_sequence"][0]
        #angle = osc_seq["start"] + index * osc_seq["range"]
        self.motor_positions["phi"] = self.omega_angle
        HWR.beamline.diffractometer.set_static_positions(self.motor_positions)

        HWR.beamline.diffractometer.start_centring_method(
            HWR.beamline.diffractometer.CENTRING_METHOD_IMAGING)

    def start_n_centering(self):
        self.centering_started = 100
        self.set_centring_state(True)
        HWR.beamline.diffractometer.start_centring_method(
            HWR.beamline.diffractometer.CENTRING_METHOD_IMAGING_N)
Example #9
class EMBLXrayImaging(QtGraphicsManager, AbstractCollect):
    """
    Based on the collection (AbstractCollect) and graphics (QtGraphicsManager) managers
    """

    def __init__(self, *args):
        QtGraphicsManager.__init__(self, *args)
        AbstractCollect.__init__(self, *args)

        self.ff_apply = False
        self.ff_ssim = None
        self.qimage = None
        self.qpixmap = None
        self.image_count = 0
        self.image_reading_thread = None
        self.image_processing_thread = None
        self.ff_corrected_list = []
        self.config_dict = {}
        self.collect_omega_start = 0
        self.omega_start = 0
        self.omega_move_enabled = False
        self.last_image_index = None

        self.image_dimension = (0, 0)
        self.graphics_camera_frame = None
        self.image_polling = None
        self.repeat_image_play = None
        self.current_image_index = None
        self.mouse_hold = False
        self.mouse_coord = [0, 0]
        self.centering_started = 0

        self.current_dc_parameters = None
        self._previous_collect_status = None
        self._actual_collect_status = None
        self._failed = False
        self._number_of_images = 0
        self._collect_frame = 0
        self.printed_warnings = []
        self.printed_errors = []

        self.chan_collect_status = None
        self.chan_collect_frame = None
        self.chan_collect_error = None
        self.chan_camera_error = None
        self.chan_camera_warning = None
        self.chan_frame = None
        self.chan_ff_ssim = None

        self.cmd_collect_compression = None
        self.cmd_collect_detector = None
        self.cmd_collect_directory = None
        self.cmd_collect_exposure_time = None
        self.cmd_collect_in_queue = None
        self.cmd_collect_num_images = None
        self.cmd_collect_overlap = None
        self.cmd_collect_range = None
        self.cmd_collect_scan_type = None
        self.cmd_collect_shutter = None
        self.cmd_collect_start_angle = None
        self.cmd_collect_template = None
        self.cmd_collect_start = None
        self.cmd_collect_shutterless = None
        self.cmd_collect_abort = None

        self.cmd_collect_ff_num_images = None
        self.cmd_collect_ff_offset = None
        self.cmd_collect_ff_pre = None
        self.cmd_collect_ff_post = None

        self.cmd_camera_trigger = None
        self.cmd_camera_live_view = None
        self.cmd_camera_write_data = None
        self.cmd_camera_ff_ssim = None

        self.beam_focusing_hwobj = None
        self.session_hwobj = None

    def init(self):
        AbstractCollect.init(self)
        self.ready_event = gevent.event.Event()
        self.image_dimension = (2048, 2048)

        QtGraphicsManager.init(self)

        self.disconnect(self.camera_hwobj, "imageReceived", self.camera_image_received)

        self.disconnect(
            self.diffractometer_hwobj,
            "minidiffStateChanged",
            self.diffractometer_state_changed,
        )
        self.disconnect(
            self.diffractometer_hwobj,
            "centringStarted",
            self.diffractometer_centring_started,
        )
        self.disconnect(
            self.diffractometer_hwobj, "centringAccepted", self.create_centring_point
        )
        self.disconnect(
            self.diffractometer_hwobj,
            "centringSuccessful",
            self.diffractometer_centring_successful,
        )
        self.disconnect(
            self.diffractometer_hwobj,
            "centringFailed",
            self.diffractometer_centring_failed,
        )
        self.disconnect(
            self.diffractometer_hwobj,
            "pixelsPerMmChanged",
            self.diffractometer_pixels_per_mm_changed,
        )
        self.disconnect(
            self.diffractometer_hwobj,
            "omegaReferenceChanged",
            self.diffractometer_omega_reference_changed,
        )
        self.disconnect(
            self.diffractometer_hwobj,
            "minidiffPhaseChanged",
            self.diffractometer_phase_changed,
        )

        self.diffractometer_pixels_per_mm_changed((20.0, 20.0))

        self.camera_hwobj = None

        self.graphics_scale_item.set_start_position(20, self.image_dimension[1] - 20)

        self.graphics_scale_item.set_custom_pen_color(Colors.BLUE)
        self.graphics_omega_reference_item.set_custom_pen_color(Colors.DARK_BLUE)
        self.graphics_measure_distance_item.set_custom_pen_color(Colors.DARK_BLUE)
        self.graphics_beam_item.hide()

        self.graphics_view.scene().measureItemChanged.connect(self.measure_item_changed)
        self.graphics_view.scene().setSceneRect(
            0, 0, self.image_dimension[0], self.image_dimension[1]
        )

        self.qimage = QtImport.QImage()
        self.qpixmap = QtImport.QPixmap()

        self.chan_frame = self.getChannelObject("chanFrame")
        self.chan_frame.connectSignal("update", self.frame_changed)

        self.chan_ff_ssim = self.getChannelObject("chanFFSSIM")
        self.chan_ff_ssim.connectSignal("update", self.ff_ssim_changed)

        self.chan_collect_status = self.getChannelObject("collectStatus")
        self._actual_collect_status = self.chan_collect_status.getValue()
        self.chan_collect_status.connectSignal("update", self.collect_status_update)

        self.chan_collect_frame = self.getChannelObject("chanFrameCount")
        self.chan_collect_frame.connectSignal("update", self.collect_frame_update)

        self.chan_collect_error = self.getChannelObject("collectError")
        self.chan_collect_error.connectSignal("update", self.collect_error_update)

        self.chan_camera_warning = self.getChannelObject("cameraWarning")
        self.chan_camera_warning.connectSignal("update", self.camera_warning_update)

        self.chan_camera_error = self.getChannelObject("cameraError")
        self.chan_camera_error.connectSignal("update", self.camera_error_update)

        self.cmd_collect_detector = self.getCommandObject("collectDetector")
        self.cmd_collect_directory = self.getCommandObject("collectDirectory")
        self.cmd_collect_exposure_time = self.getCommandObject("collectExposureTime")
        self.cmd_collect_in_queue = self.getCommandObject("collectInQueue")
        self.cmd_collect_num_images = self.getCommandObject("collectNumImages")
        self.cmd_collect_range = self.getCommandObject("collectRange")
        self.cmd_collect_scan_type = self.getCommandObject("collectScanType")
        self.cmd_collect_shutter = self.getCommandObject("collectShutter")
        self.cmd_collect_shutterless = self.getCommandObject("collectShutterless")
        self.cmd_collect_start_angle = self.getCommandObject("collectStartAngle")
        self.cmd_collect_template = self.getCommandObject("collectTemplate")

        self.cmd_collect_ff_num_images = self.getCommandObject("collectFFNumImages")
        self.cmd_collect_ff_offset = self.getCommandObject("collectFFOffset")
        self.cmd_collect_ff_pre = self.getCommandObject("collectFFPre")
        self.cmd_collect_ff_post = self.getCommandObject("collectFFPost")

        self.cmd_camera_trigger = self.getCommandObject("cameraTrigger")
        self.cmd_camera_live_view = self.getCommandObject("cameraLiveView")
        self.cmd_camera_write_data = self.getCommandObject("cameraWriteData")
        self.cmd_camera_ff_ssim = self.getCommandObject("cameraFFSSIM")

        self.cmd_collect_start = self.getCommandObject("collectStart")
        self.cmd_collect_abort = self.getCommandObject("collectAbort")

        self.beam_focusing_hwobj = self.getObjectByRole("beam_focusing")
        self.session_hwobj = self.getObjectByRole("session")

    def frame_changed(self, data):
        """
        Displays a frame coming from the camera
        :param data:
        :return:
        """
        if self._collecting:
            jpgdata = StringIO(data)
            im = Image.open(jpgdata)
            self.qimage = ImageQt(im)
            self.graphics_camera_frame.setPixmap(
                self.qpixmap.fromImage(self.qimage, QtImport.Qt.MonoOnly)
            )

    def ff_ssim_changed(self, value):
        """
        Updates ff ssim
        :param value: list of lists
        :return:
        """
        if self._collecting:
            self.ff_ssim = list(value)
            self.ff_ssim.sort()

    def mouse_clicked(self, pos_x, pos_y, left_click):
        """
        Mouse click event for centering
        :param pos_x: int
        :param pos_y: int
        :param left_click: boolean
        :return:
        """
        QtGraphicsManager.mouse_clicked(self, pos_x, pos_y, left_click)
        # self.mouse_hold = True
        # self.mouse_coord = [pos_x, pos_y]
        if self.centering_started:
            self.diffractometer_hwobj.image_clicked(pos_x, pos_y)
            self.play_image_relative(90)
            # self.diffractometer_hwobj.move_omega_relative(90, timeout=5)
            self.centering_started -= 1

    def mouse_released(self, pos_x, pos_y):
        """
        Mouse release event
        :param pos_x:
        :param pos_y:
        :return:
        """
        QtGraphicsManager.mouse_released(self, pos_x, pos_y)
        self.mouse_hold = False

    def mouse_moved(self, pos_x, pos_y):
        """
        Mouse move event
        :param pos_x:
        :param pos_y:
        :return:
        """
        QtGraphicsManager.mouse_moved(self, pos_x, pos_y)
        if self.mouse_hold:
            if self.mouse_coord[0] - pos_x > 0:
                index = self.current_image_index + 1
            elif self.mouse_coord[0] - pos_x < 0:
                index = self.current_image_index - 1
            else:
                return

            if index < 0:
                index = self.image_count - 1
            elif index >= self.image_count:
                index = 0
            self.mouse_coord[0] = pos_x
            self.display_image(index)

    def measure_item_changed(self, measured_points, measured_pix_num):
        """
        Updates image measurement item
        :param measured_points:
        :param measured_pix_num:
        :return:
        """
        start_x = measured_points[0].x()
        start_y = measured_points[0].y()
        end_x = measured_points[1].x()
        end_y = measured_points[1].y()

        if self.image_reading_thread is None:
            im = np.array(self.qimage.bits()).reshape(
                self.qimage.width(), self.qimage.height()
            )
        else:
            im = self.image_reading_thread.get_raw_image(self.current_image_index)
        # im_slice = im[start_x:start_y,end_x,end_y]
        # print im_slice.size, im_slice
        x = np.linspace(start_x, end_x, measured_pix_num)
        y = np.linspace(start_y, end_y, measured_pix_num)

        zi = ndimage.map_coordinates(im, np.vstack((x, y)))

        self.emit("measureItemChanged", zi)

    def get_graphics_view(self):
        """
        Returns graphics view
        :return:
        """
        return self.graphics_view

    def set_repeate_image_play(self, value):
        """
        Sets whether the image play repeats
        :param value:
        :return:
        """
        self.repeat_image_play = value

    def set_graphics_scene_size(self, size, fixed):
        pass

    def pre_execute(self, data_model):
        """
        Pre execute method
        :param data_model:
        :return:
        """
        self._failed = False

        """
        if self.beam_focusing_hwobj.get_focus_mode() != "imaging":
            self._error_msg = "Beamline is not in Imaging mode"
            self.emit("collectFailed", self._error_msg)
            logging.getLogger("GUI").error("Imaging: Error during acquisition (%s)" % self._error_msg)
            self.ready_event.set()
            self._collecting = False
            self._failed = True
            return
        """

        self.emit("progressInit", ("Image acquisition", 100, False))
        self._collect_frame = 0
        self.printed_warnings = []
        self.printed_errors = []
        self.ff_ssim = None
        self.config_dict = {}

        path_template = data_model.acquisitions[0].path_template
        acq_params = data_model.acquisitions[0].acquisition_parameters
        im_params = data_model.xray_imaging_parameters

        self._number_of_images = acq_params.num_images

        if im_params.detector_distance:
            delta = im_params.detector_distance - self.detector_hwobj.get_distance()
            if abs(delta) > 0.0001:
                tine.set(
                    "/P14/P14DetTrans/ComHorTrans",
                    "IncrementMove.START",
                    -0.003482 * delta,
                )
                self.detector_hwobj.set_distance(
                    im_params.detector_distance, wait=True, timeout=30
                )

        self.cmd_collect_detector("pco")
        self.cmd_collect_directory(str(path_template.directory))
        self.cmd_collect_template(str(path_template.get_image_file_name()))
        self.cmd_collect_scan_type("xrimg")

        self.cmd_collect_exposure_time(acq_params.exp_time)
        self.cmd_collect_num_images(acq_params.num_images)
        self.cmd_collect_start_angle(acq_params.osc_start)
        self.cmd_collect_range(acq_params.osc_range)
        self.cmd_collect_in_queue(acq_params.in_queue != False)
        shutter_name = self.detector_hwobj.get_shutter_name()
        self.cmd_collect_shutter(shutter_name)

        self.cmd_collect_ff_num_images(im_params.ff_num_images)
        self.cmd_collect_ff_offset(
            [
                im_params.sample_offset_a,
                im_params.sample_offset_b,
                im_params.sample_offset_c,
            ]
        )
        self.cmd_collect_ff_pre(im_params.ff_pre)
        self.cmd_collect_ff_post(im_params.ff_post)

        if acq_params.osc_range == 0:
            self.cmd_camera_trigger(False)
        else:
            self.cmd_camera_trigger(True)
        self.cmd_camera_live_view(im_params.ff_apply)
        self.cmd_camera_write_data(im_params.camera_write_data)
        self.cmd_camera_ff_ssim(im_params.ff_ssim_enabled)

        self.set_osc_start(acq_params.osc_start)

        self.current_dc_parameters = qmo.to_collect_dict(
            data_model, self.session_hwobj, qmo.Sample()
        )[0]
        self.current_dc_parameters["status"] = "Running"
        self.current_dc_parameters["comments"] = ""

        self.store_data_collection_in_lims()

    def execute(self, data_model):
        """
        Main execute method
        :param data_model:
        :return:
        """
        if not self._failed:
            self._collecting = True
            self.ready_event.clear()
            gevent.spawn(self.cmd_collect_start)
            # self.cmd_collect_start()
            # if data_model.xray_imaging_parameters.camera_write_data:
            #    self.read_images_task = gevent.spawn(self.load_images, None, None, None, data_model)
            self.ready_event.wait()
            self.ready_event.clear()

    def post_execute(self, data_model):
        """
        Stores results in ispyb
        :param data_model:
        :return:
        """
        self.emit("progressStop", ())
        self._collecting = False

        acq_params = data_model.acquisitions[0].acquisition_parameters
        path_template = data_model.acquisitions[0].path_template

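        # e.g. with precision 5 this yields "%s_%d_%05d", so the config file
        # below becomes something like "prefix_1_00001.json"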
        filename_template = "%s_%d_%" + str(path_template.precision) + "d"
        config_filename = (
            filename_template
            % (
                path_template.base_prefix,
                path_template.run_number,
                path_template.start_num,
            )
            + ".json"
        )
        #config_file_path = os.path.join(path_template.directory, config_filename)
        archive_config_path = os.path.join(
            path_template.get_archive_directory(), config_filename
        )

        self.config_dict = {
            "collect": acq_params.as_dict(),
            "path": path_template.as_dict(),
            "imaging": data_model.xray_imaging_parameters.as_dict(),
            "ff_ssim": None,
        }

        if data_model.xray_imaging_parameters.ff_pre:
            self.config_dict["ff_ssim"] = self.ff_ssim

        try:
            if not os.path.exists(path_template.get_archive_directory()):
                os.makedirs(path_template.get_archive_directory())
            with open(archive_config_path, "w") as outfile:
                json.dump(self.config_dict, outfile)
            logging.getLogger("GUI").info(
                "Imaging: Acquisition parameters saved in %s" % archive_config_path
            )
        except BaseException:
            logging.getLogger("GUI").error(
                "Imaging: Unable to save acquisition parameters in %s"
                % archive_config_path
            )

        self.current_dc_parameters["status"] = "Data collection successful"
        self.update_data_collection_in_lims()

        # Copy first and last image to ispyb
        if self.image_reading_thread is not None:
            image_filename = (
                filename_template
                % (
                    path_template.base_prefix,
                    path_template.run_number,
                    path_template.start_num,
                )
                + ".jpeg"
            )
            image_filename = os.path.join(
                path_template.get_archive_directory(), image_filename
            )
            # misc.imsave(image_filename, self.image_reading_thread.get_raw_image(0))
            self.store_image_in_lims(0)
        if acq_params.num_images > 1:
            image_filename = (
                filename_template
                % (
                    path_template.base_prefix,
                    path_template.run_number,
                    acq_params.num_images - 1,
                )
                + ".jpeg"
            )
            image_filename = os.path.join(
                path_template.get_archive_directory(), image_filename
            )
            # misc.imsave(image_filename, self.image_reading_thread.get_raw_image(0))
            self.store_image_in_lims(acq_params.num_images - 1)

    @task
    def _take_crystal_snapshot(self, filename):
        """Saves crystal snapshot"""
        self.graphics_manager_hwobj.save_scene_snapshot(filename)

    def data_collection_hook(self):
        """
        Not implemented
        :return:
        """
        pass

    def move_motors(self, motor_position_dict):
        """
        Not implemented
        :param motor_position_dict:
        :return:
        """
        pass

    def trigger_auto_processing(self, process_event, frame_number):
        """
        Not implemented
        :param process_event:
        :param frame_number:
        :return:
        """
        pass

    def collect_status_update(self, status):
        """Status event that controls execution

        :param status: collection status
        :type status: string
        """
        if status != self._actual_collect_status:
            self._previous_collect_status = self._actual_collect_status
            self._actual_collect_status = status
            if self._collecting:
                if self._actual_collect_status == "error":
                    self.emit("collectFailed", self._error_msg)
                    logging.getLogger("GUI").error(
                        "Imaging: Error during the acquisition (%s)" % self._error_msg
                    )
                    self.ready_event.set()
                    self._collecting = False
                if self._previous_collect_status is None:
                    if self._actual_collect_status == "busy":
                        self.print_log(
                            "GUI", "info", "Imaging: Preparing acquisition..."
                        )
                elif self._previous_collect_status == "busy":
                    if self._actual_collect_status == "collecting":
                        self.emit("collectStarted", (None, 1))
                        self.print_log("GUI", "info", "Imaging: Acquisition started")
                    elif self._actual_collect_status == "ready":
                        self.ready_event.set()
                        self._collecting = False
                elif self._previous_collect_status == "collecting":
                    if self._actual_collect_status == "ready":
                        self.ready_event.set()
                        self._collecting = False
                        if self.ff_ssim is None:
                            self.ff_ssim_changed(self.chan_ff_ssim.getValue())
                        logging.getLogger("GUI").info("Imaging: Acquisition done")
                    elif self._actual_collect_status == "aborting":
                        self.print_log("HWR", "info", "Imaging: Aborting...")
                        self.ready_event.set()
                        self._collecting = False

    def collect_error_update(self, error_msg):
        """Collect error behaviour

        :param error_msg: error message
        :type error_msg: string
        """

        if self._collecting and len(error_msg) > 0:
            self._error_msg = error_msg.replace("\n", "")
            logging.getLogger("GUI").error(
                "Imaging: Error from detector server: %s" % error_msg
            )

    def collect_frame_update(self, frame):
        """Image frame update

        :param frame: frame num
        :type frame: int
        """
        if self._collecting:
            if self._collect_frame != frame:
                self._collect_frame = frame
                self.emit(
                    "progressStep", (int(float(frame) / self._number_of_images * 100))
                )
                self.emit("collectImageTaken", frame)

    def camera_warning_update(self, warning_str):
        """
        Displays camera warnings
        :param warning_str:
        :return:
        """
        if self._collecting:
            if warning_str.endswith("\n"):
                warning_str = warning_str[:-1]
            if warning_str.startswith("\n"):
                warning_str = warning_str[1:]
            warning_list = warning_str.split("\n")

            for warning in warning_list:
                if warning and warning not in self.printed_warnings:
                    logging.getLogger("GUI").warning(
                        "Imaging: PCO camera warning: %s" % warning
                    )
                    self.printed_warnings.append(warning)

    def camera_error_update(self, error_str):
        """
        Displays camera errors
        :param error_str:
        :return:
        """
        if self._collecting:
            if error_str.endswith("\n"):
                error_str = error_str[:-1]
            if error_str.startswith("\n"):
                error_str = error_str[1:]
            error_list = error_str.split("\n")

            for error in error_list:
                if error and error not in self.printed_errors:
                    logging.getLogger("GUI").error(
                        "Imaging: PCO camera error: %s" % error
                    )
                    self.printed_errors.append(error)

    def set_ff_apply(self, state):
        """
        Apply ff to live view
        :param state:
        :return:
        """
        self.ff_apply = state
        self.display_image(self.current_image_index)

    def display_image(self, index):
        """
        Displays image on the canvas
        :param index: int
        :return:
        """
        # Each displayed image corresponds to 360 / image_count degrees,
        # assuming the series covers a full rotation
        angle = self.collect_omega_start + index * 360.0 / self.image_count
        if angle > 360:
            angle -= 360
        elif angle < 0:
            angle += 360

        self.graphics_omega_reference_item.set_phi_position(angle)
        self.current_image_index = index

        im = self.image_reading_thread.get_raw_image(index)

        if self.ff_apply and self.image_processing_thread:
            if self.ff_corrected_list[index] is None:
                im_min, im_max = self.image_processing_thread.get_im_min_max()
                im = self.image_reading_thread.get_raw_image(index).astype(float)
                ff_image = self.image_reading_thread.get_ff_image(index).astype(float)
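                # Flat-field correction: divide the raw frame by the flat field
                # (guarding against zero pixels), then rescale to the 0-255 range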
                ff_corrected_image = np.divide(
                    im, ff_image, out=np.ones_like(im), where=ff_image != 0
                )
                im = 255.0 * (ff_corrected_image - im_min) / (im_max - im_min)
                self.ff_corrected_list[index] = im.astype(np.uint16)
            else:
                im = self.ff_corrected_list[index]

        # sx = ndimage.sobel(im, axis=0, mode='constant')
        # sy = ndimage.sobel(im, axis=1, mode='constant')
        # im = np.hypot(sx, sy)

        if im is not None:
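            # Build an 8-bit grayscale QImage (bytes per line equals the width);
            # note that QImage does not copy the underlying numpy buffer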
            self.qimage = QtImport.QImage(
                im.astype(np.uint8),
                im.shape[1],
                im.shape[0],
                im.shape[1],
                QtImport.QImage.Format_Indexed8,
            )
            self.graphics_camera_frame.setPixmap(self.qpixmap.fromImage(self.qimage))
            self.emit("imageLoaded", index)

    def display_image_relative(self, relative_index):
        """
        Displays relative image
        :param relative_index:
        :return:
        """
        self.display_image(self.current_image_index + relative_index)

    def play_image_relative(self, relative_angle):
        """
        Starts image video
        :param relative_angle:
        :return:
        """
        self.play_images(0.04, relative_angle, False)

    def set_osc_start(self, osc_start):
        """
        Defines osc start
        :param osc_start: float
        :return:
        """
        self.collect_omega_start = osc_start

    def set_omega_move_enabled(self, state):
        """
        Move omega if the image has been displayed
        :param state:
        :return:
        """
        self.omega_move_enabled = state

    def load_images(
        self,
        data_path=None,
        flat_field_path=None,
        config_path=None,
        data_model=None,
        load_all=True,
    ):
        """
        Load and process images via threads
        :param data_path: str
        :param flat_field_path: str
        :param config_path: str
        :param data_model:
        :param load_all: boolean
        :return:
        """

        ff_ssim = None
        self.config_dict = {}
        self.omega_start = self.diffractometer_hwobj.get_omega_position()

        # Stop any threads left over from a previous load before replacing them
        if self.image_reading_thread is not None:
            self.image_reading_thread.set_stop()
        if self.image_processing_thread is not None:
            image_processing_queue.queue.clear()
            self.image_processing_thread.set_stop()
        self.image_reading_thread = None
        self.image_processing_thread = None

        ff_filename_list = []
        raw_filename_list = []

        if not data_model:
            if data_path.endswith("tiff"):
                ext_len = 4
            else:
                ext_len = 3

            base_name_list = os.path.splitext(os.path.basename(data_path))
            prefix = base_name_list[0][: -(ext_len + 1)]

            # Reading config json --------------------------------------------
            if config_path is None:
                config_path = data_path[:-ext_len] + "json"
            if os.path.exists(config_path):
                with open(config_path) as f:
                    self.config_dict = json.load(f)
                ff_ssim = self.config_dict["ff_ssim"]
                self.set_osc_start(self.config_dict["collect"]["osc_start"])
            else:
                logging.getLogger("user_level_log").error(
                    "Imaging: Unable to open config file %s" % config_path
                )

        if data_model:
            if data_model.xray_imaging_parameters.ff_pre:
                path_template = data_model.acquisitions[0].path_template
                for index in range(data_model.xray_imaging_parameters.ff_num_images):
                    ff_filename_list.append(
                        os.path.join(
                            path_template.directory,
                            "ff_" + path_template.get_image_file_name() % (index + 1),
                        )
                    )
        elif os.path.exists(flat_field_path):

            base_name_list = os.path.splitext(os.path.basename(data_path))
            os.chdir(os.path.dirname(flat_field_path))
            ff_filename_list = sorted(
                [
                    os.path.join(os.path.dirname(flat_field_path), f)
                    for f in os.listdir(os.path.dirname(flat_field_path))
                    if f.startswith("ff_" + prefix)
                ]
            )

        # Reading raw images -------------------------------------------------
        if data_model:
            acq_params = data_model.acquisitions[0].acquisition_parameters
            path_template = data_model.acquisitions[0].path_template
            for index in range(acq_params.num_images):
                raw_filename_list.append(
                    os.path.join(
                        path_template.directory,
                        path_template.get_image_file_name() % (index + 1),
                    )
                )
        elif os.path.exists(data_path):
            os.chdir(os.path.dirname(data_path))
            raw_filename_list = sorted(
                [
                    os.path.join(os.path.dirname(data_path), f)
                    for f in os.listdir(os.path.dirname(data_path))
                    if f.startswith(prefix)
                ]
            )
        else:
            # data_model is None in this branch, so the only way to get here is
            # a data_path that does not exist
            logging.getLogger("GUI").error(
                "Imaging: Data path %s does not exist" % data_path
            )

        self.image_count = len(raw_filename_list)

        self.ff_corrected_list = [None] * self.image_count

        self.image_reading_thread = ImageReadingThread(
            raw_filename_list, ff_filename_list, ff_ssim
        )
        self.image_reading_thread.start()

        if ff_filename_list:
            self.image_processing_thread = ImageProcessingThread(self.image_count)
            self.image_processing_thread.start()

        self.current_image_index = 0
        self.emit("imageInit", self.image_count)

        # gevent.sleep(5)
        self.last_image_index = 0
        # self.display_image(0)

    def play_images(self, exp_time=0.04, relative_index=None, repeat=True):
        """
        Play image video
        :param exp_time:
        :param relative_index:
        :param repeat:
        :return:
        """
        self.image_polling = gevent.spawn(
            self.do_image_polling, exp_time, relative_index, repeat
        )

    def do_image_polling(self, exp_time=0.04, relative_index=1, repeat=False):
        """
        Image polling task
        :param exp_time:
        :param relative_index:
        :param repeat:
        :return:
        """
        self.repeat_image_play = repeat

        direction = 1 if relative_index > 0 else -1
        rotate_in_sec = 10

        # Ensure a step of at least one image so the loop always advances
        step = max(1, int(self.image_count / (rotate_in_sec / exp_time)))
        index = 0

        while self.current_image_index < self.image_count:
            if index >= abs(relative_index):
                break

            self.display_image(self.current_image_index)
            self.current_image_index += direction * step
            if self.repeat_image_play and self.current_image_index >= self.image_count:
                self.current_image_index -= self.image_count
            gevent.sleep(exp_time)
            index += step

    def stop_image_play(self):
        """
        Stop image video
        :return:
        """
        self.image_polling.kill()

    def stop_collect(self):
        """
        Stops image collection
        :return:
        """
        self.cmd_collect_abort()

    def mouse_wheel_scrolled(self, delta):
        """
        Handles mouse scroll
        :param delta:
        :return:
        """
        if (
            self.image_reading_thread is None
            or self.image_reading_thread.get_raw_image(self.current_image_index) is None
        ):
            return

        if delta > 0:
            self.current_image_index -= 1
            if self.current_image_index < 0:
                self.current_image_index = self.image_count - 1
        else:
            self.current_image_index += 1
            if self.current_image_index == self.image_count:
                self.current_image_index = 0
        self.display_image(self.current_image_index)

    def start_centering(self):
        """
        Starts 3 click centering
        :return:
        """
        self.centering_started = 3
        self.diffractometer_hwobj.start_centring_method(
            self.diffractometer_hwobj.CENTRING_METHOD_IMAGING
        )

    def start_n_centering(self):
        """
        Starts n click centering
        :return:
        """
        self.centering_started = 100
        self.diffractometer_hwobj.start_centring_method(
            self.diffractometer_hwobj.CENTRING_METHOD_IMAGING_N
        )

    def move_omega(self, image_index):
        """
        Rotates omega
        :param image_index:
        :return:
        """
        if image_index != self.last_image_index:
            if self.config_dict:
                angle_per_image = self.config_dict["collect"]["osc_range"]
            else:
                # Fall back to assuming the image series covers a full rotation
                angle_per_image = 360.0 / self.image_count
            omega_relative = angle_per_image * (image_index - self.last_image_index)

            self.diffractometer_hwobj.move_omega_relative(omega_relative)
            self.last_image_index = image_index

    def move_omega_relative(self, relative_index):
        """
        Rotates omega relative
        :param relative_index:
        :return:
        """
        self.move_omega(self.last_image_index + relative_index)
Пример #10
0
class Drawer(QWidget):
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setAttribute(Qt.WA_StaticContents)
        self.h = 720
        self.w = 1280
        self.Font_Color = Qt.white
        self.myPenWidth = 15
        self.myPenColor = Qt.black
        self.image = QImage(self.w, self.h, QImage.Format_RGB32)
        self.path = QPainterPath()
        self.clearImage()
        self.flag = False

    def setPenColor(self, newColor):
        self.path = QPainterPath()
        self.myPenColor = newColor

    def setPenWidth(self):
        i, okBtnPressed = QInputDialog.getInt(self, "Размер кисти",
                                              "Введите размер кисти",
                                              self.myPenWidth, 1, 100, 1)
        if okBtnPressed:
            self.path = QPainterPath()
            self.myPenWidth = i
        self.myCursor()

    def setFont(self):
        color = QColorDialog.getColor()
        self.Font_Color = color
        self.clearImage()

    def clearImage(self):
        self.path = QPainterPath()
        self.image.fill(self.Font_Color)
        self.update()

    def saveImage(self):
        try:
            fname = QFileDialog.getSaveFileName(self, 'Выбор файла', '.')[0]
            self.image.save(fname, fname.split(".")[-1].upper())
        except Exception:
            pass

    def loadImage(self):
        self.path = QPainterPath()
        try:
            fname = QFileDialog.getOpenFileName(self, 'Выбор файла', '.')[0]
            self.image = QImage(fname, fname.split(".")[-1].upper())
        except Exception:
            pass

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.drawImage(event.rect(), self.image, self.rect())

    def mousePressEvent(self, event):

        if event.button() == Qt.LeftButton:
            self.flag = True
            self.path.moveTo(event.pos())

    def mouseMoveEvent(self, event):

        if self.flag:

            self.path.lineTo(event.pos())

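            # Repaint the accumulated stroke path onto the off-screen image buffer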
            p = QPainter(self.image)

            p.setPen(
                QPen(self.myPenColor, self.myPenWidth, Qt.SolidLine,
                     Qt.RoundCap, Qt.RoundJoin))

            p.drawPath(self.path)

            p.end()

            self.update()

        else:

            self.path = QPainterPath()

    def sizeHint(self):
        if self.w == 1280 and self.h == 720:
            return QSize(1280, 720)
        return QSize(640, 480)

    def ChangeRes(self):
        if self.w == 1280 and self.h == 720:
            self.w = 640
            self.h = 480
        else:
            self.w = 1280
            self.h = 720
        self.update()
        self.image = QImage(self.w, self.h, QImage.Format_RGB32)
        self.path = QPainterPath()
        self.clearImage()

    def myCursor(self):

        cmap = QPixmap("cursor.png")

        cmap = cmap.scaled(self.myPenWidth + 50 * (1 + self.myPenWidth // 100),
                           self.myPenWidth + 50 * (1 + self.myPenWidth // 100))

        self.setCursor(QCursor(cmap))

    def run(self):

        color = QColorDialog.getColor()

        if color.isValid():

            self.setPenColor(color)

            self.path = QPainterPath()

    def filter_invert_color(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                pixels2[i, j] = 255 - r, 255 - g, 255 - b
        self.image = ImageQt(im2)
        self.update()
        self.path = QPainterPath()

    def filter_black_and_white(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                bw = (r + g + b) // 3
                pixels2[i, j] = bw, bw, bw

        self.image = ImageQt(im2)
        self.update()
        self.path = QPainterPath()

    def filter_left(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                pixels2[i, j] = r, g, b
        im2 = im2.transpose(Image.ROTATE_90)
        self.image = ImageQt(im2)
        self.update()

    def filter_right(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                pixels2[i, j] = r, g, b
        im2 = im2.transpose(Image.ROTATE_270)
        self.image = ImageQt(im2)
        self.update()
Пример #11
0
class App(QMainWindow):
    def __init__(self, log_file: Path, list_file: Path, image_dir: Path):
        super().__init__()

        self.image = QImage()
        self.scaleFactor = 1.0

        self.imageLabel = QLabel()
        self.imageLabel.setBackgroundRole(QPalette.Base)
        self.imageLabel.setFixedSize(1024, 64)
        self.imageLabel.setSizePolicy(QSizePolicy.Ignored, QSizePolicy.Ignored)
        self.imageLabel.setScaledContents(False)

        layout = QVBoxLayout()

        ##################
        # Index
        ##################
        index_widget = QWidget()
        index_layout = QHBoxLayout()
        index_layout.setAlignment(Qt.AlignLeft)

        self.current_line_index = QLineEdit(f'{0:05d}')
        self.current_line_index.setValidator(QIntValidator())
        self.current_line_index.setMaxLength(5)
        self.current_line_index.setFixedWidth(50)
        self.current_line_index.editingFinished.connect(
            self.jump_to_line_index)
        self.total_line_label = QLabel('/0')
        index_layout.addWidget(self.current_line_index)
        index_layout.addWidget(self.total_line_label)

        self.current_path_label = QLineEdit('path')
        self.current_path_label.setFixedWidth(1000)
        self.current_path_label.setReadOnly(True)
        index_layout.addWidget(self.current_path_label)

        index_widget.setLayout(index_layout)
        layout.addWidget(index_widget)

        label = QLabel('Image:')
        layout.addWidget(label)

        self.scrollArea = QScrollArea()
        self.scrollArea.setBackgroundRole(QPalette.Dark)
        self.scrollArea.setWidget(self.imageLabel)
        self.scrollArea.setVisible(False)
        self.scrollArea.setWidgetResizable(True)
        layout.addWidget(self.scrollArea)

        label = QLabel('Label:')
        layout.addWidget(label)

        self.label_text = QLineEdit("label")
        f = self.label_text.font()
        f.setPointSize(27)  # sets the size to 27
        f.setStyleHint(QFont.Monospace)
        self.label_text.setFont(f)
        self.label_text.setFocus()
        self.label_text.setReadOnly(False)
        layout.addWidget(self.label_text)

        label = QLabel('OCR Predict:')
        layout.addWidget(label)

        self.pred_text = QLineEdit("pred")
        f = self.pred_text.font()
        f.setPointSize(27)  # sets the size to 27
        f.setStyleHint(QFont.Monospace)
        self.pred_text.setFont(f)
        self.pred_text.setFocus()
        self.pred_text.setReadOnly(True)
        layout.addWidget(self.pred_text)

        signal_widget = SwitchSignal()
        signal_widget.next.connect(self.next_image)
        signal_widget.prev.connect(self.prev_image)
        layout.addWidget(signal_widget)

        root = QWidget()
        root.setLayout(layout)
        self.setCentralWidget(root)

        self.resize(WIN_SIZE[0], WIN_SIZE[1])

        self.current_index = 0

        self.dataset = Dataset(log_file, list_file, image_dir)

        if len(self.dataset) == 0:
            print('Nothing to do! Nice!')
            exit(0)

        self.label_text.installEventFilter(self)

        self.need_save = False
        self.current_textline = self.dataset[0]
        self.total_line_label.setText(f'{len(self.dataset) - 1:05d}')
        self.set_step(0)

        #######################
        # Set shortcut
        shortcut = QShortcut(QKeySequence("Ctrl+S"), self)
        shortcut.activated.connect(self.save_current_line)

        shortcut_rotate = QShortcut(QKeySequence("Ctrl+R"), self)
        shortcut_rotate.activated.connect(self.rotate)

        self._is_rotate = False

    def save_current_line(self):

        if self.current_textline.textline() != self.label_text.text():
            print('Save text')
            self.current_textline.save(self.label_text.text())

        if self._is_rotate:
            self._is_rotate = False
            print('Save image')
            self.current_textline.save_image(
                self.pillow_image.rotate(self.current_angle, expand=True))

    def jump_to_line_index(self):
        step = int(self.current_line_index.text())
        self.set_step(step)

    def eventFilter(self, source, event):
        if (event.type() == QEvent.KeyPress and source is self.label_text):
            if event.key() == Qt.Key_Down:
                self.next_image()
            elif event.key() == Qt.Key_Up:
                self.prev_image()
        return super().eventFilter(source, event)

    def next_image(self):
        self.set_step(self.current_index + 1)

    def prev_image(self):
        self.set_step(self.current_index - 1)

    def is_able_to_next(self, step):
        if step >= len(self.dataset):
            if self.current_index == len(self.dataset) - 1:
                return False
        return True

    def is_able_to_back(self, step):
        if step < 0:
            if self.current_index == 0:
                return False
        return True

    def set_step(self, step):
        if step >= len(self.dataset) or step < 0:
            return

        if step != self.current_index:
            # check if next/prev image without save
            if self.current_textline.textline() != self.label_text.text():
                buttonReply = QMessageBox.question(
                    self, 'Text not saved', "Would you like to save it?",
                    QMessageBox.Yes | QMessageBox.No, QMessageBox.No)
                if buttonReply == QMessageBox.Yes:
                    self.save_current_line()

        self.current_textline = self.dataset[step]
        self.current_index = step

        image = Image.open(self.current_textline.image_path)
        pred = self.current_textline.predict_text
        label = self.current_textline.textline()

        if image.size[0] * image.size[1] == 0:
            print(
                f'Width or height is 0. WxH = {image.size[0]}x{image.size[1]}')
            if self.is_able_to_next(step):
                self.next_image()
                return
            elif self.is_able_to_back(step):
                self.prev_image()
                return
            else:
                print('Done!')
                exit(0)
        self.pillow_image: Image.Image = image
        self.current_angle = 0
        self.current_line_index.setText(f'{self.current_index:05d}')
        self.pred_text.setText(pred)
        self.label_text.setText(label)
        self.loadImage(image)

        #### Resize font
        # Use binary search to efficiently find the biggest font that will fit.
        max_size = 27
        min_size = 1
        font = self.label_text.font()
        while 1 < max_size - min_size:
            new_size = (min_size + max_size) // 2
            font.setPointSize(new_size)
            metrics = QFontMetrics(font)

            target_rect = self.label_text.contentsRect()

            # Be careful which overload of boundingRect() you call.
            rect = metrics.boundingRect(target_rect, Qt.AlignLeft, label)
            if (rect.width() > target_rect.width()
                    or rect.height() > target_rect.height()):
                max_size = new_size
            else:
                min_size = new_size

        font.setPointSize(min_size)
        self.label_text.setFont(font)

        pred_font = self.pred_text.font()
        pred_font.setPointSize(min_size)
        self.pred_text.setFont(pred_font)

    def loadImage(self, pillow_image: Image.Image):
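        # Scale the text-line image to a fixed 64 px height, preserving aspect ratio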
        image_w, image_h = pillow_image.size
        target_h = 64
        factor = target_h / image_h
        image_w = factor * image_w
        image_h = factor * image_h
        image_w, image_h = int(image_w), int(image_h)
        pillow_image = pillow_image.resize((image_w, image_h))

        self.scrollArea.setVisible(True)
        self.image = ImageQt(pillow_image)
        self.imageLabel.setPixmap(QPixmap.fromImage(self.image))
        self.imageLabel.setFixedSize(image_w, image_h)

        self.adjustScrollBar(self.scrollArea.horizontalScrollBar(), 0)
        self.adjustScrollBar(self.scrollArea.verticalScrollBar(), 1.0)

        self.current_path_label.setText(str(self.current_textline.image_path))
        message = "{}, {}x{}, Depth: {}".format(
            self.current_textline.image_path, self.image.width(),
            self.image.height(), self.image.depth())
        self.statusBar().showMessage(message)
        return True

    def rotate(self):
        self.current_angle += 90
        rotated_image = self.pillow_image.rotate(self.current_angle,
                                                 expand=True)
        self.loadImage(rotated_image)
        self._is_rotate = True

    def adjustScrollBar(self, scrollBar: QScrollBar, factor: float):
        scrollBar.setValue(
            int(factor * scrollBar.value() +
                ((factor - 1) * scrollBar.pageStep() / 2)))
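
# A minimal launch sketch (assuming PyQt5, sys and pathlib.Path are imported;
# the file and directory names are placeholders):
#
# if __name__ == '__main__':
#     app = QApplication(sys.argv)
#     viewer = App(Path('labels.log'), Path('lines.txt'), Path('images'))
#     viewer.show()
#     sys.exit(app.exec_())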
Пример #12
0
class Drawer(QWidget):
    def __init__(self, parent=None):
        QWidget.__init__(self, parent)
        self.setAttribute(Qt.WA_StaticContents)
        self.h = 720
        self.w = 1280
        self.Font_Color = Qt.white
        self.myPenWidth = 15
        self.myPenColor = Qt.black
        self.image = QImage(self.w, self.h, QImage.Format_RGB32)
        self.path = QPainterPath()
        self.clearImage()
        self.flag = False
        self.colors = [[100, 0, 0], [150, 0, 0], [200, 0, 0], [0, 100, 0],
                       [0, 150, 0], [0, 200, 0], [0, 0, 100], [0, 0, 150],
                       [0, 0, 200]]
        self.color = 0

    def setPenColor(self, newColor):
        self.path = QPainterPath()
        self.myPenColor = newColor

    def setPenWidth(self):
        i, okBtnPressed = QInputDialog.getInt(self, "Размер кисти",
                                              "Введите размер кисти",
                                              self.myPenWidth, 1, 100, 1)
        if okBtnPressed:
            self.path = QPainterPath()
            self.myPenWidth = i
        self.myCursor()

    def setFont(self):
        color = QColorDialog.getColor()
        self.Font_Color = color
        self.clearImage()

    def clearImage(self):
        self.path = QPainterPath()
        self.image.fill(self.Font_Color)
        self.update()

    def saveImage(self):
        i, okBtnPressed = QInputDialog.getText(self, "Название файла",
                                               "Введите название файла")
        if okBtnPressed:
            name = i
            if "." in name:
                self.image.save(name, name.split(".")[-1].upper())
            else:
                i1, okBtnPressed1 = QInputDialog.getItem(
                    self, "Расширение", "Выберите расширение", ("jpg", "png"))
                self.image.save(f"{name}.{i1}", i1.upper())

    def loadImage(self):
        i, okBtnPressed = QInputDialog.getText(self, "Название файла",
                                               "Введите название файла")
        if okBtnPressed:
            name = i
            if "." in name:
                self.image = QImage(name, name.split(".")[-1].upper())

            else:
                i1, okBtnPressed1 = QInputDialog.getItem(
                    self, "Расширение", "Выберите расширение", ("jpg", "png"),
                    1, False)
                self.image = QImage(f"{name}.{i1}", i1.upper())

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.drawImage(event.rect(), self.image, self.rect())

    def mousePressEvent(self, event):

        if event.button() == Qt.LeftButton:
            self.flag = True
            self.path.moveTo(event.pos())

    def mouseMoveEvent(self, event):

        if self.flag:

            self.path.lineTo(event.pos())

            p = QPainter(self.image)

            p.setPen(
                QPen(self.myPenColor, self.myPenWidth, Qt.SolidLine,
                     Qt.RoundCap, Qt.RoundJoin))

            p.drawPath(self.path)

            p.end()

            self.update()

        else:

            self.path = QPainterPath()

    def sizeHint(self):
        if self.w == 1280 and self.h == 720:
            return QSize(1280, 720)
        return QSize(640, 480)

    def ChangeRes(self):
        if self.w == 1280 and self.h == 720:
            self.w = 640
            self.h = 480
        else:
            self.w = 1280
            self.h = 720
        self.update()
        self.image = QImage(self.w, self.h, QImage.Format_RGB32)
        self.path = QPainterPath()
        self.clearImage()

    def myCursor(self):

        cmap = QPixmap("cursor.png")

        cmap = cmap.scaled(self.myPenWidth + 50 * (1 + self.myPenWidth // 100),
                           self.myPenWidth + 50 * (1 + self.myPenWidth // 100))

        self.setCursor(QCursor(cmap))

    def run(self):

        color = QColorDialog.getColor()

        if color.isValid():

            self.setPenColor(color)

            self.path = QPainterPath()

    def filter_invert_color(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                pixels2[i, j] = 255 - r, 255 - g, 255 - b
        self.image = ImageQt(im2)
        self.update()
        self.path = QPainterPath()

    def filter_black_and_white(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                bw = (r + g + b) // 3
                pixels2[i, j] = bw, bw, bw

        self.image = ImageQt(im2)
        self.update()
        self.path = QPainterPath()

    def filter_(self):
        x, y = self.image.width(), self.image.height()
        im2 = Image.new("RGB", (x, y), (0, 0, 0))
        pixels2 = im2.load()

        for i in range(x):
            for j in range(y):
                r, g, b = self.image.pixelColor(i, j).getRgb()[:-1]
                bw = (r + g + b) // 3
                pixels2[i, j] = bw, bw, bw

        self.image = ImageQt(im2)
        self.update()
        self.path = QPainterPath()
Пример #13
0
class DrawingArea(QWidget):
    loadSignal = pyqtSignal()
    profileSignal = pyqtSignal(list)

    def __init__(self):
        super(DrawingArea, self).__init__()

        self.image_path = None
        self.loadImage()

        self.drawing = False
        self.last_point = QPoint()
        self.lines = []
        self.profiles = []

        self.setUpPen()

    # Main drawing functionalities
    def mousePressEvent(self, event):
        if event.button() == Qt.LeftButton:
            self.last_point = event.pos()
            self.drawing = True

    def mouseMoveEvent(self, event):
        if self.drawing and (event.buttons() & Qt.LeftButton):
            self.updateImage()
            self.drawLineOnImage(self.last_point, event.pos())

    def mouseReleaseEvent(self, event):
        if event.button() == Qt.LeftButton and self.drawing:
            self.image_lastdrawn = self.image.copy()
            self.drawing = False
            self.lines.append((self.last_point, event.pos()))
            self.extractLine()
            emit_value = [self.profiles[-1], self.lines[-1]]
            self.profileSignal.emit(emit_value)

    # Auxilliary drawing functionalities
    def updateImage(self):
        self.image = self.image_lastdrawn.copy()
        self.update()

    def drawLineOnImage(self, start, end):
        # Setting canvas
        painter = QPainter(self.image)
        painter.drawImage(self.rect(), self.image)
        painter.setPen(self.pen)

        # Draw line
        painter.drawLine(start, end)
        self.update()

    def paintEvent(self, event):
        painter = QPainter(self)
        painter.drawImage(event.rect(), self.image)
        painter.setPen(self.pen)

        # for line in self.lines:
        #     start, end = line
        #     painter.drawLine(start, end)


    def setUpPen(self, width=5, color='orange'):
        self.pen = QPen(QColor(color), width, Qt.SolidLine, Qt.RoundCap,
                        Qt.RoundJoin)

    # Line profile functionalities
    def extractLine(self):
        start, end = self.lines[-1]
        x0, y0 = max(start.x(), 0), max(start.y(), 0)
        x1, y1 = min(end.x(), self.image_grayscale.shape[1] - 1), min(
            end.y(), self.image_grayscale.shape[0] - 1)
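        # Sample evenly spaced points along the drawn line and read their
        # grayscale intensities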
        num = int(np.hypot(x1 - x0, y1 - y0))
        rows, cols = np.linspace(y0, y1, num), np.linspace(x0, x1, num)
        profile = self.image_grayscale[rows.astype(int),
                                       cols.astype(int)]
        self.profiles.append(list(profile))

    # Other functionalities
    def loadImage(self):
        # Unfix widget size
        self.setMaximumSize(QWIDGETSIZE_MAX, QWIDGETSIZE_MAX)
        self.setMinimumSize(0, 0)

        # Load image
        # if self.image_path is None, create dummy image
        if self.image_path is None:
            self.image_grayscale = np.array(Image.new('L', (600, 400), 255))
            self.image = ImageQt(Image.new('RGB', (600, 400), (255, 255, 255)))
            self.image_lastdrawn = self.image.copy()

        else:
            # Store grayscale image to self.image_grayscale
            self.image_grayscale = np.array(
                Image.open(self.image_path).convert('L'))
            image = QImage(self.image_path)
            if image.isGrayscale():
                # Create RGB version using PIL
                image_rgb = Image.open(self.image_path).convert('RGB')

                # Store RGB version of image in self.image, self.image_original
                self.image = ImageQt(image_rgb)
                self.image_lastdrawn = self.image.copy()

            else:
                # Store original and drawn image as it is
                self.image = image
                self.image_lastdrawn = image.copy()

        # Refix widget size
        self.setFixedSize(self.image.width(), self.image.height())

        # Flush lines
        self.lines = []
        self.profiles = []
        self.loadSignal.emit()
        self.update()

    def saveImage(self, file_name):
        self.image.save(file_name, 'PNG', -1)


# def main():
#     app = QApplication(sys.argv)

#     demo = DrawingArea()
#     demo.show()

#     sys.exit(app.exec_())

# main()