Example #1
    def on_new_frame(self, camera):
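        """Camera frame callback: lock the frame, track the real acquisition
        frame rate, crop the image, then forward it to the display, the
        volumetric array display, the writer, and the tracker (display,
        saving, and tracking are each throttled to their own target fps)."""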

        camera.image_locked = True
        self.handler_busy = True
        self.signal_new_frame_received.emit()  # self.liveController.turn_off_illumination()

        # measure real fps
        timestamp_now = round(time.time())
        if timestamp_now == self.timestamp_last:
            self.counter = self.counter + 1
        else:
            self.timestamp_last = timestamp_now
            self.fps_real = self.counter
            self.counter = 0
            print('real camera fps is ' + str(self.fps_real))

        # crop image
        image_cropped = utils.crop_image(camera.current_frame, self.crop_width,
                                         self.crop_height)
        image_cropped = np.squeeze(image_cropped)

        # send image to display
        time_now = time.time()
        if time_now - self.timestamp_last_display >= 1 / self.fps_display:
            # self.image_to_display.emit(cv2.resize(image_cropped,(round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)),cv2.INTER_LINEAR))
            self.image_to_display.emit(
                utils.crop_image(
                    image_cropped,
                    round(self.crop_width * self.display_resolution_scaling),
                    round(self.crop_height * self.display_resolution_scaling)))
            self.timestamp_last_display = time_now

        # send image to array display
        self.packet_image_for_array_display.emit(
            image_cropped,
            (camera.frame_ID - camera.frame_ID_offset_hardware_trigger - 1) %
            VOLUMETRIC_IMAGING.NUM_PLANES_PER_VOLUME)

        # send image to write
        if self.save_image_flag and time_now - self.timestamp_last_save >= 1 / self.fps_save:
            if camera.is_color:
                image_cropped = cv2.cvtColor(image_cropped, cv2.COLOR_RGB2BGR)
            self.packet_image_to_write.emit(image_cropped, camera.frame_ID,
                                            camera.timestamp)
            self.timestamp_last_save = time_now

        # send image to track
        if self.track_flag and time_now - self.timestamp_last_track >= 1 / self.fps_track:
            # track is a blocking operation - it needs to be
            # @@@ will cropping before emitting the signal lead to speedup?
            self.packet_image_for_tracking.emit(image_cropped, camera.frame_ID,
                                                camera.timestamp)
            self.timestamp_last_track = time_now

        self.handler_busy = False
        camera.image_locked = False
Example #2
def save_gallery(formset, place):
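    """Save a gallery formset: copy the image staged in TempGallery (if any)
    onto each saved instance, then crop existing instances using the crop
    coordinates submitted with the form."""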
    formset.save()
    for inst, form in zip(formset.get_queryset(), formset):
        if TempGallery.objects.filter(gallery=inst).exists():
            temp = TempGallery.objects.get(gallery=inst)
            inst.image = temp.image
            inst.title = form.instance.title
            inst.save()
        if form.instance.id:
            crop = [form.cleaned_data.pop(key)
                    for key in ('crop_x', 'crop_y', 'crop_x2', 'crop_y2')]
            if all(crop):
                crop_image(inst.image, *crop)
Example #3
    def _run_single_acquisition(self):
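        """Run one time point of the multipoint acquisition: pause live view
        and the camera callback, scan the NY x NX grid with an NZ-plane
        z-stack at each FOV (with optional autofocus), save the cropped BF/DF
        and fluorescence images, then restore the stage position, the camera
        callback, and live view and schedule the next time point."""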
        self.single_acquisition_in_progress = True
        self.FOV_counter = 0

        print('multipoint acquisition - time point ' + str(self.time_point))

        # stop live
        if self.liveController.is_live:
            self.liveController.was_live_before_multipoint = True
            self.liveController.stop_live()  # @@@ to do: also uncheck the live button

        # disable callback
        if self.camera.callback_is_enabled:
            self.camera.callback_was_enabled_before_multipoint = True
            self.camera.stop_streaming()
            self.camera.disable_callback()
            self.camera.start_streaming()  # @@@ to do: absorb stop/start streaming into enable/disable callback - add a flag is_streaming to the camera class

        # do the multipoint acquisition

        # for each time point, create a new folder
        current_path = os.path.join(self.base_path, self.experiment_ID,
                                    str(self.time_point))
        os.mkdir(current_path)

        # along y
        for i in range(self.NY):

            # along x
            for j in range(self.NX):

                # z-stack
                for k in range(self.NZ):

                    # perform AF only when not taking a z-stack
                    if (self.NZ == 1) and (self.do_autofocus) and (
                            self.FOV_counter %
                            Acquisition.NUMBER_OF_FOVS_PER_AF == 0):
                        self.autofocusController.autofocus()

                    file_ID = str(i) + '_' + str(j) + '_' + str(k)

                    # take bf
                    if self.do_bfdf:
                        self.liveController.set_microscope_mode(
                            MicroscopeMode.BFDF)
                        self.liveController.turn_on_illumination()
                        print('take bf image')
                        self.camera.send_trigger()
                        image = self.camera.read_frame()
                        self.liveController.turn_off_illumination()
                        image = utils.crop_image(image, self.crop_width,
                                                 self.crop_height)
                        saving_path = os.path.join(
                            current_path,
                            file_ID + '_bf' + '.' + Acquisition.IMAGE_FORMAT)
                        # self.image_to_display.emit(cv2.resize(image,(round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)),cv2.INTER_LINEAR))
                        self.image_to_display.emit(
                            utils.crop_image(
                                image,
                                round(self.crop_width *
                                      self.display_resolution_scaling),
                                round(self.crop_height *
                                      self.display_resolution_scaling)))
                        if self.camera.is_color:
                            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(saving_path, image)
                        QApplication.processEvents()

                    # take fluorescence
                    if self.do_fluorescence:
                        self.liveController.set_microscope_mode(
                            MicroscopeMode.FLUORESCENCE)
                        self.liveController.turn_on_illumination()
                        self.camera.send_trigger()
                        image = self.camera.read_frame()
                        print('take fluorescence image')
                        self.liveController.turn_off_illumination()
                        image = utils.crop_image(image, self.crop_width,
                                                 self.crop_height)
                        saving_path = os.path.join(
                            current_path, file_ID + '_fluorescence' + '.' +
                            Acquisition.IMAGE_FORMAT)
                        self.image_to_display.emit(
                            utils.crop_image(
                                image,
                                round(self.crop_width *
                                      self.display_resolution_scaling),
                                round(self.crop_height *
                                      self.display_resolution_scaling)))
                        # self.image_to_display.emit(cv2.resize(image,(round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)),cv2.INTER_LINEAR))
                        if self.camera.is_color:
                            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(saving_path, image)
                        QApplication.processEvents()

                    if not self.do_bfdf and not self.do_fluorescence:
                        QApplication.processEvents()

                    # move z
                    if k < self.NZ - 1:
                        self.navigationController.move_z(self.deltaZ)

                # move z back
                self.navigationController.move_z(-self.deltaZ * (self.NZ - 1))

                # update FOV counter
                self.FOV_counter = self.FOV_counter + 1

                # move x
                if j < self.NX - 1:
                    self.navigationController.move_x(self.deltaX)

            # move x back
            self.navigationController.move_x(-self.deltaX * (self.NX - 1))

            # move y
            if i < self.NY - 1:
                self.navigationController.move_y(self.deltaY)

        # move y back
        self.navigationController.move_y(-self.deltaY * (self.NY - 1))

        # re-enable callback
        if self.camera.callback_was_enabled_before_multipoint:
            self.camera.stop_streaming()
            self.camera.enable_callback()
            self.camera.start_streaming()
            self.camera.callback_was_enabled_before_multipoint = False

        if self.liveController.was_live_before_multipoint:
            self.liveController.start_live()
            # emit acquisitionFinished signal
            self.acquisitionFinished.emit()

        # update time_point for the next scheduled single acquisition (if any)
        self.time_point = self.time_point + 1

        if self.time_point >= self.Nt:
            print('Multipoint acquisition finished')
            if self.acquisitionTimer.isActive():
                self.acquisitionTimer.stop()
            self.acquisitionFinished.emit()

        self.single_acquisition_in_progress = False
Example #4
    def autofocus(self):
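        """Sweep the stage through N z-steps, compute a focus measure on each
        cropped frame, stop early once the measure falls below a fraction of
        the running maximum, then move back to the plane with the best focus
        measure. Live view and the camera callback are paused during the
        sweep and restored afterwards."""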

        # stop live
        if self.liveController.is_live:
            self.liveController.was_live_before_autofocus = True
            self.liveController.stop_live()

        # temporarily disable callback -> the image does not go through streamHandler
        if self.camera.callback_is_enabled:
            self.camera.callback_was_enabled_before_autofocus = True
            self.camera.stop_streaming()
            self.camera.disable_callback()
            self.camera.start_streaming()  # @@@ to do: absorb stop/start streaming into enable/disable callback - add a flag is_streaming to the camera class

        # @@@ to add: increase gain, decrease exposure time
        # @@@ can move the execution into a thread
        focus_measure_vs_z = [0] * self.N
        focus_measure_max = 0

        z_af_offset = self.deltaZ * round(self.N / 2)
        self.navigationController.move_z(-z_af_offset)

        steps_moved = 0
        for i in range(self.N):
            self.navigationController.move_z(self.deltaZ)
            steps_moved = steps_moved + 1
            self.liveController.turn_on_illumination()
            self.camera.send_trigger()
            image = self.camera.read_frame()
            self.liveController.turn_off_illumination()
            image = utils.crop_image(image, self.crop_width, self.crop_height)
            self.image_to_display.emit(image)
            QApplication.processEvents()
            timestamp_0 = time.time()  # @@@ to remove
            focus_measure = utils.calculate_focus_measure(image)
            timestamp_1 = time.time()  # @@@ to remove
            print('             calculating focus measure took ' +
                  str(timestamp_1 - timestamp_0) + ' seconds')
            focus_measure_vs_z[i] = focus_measure
            print(i, focus_measure)
            focus_measure_max = max(focus_measure, focus_measure_max)
            if focus_measure < focus_measure_max * AF.STOP_THRESHOLD:
                break

        idx_in_focus = focus_measure_vs_z.index(max(focus_measure_vs_z))
        self.navigationController.move_z(
            (idx_in_focus - steps_moved) * self.deltaZ)
        if idx_in_focus == 0:
            print('moved to the bottom end of the AF range')
        if idx_in_focus == self.N - 1:
            print('moved to the top end of the AF range')

        if self.camera.callback_was_enabled_before_autofocus:
            self.camera.stop_streaming()
            self.camera.enable_callback()
            self.camera.start_streaming()
            self.camera.callback_was_enabled_before_autofocus = False

        if self.liveController.was_live_before_autofocus:
            self.liveController.start_live()
            self.liveController.was_live_before_autofocus = False

        print('autofocus finished')
        self.autofocusFinished.emit()
Example #5
    def _run_multipoint_single(self):
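        """Acquire one time point with two cameras: for each z-plane and each
        selected configuration, trigger camera1 and camera2, save the cropped
        frames, and emit downscaled copies for display, then return the stage
        to the starting z position."""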

        self.FOV_counter = 0
        print('multipoint acquisition - time point ' + str(self.time_point))

        # do the multipoint acquisition

        # for each time point, create a new folder
        current_path = os.path.join(self.base_path, self.experiment_ID,
                                    str(self.time_point))
        os.mkdir(current_path)

        # z-stack
        for k in range(self.NZ):

            file_ID = str(k)

            # iterate through selected modes
            for config in self.selected_configurations:
                self.signal_current_configuration.emit(config)

                self.camera1.send_trigger()
                image = self.camera1.read_frame()
                image = utils.crop_image(image, self.crop_width,
                                         self.crop_height)
                saving_path = os.path.join(
                    current_path, 'camera1_' + file_ID + str(config.name) +
                    '.' + Acquisition.IMAGE_FORMAT)
                image_to_display = utils.crop_image(
                    image,
                    round(self.crop_width *
                          self.liveController1.display_resolution_scaling),
                    round(self.crop_height *
                          self.liveController1.display_resolution_scaling))
                self.image_to_display_camera1.emit(image_to_display)
                if self.camera1.is_color:
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                cv2.imwrite(saving_path, image)

                self.camera2.send_trigger()
                image = self.camera2.read_frame()
                image = utils.crop_image(image, self.crop_width,
                                         self.crop_height)
                saving_path = os.path.join(
                    current_path, 'camera2_' + file_ID + str(config.name) +
                    '.' + Acquisition.IMAGE_FORMAT)
                image_to_display = utils.crop_image(
                    image,
                    round(self.crop_width *
                          self.liveController2.display_resolution_scaling),
                    round(self.crop_height *
                          self.liveController2.display_resolution_scaling))
                self.image_to_display_camera2.emit(image_to_display)
                if self.camera2.is_color:
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                cv2.imwrite(saving_path, image)

                QApplication.processEvents()

            # QApplication.processEvents()

            # move z
            if k < self.NZ - 1:
                self.navigationController.move_z_usteps(self.deltaZ_usteps)

        # move z back
        self.navigationController.move_z_usteps(-self.deltaZ_usteps *
                                                (self.NZ - 1))

        # update FOV counter
        self.FOV_counter = self.FOV_counter + 1
Example #6
    def _run_multipoint_single(self):
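        """Acquire one time point over the NY x NX grid: at each FOV run the
        (optional) autofocus, step through the z-stack, and for each selected
        configuration capture, crop, save, and display an image, then return
        the stage to its starting position."""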

        self.FOV_counter = 0
        print('multipoint acquisition - time point ' + str(self.time_point))

        # do the multipoint acquisition

        # for each time point, create a new folder
        current_path = os.path.join(self.base_path, self.experiment_ID,
                                    str(self.time_point))
        os.mkdir(current_path)

        # along y
        for i in range(self.NY):

            # along x
            for j in range(self.NX):

                # z-stack
                for k in range(self.NZ):

                    # perform AF only when not taking a z-stack
                    # if (self.NZ == 1) and (self.do_autofocus) and (self.FOV_counter%Acquisition.NUMBER_OF_FOVS_PER_AF==0):
                    # temporary: replace the above line with the line below to AF every FOV
                    if (self.NZ == 1) and (self.do_autofocus):
                        # temporary (next 3 lines, to be modified) - use 405 nm for autofocus
                        configuration_name_AF = 'Fluorescence 405 nm Ex'
                        config_AF = next(
                            (config for config in
                             self.configurationManager.configurations
                             if config.name == configuration_name_AF))
                        self.signal_current_configuration.emit(config_AF)
                        self.autofocusController.autofocus()

                    if (self.NZ > 1):
                        # maneuver for achieving uniform step size and repeatability when using open-loop control
                        self.navigationController.move_z_usteps(80)
                        time.sleep(0.1)
                        self.navigationController.move_z_usteps(-80)
                        time.sleep(0.1)

                    file_ID = str(i) + '_' + str(j) + '_' + str(k)

                    # iterate through selected modes
                    for config in self.selected_configurations:
                        # self.liveController.set_microscope_mode(config)
                        self.signal_current_configuration.emit(config)
                        self.liveController.turn_on_illumination()
                        self.camera.send_trigger()
                        image = self.camera.read_frame()
                        self.liveController.turn_off_illumination()
                        image = utils.crop_image(image, self.crop_width,
                                                 self.crop_height)
                        saving_path = os.path.join(
                            current_path, file_ID + str(config.name) + '.' +
                            Acquisition.IMAGE_FORMAT)
                        # self.image_to_display.emit(cv2.resize(image,(round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)),cv2.INTER_LINEAR))
                        image_to_display = utils.crop_image(
                            image,
                            round(
                                self.crop_width *
                                self.liveController.display_resolution_scaling
                            ),
                            round(
                                self.crop_height *
                                self.liveController.display_resolution_scaling)
                        )
                        self.image_to_display.emit(image_to_display)
                        self.image_to_display_multi.emit(
                            image_to_display, config.illumination_source)
                        if self.camera.is_color:
                            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(saving_path, image)
                        QApplication.processEvents()

                    # QApplication.processEvents()

                    # move z
                    if k < self.NZ - 1:
                        self.navigationController.move_z_usteps(
                            self.deltaZ_usteps)

                # move z back
                self.navigationController.move_z_usteps(-self.deltaZ_usteps *
                                                        (self.NZ - 1))

                # update FOV counter
                self.FOV_counter = self.FOV_counter + 1

                # move x
                if j < self.NX - 1:
                    self.navigationController.move_x_usteps(self.deltaX_usteps)

            # move x back
            self.navigationController.move_x_usteps(-self.deltaX_usteps *
                                                    (self.NX - 1))

            # move y
            if i < self.NY - 1:
                self.navigationController.move_y_usteps(self.deltaY_usteps)

        # move y back
        self.navigationController.move_y_usteps(-self.deltaY_usteps *
                                                (self.NY - 1))

    def run_single_time_point(self):
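        """Plate-reader scan for one time point: home the stage, then visit
        the selected columns and all rows in a serpentine pattern, running
        optional autofocus and a z-stack at each well, and capture, crop,
        save, and display an image for each selected configuration."""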
        self.FOV_counter = 0
        column_counter = 0
        print('multipoint acquisition - time point ' +
              str(self.time_point + 1))

        # for each time point, create a new folder
        current_path = os.path.join(self.base_path, self.experiment_ID,
                                    str(self.time_point))
        os.mkdir(current_path)

        # run homing
        self.plateReaderNavigationController.home()
        self.wait_till_operation_is_completed()

        # row scan direction
        row_scan_direction = 1  # 1: A -> H, 0: H -> A

        # go through columns
        for column in self.selected_columns:

            # increment counter
            column_counter = column_counter + 1

            # move to the current column
            self.plateReaderNavigationController.moveto_column(column - 1)
            self.wait_till_operation_is_completed()
            '''
            # row homing
            if column_counter > 1:
                self.plateReaderNavigationController.home_y()
                self.wait_till_operation_is_completed()
            '''

            # go through rows
            for row in range(PLATE_READER.NUMBER_OF_ROWS):

                if row_scan_direction == 0:  # reverse scan:
                    row = PLATE_READER.NUMBER_OF_ROWS - 1 - row

                row_str = chr(ord('A') + row)
                file_ID = row_str + str(column)

                # move to the selected row
                self.plateReaderNavigationController.moveto_row(row)
                self.wait_till_operation_is_completed()
                time.sleep(SCAN_STABILIZATION_TIME_MS_Y / 1000)

                # AF
                if (self.NZ == 1) and (self.do_autofocus) and (
                        self.FOV_counter % Acquisition.NUMBER_OF_FOVS_PER_AF
                        == 0):
                    configuration_name_AF = 'BF LED matrix full'
                    config_AF = next(
                        (config
                         for config in self.configurationManager.configurations
                         if config.name == configuration_name_AF))
                    self.signal_current_configuration.emit(config_AF)
                    self.autofocusController.autofocus()
                    self.autofocusController.wait_till_autofocus_has_completed()

                # z stack
                for k in range(self.NZ):

                    if (self.NZ > 1):
                        # update file ID
                        file_ID = file_ID + '_' + str(k)
                        # maneuver for achieving uniform step size and repeatability when using open-loop control
                        self.plateReaderNavigationController.move_z_usteps(80)
                        self.wait_till_operation_is_completed()
                        self.plateReaderNavigationController.move_z_usteps(-80)
                        self.wait_till_operation_is_completed()
                        time.sleep(SCAN_STABILIZATION_TIME_MS_Z / 1000)

                    # iterate through selected modes
                    for config in self.selected_configurations:
                        self.signal_current_configuration.emit(config)
                        self.wait_till_operation_is_completed()
                        self.liveController.turn_on_illumination()
                        self.wait_till_operation_is_completed()
                        self.camera.send_trigger()
                        image = self.camera.read_frame()
                        self.liveController.turn_off_illumination()
                        image = utils.crop_image(image, self.crop_width,
                                                 self.crop_height)
                        saving_path = os.path.join(
                            current_path, file_ID + '_' + str(config.name) +
                            '.' + Acquisition.IMAGE_FORMAT)
                        # self.image_to_display.emit(cv2.resize(image,(round(self.crop_width*self.display_resolution_scaling), round(self.crop_height*self.display_resolution_scaling)),cv2.INTER_LINEAR))
                        # image_to_display = utils.crop_image(image,round(self.crop_width*self.liveController.display_resolution_scaling), round(self.crop_height*self.liveController.display_resolution_scaling))
                        image_to_display = utils.crop_image(
                            image, round(self.crop_width),
                            round(self.crop_height))
                        self.image_to_display.emit(image_to_display)
                        self.image_to_display_multi.emit(
                            image_to_display, config.illumination_source)
                        if self.camera.is_color:
                            image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                        cv2.imwrite(saving_path, image)
                        QApplication.processEvents()

                    if (self.NZ > 1):
                        # move z
                        if k < self.NZ - 1:
                            self.plateReaderNavigationController.move_z_usteps(
                                self.deltaZ_usteps)
                            self.wait_till_operation_is_completed()
                            time.sleep(SCAN_STABILIZATION_TIME_MS_Z / 1000)

                if self.NZ > 1:
                    # move z back
                    self.plateReaderNavigationController.move_z_usteps(
                        -self.deltaZ_usteps * (self.NZ - 1))
                    self.wait_till_operation_is_completed()

                if self.abort_acquisition_requested:
                    return

            # update row scan direction
            row_scan_direction = 1 - row_scan_direction