Example 1
    def registration(self, grid_points_x=3, grid_points_y=3):
        galvothread = DAQmission()
        readinchan = []

        x_coords = np.linspace(-10, 10, grid_points_x + 2)[1:-1]
        y_coords = np.linspace(-10, 10, grid_points_y + 2)[1:-1]

        xy_mesh = np.reshape(np.meshgrid(x_coords, y_coords), (2, -1),
                             order='F').transpose()

        galvo_coordinates = xy_mesh
        camera_coordinates = np.zeros((galvo_coordinates.shape))

        for i in range(galvo_coordinates.shape[0]):

            galvothread.sendSingleAnalog('galvosx', galvo_coordinates[i, 0])
            galvothread.sendSingleAnalog('galvosy', galvo_coordinates[i, 1])
            time.sleep(1)

            image = self.cam.SnapImage(0.06)
            plt.imsave(
                os.getcwd() +
                '/CoordinatesManager/Registration_Images/2P/image_' + str(i) +
                '.png', image)

            camera_coordinates[i, :] = readRegistrationImages.gaussian_fitting(
                image)

        print('Galvo coordinates')
        print(galvo_coordinates)
        print('Camera coordinates')
        print(camera_coordinates)
        del galvothread
        self.cam.Exit()

        transformation = CoordinateTransformations.polynomial2DFit(
            camera_coordinates, galvo_coordinates, order=1)

        print('Transformation found for x:')
        print(transformation[:, :, 0])
        print('Transformation found for y:')
        print(transformation[:, :, 1])
        return transformation
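
A quick way to see what the grid-generation step above produces: the meshgrid reshape yields one (x, y) galvo target per row. A minimal, standalone numpy sketch (no DAQ or camera needed):

import numpy as np

grid_points_x, grid_points_y = 3, 3
x_coords = np.linspace(-10, 10, grid_points_x + 2)[1:-1]  # drop the edges of the -10..10 scan range
y_coords = np.linspace(-10, 10, grid_points_y + 2)[1:-1]

# Same reshape as in registration(): one (x, y) pair per row.
xy_mesh = np.reshape(np.meshgrid(x_coords, y_coords), (2, -1), order='F').transpose()

print(xy_mesh.shape)  # (9, 2)
print(xy_mesh)        # all 9 combinations of the 3 x- and 3 y-positions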
Example 2
    def gather_reference_coordinates_galvos(self):
        galvothread = DAQmission()
        readinchan = []

        camera_coordinates = np.zeros((3, 2))
        galvo_coordinates = np.array([[0, 3], [3, -3], [-3, -3]])

        for i in range(3):
            pos_x = galvo_coordinates[i, 0]
            pos_y = galvo_coordinates[i, 1]

            galvothread.sendSingleAnalog('galvosx', pos_x)
            galvothread.sendSingleAnalog('galvosy', pos_y)

            image = self.cam.SnapImage(0.04)

            camera_coordinates[i, :] = gaussian_fitting(image)

        del galvothread
        return np.array([camera_coordinates, galvo_coordinates])
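
The three matched (camera, galvo) point pairs returned above are enough to determine a first-order (affine) mapping between camera and galvo coordinates. CoordinateTransformations.polynomial2DFit is not shown here, so the sketch below only illustrates the idea with a plain least-squares solve and made-up camera coordinates:

import numpy as np

# Hypothetical matched points: camera positions in pixels, galvo targets as above.
camera_coordinates = np.array([[1020.0, 1500.0], [1500.0, 540.0], [540.0, 540.0]])
galvo_coordinates = np.array([[0.0, 3.0], [3.0, -3.0], [-3.0, -3.0]])

# Design matrix [x, y, 1]; solve A @ M ~= galvo for a 3x2 affine matrix M.
A = np.hstack([camera_coordinates, np.ones((3, 1))])
M, *_ = np.linalg.lstsq(A, galvo_coordinates, rcond=None)

# Map a camera point through the fitted transform.
print(np.array([1020.0, 1500.0, 1.0]) @ M)  # ~[0, 3]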
Example 3
    def execute_tread_single_sample_analog(self, channel):
        daq = DAQmission()
        if channel == '640AO':
            self.lasers_status['640'][1] = self.slider640.value()

            daq.sendSingleAnalog(channel, self.slider640.value() / 100)

        elif channel == '532AO':
            self.lasers_status['532'][1] = self.slider532.value()
            daq.sendSingleAnalog(channel, self.slider532.value() / 100)

        elif channel == '488AO':
            self.lasers_status['488'][1] = self.slider488.value()
            daq.sendSingleAnalog(channel, self.slider488.value() / 100)

        self.sig_lasers_status_changed.emit(self.lasers_status)
Example 4
class Servo:
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.sampling_rate = 10000
        self.PWM_frequency = 50
        self.mission = DAQmission()

    def power_on(self):
        self.mission.sendSingleDigital(channel='servo_power_1', value=True)


#        time.sleep(1.5)

    def power_off(self):
        self.mission.sendSingleDigital(channel='servo_power_1', value=False)

    def rotate(self, degree):
        # Convert degree to a PWM duty cycle (a fixed duty cycle is used below; the degree-based formula is commented out).
        if degree >= 0 and degree <= 360:
            dutycycle = 0.02  #round((degree/180) * 0.05 + 0.05, 6)

            PWM_wave = self.blockWave(self.sampling_rate,
                                      self.PWM_frequency,
                                      dutycycle,
                                      repeats=50)

            PWM_wave = np.where(PWM_wave == 0, False, True)
            #            plt.figure()
            #            plt.plot(PWM_wave)
            #            plt.show()
            PWM_wave_organized = np.array(
                [('servo_modulation_1', PWM_wave),
                 ('servo_power_1', np.ones(len(PWM_wave), dtype=bool))],
                dtype=[('Sepcification', 'U20'),
                       ('Waveform', bool, (len(PWM_wave), ))])

            self.mission.runWaveforms(clock_source="DAQ",
                                      sampling_rate=self.sampling_rate,
                                      analog_signals={},
                                      digital_signals=PWM_wave_organized,
                                      readin_channels={})

        else:
            print('Rotation degree out of range!')

    def blockWave(self,
                  sampleRate,
                  frequency,
                  dutycycle,
                  repeats,
                  voltMin=0,
                  voltMax=5):
        """
        Generates a one period blockwave. 
        sampleRate      samplerate set on the DAQ (int)
        frequency       frequency you want for the block wave (int)
        voltMin         minimum value of the blockwave (float)
        voltMax         maximum value of the blockwave (float)
        dutycycle       duty cycle of the wave (wavelength at voltMax) (float)
        """
        wavelength = int(sampleRate /
                         frequency)  #Wavelength in number of samples
        #The high values
        high = np.ones(math.ceil(wavelength * dutycycle)) * voltMax
        #Low values
        low = np.ones(math.floor(wavelength * (1 - dutycycle))) * voltMin
        #Adding them
        single_period = np.append(high, low)
        """
        Repeats the wave a set number of times and returns a new repeated wave.
        """
        extendedWave = np.array([])
        for i in range(repeats):
            extendedWave = np.append(extendedWave, single_period)

        return extendedWave
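
For the numbers used in Servo above (10 kHz sampling, 50 Hz PWM, duty cycle 0.02), one period of blockWave is 200 samples with the first 4 samples high. A small standalone check of that arithmetic, using the same expressions as blockWave:

import math
import numpy as np

sampling_rate = 10000  # samples per second, as in Servo.__init__
PWM_frequency = 50     # Hz
dutycycle = 0.02

wavelength = int(sampling_rate / PWM_frequency)               # 200 samples per period
high = np.ones(math.ceil(wavelength * dutycycle)) * 5         # 4 samples at voltMax
low = np.ones(math.floor(wavelength * (1 - dutycycle))) * 0   # 196 samples at voltMin
single_period = np.append(high, low)

print(wavelength, len(high), len(low), len(single_period))    # 200 4 196 200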
Example 5
    def control_for_registration(self, wavelength, value):
        value = int(value)
        daq = DAQmission()

        if value == 0:
            switch = False
        else:
            switch = True

        if wavelength == '640':
            print(wavelength + ':' + str(value))
            print(str(switch))
            daq.sendSingleAnalog('640AO', value)

            daq.sendSingleDigital('640blanking', switch)

        elif wavelength == '532':
            print(wavelength + ':' + str(value))
            print(str(switch))
            daq.sendSingleAnalog('532AO', value)

            daq.sendSingleDigital('640blanking', switch)

        else:
            print(wavelength + ':' + str(value))
            print(str(switch))
            daq.sendSingleAnalog('488AO', value)

            daq.sendSingleDigital('640blanking', switch)
Example 6
    def execute_tread_single_sample_digital(self, channel):
        daq = DAQmission()
        if channel == '640blanking':
            if self.switchbutton_blankingAll.isChecked():
                self.lasers_status['640'][0] = True
                daq.sendSingleDigital(channel, True)

            else:
                self.lasers_status['640'][0] = False
                daq.sendSingleDigital(channel, False)

        elif channel == '532blanking':
            if self.switchbutton_532.isChecked():
                self.lasers_status['532'][0] = True
                daq.sendSingleDigital(channel, True)

            else:
                self.lasers_status['532'][0] = False
                daq.sendSingleDigital(channel, False)

        elif channel == '488blanking':
            if self.switchbutton_488.isChecked():
                self.lasers_status['488'][0] = True
                daq.sendSingleDigital(channel, True)
            else:
                self.lasers_status['488'][0] = False
                daq.sendSingleDigital(channel, False)

        self.sig_lasers_status_changed.emit(self.lasers_status)
class PatchclampSealTestUI(QWidget):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.saving_dir = r'M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Patch clamp\seal_test'

        #------------------------Initiating patchclamp class-------------------
        self.sealTest = PatchclampSealTest()
        self.sealTest.measurementThread.measurement.connect(
            self.handleMeasurement)  #Connecting to the measurement signal

        self.holdTest = PatchclampSealTest_hold()
        self.holdTest.measurementThread_hold.measurement.connect(
            self.handleMeasurement)  #Connecting to the measurement signal

        self.currentclampTest = PatchclampSealTest_currentclamp()
        self.currentclampTest.measurementThread_currentclamp.measurement.connect(
            self.handleMeasurement)  #Connecting to the measurement signal

        self.zapfunction = PatchclampSealTest_zap()
        #----------------------------------------------------------------------
        #----------------------------------GUI---------------------------------
        #----------------------------------------------------------------------
        self.setFixedHeight(700)
        self.setWindowTitle("Patchclamp Seal Test")

        self.ICON_RED_LED = "./Icons/off.png"
        self.ICON_GREEN_LED = "./Icons/on.png"
        self.is_sealtesting = False

        #------------------------------Gains-----------------------------------
        gainContainer = StylishQT.roundQGroupBox(title="Gains")
        # gainContainer.setFixedWidth(320)
        gainLayout = QGridLayout()

        gainLayout.addWidget(QLabel("Input Voltage"), 0, 0)
        self.inVolGainList = QComboBox()
        self.inVolGainList.addItems(['1/10', '1', '1/50'])
        gainLayout.addWidget(self.inVolGainList, 1, 0)

        gainLayout.addWidget(QLabel("Output Voltage"), 0, 1)
        self.outVolGainList = QComboBox()
        self.outVolGainList.addItems(['10', '2', '5', '1', '20', '50', '100'])
        gainLayout.addWidget(self.outVolGainList, 1, 1)

        gainLayout.addWidget(QLabel("Output Current"), 0, 2)
        self.outCurGainList = QComboBox()
        self.outCurGainList.addItems(['1', '2', '5', '10', '20', '50', '100'])
        gainLayout.addWidget(self.outCurGainList, 1, 2)

        gainLayout.addWidget(QLabel("Probe"), 0, 3)
        self.probeGainList = QComboBox()
        self.probeGainList.addItems(['100M\u03A9', '10G\u03A9'])
        gainLayout.addWidget(self.probeGainList, 1, 3)

        gainContainer.setLayout(gainLayout)
        #------------------------------Wavesettings-----------------------------------
        WavesettingsContainer = StylishQT.roundQGroupBox(title="Wave settings")
        # WavesettingsContainer.setFixedWidth(320)
        WavesettingsContainerLayout = QGridLayout()

        WavesettingsContainerLayout.addWidget(QLabel("Voltage step(mV)"), 0, 0)
        self.DiffVoltagebox = QSpinBox(self)
        self.DiffVoltagebox.setMaximum(2000)
        self.DiffVoltagebox.setMinimum(-2000)
        self.DiffVoltagebox.setValue(10)
        self.DiffVoltagebox.setSingleStep(10)
        WavesettingsContainerLayout.addWidget(self.DiffVoltagebox, 0, 1)

        WavesettingsContainerLayout.addWidget(QLabel("Voltage baseline(mV)"),
                                              0, 2)
        self.LowerVoltagebox = QSpinBox(self)
        self.LowerVoltagebox.setMaximum(2000)
        self.LowerVoltagebox.setMinimum(-2000)
        self.LowerVoltagebox.setValue(0)
        self.LowerVoltagebox.setSingleStep(30)
        WavesettingsContainerLayout.addWidget(self.LowerVoltagebox, 0, 3)

        WavesettingsContainer.setLayout(WavesettingsContainerLayout)
        #------------------------------Membrane potential-----------------------------------
        Vm_measureContainer = StylishQT.roundQGroupBox(title="Vm measurement")
        # Vm_measureContainer.setFixedWidth(320)
        Vm_measureContainerLayout = QGridLayout()

        Vm_measureContainerLayout.addWidget(QLabel("Clamping current(pA)"), 0,
                                            0)
        self.clampingcurrentbox = QSpinBox(self)
        self.clampingcurrentbox.setMaximum(2000)
        self.clampingcurrentbox.setMinimum(-2000)
        self.clampingcurrentbox.setValue(0)
        self.clampingcurrentbox.setSingleStep(100)
        Vm_measureContainerLayout.addWidget(self.clampingcurrentbox, 0, 1)

        self.membraneVoltLabel = QLabel("Vm: ")
        Vm_measureContainerLayout.addWidget(self.membraneVoltLabel, 0, 2)

        self.VmstartButton = StylishQT.runButton()
        self.VmstartButton.clicked.connect(lambda: self.measure_currentclamp())
        Vm_measureContainerLayout.addWidget(self.VmstartButton, 0, 3)

        self.VmstopButton = StylishQT.stop_deleteButton()
        self.VmstopButton.setEnabled(False)
        self.VmstopButton.clicked.connect(
            lambda: self.stopMeasurement_currentclamp())
        Vm_measureContainerLayout.addWidget(self.VmstopButton, 0, 4)

        Vm_measureContainer.setLayout(Vm_measureContainerLayout)
        #------------------------------zap-----------------------------------
        zapContainer = StylishQT.roundQGroupBox(title="ZAP")
        # zapContainer.setFixedWidth(320)
        zapContainerLayout = QGridLayout()

        self.ICON_zap = './Icons/zap.jpg'
        self.zapiconlabel = QLabel()
        self.zapiconlabel.setPixmap(QPixmap(self.ICON_zap))
        zapContainerLayout.addWidget(self.zapiconlabel, 0, 0)

        self.zapButton = QPushButton("ZAP!")
        self.zapButton.setStyleSheet(
            "QPushButton {color:white;background-color: blue; border-style: outset;border-radius: 10px;border-width: 2px;font: bold 14px;padding: 6px}"
            "QPushButton:pressed {color:black;background-color: red; border-style: outset;border-radius: 10px;border-width: 2px;font: bold 14px;padding: 6px}"
            "QPushButton:hover:!pressed {color:blue;background-color: white; border-style: outset;border-radius: 10px;border-width: 2px;font: bold 14px;padding: 6px}"
        )
        self.zapButton.setShortcut('z')
        zapContainerLayout.addWidget(self.zapButton, 0, 1)
        self.zapButton.clicked.connect(self.zap)

        zapContainerLayout.addWidget(QLabel("ZAP voltage(V)"), 0, 2)
        self.zapVbox = QDoubleSpinBox(self)
        self.zapVbox.setMaximum(1)
        self.zapVbox.setMinimum(-2000)
        self.zapVbox.setValue(1)
        self.zapVbox.setSingleStep(0.1)
        zapContainerLayout.addWidget(self.zapVbox, 0, 3)

        zapContainerLayout.addWidget(QLabel("ZAP duration(us)"), 0, 4)
        self.zaptimebox = QSpinBox(self)
        self.zaptimebox.setMaximum(200000000)
        self.zaptimebox.setMinimum(-200000000)
        self.zaptimebox.setValue(200)
        self.zaptimebox.setSingleStep(200)
        zapContainerLayout.addWidget(self.zaptimebox, 0, 5)

        zapContainer.setLayout(zapContainerLayout)
        #----------------------------Control-----------------------------------
        controlContainer = StylishQT.roundQGroupBox(title="Control")
        controlContainer.setFixedWidth(350)
        controlLayout = QGridLayout()

        #        controlLayout.addWidget(QLabel("Holding Vm:"), 1, 0)
        #        self.HoldingList = QComboBox()
        #        self.HoldingList.addItems(['000 mV', '-30 mV', '-50 mV', '-40 mV', '-60 mV'])
        #        controlLayout.addWidget(self.HoldingList, 2, 0)
        #
        #        self.iconlabel = QLabel(self)
        #        self.iconlabel.setPixmap(QPixmap(self.ICON_RED_LED))
        #        controlLayout.addWidget(self.iconlabel, 1, 1)
        #
        #        self.holdingbutton = QPushButton("Hold")
        #        self.holdingbutton.setCheckable(True)
        #        #self.holdingbutton.toggle()
        #
        #        self.holdingbutton.clicked.connect(lambda: self.btnstate())
        #        #self.holdingbutton.clicked.connect(lambda: self.hold())
        #        #self.holdingbutton.clicked.connect(self.setGreenlight)
        #        controlLayout.addWidget(self.holdingbutton, 2, 1)

        #        self.stopandholdbutton = QPushButton("Stop and Hold")
        #        self.stopandholdbutton.clicked.connect(lambda: self.stopMeasurement())
        #        self.stopandholdbutton.clicked.connect(lambda: self.measure_hold())
        #        self.stopandholdbutton.clicked.connect(self.setGreenlight)
        #        controlLayout.addWidget(self.stopandholdbutton, 0, 2)
        '''
        self.stopholdingbutton = QPushButton("Stop holding")
        self.stopholdingbutton.clicked.connect(lambda: self.stophold())
        self.stopholdingbutton.clicked.connect(self.setRedlight)        
        controlLayout.addWidget(self.stopholdingbutton, 2, 2)        
        '''
        controlLayout.addWidget(gainContainer, 0, 0, 1, 2)
        controlLayout.addWidget(WavesettingsContainer, 1, 0, 1, 2)
        controlLayout.addWidget(Vm_measureContainer, 2, 0, 1, 2)
        controlLayout.addWidget(zapContainer, 3, 0, 1, 2)

        self.startButton = StylishQT.runButton("Start seal-test")
        self.startButton.clicked.connect(lambda: self.measure())
        # self.startButton.setFixedWidth(120)
        # self.startButton.clicked.connect(self.setRedlight)
        # self.startButton.clicked.connect(self.startUpdatingGUIThread)
        controlLayout.addWidget(self.startButton, 4, 0, 1, 1)

        self.stopButton = StylishQT.stop_deleteButton()
        self.stopButton.setEnabled(False)
        # self.stopButton.setFixedWidth(120)
        self.stopButton.clicked.connect(lambda: self.stopMeasurement())
        controlLayout.addWidget(self.stopButton, 4, 1, 1, 1)

        controlContainer.setLayout(controlLayout)

        #-----------------------------Plots------------------------------------
        plotContainer = StylishQT.roundQGroupBox(title="Output")
        plotContainer.setFixedWidth(350)
        self.plotLayout = QGridLayout(
        )  #We set the plotLayout as an attribute of the object (i.e. self.plotLayout instead of plotLayout)
        #This is to prevent the garbage collector of the C++ wrapper from deleting the layout and thus triggering errors.
        #Derived from: https://stackoverflow.com/questions/17914960/pyqt-runtimeerror-wrapped-c-c-object-has-been-deleted
        # and http://enki-editor.org/2014/08/23/Pyqt_mem_mgmt.html

        # self.outVolPlotWidget = SlidingWindow(200, title = "Voltage", unit = "V") #Should be bigger than the readvalue
        self.outCurPlotWidget = SlidingWindow(
            200, title="Current",
            unit="A")  #Should be bigger than the readvalue

        self.display_tab_widget = QTabWidget()

        # self.plotLayout.addWidget(QLabel('Voltage (mV):'), 0, 0)
        self.display_tab_widget.addTab(self.outCurPlotWidget, "Current")
        # self.plotLayout.addWidget(self.outVolPlotWidget, 1, 0)
        # self.plotLayout.addWidget(QLabel('Current (pA):'), 0, 1)
        # self.display_tab_widget.addTab(self.outVolPlotWidget, "Voltage")
        # self.plotLayout.addWidget(self.outCurPlotWidget, 1, 1)

        valueContainer = QGroupBox("Resistance/Capacitance")
        self.valueLayout = QGridLayout()
        self.resistanceLabel = QLabel("Resistance: ")
        self.capacitanceLabel = QLabel("Capacitance: ")
        self.ratioLabel = QLabel("Ratio: ")
        self.valueLayout.addWidget(self.resistanceLabel, 0, 0)
        self.valueLayout.addWidget(self.capacitanceLabel, 0, 1)
        self.valueLayout.addWidget(self.ratioLabel, 0, 2)

        self.pipette_resistance = QLineEdit(self)
        self.pipette_resistance.setPlaceholderText('Pipette resistance')
        self.pipette_resistance.setFixedWidth(100)
        self.valueLayout.addWidget(self.pipette_resistance, 1, 0)

        self.savedataButton = QPushButton("Save figure")
        self.savedataButton.clicked.connect(lambda: self.savePatchfigure())
        self.valueLayout.addWidget(self.savedataButton, 1, 1)

        self.resetButton = QPushButton("Reset Iplot")
        self.resetButton.clicked.connect(lambda: self.ResetCurrentImgView())
        self.valueLayout.addWidget(self.resetButton, 1, 2)

        valueContainer.setLayout(self.valueLayout)
        self.plotLayout.addWidget(self.display_tab_widget, 0, 0, 1, 1)
        self.plotLayout.addWidget(valueContainer, 2, 0, 1, 1)

        plotContainer.setLayout(self.plotLayout)
        #---------------------------Adding to master---------------------------
        master = QVBoxLayout()
        master.addWidget(controlContainer)
        master.addWidget(plotContainer)

        self.setLayout(master)

        #--------------------------Setting variables---------------------------
        self.changeVolInGain(self.inVolGainList.currentText())
        self.changeVolOutGain(self.outVolGainList.currentText())
        self.changeCurOutGain(self.outCurGainList.currentText())
        self.changeProbeGain(self.probeGainList.currentText())

        self.inVolGainList.currentIndexChanged.connect(
            lambda: self.changeVolInGain(self.inVolGainList.currentText()))
        self.outVolGainList.currentIndexChanged.connect(
            lambda: self.changeVolOutGain(self.outVolGainList.currentText()))
        self.outCurGainList.currentIndexChanged.connect(
            lambda: self.changeCurOutGain(self.outCurGainList.currentText()))
        self.probeGainList.currentIndexChanged.connect(
            lambda: self.changeProbeGain(self.probeGainList.currentText()))

    def ResetCurrentImgView(self):
        """Closes the widget nicely, making sure to clear the graphics scene and release memory."""
        self.outCurPlotWidget.close()
        # self.outVolPlotWidget.close()

        # Replot the imageview
        self.outCurPlotWidget = SlidingWindow(200, title="Current", unit="A")

        self.display_tab_widget.addTab(self.outCurPlotWidget, "Current")
        # self.display_tab_widget.addTab(self.outVolPlotWidget, "Voltage")

    def measure(self):
        """Pop up window asking to check the gains.
        Returns 
        True if the measurement can be done
        and 
        False if not.
        """
        #check = QMessageBox.question(self, 'GAINS!', "Are all the gains corresponding?",
        #QMessageBox.Yes | QMessageBox.No)

        #if check == QMessageBox.Yes:
        """Start the patchclamp measurement"""
        self.stopButton.setEnabled(True)
        self.startButton.setEnabled(False)

        self.diffvoltage = self.DiffVoltagebox.value() / 1000
        self.lowervoltage = self.LowerVoltagebox.value() / 1000
        self.sealTest.setWave(self.inVolGain, self.diffvoltage,
                              self.lowervoltage)
        self.sealTest.start()
        self.is_sealtesting = True
        self.startUpdatingGUIThread()

    def measure_hold(self):
        """Pop up window asking to check the gains.
        Returns 
        True if the measurement can be done
        and 
        False if not.
        """
        self.holdTest.setWave(self.inVolGain,
                              float(self.HoldingList.currentText()[0:3]))
        self.holdTest.start()
        self.is_sealtesting = True

    def measure_currentclamp(self):
        """Pop up window asking to check the gains.
        Returns 
        True if the measurement can be done
        and 
        False if not.
        """
        #check = QMessageBox.question(self, 'GAINS!', "Are all the gains corresponding?",
        #QMessageBox.Yes | QMessageBox.No)

        #if check == QMessageBox.Yes:
        """Start the patchclamp measurement"""
        self.VmstartButton.setEnabled(False)
        self.VmstopButton.setEnabled(True)

        self.currentclamp_value = self.clampingcurrentbox.value()
        self.currentclampTest.setWave(self.inVolGain, self.probeGain,
                                      self.currentclamp_value)
        self.currentclampTest.start()
        self.is_sealtesting = True

    def hold(self):
        constant = float(self.HoldingList.currentText()[0:3])
        self.executer = DAQmission()
        self.executer.sendSingleAnalog('patchAO', constant / 1000 * 10)
        print("Holding vm at " + str(constant) + ' mV')

    def stophold(self):
        self.executer.sendSingleAnalog('patchAO', 0)
        print('Stop holding')

    def btnstate(self):
        #source = self.sender()
        if self.holdingbutton.isChecked():
            self.measure_hold()
            self.setGreenlight()
        else:
            self.stop_hold_Measurement()
            self.setRedlight()

    def setRedlight(self):
        self.iconlabel.setPixmap(QPixmap(self.ICON_RED_LED))

    def setGreenlight(self):
        self.iconlabel.setPixmap(QPixmap(self.ICON_GREEN_LED))

    def startUpdatingGUIThread(self):
        time.sleep(0.3)
        StartGUIThread = threading.Thread(target=self.startUpdatingGUI)
        StartGUIThread.start()


#        else:
#            .disconnect()

    def handleMeasurement(self, voltOut, curOut):
        """Handle the measurement. Update the graph."""

        #Rescaling using gains
        self.voltOut = voltOut / self.outVolGain
        self.curOut = curOut / self.outCurGain / self.probeGain

    def startUpdatingGUI(self):
        while self.is_sealtesting == True:
            try:
                # self.outVolPlotWidget.append_(self.voltOut)
                self.outCurPlotWidget.append_(self.curOut)
                self.updateGraphs()
                self.updateLabels(self.curOut, self.voltOut)
                time.sleep(0.05)
            except:
                pass

    def updateGraphs(self):
        """Update graphs."""
        self.outCurPlotWidget.updateWindow()
        # self.outVolPlotWidget.updateWindow()

    def updateLabels(self, curOut, voltOut):
        """Update the resistance and capacitance labels.
        http://scipy-lectures.org/intro/scipy/auto_examples/plot_curve_fit.html
        https://docs.scipy.org/doc/scipy/reference/generated/scipy.optimize.curve_fit.html"""
        constants = MeasurementConstants()
        sampPerCyc = int(constants.patchSealSampRate / constants.patchSealFreq)

        try:
            curOutCyc = curOut.reshape(int(curOut.size / sampPerCyc),
                                       sampPerCyc)
            curData = np.mean(curOutCyc, axis=0)
        except:
            curData = curOut

        voltData = voltOut
        try:
            #Computing resistance
            tres = np.mean(voltData)
            dV = np.mean(voltData[voltData > tres]) - np.mean(
                voltData[voltData < tres])  #Computing the voltage difference
            dIss = np.mean(
                curData[math.floor(0.15 *
                                   sampPerCyc):math.floor(sampPerCyc / 2) -
                        2]) - np.mean(
                            curData[math.floor(0.65 * sampPerCyc):sampPerCyc -
                                    2])  #Computing the current difference
            membraneResistance = dV / (dIss * 1000000)  #Ohm's law (in megaohm)
            self.resistanceLabel.setText("Resistance:  %.4f M\u03A9" %
                                         membraneResistance)

            self.estimated_size_resistance = 10000 / (
                membraneResistance * 1000000
            )  #The specific resistance of a typical patch of membrane, RM, is ~10000 Ohm*cm^2
        except:
            self.resistanceLabel.setText("Resistance:  %s" % 'NaN')

        try:
            measured_voltage = np.mean(voltData) * 1000
            self.membraneVoltLabel.setText("Vm:  %.2f mV" % measured_voltage)
            self.membraneVoltLabel.setStyleSheet('color: red')
        except:
            self.membraneVoltLabel.setText("Vm:  %s" % 'NaN')
        try:
            #Computing capacitance
            points = 10
            maxCur = np.amax(curData)
            maxCurIndex = np.where(curData == maxCur)[0][0]
            curFit = curData[int(maxCurIndex + 1):int(
                maxCurIndex + 1 + points - 1)] - 0.5 * (np.mean(curData[
                    math.floor(0.15 * sampPerCyc):math.floor(sampPerCyc / 2) -
                    2]) + np.mean(
                        curData[math.floor(0.65 * sampPerCyc):sampPerCyc - 2]))
            timepoints = 1000 * np.arange(
                3, points - 1 + 3) / constants.patchSealSampRate
            #Fitting the data to an exponential of the form y=a*exp(-b*x) where b = 1/tau and tau = RC
            #I(t)=I0*e^−t/τ, y=a*exp(-b*x), get log of both sides:log y = -bx + log a
            fit = np.polyfit(
                timepoints, curFit, 1
            )  #Converting the exponential to a linear function and fitting it
            #Extracting data
            current = fit[0]
            resistance = dV * 1000 / current / 2  #Getting the resistance
            tau = -1 / fit[1]
            capacitance = 1000 * tau / resistance
            self.capacitanceLabel.setText("Capacitance:  %.4f" % capacitance)

            self.estimated_size_capacitance = capacitance * (10**-12) * (10**6)

            if self.estimated_size_capacitance > self.estimated_size_resistance:
                self.estimated_ratio = self.estimated_size_capacitance / self.estimated_size_resistance
            else:
                self.estimated_ratio = self.estimated_size_resistance / self.estimated_size_capacitance

            self.ratioLabel.setText(
                "Ratio:  %.4f" % self.estimated_ratio
            )  #http://www.cnbc.cmu.edu/~bard/passive2/node5.html

        except:
            self.capacitanceLabel.setText("Capacitance:  %s" % 'NaN')
            self.ratioLabel.setText("Ratio:  %s" % 'NaN')

        self.patch_parameters = "R_{}_C_{}".format(
            round(membraneResistance, 3), round(capacitance, 3))

    def stopMeasurement(self):
        """Stop the seal test."""
        self.stopButton.setEnabled(False)
        self.startButton.setEnabled(True)
        self.sealTest.aboutToQuitHandler()
        self.is_sealtesting = False
        #constant = float(self.HoldingList.currentText()[0:3])
        #self.executer = execute_constant_vpatch(constant/1000*10)
        #print("Holding vm at "+str(constant)+' mV')

    '''        
    def closeEvent(self, event):
        """On closing the application we have to make sure that the measuremnt
        stops and the device gets freed."""
        self.stopMeasurement()
    '''

    def stop_hold_Measurement(self):
        """Stop the seal test."""
        self.holdTest.aboutToQuitHandler()
        self.is_sealtesting = False

    def close_hold_Event(self, event):
        """On closing the application we have to make sure that the measuremnt
        stops and the device gets freed."""
        self.stop_hold_Measurement()

    def stopMeasurement_currentclamp(self):
        """Stop the seal test."""
        self.VmstartButton.setEnabled(True)
        self.VmstopButton.setEnabled(False)

        self.currentclampTest.aboutToQuitHandler()
        self.is_sealtesting = False

    #Change gain
    def changeVolInGain(self, gain):
        if gain == '1':
            self.inVolGain = 1
        elif gain == '1/10':
            self.inVolGain = 0.1
        elif gain == '1/50':
            self.inVolGain = 1. / 50

    def changeVolOutGain(self, gain):
        self.outVolGain = float(gain)

    def changeCurOutGain(self, gain):
        self.outCurGain = float(gain)

    def changeProbeGain(self, gain):
        if gain == '100M\u03A9':
            self.probeGain = 100 * 10**6
        elif gain == '10G\u03A9':
            self.probeGain = 10 * 10**9

    def zap(self):
        self.zap_v = self.zapVbox.value()
        self.zap_time = self.zaptimebox.value()
        self.sealTest.aboutToQuitHandler()
        self.zapfunction.setWave(self.inVolGain, self.zap_v, self.zap_time)
        self.zapfunction.start()
        time.sleep(0.06)
        self.zapfunction.aboutToQuitHandler()
        """Start the patchclamp measurement"""
        self.diffvoltage = self.DiffVoltagebox.value() / 1000
        self.lowervoltage = self.LowerVoltagebox.value() / 1000
        self.sealTest.setWave(self.inVolGain, self.diffvoltage,
                              self.lowervoltage)
        self.sealTest.start()

    def savePatchfigure(self):
        # create an exporter instance, as an argument give it
        # the item you wish to export
        exporter = pg.exporters.ImageExporter(
            self.outCurPlotWidget.getPlotItem())

        # set export parameters if needed
        exporter.parameters(
        )['width'] = 500  # (note this also affects height parameter)

        # save to file
        exporter.export(
            os.path.join(
                self.saving_dir,
                'SealTest_' + "Rpip_" + self.pipette_resistance.text() +
                "Mohm_" + self.patch_parameters + '.png'))
Example 8
    def control_for_registration(self, wavelength, value):
        value = int(value)
        daq = DAQmission()

        if value == 0:
            switch = False
        else:
            switch = True

        if wavelength == "640":
            print(wavelength + ":" + str(value))
            print(str(switch))
            daq.sendSingleAnalog("640AO", value)

            daq.sendSingleDigital("640blanking", switch)

        elif wavelength == "532":
            print(wavelength + ":" + str(value))
            print(str(switch))
            daq.sendSingleAnalog("532AO", value)

            daq.sendSingleDigital("640blanking", switch)

        else:
            print(wavelength + ":" + str(value))
            print(str(switch))
            daq.sendSingleAnalog("488AO", value)

            daq.sendSingleDigital("640blanking", switch)
    def inidividual_coordinate_operation(self, EachRound, EachWaveform, RowIndex, ColumnIndex):
        """
        Execute pre-set operations at each coordinate.

        Parameters
        ----------
        EachRound : int
            Current round index.
        EachWaveform : int
            Current waveform package index.
        RowIndex : int
            Current sample stage row index.
        ColumnIndex : int
            Current sample stage column index.

        Returns
        -------
        None.

        """
        # Extract information
        WaveformPackageToBeExecute = self.RoundQueueDict['RoundPackage_{}'.format(EachRound+1)][0]['WaveformPackage_{}'.format(EachWaveform+1)]
        CameraPackageToBeExecute = self.RoundQueueDict['RoundPackage_{}'.format(EachRound+1)][1]["CameraPackage_{}".format(EachWaveform+1)]                  
        WaveformPackageGalvoInfor = self.RoundQueueDict['GalvoInforPackage_{}'.format(EachRound+1)]['GalvoInfor_{}'.format(EachWaveform+1)]

        self.readinchan = WaveformPackageToBeExecute[3]
        self.RoundWaveformIndex = [EachRound+1, EachWaveform+1] # first is current round number, second is current waveform package number.
        self.CurrentPosIndex = [RowIndex, ColumnIndex]
        
        #----------------Camera operations-----------------
        _camera_isUsed = False
        if CameraPackageToBeExecute != {}: # if camera operations are configured
            _camera_isUsed = True
            CamSettigList = CameraPackageToBeExecute["Settings"]
            self.HamamatsuCam.StartStreaming(BufferNumber = CameraPackageToBeExecute["Buffer_number"],
                                             trigger_source = CamSettigList[CamSettigList.index("trigger_source")+1],
                                             exposure_time = CamSettigList[CamSettigList.index("exposure_time")+1],
                                             trigger_active = CamSettigList[CamSettigList.index("trigger_active")+1],
                                             subarray_hsize = CamSettigList[CamSettigList.index("subarray_hsize")+1],
                                             subarray_vsize = CamSettigList[CamSettigList.index("subarray_vsize")+1],
                                             subarray_hpos = CamSettigList[CamSettigList.index("subarray_hpos")+1],
                                             subarray_vpos = CamSettigList[CamSettigList.index("subarray_vpos")+1])
            # HamamatsuCam starts another thread to pull out frames from buffer.
            # Make sure that the camera is prepared before waveform execution.
#                                while self.HamamatsuCam.isStreaming == False:
#                                    print('Waiting for camera...')
#                                    time.sleep(0.5)
            time.sleep(1)
        print('Now start waveforms')
        #----------------Waveforms operations--------------
        if WaveformPackageGalvoInfor != 'NoGalvo': # Unpack the information of galvo scanning.
            self.readinchan = WaveformPackageGalvoInfor[0]
            self.repeatnum = WaveformPackageGalvoInfor[1]
            self.PMT_data_index_array = WaveformPackageGalvoInfor[2]
            self.averagenum = WaveformPackageGalvoInfor[3]
            self.lenSample_1 = WaveformPackageGalvoInfor[4]
            self.ScanArrayXnum = WaveformPackageGalvoInfor[5]
        
        self.adcollector = DAQmission()
        # self.adcollector.collected_data.connect(self.ProcessData)
        self.adcollector.runWaveforms(clock_source = self.clock_source, sampling_rate = WaveformPackageToBeExecute[0],
                                      analog_signals = WaveformPackageToBeExecute[1], digital_signals = WaveformPackageToBeExecute[2], 
                                      readin_channels = WaveformPackageToBeExecute[3])
        self.adcollector.save_as_binary(self.scansavedirectory)
        self.recorded_raw_data = self.adcollector.get_raw_data()
        self.Process_raw_data()
        
        #------------------Camera saving-------------------
        if _camera_isUsed == True:
            self.HamamatsuCam.isSaving = True
            img_text =  "_Cam_"+str(self.RoundWaveformIndex[1])+"_Zpos" + str(self.ZStackOrder)
            self.cam_tif_name = self.generate_tif_name(extra_text = img_text)
            self.HamamatsuCam.StopStreaming(saving_dir = self.cam_tif_name)
            # Make sure that the saving process is finished.
            while self.HamamatsuCam.isSaving == True:
                print('Camera saving...')
                time.sleep(0.5)
            time.sleep(1) 

        time.sleep(0.5)      
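
# Note: StartStreaming above pulls every camera setting out of a flat list by
# locating the key and taking the next element (CamSettigList[CamSettigList.index("key") + 1]).
# A small standalone illustration of that list layout, with hypothetical values:
cam_settings = ["trigger_source", "EXTERNAL", "exposure_time", 0.02,
                "trigger_active", "SYNCREADOUT", "subarray_hsize", 2048,
                "subarray_vsize", 2048, "subarray_hpos", 0, "subarray_vpos", 0]

exposure_time = cam_settings[cam_settings.index("exposure_time") + 1]
print(exposure_time)  # 0.02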
class ScanningExecutionThread(QThread):
    
    ScanningResult = pyqtSignal(np.ndarray, np.ndarray, object, object) #The signal for the measurement, we can connect to this signal
    #%%
    def __init__(self, RoundQueueDict, RoundCoordsDict, GeneralSettingDict, *args, **kwargs):        
        super().__init__(*args, **kwargs)
        self.RoundQueueDict = RoundQueueDict
        self.RoundCoordsDict = RoundCoordsDict
        self.GeneralSettingDict = GeneralSettingDict
        self.Status_list = None
        self.ludlStage = LudlStage("COM12")
        self.watchdog_flag = True
        
        self.clock_source = 'DAQ' # Should be set by GUI.
        
        self.scansavedirectory = self.GeneralSettingDict['savedirectory']
        self.meshgridnumber = int(self.GeneralSettingDict['Meshgrid'])
        
        self.wavelength_offset = 0 # An offset of 0.002 mm is observed between 900 and 1280 nm.
    #%%
    def run(self):
        """~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #                                                               Connect devices.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
        """
        # =============================================================================
        #         connect the Objective motor
        # =============================================================================
        """
        print('----------------------Starting to connect the Objective motor-------------------------')
        self.pi_device_instance = PIMotor()
        print('Objective motor connected.')
        self.errornum = 0
        self.init_focus_position = self.pi_device_instance.pidevice.qPOS(self.pi_device_instance.pidevice.axes)['1']
        print("init_focus_position : {}".format(self.init_focus_position))

        """
        # =============================================================================
        #         connect the Hamamatsu camera
        # =============================================================================
        """
        self._use_camera = False
        # Check if camera is used.
        for key in self.RoundQueueDict:
            if "RoundPackage_" in key:
                for waveform_and_cam_key in self.RoundQueueDict[key][1]:
                    if "CameraPackage_" in waveform_and_cam_key:
                        if len(self.RoundQueueDict[key][1][waveform_and_cam_key]) != 0:
                            self._use_camera = True
        
        if self._use_camera:
            print('Connecting camera...')
            self.HamamatsuCam = CamActuator()
            self.HamamatsuCam.initializeCamera()
        else:
            print('No camera involved.')
        
        """
        # =============================================================================
        #         connect the Insight X3
        # =============================================================================
        """        
        if len(self.RoundQueueDict['InsightEvents']) != 0:
            self.Laserinstance = InsightX3('COM11')
            try:
                # querygap = 1.1
                # self.Laserinstance.SetWatchdogTimer(0) # Disable the laser watchdog!
#                Status_watchdog_thread = threading.Thread(target = self.Status_watchdog, args=[querygap], daemon=True)
#                Status_watchdog_thread.start() 
                # time.sleep(1)
                #-------------Initialize laser--------------
                self.watchdog_flag = False
                time.sleep(0.5)
        
                warmupstatus = 0
                while int(warmupstatus) != 100:
                    warmupstatus = self.Laserinstance.QueryWarmupTime()
                    time.sleep(0.6)
                        
                self.watchdog_flag = True
                time.sleep(0.5)
            except:
                print('Laser not connected.')
                
            # Turn on the laser shutter at the beginning, if configured.
            if 'Shutter_Open' in self.GeneralSettingDict['StartUpEvents']:
                time.sleep(0.5)

                self.Laserinstance.Open_TunableBeamShutter()

                time.sleep(0.5)     
        """
        # =============================================================================
        #         Initialize ML
        # =============================================================================
        """        
        self._use_ML = False
        # Check if machine learning segmentation is used.
        for key in self.RoundQueueDict:
            if "RoundPackage_" in key:
                for photocycle_key in self.RoundQueueDict[key][2]:
                    if "PhotocyclePackage_" in photocycle_key:
                        if len(self.RoundQueueDict[key][2][photocycle_key]) != 0:
                            self._use_ML = True
        if self._use_ML:
            from ImageAnalysis.ImageProcessing_MaskRCNN import ProcessImageML
            self.Predictor = ProcessImageML()
            print("ML loaded.")
            
        #%%
        """~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #                                                                  Execution
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""
        GridSequence = 0
        TotalGridNumber = self.meshgridnumber**2
        
        for EachGrid in range(TotalGridNumber):
            """
            #------------------------------------------------------------------------------
            #:::::::::::::::::::::::::::::::: AT EACH GRID ::::::::::::::::::::::::::::::::
            #------------------------------------------------------------------------------
            """
            self.Grid_index = EachGrid
            """
            # =============================================================================
            #         For each small repeat unit in the scanning meshgrid
            # =============================================================================
            """

            time.sleep(0.5)
            
            for EachRound in range(int(len(self.RoundQueueDict)/2-1)): # EachRound is the round sequence number starting from 0, while the actual number used in dictionary is 1.
                """
                #-------------------------------------------------------------------------------
                #:::::::::::::::::::::::::::::::: AT EACH ROUND ::::::::::::::::::::::::::::::::
                #-------------------------------------------------------------------------------
                """
                print ('----------------------------------------------------------------------------')            
                print('Below is Grid {}, Round {}.'.format(EachGrid, EachRound+1)) # EachRound+1 is the corresponding round number when setting the dictionary starting from round 1.
                """
                # =============================================================================
                #         Execute Insight event at the beginning of each round
                # =============================================================================
                """            
                self.laser_init(EachRound)
                    
                """
                # =============================================================================
                #         Execute filter event at the beginning of each round
                # =============================================================================
                """
                self.filters_init(EachRound)

                """
                # =============================================================================
                #         Generate focus position list at the beginning of each round
                # =============================================================================
                """
                if EachRound == 0:
                    ZStackinfor = self.GeneralSettingDict['FocusStackInfoDict']['RoundPackage_{}'.format(EachRound+1)]        
                    ZStackNum = int(ZStackinfor[ZStackinfor.index('Focus')+5])
                    ZStackStep = float(ZStackinfor[ZStackinfor.index('Being')+5:len(ZStackinfor)])
            
                    # Generate position list.
                    ZStacklinspaceStart = self.init_focus_position - (math.floor(ZStackNum/2)) * ZStackStep
                    ZStacklinspaceEnd = self.init_focus_position + (ZStackNum - math.floor(ZStackNum/2)-1) * ZStackStep
    
                    self.ZStackPosList = np.linspace(ZStacklinspaceStart, ZStacklinspaceEnd, num = ZStackNum)
                
                self.currentCoordsSeq = 0
                #%%
                #-------------Unpack info for stage move.
                CoordsNum = len(self.RoundCoordsDict['CoordsPackage_{}'.format(EachRound+1)])              
                for EachCoord in range(CoordsNum):
                    """
                    #------------------------------------------------------------------------------------
                    #:::::::::::::::::::::::::::::::: AT EACH COORDINATE ::::::::::::::::::::::::::::::::
                    #------------------------------------------------------------------------------------
                    """
                    self.error_massage = None
                    
                    self.currentCoordsSeq += 1
                    
                    """
                    # =============================================================================
                    #         Stage movement
                    # =============================================================================
                    """
                    self.coord_array = self.RoundCoordsDict['CoordsPackage_{}'.format(EachRound+1)][EachCoord]
                    
                    # Offset coordinate row value for each well.
                    ScanningGridOffset_Row = int(EachGrid % self.meshgridnumber) * (self.GeneralSettingDict['StageGridOffset']) 
                    # Offset coordinate column value for each well.
                    ScanningGridOffset_Col = int(EachGrid/self.meshgridnumber) * (self.GeneralSettingDict['StageGridOffset']) 

                    RowIndex = self.coord_array['row'] + ScanningGridOffset_Row
                    ColumnIndex = self.coord_array['col'] + ScanningGridOffset_Col
                                      
                    try:
                        # for _ in range(2): # Repeat twice
                        #     self.ludlStage.moveAbs(RowIndex,ColumnIndex) # Row/Column indexs of np.array are opposite of stage row-col indexs.
                        #     time.sleep(1)
                        self.ludlStage.moveAbs(RowIndex, ColumnIndex) # Row/Column indices of np.array are opposite of stage row-col indices.
                        time.sleep(1.2)
                    except:
                        self.error_massage = 'Fail_MoveStage'
                        self.errornum += 1
                        print('Stage move failed! Error number: {}'.format(int(self.errornum)))

                    time.sleep(0.2)
                    
                    """
                    # =============================================================================
                    #                               Focus position
                    #         Unpack the focus stack information, conduct auto-focusing if set.
                    # =============================================================================
                    """
                    # Here also generate the ZStackPosList.
                    self.ZStackNum = self.unpack_focus_stack(EachGrid, EachRound, EachCoord)
                    
                    self.stack_focus_degree_list = []
                    
                    print('*******************************************Round {}. Current index: {}.**************************************************'.format\
                          (EachRound+1, [RowIndex,ColumnIndex]))
                    
                    #------------------Move to Z stack focus-------------------
                    for EachZStackPos in range(self.ZStackNum): 
                        """
                        #------------------------------------------------------------------------------------
                        #:::::::::::::::::::::::::::::::: AT EACH ZSTACK ::::::::::::::::::::::::::::::::::::
                        #------------------------------------------------------------------------------------
                        """
                        print('--------------------------------------------Stack {}--------------------------------------------------'.format(EachZStackPos+1))
                        if self.ZStackNum > 1:
                            self.ZStackOrder = int(EachZStackPos +1) # Here the first one is 1, not starting from 0.
                            
                            self.FocusPos = self.ZStackPosList[EachZStackPos]# + self.wavelength_offset    
                            
                            # Add the focus degree of previous image to the list.
                            # For stack of 3 only.
                            if EachZStackPos > 0:
                                try:
                                    self.stack_focus_degree_list.append(self.FocusDegree_img_reconstructed)
                                    
                                except:
                                    # FocusDegree_img_reconstructed is not generated with camera imaging.
                                    pass
                            print('stack_focus_degree_list is {}'.format(self.stack_focus_degree_list))
                            #-----Suppose now it's the 3rd stack position-----
                            if len(self.stack_focus_degree_list) == 2:
                                if self.stack_focus_degree_list[-1] < self.stack_focus_degree_list[-2]:
                                    self.FocusPos = self.ZStackPosList[0] - (self.ZStackPosList[1] - self.ZStackPosList[0])/2
                                    print('Focus degree decreasing, run the other direction.')
                            #-------------------------------------------------
                            
                            print('Target focus pos: {}'.format(self.FocusPos))
    
                            self.pi_device_instance.move(self.FocusPos)
                            # self.auto_focus_positionInStack = self.pi_device_instance.pidevice.qPOS(self.pi_device_instance.pidevice.axes)
                            # print("Current position: {:.4f}".format(self.auto_focus_positionInStack['1']))
                            
                            time.sleep(0.3)
                        else:
                            self.ZStackOrder = 1
                            self.FocusPos = self.init_focus_position
                        """
                        # =============================================================================
                        #         Execute waveform packages
                        # =============================================================================
                        """
                        self.Waveform_sequence_Num = int(len(self.RoundQueueDict['RoundPackage_{}'.format(EachRound+1)][0]))                        
                        #------------For waveforms in each coordinate----------
                        for EachWaveform in range(self.Waveform_sequence_Num):
                            
                            """
                            # =============================================================================
                            #         For photo-cycle 
                            # =============================================================================
                            """
                            # Get the photo cycle information
                            PhotocyclePackageToBeExecute = self.RoundQueueDict['RoundPackage_{}'.format(EachRound+1)][2] \
                                                            ["PhotocyclePackage_{}".format(EachWaveform+1)]
                            
                            # See if in this waveform sequence photo cycle is involved.
                            # PhotocyclePackageToBeExecute is {} if not configured.
                            if len(PhotocyclePackageToBeExecute) > 0:
                                
                                
                                
                                # Load the previously acquired camera image
                                self.cam_tif_name = r"M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Octoscope\2020-8-13 Screening Archon1 library V5 and 6\V6\Round2_Coords181_R19800C0_PMT_0Zmax.tif"
                                previous_cam_img = imread(self.cam_tif_name)
                                img_width = previous_cam_img.shape[1]

                                img_height = previous_cam_img.shape[0]

                                # Get the segmentation of full image.
                                fig, ax = plt.subplots()
                                MLresults = self.Predictor.DetectionOnImage(previous_cam_img, axis = ax)
                                
                                ROI_number = len(MLresults["scores"])
                                print('roi number: {}'.format(ROI_number))
                                for each_ROI in range(ROI_number):
                                    # if MLresults['class_ids'][each_ROI] == 3:
                                    ROIlist = MLresults['rois'][each_ROI]
                                    print(ROIlist)
                                    # np array's column([1]) is the width of image, and is the row in stage coordinates.
                                    ROI_center_width = int(ROIlist[1] + (ROIlist[3] - ROIlist[1])/2)
                                    print('ROI_center_width {}'.format(ROI_center_width))
                                    ROI_center_height = int(ROIlist[0] + (ROIlist[2] - ROIlist[0])/2)
                                    print('ROI_center_height {}'.format(ROI_center_height))
                                    cam_stage_transform_factor = 1.135
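                                    # cam_stage_transform_factor converts camera-pixel offsets into stage move units.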
                                    
                                    stage_move_col = (int((img_width)/2) - ROI_center_width) * cam_stage_transform_factor
                                    print(stage_move_col)
                                    stage_move_row = (int((img_height)/2) - ROI_center_height) * cam_stage_transform_factor
                                    print(stage_move_row)
                                    # Move cell of interest to the center of field of view
                                    self.ludlStage.moveRel(xRel = stage_move_row, yRel= stage_move_col)
                                    
                                    time.sleep(1)
                                    
                                    # Move the cell back
                                    self.ludlStage.moveRel(xRel = -1*stage_move_row, yRel= -1*stage_move_col)
                                    time.sleep(1)
                            #--------------------------------------------------
                            
                            """
                            # =============================================================================
                            #         Execute pre-set operations at EACH COORDINATE.
                            # =============================================================================
                            """
                            self.inidividual_coordinate_operation(EachRound, EachWaveform, RowIndex, ColumnIndex)
                                
                            
                        time.sleep(0.6) # Wait for receiving data to be done.
                    time.sleep(0.5)
                    print('*************************************************************************************************************************')
                    
        #%%
        """~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
        #                                                            Disconnect devices.
        # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~"""             
        
        # Switch off laser
        if len(self.RoundQueueDict['InsightEvents']) != 0:
            self.watchdog_flag = False
            time.sleep(0.5)

            self.Laserinstance.Close_TunableBeamShutter()

            time.sleep(0.5)
            
            self.Laserinstance.SaveVariables()
                     
            self.Laserinstance.Turn_Off_PumpLaser()
                    
        # Disconnect camera
        if self._use_camera == True:
            self.HamamatsuCam.Exit()
        
        # Disconnect focus motor
        try:
            self.pi_device_instance.CloseMotorConnection()
            print('Objective motor disconnected.')
        except:
            pass
        
    #%%
    def laser_init(self, EachRound):
        """
        Execute Insight event at the beginning of each round

        Parameters
        ----------
        EachRound : int
            Round index.

        Returns
        -------
        None.

        """
        #-Unpack info for Insight X3. In the list, the first entry is the shutter event and the second is the wavelength event. 
        InsightX3EventIndexList = [i for i,x in enumerate(self.RoundQueueDict['InsightEvents']) if 'Round_{}'.format(EachRound+1) in x]
          
        if len(InsightX3EventIndexList) == 1:
            print(InsightX3EventIndexList)
            InsightText = self.RoundQueueDict['InsightEvents'][InsightX3EventIndexList[0]]
            if 'Shutter_Open' in InsightText:
                self.watchdog_flag = False
                time.sleep(0.5)

                self.Laserinstance.Open_TunableBeamShutter()

                time.sleep(0.5)
                print('Laser shutter open.')
                self.watchdog_flag = True
                time.sleep(0.5)

            elif 'Shutter_Close' in InsightText:
                self.watchdog_flag = False
                time.sleep(0.5)

                self.Laserinstance.Close_TunableBeamShutter()

                time.sleep(0.5)
                print('Laser shutter closed.')
                self.watchdog_flag = True
                time.sleep(0.5)
            elif 'WavelengthTo' in InsightText:
                self.watchdog_flag = False
                time.sleep(0.5)
                TargetWavelen = int(InsightText[InsightText.index('To_')+3:len(InsightText)])
                
                if TargetWavelen == 1280:
                    self.wavelength_offset = -0.002 # give an offset if wavelength goes to 1280.
                elif TargetWavelen == 900:
                    self.wavelength_offset = 0
                    
                self.Laserinstance.SetWavelength(TargetWavelen)

                time.sleep(5)
                self.watchdog_flag = True
                time.sleep(0.5)
                
        elif len(InsightX3EventIndexList) == 2:
            
            InsightText_wl = self.RoundQueueDict['InsightEvents'][InsightX3EventIndexList[1]]
            InsightText_st = self.RoundQueueDict['InsightEvents'][InsightX3EventIndexList[0]]
            
            if 'WavelengthTo' in InsightText_wl and 'Shutter_Open' in InsightText_st:
                self.watchdog_flag = False
                time.sleep(0.5)
                TargetWavelen = int(InsightText_wl[InsightText_wl.index('To_')+3:len(InsightText_wl)])

                self.Laserinstance.SetWavelength(TargetWavelen)

                if TargetWavelen == 1280:
                    self.wavelength_offset = -0.002 # give an offset if wavelength goes to 1280.
                elif TargetWavelen == 900:
                    self.wavelength_offset = 0
                    
                time.sleep(5)

                self.Laserinstance.Open_TunableBeamShutter()

                print('Laser shutter open.')
                self.watchdog_flag = True
                time.sleep(0.5)
                
            elif 'WavelengthTo' in InsightText_wl and 'Shutter_Close' in InsightText_st:
                self.watchdog_flag = False
                time.sleep(0.5)
                TargetWavelen = int(InsightText_wl[InsightText_wl.index('To_')+3:len(InsightText_wl)])
               
                self.Laserinstance.SetWavelength(TargetWavelen)

                if TargetWavelen == 1280:
                    self.wavelength_offset = -0.002 # give an offset if wavelength goes to 1280.
                elif TargetWavelen == 900:
                    self.wavelength_offset = 0
                    
                time.sleep(5)

                self.Laserinstance.Close_TunableBeamShutter()

                time.sleep(1)
                print('Laser shutter closed.')
                self.watchdog_flag = True
                time.sleep(0.5)
                
            time.sleep(2)
            
    def filters_init(self, EachRound):
        """
        Execute filter event at the beginning of each round.

        Parameters
        ----------
        EachRound : int
            Round index.

        Returns
        -------
        None.

        """
        
        #-Unpack info for the filter event. In the list, the first entry is the ND filter and the second is the emission filter.
        FilterEventIndexList = [i for i,x in enumerate(self.RoundQueueDict['FilterEvents']) if 'Round_{}'.format(EachRound+1) in x]
        
        if len(FilterEventIndexList) > 0:
            NDposText = self.RoundQueueDict['FilterEvents'][FilterEventIndexList[0]]
            NDnumber = NDposText[NDposText.index('ToPos_')+6:len(NDposText)]
            
            EMposText = self.RoundQueueDict['FilterEvents'][FilterEventIndexList[1]]
            EMprotein = EMposText[EMposText.index('ToPos_')+6:len(EMposText)]
            
            # "COM9" for filter 1 port, which has ND values from 0 to 3.
            # "COM7" for filter 2 port, which has ND values from 0 to 0.5.
            if NDnumber == '0':
                ND_filter1_Pos = 0
                ND_filter2_Pos = 0
            elif NDnumber == '1':
                ND_filter1_Pos = 1
                ND_filter2_Pos = 0
            elif NDnumber == '2':
                ND_filter1_Pos = 2
                ND_filter2_Pos = 0
            elif NDnumber == '2.3':
                ND_filter1_Pos = 2
                ND_filter2_Pos = 2
            elif NDnumber == '2.5':
                ND_filter1_Pos = 2
                ND_filter2_Pos = 3
            elif NDnumber == '0.5':
                ND_filter1_Pos = 0
                ND_filter2_Pos = 3        
            elif NDnumber == '0.3':
                ND_filter1_Pos = 0
                ND_filter2_Pos = 2
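            # The ND lookup above could equally live in a single dict, e.g.
            # {'0': (0, 0), '1': (1, 0), '2': (2, 0), '2.3': (2, 2),
            #  '2.5': (2, 3), '0.5': (0, 3), '0.3': (0, 2)},
            # mapping the ND label to the (filter 1, filter 2) positions in one place.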
            
            if EMprotein == 'Arch':
                EM_filter_Pos = 0
            elif EMprotein == 'eGFP' or EMprotein == 'Citrine':
                EM_filter_Pos = 1
            
            # Execution
            if ND_filter1_Pos != None and ND_filter2_Pos != None:
                #Move filter 1
                self.filter1 = ELL9Filter("COM9")
                self.filter1.moveToPosition(ND_filter1_Pos)
                time.sleep(1)
                #Move filter 2
                self.filter2 = ELL9Filter("COM7")
                self.filter2.moveToPosition(ND_filter2_Pos)
                time.sleep(1)
            if EM_filter_Pos != None:
                self.filter3 = ELL9Filter("COM15")
                self.filter3.moveToPosition(EM_filter_Pos)
                time.sleep(1)
    
    def unpack_focus_stack(self, EachGrid, EachRound, EachCoord):
        """
        Unpack the focus stack information.
        Determine the focus position either from pre-set numbers or by auto-focusing.
        
        Parameters
        ----------
        EachGrid : int
            Current grid index.
        EachRound : int
            Current round index.
        EachCoord : int
            Current coordinate index.

        Returns
        -------
        ZStackNum : int
            Number of focus positions in the stack. The corresponding list of
            positions for the objective is stored in self.ZStackPosList.

        """
        ZStackinfor = self.GeneralSettingDict['FocusStackInfoDict']['RoundPackage_{}'.format(EachRound+1)]        
        ZStackNum = int(ZStackinfor[ZStackinfor.index('Focus')+5])
        ZStackStep = float(ZStackinfor[ZStackinfor.index('Being')+5:len(ZStackinfor)])
        
        # If manual focus correction applies, unpack the target focus info.
        if len(self.GeneralSettingDict['FocusCorrectionMatrixDict']) > 0:
            FocusPosArray = self.GeneralSettingDict['FocusCorrectionMatrixDict']['RoundPackage_{}_Grid_{}'.format(EachRound+1, EachGrid)]
            FocusPosArray = FocusPosArray.flatten('F')
            FocusPos_fromCorrection = FocusPosArray[EachCoord]
            
            ZStacklinspaceStart = FocusPos_fromCorrection - (math.floor(ZStackNum/2))* ZStackStep
            ZStacklinspaceEnd = FocusPos_fromCorrection + (ZStackNum - math.floor(ZStackNum/2)-1) * ZStackStep         
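            # The stack is centred on the corrected focus position: floor(ZStackNum/2)
            # planes lie below it and the rest above, spaced by ZStackStep.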
        
        # No manual focus correction matrix; decide the focus by auto-focus (if enabled) or reuse the existing list.
        else:
            # Flag indicating whether to run auto-focus at this coordinate.
            auto_focus_flag = self.coord_array['auto_focus_flag']
            # auto_focus_flag = False
            print('focus_position {}'.format(self.coord_array['focus_position']))
            
            #-----------------------Auto focus---------------------------------
            if auto_focus_flag == "yes":
                
                if self.coord_array['focus_position'] == -1.:
                    instance_FocusFinder = FocusFinder(motor_handle = self.pi_device_instance)
                    print("--------------Start auto-focusing-----------------")
                    # instance_FocusFinder.total_step_number = 7
                    # instance_FocusFinder.init_step_size = 0.013
                    # self.auto_focus_position = instance_FocusFinder.gaussian_fit()
                    self.auto_focus_position = instance_FocusFinder.bisection()
                    
                    relative_move_coords = [[1550,0],[0,1550],[1550,1550]]
                    trial_num = 0
                    while self.auto_focus_position == False: # If there's no cell in FOV
                        if trial_num <= 2:
                            print('No cells found. move to next pos.')
                            # Move to next position in real scanning coordinates.
                            self.ludlStage.moveRel(relative_move_coords[trial_num][0],relative_move_coords[trial_num][1])
                            time.sleep(1)
                            print('Now stage pos is {}'.format(self.ludlStage.getPos()))
                            
                            self.auto_focus_position = instance_FocusFinder.bisection()
                            # Move back 
                            self.ludlStage.moveRel(-1*relative_move_coords[trial_num][0],-1*relative_move_coords[trial_num][1])
                            
                            trial_num += 1
                        else:
                            print('No cells in neighbouring area.')
                            self.auto_focus_position = self.ZStackPosList[int(len(self.ZStackPosList)/2)]
                            break
                        
                    print("--------------End of auto-focusing----------------")
                    time.sleep(1)
      
                    # Record the position, try to write it in the NEXT round dict.
                    try:
                        self.RoundCoordsDict['CoordsPackage_{}'.format(EachRound + 2)][EachCoord]['focus_position'] = self.auto_focus_position
                    except:# If it's already the last round, skip.
                        pass
                    
                    # Generate position list.
                    ZStacklinspaceStart = self.auto_focus_position - (math.floor(ZStackNum/2)) * ZStackStep
                    ZStacklinspaceEnd = self.auto_focus_position + (ZStackNum - math.floor(ZStackNum/2)-1) * ZStackStep
                
                else: # If there's already position from last round, move to it.
                    self.previous_auto_focus_position = self.RoundCoordsDict['CoordsPackage_{}'.format(EachRound+1)][EachCoord]['focus_position']
                    
                    try:
                        # Record the position, try to write it in the NEXT round dict. 
                        self.RoundCoordsDict['CoordsPackage_{}'.format(EachRound + 2)][EachCoord]['focus_position'] = self.previous_auto_focus_position
                    except:
                        pass
                    
                    # Generate position list.
                    ZStacklinspaceStart = self.previous_auto_focus_position - (math.floor(ZStackNum/2)) * ZStackStep
                    ZStacklinspaceEnd = self.previous_auto_focus_position + (ZStackNum - math.floor(ZStackNum/2)-1) * ZStackStep
                    
                    # self.previous_auto_focus_position = self.auto_focus_position
                    # print("=====================Move to last recorded position=================")
                    # self.pi_device_instance.move(self.previous_auto_focus_position)
                    # time.sleep(0.2)
                    
                    # instance_FocusFinder = FocusFinder(motor_handle = self.pi_device_instance)
                    # print("--------------Start auto-focusing-----------------")
                    # self.auto_focus_position = instance_FocusFinder.bisection()
                    
                    # try:
                    #     if self.auto_focus_position[0] == False: # If there's no cell in FOV
                    #     # Skip?  mid_position = [False, self.current_pos]
                    #         self.auto_focus_position = self.auto_focus_position[1]
                    # except:
                    #     pass
                        
                    # print("--------------End of auto-focusing----------------")
                    # time.sleep(1)
      
                    # # Record the position, try to write it in the NEXT round dict.
                    # try:
                    #     self.RoundCoordsDict['CoordsPackage_{}'.format(EachRound + 2)][EachCoord]['focus_position'] = self.auto_focus_position
                    # except:# If it's already the last round, skip.
                    #     pass                    
                

                # Generate the position list; non-auto-focus coordinates reuse the same list variable.
                self.ZStackPosList = np.linspace(ZStacklinspaceStart, ZStacklinspaceEnd, num = ZStackNum)
                print('ZStackPos is : {}'.format(self.ZStackPosList))   
            #------------------------------------------------------------------
            # If not auto focus, use the same list variable self.ZStackPosList.
            else:
                pass
                
        return ZStackNum
        
    def inidividual_coordinate_operation(self, EachRound, EachWaveform, RowIndex, ColumnIndex):
        """
        Execute pre-set operations at each coordinate.

        Parameters
        ----------
        EachRound : int
            Current round index.
        EachWaveform : int
            Current waveform package index.
        RowIndex : int
            Current sample stage row index.
        ColumnIndex : int
            Current sample stage column index.

        Returns
        -------
        None.

        """
        # Extract information
        WaveformPackageToBeExecute = self.RoundQueueDict['RoundPackage_{}'.format(EachRound+1)][0]['WaveformPackage_{}'.format(EachWaveform+1)]
        CameraPackageToBeExecute = self.RoundQueueDict['RoundPackage_{}'.format(EachRound+1)][1]["CameraPackage_{}".format(EachWaveform+1)]                  
        WaveformPackageGalvoInfor = self.RoundQueueDict['GalvoInforPackage_{}'.format(EachRound+1)]['GalvoInfor_{}'.format(EachWaveform+1)]

        self.readinchan = WaveformPackageToBeExecute[3]
        self.RoundWaveformIndex = [EachRound+1, EachWaveform+1] # first is current round number, second is current waveform package number.
        self.CurrentPosIndex = [RowIndex, ColumnIndex]
        
        #----------------Camera operations-----------------
        _camera_isUsed = False
        if CameraPackageToBeExecute != {}: # if camera operations are configured
            _camera_isUsed = True
            CamSettingList = CameraPackageToBeExecute["Settings"]
            self.HamamatsuCam.StartStreaming(BufferNumber = CameraPackageToBeExecute["Buffer_number"],
                                             trigger_source = CamSettingList[CamSettingList.index("trigger_source")+1],
                                             exposure_time = CamSettingList[CamSettingList.index("exposure_time")+1],
                                             trigger_active = CamSettingList[CamSettingList.index("trigger_active")+1],
                                             subarray_hsize = CamSettingList[CamSettingList.index("subarray_hsize")+1],
                                             subarray_vsize = CamSettingList[CamSettingList.index("subarray_vsize")+1],
                                             subarray_hpos = CamSettingList[CamSettingList.index("subarray_hpos")+1],
                                             subarray_vpos = CamSettingList[CamSettingList.index("subarray_vpos")+1])
            # HamamatsuCam starts another thread to pull out frames from buffer.
            # Make sure that the camera is prepared before waveform execution.
#                                while self.HamamatsuCam.isStreaming == False:
#                                    print('Waiting for camera...')
#                                    time.sleep(0.5)
            time.sleep(1)
        print('Now start waveforms')
        #----------------Waveforms operations--------------
        if WaveformPackageGalvoInfor != 'NoGalvo': # Unpack the information of galvo scanning.
            self.readinchan = WaveformPackageGalvoInfor[0]
            self.repeatnum = WaveformPackageGalvoInfor[1]
            self.PMT_data_index_array = WaveformPackageGalvoInfor[2]
            self.averagenum = WaveformPackageGalvoInfor[3]
            self.lenSample_1 = WaveformPackageGalvoInfor[4]
            self.ScanArrayXnum = WaveformPackageGalvoInfor[5]
        
        self.adcollector = DAQmission()
        # self.adcollector.collected_data.connect(self.ProcessData)
        self.adcollector.runWaveforms(clock_source = self.clock_source, sampling_rate = WaveformPackageToBeExecute[0],
                                      analog_signals = WaveformPackageToBeExecute[1], digital_signals = WaveformPackageToBeExecute[2], 
                                      readin_channels = WaveformPackageToBeExecute[3])
        self.adcollector.save_as_binary(self.scansavedirectory)
        self.recorded_raw_data = self.adcollector.get_raw_data()
        self.Process_raw_data()
        
        #------------------Camera saving-------------------
        if _camera_isUsed == True:
            self.HamamatsuCam.isSaving = True
            img_text =  "_Cam_"+str(self.RoundWaveformIndex[1])+"_Zpos" + str(self.ZStackOrder)
            self.cam_tif_name = self.generate_tif_name(extra_text = img_text)
            self.HamamatsuCam.StopStreaming(saving_dir = self.cam_tif_name)
            # Make sure that the saving process is finished.
            while self.HamamatsuCam.isSaving == True:
                print('Camera saving...')
                time.sleep(0.5)
            time.sleep(1) 

        time.sleep(0.5)      
        
    def Process_raw_data(self):

        self.channel_number = len(self.recorded_raw_data)
        
        # Invert the first recorded channel and discard the trailing sample.
        self.data_collected_0 = self.recorded_raw_data[0][0:len(self.recorded_raw_data[0])-1]*-1
        print(len(self.data_collected_0))         
        
        if self.channel_number == 1:            
            if 'Vp' in self.readinchan:
                pass
            elif 'Ip' in self.readinchan:
                pass
            elif 'PMT' in self.readinchan:  # repeatnum, PMT_data_index_array, averagenum, ScanArrayXnum
                # Reconstruct the image from np array and save it.
                self.PMT_image_processing()
                
        elif self.channel_number == 2: 
            if 'PMT' not in self.readinchan:
                pass
            elif 'PMT' in self.readinchan:

                self.PMT_image_processing()

        print('ProcessData executed.')        
        

    # def ProcessData(self, data_waveformreceived):
    #     print('ZStackOrder is:'+str(self.ZStackOrder)+'numis_'+str(self.ZStackNum))
    #     self.adcollector.save_as_binary(self.scansavedirectory)
        
    #     self.channel_number = len(data_waveformreceived)
        
    #     self.data_collected_0 = data_waveformreceived[0]*-1
    #     self.data_collected_0 = self.data_collected_0[0:len(self.data_collected_0)-1]
    #     print(len(self.data_collected_0))
        
    #     if self.channel_number == 1:            
    #         if 'Vp' in self.readinchan:
    #             pass
    #         elif 'Ip' in self.readinchan:
    #             pass
    #         elif 'PMT' in self.readinchan:  # repeatnum, PMT_data_index_array, averagenum, ScanArrayXnum

    #             # Reconstruct the image from np array and save it.
    #             self.PMT_image_processing()
                
    #     elif self.channel_number == 2: 
    #         if 'PMT' not in self.readinchan:
    #             pass
    #         elif 'PMT' in self.readinchan:
                
    #             self.PMT_image_processing()

    #     print('ProcessData executed.')

    def PMT_image_processing(self):
        """
        Reconstruct the image from np array and save it.

        Returns
        -------
        None.

        """
        for imageSequence in range(self.repeatnum):
            
            try:
                self.PMT_image_reconstructed_array = self.data_collected_0[np.where(self.PMT_data_index_array == imageSequence+1)]

                Dataholder_average = np.mean(self.PMT_image_reconstructed_array.reshape(self.averagenum, -1), axis=0)

                Value_yPixels = int(self.lenSample_1/self.ScanArrayXnum)
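                # lenSample_1 is the number of samples in one (averaged) frame, so dividing by
                # the samples per scan line (ScanArrayXnum) gives the image height in lines.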
                self.PMT_image_reconstructed = np.reshape(Dataholder_average, (Value_yPixels, self.ScanArrayXnum))
                
                self.PMT_image_reconstructed = self.PMT_image_reconstructed[:, 50:550] # Crop size based on: M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2019-12-30 2p beads area test 4um
                # self.PMT_image_reconstructed = self.PMT_image_reconstructed[:, 70:326] # for 256*256 images
                
                # Evaluate the focus degree of re-constructed image.
                self.FocusDegree_img_reconstructed = ProcessImage.local_entropy(self.PMT_image_reconstructed.astype('float32'))
                print('FocusDegree_img_reconstructed is {}'.format(self.FocusDegree_img_reconstructed))
                
                # Save the individual file.
                with skimtiff.TiffWriter(os.path.join(self.scansavedirectory, 'Round'+str(self.RoundWaveformIndex[0]) + '_Grid' + str(self.Grid_index) +'_Coords'+str(self.currentCoordsSeq)+'_R'+str(self.CurrentPosIndex[0])+'C'+str(self.CurrentPosIndex[1])+'_PMT_'+str(imageSequence)+'Zpos'+str(self.ZStackOrder)+'.tif'), imagej = True) as tif:                
                    tif.save(self.PMT_image_reconstructed.astype('float32'), compress=0, metadata = {"FocusPos: " : str(self.FocusPos)})
               
                plt.figure()
                plt.imshow(self.PMT_image_reconstructed, cmap = plt.cm.gray) # For reconstructed image we pull out the first layer, getting 2d img.
                plt.show()                

                #---------------------------------------------For multiple images in one z pos, Stack the arrays into a 3d array--------------------------------------------------------------------------
                # if imageSequence == 0:
                #     self.PMT_image_reconstructed_stack = self.PMT_image_reconstructed[np.newaxis, :, :] # Turns into 3d array
                # else:
                #     self.PMT_image_reconstructed_stack = np.concatenate((self.PMT_image_reconstructed_stack, self.PMT_image_reconstructed[np.newaxis, :, :]), axis=0)
                #     print(self.PMT_image_reconstructed_stack.shape)
                    
                #---------------------------------------------Calculate the z max projection-----------------------------------------------------------------------
                if self.repeatnum == 1: # Consider the single-repeat image situation.
                    if self.ZStackNum > 1:
                        if self.ZStackOrder == 1:
                            self.PMT_image_maxprojection_stack = self.PMT_image_reconstructed[np.newaxis, :, :]

                        else:

                            self.PMT_image_maxprojection_stack = np.concatenate((self.PMT_image_maxprojection_stack, self.PMT_image_reconstructed[np.newaxis, :, :]), axis=0)

                    else:
                        self.PMT_image_maxprojection_stack = self.PMT_image_reconstructed[np.newaxis, :, :]
                        
                # Save the max projection image
                if self.ZStackOrder == self.ZStackNum:
                    self.PMT_image_maxprojection = np.max(self.PMT_image_maxprojection_stack, axis=0)
                    
                    # Save the zmax file.
                    with skimtiff.TiffWriter(os.path.join(self.scansavedirectory, 'Round'+str(self.RoundWaveformIndex[0])+ '_Grid' + str(self.Grid_index) + '_Coords'+str(self.currentCoordsSeq)+'_R'+str(self.CurrentPosIndex[0])+'C'+str(self.CurrentPosIndex[1])+'_PMT_'+str(imageSequence)+'Zmax'+'.tif'), imagej = True) as tif:                
                        tif.save(self.PMT_image_maxprojection.astype('float32'), compress=0, metadata = {"FocusPos: " : str(self.FocusPos)})

                
            except:
                print('No.{} image failed to generate.'.format(imageSequence))
                        
        
    def generate_tif_name(self, extra_text = "_"):
        
        tif_name = os.path.join(self.scansavedirectory, 'Round'+str(self.RoundWaveformIndex[0])+'_Coords'+str(self.currentCoordsSeq)+ \
                                '_R'+str(self.CurrentPosIndex[0])+'C'+str(self.CurrentPosIndex[1])+ extra_text +'.tif')            
        return tif_name
    
    #----------------------------------------------------------------WatchDog for laser----------------------------------------------------------------------------------
    def Status_watchdog(self, querygap):
        
        while True:
            if self.watchdog_flag == True:
                self.Status_list = self.Laserinstance.QueryStatus()
                time.sleep(querygap)
            else:
                print('Watchdog stopped')
                time.sleep(querygap)
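
Status_watchdog above is a blocking loop gated by self.watchdog_flag, so it is meant to run on its own thread while the acquisition proceeds. A minimal sketch of how it might be launched (the threading wrapper below is an illustrative assumption, not part of the original code):

import threading

def start_watchdog(instance, querygap=1):
    # Run Status_watchdog in a background daemon thread; setting
    # instance.watchdog_flag = False pauses the laser status queries.
    instance.watchdog_flag = True
    watchdog_thread = threading.Thread(target=instance.Status_watchdog,
                                       args=(querygap,), daemon=True)
    watchdog_thread.start()
    return watchdog_thread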
Example no. 11
0
    def hold(self):
        constant = self.HoldingList.value()
        # Scale the holding level from mV to the analog command voltage on 'patchAO'.
        self.executer = DAQmission()
        self.executer.sendSingleAnalog("patchAO", constant / 1000 * 10)
        print("Holding vm at " + str(constant) + " mV")
Example no. 12
0
class FocusFinder():
    
    def __init__(self, source_of_image = "PMT", init_search_range = 0.010, total_step_number = 5, imaging_conditions = {'edge_volt':5}, \
                 motor_handle = None, camera_handle = None, twophoton_handle = None, *args, **kwargs):
        """
        

        Parameters
        ----------
        source_of_image : string, optional
            The input source of the image, "PMT" or "Camera". The default is "PMT".
        init_search_range : float, optional
            Half-width of the initial coarse search window around the current
            position. The default is 0.010.
        total_step_number : int, optional
            Number of steps in total to find the optimal focus. The default is 5.
        imaging_conditions : dict
            Parameters for imaging.
            For PMT, it specifies the scanning voltage.
            For camera, it specifies the AOTF voltage and exposure time.
        motor_handle : PIMotor, optional
            Handle to control the PI objective motor. The default is None.
        camera_handle : CamActuator, optional
            Handle to the Hamamatsu camera. The default is None.
        twophoton_handle : object, optional
            Handle to control Insight X3. The default is None.

        Returns
        -------
        None.

        """
        super().__init__(*args, **kwargs)
        
        # Half-width of the initial coarse search window.
        self.init_search_range = init_search_range
        
        # Number of steps in total to find optimal focus.
        self.total_step_number = total_step_number
        
        # Parameters for imaging.
        self.imaging_conditions = imaging_conditions
        
        if motor_handle == None:
            # Connect the objective if the handle is not provided.
            self.pi_device_instance = PIMotor()
        else:
            self.pi_device_instance = motor_handle
        
        # Current position of the focus.
        self.current_pos = self.pi_device_instance.GetCurrentPos()

        # Number of steps already tried.
        self.steps_taken = 0
        # The focus degree of previous position.
        self.previous_degree_of_focus = 0
        # Number of times the search has reversed direction.
        self.turning_point = 0
        # The input source of image.
        self.source_of_image = source_of_image
        if source_of_image == "PMT":
            self.galvo = RasterScan(Daq_sample_rate = 500000, edge_volt = self.imaging_conditions['edge_volt'])
        elif source_of_image == "Camera":
            if camera_handle == None:
                # If no camera instance fed in, initialize camera.
                self.HamamatsuCam_ins = CamActuator()
                self.HamamatsuCam_ins.initializeCamera()
            else:
                self.HamamatsuCam_ins = camera_handle
    
    def gaussian_fit(self, move_to_focus = True):
        
        # The upper edge.
        upper_position = self.current_pos + self.init_search_range
        # The lower edge.
        lower_position = self.current_pos - self.init_search_range
        
        # Generate the sampling positions.
        sample_positions = np.linspace(lower_position, upper_position, self.total_step_number)
        
        degree_of_focus_list = []
        for each_pos in sample_positions:
            # Go through each position and write down the focus degree.
            degree_of_focus = self.evaluate_focus(round(each_pos, 6))
            degree_of_focus_list.append(degree_of_focus)
        print(degree_of_focus_list)
        
        try:
            interpolated_fitted_curve = ProcessImage.gaussian_fit(degree_of_focus_list)

            # Generate the interpolated focus position axis.
            x_axis_new = np.linspace(lower_position, upper_position, len(interpolated_fitted_curve))
            
            # Build a dictionary and find the position with the highest focus degree.
            max_focus_pos = dict(zip(interpolated_fitted_curve, x_axis_new))[np.amax(interpolated_fitted_curve)]
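            # Equivalent and simpler: max_focus_pos = x_axis_new[np.argmax(interpolated_fitted_curve)].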
            
            if True: # Plot the fitting.
                plt.plot(sample_positions, np.asarray(degree_of_focus_list),'b+:',label='data')
                plt.plot(x_axis_new, interpolated_fitted_curve,'ro:',label='fit')
                plt.legend()
                plt.title('Fig. Fit for focus degree')
                plt.xlabel('Position')
                plt.ylabel('Focus degree')
                plt.show()
            
            max_focus_pos = round(max_focus_pos, 6)
            print(max_focus_pos)
            self.pi_device_instance.move(max_focus_pos)
            # max_focus_pos_focus_degree = self.evaluate_focus(round(max_focus_pos, 6))
        except:
            print("Fitting failed. Find max in the list.")
            
            max_focus_pos = sample_positions[degree_of_focus_list.index(max(degree_of_focus_list))]
            print(max_focus_pos)
            
        if move_to_focus == True:
            self.pi_device_instance.move(max_focus_pos)
            
        return max_focus_pos
        
    def bisection(self):
        """
        Bisection way of finding focus.

        Returns
        -------
        mid_position : float
            DESCRIPTION.

        """
        # The upper edge in which we run bisection.
        upper_position = self.current_pos + self.init_search_range
        # The lower edge in which we run bisection.
        lower_position = self.current_pos - self.init_search_range
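        # Each iteration halves the search interval; after total_step_number steps
        # the interval width is 2 * init_search_range / 2**total_step_number.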

        for step_index in range(1, self.total_step_number + 1):   
            # In each step of bisection finding.
            
            # In the first round, get degree of focus at three positions.
            if step_index == 1:
                # Get degree of focus in the mid.
                mid_position = (upper_position + lower_position)/2
                degree_of_focus_mid = self.evaluate_focus(mid_position)
                print("mid focus degree: {}".format(round(degree_of_focus_mid, 5)))
                
                # Break the loop if no cell is detected in the image.
                if not ProcessImage.if_theres_cell(self.galvo_image.astype('float32')):
                    print('no cell')
                    mid_position = False
                    break

                # Move to top and evaluate.
                degree_of_focus_up = self.evaluate_focus(obj_position = upper_position)
                print("top focus degree: {}".format(round(degree_of_focus_up, 5)))
                # Move to bottom and evaluate.
                degree_of_focus_low = self.evaluate_focus(obj_position = lower_position)
                print("bot focus degree: {}".format(round(degree_of_focus_low, 5)))
                # Keep the boundary positions and their focus degrees.
                biesection_range_dic = {"top":[upper_position, degree_of_focus_up], 
                                        "bot":[lower_position, degree_of_focus_low]}
                
            # In the next rounds, only need to go to center and update boundaries.
            elif step_index > 1:
                # The upper edge in which we run bisection.
                upper_position = biesection_range_dic["top"][0]
                # The lower edge in which we run bisection.
                lower_position = biesection_range_dic["bot"][0]
                
                # Get degree of focus in the mid.
                mid_position = (upper_position + lower_position)/2
                degree_of_focus_mid = self.evaluate_focus(mid_position)
                
                print("Current focus degree: {}".format(round(degree_of_focus_mid, 5)))
                
            # If the focus maximum sits in the upper half, the midpoint becomes the new bottom boundary.
            if biesection_range_dic["top"][1] > biesection_range_dic["bot"][1]:
                biesection_range_dic["bot"] = [mid_position, degree_of_focus_mid]
            else:
                biesection_range_dic["top"] = [mid_position, degree_of_focus_mid]
            
            print("The upper pos: {}; The lower: {}".format(biesection_range_dic["top"][0], biesection_range_dic["bot"][0]))
            
        return mid_position
                
                
    
    def evaluate_focus(self, obj_position = None):
        """
        Evaluate the focus degree at a given objective position.

        Parameters
        ----------
        obj_position : float, optional
            The target objective position. The default is None.

        Returns
        -------
        degree_of_focus : float
            Degree of focus.

        """
        
        if obj_position != None:
            self.pi_device_instance.move(obj_position)
            
        # Get the image.
        if self.source_of_image == "PMT":
            self.galvo_image = self.galvo.run()
            plt.figure()
            plt.imshow(self.galvo_image)
            plt.show()
            
            if False:
                with skimtiff.TiffWriter(os.path.join(r'M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2020-11-17 gaussian fit auto-focus cells\trial_11', str(obj_position).replace(".", "_")+ '.tif')) as tif:                
                    tif.save(self.galvo_image.astype('float32'), compress=0)
                            
            degree_of_focus = ProcessImage.local_entropy(self.galvo_image.astype('float32'))
            
        elif self.source_of_image == "Camera":
            # First configure the AOTF.
            self.AOTF_runner = DAQmission()
            # Find the AOTF channel key
            for key in self.imaging_conditions:
                if 'AO' in key:
                    # like '488AO'
                    AOTF_channel_key = key
            
            # Set the AOTF first.
            self.AOTF_runner.sendSingleDigital('blankingall', True)
            self.AOTF_runner.sendSingleAnalog(AOTF_channel_key, self.imaging_conditions[AOTF_channel_key])
            
            # Snap an image from camera
            self.camera_image = self.HamamatsuCam_ins.SnapImage(self.imaging_conditions['exposure_time'])
            time.sleep(0.5)
            
            # Set back AOTF
            self.AOTF_runner.sendSingleDigital('blankingall', False)
            self.AOTF_runner.sendSingleAnalog(AOTF_channel_key, 0)
            
            plt.figure()
            plt.imshow(self.camera_image)
            plt.show()
            
            if False:
                with skimtiff.TiffWriter(os.path.join(r'M:\tnw\ist\do\projects\Neurophotonics\Brinkslab\Data\Xin\2021-03-06 Camera AF\beads', str(obj_position).replace(".", "_")+ '.tif')) as tif:                
                    tif.save(self.camera_image.astype('float32'), compress=0)
                            
            degree_of_focus = ProcessImage.variance_of_laplacian(self.camera_image.astype('float32'))
                
        time.sleep(0.2)
        
        return degree_of_focus
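
A minimal usage sketch for FocusFinder (the values and the fallback step are illustrative assumptions, not taken from the original code):

# Hypothetical usage: let FocusFinder connect the PI objective motor itself
# and use the PMT raster scan as the sharpness source.
finder = FocusFinder(source_of_image="PMT",
                     init_search_range=0.010,      # half-width of the coarse search window
                     total_step_number=5,
                     imaging_conditions={'edge_volt': 5})

best_focus = finder.bisection()      # returns False when no cell is found in the FOV
if best_focus is False:
    best_focus = finder.gaussian_fit(move_to_focus=True)   # one possible fallback
print("Best focus position:", best_focus)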
Example no. 14
0
    def control_for_registration(self, wavelength, value):
        value = int(value)
        daq = DAQmission()

        if value == 0:
            switch = False
        else:
            switch = True

        if wavelength == '640':
            print(wavelength + ':' + str(value))
            print(str(switch))
            daq.sendSingleAnalog('640AO', value)
            # execute_tread_singlesample_AOTF_analog = execute_tread_singlesample_analog()
            # execute_tread_singlesample_AOTF_analog.set_waves('640AO', value)
            # execute_tread_singlesample_AOTF_analog.start()

            daq.sendSingleDigital('640blanking', switch)
            # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
            # execute_tread_singlesample_AOTF_digital.set_waves('640blanking', switch)
            # execute_tread_singlesample_AOTF_digital.start()

        elif wavelength == '532':
            print(wavelength + ':' + str(value))
            print(str(switch))
            daq.sendSingleAnalog('532AO', value)

            daq.sendSingleDigital('640blanking', switch)
            # execute_tread_singlesample_AOTF_analog = execute_tread_singlesample_analog()
            # execute_tread_singlesample_AOTF_analog.set_waves('532AO', value)
            # execute_tread_singlesample_AOTF_analog.start()

            # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
            # execute_tread_singlesample_AOTF_digital.set_waves('640blanking', switch)
            # execute_tread_singlesample_AOTF_digital.start()

        else:
            print(wavelength + ':' + str(value))
            print(str(switch))
            daq.sendSingleAnalog('488AO', value)

            daq.sendSingleDigital('640blanking', switch)
Example no. 15
0
    def execute_tread_single_sample_digital(self, channel):
        daq = DAQmission()
        if channel == '640blanking':
            if self.switchbutton_640.isChecked():
                self.lasers_status['640'][0] = True
                daq.sendSingleDigital(channel, True)
                # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                # execute_tread_singlesample_AOTF_digital.set_waves(channel, 1)
                # execute_tread_singlesample_AOTF_digital.start()
            else:
                self.lasers_status['640'][0] = False
                daq.sendSingleDigital(channel, False)
                # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                # execute_tread_singlesample_AOTF_digital.set_waves(channel, 0)
                # execute_tread_singlesample_AOTF_digital.start()
        elif channel == '532blanking':
            if self.switchbutton_532.isChecked():
                self.lasers_status['532'][0] = True
                daq.sendSingleDigital(channel, True)
                # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                # execute_tread_singlesample_AOTF_digital.set_waves(channel, 1)
                # execute_tread_singlesample_AOTF_digital.start()
            else:
                self.lasers_status['532'][0] = False
                daq.sendSingleDigital(channel, False)
                # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                # execute_tread_singlesample_AOTF_digital.set_waves(channel, 0)
                # execute_tread_singlesample_AOTF_digital.start()
        elif channel == '488blanking':
            if self.switchbutton_488.isChecked():
                self.lasers_status['488'][0] = True
                daq.sendSingleDigital(channel, True)
                # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                # execute_tread_singlesample_AOTF_digital.set_waves(channel, 1)
                # execute_tread_singlesample_AOTF_digital.start()
            else:
                self.lasers_status['488'][0] = False
                daq.sendSingleDigital(channel, False)
                # execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                # execute_tread_singlesample_AOTF_digital.set_waves(channel, 0)
                # execute_tread_singlesample_AOTF_digital.start()

        elif channel == 'LED':
            if self.switchbutton_LED.isChecked():
                execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                execute_tread_singlesample_AOTF_digital.set_waves(channel, 1)
                execute_tread_singlesample_AOTF_digital.start()
            else:
                execute_tread_singlesample_AOTF_digital = execute_tread_singlesample_digital()
                execute_tread_singlesample_AOTF_digital.set_waves(channel, 0)
                execute_tread_singlesample_AOTF_digital.start()

        self.sig_lasers_status_changed.emit(self.lasers_status)
Example no. 16
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.sampling_rate = 10000
        self.PWM_frequency = 50
        self.mission = DAQmission()

    def hold(self):
        constant = float(self.HoldingList.currentText()[0:3])
        self.executer = DAQmission()
        self.executer.sendSingleAnalog('patchAO', constant / 1000 * 10)
        print("Holding vm at " + str(constant) + ' mV')
Example no. 18
0
    def registration(self, grid_points_x=3, grid_points_y=3):
        """
        By default, generate a 3x3 grid of galvo voltage coordinates from (-5,-5) to (5,5),
        image each of these points with the camera, and return the polynomial
        transform that maps camera_coordinates onto galvo_coordinates.

        Parameters
        ----------
        grid_points_x : int, optional
            Number of grid points along x. The default is 3.
        grid_points_y : int, optional
            Number of grid points along y. The default is 3.

        Returns
        -------
        transformation_cam2galvo : np.ndarray
            Polynomial coefficients that map camera coordinates onto galvo
            coordinates, one coefficient matrix per axis.

        """
        galvothread = DAQmission()
        readinchan = []

        x_coords = np.linspace(-10, 10, grid_points_x + 2)[1:-1]
        y_coords = np.linspace(-10, 10, grid_points_y + 2)[1:-1]

        xy_mesh = np.reshape(np.meshgrid(x_coords, y_coords), (2, -1),
                             order='F').transpose()

        galvo_coordinates = xy_mesh
        camera_coordinates = np.zeros((galvo_coordinates.shape))

        for i in range(galvo_coordinates.shape[0]):

            galvothread.sendSingleAnalog('galvosx', galvo_coordinates[i, 0])
            galvothread.sendSingleAnalog('galvosy', galvo_coordinates[i, 1])
            time.sleep(1)

            image = self.cam.SnapImage(0.06)
            plt.imsave(
                os.getcwd() +
                '/CoordinatesManager/Registration_Images/2P/image_' + str(i) +
                '.png', image)

            camera_coordinates[i, :] = readRegistrationImages.gaussian_fitting(
                image)

        print('Galvo Coordinate')
        print(galvo_coordinates)
        print('Camera coordinates')
        print(camera_coordinates)
        del galvothread
        self.cam.Exit()

        transformation_cam2galvo = CoordinateTransformations.polynomial2DFit(
            camera_coordinates, galvo_coordinates, order=1)

        transformation_galvo2cam = CoordinateTransformations.polynomial2DFit(
            galvo_coordinates, camera_coordinates, order=1)

        print('Transformation found for x:')
        print(transformation_cam2galvo[:, :, 0])
        print('Transformation found for y:')
        print(transformation_cam2galvo[:, :, 1])

        print('galvo2cam found for x:')
        print(transformation_galvo2cam[:, :, 0])
        print('galvo2cam found for y:')
        print(transformation_galvo2cam[:, :, 1])

        return transformation_cam2galvo
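
For reference, the camera-to-galvo mapping that registration() fits can be illustrated with a standalone first-order (affine) least-squares fit in plain NumPy. This is only a sketch of the idea; it does not reproduce the coefficient layout of CoordinateTransformations.polynomial2DFit, and the matched points below are made up:

import numpy as np

def fit_affine(camera_coordinates, galvo_coordinates):
    # Least-squares fit of galvo = [x, y, 1] @ coeffs, one column per galvo axis.
    ones = np.ones((camera_coordinates.shape[0], 1))
    design = np.hstack([camera_coordinates, ones])                       # shape (N, 3)
    coeffs, *_ = np.linalg.lstsq(design, galvo_coordinates, rcond=None)  # shape (3, 2)
    return coeffs

def apply_affine(coeffs, camera_point):
    x, y = camera_point
    return np.array([x, y, 1.0]) @ coeffs                                # -> (galvo_x, galvo_y)

# Made-up matched coordinates, e.g. from gaussian_fitting on the registration images.
cam_pts = np.array([[100., 120.], [400., 130.], [110., 420.], [390., 410.]])
galvo_pts = np.array([[-5., -5.], [5., -5.], [-5., 5.], [5., 5.]])
coeffs = fit_affine(cam_pts, galvo_pts)
print(apply_affine(coeffs, (250., 270.)))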