Beispiel #1
0
 def __init__(self, params):
     """Initialize DAQ acquisition state from *params* and set up the device.

     Required keys: 'timeout', 'acq_dur' (plus whatever the base class
     needs).  Optional: 'timer' (shared Timer instance).
     """
     super().__init__(params)

     # Required parameters -- a missing key propagates as KeyError.
     try:
         self.timeout = params['timeout']
         self.acq_dur = params['acq_dur']  # acquisition segment duration
     except KeyError:
         raise

     # Use the caller-provided timer when present, otherwise a private one.
     self.timer = params['timer'] if 'timer' in params else Timer()

     self.read_request_size = []  # will be converted from self.acq_dur in init_daq
     self.hat = []                # hardware handle, filled in by init_daq

     self.worker = Threadworker(self.acq_start)
     self.worker.setName('DAQthreader_ch' + str(self.ai_ch))

     self.init_daq()
Beispiel #2
0
    def __init__(self, params):
        """Set up wheel-rotation state and the speed-calculation worker.

        Required keys: 'channels', 'daq'.  Optional: 'timer'.
        """
        try:
            self.wh_daqch = params['channels']  # channel index from daq card
            self.daq = params['daq']
        except KeyError:
            raise

        # Physical constants of the wheel/encoder assembly.
        self.ROTAROADSIZE_RAD = 8  # wheel size in cm
        self.ENCODER_RESOLUTION = 2500  # cycles/revolution

        # Signal/time accumulated across all segments.
        self.alldata = np.array([])
        self.allt = np.array([])

        self.whspeed = []        # per-segment speed values
        self.whsig = None        # latest wheel signal segment
        self.t = None            # latest segment time vector
        self.wheel_counter = 0   # number of processed segments
        self.calc_requested = False

        self.timer = params['timer'] if 'timer' in params else None

        self.worker = Threadworker(self.calc_speed)
        self.worker.setName('Wheelthreader')
Beispiel #3
0
  def __init__(self, params):
      """Prepare fake-DAQ playback state from prerecorded data in *params*.

      Required keys: 'rawdata', 'scan_rate', 'acq_dur'.  Optional: 'timer'.
      """
      try:
          self.rawdata = params['rawdata']
          self.scan_rate = params['scan_rate']
          self.acq_dur = params['acq_dur']  # acquisition segment duration
      except KeyError:
          raise
      # One logical channel per column of the prerecorded array.
      self.ch = list(range(self.rawdata.shape[1]))
      self.read_request_size = int(self.scan_rate * self.acq_dur)

      self.data = []  # segment data
      self.t = []     # acquisition relative time for segment
      self.acq_counter = 0               # segment counter
      self.total_nsample_perchannel = 0  # total number of samples per channel

      self.timer = params['timer'] if 'timer' in params else Timer()

      self.data_acqtime = {}  # relative time of data-segment acquisition
      self.data_len = {}      # sample number per each segment

      self.worker = Threadworker(self.acq_start)
Beispiel #4
0
    def __init__(self, params):
        """Configure the NI USB-6009 device on top of the generic DAQ state."""
        # Explicit base-class call kept for Python 2.7 compatibility.
        DAQ.__init__(self, params)
        self.device_name = 'USB-6009'
        try:
            self.dev_id = params['dev_id']
            self.ai_fdb_chan = params['ai_feedback_ch']
            if self.mode == 'trigger':
                self.trg_chan = params['ai_trg_ch']
                # 'rising_edge' or 'falling_edge'
                self.trg_mode = params['trg_mode']

            self.ao_chan = params['ao_ch']
            self.acq_dur = params['acq_dur']
            self.ai_buffersize = 5100
            self.timeout = 10
        except (KeyError, ValueError) as err:
            # NOTE(review): the error is logged but not re-raised, so some
            # attributes may be left unset -- confirm this is intentional.
            logging.error(err, exc_info=True)

        self.timer = params['timer'] if 'timer' in params else Timer()

        self.init_daq()
        self.aiworker = Threadworker(self.acq_start)
        self.aiworker.setName('DAQthreader_ch' + str(self.ai_ch))
        self.aiworker_live = True
Beispiel #5
0
class MCC118(DAQ):
    """MCC 118 HAT analog-input DAQ.

    Supports 'continuous', 'trigger' and 'finite' acquisition modes.
    Data is read on a worker thread (``self.worker``) which fills
    ``self.data`` / ``self.t`` one segment at a time and sets the
    data-ready flag for consumers.
    """

    def __init__(self, params):
        """Initialize acquisition state and configure the MCC 118 board.

        Required *params* keys: 'timeout', 'acq_dur' (plus whatever the
        DAQ base class requires).  Optional: 'timer'.
        """
        super().__init__(params)
        try:
            self.timeout = params['timeout']
            self.acq_dur = params['acq_dur']  # acquisition segment duration
        except KeyError:
            raise

        # Use the caller-provided timer when present, otherwise a private one.
        self.timer = params['timer'] if 'timer' in params else Timer()

        self.read_request_size = []  # converted from self.acq_dur in init_daq
        self.hat = []                # hardware handle, set in init_daq

        self.worker = Threadworker(self.acq_start)
        self.worker.setName('DAQthreader_ch' + str(self.ai_ch))

        self.init_daq()

    def init_daq(self):
        """Locate the MCC 118 HAT and derive the actual scan rate / read size."""
        try:
            address = select_hat_device(HatIDs.MCC_118)
            self.hat = mcc118(address)

            num_channels = len(self.ai_ch)
            # The hardware only supports discrete rates; adopt the actual one
            # before sizing the per-segment read request.
            self.scan_rate = self.hat.a_in_scan_actual_rate(num_channels, self.scan_rate)
            self.read_request_size = int(self.scan_rate * self.acq_dur)
        except (NameError, SyntaxError):
            # daqhats not installed / no HAT present -- leave the object
            # unconfigured so non-hardware code paths can still run.
            pass

    def reset_timer(self):
        """Restart the acquisition timer."""
        self.timer.start()

    def record_cont(self):
        """Read continuously while both the worker and the hardware scan run.

        Each time a full segment (``read_request_size`` samples/channel) is
        available it is reshaped into ``self.data`` (samples x channels) and
        ``self.t`` is rebuilt with the segment's acquisition-relative times.
        """
        nch = len(self.ai_ch)

        scan_status = self.hat.a_in_scan_status()
        # BUG FIX: use logical 'and' (short-circuit) instead of bitwise '&'.
        while self.worker.running() and scan_status.running:
            scan_status = self.hat.a_in_scan_status()
            nsample = scan_status.samples_available

            if nsample >= self.read_request_size:
                read_result = self.hat.a_in_scan_read_numpy(READ_ALL_AVAILABLE, self.timeout)
                self.data_acqtime[self.acq_counter] = self.timer.elapsed_time()
                nsample = int(len(read_result.data) / nch)  # samples per channel
                self.data_len[self.acq_counter] = nsample

                # Check for an overrun error.
                if read_result.hardware_overrun:
                    print('\n\nHardware overrun\n')
                elif read_result.buffer_overrun:
                    print('\n\nBuffer overrun\n')

                self.data = np.reshape(read_result.data, (nsample, nch))
                timeoff = self.total_nsample_perchannel / self.scan_rate
                self.t = timeoff + np.arange(nsample) / self.scan_rate

                self.worker.set_datflag()  # signal consumers that data is ready

                self.total_nsample_perchannel += nsample
                self.acq_counter += 1
                sleep(self.acq_dur * 0.9)
            else:
                sleep(0.05)

    def record_N_sample(self):
        """Read ``read_request_size`` samples per channel in ~100 ms chunks.

        Appends each chunk to ``self.data`` / ``self.t`` and sets the
        data-ready flag once all N samples have been collected.
        """
        nch = len(self.ai_ch)
        scan_status = self.hat.a_in_scan_status()
        total_samples_read = 0
        segment_size = int(self.scan_rate * 0.1)  # samples per 100 ms chunk
        segment_dur = 0.1                         # chunk duration in seconds
        N = self.read_request_size

        if self.worker.running() and scan_status.running:
            while total_samples_read < N:
                read_result = self.hat.a_in_scan_read_numpy(segment_size, self.timeout)
                self.data_acqtime[self.acq_counter] = self.timer.elapsed_time()
                nsample = int(len(read_result.data) / nch)  # samples per channel
                # Check for an overrun error.
                if read_result.hardware_overrun:
                    print('\n\nHardware overrun\n')
                elif read_result.buffer_overrun:
                    print('\n\nBuffer overrun\n')

                dataseg = np.reshape(read_result.data, (nsample, nch))
                timeoff = self.total_nsample_perchannel / self.scan_rate
                tseg = timeoff + np.arange(nsample) / self.scan_rate
                self.t = np.hstack((self.t, tseg))
                # BUG FIX: vstack fails when self.data is still the initial
                # empty list; seed it with the first segment instead.
                if len(self.data) == 0:
                    self.data = dataseg
                else:
                    self.data = np.vstack((self.data, dataseg))
                self.data_len[self.acq_counter] = nsample

                self.total_nsample_perchannel += nsample
                self.acq_counter += 1
                # BUG FIX: total_samples_read was never advanced, so the
                # loop ran forever.
                total_samples_read += nsample
                # BUG FIX: the original slept segment_size*0.9 *seconds*
                # (segment_size is a sample count, e.g. hundreds); sleep
                # ~90% of the chunk duration instead.
                sleep(segment_dur * 0.9)
            self.worker.set_datflag()  # set data_ready_flag

    def wait_for_trigger(self):
        """Poll the HAT status until the scan is triggered or stops running."""
        # Read the status only to determine when the trigger occurs.
        is_running = True
        is_triggered = False
        while is_running and not is_triggered:
            status = self.hat.a_in_scan_status()
            is_running = status.running
            is_triggered = status.triggered

    def record_withtrigger(self):
        """Repeatedly wait for a hardware trigger, then record one N-sample burst."""
        while self.worker.running():
            self.wait_for_trigger()
            self.record_N_sample()

    def acq_start(self):
        """Start the scan according to ``self.mode`` and enter the read loop.

        Modes: 'continuous' (free-running), 'trigger' (external rising-edge
        trigger per burst), 'finite' (single N-sample read).
        Raises NotImplementedError for any other mode.
        """
        channel_mask = chan_list_to_mask(self.ai_ch)

        if self.mode == 'continuous':
            samples_per_channel = 0
            options = OptionFlags.CONTINUOUS
            self.hat.a_in_scan_start(channel_mask, samples_per_channel, self.scan_rate, options)
            self.record_cont()

        elif self.mode == 'trigger':
            samples_per_channel = self.read_request_size
            options = OptionFlags.EXTTRIGGER
            trigger_mode = TriggerModes.RISING_EDGE

            self.hat.trigger_mode(trigger_mode)
            self.hat.a_in_scan_start(channel_mask, samples_per_channel, self.scan_rate, options)
            self.record_withtrigger()

        elif self.mode == 'finite':
            samples_per_channel = self.read_request_size
            options = OptionFlags.DEFAULT
            self.hat.a_in_scan_start(channel_mask, samples_per_channel, self.scan_rate, options)
            self.record_N_sample()

        else:
            # BUG FIX: a bare ``raise`` outside an except block raised a
            # confusing RuntimeError; NotImplementedError is an explicit,
            # backward-compatible subclass of RuntimeError.
            raise NotImplementedError('mode {!r} not implemented'.format(self.mode))
        self.worker.clear_datflag()

    def acq_stop(self):
        """Stop the hardware scan and the worker thread."""
        self.hat.a_in_scan_stop()
        self.worker.stop()

    def acq_cleanup(self):
        """Release scan resources on the HAT."""
        self.hat.a_in_scan_cleanup()



#READ_ALL_AVAILABLE = -1
#class DAQ:
#    """ DAQ"""
#
#    def __init__(self, params):     
#        try:      
#            self.ch = params['channels']
#            self.scan_rate = params['scan_rate']
#            self.mode = params['mode'] # continuous, trigger, finite        
#            self.timeout = params['timeout']
#            self.acq_dur = params['acq_dur'] # acquistion segment duration
#        except KeyError:
#            raise
#            
#        self.read_request_size =[]
#        self.hat = [] 
#        
#        self.data =[] # segment data
#        self.t =[]  # acquisition relative time for segment
#        self.acq_counter = 0 # segment counter
#        self.total_nsample_perchannel =0  # total number of samples per channel
#        
#        if 'timer' in params:
#            self.timer = params['timer']            
#        else:
#            self.timer = Timer()            
#            
#        self.data_acqtime ={} #relative time of data-segment acquisition
#        self.data_len = {} # sample number per each segment
#        #pdb.set_trace()        
#        self.worker = Threadworker(self.acq_start)
#        self.worker.setName('DAQthreader_ch'+str(self.ch))
#        #self.worker = Processworker(self.acq_start)
#        
#        
#        self.init_daq()
#        
#    def init_daq(self):
#        try:            
#            address = select_hat_device(HatIDs.MCC_118)
#            self.hat = mcc118(address)
#            #pdb.set_trace()
#            num_channels = len(self.ch)
#            self.read_request_size = int(self.scan_rate*self.acq_dur) 
#            self.scan_rate = self.hat.a_in_scan_actual_rate(num_channels, self.scan_rate)        
#        
#            
#            
#        except (NameError, SyntaxError):
#            pass
#        
#        
#    def reset_timer(self):
#        """
#        def reset_timer(self):
#            reset timer
#        """
#        self.timer.start()
#        
#    def record_cont(self):
#        """
#        def record_cont(self):
#        recording continously while scan_status is running
#        """
#        
#        nch  = len(self.ch)            
#            
#        scan_status = self.hat.a_in_scan_status()    
#        while self.worker.running() & scan_status.running : 
#            
#            scan_status = self.hat.a_in_scan_status()
#            nsample =scan_status.samples_available
#            
#            if nsample>= self.read_request_size:                
#                read_result = self.hat.a_in_scan_read_numpy(READ_ALL_AVAILABLE, self.timeout)       
#                self.data_acqtime[self.acq_counter] = self.timer.elapsed_time()
#                nsample = int(len(read_result.data) / nch) # 
#                self.data_len[self.acq_counter] =nsample
#                
#                # Check for an overrun error
#                if read_result.hardware_overrun:
#                    print('\n\nHardware overrun\n')                
#                elif read_result.buffer_overrun:
#                    print('\n\nBuffer overrun\n')                
#                    
#                self.data = np.reshape(read_result.data,(nsample,nch))
#                timeoff = self.total_nsample_perchannel/self.scan_rate
#                self.t = timeoff + np.array(range(0,nsample))/self.scan_rate
#                
#                workername = self.worker.getName()                
#                #logging.debug('{}: counter:{}, nsample:{}, abstime:{}'.format(workername,self.acq_counter, nsample, self.data_acqtime[self.acq_counter]))
#                
#                self.worker.set_datflag()
#                
#                self.total_nsample_perchannel += nsample
#                self.acq_counter +=1
#                sleep(self.acq_dur*0.9)
#            else:                
#                sleep(0.05)
#                
#                
#    def record_N_sample(self):
#        """
#        def record_N_sample(self):
#        read N samples 
#        """
#        
#        nch  = len(self.ch)                        
#        scan_status = self.hat.a_in_scan_status()    
#        total_samples_read =0
#        segment_size = int(self.scan_rate* 0.1) #set segment size to 100msec
#        N = self.read_request_size
#        
#        if self.worker.running() & scan_status.running :                         
#            while total_samples_read <N:                        
#                read_result = self.hat.a_in_scan_read_numpy(segment_size, self.timeout)       
#                nsample = int(len(read_result.data) / nch) #                 
#                # Check for an overrun error
#                if read_result.hardware_overrun:
#                    print('\n\nHardware overrun\n')                
#                elif read_result.buffer_overrun:
#                    print('\n\nBuffer overrun\n')                
#                    
#                dataseg = np.reshape(read_result.data,(nsample,nch))
#                timeoff = self.total_nsample_perchannel/self.scan_rate
#                tseg = timeoff + np.array(range(0,nsample))/self.scan_rate
#                self.t = np.hstack((self.t,tseg))
#                self.data = np.vstack((self.data,dataseg))
#                self.data_len[self.acq_counter] =nsample
#                
#                workername = self.worker.getName()                
#                #logging.debug('{}: counter:{}, nsample:{}, abstime:{}'.format(workername,self.acq_counter, nsample, self.data_acqtime[self.acq_counter]))                                                
#                
#                self.total_nsample_perchannel += nsample
#                self.acq_counter +=1
#                sleep(segment_size*0.9)
#            self.worker.set_datflag() # set data_ready_flag
#            
#    
#    def wait_for_trigger(self):
#        """
#        Monitor the status of the specified HAT device in a loop until the
#        triggered status is True or the running status is False.        
#        """
#        # Read the status only to determine when the trigger occurs.
#        is_running = True
#        is_triggered = False
#        while is_running and not is_triggered:
#            status = self.hat.a_in_scan_status()
#            is_running = status.running
#            is_triggered = status.triggered
#            
#            
#                
#    def record_withtrigger(self):                                
#        while self.worker.running():             
#            self.wait_for_trigger()
#            self.record_N_sample()         
#            
#                
#    def acq_start(self):
#        """
#        def acq_start(self):
#            acqusition start
#        """
#        
#        
#        channel_mask = chan_list_to_mask(self.ch)
#        
#        if self.mode =='continuous':
#            samples_per_channel = 0
#            options = OptionFlags.CONTINUOUS    
#                        
#            self.hat.a_in_scan_start(channel_mask, samples_per_channel, self.scan_rate,options)                 
#            self.record_cont()
#            
#            
#        elif self.mode=='trigger':
#            samples_per_channel = self.read_request_size
#            options = OptionFlags.EXTTRIGGER
#            trigger_mode = TriggerModes.RISING_EDGE
#            
#            self.hat.trigger_mode(trigger_mode)
#            
#            self.hat.a_in_scan_start(channel_mask, samples_per_channel, self.scan_rate,options) 
#            self.record_withtrigger()
#            
#        elif self.mode =='finite':
#            samples_per_channel = self.read_request_size
#            options = OptionFlags.DEFAULT
#            self.hat.a_in_scan_start(channel_mask, samples_per_channel, self.scan_rate,options) 
#            self.record_N_sample()
#            
#            
#        else:
#            print('not implmented\n')
#            raise      
#        self.worker.clear_datflag()
#            
#    def acq_stop(self):
#        self.hat.a_in_scan_stop()
#        self.worker.stop()
#    def acq_cleanup(self):
#        self.hat.a_in_scan_cleanup()                
Beispiel #6
0
class Wheel:
    """Class for wheel rotation info.

    Consumes quadrature-encoder signals from a DAQ object and computes
    wheel speed per acquired segment on a worker thread.
    """

    def __init__(self, params):
        """Set up wheel state.  Required keys: 'channels', 'daq'; optional 'timer'."""
        try:
            self.wh_daqch = params['channels']  # channel index from daq card
            self.daq = params['daq']
        except KeyError:
            raise

        self.ROTAROADSIZE_RAD = 8  # wheel size in cm
        self.ENCODER_RESOLUTION = 2500  # cycles/revolution

        # Signal/time accumulated across all segments.
        self.alldata = np.array([])
        self.allt = np.array([])

        self.whspeed = []       # per-segment speed values
        self.whsig = None       # latest wheel signal segment
        self.t = None           # latest segment time vector
        self.wheel_counter = 0  # number of processed segments
        self.calc_requested = False

        self.timer = params['timer'] if 'timer' in params else None

        self.worker = Threadworker(self.calc_speed)
        self.worker.setName('Wheelthreader')

    def get_datinx(self):
        """Map wheel DAQ channel numbers to column indices in acquired data."""
        # FIX: local was named 'map', shadowing the builtin.
        chan_map = np.zeros(8)  # suppose maximum channel < 8
        ch1 = list(self.daq.ai_ch)
        chan_map[ch1] = list(range(len(ch1)))
        inx = chan_map[list(self.wh_daqch)]
        return inx.astype(int)

    def get_latestdaq(self):
        """Fetch the latest DAQ segment and append it to the running buffers."""
        chinx = self.get_datinx()
        self.whsig = self.daq.data[:, chinx]  # wheel signal Samples x ch
        self.t = self.daq.t

        if self.alldata.size == 0:
            self.alldata = self.whsig
            self.allt = self.t
        else:
            self.alldata = np.concatenate((self.alldata, self.whsig), axis=0)
            self.allt = np.concatenate((self.allt, self.t), axis=0)

    def enable_calc_speed(self):
        """Request speed calculation; return the current wheel counter."""
        self.calc_requested = True
        return self.wheel_counter

    def get_wheel_counter(self):
        """Return the number of processed segments."""
        return self.wheel_counter

    def disable_calc_speed(self):
        """Withdraw the speed-calculation request; return the current counter."""
        self.calc_requested = False
        return self.wheel_counter

    def is_active(self, thr, start, end=-1):
        """Return True when mean speed over ``whspeed[start:end]`` exceeds *thr*.

        Returns False when *start* is past the end of the recorded speeds.
        ``end == -1`` means 'to the end of the list'.
        """
        if start >= len(self.whspeed):
            return False

        spd = self.whspeed[start:] if end == -1 else self.whspeed[start:end]
        # bool() so callers always get a plain Python bool.
        return bool(np.nanmean(spd) > thr)

    def calc_speed(self):
        """Worker loop: compute one (rounded) mean wheel speed per DAQ segment.

        Runs while the DAQ worker runs; appends each value to ``self.whspeed``
        and clears the DAQ data-ready flag after consuming a segment.
        """
        logging.info('wheel_thread started!!!\n')

        while self.daq.worker.running():
            daq_counter = self.daq.acq_counter
            dataready = self.daq.worker.check_datflag()  # daq data ready?
            if self.daq.acq_dur < 0.05:
                logging.warning(
                    'DAQ segment {} is too small for Wheel objects'.format(
                        self.daq.acq_dur))

            if dataready:
                # Speed is always computed (the calc_requested gate was
                # deliberately disabled in the original code).
                self.get_latestdaq()
                spd = get_whspeed(self.whsig, self.t, twin=0.1)
                spd = round(np.nanmean(spd))
                self.whspeed.insert(self.wheel_counter, spd)

                workername = self.worker.getName()
                logging.debug('{}: daq_counter:{}, wheel_counter:{},speed:{}, abstime:{}'\
                              .format(workername,daq_counter,self.wheel_counter,\
                                      self.whspeed[self.wheel_counter],self.timer.elapsed_time()))

                self.wheel_counter += 1
                self.daq.worker.clear_datflag()

            sleep(0.01)

        self.worker.stop()
        if not self.worker.running():
            workername = self.worker.getName()
            logging.info('{} STOPPED'.format(workername))


#def main():
#    if sys.platform =='win32':
#        fname = 'Z:\\Pybehav\\VoltageRecording-04042019-1055-001_Cycle00001_VoltageRecording_001.csv'
#    else:
#        fname = '/home/slee/data/Pybehav/VoltageRecording-04042019-1055-001_Cycle00001_VoltageRecording_001.csv'
#
#
#    D = pd.read_csv(fname, delimiter = ',')
#    dty = D.dtypes
#    #X = D.to_numpy()
#    X = D.values
#    X1 = X[:,1:]
#
#    t= X[:,0]
#    t = t[:,np.newaxis]/1000
#    t = t.flatten()
#    sig=X1[:,2:]
#    speed= get_whspeed(sig,t)
#    plt.plot(t,speed)
#
#if __name__=='__main__':
#    main()
#

######################################################################

#def get_whspeed(sig,t, twin=0.1, ROTAROADSIZE_RAD =8, ENCODER_RESOLUTION = 2500):
#
#    """"
#    get_whspeed(sig,t, twin=0.1, ROTAROADSIZE_RAD =8, ENCODER_RESOLUTION = 2500)
#    ROTAROADSIZE_RAD = 8cm in radius (default)
#    ENCODER_RESOLUTION = 2500 cycles/revolution
#    twin = 0.1(default) in second
#    The following codes take into account for counts/revolution
#    1 cycle consists of 4 counts as follows:
#        ROT1 = (0, 0, 0, 1, 1, 1, 1, 0) # clockwise
#        ROT2 = (1, 0, 1, 1, 0, 1, 0, 0) # counter clockwise
#    """
#    # rotation pattern (clockwise or counter clockwise)
#    ROT1 = (0, 0, 0, 1, 1, 1, 1, 0)
#    ROT2 = (1, 0, 1, 1, 0, 1, 0, 0)
#
#    A=sig[:,0]-np.mean(sig[:,0]);
#    A=(np.sign(A/np.max(np.abs(A)))+1)/2;
#
#
#    B=sig[:,1]-np.mean(sig[:,1]);
#    B=(np.sign(B/np.max(np.abs(B)))+1)/2;
#
#    statechange_inxA = np.nonzero(np.diff(A)!=0)
#    statechange_inxB = np.nonzero(np.diff(B)!=0)
#    statechange_inx = np.concatenate((statechange_inxA[0], statechange_inxB[0]))
#    statechange_inx = np.unique(statechange_inx)
#
#    Time_statechange = t[statechange_inx]
#    M=np.array([A[statechange_inx], B[statechange_inx]])
#
#    Maug =np.concatenate((M[:,:-1],M[:,1:]),axis=0)
#
#    lenstate = Maug.shape[1]
#
#    speed = np.zeros(t.shape)
#    # rotation distance / count
#    distseg = (ROTAROADSIZE_RAD*2*np.pi)/(ENCODER_RESOLUTION*4)
#
#
#
#    #c = time()
#    FWD = np.zeros(Maug.shape) # checking forward movement
#    BWD = np.zeros(Maug.shape) # checking backward movement
#    shifts = list(range(0,8,2))
#    for i in range(0,4):
#        ishift = shifts[i]
#        ROT1sh = np.roll(ROT1,ishift)
#        ROT2sh = np.roll(ROT2,ishift)
#        ROT1sh = ROT1sh[:4,np.newaxis]
#        ROT2sh = ROT2sh[:4,np.newaxis]
#        FWD[i,:]=  np.sum(np.abs(Maug - ROT1sh),0)==4
#        BWD[i,:]=  np.sum(np.abs(Maug - ROT2sh),0)==4
#    FWD = np.sum(FWD,0)>0
#    BWD = np.sum(BWD,0)>0
#    statetimedur = Time_statechange[1:]-Time_statechange[:-1]
#    statetimedur[BWD] = -1*statetimedur[BWD]
#    statetimedur[np.logical_not(np.logical_or(FWD,BWD))]=np.inf
#    for i in range(lenstate):
#        ist = statechange_inx[i]
#        ied = statechange_inx[i+1]
#        speed[ist:ied+1] = distseg/statetimedur[i]
#
#
#    tperiod = t[1]-t[0]
#    winsize = int(twin/tperiod)
#
#    twindow = np.ones(winsize)
#
#    speed_t=np.convolve(speed,twindow)/winsize
#
#    offset = int(winsize/2)
#    speed_t = speed_t[range(offset,offset+len(t))]
#
##    plt.plot(t,speed)
##    plt.plot(t,speed_t)
#    #print('totaltime:' + str(time()-c))
#    return speed_t
Beispiel #7
0
class fakeDAQ:
    """Fake DAQ that replays prerecorded data in timed segments.

    Mimics the real DAQ interface (worker thread, per-segment ``data`` /
    ``t``, data-ready flag) so downstream consumers can run without
    hardware.
    """

    def __init__(self, params):
        """Required keys: 'rawdata', 'scan_rate', 'acq_dur'; optional 'timer'."""
        try:
            self.rawdata = params['rawdata']
            self.scan_rate = params['scan_rate']
            self.acq_dur = params['acq_dur']  # acquisition segment duration
        except KeyError:
            raise
        # One logical channel per column of the prerecorded array.
        self.ch = list(range(0, self.rawdata.shape[1]))
        self.read_request_size = int(self.scan_rate * self.acq_dur)

        self.data = []  # segment data
        self.t = []     # acquisition relative time for segment
        self.acq_counter = 0               # segment counter
        self.total_nsample_perchannel = 0  # total number of samples per channel

        self.timer = params['timer'] if 'timer' in params else Timer()

        self.data_acqtime = {}  # relative time of data-segment acquisition
        self.data_len = {}      # sample number per each segment

        self.worker = Threadworker(self.acq_start)

    def reset_timer(self):
        """Restart the playback timer."""
        self.timer.start()

    def record_cont(self):
        """Replay rawdata one segment per ``acq_dur`` until data is exhausted."""
        nsample = int(self.scan_rate * self.acq_dur)
        nch = len(self.ch)
        datalen = self.rawdata.shape[0]

        while self.worker.running():
            try:
                sleep(self.acq_dur)
                # BUG FIX: stop *before* indexing past the end of rawdata.
                # The old check (total > datalen) permitted one final read
                # whose index range exceeded the array bounds.
                if self.total_nsample_perchannel + nsample > datalen:
                    break

                self.data_acqtime[self.acq_counter] = self.timer.elapsed_time()

                inx = range(self.acq_counter * nsample, (self.acq_counter + 1) * nsample)
                self.data = self.rawdata[inx, :]
                timeoff = self.total_nsample_perchannel / self.scan_rate
                self.t = timeoff + np.array(range(0, nsample)) / self.scan_rate
                self.data_len[self.acq_counter] = nsample

                self.worker.set_datflag()  # signal consumers that data is ready

                self.total_nsample_perchannel += nsample
                self.acq_counter += 1

            except KeyboardInterrupt:
                logging.info('\nExit from DAQ\n')
                break

        self.acq_stop()

    def acq_start(self):
        """Run the replay loop, then clear the data-ready flag."""
        self.record_cont()
        self.worker.clear_datflag()

    def acq_stop(self):
        """Stop the worker thread."""
        self.worker.stop()
Beispiel #8
0
class usb6009(DAQ):
    """ NI USB-6009 DAQ: for analog input and output"""
    def __init__(self, params):
        """
        Configure a NI USB-6009 for analog input/output.

        Required params keys: 'dev_id', 'ai_feedback_ch', 'ao_ch',
        'acq_dur'; additionally, when self.mode == 'trigger' (set by the
        DAQ base class): 'ai_trg_ch' and 'trg_mode' ('rising_edge' or
        'falling_edge').  Optional: 'timer' (defaults to a fresh Timer).

        Raises KeyError/ValueError if a required parameter is missing
        or invalid.
        """
        # Python-2.7-compatible explicit base call.
        #super(usb6009, self).__init__(params)
        DAQ.__init__(self, params)
        self.device_name = 'USB-6009'
        try:
            self.dev_id = params['dev_id']
            self.ai_fdb_chan = params['ai_feedback_ch']
            if self.mode == 'trigger':
                self.trg_chan = params['ai_trg_ch']
                self.trg_mode = params['trg_mode']  # 'rising_edge' or 'falling_edge'

            self.ao_chan = params['ao_ch']
            self.acq_dur = params['acq_dur']
            self.ai_buffersize = 5100  # AI buffer size in samples
            self.timeout = 10  # read timeout, seconds
        except (KeyError, ValueError) as err:
            # A missing/invalid parameter leaves the object unusable: log
            # the details, then propagate instead of silently continuing
            # with missing attributes (consistent with the other DAQ
            # classes, which re-raise on KeyError).
            logging.error(err, exc_info=True)
            raise

        if 'timer' in params:
            self.timer = params['timer']
        else:
            self.timer = Timer()

        self.init_daq()
        self.aiworker = Threadworker(self.acq_start)
        self.aiworker.setName('DAQthreader_ch' + str(self.ai_ch))
        self.aiworker_live = True  # set False by cleanup() to end the acq_start loop

    def init_daq(self):
        """
        Configure the USB-6009: create the AO/AI tasks, add channels, and
        set continuous sample-clock timing.  Updates self.scan_rate with
        the coerced hardware rate and derives self.read_request_size.
        """
        system = nidaqmx.system.System.local()

        dev = system.devices[self.dev_id]
        try:
            if not (dev.product_type == self.device_name):
                # logging uses lazy %-style args; the old '{}' placeholder
                # was never substituted and made logging raise internally
                logging.error('not proper DAQ %s selected!\n', dev.product_type)

            self.task_ao = nidaqmx.Task()
            self.task_ai = nidaqmx.Task()

            # single analog-output channel, 0-5 V range
            ao_ch = self.get_channame('ao', self.ao_chan)
            self.task_ao.ao_channels.add_ao_voltage_chan(ao_ch,
                                                         min_val=0.0,
                                                         max_val=5.0)

            # one AI task holding every requested analog-input channel
            for i in self.ai_ch:
                ai_ch = self.get_channame('ai', i)
                self.task_ai.ai_channels.add_ai_voltage_chan(ai_ch)

            self.task_ai.timing.cfg_samp_clk_timing(self.scan_rate,samps_per_chan=self.ai_buffersize, \
                                                    sample_mode= nidaqmx.constants.AcquisitionType.CONTINUOUS)

            self.scan_rate = self.task_ai.timing.samp_clk_rate  # actual sample rate set in the USB-6009
            self.read_request_size = int(
                self.scan_rate * self.acq_dur)  # requested sample number

        except (nidaqmx.errors.DaqError, KeyError, ValueError) as err:
            logging.error(err, exc_info=True)

    def get_channame(self, chtype, ch):
        """
        Return the device-qualified channel name, e.g. 'Dev1/ai0'.

        chtype: 'ao' or 'ai'
        ch: channel number
        """
        return "{}/{}{}".format(self.dev_id, chtype, ch)

    def get_ai_ch_inx(self, ch):
        """
        Return the position of analog-input channel `ch` within the AI
        task's channel list (i.e. its row index in read data).
        """
        target = self.get_channame('ai', ch)
        return self.task_ai.channel_names.index(target)

    def record_N_sample(self):
        """
        Read until self.read_request_size samples per channel have been
        collected, accumulating into self.data (channels x samples),
        self.t (relative time), self.data_len and self.data_acqtime.

        Each pass reads whatever is buffered (READ_ALL_AVAILABLE), so
        segment sizes vary; the loop exits early if the aiworker is
        paused or stopped.
        """

        #self.total_samples_read =0
        self.acq_counter = 0
        segment_size = nidaqmx.constants.READ_ALL_AVAILABLE  # read all buffered samples per pass
        N = self.read_request_size

        segdur = 1  #self.ai_buffersize/self.scan_rate
        while self.total_nsample_perchannel < N:

            if not self.aiworker.running():
                break

            # blocking read: data is a per-channel list of sample lists
            data = self.task_ai.read(segment_size, self.timeout)
            self.data_acqtime[self.acq_counter] = self.timer.elapsed_time()
            nsample = len(data[0])  # samples per channel in this segment

            dataseg = np.array(data)
            # time offset of this segment from the start of the acquisition
            timeoff = self.total_nsample_perchannel / self.scan_rate
            tseg = timeoff + np.array(range(0, nsample)) / self.scan_rate

            if self.acq_counter == 0:
                self.data = dataseg
                self.t = tseg
            else:
                # append along the sample axis
                # NOTE(review): repeated hstack grows quadratically over many segments
                self.data = np.hstack((self.data, dataseg))
                self.t = np.hstack((self.t, tseg))

            self.data_len[self.acq_counter] = nsample

            workername = self.aiworker.getName()
            logging.debug('{}: counter:{}, nsample:{}, abstime:{}'.format(
                workername, self.acq_counter, nsample,
                self.data_acqtime[self.acq_counter]))

            self.total_nsample_perchannel += nsample
            self.acq_counter += 1
            sleep(segdur * 0.95)  # pace reads at just under one segdur

    def wait_for_trigger(self):
        """
        Block until the trigger AI channel crosses 2.5 V in the direction
        given by self.trg_mode ('rising_edge' or 'falling_edge').
        """
        trg_inx = self.get_ai_ch_inx(self.trg_chan)
        while True:
            sample = self.task_ai.read()
            level = np.mean(sample[trg_inx])
            rising = self.trg_mode == 'rising_edge' and level > 2.5
            falling = self.trg_mode == 'falling_edge' and level < 2.5
            if rising or falling:
                logging.info('Triggered')
                break
            sleep(0.001)

    def write_volt(self, vals):
        """
        Write one voltage per configured AO channel.

        vals: list with one value per analog-output channel; if the count
        does not match, an error is logged and nothing is written.
        """
        expected = len(self.task_ao.channel_names)
        if expected != len(vals):
            logging.error('No. ch:{}, No. val:{}'.format(expected, len(vals)))
        else:
            self.task_ao.write(vals)

    def acq_start(self):
        """
        Thread entry point (run via aiworker.start()): loop until
        self.aiworker_live is cleared by cleanup().  Whenever the worker
        is in the running state, optionally wait for the trigger, start
        the AI task and record the requested number of samples.
        """
        while self.aiworker_live:
            if self.aiworker.running():
                if self.mode == 'trigger':
                    self.wait_for_trigger()
                self.task_ai.start()
                self.record_N_sample()
            sleep(0.1)

    def acq_resume(self):
        """
        Start or resume analog-input acquisition: launch the worker thread
        on first use, or lift its pause state if it is already alive.
        """
        if not self.aiworker.is_alive():
            # thread has never been started
            self.aiworker.start()
            logging.info("aiworker is started")

        if not self.aiworker.running():
            # thread exists but is currently paused
            self.aiworker.resume()
            logging.info("aiworker resumes working")

    def acq_stop(self):
        """
        Stop both DAQ tasks (AI first, then AO) and pause the worker.
        """
        for task in (self.task_ai, self.task_ao):
            task.stop()
        self.aiworker.stop()

    def cleanup(self):
        """Stop the worker and clear the live flag so acq_start's loop exits."""
        self.aiworker.stop()
        self.aiworker_live = False