def get_single (self): """ gets one block of data """ taskHandle = TaskHandle(0) read = uInt32() fnum = float64() lnum = uInt64() try: DAQmxLoadTask(self.taskname,ctypes.byref(taskHandle)) if self.numchannels<1: raise RuntimeError except RuntimeError: # no valid task time_data_import.getdata(self,td) return #import data ac = numpy.empty((self.numsamples,self.numchannels),numpy.float32) DAQmxGetSampQuantSampPerChan(taskHandle,ctypes.byref(lnum)) max_num_samples = lnum.value data = numpy.empty((max_num_samples,self.numchannels),dtype=numpy.float64) DAQmxStartTask(taskHandle) count = 0L numsamples = self.numsamples while count<numsamples: DAQmxReadAnalogF64(taskHandle,-1,float64(10.0), DAQmx_Val_GroupByScanNumber,data.ctypes.data, data.size,ctypes.byref(read),None) anz = min(read.value,numsamples-count) ac[count:count+anz]=numpy.array(data[:anz],dtype=numpy.float32) count+=read.value DAQmxStopTask(taskHandle) DAQmxClearTask(taskHandle) return ac
def get_data(self, td): """ main work is done here: imports the data from CSV file into TimeSamples object td and saves also a '*.h5' file so this import need not be performed every time the data is needed """ taskHandle = TaskHandle(0) read = uInt32() fnum = float64() lnum = uInt64() try: DAQmxLoadTask(self.taskname, ctypes.byref(taskHandle)) if self.numchannels < 1: raise RuntimeError DAQmxSetSampClkRate(taskHandle, float64(self.sample_freq)) except RuntimeError: # no valid task time_data_import.getdata(self, td) return #import data name = td.name if name == '': name = datetime.now().isoformat('_').replace(':', '-').replace( '.', '_') name = path.join(td_dir, name + '.h5') f5h = tables.open_file(name, mode='w') ac = f5h.create_earray(f5h.root, 'time_data', tables.atom.Float32Atom(), (0, self.numchannels)) ac.set_attr('sample_freq', self.sample_freq) DAQmxSetSampQuantSampPerChan(taskHandle, uInt64(100000)) DAQmxGetSampQuantSampPerChan(taskHandle, ctypes.byref(lnum)) max_num_samples = lnum.value print "Puffergroesse: %i" % max_num_samples data = numpy.empty((max_num_samples, self.numchannels), dtype=numpy.float64) DAQmxStartTask(taskHandle) count = 0L numsamples = self.numsamples while count < numsamples: #~ DAQmxReadAnalogF64(taskHandle,-1,float64(10.0), #~ DAQmx_Val_GroupByScanNumber,data.ctypes.data, #~ data.size,ctypes.byref(read),None) DAQmxReadAnalogF64(taskHandle, 1024, float64(10.0), DAQmx_Val_GroupByScanNumber, data.ctypes.data, data.size, ctypes.byref(read), None) ac.append( numpy.array(data[:min(read.value, numsamples - count)], dtype=numpy.float32)) count += read.value #~ if read.value>200: #~ print count, read.value DAQmxStopTask(taskHandle) DAQmxClearTask(taskHandle) f5h.close() td.name = name td.load_data()
def get_data (self,td):
    """
    Main work is done here: imports the acquired data into the
    TimeSamples object td and saves also a '*.h5' file so this import
    need not be performed every time the data is needed.

    NOTE(review): the original docstring said "from CSV file", but the
    code reads from an NI-DAQmx task -- confirm intended description.
    Falls back to time_data_import.getdata(self, td) when the task
    cannot be loaded or reports no channels.
    """
    # handle for the DAQmx task; 0 = not yet loaded
    taskHandle = TaskHandle(0)
    # samples per channel actually read by each DAQmxReadAnalogF64 call
    read = uInt32()
    fnum = float64()  # NOTE(review): unused
    lnum = uInt64()
    try:
        DAQmxLoadTask(self.taskname,ctypes.byref(taskHandle))
        if self.numchannels<1:
            raise RuntimeError
        DAQmxSetSampClkRate(taskHandle,float64(self.sample_freq))
    except RuntimeError:
        # no valid task: delegate to the generic importer
        time_data_import.getdata(self,td)
        return
    #import data
    name = td.name
    if name=='':
        # derive a file name from the current timestamp
        name = datetime.now().isoformat('_').replace(':','-').replace('.','_')
    name = path.join(td_dir,name+'.h5')
    f5h = tables.open_file(name,mode='w')
    # extendable float32 array, one row per scan across all channels
    ac = f5h.create_earray(f5h.root,'time_data',tables.atom.Float32Atom(),(0,self.numchannels))
    ac.set_attr('sample_freq',self.sample_freq)
    # request a 100000-sample buffer, then query the size the driver
    # actually granted
    DAQmxSetSampQuantSampPerChan(taskHandle,uInt64(100000))
    DAQmxGetSampQuantSampPerChan(taskHandle,ctypes.byref(lnum))
    max_num_samples = lnum.value
    print "Puffergroesse: %i" % max_num_samples
    data = numpy.empty((max_num_samples,self.numchannels),dtype=numpy.float64)
    DAQmxStartTask(taskHandle)
    count = 0L
    numsamples = self.numsamples
    while count<numsamples:
        #~ DAQmxReadAnalogF64(taskHandle,-1,float64(10.0),
            #~ DAQmx_Val_GroupByScanNumber,data.ctypes.data,
            #~ data.size,ctypes.byref(read),None)
        # read up to 1024 samples per channel (10 s timeout),
        # scan-interleaved, into the preallocated float64 buffer
        DAQmxReadAnalogF64(taskHandle,1024,float64(10.0),
            DAQmx_Val_GroupByScanNumber,data.ctypes.data,
            data.size,ctypes.byref(read),None)
        # append only as many samples as are still needed, as float32
        ac.append(numpy.array(data[:min(read.value,numsamples-count)],dtype=numpy.float32))
        count+=read.value
        #~ if read.value>200:
            #~ print count, read.value
    DAQmxStopTask(taskHandle)
    DAQmxClearTask(taskHandle)
    f5h.close()
    # point td at the freshly written file and load from it
    td.name = name
    td.load_data()
def get_single(self): """ gets one block of data """ taskHandle = TaskHandle(0) read = uInt32() fnum = float64() lnum = uInt64() try: DAQmxLoadTask(self.taskname, ctypes.byref(taskHandle)) if self.numchannels < 1: raise RuntimeError except RuntimeError: # no valid task time_data_import.getdata(self, td) return #import data ac = numpy.empty((self.numsamples, self.numchannels), numpy.float32) DAQmxGetSampQuantSampPerChan(taskHandle, ctypes.byref(lnum)) max_num_samples = lnum.value data = numpy.empty((max_num_samples, self.numchannels), dtype=numpy.float64) DAQmxStartTask(taskHandle) count = 0L numsamples = self.numsamples while count < numsamples: DAQmxReadAnalogF64(taskHandle, -1, float64(10.0), DAQmx_Val_GroupByScanNumber, data.ctypes.data, data.size, ctypes.byref(read), None) anz = min(read.value, numsamples - count) ac[count:count + anz] = numpy.array(data[:anz], dtype=numpy.float32) count += read.value DAQmxStopTask(taskHandle) DAQmxClearTask(taskHandle) return ac