Example #1
File: spectrum.py Project: l3enny/rovib
def __setitem__(self, wavelength, intensity):
    # If the wavelength is already present, just overwrite its intensity.
    index, = pylab.where(self.wavelengths == wavelength)
    if pylab.any(index.shape):
        self.intensities[index] = intensity
    else:
        # Otherwise insert the new point so both arrays stay sorted by wavelength.
        index, = pylab.where(self.wavelengths < wavelength)
        if pylab.any(index.shape):
            self.wavelengths = pylab.insert(self.wavelengths, index[-1] + 1,
                                            wavelength)
            self.intensities = pylab.insert(self.intensities, index[-1] + 1,
                                            intensity)
        else:
            # The new wavelength is below every existing one; prepend it.
            self.wavelengths = pylab.insert(self.wavelengths, 0, wavelength)
            self.intensities = pylab.insert(self.intensities, 0, intensity)
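For reference, pylab.insert is simply numpy.insert re-exported, so the sorted-insert bookkeeping above can be condensed with numpy.searchsorted. A minimal self-contained sketch, assuming plain NumPy arrays (sorted_insert is a made-up name for illustration, not part of the project):

import numpy as np

def sorted_insert(wavelengths, intensities, wavelength, intensity):
    # Find the insertion point that keeps wavelengths sorted.
    i = np.searchsorted(wavelengths, wavelength)
    if i < wavelengths.size and wavelengths[i] == wavelength:
        intensities[i] = intensity  # existing wavelength: overwrite in place
        return wavelengths, intensities
    return (np.insert(wavelengths, i, wavelength),
            np.insert(intensities, i, intensity))

w = np.array([400.0, 500.0, 600.0])
s = np.array([1.0, 2.0, 3.0])
w, s = sorted_insert(w, s, 550.0, 2.5)
print(w)  # [400. 500. 550. 600.]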
Example #2
File: lynxio.py Project: kghose/neurapy
def read_csc(fin, assume_same_fs=True):
  """Read a continuous record file. We return the raw packets but, in addition, if we set assume_same_fs as true we
  return a trace with all the data concatenated together, assuming that a constant sampling frequency was maintained
  through out. Gaps in the record are padded with zeros.
  Input:
    fin - file handle
    assume_same_fs - if True, concatenate any segments together, fill time gaps with zeros and return average Fs
  Ouput:
    Dictionary with fields
      'header' - the file header
      'packets' - the actual packets as read. This is a new pylab dtype with fields:
        'timestamp' - timestamp (us)
        'chan' - channel
        'Fs' - the sampling frequency
        'Ns' - the number of valid samples in the packet
        'samp' - the samples in the packet.
          e.g. x['packets']['samp'] will return a 2D array, number of packets long and 512 wide (since each packet carries 512 wave points)
          similarly x['packets']['timestamp'] will return an array number of packets long
      'Fs': the average frequency computed from the timestamps (can differ from the nominal frequency the device reports)
      'trace': the concatenated data from all the packets
      't0': the timestamp of the first packet.
  NOTE: while 'packets' returns the exact packets read, 'Fs' and 'trace' assume that the record has no gaps and that the
  sampling frequency has not changed during the recording
  """
  hdr = read_header(fin)
  csc_packet = pylab.dtype([
    ('timestamp', 'Q'),
    ('chan', 'I'),
    ('Fs', 'I'),
    ('Ns', 'I'),
    ('samp', '512h')
  ])

  data = pylab.fromfile(fin, dtype=csc_packet, count=-1)
  Fs = None
  trace = None
  if assume_same_fs:
    if data['Fs'].std() > 1e-6:
      logger.warning('Fs is not fixed across trace, not packing packets together')
      assume_same_fs = False

  if not assume_same_fs: return {'header': hdr, 'packets': data}

  packet_duration_us = 512*(1./data['Fs'][0])*1e6
  #For the version we are dealing with, Neuralynx packets are always 512
  #This is actually a very poor estimate if the sampling freq is low, since it rounds to nearest Hz
  #So we'll not rely on this but come up with our own estimate

  samp = data['samp']
  ts_us = data['timestamp']
  dt_us = pylab.diff(ts_us).astype('f')
  idx = pylab.flatnonzero(dt_us > packet_duration_us) #This will find any instances where we paused the recording
  if idx.size == 0:#No padding needed
    trace = samp.ravel()
    Fs = (data['Ns'][:-1]/(dt_us*1e-6)).mean()
  else: #We have some padding to do.
    logger.debug('Gaps in record, padding')
    #Our first task is to find all the contiguous sections of data
    idx += 1 #Shifting indexes to point at the packets that come after a gap
    idx = pylab.insert(idx, 0, 0) #Now idx contains the indexes of every packet that starts a contiguous section
  idx = pylab.append(idx, ts_us.size) #And the index of the last packet
    Ns = data['Ns']
    estimFs_sum = 0
    N_samps = 0
    sections = []
  for n in range(idx.size - 1): #collect all the sections
      n0 = idx[n]; n1=idx[n+1]
      sections.append(samp[n0:n1].ravel())
      if n1-n0 > 1:#We need more than one packet in a section to get an estimate
        estimFs_sum += (Ns[n0:n1-1]/(dt_us[n0:n1-1]*1e-6)).sum()
        N_samps += n1-1-n0

    Fs = estimFs_sum / float(N_samps)
    #Now pad the data appropriately
    padded = [sections[0]]
    cum_N = sections[0].size
    for n in range(1, len(sections)):
      #Now figure out how many zeros we have to pad to get the right length
      Npad = int((ts_us[idx[n]] - ts_us[0])*1e-6*Fs - cum_N)
      padded.append(pylab.zeros(Npad))
      padded.append(sections[n])
      cum_N += Npad + sections[n].size
    trace = pylab.concatenate(padded) #Join the sections and their zero padding into one trace

  return {'header': hdr, 'packets': data, 'Fs': Fs, 'trace': trace, 't0': ts_us[0]}
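The index bookkeeping in the padding branch is easy to sanity-check in isolation. A small sketch with made-up timestamps, using plain NumPy (np.flatnonzero stands in for the long-removed pylab.find):

import numpy as np

ts_us = np.array([0, 1000, 2000, 10000, 11000])  # gap after the third packet
packet_duration_us = 1000.0

dt_us = np.diff(ts_us).astype('f')
idx = np.flatnonzero(dt_us > packet_duration_us)  # packets followed by a pause
idx += 1                          # point at the packets that start a new section
idx = np.insert(idx, 0, 0)        # the first packet always starts a section
idx = np.append(idx, ts_us.size)  # sentinel one past the last packet
print(idx)  # [0 3 5] -> contiguous sections are packets [0:3] and [3:5]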
Example #3
print(len(rho), len(beta), len(d))
N = np.ones_like(rho) * 2
mif.create_layers(N, d, rho, beta)
N = len(rho)
print(N)
mif.evolve(qmax=qmax,
           iplot=10,
           iprint=3,
           deltam=0.001,
           avg=3,
           saveFig=True,
           dir=dir + 'Images/')
x = mif.x
y = mif.y
yerr = mif.y_err
#z=mif.z
q = np.arange(data[0, 0], data[-1, 0], 0.001)
fit = mif.func(q)
rho = mif.rhol
z = mif.z
print(len(rho), len(z))
#fit_fname=fname.split('.')[0]+'_fit1.txt'
fit_fname = fname + '_fit1.txt'
#rho_fname=fname.split('.')[0]+'_edp1.txt'
rho_fname = fname + '_edp1.txt'
edp = np.vstack((z, rho)).transpose()
# Extend the profile 20 units on both sides: prepend a point before the
# first z and push the last z further out.
edp = pl.insert(edp, 0, [z[0] - 20, rho[0]], axis=0)
edp[-1, 0] = z[-1] + 20
np.savetxt(dir + fit_fname, np.vstack((q, fit)).transpose())
np.savetxt(dir + rho_fname, edp)
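As in the other examples, pl.insert here is numpy.insert; with axis=0 it prepends a whole row to the 2-D density profile. A minimal sketch of that call with made-up values:

import numpy as np

z = np.array([0.0, 1.0, 2.0])
rho = np.array([0.30, 0.33, 0.36])
edp = np.vstack((z, rho)).transpose()                 # shape (3, 2): columns z, rho
edp = np.insert(edp, 0, [z[0] - 20, rho[0]], axis=0)  # prepend an extended point
print(edp[0])  # [-20.     0.3]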
Example #4
def read_bunch_data(file_list, format='ascii'):

    A, R = [], []
    
    if format == 'ascii':
        data_list = [f.replace('.cfg', '_prt.dat') for f in file_list]

        for i, d in enumerate(data_list):
            try:
                A.append(plt.loadtxt(d).T)
            except IOError:
                print('*** WARNING: no file ', d)
                # R.append((i, file_list[i]))
                A.append(A[-1])
            except ValueError:
                print(i, d)
                raise
    elif format == 'hdf5':
        data_list = [f.replace('.cfg', '.prt.h5') for f in file_list]

        for i, d in enumerate(data_list):
            try:
                hf = h5py.File(d, 'r')
                A.append(
                    plt.insert(hf['Fields']['Data'], 0, hf['Time']['time'], axis=1).T)
            except IOError:
                print('*** WARNING: no file ', d)
                R.append((i, file_list[i]))
    elif format == 'h5':
        data_list = file_list
        A = None

        for i, d in enumerate(data_list):
            try:
                hf = h5py.File(d, 'r')['Bunch']
                if A is None:
                    keys = list(hf.keys())
                    a = hf[keys[0]]
                    A = plt.zeros((len(data_list), len(keys) + 1, len(a)))
                A[i, 0, :] = plt.arange(len(hf['mean_x']))
                A[i, 1, :] = hf['mean_x']
                A[i, 2, :] = hf['mean_xp']
                A[i, 3, :] = hf['mean_y']
                A[i, 4, :] = hf['mean_yp']
                A[i, 5, :] = hf['mean_z']
                A[i, 6, :] = hf['mean_dp']
                A[i, 7, :] = hf['sigma_x']
                A[i, 8, :] = hf['sigma_y']
                A[i, 9, :] = hf['sigma_z']
                A[i, 10, :] = hf['sigma_dp']
                A[i, 11, :] = hf['epsn_x']
                A[i, 12, :] = hf['epsn_y']
                A[i, 13, :] = hf['epsn_z']
                A[i, 14, :] = hf['n_macroparticles']
                # A.append(plt.array(
                #     [,
                #      hf['mean_x'], hf['mean_xp'], hf['mean_y'], hf['mean_yp'], hf['mean_dz'], hf['mean_dp'],
                #      hf['sigma_x'], hf['sigma_y'], hf['sigma_dz'], hf['sigma_dp'],
                #      hf['epsn_x'], hf['epsn_y'], hf['epsn_z'], hf['n_macroparticles']]).T)
            except IOError:
                print('*** WARNING: no file ', d)
                R.append((i, file_list[i]))
            except KeyError:
                print('*** WARNING: damaged file ', d)
                R.append((i, file_list[i]))
    else:
        raise ValueError('*** Unknown format: %s' % format)

    for r in R:
        file_list.remove(r[1])
    delete_from_data_array = [r[0] for r in R]
    A = plt.delete(A, delete_from_data_array, axis=0)

    A = plt.array(A)
    A = plt.rollaxis(A, 0, 3)
    # [n_cols, n_turns, n_files] = A.shape

    return A, file_list
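The cleanup step at the end relies on plt.delete (again numpy.delete) to drop the rows belonging to files that failed to load. A minimal sketch of that pattern with made-up data:

import numpy as np

A = np.arange(12).reshape(4, 3)   # one row per data file
failed = [1, 3]                   # indices of files that could not be read
A = np.delete(A, failed, axis=0)  # drop the corresponding rows
print(A.shape)  # (2, 3)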