Example #1
def train_mwu_mkl(kerns, param, feat, 
                  Xtr, ytr,
                  eps = 0.2, C = 1000.0, norm1or2 = 2, 
                  verbose = 0):
    """
    (success, Sigma, alpha, bsvm, posw) = 
    train_mwu_mkl(
      kerns, kern_params, features, 
      Xtr, ytr, 
      eps = 0.2, C = 1000.0, verbose = 0
    )
    """
    (m,) = kerns.shape
    (d,ntr) = Xtr.shape
    in_kerns = np.require(kerns, dtype = np.int32,   requirements = ['C'])
    in_param = np.require(param, dtype = np.float64, requirements = ['C'])
    in_feat  = np.require(feat,  dtype = np.int32,   requirements = ['C'])
    in_Xtr   = np.require(Xtr,   dtype = np.float64, requirements = ['C'])
    in_ytr   = np.require(ytr,   dtype = np.int32,   requirements = ['C'])
    return _train_mwu_mkl(m, ntr, ntr, 
                          in_kerns, 
                          in_param, 
                          in_feat,  
                          in_Xtr,   
                          in_ytr,   
                          d, ntr, m, 
                          eps, 1.0, 20.0, C, norm1or2, 
                          verbose)
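A note on the np.require pattern used throughout these examples: np.require returns the input array unchanged when it already satisfies the requested dtype and requirements, and copies only when it must, so it is cheap to call defensively before handing buffers to C code. A minimal self-contained sketch in plain numpy (illustrative values):

import numpy as np

x = np.arange(6, dtype=np.float64).reshape(2, 3)
y = np.require(x, dtype=np.float64, requirements=['C'])
print(y is x)                       # True: x already satisfies dtype and C order

xt = x.T                            # transposing yields an F-contiguous view
z = np.require(xt, requirements=['C'])
print(z.flags['C_CONTIGUOUS'])      # True
print(z is xt)                      # False: a C-ordered copy had to be made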
Example #2
File: layer.py Project: phecy/striate
  def fc_layer(self, ld):
    epsB = ld['epsB']
    epsW = ld['epsW']
    initB = ld.get('initB', 0.0)
    initW = ld['initW']
    momB = ld['momB']
    momW = ld['momW']

    wc = ld['wc']
    dropRate = ld.get('dropRate', 0.0)

    n_out = ld['outputs']
    bias = ld.get('biases', None)
    weight = ld.get('weights', None)
    
    if bias is not None: 
      bias = bias.transpose()
      bias = np.require(bias, dtype = np.float32, requirements = 'C')
    if weight is not None: 
      weight = weight.transpose()
      weight = np.require(weight, dtype = np.float32, requirements = 'C')
    
    name = ld['name']
    input_shape = ld['inputShape']
    return FCLayer(name, input_shape, n_out, epsW, epsB, initW, initB, momW = momW,
        momB = momB, wc = wc, dropRate = dropRate, weight = weight, bias = bias)
Example #3
 def cache_z(self, z):
     x = np.require(z.real, dtype = np.double, requirements = ['A','W','O','C'])
     y = np.require(z.imag, dtype = np.double, requirements = ['A','W','O','C'])
     xd = gpuarray.to_gpu(x)
     yd = gpuarray.to_gpu(y)
     cuda.memcpy_dtod(self.xd, xd.ptr, xd.nbytes)
     cuda.memcpy_dtod(self.yd, yd.ptr, yd.nbytes)
Example #4
File: mf.py Project: chrinide/pyscf
  def read_wfsx(self, fname, **kw):
    """ An occasional reading of the SIESTA's .WFSX file """
    from pyscf.nao.m_siesta_wfsx import siesta_wfsx_c
    from pyscf.nao.m_siesta2blanko_denvec import _siesta2blanko_denvec
    from pyscf.nao.m_fermi_dirac import fermi_dirac_occupations

    self.wfsx = siesta_wfsx_c(fname=fname, **kw)
    
    assert self.nkpoints == self.wfsx.nkpoints
    assert self.norbs == self.wfsx.norbs 
    assert self.nspin == self.wfsx.nspin
    orb2m = self.get_orb2m()
    for k in range(self.nkpoints):
      for s in range(self.nspin):
        for n in range(self.norbs):
          _siesta2blanko_denvec(orb2m, self.wfsx.x[k,s,n,:,:])

    self.mo_coeff = np.require(self.wfsx.x, dtype=self.dtype, requirements='CW')
    self.mo_energy = np.require(self.wfsx.ksn2e, dtype=self.dtype, requirements='CW')
    self.telec = kw['telec'] if 'telec' in kw else self.hsx.telec
    self.nelec = kw['nelec'] if 'nelec' in kw else self.hsx.nelec
    self.fermi_energy = kw['fermi_energy'] if 'fermi_energy' in kw else self.fermi_energy
    ksn2fd = fermi_dirac_occupations(self.telec, self.mo_energy, self.fermi_energy)
    self.mo_occ = (3-self.nspin)*ksn2fd
    return self
Example #5
 def compute_v_without_derivs(self, Xs, Yinvs, Ts):
     #Turn the parts of omega into gpuarrays
     Xs = np.require(Xs, dtype = np.double, requirements=['A', 'W', 'O', 'C'])
     Yinvs = np.require(Yinvs, dtype = np.double, requirements=['A', 'W', 'O', 'C'])
     Ts = np.require(Ts, dtype = np.double, requirements=['A', 'W', 'O', 'C'])
     Xs_d = gpuarray.to_gpu(Xs)
     Yinvs_d = gpuarray.to_gpu(Yinvs)
     Ts_d = gpuarray.to_gpu(Ts)
     #Determine N = the number of integer points to sum over
     #          K = the number of different omegas to compute the function at
     N = self.Sd.size/self.g
     K = Xs.size/(self.g**2)
     #Create room on the gpu for the real and imaginary finite sum calculations
     fsum_reald = gpuarray.zeros(N*K, dtype=np.double)
     fsum_imagd = gpuarray.zeros(N*K, dtype=np.double)
     #Turn all scalars into numpy data types
     Nd = np.int32(N)
     Kd = np.int32(K)
     gd = np.int32(self.g)
     blocksize = (self.tilewidth, self.tileheight, 1)
     gridsize = (N//self.tilewidth + 1, K//self.tileheight + 1, 1)
     self.finite_sum_without_derivs(fsum_reald, fsum_imagd, Xs_d, Yinvs_d, Ts_d,
                                    self.Sd, gd, Nd, Kd,
                                    block = blocksize,
                                    grid = gridsize)
     cuda.Context.synchronize()
     fsums_real = self.sum_reduction(fsum_reald, N, K, Kd, Nd)
     fsums_imag = self.sum_reduction(fsum_imagd, N, K, Kd, Nd)
     return fsums_real + 1.0j*fsums_imag
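The +1 in the grid size above guarantees that a partial tile at the edge is still covered (at the cost of one spare block when the size divides evenly, which the kernel must bounds-check). The same ceiling logic with plain integers:

tilewidth, N = 32, 100
blocks = N // tilewidth + 1      # 4 blocks cover all 100 columns
assert blocks * tilewidth >= N   # the kernel bounds-checks the overhang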
Example #6
    def get_next_batch(self):
        self.advance_batch()

        epoch = self.curr_epoch        
        batch_num = self.curr_batchnum

        tp_batch_ind = self.tp_batch_dic[batch_num]

        # print 'tp-info, batch_name: %d' % tp_batch_ind
        if self.multiview:
            data,labels = tp_utils.make_multiview_batch_n_labels(self.tp_dataStore,self.tp_batches[tp_batch_ind],self.tp_class_dict)
        else:
            data,labels = tp_utils.make_batch_n_labels(self.tp_dataStore,self.tp_batches[tp_batch_ind],self.tp_class_dict)

        data.shape

        # data = tp_utils.make_batch(self.tp_dataStore,self.tp_batches[tp_batch_ind])
        # labels = tp_utils.make_batch_labels(self.tp_class_dict,self.tp_batches[tp_batch_ind])
        #dic = {'data':data,'labels':labels}
        
        data = np.require(data,requirements='C')

        #tp_utils.test_data(data[:,0])

        labels = np.require(labels,requirements='C')

        return epoch, batch_num, [data, labels]
Example #7
    def check_distance(self, parent_ix, coords):
        '''Check that the distance between the coordinates `coords` and the center
        `parent_ix` is less than the distance to any other center in the parent level'''
        if parent_ix is None:
            return True

        try:
            passed_coord_dtype = coords.dtype
        except AttributeError:
            coords = np.require(coords, dtype=coord_dtype)
        else:
            if passed_coord_dtype != coord_dtype:
                coords = np.require(coords, dtype=coord_dtype)

        coords = coords.reshape((1, -1))

        assert len(coords) == 1
        parent_level = self.bin_graph.node[parent_ix]['level']
        level_indices = self.level_indices[parent_level]
        parent_centers = self.fetch_centers(level_indices)

        mask = np.ones((1,), dtype=np.bool_)
        output = np.empty((1,), dtype=index_dtype)
        min_dist = np.empty((1,), dtype=coord_dtype)
        self._assign_level(coords, parent_centers, mask, output, min_dist)

        res = output[0] == level_indices.index(parent_ix)

        return res
Example #8
    def get_next_batch(self):
        epoch, batchnum, dic = LabeledDummyDataProvider.get_next_batch(self)

        dic['data'] = n.require(dic['data'].T, requirements='C')
        dic['labels'] = n.require(dic['labels'].T, requirements='C')

        return epoch, batchnum, [dic['data'], dic['labels']]
Example #9
def setdiff_rows(A, B, return_index=False):
    """
    Similar to MATLAB's setdiff(A, B, 'rows'), this returns C, I
    where C contains the rows of A that are not in B and I satisfies
    C = A[I,:].

    Returns I if return_index is True.
    """
    A = np.require(A, requirements='C')
    B = np.require(B, requirements='C')

    assert A.ndim == 2, "array must be 2-dim'l"
    assert B.ndim == 2, "array must be 2-dim'l"
    assert A.shape[1] == B.shape[1], \
           "arrays must have the same number of columns"
    assert A.dtype == B.dtype, \
           "arrays must have the same data type"

    # NumPy provides setdiff1d, which operates only on one dimensional
    # arrays. To make the array one-dimensional, we interpret each row
    # as being a string of characters of the appropriate length.
    orig_dtype = A.dtype
    ncolumns = A.shape[1]
    dtype = np.dtype((np.character, orig_dtype.itemsize*ncolumns))
    C = np.setdiff1d(A.view(dtype), B.view(dtype)) \
        .view(A.dtype) \
        .reshape((-1, ncolumns), order='C')
    if return_index:
        raise NotImplementedError
    else:
        return C
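A quick sanity check of setdiff_rows with made-up values (note that np.setdiff1d sorts the byte view, so the row order of C is not guaranteed to follow A):

import numpy as np
A = np.array([[1, 2], [3, 4], [5, 6]])
B = np.array([[3, 4]])
C = setdiff_rows(A, B)   # rows of A that do not appear in B
# C contains the rows [1, 2] and [5, 6]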
Example #10
 def compute_v_without_derivs(self, Z):
     #Turn the numpy set Z into gpuarrays
     x = Z.real
     y = Z.imag
     x = np.require(x, dtype = np.double, requirements=['A','W','O','C'])
     y = np.require(y, dtype = np.double, requirements=['A','W','O','C'])
     xd = gpuarray.to_gpu(x)
     yd = gpuarray.to_gpu(y)
     self.yd = yd
     #Determine N = the number of integer points to sum over and
     #         K = the number of values to compute the function at
     N = self.Sd.size/self.g
     K = Z.size/self.g
     #Create room on the gpu for the real and imaginary finite sum calculations
     fsum_reald = gpuarray.zeros(N*K, dtype=np.double)
     fsum_imagd = gpuarray.zeros(N*K, dtype=np.double)
     #Make all scalars into numpy data types
     Nd = np.int32(N)
     Kd = np.int32(K)
     gd = np.int32(self.g)
     blocksize = (self.tilewidth, self.tileheight, 1)
     gridsize = (N//self.tilewidth + 1, K//self.tileheight + 1, 1)
     self.finite_sum_without_derivs(fsum_reald, fsum_imagd, xd, yd, 
                  self.Sd, gd, Nd, Kd,
                  block = blocksize,
                  grid = gridsize)
     cuda.Context.synchronize()
     fsums_real = self.sum_reduction(fsum_reald, N, K, Kd, Nd)
     fsums_imag = self.sum_reduction(fsum_imagd, N, K, Kd, Nd)
     return fsums_real + 1.0j*fsums_imag
Example #11
File: actors.py Project: kefir-/tatlin
    def load_data(self, model_data, callback=None):
        t_start = time.time()

        vertices, normals = model_data
        # convert python lists to numpy arrays for constructing vbos
        self.vertices = numpy.require(vertices, 'f')
        self.normals  = numpy.require(normals, 'f')

        self.scaling_factor = 1.0
        self.rotation_angle = {
            self.AXIS_X: 0.0,
            self.AXIS_Y: 0.0,
            self.AXIS_Z: 0.0,
        }

        self.mat_specular   = (1.0, 1.0, 1.0, 1.0)
        self.mat_shininess  = 50.0
        self.light_position = (20.0, 20.0, 20.0)

        self.initialized = False

        t_end = time.time()

        logging.info('Initialized STL model in %.2f seconds' % (t_end - t_start))
        logging.info('Vertex count: %d' % len(self.vertices))
Example #12
def autocorr_fft(signal, axis = -1):
    """Return full autocorrelation along specified axis. Use fft
    for computation."""
    if N.ndim(signal) == 0:
        return signal
    elif signal.ndim == 1:
        n       = signal.shape[0]
        nfft    = int(2 ** nextpow2(2 * n - 1))
        lag     = n - 1
        a       = fft(signal, n = nfft, axis = -1)
        au      = ifft(a * N.conj(a), n = nfft, axis = -1)
        return N.require(N.concatenate((au[-lag:], au[:lag+1])), dtype = signal.dtype)
    elif signal.ndim == 2:
        n       = signal.shape[axis]
        lag     = n - 1
        nfft    = int(2 ** nextpow2(2 * n - 1))
        a       = fft(signal, n = nfft, axis = axis)
        au      = ifft(a * N.conj(a), n = nfft, axis = axis)
        if axis == 0:
            return N.require(N.concatenate( (au[-lag:], au[:lag+1]), axis = axis), \
                    dtype = signal.dtype)
        else:
            return N.require(N.concatenate( (au[:, -lag:], au[:, :lag+1]), 
                        axis = axis), dtype = signal.dtype)
    else:
        raise RuntimeError("rank >2 not supported yet")
Example #13
def bench():
    size    = 256
    nframes = 4000
    lag     = 24

    X       = N.random.randn(nframes, size)
    X       = N.require(X, requirements = 'C')

    niter   = 10

    # Contiguous
    print "Running optimized with ctypes"
    def contig(*args, **kargs):
        return autocorr_oneside_nofft(*args, **kargs)
    for i in range(niter):
        Yt  = contig(X, lag, axis = 1)

    Yr  = _autocorr_oneside_nofft_py(X, lag, axis = 1)
    N.testing.assert_array_almost_equal(Yt, Yr, 10)

    # Non contiguous
    print "Running optimized with ctypes (non contiguous)"
    def ncontig(*args, **kargs):
        return autocorr_oneside_nofft(*args, **kargs)
    X       = N.require(X, requirements = 'F')
    for i in range(niter):
        Yt  = ncontig(X, lag, axis = 1)

    Yr  = _autocorr_oneside_nofft_py(X, lag, axis = 1)
    N.testing.assert_array_almost_equal(Yt, Yr, 10)

    print "Benchmark func done"
Example #14
 def comp_apair_pp_libint(self, a1,a2):
   """ Get's the vertex coefficient and conversion coefficients for a pair of atoms given by their atom indices """
   from operator import mul
   from pyscf.nao.m_prod_biloc import prod_biloc_c
   if not hasattr(self, 'sv_pbloc_data') : raise RuntimeError('.sv_pbloc_data is absent')
   assert a1>=0
   assert a2>=0
   
   t1 = timer()
   sv = self.sv
   aos = self.sv.ao_log
   sp12 = np.require( np.array([sv.atom2sp[a] for a in (a1,a2)], dtype=c_int64), requirements='C')
   rc12 = np.require( np.array([sv.atom2coord[a,:] for a in (a1,a2)]), requirements='C')
   icc2a = np.require( np.array(self.ls_contributing(a1,a2), dtype=c_int64), requirements='C')
   npmx = aos.sp2norbs[sv.atom2sp[a1]]*aos.sp2norbs[sv.atom2sp[a2]]
   npac = sum([self.prod_log.sp2norbs[sv.atom2sp[ia]] for ia in icc2a ])
   nout = c_int64(npmx**2+npmx*npac+10)
   dout = np.require( zeros(nout.value), requirements='CW')
   
   libnao.vrtx_cc_apair( sp12.ctypes.data_as(POINTER(c_int64)), rc12.ctypes.data_as(POINTER(c_double)), icc2a.ctypes.data_as(POINTER(c_int64)), c_int64(len(icc2a)), dout.ctypes.data_as(POINTER(c_double)), nout )    
   if dout[0]<1: return None
   
   nnn = np.array(dout[0:3], dtype=int)
   nnc = np.array([dout[8],dout[7]], dtype=int)
   ncc = int(dout[9])
   if ncc!=len(icc2a): raise RuntimeError('ncc!=len(icc2a)')
   s = 10; f=s+np.prod(nnn); vrtx  = dout[s:f].reshape(nnn)
   s = f;  f=s+np.prod(nnc); ccoe  = dout[s:f].reshape(nnc)
   icc2s = np.zeros(len(icc2a)+1, dtype=np.int64)
   for icc,a in enumerate(icc2a): icc2s[icc+1] = icc2s[icc] + self.prod_log.sp2norbs[sv.atom2sp[a]]
   pbiloc = prod_biloc_c(atoms=array([a2,a1]),vrtx=vrtx,cc2a=icc2a,cc2s=icc2s,cc=ccoe)
   
   return pbiloc
Example #15
File: data.py Project: agomez2/skynet
    def get_next_batch(self):
        self.advance_batch()

        epoch = self.curr_epoch        
        batchnum = self.curr_batchnum
        
        datadic = leveldb.LevelDB(self.data_dir + '/batch-%d' % batchnum)
        img_raw = []
        label_raw = []
        for k, pickled in datadic.RangeIter():
          imgdata = cPickle.loads(pickled)
          img_raw.append(Image.open(c.StringIO(imgdata['data'])))
          label_raw.append(imgdata['label'])
        
        labels = n.array(label_raw)
        images = n.ndarray((len(img_raw), 64 * 64 * 3), dtype=n.single)
        for idx, jpegdata in enumerate(img_raw):
          images[idx] = n.array(jpegdata).reshape(-1)  # flatten each decoded image into its row
     
        print labels.shape
        print images.shape

        images = n.require(images, dtype=n.single, requirements='C')
        labels = labels.reshape((1, images.shape[0]))  # one label per image
        labels = n.require(labels, dtype=n.single, requirements='C')

        return epoch, batchnum, [images, labels]
Example #16
def _ellipsoidLineIntersects_ne(a, b, lineOrigin, lineDirection, directed=True):
    lineOrigin = np.require(lineOrigin, dtype=np.float64)
    lineDirection = np.require(lineDirection, dtype=np.float64)
    
    # turn into column vectors
    direction = lineDirection.T
    origin = -lineOrigin[:,None]
    
    radius = np.array([[1/a], [1/a], [1/b]])
    directionTimesRadius = ne('direction * radius')
    originTimesRadius = ne('origin * radius')
    
    dirDotOri = np.einsum("ij,ij->j", directionTimesRadius, originTimesRadius)
    dirDotDir = np.einsum("ij,ij->j", directionTimesRadius, directionTimesRadius)
    oriDotOri = np.einsum("ij,ij->j", originTimesRadius, originTimesRadius)

    if directed:
        if _isInsideEllipsoid(lineOrigin, a, b):
            intersects = ne('dirDotOri + sqrt(dirDotOri**2 - oriDotOri*dirDotDir + dirDotDir) >= 0')
        else:
            intersects = ne('dirDotOri - sqrt(dirDotOri**2 - oriDotOri*dirDotDir + dirDotDir) >= 0')
    else:
        intersects = ne('dirDotOri**2 - oriDotOri*dirDotDir + dirDotDir >= 0')
        
    return intersects
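The undirected test above is just the sign of the discriminant of |origin + t*direction|^2 = 1 in the radius-scaled frame (note the sign flip in origin = -lineOrigin, which squares away in the dot products). A tiny pure-numpy check against the unit sphere, with illustrative values:

import numpy as np
o = -np.array([2.0, 0.0, 0.0])       # -lineOrigin, as in the function above
d = np.array([-1.0, 0.0, 0.0])       # aiming at the sphere
disc = np.dot(d, o) ** 2 - np.dot(o, o) * np.dot(d, d) + np.dot(d, d)
print(disc >= 0)                     # True: the line intersects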
Example #17
    def get_next_batch(self):
        epoch, batchnum, d = LabeledDataProvider.get_next_batch(self)
        #print(datadic)
        # This converts the data matrix to single precision and makes sure that it is C-ordered
        d['data'] = n.require((d['data'].transpose()), dtype=n.single, requirements='C')
        d['labels'] = n.require(d['labels'].reshape((1, d['data'].shape[1])), dtype=n.single, requirements='C')
        return epoch, batchnum, [d['data'], d['labels']]
Example #18
File: sampler.py Project: AstroVPK/kali
 def sample(self, **kwargs):
     returnLC = self.lcObj.copy()
     probVal = kwargs.get('probability', 1.0)
     sampleSeedVal = kwargs.get('sampleSeed', rand.rdrand(np.array([0], dtype='uint32')))
     np.random.seed(seed=sampleSeedVal)
     keepArray = spstats.bernoulli.rvs(probVal, size=self.lcObj.numCadences)
     newNumCadences = np.sum(keepArray)
     tNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     xNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     yNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     yerrNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     maskNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     counter = 0
     for i in xrange(self.lcObj.numCadences):
         if keepArray[i] == 1:
             tNew[counter] = self.lcObj.t[i]
             xNew[counter] = self.lcObj.x[i]
             yNew[counter] = self.lcObj.y[i]
             yerrNew[counter] = self.lcObj.yerr[i]
             maskNew[counter] = self.lcObj.mask[i]
             counter += 1
     returnLC.t = tNew
     returnLC.x = xNew
     returnLC.y = yNew
     returnLC.yerr = yerrNew
     returnLC.mask = maskNew
     returnLC._numCadences = newNumCadences
     returnLC._checkIsRegular()
     returnLC._times()
     returnLC._statistics()
     return returnLC
Example #19
def actionAngleStaeckel_calcu0(E,Lz,pot,delta):
    """
    NAME:
       actionAngleStaeckel_calcu0
    PURPOSE:
       Use C to calculate u0 in the Staeckel approximation
    INPUT:
       E, Lz - energy and angular momentum
       pot - Potential or list of such instances
       delta - focal length of prolate spheroidal coordinates
    OUTPUT:
       (u0,err)
       u0 : array, shape (len(E))
       err - non-zero if an error occurred
    HISTORY:
       2012-12-03 - Written - Bovy (IAS)
    """
    #Parse the potential
    npot, pot_type, pot_args= _parse_pot(pot,potforactions=True)

    #Set up result arrays
    u0= numpy.empty(len(E))
    err= ctypes.c_int(0)

    #Set up the C code
    ndarrayFlags= ('C_CONTIGUOUS','WRITEABLE')
    actionAngleStaeckel_actionsFunc= _lib.calcu0
    actionAngleStaeckel_actionsFunc.argtypes= [ctypes.c_int,
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ctypes.c_int,
                               ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ctypes.c_double,
                               ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
                               ctypes.POINTER(ctypes.c_int)]

    #Array requirements, first store old order
    f_cont= [E.flags['F_CONTIGUOUS'],
             Lz.flags['F_CONTIGUOUS']]
    E= numpy.require(E,dtype=numpy.float64,requirements=['C','W'])
    Lz= numpy.require(Lz,dtype=numpy.float64,requirements=['C','W'])
    u0= numpy.require(u0,dtype=numpy.float64,requirements=['C','W'])

    #Run the C code
    actionAngleStaeckel_actionsFunc(len(E),
                                    E,
                                    Lz,
                                    ctypes.c_int(npot),
                                    pot_type,
                                    pot_args,
                                    ctypes.c_double(delta),
                                    u0,
                                    ctypes.byref(err))

    #Reset input arrays
    if f_cont[0]: E= numpy.asfortranarray(E)
    if f_cont[1]: Lz= numpy.asfortranarray(Lz)

    return (u0,err.value)
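The F_CONTIGUOUS bookkeeping above keeps the caller's arrays in their original memory order even though the C call needs C-ordered, writeable buffers; a sketch of that round trip in isolation (illustrative array):

import numpy as np
E = np.asfortranarray(np.arange(4.0).reshape(2, 2))
was_f = E.flags['F_CONTIGUOUS']
E = np.require(E, dtype=np.float64, requirements=['C', 'W'])  # C-ordered copy for the C call
# ... C call would go here ...
if was_f:
    E = np.asfortranarray(E)          # restore the original layout
print(E.flags['F_CONTIGUOUS'])        # True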
Example #20
File: sampler.py Project: AstroVPK/kali
 def sample(self, **kwargs):
     returnLC = self.lcObj.copy()
     widthVal = kwargs.get('width', returnLC.T/10.0)
     centerVal = kwargs.get('center', returnLC.T/2.0)
     sampleSeedVal = kwargs.get('sampleSeed', rand.rdrand(np.array([0], dtype='uint32')))
     np.random.seed(seed=sampleSeedVal)
     del returnLC.t
     del returnLC.x
     del returnLC.y
     del returnLC.yerr
     del returnLC.mask
     tList = list()
     xList = list()
     yList = list()
     yerrList = list()
     maskList = list()
     for i in xrange(self.lcObj.numCadences):
         keepYN = np.random.binomial(1, self.normalizedSincSq(widthVal, centerVal, self.lcObj.t[i]))
         if keepYN == 1:
             tList.append(self.lcObj.t[i])
             xList.append(self.lcObj.x[i])
             yList.append(self.lcObj.y[i])
             yerrList.append(self.lcObj.yerr[i])
             maskList.append(self.lcObj.mask[i])
     newNumCadences = len(tList)
     returnLC.t = np.require(np.array(tList), requirements=['F', 'A', 'W', 'O', 'E'])
     returnLC.x = np.require(np.array(xList), requirements=['F', 'A', 'W', 'O', 'E'])
     returnLC.y = np.require(np.array(yList), requirements=['F', 'A', 'W', 'O', 'E'])
     returnLC.yerr = np.require(np.array(yerrList), requirements=['F', 'A', 'W', 'O', 'E'])
     returnLC.mask = np.require(np.array(maskList), requirements=['F', 'A', 'W', 'O', 'E'])
     returnLC._numCadences = newNumCadences
     returnLC._checkIsRegular()
     returnLC._times()
     returnLC._statistics()
     return returnLC
Example #21
File: sampler.py Project: AstroVPK/kali
 def sample(self, **kwargs):
     returnLC = self.lcObj.copy()
     jumpVal = kwargs.get('jump', 1)
     newNumCadences = int(self.lcObj.numCadences/float(jumpVal))
     tNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     xNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     yNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     yerrNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     maskNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     for i in xrange(newNumCadences):
         tNew[i] = self.lcObj.t[jumpVal*i]
         xNew[i] = self.lcObj.x[jumpVal*i]
         yNew[i] = self.lcObj.y[jumpVal*i]
         yerrNew[i] = self.lcObj.yerr[jumpVal*i]
         maskNew[i] = self.lcObj.mask[jumpVal*i]
     returnLC.t = tNew
     returnLC.x = xNew
     returnLC.y = yNew
     returnLC.yerr = yerrNew
     returnLC.mask = maskNew
     returnLC._numCadences = newNumCadences
     returnLC._checkIsRegular()
     returnLC._times()
     returnLC._statistics()
     return returnLC
Example #22
File: slit.py Project: jgrunhut/crifors
def slit_uniform_psf(n, seeing, mu_x, mu_y, tau_0, slit_width, slit_height, plot=False):
    """Returns x- and y- coordinate arrays of a 2D random uniformly distributed
    circle.

    Parameters
    ----------
    n : int
        Size of coordinate arrays.
    seeing : double
        Seeing of source psf in arcseconds.
    mu_x : double
        Center of PSF in x-coords.
    mu_y : double
        Center of PSF in y-coords.
    tau_0 : double
        Rotation about z-axis (tilt).
    slit_width : double
        Width of slit in arcseconds.
    slit_height : double
        Height of slit in arcseconds.

    Returns
    -------
    slit_x : array_like
        Array of x-coordinates.
    slit_y : array_like
        Array of y-coordinates.

    """
    desc = "Source psf: uniform, mux=%.2f muy=%.2f seeing=%.2f arcsec" % (mu_x, mu_y, seeing)
    log.info(desc)
    # initialize output arrays to send to c function
    slit_x = np.empty(n, dtype=np.float64)
    slit_y = np.empty(n, dtype=np.float64)
    slit_x = np.require(slit_x, requirements=ci.req_out, dtype=np.float64)
    slit_y = np.require(slit_y, requirements=ci.req_out, dtype=np.float64)
    func = ci.slitc.slit_uniform_psf
    func.argtypes = [
        ct.c_int,             # n
        ct.c_double,          # seeing
        ct.c_double,          # mu_x
        ct.c_double,          # mu_y
        ct.c_double,          # tau_0
        ct.c_double,          # slit_width
        ct.c_double,          # slit_height
        ci.array_1d_double,   # slit_x
        ci.array_1d_double]   # slit_y
    func.restype = None
    log.info("Slit Rejection Sampling: %s rays...", n)
    func(n, seeing, mu_x, mu_y, tau_0, slit_width, slit_height, slit_x, slit_y)
    # preview slit
    if plot:
        log.info("Opening preview plot of 2D uniformly random psf.")
        import matplotlib.pylab as plt
        fig = plt.figure()
        ax = fig.add_subplot(111)#, aspect='equal')
        ax.scatter(slit_x, slit_y, s=20, edgecolor=None)
        plt.title("0D Point Source PSF")
        plt.show()
    return slit_x, slit_y
Example #23
def make_predictions(net, data, labels, num_classes):
    data = np.require(data, requirements='C')
    labels = np.require(labels, requirements='C')

    preds = np.zeros((data.shape[1], num_classes), dtype=np.single)
    softmax_idx = net.get_layer_idx('probs', check_type='softmax')

    t0 = time.time()
    net.libmodel.startFeatureWriter(
        [data, labels, preds], softmax_idx)
    net.finish_batch()
    print "Predicted %s cases in %.2f seconds." % (
        labels.shape[1], time.time() - t0)

    if net.multiview_test:
        #  We have to deal with num_samples * num_views
        #  predictions.
        num_views = net.test_data_provider.num_views
        num_samples = labels.shape[1] / num_views
        split_sections = range(
            num_samples, num_samples * num_views, num_samples)
        preds = np.split(preds, split_sections, axis=0)
        labels = np.split(labels, split_sections, axis=1)
        preds = reduce(np.add, preds)
        labels = labels[0]

    return preds, labels
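The multiview branch sums the per-view blocks of preds elementwise; the same split-and-reduce pattern on toy data (functools.reduce imported explicitly, since the code above relies on Python 2's builtin reduce):

import numpy as np
from functools import reduce
num_views, num_samples, num_classes = 3, 2, 4
preds = np.arange(num_views * num_samples * num_classes, dtype=np.single)
preds = preds.reshape(num_views * num_samples, num_classes)
sections = range(num_samples, num_samples * num_views, num_samples)
blocks = np.split(preds, list(sections), axis=0)   # one block per view
combined = reduce(np.add, blocks)                  # elementwise sum over views
print(combined.shape)                              # (2, 4)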
Example #24
def _lpc2_py(signal, order, axis = -1):
    """python implementation of lpc for rank 2., Do not use, for testing purpose only"""
    if signal.ndim > 2:
        raise NotImplementedError("only for rank <=2")
    
    if signal.ndim < 2:
        return lpc(_N.require(signal, requirements = 'C'), order)

    # For each array of direction axis, compute levinson durbin
    if axis  % 2 == 0:
        # Prepare output arrays
        coeff   = _N.zeros((order+1, signal.shape[1]), signal.dtype)
        kcoeff  = _N.zeros((order, signal.shape[1]), signal.dtype)
        err     = _N.zeros(signal.shape[1], signal.dtype)
        for i in range(signal.shape[1]):
            coeff[:, i], err[i], kcoeff[:, i] = \
                    lpc(_N.require(signal[:, i], requirements = 'C'), order)
    elif axis % 2 == 1:
        # Prepare output arrays
        coeff   = _N.zeros((signal.shape[0], order+1), signal.dtype)
        kcoeff  = _N.zeros((signal.shape[0], order), signal.dtype)
        err     = _N.zeros(signal.shape[0], signal.dtype)
        for i in range(signal.shape[0]):
            coeff[i], err[i], kcoeff[i] = \
                lpc(_N.require(signal[i], requirements = 'C'), order)
    else:
        raise RuntimeError("this should not happen, please fill a bug")

    return coeff, err, kcoeff
Example #25
def lagrange_interpol_2D_td(points1, points2, coefficients, x1, x2):
    points1 = np.require(points1, dtype=np.float64,
                         requirements=["F_CONTIGUOUS"])
    points2 = np.require(points2, dtype=np.float64,
                         requirements=["F_CONTIGUOUS"])
    coefficients = np.require(coefficients, dtype=np.float64,
                              requirements=["F_CONTIGUOUS"])

    # Should be safe enough. This was never raised while extracting a lot of
    # seismograms.
    assert len(points1) == len(points2)

    N = len(points1) - 1
    nsamp = coefficients.shape[0]

    interpolant = np.zeros(nsamp, dtype="float64", order="F")

    lib.lagrange_interpol_2D_td(
        C.c_int(N),
        C.c_int(nsamp),
        points1.ctypes.data_as(C.POINTER(C.c_double)),
        points2.ctypes.data_as(C.POINTER(C.c_double)),
        coefficients.ctypes.data_as(C.POINTER(C.c_double)),
        C.c_double(x1),
        C.c_double(x2),
        interpolant.ctypes.data_as(C.POINTER(C.c_double)))
    return interpolant
Example #26
File: data.py Project: allenbo/distnet
  def get_next_batch(self):
    self.get_next_index()
    epoch = self.curr_epoch
    filename = os.path.join(self.data_dir, 'data_batch_%d' % (self.curr_batch))
    start = time.time()
    if os.path.isdir(filename):
      images = []
      labels = []

      for sub_filename in os.listdir(filename):
        path = os.path.join(filename, sub_filename)
        data = util.load(path)
        images.extend(data['data'])
        labels.extend(data['labels'])
      data['data'] = images
      data['labels'] = labels
    else:
      data = util.load(filename)
    data = self.__multigpu_seg(data)
    images = data['data']

    cropped = np.ndarray((3, self.inner_size, self.inner_size, len(images) * self.num_view), dtype = np.float32)
    self.__decode_trim_images2(images, cropped)

    cropped = garray.reshape_last(cropped) - self.data_mean
    cropped = np.require(cropped.reshape((3, self.inner_size, self.inner_size, len(images) * self.num_view)), dtype = np.single, requirements='C')

    labels = np.array(labels)
    labels = labels.reshape(labels.size, )
    labels = np.require(labels, dtype=np.single, requirements='C')
    return BatchData(cropped, labels, epoch)
Example #27
    def print_predictions(self):
        data = self.get_next_batch(train=False)[2] # get a test batch
        num_classes = self.test_data_provider.get_num_classes()
        softmax_idx = self.get_layer_idx('probs', check_type='softmax')
        NUM_IMGS = 1
        NUM_TOP_CLASSES = min(num_classes, 4) # show this many top labels
        label_names = self.test_data_provider.batch_meta['label_names']
        preds = n.zeros((NUM_IMGS, num_classes), dtype=n.single)
        rand_idx = nr.randint(0, data[0].shape[1], NUM_IMGS)
        data[0] = n.require(data[0][:,rand_idx], requirements='C')
        data[1] = n.require(data[1][:,rand_idx], requirements='C')
        data += [preds]

        # Run the model
        self.libmodel.startFeatureWriter(data, softmax_idx)
        self.finish_batch()

        data[0] = self.test_data_provider.get_plottable_data(data[0])
        img_idx = 0
        true_label = int(data[1][0,img_idx])

        img_labels = sorted(zip(preds[img_idx,:], label_names), key=lambda x: x[0])[-NUM_TOP_CLASSES:]
        print "true_label=%s" % (label_names[true_label])
        for l in img_labels:
          print "l=%s" % (str(l))

        binary_checkpoint_file = "binary_%d.%d.ntwk" % (self.epoch, self.batchnum)
        self.save_as_binary(binary_checkpoint_file)
Example #28
    def __init__(self, data_dir, 
            img_size, num_colors,  # options i've add to cifar data provider
            batch_range=None, 
            init_epoch=1, init_batchnum=None, dp_params=None, test=False):
        LabeledMemoryDataProvider.__init__(self, data_dir, batch_range, init_epoch, init_batchnum, dp_params, test)

        self.num_colors = num_colors
        self.img_size = img_size
        self.border_size = dp_params['crop_border']
        self.inner_size = self.img_size - self.border_size*2
        self.multiview = dp_params['multiview_test'] and test

        self.img_flip = dp_params['img_flip']
        if self.img_flip:
            self.num_views = 5*2
        else:
            self.num_views = 5
        self.data_mult = self.num_views if self.multiview else 1
        
        for d in self.data_dic:
            d['data'] = n.require(d['data'], requirements='C')
            d['labels'] = n.require(n.tile(d['labels'].reshape((1, d['data'].shape[1])), (1, self.data_mult)), requirements='C')
        
        self.cropped_data = [n.zeros((self.get_data_dims(), self.data_dic[0]['data'].shape[1]*self.data_mult), dtype=n.single) for x in xrange(2)]

        self.batches_generated = 0
        self.data_mean = self.batch_meta['data_mean'].reshape((self.num_colors,self.img_size,self.img_size))[:,self.border_size:self.border_size+self.inner_size,self.border_size:self.border_size+self.inner_size].reshape((self.get_data_dims(), 1))
Example #29
File: data.py Project: allenbo/distnet
  def get_next_batch(self):
    self.get_next_index()

    epoch = self.curr_epoch
    batchnum = self.curr_batch
    names = self.images[self.batches[batchnum]]
    num_imgs = len(names)
    labels = np.zeros(len(names))
    cropped = np.ndarray((3, self.inner_size, self.inner_size, num_imgs * self.num_view),
            dtype=np.float32)
    self.__decode_trim_images2(names, cropped)

    clabel = []
    # extract label from the filename
    for idx, filename in enumerate(names):
      filename = os.path.basename(filename)
      synid = filename[1:].split('_')[0]
      label = self.batch_meta['synid_to_label'][synid]
      labels[idx] = label

    cropped = np.require(cropped, dtype=np.single, requirements='C')
    old_shape = cropped.shape
    cropped = garray.reshape_last(cropped) - self.data_mean
    cropped = cropped.reshape(old_shape)

    labels = np.array(labels)
    labels = labels.reshape(labels.size,)
    labels = np.require(labels, dtype=np.single, requirements='C')

    #util.log_info('get one batch %f', time.time() - start)
    return BatchData(cropped, labels, epoch)
Example #30
File: sampler.py Project: AstroVPK/kali
 def sample(self, **kwargs):
     returnLC = self.lcObj.copy()
     timeStamps = kwargs.get('timestamps', None)
     timeStampDeltas = timeStamps[1:] - timeStamps[:-1]
     SDSSLength = timeStamps[-1] - timeStamps[0]
     minDelta = np.min(timeStampDeltas)
     if minDelta < self.lcObj.dt:
         raise ValueError('Insufficiently dense sampling!')
     if SDSSLength > self.lcObj.T:
         raise ValueError('Insufficiently long lc!')
     newNumCadences = timeStamps.shape[0]
     tNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     xNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     yNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     yerrNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     maskNew = np.require(np.zeros(newNumCadences), requirements=['F', 'A', 'W', 'O', 'E'])
     for i in xrange(newNumCadences):
         index = np.where(self.lcObj.t > timeStamps[i])[0][0]
         tNew[i] = self.lcObj.t[index]
         xNew[i] = self.lcObj.x[index]
         yNew[i] = self.lcObj.y[index]
         yerrNew[i] = self.lcObj.yerr[index]
         maskNew[i] = self.lcObj.mask[index]
     returnLC.t = tNew
     returnLC.x = xNew
     returnLC.y = yNew
     returnLC.yerr = yerrNew
     returnLC.mask = maskNew
     returnLC._numCadences = newNumCadences
     returnLC._checkIsRegular()
     returnLC._times()
     returnLC._statistics()
     return returnLC
Example #31
    def initialize_simulation(self,
                              basis_states,
                              target_states,
                              segs_per_state=1,
                              suppress_we=False):
        '''Initialize a new weighted ensemble simulation, taking ``segs_per_state`` initial
        states from each of the given ``basis_states``.
        
        ``w_init`` is the forward-facing version of this function'''

        data_manager = self.data_manager
        work_manager = self.work_manager
        pstatus = self.rc.pstatus
        system = self.system

        pstatus('Creating HDF5 file {!r}'.format(
            self.data_manager.we_h5filename))
        data_manager.prepare_backing()

        # Process target states
        data_manager.save_target_states(target_states)
        self.report_target_states(target_states)

        # Process basis states
        self.get_bstate_pcoords(basis_states)
        self.data_manager.create_ibstate_group(basis_states)
        self.report_basis_states(basis_states)

        pstatus('Preparing initial states')
        initial_states = []
        weights = []
        if self.do_gen_istates:
            istate_type = InitialState.ISTATE_TYPE_GENERATED
        else:
            istate_type = InitialState.ISTATE_TYPE_BASIS

        for basis_state in basis_states:
            for _iseg in range(segs_per_state):
                initial_state = data_manager.create_initial_states(1, 1)[0]
                initial_state.basis_state_id = basis_state.state_id
                initial_state.basis_state = basis_state
                initial_state.istate_type = istate_type
                weights.append(basis_state.probability / segs_per_state)
                initial_states.append(initial_state)

        if self.do_gen_istates:
            futures = [
                work_manager.submit(wm_ops.gen_istate,
                                    args=(initial_state.basis_state,
                                          initial_state))
                for initial_state in initial_states
            ]
            for future in work_manager.as_completed(futures):
                rbstate, ristate = future.get_result()
                initial_states[ristate.state_id].pcoord = ristate.pcoord
        else:
            for initial_state in initial_states:
                basis_state = initial_state.basis_state
                initial_state.pcoord = basis_state.pcoord
                initial_state.istate_status = InitialState.ISTATE_STATUS_PREPARED

        for initial_state in initial_states:
            log.debug('initial state created: {!r}'.format(initial_state))

        # save list of initial states just generated
        # some of these may not be used, depending on how WE shakes out
        data_manager.update_initial_states(initial_states, n_iter=1)

        if not suppress_we:
            self.we_driver.populate_initial(initial_states, weights, system)
            segments = list(self.we_driver.next_iter_segments)
            binning = self.we_driver.next_iter_binning
        else:
            segments = list(self.we_driver.current_iter_segments)
            binning = self.we_driver.final_binning

        bin_occupancies = numpy.fromiter(map(len, binning),
                                         dtype=numpy.uint,
                                         count=self.we_driver.bin_mapper.nbins)
        target_occupancies = numpy.require(self.we_driver.bin_target_counts,
                                           dtype=numpy.uint)

        # Make sure the initial segments are consistently set up
        for segment in segments:
            segment.n_iter = 1
            segment.status = Segment.SEG_STATUS_PREPARED
            assert segment.parent_id < 0
            assert initial_states[segment.initial_state_id].iter_used == 1

        data_manager.prepare_iteration(1, segments)
        data_manager.update_initial_states(initial_states, n_iter=1)

        if self.rc.verbose_mode:
            pstatus('\nSegments generated:')
            for segment in segments:
                pstatus('{!r}'.format(segment))

        pstatus('''
        Total bins:            {total_bins:d}
        Initial replicas:      {init_replicas:d} in {occ_bins:d} bins, total weight = {weight:g}
        Total target replicas: {total_replicas:d}
        '''.format(total_bins=len(bin_occupancies),
                   init_replicas=int(sum(bin_occupancies)),
                   occ_bins=len(bin_occupancies[bin_occupancies > 0]),
                   weight=float(sum(segment.weight for segment in segments)),
                   total_replicas=int(sum(target_occupancies))))

        # Send the segments over to the data manager to commit to disk
        data_manager.current_iteration = 1

        # Report statistics
        pstatus('Simulation prepared.')
        self.segments = {segment.seg_id: segment for segment in segments}
        self.report_bin_statistics(binning, save_summary=True)
        data_manager.flush_backing()
Example #32
 def get_data(d, i):
     data = d.get_data(request=i)[d.sources.index('features')]
     # Fuel provides Cifar in uint8, convert to float32
     data = numpy.require(data, dtype=numpy.float32)
     return data if cnorm is None else cnorm.apply(data)
Example #33
#volume.SetProperty(volumeProperty)
#
#ren = vtk.vtkRenderer()
#
#
#ren.AddVolume(volume)
#ren.SetBackground(0, 0, 0)
#ren.ResetCamera()
#
#renWin = vtk.vtkRenderWindow()
#renWin.AddRenderer(ren)
#iren = vtk.vtkRenderWindowInteractor()
#iren.SetRenderWindow(renWin)
#renWin.Render()
#iren.Start()
b = np.require(a, dtype=np.uint8)
b_string = b.tostring()
data_importer = vtk.vtkImageImport()
data_importer.CopyImportVoidPointer(b_string, len(b_string))
data_importer.SetDataScalarTypeToUnsignedChar()
data_importer.SetNumberOfScalarComponents(1)
data_importer.SetDataExtent(0, 511, 0, 511, 0, 511)
data_importer.SetWholeExtent(0, 511, 0, 511, 0, 511)

alphaChannelFunc = vtk.vtkPiecewiseFunction()
alphaChannelFunc.AddPoint(0, 0.0)
alphaChannelFunc.AddPoint(64, 0.3)
alphaChannelFunc.AddPoint(128, 0.5)
alphaChannelFunc.AddPoint(200, 0.7)

colorFunc = vtk.vtkPiecewiseFunction()
Example #34
# coding=utf-8
import numpy as np
from load_data import load_data
import caffe
(X_train,Y_train)=load_data()
X_train = np.require(X_train,requirements='C')
Y_train = np.require(Y_train,requirements='C')
#m0=np.mean(X_train[:,0,:,:])
#m1=np.mean(X_train[:,1,:,:])
#m2=np.mean(X_train[:,2,:,:])
#print(np.shape(X_train))#(1000L, 3L, 224L, 224L)
#print(np.shape(Y_train))#(1000L, 2L)
#print(m0)
#print(m1)
#print(m2)
solver = caffe.SGDSolver('solver_mid_cid_clf.prototxt')
solver.net.copy_from('bvlc_alexnet.caffemodel')
solver.net.set_input_arrays(X_train, Y_train)
solver.test_nets[0].set_input_arrays(X_train, Y_train)

for i in range(2):
    solver.step(1)
#solver.step(40)#average_loss=40
#solver.net.save(r'D:\xinjian\test')


Example #35
def require_numpy_array_layout(value):
    if isinstance(value, tuple):
        return tuple(require_numpy_array_layout(x) for x in value)
    else:
        return np.require(value, requirements=['C', 'A'])
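Illustrative behavior of this helper (values are made up): it normalizes layout recursively through tuples while leaving already-conforming arrays untouched.

import numpy as np
a = np.arange(6).reshape(2, 3)[:, ::2]              # non-contiguous view
b = require_numpy_array_layout(a)
print(b.flags['C_CONTIGUOUS'], b.flags['ALIGNED'])  # True True
pair = require_numpy_array_layout((a, a.T))         # tuples handled element-wise
print(pair[0].flags['C_CONTIGUOUS'])                # True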
Example #36
def rand_choice(probs, choose_from=None):
    probs = numpy.require(probs, dtype='float32')
    probs /= probs.sum()
    if choose_from is None:
        choose_from = range(len(probs))
    return numpy.random.choice(choose_from, 1, p=probs)[0]
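Hypothetical usage: the weights need not be normalized, since the helper rescales them before delegating to numpy.random.choice.

import numpy
numpy.random.seed(0)                       # for repeatability
print(rand_choice([1, 1, 2]))              # 0, 1 or 2, with index 2 twice as likely
print(rand_choice([1, 1, 2], choose_from=['a', 'b', 'c']))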
Example #37
    def compute(self):
        data_in = self.getData('in')
        kind = self.getVal('interpolation-mode')

        self.dimm = data_in.shape
        self.ndim = data_in.ndim

        data_out = data_in.copy()
        if self.getVal('compute'):
            if kind in ('linear', 'nearest'):
                for i in range(self.ndim):
                    val = self.getVal(self.dim_base_name + str(i) + ']')
                    interpnew = val['length']
                    axisnew = self.dimm[i]
                    x = np.linspace(0, axisnew - 1, axisnew)
                    xnew = np.linspace(0, axisnew - 1, interpnew)
                    if axisnew == 1:
                        reps = np.ones((self.ndim, ))
                        reps[i] = interpnew
                        ynew = np.tile(data_out, reps)
                    elif axisnew == interpnew:
                        continue
                    else:
                        yinterp = interpolate.interp1d(x,
                                                       data_out,
                                                       kind=kind,
                                                       axis=i)
                        ynew = yinterp(xnew)

                    data_out = ynew
            elif kind in ('zero', 'slinear', 'quadratic', 'cubic'):
                from scipy.ndimage.interpolation import map_coordinates
                orders = {'zero': 0, 'slinear': 1, 'quadratic': 2, 'cubic': 3}
                o = orders[kind]

                # if the data is complex, interp real and imaginary separately
                if data_in.dtype in (np.complex64, np.complex128):
                    data_real = np.real(data_in)
                    data_imag = np.imag(data_in)
                else:
                    data_real = data_in
                    data_imag = None

                new_dims = []
                for i in range(self.ndim):
                    val = self.getVal(self.dim_base_name + str(i) + ']')
                    new_dims.append(
                        np.linspace(0, val['in_len'] - 1, val['length']))

                coords = np.meshgrid(*new_dims, indexing='ij')
                data_out = map_coordinates(data_real, coords, order=o)

                if data_imag is not None:
                    data_out = (
                        data_out +
                        1j * map_coordinates(data_imag, coords, order=o))

            else:
                # use zero-padding and FFTW to sinc-interpolate
                import core.math.fft as ft
                data_in_c64 = np.require(data_in,
                                         dtype=np.complex64,
                                         requirements='C')

                old_dims = np.asarray(self.dimm[::-1], dtype=np.int64)
                new_dims = old_dims.copy()
                fftargs = {}
                win = np.ones(data_in.shape)
                scale = 1
                for i in range(self.ndim):
                    val = self.getVal(self.dim_base_name + str(i) + ']')
                    new_dims[self.ndim - i - 1] = np.int64(val['length'])
                    if val['length'] == val['in_len']:
                        fftargs['dim{}'.format(self.ndim - i)] = 0
                    else:
                        win *= self.window(i)
                        fftargs['dim{}'.format(self.ndim - i)] = 1
                        scale *= val['length'] / val['in_len']

                # forward FFT with original dimensions
                fftargs['dir'] = 0
                data_out = ft.fftw(data_in_c64, old_dims, **fftargs)

                data_out *= win.astype(np.complex64)
                # inverse FFT with new dimensions (zero-pad kspace)
                fftargs['dir'] = 1
                data_out = ft.fftw(data_out, new_dims, **fftargs)

                if data_in.dtype in (np.float32, np.float64):
                    data_out = np.real(data_out)

                # data needs to be scaled since we changed the size between the
                # forward and inverse FT
                data_out *= scale

            self.setData('out', data_out)
        else:
            pass

        return (0)
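The sinc-interpolation branch relies on the standard zero-pad-in-k-space identity, including the final amplitude rescale; a 1D sketch with numpy.fft (independent of the core.math.fft wrapper used above):

import numpy as np
n, m = 8, 16                                  # original and interpolated lengths
sig = np.cos(2 * np.pi * np.arange(n) / n)
spec = np.fft.fftshift(np.fft.fft(sig))
padded = np.zeros(m, dtype=complex)
padded[(m - n) // 2:(m - n) // 2 + n] = spec  # zero-pad the centered spectrum
up = np.fft.ifft(np.fft.ifftshift(padded)).real
up *= m / float(n)                            # rescale for the size change
# every other upsampled point coincides with the original grid
np.testing.assert_array_almost_equal(up[::2], sig)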
Example #38
File: lenet.py Project: vcicii/Lenet
valid_x, valid_y = valid_set
test_x, test_y = test_set

print train_x.shape, train_y.shape
print valid_x.shape, valid_y.shape
print test_x.shape, test_y.shape

batch_size = 50
learning_rate = 0.001

input_x = train_x[0:batch_size]
print input_x.shape
image = input_x.reshape([batch_size, 28, 28, 1])
print image.shape
k = map(lambda x: x.reshape(28, 28, 1), input_x)
image = np.require(k)
print image.shape

conv1 = Convolution((batch_size, 28, 28, 1), (5, 5, 20))
relu1 = ReLU([batch_size, 24, 24, 20])
pool1 = Pooling([batch_size, 24, 24, 20], (2, 2), stride=2)

conv2 = Convolution((batch_size, 12, 12, 20), (5, 5, 32))
relu2 = ReLU([batch_size, 8, 8, 32])
pool2 = Pooling([batch_size, 8, 8, 32], (2, 2), stride=2)

mlp = MLP(batch_size, learning_rate)

for epoch in range(200):
    for i in range(train_x.shape[0] // batch_size):
        input_X = train_x[i * batch_size:(i + 1) * batch_size, :]
Example #39
def actionAngleTorus_xvFreqs_c(pot,
                               jr,
                               jphi,
                               jz,
                               angler,
                               anglephi,
                               anglez,
                               tol=0.003):
    """
    NAME:
       actionAngleTorus_xvFreqs_c
    PURPOSE:
       compute configuration (x,v) and frequencies of a set of angles on a single torus
    INPUT:
       pot - Potential object or list thereof
       jr - radial action (scalar)
       jphi - azimuthal action (scalar)
       jz - vertical action (scalar)
       angler - radial angle (array [N])
       anglephi - azimuthal angle (array [N])
       anglez - vertical angle (array [N])
       tol= (0.003) goal for |dJ|/|J| along the torus
    OUTPUT:
       (R,vR,vT,z,vz,phi,Omegar,Omegaphi,Omegaz,flag)
    HISTORY:
       2015-08-05/07 - Written - Bovy (UofT)
    """
    #Parse the potential
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args = _parse_pot(pot, potfortorus=True)

    #Set up result arrays
    R = numpy.empty(len(angler))
    vR = numpy.empty(len(angler))
    vT = numpy.empty(len(angler))
    z = numpy.empty(len(angler))
    vz = numpy.empty(len(angler))
    phi = numpy.empty(len(angler))
    Omegar = numpy.empty(1)
    Omegaphi = numpy.empty(1)
    Omegaz = numpy.empty(1)
    flag = ctypes.c_int(0)

    #Set up the C code
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleTorus_xvFreqsFunc = _lib.actionAngleTorus_xvFreqs
    actionAngleTorus_xvFreqsFunc.argtypes=\
        [ctypes.c_double,
         ctypes.c_double,
         ctypes.c_double,
         ctypes.c_int,
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.c_int,
         ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.c_double,
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.POINTER(ctypes.c_int)]

    #Array requirements, first store old order
    f_cont = [
        angler.flags['F_CONTIGUOUS'], anglephi.flags['F_CONTIGUOUS'],
        anglez.flags['F_CONTIGUOUS']
    ]
    angler = numpy.require(angler,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    anglephi = numpy.require(anglephi,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    anglez = numpy.require(anglez,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    R = numpy.require(R, dtype=numpy.float64, requirements=['C', 'W'])
    vR = numpy.require(vR, dtype=numpy.float64, requirements=['C', 'W'])
    vT = numpy.require(vT, dtype=numpy.float64, requirements=['C', 'W'])
    z = numpy.require(z, dtype=numpy.float64, requirements=['C', 'W'])
    vz = numpy.require(vz, dtype=numpy.float64, requirements=['C', 'W'])
    phi = numpy.require(phi, dtype=numpy.float64, requirements=['C', 'W'])
    Omegar = numpy.require(Omegar,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    Omegaphi = numpy.require(Omegaphi,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    Omegaz = numpy.require(Omegaz,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])

    #Run the C code
    actionAngleTorus_xvFreqsFunc(ctypes.c_double(jr), ctypes.c_double(jphi),
                                 ctypes.c_double(jz),
                                 ctypes.c_int(len(angler)), angler, anglephi,
                                 anglez,
                                 ctypes.c_int(npot), pot_type, pot_args,
                                 ctypes.c_double(tol), R, vR, vT, z, vz, phi,
                                 Omegar, Omegaphi, Omegaz, ctypes.byref(flag))

    #Reset input arrays
    if f_cont[0]: angler = numpy.asfortranarray(angler)
    if f_cont[1]: anglephi = numpy.asfortranarray(anglephi)
    if f_cont[2]: anglez = numpy.asfortranarray(anglez)

    return (R, vR, vT, z, vz, phi, Omegar[0], Omegaphi[0], Omegaz[0],
            flag.value)
Example #40
def showpoints(xyz,
               c_gt=None,
               c_pred=None,
               waittime=0,
               showrot=False,
               magnifyBlue=0,
               freezerot=False,
               background=(0, 0, 0),
               normalizecolor=True,
               ballradius=10):
    global showsz, mousex, mousey, zoom, changed
    xyz = xyz - xyz.mean(axis=0)
    radius = ((xyz**2).sum(axis=-1)**0.5).max()
    xyz /= (radius * 2.2) / showsz
    if c_gt is None:
        c0 = np.zeros((len(xyz), ), dtype='float32') + 255
        c1 = np.zeros((len(xyz), ), dtype='float32') + 255
        c2 = np.zeros((len(xyz), ), dtype='float32') + 255
    else:
        c0 = c_gt[:, 0]
        c1 = c_gt[:, 1]
        c2 = c_gt[:, 2]

    if normalizecolor:
        c0 /= (c0.max() + 1e-14) / 255.0
        c1 /= (c1.max() + 1e-14) / 255.0
        c2 /= (c2.max() + 1e-14) / 255.0

    c0 = np.require(c0, 'float32', 'C')
    c1 = np.require(c1, 'float32', 'C')
    c2 = np.require(c2, 'float32', 'C')

    show = np.zeros((showsz, showsz, 3), dtype='uint8')

    def render():
        rotmat = np.eye(3)
        if not freezerot:
            xangle = (mousey - 0.5) * np.pi * 1.2
        else:
            xangle = 0
        rotmat = rotmat.dot(
            np.array([
                [1.0, 0.0, 0.0],
                [0.0, np.cos(xangle), -np.sin(xangle)],
                [0.0, np.sin(xangle), np.cos(xangle)],
            ]))
        if not freezerot:
            yangle = (mousex - 0.5) * np.pi * 1.2
        else:
            yangle = 0
        rotmat = rotmat.dot(
            np.array([
                [np.cos(yangle), 0.0, -np.sin(yangle)],
                [0.0, 1.0, 0.0],
                [np.sin(yangle), 0.0, np.cos(yangle)],
            ]))
        rotmat *= zoom
        nxyz = xyz.dot(rotmat) + [showsz / 2, showsz / 2, 0]

        ixyz = nxyz.astype('int32')
        show[:] = background
        dll.render_ball(ct.c_int(show.shape[0]), ct.c_int(show.shape[1]),
                        show.ctypes.data_as(ct.c_void_p),
                        ct.c_int(ixyz.shape[0]),
                        ixyz.ctypes.data_as(ct.c_void_p),
                        c0.ctypes.data_as(ct.c_void_p),
                        c1.ctypes.data_as(ct.c_void_p),
                        c2.ctypes.data_as(ct.c_void_p), ct.c_int(ballradius))

        if magnifyBlue > 0:
            show[:, :, 0] = np.maximum(show[:, :, 0],
                                       np.roll(show[:, :, 0], 1, axis=0))
            if magnifyBlue >= 2:
                show[:, :, 0] = np.maximum(show[:, :, 0],
                                           np.roll(show[:, :, 0], -1, axis=0))
            show[:, :, 0] = np.maximum(show[:, :, 0],
                                       np.roll(show[:, :, 0], 1, axis=1))
            if magnifyBlue >= 2:
                show[:, :, 0] = np.maximum(show[:, :, 0],
                                           np.roll(show[:, :, 0], -1, axis=1))
        if showrot:
            cv2.putText(show, 'xangle %d' % (int(xangle / np.pi * 180)),
                        (30, showsz - 30), 0, 0.5, cv2.cv.CV_RGB(255, 0, 0))
            cv2.putText(show, 'yangle %d' % (int(yangle / np.pi * 180)),
                        (30, showsz - 50), 0, 0.5, cv2.cv.CV_RGB(255, 0, 0))
            cv2.putText(show, 'zoom %d%%' % (int(zoom * 100)),
                        (30, showsz - 70), 0, 0.5, cv2.cv.CV_RGB(255, 0, 0))

    changed = True
    while True:
        if changed:
            render()
            changed = False
        cv2.imshow('show3d', show)
        if waittime == 0:
            cmd = cv2.waitKey(10) % 256
        else:
            cmd = cv2.waitKey(waittime) % 256
        if cmd == ord('q'):
            break
        elif cmd == ord('Q'):
            sys.exit(0)

        if cmd == ord('t') or cmd == ord('p'):
            if cmd == ord('t'):
                if c_gt is None:
                    c0 = np.zeros((len(xyz), ), dtype='float32') + 255
                    c1 = np.zeros((len(xyz), ), dtype='float32') + 255
                    c2 = np.zeros((len(xyz), ), dtype='float32') + 255
                else:
                    c0 = c_gt[:, 0]
                    c1 = c_gt[:, 1]
                    c2 = c_gt[:, 2]
            else:
                if c_pred is None:
                    c0 = np.zeros((len(xyz), ), dtype='float32') + 255
                    c1 = np.zeros((len(xyz), ), dtype='float32') + 255
                    c2 = np.zeros((len(xyz), ), dtype='float32') + 255
                else:
                    c0 = c_pred[:, 0]
                    c1 = c_pred[:, 1]
                    c2 = c_pred[:, 2]
            if normalizecolor:
                c0 /= (c0.max() + 1e-14) / 255.0
                c1 /= (c1.max() + 1e-14) / 255.0
                c2 /= (c2.max() + 1e-14) / 255.0
            c0 = np.require(c0, 'float32', 'C')
            c1 = np.require(c1, 'float32', 'C')
            c2 = np.require(c2, 'float32', 'C')
            changed = True

        if cmd == ord('n'):
            zoom *= 1.1
            changed = True
        elif cmd == ord('m'):
            zoom /= 1.1
            changed = True
        elif cmd == ord('r'):
            zoom = 1.0
            changed = True
        elif cmd == ord('s'):
            cv2.imwrite('show3d.png', show)
        if waittime != 0:
            break
    return cmd
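The three np.require(c, 'float32', 'C') calls above are what make the later c0.ctypes.data_as(...) handoff safe: the C renderer assumes dense, C-contiguous float32 buffers. A minimal sketch of that pattern, with a hypothetical lib.render standing in for dll.render_ball:

import ctypes as ct
import numpy as np

colors = np.linspace(0.0, 255.0, 8)[::2]   # float64, non-contiguous view
c = np.require(colors, 'float32', 'C')     # one cast-and-copy to a dense buffer
ptr = c.ctypes.data_as(ct.c_void_p)        # raw pointer is now safe to share
# lib.render(ct.c_int(len(c)), ptr)        # hypothetical C call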
Example #41
def actionAngleTorus_hessian_c(pot, jr, jphi, jz, tol=0.003, dJ=0.001):
    """
    NAME:
       actionAngleTorus_hessian_c
    PURPOSE:
       compute dO/dJ on a single torus
    INPUT:
       pot - Potential object or list thereof
       jr - radial action (scalar)
       jphi - azimuthal action (scalar)
       jz - vertical action (scalar)
       tol= (0.003) goal for |dJ|/|J| along the torus
       dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian)
    OUTPUT:
       (dO/dJ,Omegar,Omegaphi,Omegaz,Autofit error flag)
       Note: dO/dJ is *not* symmetrized here
    HISTORY:
       2016-07-15 - Written - Bovy (UofT)
    """
    #Parse the potential
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args = _parse_pot(pot, potfortorus=True)

    #Set up result
    dOdJT = numpy.empty(9)
    Omegar = numpy.empty(1)
    Omegaphi = numpy.empty(1)
    Omegaz = numpy.empty(1)
    flag = ctypes.c_int(0)

    #Set up the C code
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleTorus_HessFunc = _lib.actionAngleTorus_hessianFreqs
    actionAngleTorus_HessFunc.argtypes=\
        [ctypes.c_double,
         ctypes.c_double,
         ctypes.c_double,
         ctypes.c_int,
         ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.c_double,
         ctypes.c_double,
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.POINTER(ctypes.c_int)]

    #Array requirements
    dOdJT = numpy.require(dOdJT, dtype=numpy.float64, requirements=['C', 'W'])
    Omegar = numpy.require(Omegar,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    Omegaphi = numpy.require(Omegaphi,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    Omegaz = numpy.require(Omegaz,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])

    #Run the C code
    actionAngleTorus_HessFunc(ctypes.c_double(jr), ctypes.c_double(jphi),
                              ctypes.c_double(jz), ctypes.c_int(npot),
                              pot_type, pot_args, ctypes.c_double(tol),
                              ctypes.c_double(dJ), dOdJT, Omegar, Omegaphi,
                              Omegaz, ctypes.byref(flag))

    return (dOdJT.reshape(
        (3, 3)).T, Omegar[0], Omegaphi[0], Omegaz[0], flag.value)
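Note how the output buffers here are constrained twice: ndpointer(..., flags=ndarrayFlags) validates C_CONTIGUOUS/WRITEABLE at call time, while numpy.require(..., requirements=['C', 'W']) establishes the same flags up front. A small sketch of why the require calls are cheap (a fresh numpy.empty already satisfies both flags, so no copy is made):

import numpy
from numpy.ctypeslib import ndpointer

out = numpy.require(numpy.empty(9), dtype=numpy.float64,
                    requirements=['C', 'W'])        # no copy: flags already set
assert out.flags['C_CONTIGUOUS'] and out.flags['WRITEABLE']
# An argtype of ndpointer(dtype=numpy.float64,
#                         flags=('C_CONTIGUOUS', 'WRITEABLE')) accepts `out`.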
Example #42
def FORCESNLPsolver_solve(params_arg):
    '''
a Python wrapper for a fast solver generated by FORCES Pro v1.6.121

   OUTPUT = FORCESNLPsolver_py.FORCESNLPsolver_solve(PARAMS) solves a multistage problem
   subject to the parameters supplied in the following dictionary:
       PARAMS['x0'] - column vector of length 600
       PARAMS['xinit'] - column vector of length 4
       PARAMS['xfinal'] - column vector of length 2

   OUTPUT returns the values of the last iteration of the solver where
       OUTPUT['x001'] - column vector of size 6
       OUTPUT['x002'] - column vector of size 6
       OUTPUT['x003'] - column vector of size 6
       OUTPUT['x004'] - column vector of size 6
       OUTPUT['x005'] - column vector of size 6
       OUTPUT['x006'] - column vector of size 6
       OUTPUT['x007'] - column vector of size 6
       OUTPUT['x008'] - column vector of size 6
       OUTPUT['x009'] - column vector of size 6
       OUTPUT['x010'] - column vector of size 6
       OUTPUT['x011'] - column vector of size 6
       OUTPUT['x012'] - column vector of size 6
       OUTPUT['x013'] - column vector of size 6
       OUTPUT['x014'] - column vector of size 6
       OUTPUT['x015'] - column vector of size 6
       OUTPUT['x016'] - column vector of size 6
       OUTPUT['x017'] - column vector of size 6
       OUTPUT['x018'] - column vector of size 6
       OUTPUT['x019'] - column vector of size 6
       OUTPUT['x020'] - column vector of size 6
       OUTPUT['x021'] - column vector of size 6
       OUTPUT['x022'] - column vector of size 6
       OUTPUT['x023'] - column vector of size 6
       OUTPUT['x024'] - column vector of size 6
       OUTPUT['x025'] - column vector of size 6
       OUTPUT['x026'] - column vector of size 6
       OUTPUT['x027'] - column vector of size 6
       OUTPUT['x028'] - column vector of size 6
       OUTPUT['x029'] - column vector of size 6
       OUTPUT['x030'] - column vector of size 6
       OUTPUT['x031'] - column vector of size 6
       OUTPUT['x032'] - column vector of size 6
       OUTPUT['x033'] - column vector of size 6
       OUTPUT['x034'] - column vector of size 6
       OUTPUT['x035'] - column vector of size 6
       OUTPUT['x036'] - column vector of size 6
       OUTPUT['x037'] - column vector of size 6
       OUTPUT['x038'] - column vector of size 6
       OUTPUT['x039'] - column vector of size 6
       OUTPUT['x040'] - column vector of size 6
       OUTPUT['x041'] - column vector of size 6
       OUTPUT['x042'] - column vector of size 6
       OUTPUT['x043'] - column vector of size 6
       OUTPUT['x044'] - column vector of size 6
       OUTPUT['x045'] - column vector of size 6
       OUTPUT['x046'] - column vector of size 6
       OUTPUT['x047'] - column vector of size 6
       OUTPUT['x048'] - column vector of size 6
       OUTPUT['x049'] - column vector of size 6
       OUTPUT['x050'] - column vector of size 6
       OUTPUT['x051'] - column vector of size 6
       OUTPUT['x052'] - column vector of size 6
       OUTPUT['x053'] - column vector of size 6
       OUTPUT['x054'] - column vector of size 6
       OUTPUT['x055'] - column vector of size 6
       OUTPUT['x056'] - column vector of size 6
       OUTPUT['x057'] - column vector of size 6
       OUTPUT['x058'] - column vector of size 6
       OUTPUT['x059'] - column vector of size 6
       OUTPUT['x060'] - column vector of size 6
       OUTPUT['x061'] - column vector of size 6
       OUTPUT['x062'] - column vector of size 6
       OUTPUT['x063'] - column vector of size 6
       OUTPUT['x064'] - column vector of size 6
       OUTPUT['x065'] - column vector of size 6
       OUTPUT['x066'] - column vector of size 6
       OUTPUT['x067'] - column vector of size 6
       OUTPUT['x068'] - column vector of size 6
       OUTPUT['x069'] - column vector of size 6
       OUTPUT['x070'] - column vector of size 6
       OUTPUT['x071'] - column vector of size 6
       OUTPUT['x072'] - column vector of size 6
       OUTPUT['x073'] - column vector of size 6
       OUTPUT['x074'] - column vector of size 6
       OUTPUT['x075'] - column vector of size 6
       OUTPUT['x076'] - column vector of size 6
       OUTPUT['x077'] - column vector of size 6
       OUTPUT['x078'] - column vector of size 6
       OUTPUT['x079'] - column vector of size 6
       OUTPUT['x080'] - column vector of size 6
       OUTPUT['x081'] - column vector of size 6
       OUTPUT['x082'] - column vector of size 6
       OUTPUT['x083'] - column vector of size 6
       OUTPUT['x084'] - column vector of size 6
       OUTPUT['x085'] - column vector of size 6
       OUTPUT['x086'] - column vector of size 6
       OUTPUT['x087'] - column vector of size 6
       OUTPUT['x088'] - column vector of size 6
       OUTPUT['x089'] - column vector of size 6
       OUTPUT['x090'] - column vector of size 6
       OUTPUT['x091'] - column vector of size 6
       OUTPUT['x092'] - column vector of size 6
       OUTPUT['x093'] - column vector of size 6
       OUTPUT['x094'] - column vector of size 6
       OUTPUT['x095'] - column vector of size 6
       OUTPUT['x096'] - column vector of size 6
       OUTPUT['x097'] - column vector of size 6
       OUTPUT['x098'] - column vector of size 6
       OUTPUT['x099'] - column vector of size 6
       OUTPUT['x100'] - column vector of size 6

   [OUTPUT, EXITFLAG] = FORCESNLPsolver_py.FORCESNLPsolver_solve(PARAMS) returns additionally
   the integer EXITFLAG indicating the state of the solution with 
       1 - Optimal solution has been found (subject to desired accuracy)
       2 - (only branch-and-bound) A feasible point has been identified for which the objective value is no more than codeoptions.mip.mipgap*100 per cent worse than the global optimum 
       0 - Timeout - maximum number of iterations reached
      -1 - (only branch-and-bound) Infeasible problem (problems solving the root relaxation to the desired accuracy)
      -2 - (only branch-and-bound) Out of memory - cannot fit branch and bound nodes into pre-allocated memory.
      -6 - NaN or INF occurred during evaluation of functions and derivatives. Please check your initial guess.
      -7 - Method could not progress. Problem may be infeasible. Run FORCESdiagnostics on your problem to check for most common errors in the formulation.
     -10 - The convex solver could not proceed due to an internal error
    -100 - License error

   [OUTPUT, EXITFLAG, INFO] = FORCESNLPsolver_py.FORCESNLPsolver_solve(PARAMS) returns 
   additional information about the last iterate:
       INFO.it        - number of iterations that lead to this result
       INFO.it2opt    - number of convex solves
       INFO.res_eq    - max. equality constraint residual
       INFO.res_ineq  - max. inequality constraint residual
       INFO.rsnorm    - norm of stationarity condition
       INFO.rcompnorm    - max of all complementarity violations
       INFO.pobj      - primal objective
       INFO.mu        - duality measure
       INFO.solvetime - Time needed for solve (wall clock time)
       INFO.fevalstime - Time needed for function evaluations (wall clock time)

 See also COPYING

	'''
    global _lib

    # convert parameters
    params_py = FORCESNLPsolver_params_ctypes()
    for par in params_arg:
        try:
            #setattr(params_py, par, npct.as_ctypes(np.reshape(params_arg[par],np.size(params_arg[par]),order='A')))
            params_arg[par] = np.require(params_arg[par],
                                         dtype=np.float64,
                                         requirements='F')
            setattr(
                params_py, par,
                npct.as_ctypes(
                    np.reshape(params_arg[par],
                               np.size(params_arg[par]),
                               order='F')))
        except:
            raise ValueError(
                'Parameter ' + par +
                ' does not have the appropriate dimensions or data type. Please use numpy arrays for parameters.'
            )

    outputs_py = FORCESNLPsolver_outputs_ctypes()
    info_py = FORCESNLPsolver_info()
    if sys.version_info.major == 2:
        if sys.platform.startswith('win'):
            fp = None  # if set to none, the solver prints to stdout by default - necessary because we have an access violation otherwise under windows
        else:
            #fp = open('stdout_temp.txt','w')
            fp = sys.stdout
        try:
            PyFile_AsFile.restype = ctypes.POINTER(FILE)
            exitflag = _lib.FORCESNLPsolver_solve(
                params_py, ctypes.byref(outputs_py), ctypes.byref(info_py),
                PyFile_AsFile(fp), _lib.FORCESNLPsolver_casadi2forces)
            #fp = open('stdout_temp.txt','r')
            #print (fp.read())
            #fp.close()
        except:
            #print 'Problem with solver'
            raise
    elif sys.version_info.major == 3:
        if sys.platform.startswith('win'):
            libc = ctypes.cdll.msvcrt
        elif sys.platform.startswith('darwin'):
            libc = ctypes.CDLL('libc.dylib')
        else:
            libc = ctypes.CDLL('libc.so.6')  # Open libc
        cfopen = getattr(libc, 'fopen')  # Get its fopen
        cfopen.restype = ctypes.POINTER(
            FILE)  # Yes, fopen gives a file pointer
        cfopen.argtypes = [ctypes.c_char_p,
                           ctypes.c_char_p]  # Yes, fopen gives a file pointer
        fp = cfopen('stdout_temp.txt'.encode('utf-8'),
                    'w'.encode('utf-8'))  # Use that fopen

        try:
            if sys.platform.startswith('win'):
                exitflag = _lib.FORCESNLPsolver_solve(
                    params_py, ctypes.byref(outputs_py), ctypes.byref(info_py),
                    None, _lib.FORCESNLPsolver_casadi2forces)
            else:
                exitflag = _lib.FORCESNLPsolver_solve(
                    params_py, ctypes.byref(outputs_py), ctypes.byref(info_py),
                    fp, _lib.FORCESNLPsolver_casadi2forces)
            libc.fclose(fp)
            fptemp = open('stdout_temp.txt', 'r')
            print(fptemp.read())
            fptemp.close()
        except:
            #print 'Problem with solver'
            raise

    # convert outputs
    for out in FORCESNLPsolver_outputs:
        FORCESNLPsolver_outputs[out] = npct.as_array(getattr(outputs_py, out))

    return FORCESNLPsolver_outputs, int(exitflag), info_py
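The parameter conversion above pairs requirements='F' with np.reshape(..., order='F'), so each parameter reaches the ctypes struct as a column-major flattening, matching the column-vector convention of the docstring. The combination in isolation:

import numpy as np

a = np.arange(6.0).reshape(2, 3)                       # C-ordered by default
f = np.require(a, dtype=np.float64, requirements='F')  # Fortran-ordered copy
flat = np.reshape(f, np.size(f), order='F')            # column-major flattening
# flat == [0., 3., 1., 4., 2., 5.]: columns first, as the solver expects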
Example #43
def unwrap_phase(image, wrap_around=False, seed=None):
    '''Recover the original from a wrapped phase image.

    From an image wrapped to lie in the interval [-pi, pi), recover the
    original, unwrapped image.

    Parameters
    ----------
    image : 1D, 2D or 3D ndarray of floats, optionally a masked array
        The values should be in the range [-pi, pi). If a masked array is
        provided, the masked entries will not be changed, and their values
        will not be used to guide the unwrapping of neighboring, unmasked
        values. Masked 1D arrays are not allowed, and will raise a
        `ValueError`.
    wrap_around : bool or sequence of bool, optional
        When an element of the sequence is  `True`, the unwrapping process
        will regard the edges along the corresponding axis of the image to be
        connected and use this connectivity to guide the phase unwrapping
        process. If only a single boolean is given, it will apply to all axes.
        Wrap around is not supported for 1D arrays.
    seed : int, optional
        Unwrapping 2D or 3D images uses random initialization. This sets the
        seed of the PRNG to achieve deterministic behavior.

    Returns
    -------
    image_unwrapped : array_like, double
        Unwrapped image of the same shape as the input. If the input `image`
        was a masked array, the mask will be preserved.

    Raises
    ------
    ValueError
        If called with a masked 1D array or called with a 1D array and
        ``wrap_around=True``.

    Examples
    --------
    >>> c0, c1 = np.ogrid[-1:1:128j, -1:1:128j]
    >>> image = 12 * np.pi * np.exp(-(c0**2 + c1**2))
    >>> image_wrapped = np.angle(np.exp(1j * image))
    >>> image_unwrapped = unwrap_phase(image_wrapped)
    >>> np.std(image_unwrapped - image) < 1e-6   # A constant offset is normal
    True

    References
    ----------
    .. [1] Miguel Arevallilo Herraez, David R. Burton, Michael J. Lalor,
           and Munther A. Gdeisat, "Fast two-dimensional phase-unwrapping
           algorithm based on sorting by reliability following a noncontinuous
           path", Journal Applied Optics, Vol. 41, No. 35 (2002) 7437,
    .. [2] Abdul-Rahman, H., Gdeisat, M., Burton, D., & Lalor, M., "Fast
           three-dimensional phase-unwrapping algorithm based on sorting by
           reliability following a non-continuous path. In W. Osten,
           C. Gorecki, & E. L. Novak (Eds.), Optical Metrology (2005) 32--40,
           International Society for Optics and Photonics.
    '''
    if image.ndim not in (1, 2, 3):
        raise ValueError('Image must be 1, 2, or 3 dimensional')
    if isinstance(wrap_around, bool):
        wrap_around = [wrap_around] * image.ndim
    elif (hasattr(wrap_around, '__getitem__')
          and not isinstance(wrap_around, str)):
        if len(wrap_around) != image.ndim:
            raise ValueError('Length of `wrap_around` must equal the '
                             'dimensionality of image')
        wrap_around = [bool(wa) for wa in wrap_around]
    else:
        raise ValueError('`wrap_around` must be a bool or a sequence with '
                         'length equal to the dimensionality of image')
    if image.ndim == 1:
        if np.ma.isMaskedArray(image):
            raise ValueError('1D masked images cannot be unwrapped')
        if wrap_around[0]:
            raise ValueError('`wrap_around` is not supported for 1D images')
    if image.ndim in (2, 3) and 1 in image.shape:
        warn('Image has a length 1 dimension. Consider using an '
             'array of lower dimensionality to use a more efficient '
             'algorithm')

    if np.ma.isMaskedArray(image):
        mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])
    else:
        mask = np.zeros_like(image, dtype=np.uint8, order='C')

    image_not_masked = np.asarray(np.ma.getdata(image),
                                  dtype=np.double,
                                  order='C')
    image_unwrapped = np.empty_like(image,
                                    dtype=np.double,
                                    order='C',
                                    subok=False)

    if image.ndim == 1:
        unwrap_1d(image_not_masked, image_unwrapped)
    elif image.ndim == 2:
        unwrap_2d(image_not_masked, mask, image_unwrapped, wrap_around, seed)
    elif image.ndim == 3:
        unwrap_3d(image_not_masked, mask, image_unwrapped, wrap_around, seed)

    if np.ma.isMaskedArray(image):
        return np.ma.array(image_unwrapped,
                           mask=mask,
                           fill_value=image.fill_value)
    else:
        return image_unwrapped
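The mask handling above is a compact np.require idiom: np.ma.getmaskarray yields a boolean array, and np.require(mask, np.uint8, ['C']) produces the C-contiguous uint8 buffer the compiled unwrappers expect. For instance:

import numpy as np

image = np.ma.masked_greater(np.linspace(-np.pi, np.pi, 16), 2.0)
mask = np.require(np.ma.getmaskarray(image), np.uint8, ['C'])
assert mask.dtype == np.uint8 and mask.flags['C_CONTIGUOUS']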
Example #44
def actionAngleTorus_jacobian_c(pot,
                                jr,
                                jphi,
                                jz,
                                angler,
                                anglephi,
                                anglez,
                                tol=0.003,
                                dJ=0.001):
    """
    NAME:
       actionAngleTorus_jacobian_c
    PURPOSE:
       compute d(x,v)/d(J,theta) on a single torus, also compute dO/dJ and the frequencies
    INPUT:
       pot - Potential object or list thereof
       jr - radial action (scalar)
       jphi - azimuthal action (scalar)
       jz - vertical action (scalar)
       angler - radial angle (array [N])
       anglephi - azimuthal angle (array [N])
       anglez - vertical angle (array [N])
       tol= (0.003) goal for |dJ|/|J| along the torus
       dJ= (0.001) action difference when computing derivatives (Hessian or Jacobian)
    OUTPUT:
       (d[R,vR,vT,z,vz,phi]/d[J,theta],
        Omegar,Omegaphi,Omegaz,
        Autofit error flag)
        Note: dO/dJ is *not* symmetrized here
    HISTORY:
       2016-07-19 - Written - Bovy (UofT)
    """
    #Parse the potential
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args = _parse_pot(pot, potfortorus=True)

    #Set up result
    R = numpy.empty(len(angler))
    vR = numpy.empty(len(angler))
    vT = numpy.empty(len(angler))
    z = numpy.empty(len(angler))
    vz = numpy.empty(len(angler))
    phi = numpy.empty(len(angler))
    dxvOdJaT = numpy.empty(36 * len(angler))
    dOdJT = numpy.empty(9)
    Omegar = numpy.empty(1)
    Omegaphi = numpy.empty(1)
    Omegaz = numpy.empty(1)
    flag = ctypes.c_int(0)

    #Set up the C code
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleTorus_JacFunc = _lib.actionAngleTorus_jacobianFreqs
    actionAngleTorus_JacFunc.argtypes=\
        [ctypes.c_double,
         ctypes.c_double,
         ctypes.c_double,
         ctypes.c_int,
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.c_int,
         ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.c_double,
         ctypes.c_double,
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.POINTER(ctypes.c_int)]

    #Array requirements, first store old order
    f_cont = [
        angler.flags['F_CONTIGUOUS'], anglephi.flags['F_CONTIGUOUS'],
        anglez.flags['F_CONTIGUOUS']
    ]
    angler = numpy.require(angler,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    anglephi = numpy.require(anglephi,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    anglez = numpy.require(anglez,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    R = numpy.require(R, dtype=numpy.float64, requirements=['C', 'W'])
    vR = numpy.require(vR, dtype=numpy.float64, requirements=['C', 'W'])
    vT = numpy.require(vT, dtype=numpy.float64, requirements=['C', 'W'])
    z = numpy.require(z, dtype=numpy.float64, requirements=['C', 'W'])
    vz = numpy.require(vz, dtype=numpy.float64, requirements=['C', 'W'])
    phi = numpy.require(phi, dtype=numpy.float64, requirements=['C', 'W'])
    dxvOdJaT = numpy.require(dxvOdJaT,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    dOdJT = numpy.require(dOdJT, dtype=numpy.float64, requirements=['C', 'W'])
    Omegar = numpy.require(Omegar,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    Omegaphi = numpy.require(Omegaphi,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    Omegaz = numpy.require(Omegaz,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])

    #Run the C code
    actionAngleTorus_JacFunc(ctypes.c_double(jr), ctypes.c_double(jphi),
                             ctypes.c_double(jz), ctypes.c_int(len(angler)),
                             angler, anglephi, anglez, ctypes.c_int(npot),
                             pot_type, pot_args, ctypes.c_double(tol),
                             ctypes.c_double(dJ), R, vR, vT, z, vz, phi,
                             dxvOdJaT, dOdJT, Omegar, Omegaphi, Omegaz,
                             ctypes.byref(flag))

    #Reset input arrays
    if f_cont[0]: angler = numpy.asfortranarray(angler)
    if f_cont[1]: anglephi = numpy.asfortranarray(anglephi)
    if f_cont[2]: anglez = numpy.asfortranarray(anglez)

    dxvOdJaT = numpy.reshape(dxvOdJaT, (len(angler), 6, 6), order='C')
    dxvOdJa = numpy.swapaxes(dxvOdJaT, 1, 2)

    return (R, vR, vT, z, vz, phi, dxvOdJa, dOdJT.reshape(
        (3, 3)).T, Omegar[0], Omegaphi[0], Omegaz[0], flag.value)
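Unlike the other wrappers in this collection, this one records the inputs' F_CONTIGUOUS flags before numpy.require makes C-ordered, writable copies, then converts back afterwards so callers get their layout preserved. The same idea reduced to a sketch:

import numpy as np

x = np.asfortranarray(np.arange(6.0).reshape(2, 3))
was_f = x.flags['F_CONTIGUOUS']
x = np.require(x, dtype=np.float64, requirements=['C', 'W'])  # C-ordered copy
# ... C library mutates x in place ...
if was_f:
    x = np.asfortranarray(x)   # restore the caller's Fortran order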
Example #45
        with tf.Session(config=config) as session:
            saver = tf.train.import_meta_graph('./' + name + '.meta')
            tf.train.update_checkpoint_state('./', name)

            saver.restore(session, tf.train.latest_checkpoint('./'))

            x = tf.get_collection('x')[0]
            y = tf.get_collection('y')[0]
            pred = tf.get_collection('p')[0]

            # Run trained network on the training sample
            train_data = pandas.read_pickle('inference_training_sample.pickle')
            x_train = train_data[variables].values
            y_train = train_data['is_quark'].values
            x_train = np.require(x_train,
                                 dtype=np.float32,
                                 requirements=['A', 'W', 'C', 'O'])

            p = np.zeros(len(y_train))
            for i in range(0, len(y_train), 100000):
                e = i + 100000
                e = min(len(y_train), e)
                p[i:e] = apply(x_train[i:e], session, x, pred)

            # Save results and print out ROC value
            df_result_train = pandas.DataFrame({'y': y_train, 'p': p})
            df_result_train.to_pickle('result_train_with_boost.pickle')
            print("train", sklearn.metrics.roc_auc_score(y_train, p))
            del train_data

            # Run trained network on the test sample
Example #46
def actionAngleTorus_Freqs_c(pot, jr, jphi, jz, tol=0.003):
    """
    NAME:
       actionAngleTorus_Freqs_c
    PURPOSE:
       compute frequencies on a single torus
    INPUT:
       pot - Potential object or list thereof
       jr - radial action (scalar)
       jphi - azimuthal action (scalar)
       jz - vertical action (scalar)
       tol= (0.003) goal for |dJ|/|J| along the torus
    OUTPUT:
       (Omegar,Omegaphi,Omegaz,flag)
    HISTORY:
       2015-08-05/07 - Written - Bovy (UofT)
    """
    #Parse the potential
    from ..orbit.integrateFullOrbit import _parse_pot
    npot, pot_type, pot_args = _parse_pot(pot, potfortorus=True)

    #Set up result
    Omegar = numpy.empty(1)
    Omegaphi = numpy.empty(1)
    Omegaz = numpy.empty(1)
    flag = ctypes.c_int(0)

    #Set up the C code
    ndarrayFlags = ('C_CONTIGUOUS', 'WRITEABLE')
    actionAngleTorus_FreqsFunc = _lib.actionAngleTorus_Freqs
    actionAngleTorus_FreqsFunc.argtypes=\
        [ctypes.c_double,
         ctypes.c_double,
         ctypes.c_double,
         ctypes.c_int,
         ndpointer(dtype=numpy.int32,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.c_double,
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ndpointer(dtype=numpy.float64,flags=ndarrayFlags),
         ctypes.POINTER(ctypes.c_int)]

    #Array requirements
    Omegar = numpy.require(Omegar,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])
    Omegaphi = numpy.require(Omegaphi,
                             dtype=numpy.float64,
                             requirements=['C', 'W'])
    Omegaz = numpy.require(Omegaz,
                           dtype=numpy.float64,
                           requirements=['C', 'W'])

    #Run the C code
    actionAngleTorus_FreqsFunc(ctypes.c_double(jr), ctypes.c_double(jphi),
                               ctypes.c_double(jz),
                               ctypes.c_int(npot), pot_type, pot_args,
                               ctypes.c_double(tol), Omegar, Omegaphi, Omegaz,
                               ctypes.byref(flag))

    return (Omegar[0], Omegaphi[0], Omegaz[0], flag.value)
Example #47
def is_integer(x):
    return np.array_equal(x, np.require(x, dtype=np.int_))
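Here np.require serves as a cast-and-compare idiom rather than a layout fix: if converting to np.int_ changes any value, x was not integer-valued. For example:

>>> is_integer(np.array([1.0, 2.0]))   # casts cleanly to [1, 2]
True
>>> is_integer(np.array([1.5, 2.0]))   # truncation changes the values
False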
Example #48
def require_writeable_array(arr):
    # This should be fixed in numba (numba issue #2521)
    return np.require(arr, requirements='W')
Example #49
File: tools.py Project: wangvei/orbkit
def require(data, dtype='f', requirements='CA'):
    if dtype == 'f':
        dtype = numpy.float64
    elif dtype == 'i':
        dtype = numpy.intc
    return numpy.require(data, dtype=dtype, requirements=requirements)
Example #50
def is_binary(x):
    return np.array_equal(x, np.require(x, dtype=np.bool_))
Example #51
 def reorder(self, a):
     return np.require(np.transpose(a, axes=self.axes), requirements='C')
Example #52
File: trigger.py Project: zurgeg/obspy
def ar_pick(a,
            b,
            c,
            samp_rate,
            f1,
            f2,
            lta_p,
            sta_p,
            lta_s,
            sta_s,
            m_p,
            m_s,
            l_p,
            l_s,
            s_pick=True):
    """
    Pick P and S arrivals with an AR-AIC + STA/LTA algorithm.

    The algorithm picks onset times using an Auto Regression - Akaike
    Information Criterion (AR-AIC) method. The detection intervals are
    successively narrowed down with the help of STA/LTA ratios as well as
    STA-LTA difference calculations. For details, please see [Akazawa2004]_.

    An important feature of this algorithm is that it requires comparatively
    little tweaking and site-specific settings and is thus applicable to large,
    diverse data sets.

    :type a: :class:`numpy.ndarray`
    :param a: Z signal of the data.
    :type b: :class:`numpy.ndarray`
    :param b: N signal of the data.
    :type c: :class:`numpy.ndarray`
    :param c: E signal of the data.
    :type samp_rate: float
    :param samp_rate: Number of samples per second.
    :type f1: float
    :param f1: Frequency of the lower bandpass window.
    :type f2: float
    :param f2: Frequency of the upper bandpass window.
    :type lta_p: float
    :param lta_p: Length of LTA for the P arrival in seconds.
    :type sta_p: float
    :param sta_p: Length of STA for the P arrival in seconds.
    :type lta_s: float
    :param lta_s: Length of LTA for the S arrival in seconds.
    :type sta_s: float
    :param sta_s: Length of STA for the S arrival in seconds.
    :type m_p: int
    :param m_p: Number of AR coefficients for the P arrival.
    :type m_s: int
    :param m_s: Number of AR coefficients for the S arrival.
    :type l_p: float
    :param l_p: Length of variance window for the P arrival in seconds.
    :type l_s: float
    :param l_s: Length of variance window for the S arrival in seconds.
    :type s_pick: bool
    :param s_pick: If ``True``, also pick the S phase, otherwise only the P
        phase.
    :rtype: tuple
    :returns: A tuple with the P and the S arrival.
    """
    if not (len(a) == len(b) == len(c)):
        raise ValueError("All three data arrays must have the same length.")

    a = scipy.signal.detrend(a, type='linear')
    b = scipy.signal.detrend(b, type='linear')
    c = scipy.signal.detrend(c, type='linear')

    # be nice and adapt type if necessary
    a = np.require(a, dtype=np.float32, requirements=['C_CONTIGUOUS'])
    b = np.require(b, dtype=np.float32, requirements=['C_CONTIGUOUS'])
    c = np.require(c, dtype=np.float32, requirements=['C_CONTIGUOUS'])

    # scale amplitudes to avoid precision issues in case of low amplitudes
    # C code picks the horizontal component with larger amplitudes, so scale
    # horizontal components with a common scaling factor
    data_max = np.abs(a).max()
    if data_max < 100:
        a *= 1e6
        a /= data_max
    data_max = max(np.abs(b).max(), np.abs(c).max())
    if data_max < 100:
        for data in (b, c):
            data *= 1e6
            data /= data_max

    s_pick = C.c_int(s_pick)  # pick S phase also
    ptime = C.c_float()
    stime = C.c_float()
    args = (len(a), samp_rate, f1, f2, lta_p, sta_p, lta_s, sta_s, m_p, m_s,
            C.byref(ptime), C.byref(stime), l_p, l_s, s_pick)
    errcode = clibsignal.ar_picker(a, b, c, *args)
    if errcode != 0:
        bufs = [
            'buff1', 'buff1_s', 'buff2', 'buff3', 'buff4', 'buff4_s',
            'f_error', 'b_error', 'ar_f', 'ar_b', 'buf_sta', 'buf_lta',
            'extra_tr1', 'extra_tr2', 'extra_tr3'
        ]
        if errcode <= len(bufs):
            raise MemoryError('Unable to allocate %s!' % (bufs[errcode - 1]))
        raise Exception('Error during PAZ calculation!')
    return ptime.value, stime.value
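The "be nice and adapt type" step above folds a float32 downcast and a contiguity guarantee into one np.require call, so each trace is copied at most once. In isolation:

import numpy as np
import scipy.signal

z = np.random.randn(1000)                   # float64
z = scipy.signal.detrend(z, type='linear')  # still float64
z = np.require(z, dtype=np.float32, requirements=['C_CONTIGUOUS'])
assert z.dtype == np.float32 and z.flags['C_CONTIGUOUS']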
Example #53
def konno_ohmachi_smoothing(spectra,
                            frequencies,
                            bandwidth=40,
                            count=1,
                            enforce_no_matrix=False,
                            max_memory_usage=512,
                            normalize=False):
    """
    Smooths a matrix containing one spectra per row with the Konno-Ohmachi
    smoothing window.

    All spectra need to have frequency bins corresponding to the same
    frequencies.

    This method first will estimate the memory usage and then either use a fast
    and memory intensive method or a slow one with a better memory usage.

    :type spectra: :class:`numpy.ndarray` (float32 or float64)
    :param spectra:
        One or more spectra per row. If more than one, the first spectrum has
        to be accessible via spectra[0], the next via spectra[1], ...
    :type frequencies: :class:`numpy.ndarray` (float32 or float64)
    :param frequencies:
        Contains the frequencies for the spectra.
    :type bandwidth: float
    :param bandwidth:
        Determines the width of the smoothing peak. Lower values result in a
        broader peak. Must be greater than 0. Defaults to 40.
    :type count: int, optional
    :param count:
        How often to apply the filter. For very noisy spectra it is useful to
        apply it more than once. Defaults to 1.
    :type enforce_no_matrix: bool, optional
    :param enforce_no_matrix:
        An efficient but memory-intensive matrix-multiplication algorithm is
        used in case more than one spectrum is to be smoothed, or one spectrum
        is to be smoothed more than once, provided enough memory is available.
        This flag disables the matrix algorithm altogether. Defaults to False.
    :type max_memory_usage: int, optional
    :param max_memory_usage:
        Set the maximum amount of extra memory in MB for this method. Decides
        whether or not the matrix multiplication method is used. Defaults to
        512 MB.
    :type normalize: bool, optional
    :param normalize:
        The Konno-Ohmachi smoothing window is normalized on a logarithmic
        scale. Set this parameter to True to normalize it on a normal scale.
        Defaults to False.
    """
    if (frequencies.dtype != np.float32 and frequencies.dtype != np.float64) \
       or (spectra.dtype != np.float32 and spectra.dtype != np.float64):
        msg = 'frequencies and spectra need to have a dtype of float32/64.'
        raise ValueError(msg)
    # Spectra and frequencies should have the same dtype.
    if frequencies.dtype != spectra.dtype:
        frequencies = np.require(frequencies, np.float64)
        spectra = np.require(spectra, np.float64)
        msg = 'frequencies and spectra should have the same dtype. It ' + \
              'will be changed to np.float64 for both.'
        warnings.warn(msg)
    # Check the dtype to get the correct size.
    if frequencies.dtype == np.float32:
        size = 4.0
    elif frequencies.dtype == np.float64:
        size = 8.0
    # Calculate the approximate memory usage of the smoothing matrix algorithm.
    length = len(frequencies)
    approx_mem_usage = (length * length + 2 * len(spectra) + length) * \
        size / 1048576.0
    # If smaller than the allowed maximum memory consumption build a smoothing
    # matrix and apply it to each spectrum. Also only use when more than one
    # spectrum is to be smoothed.
    if enforce_no_matrix is False and (len(spectra.shape) > 1 or count > 1) \
       and approx_mem_usage < max_memory_usage:
        # Disable NumPy warnings due to possible divisions by zero/logarithms
        # of zero.
        temp = np.geterr()
        np.seterr(all='ignore')
        smoothing_matrix = calculate_smoothing_matrix(frequencies,
                                                      bandwidth,
                                                      normalize=normalize)
        np.seterr(**temp)
        new_spec = np.dot(spectra, smoothing_matrix)
        # Eventually apply more than once.
        for _i in range(count - 1):
            new_spec = np.dot(new_spec, smoothing_matrix)
        return new_spec
    # Otherwise just calculate the smoothing window every time and apply it.
    else:
        new_spec = np.empty(spectra.shape, spectra.dtype)
        # Separate case for just one spectrum.
        if len(new_spec.shape) == 1:
            # Disable NumPy warnings due to possible divisions by
            # zero/logarithms of zero.
            temp = np.geterr()
            np.seterr(all='ignore')
            for _i in range(len(frequencies)):
                window = konno_ohmachi_smoothing_window(frequencies,
                                                        frequencies[_i],
                                                        bandwidth,
                                                        normalize=normalize)
                new_spec[_i] = (window * spectra).sum()
            np.seterr(**temp)
        # Reuse smoothing window if more than one spectrum.
        else:
            # Disable NumPy warnings due to possible divisions by
            # zero/logarithms of zero.
            temp = np.geterr()
            np.seterr(all='ignore')
            for _i in range(len(frequencies)):
                window = konno_ohmachi_smoothing_window(frequencies,
                                                        frequencies[_i],
                                                        bandwidth,
                                                        normalize=normalize)
                for _j, spec in enumerate(spectra):
                    new_spec[_j, _i] = (window * spec).sum()
            np.seterr(**temp)
        # Eventually apply more than once.
        while count > 1:
            new_spec = konno_ohmachi_smoothing(new_spec,
                                               frequencies,
                                               bandwidth,
                                               enforce_no_matrix=True,
                                               normalize=normalize)
            count -= 1
        return new_spec
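Note the dtype-harmonization use of np.require near the top of this function: called with only a dtype, it upcasts the float32 operand and returns the float64 one unchanged, without copying. A minimal sketch:

import numpy as np

frequencies = np.arange(5, dtype=np.float32)
spectra = np.ones(5, dtype=np.float64)
if frequencies.dtype != spectra.dtype:
    frequencies = np.require(frequencies, np.float64)  # upcast copy
    spectra = np.require(spectra, np.float64)          # no-op: already float64
assert frequencies.dtype == spectra.dtype == np.float64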
Example #54
    def _estimate(self, trajs):
        """
        Parameters
        ----------
        X : tuple of (ttrajs, dtrajs)
            Simulation trajectories. ttrajs contain the indices of the thermodynamic state and
            dtrajs contains the indices of the configurational states.
        ttrajs : list of numpy.ndarray(X_i, dtype=int)
            Every elements is a trajectory (time series). ttrajs[i][t] is the index of the
            thermodynamic state visited in trajectory i at time step t.
        dtrajs : list of numpy.ndarray(X_i, dtype=int)
            dtrajs[i][t] is the index of the configurational state (Markov state) visited in
            trajectory i at time step t.

        """
        # check input
        assert isinstance(trajs, (tuple, list))
        assert len(trajs) == 2
        ttrajs = trajs[0]
        dtrajs = trajs[1]
        # validate input
        for ttraj, dtraj in zip(ttrajs, dtrajs):
            _types.assert_array(ttraj, ndim=1, kind='numeric')
            _types.assert_array(dtraj, ndim=1, kind='numeric')
            assert _np.shape(ttraj)[0] == _np.shape(dtraj)[0]

        # harvest transition counts
        self.count_matrices_full = _util.count_matrices(
            ttrajs,
            dtrajs,
            self.lag,
            sliding=self.count_mode,
            sparse_return=False,
            nstates=self.nstates_full)
        # harvest state counts (for WHAM)
        self.state_counts_full = _util.state_counts(ttrajs,
                                                    dtrajs,
                                                    nthermo=self.nthermo,
                                                    nstates=self.nstates_full)

        # restrict to connected set
        C_sum = self.count_matrices_full.sum(axis=0)
        # TODO: use improved cset
        cset = _largest_connected_set(C_sum, directed=True)
        self.active_set = cset
        # correct counts
        self.count_matrices = self.count_matrices_full[:, cset[:, _np.newaxis],
                                                       cset]
        self.count_matrices = _np.require(self.count_matrices,
                                          dtype=_np.intc,
                                          requirements=['C', 'A'])
        # correct bias matrix
        self.bias_energies = self.bias_energies_full[:, cset]
        self.bias_energies = _np.require(self.bias_energies,
                                         dtype=_np.float64,
                                         requirements=['C', 'A'])
        # correct state counts
        self.state_counts = self.state_counts_full[:, cset]
        self.state_counts = _np.require(self.state_counts,
                                        dtype=_np.intc,
                                        requirements=['C', 'A'])

        # run initialisation
        if self.init is not None:
            if self.init == 'wham':
                self.therm_energies, self.conf_energies, _increments, _loglikelihoods = \
                    _wham.estimate(
                        self.state_counts, self.bias_energies,
                        maxiter=self.init_maxiter, maxerr=self.init_maxerr, save_convergence_info=0,
                        therm_energies=self.therm_energies, conf_energies=self.conf_energies,
                        callback=_ConvergenceProgressIndicatorCallBack(
                            self, 'WHAM init.', self.init_maxiter, self.init_maxerr))
                self._progress_force_finish(stage='WHAM init.')

        # run estimator
        self.therm_energies, self.conf_energies, self.log_lagrangian_mult, \
            self.increments, self.loglikelihoods = _dtram.estimate(
                self.count_matrices, self.bias_energies,
                maxiter=self.maxiter, maxerr=self.maxerr,
                log_lagrangian_mult=self.log_lagrangian_mult,
                conf_energies=self.conf_energies,
                save_convergence_info=self.save_convergence_info,
                callback=_ConvergenceProgressIndicatorCallBack(
                    self, 'DTRAM', self.maxiter, self.maxerr))
        self._progress_force_finish(stage='DTRAM')

        # compute models
        models = [
            _dtram.estimate_transition_matrix(
                self.log_lagrangian_mult, self.bias_energies,
                self.conf_energies, self.count_matrices,
                _np.zeros(shape=self.conf_energies.shape,
                          dtype=_np.float64), K) for K in range(self.nthermo)
        ]
        self.model_active_set = [
            _largest_connected_set(msm, directed=False) for msm in models
        ]
        models = [
            _np.ascontiguousarray((msm[lcc, :])[:, lcc])
            for msm, lcc in zip(models, self.model_active_set)
        ]

        # set model parameters to self
        self.set_model_params(models=[
            _MSM(msm, dt_model=self.timestep_traj.get_scaled(self.lag))
            for msm in models
        ],
                              f_therm=self.therm_energies,
                              f=self.conf_energies)

        # done
        return self
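The three _np.require calls above follow the fancy-indexing steps because advanced indexing such as C[:, cset[:, np.newaxis], cset] already returns a new array, but its dtype and the 'C'/'A' flags expected by the C extension still need pinning. A reduced sketch:

import numpy as np

C_full = np.arange(32).reshape(2, 4, 4)
cset = np.array([0, 2, 3])
sub = C_full[:, cset[:, np.newaxis], cset]   # new (2, 3, 3) array
sub = np.require(sub, dtype=np.intc, requirements=['C', 'A'])
assert sub.shape == (2, 3, 3) and sub.dtype == np.intc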
Example #55
def read(source,
         channels,
         start=None,
         end=None,
         scaled=None,
         type=None,
         series_class=TimeSeries):
    # pylint: disable=redefined-builtin
    """Read a dict of series from one or more GWF files

    Parameters
    ----------
    source : `str`, `list`
        Source of data, any of the following:

        - `str` path of single data file,
        - `str` path of cache file,
        - `list` of paths.

    channels : `~gwpy.detector.ChannelList`, `list`
        a list of channels to read from the source.

    start : `~gwpy.time.LIGOTimeGPS`, `float`, `str` optional
        GPS start time of required data, anything parseable by
        :func:`~gwpy.time.to_gps` is fine.

    end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
        GPS end time of required data, anything parseable by
        :func:`~gwpy.time.to_gps` is fine.

    scaled : `bool`, optional
        apply slope and bias calibration to ADC data.

    type : `dict`, optional
        a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
        can be one of ``'adc'``, ``'proc'``, or ``'sim'``.

    series_class : `type`, optional
        the `Series` sub-type to return.

    Returns
    -------
    data : `~gwpy.timeseries.TimeSeriesDict` or similar
        a dict of ``(channel, series)`` pairs read from the GWF source(s).
    """
    # parse input source
    source = file_list(source)

    # parse type
    ctype = channel_dict_kwarg(type, channels, (str, ))

    # read each individually and append
    out = series_class.DictClass()
    for i, file_ in enumerate(source):
        if i == 1:  # force data into fresh memory so that append works
            for name in out:
                out[name] = numpy.require(out[name], requirements=['O'])
        # read frame
        out.append(read_gwf(file_,
                            channels,
                            start=start,
                            end=end,
                            ctype=ctype,
                            scaled=scaled,
                            series_class=series_class),
                   copy=False)
    return out
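The requirements=['O'] call is the least common flag in this collection: 'O' (OWNDATA) forces a copy whenever the series still aliases the reader's buffer, so the in-place append cannot corrupt shared memory. For instance:

import numpy

base = numpy.arange(10.0)
view = base[::2]                                  # shares base's memory
owned = numpy.require(view, requirements=['O'])   # owning copy
assert owned.flags['OWNDATA'] and not view.flags['OWNDATA']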
Example #56
 def conv(buff, *args, **kwargs):  # pylint: disable=unused-argument
     n = args[size_arg_pos]
     data = ctypes.string_at(buff, n * dformat.size)
     return np.require(np.frombuffer(data, dtype=dformat.to_desc("numpy")),
                       requirements="W")
Example #57
File: rttrace.py Project: rpratt20/obspy
    def append(self, trace, gap_overlap_check=False, verbose=False):
        """
        Appends a Trace object to this RtTrace.

        Registered real-time processing will be applied to copy of appended
        Trace object before it is appended.  This RtTrace will be truncated
        from the beginning to RtTrace.max_length, if specified.
        Sampling rate, data type and trace.id of both traces must match.

        :type trace: :class:`~obspy.core.trace.Trace`
        :param trace:  :class:`~obspy.core.trace.Trace` object to append to
            this RtTrace
        :type gap_overlap_check: bool, optional
        :param gap_overlap_check: Action to take when there is a gap or overlap
            between the end of this RtTrace and start of appended Trace:

            * If True, raise TypeError.
            * If False, all trace processing memory will be re-initialized to
              prevent false signal in processed trace.

            (default is ``False``).
        :type verbose: bool, optional
        :param verbose: Print additional information to stdout
        :return: NumPy :class:`~numpy.ndarray` object containing processed
            trace data from appended Trace object.
        """
        if not isinstance(trace, Trace):
            # only add Trace objects
            raise TypeError("Only obspy.core.trace.Trace objects are allowed")

        # sanity checks
        if self.have_appended_data:
            #  check id
            if self.getId() != trace.getId():
                raise TypeError("Trace ID differs:", self.getId(),
                                trace.getId())
            #  check sample rate
            if self.stats.sampling_rate != trace.stats.sampling_rate:
                raise TypeError("Sampling rate differs:",
                                self.stats.sampling_rate,
                                trace.stats.sampling_rate)
            #  check calibration factor
            if self.stats.calib != trace.stats.calib:
                raise TypeError("Calibration factor differs:",
                                self.stats.calib, trace.stats.calib)
            # check data type
            if self.data.dtype != trace.data.dtype:
                raise TypeError("Data type differs:", self.data.dtype,
                                trace.data.dtype)
        # TODO: IMPORTANT? Should improve check for gaps and overlaps
        # and handle more elegantly
        # check times
        gap_or_overlap = False
        if self.have_appended_data:
            # delta = int(math.floor(\
            #    round((rt.stats.starttime - lt.stats.endtime) * sr, 5) )) - 1
            diff = trace.stats.starttime - self.stats.endtime
            delta = diff * self.stats.sampling_rate - 1.0
            if verbose:
                msg = "%s: Overlap/gap of (%g) samples in data: (%s) (%s) " + \
                    "diff=%gs  dt=%gs"
                print(msg %
                      (self.__class__.__name__, delta, self.stats.endtime,
                       trace.stats.starttime, diff, self.stats.delta))
            if delta < -0.1:
                msg = "Overlap of (%g) samples in data: (%s) (%s) diff=%gs" + \
                    "  dt=%gs"
                msg = msg % (-delta, self.stats.endtime, trace.stats.starttime,
                             diff, self.stats.delta)
                if gap_overlap_check:
                    raise TypeError(msg)
                gap_or_overlap = True
            if delta > 0.1:
                msg = "Gap of (%g) samples in data: (%s) (%s) diff=%gs" + \
                    "  dt=%gs"
                msg = msg % (delta, self.stats.endtime, trace.stats.starttime,
                             diff, self.stats.delta)
                if gap_overlap_check:
                    raise TypeError(msg)
                gap_or_overlap = True
            if gap_or_overlap:
                msg += " - Trace processing memory will be re-initialized."
                warnings.warn(msg, UserWarning)
            else:
                # correct start time to pin absolute trace timing to start of
                # appended trace, this prevents slow drift of nominal trace
                # timing from absolute time when nominal sample rate differs
                # from true sample rate
                self.stats.starttime = \
                    self.stats.starttime + diff - self.stats.delta
                if verbose:
                    print("%s: self.stats.starttime adjusted by: %gs" %
                          (self.__class__.__name__, diff - self.stats.delta))
        # first apply all registered processing to Trace
        for proc in self.processing:
            process_name, options, rtmemory_list = proc
            # if gap or overlap, clear memory
            if gap_or_overlap and rtmemory_list is not None:
                for n in range(len(rtmemory_list)):
                    rtmemory_list[n] = RtMemory()
            # apply processing
            trace = trace.copy()
            dtype = trace.data.dtype
            if hasattr(process_name, '__call__'):
                # check if direct function call
                trace.data = process_name(trace.data, **options)
            else:
                # got predefined function
                func = REALTIME_PROCESS_FUNCTIONS[process_name.lower()][0]
                options['rtmemory_list'] = rtmemory_list
                trace.data = func(trace, **options)
            # assure dtype is not changed
            trace.data = np.require(trace.data, dtype=dtype)
        # if first data, set stats
        if not self.have_appended_data:
            self.data = np.array(trace.data)
            self.stats = Stats(header=trace.stats)
            self.have_appended_data = True
            return trace
        # handle all following data sets
        # fix Trace.__add__ parameters
        # TODO: IMPORTANT? Should check for gaps and overlaps and handle
        # more elegantly
        sum_trace = Trace.__add__(self,
                                  trace,
                                  method=0,
                                  interpolation_samples=0,
                                  fill_value='latest',
                                  sanity_checks=True)
        # Trace.__add__ returns new Trace, so update to this RtTrace
        self.data = sum_trace.data
        # left trim if data length exceeds max_length
        if self.max_length is not None:
            max_samples = int(self.max_length * self.stats.sampling_rate + 0.5)
            if np.size(self.data) > max_samples:
                starttime = self.stats.starttime + \
                    (np.size(self.data) - max_samples) / \
                    self.stats.sampling_rate
                self._ltrim(starttime,
                            pad=False,
                            nearest_sample=True,
                            fill_value=None)
        return trace
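The np.require(trace.data, dtype=dtype) call near the end of the processing loop is a dtype-restoring cast: a processing function may promote the data (say, int32 to float64), and require casts it back without imposing any layout requirements. In isolation:

import numpy as np

data = np.arange(5, dtype=np.int32)
out = np.sqrt(data)                        # processing promoted to float64
out = np.require(out, dtype=data.dtype)    # cast back to the original dtype
assert out.dtype == np.int32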
Example #58
                  squeeze_me=True)  # specify filename to load
os.chdir(owd)

X = mat['X']  # output data, structured
lengthscale = mat['lengthscale']  # lengthscales l
lengthscale_p = mat['lengthscale_p']  # lengthscales p
sn = mat['sn']  # noise parameter
sf = mat['sf']  # power parameter
S = mat['S']  # spectral points
MU = mat['MU']  # variational latent state
SIGMA = mat['SIGMA']  # variational latent state variance
U = np.array(mat['U'], dtype=np.float64)  # pseudo input points
b = mat['b']  # phases

# eliminate bad matlab to python conversion
X = np.require(X, dtype=None, requirements='A')
lengthscale = np.require(lengthscale, dtype=None, requirements='A')
lengthscale_p = np.require(lengthscale_p, dtype=None, requirements='A')
sn = np.require(sn, dtype=None, requirements='A')
sf = np.require(sf, dtype=None, requirements='A')
S = np.require(S, dtype=None, requirements='A')
MU = np.require(MU, dtype=None, requirements='A')
SIGMA = np.require(SIGMA, dtype=None, requirements='A')
U = np.require(U, dtype=None, requirements='A')
b = np.require(b, dtype=None, requirements='A')

Q = MU.shape[1]  # input data dimension
(N, D) = X.shape  # output data dimension
M = U.shape[0]

# S = np.random.normal(0, 1,(M,Q))
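Each of these conversions passes dtype=None with requirements='A' (ALIGNED): the dtype is left untouched and a copy is made only if the MATLAB-loaded data came back misaligned. A constructed example of an unaligned array being repaired:

import numpy as np

buf = np.zeros(33, dtype=np.uint8)
x = buf[1:].view(np.float64)                     # 4 doubles at byte offset 1
assert not x.flags['ALIGNED']
x = np.require(x, dtype=None, requirements='A')  # aligned copy
assert x.flags['ALIGNED']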
Example #59
def _run_reco(args):
    np.seterr(divide='ignore', invalid='ignore')
    # Create par struct to store parameters
    par = {}
    ###############################################################################
    # Read Input data   ###########################################################
    ###############################################################################
    if args.data == '':
        raise ValueError("No data file specified")

    name = os.path.normpath(args.data)
    fname = name.split(os.sep)[-1]
    h5_dataset = h5py.File(name, 'r')
    par["file"] = h5_dataset
    h5_dataset_rawdata_name = 'rawdata'
    h5_dataset_trajectory_name = 'trajectory'

    if "heart" in args.data:
        if args.acc == 2:
            R = 33
            trajectory = h5_dataset.get(h5_dataset_trajectory_name)[:, :, :33]
            rawdata = h5_dataset.get(h5_dataset_rawdata_name)[:, :, :33, :]
        elif args.acc == 3:
            R = 22
            trajectory = h5_dataset.get(h5_dataset_trajectory_name)[:, :, :22]
            rawdata = h5_dataset.get(h5_dataset_rawdata_name)[:, :, :22, :]
        elif args.acc == 4:
            R = 11
            trajectory = h5_dataset.get(h5_dataset_trajectory_name)[:, :, :11]
            rawdata = h5_dataset.get(h5_dataset_rawdata_name)[:, :, :11, :]
        else:
            R = 55
            trajectory = h5_dataset.get(h5_dataset_trajectory_name)[...]
            rawdata = h5_dataset.get(h5_dataset_rawdata_name)[...]
    else:
        R = args.acc
        trajectory = h5_dataset.get(h5_dataset_trajectory_name)[:, :, ::R]
        rawdata = h5_dataset.get(h5_dataset_rawdata_name)[:, :, ::R, :]

    [dummy, nFE, nSpokes, nCh] = rawdata.shape

    ###############################################################################
    # Read Data ###################################################################
    ###############################################################################
    par["ogf"] = float(eval(args.ogf))
    dimX, dimY, NSlice = [int(nFE / par["ogf"]), int(nFE / par["ogf"]), 1]
    data = np.require(np.squeeze(rawdata.T)[None, :, None, ...],
                      requirements='C')
    par["traj"] = np.require(
        (trajectory[0] / (2 * np.max(trajectory[0])) + 1j * trajectory[1] /
         (2 * np.max(trajectory[0]))).T[None, ...],
        requirements='C')

    par["dcf"] = np.sqrt(np.array(goldcomp.cmp(par["traj"]),
                                  dtype=DTYPE_real)).astype(DTYPE_real)
    par["dcf"] = np.require(np.abs(par["dcf"]), DTYPE_real, requirements='C')
    [NScan, NC, reco_Slices, Nproj, N] = data.shape
    ###############################################################################
    # Set sequence related parameters #############################################
    ###############################################################################
    par["NC"] = NC
    par["dimY"] = dimY
    par["dimX"] = dimX
    par["NSlice"] = NSlice
    par["NScan"] = NScan
    par["N"] = N
    par["Nproj"] = Nproj
    ###############################################################################
    # Create OpenCL Context and Queues ############################################
    ###############################################################################
    platforms = cl.get_platforms()
    par["GPU"] = False
    par["Platform_Indx"] = 0
    for j in range(len(platforms)):
        if platforms[j].get_devices(device_type=cl.device_type.GPU):
            print(
                "GPU OpenCL platform <%s> found with %i device(s) "
                "and OpenCL-version <%s>" %
                (str(platforms[j].get_info(cl.platform_info.NAME)),
                 len(platforms[j].get_devices(device_type=cl.device_type.GPU)),
                 str(platforms[j].get_info(cl.platform_info.VERSION))))
            par["GPU"] = True
            par["Platform_Indx"] = j
    if not par["GPU"]:
        print("No GPU OpenCL platform found. Returning.")

    par["ctx"] = []
    par["queue"] = []
    num_dev = len(platforms[par["Platform_Indx"]].get_devices())
    par["num_dev"] = num_dev
    for device in range(num_dev):
        dev = []
        dev.append(platforms[par["Platform_Indx"]].get_devices()[device])
        tmp = cl.Context(dev)
        par["ctx"].append(tmp)
        par["queue"].append(
            cl.CommandQueue(
                tmp,
                platforms[par["Platform_Indx"]].get_devices()[device],
                properties=cl.command_queue_properties.
                OUT_OF_ORDER_EXEC_MODE_ENABLE
                | cl.command_queue_properties.PROFILING_ENABLE))
    ###############################################################################
    # Coil Sensitivity Estimation #################################################
    ###############################################################################
    img_igrid = bart(1, 'nufft -i -t', trajectory, rawdata)
    img_igrid_sos = bart(1, 'rss 8', img_igrid)
    img_igrid_sos = np.abs(img_igrid_sos).astype(DTYPE)

    try:
        slices_coils = par["file"]['Coils'].shape[1]
        print("Using precomputed coil sensitivities")
        par["C"] = par["file"]['Coils'][
            :, int(slices_coils/2) - int(np.floor((par["NSlice"])/2)):
            int(slices_coils/2) + int(np.ceil(par["NSlice"]/2)), ...]\
            .astype(DTYPE)

        par["InScale"] = par["file"]["InScale"][
         int(slices_coils/2)-int(np.floor((par["NSlice"])/2)):
         int(slices_coils/2)+int(np.ceil(par["NSlice"]/2)), ...]\
            .astype(DTYPE_real)
    except KeyError:
        img_igrid = bart(1, 'nufft -i -t', trajectory, rawdata)
        data_bart = np.fft.fftshift(
            np.fft.fft2(np.fft.ifftshift(img_igrid.T, (-2, -1)), norm='ortho'),
            (-2, -1))
        data_bart = np.require(np.squeeze(data_bart.T).astype(DTYPE),
                               requirements='C')[None, ...]
        sens_maps = bart(1, 'ecalib -m1 -I', data_bart)
        sens_maps = np.require(np.squeeze(sens_maps).T, requirements='C')
        par["C"] = sens_maps[:, None, ...]
        par["C"] = np.require(np.transpose(par["C"], (0, 1, 3, 2)),
                              requirements='C')
        sumSqrC = np.sqrt(np.sum(np.abs(par["C"] * np.conj(par["C"])), 0))
        par["C"] = par["C"] / np.tile(sumSqrC, (par["NC"], 1, 1, 1))
        par["C"][~np.isfinite(par["C"])] = 0
        #        #### Remove backfolded part at the top
        #        par["C"][:, :, :20, :] = 0
        par["InScale"] = sumSqrC
        par["file"].create_dataset('Coils',
                                   shape=par["C"].shape,
                                   dtype=DTYPE,
                                   data=par["C"])
        par["file"].create_dataset('InScale',
                                   shape=sumSqrC.shape,
                                   dtype=DTYPE_real,
                                   data=sumSqrC)
        del sumSqrC
    par["file"].close()
    ###############################################################################
    # Set Intensity and Density Scaling ###########################################
    ###############################################################################
    if not args.inscale:
        par["C"] *= par["InScale"]
        par["InScale"] = np.ones_like(par["InScale"])
    if args.denscor:
        data = data * (par["dcf"])
    else:
        par["dcf"] = np.ones_like(par["dcf"])


    ###############################################################################
    # Generate NUFFT ##############################################################
    ###############################################################################
    FFT = utils.NUFFT(par)

    def nFTH(x, fft, par):
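        # Adjoint NUFFT applied per coil j and slice k: x has shape
        # (NScan, NC, NSlice, Nproj, N) in k-space; the result is
        # reordered to (NScan, NC, NSlice, dimY, dimX) image space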
        siz = np.shape(x)
        result = np.require(np.zeros(
            (par["NC"], par["NSlice"], par["NScan"], par["dimY"], par["dimX"]),
            dtype=DTYPE),
                            requirements='C')
        tmp_result = clarray.empty(
            fft.queue, (par["NScan"], 1, 1, par["dimY"], par["dimX"]),
            dtype=DTYPE)
        for j in range(siz[1]):
            for k in range(siz[2]):
                inp = clarray.to_device(
                    fft.queue,
                    np.require(x[:, j, k, ...][:, None, None, ...],
                               requirements='C'))
                fft.adj_NUFFT(tmp_result, inp)
                result[j, k, ...] = np.squeeze(tmp_result.get())
        return np.transpose(result, (2, 0, 1, 3, 4))

    images_coils = nFTH(data, FFT, par)
    images = np.require(np.sum(images_coils * (np.conj(par["C"])), axis=1),
                        requirements='C')
    del FFT, nFTH

    opt = CGReco(par)
    opt.data = data
    ###############################################################################
    # Start Reco ##################################################################
    ###############################################################################
    opt.reco_par = utils.read_config(args.config, "DEFAULT")
    opt.execute()
    result = opt.result
    res = opt.res
    del opt
    ###############################################################################
    # New .hdf5 save files ########################################################
    ###############################################################################
    outdir = ""
    if "heart" in args.data:
        outdir += "/heart"
    elif "brain" in args.data:
        outdir += "/brain"
    if not os.path.exists('./output'):
        os.makedirs('output')
    if not os.path.exists('./output' + outdir):
        os.makedirs("./output" + outdir)
    cwd = os.getcwd()
    os.chdir("./output" + outdir)
    f = h5py.File(
        "CG_reco_InScale_" + str(args.inscale) + "_denscor_" +
        str(args.denscor) + "_reduction_" + str(R) + "_acc_" + str(args.acc) +
        "_" + fname, "w")
    f.create_dataset("images_ifft_", images.shape, dtype=DTYPE, data=images)
    f.create_dataset("images_ifft_coils_",
                     images_coils.shape,
                     dtype=DTYPE,
                     data=images_coils)
    f.create_dataset("CG_reco", result.shape, dtype=DTYPE, data=result)
    f.create_dataset('InScale',
                     shape=par["InScale"].shape,
                     dtype=DTYPE_real,
                     data=par["InScale"])
    f.create_dataset('Bart_ref',
                     shape=img_igrid_sos.shape,
                     dtype=DTYPE,
                     data=img_igrid_sos)
    f.attrs['res'] = res
    f.flush()
    f.close()
    os.chdir(cwd)
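
Why requirements='C' appears before every device upload in the example above: host-to-device copies expect one contiguous block of memory, and sliced arrays are generally strided views. A self-contained check of that pattern (shapes here are arbitrary):

import numpy as np

x = np.zeros((4, 8, 2, 16, 16), dtype=np.complex64)
view = x[:, 3, 1, ...][:, None, None, ...]   # strided view, not contiguous
buf = np.require(view, requirements='C')     # contiguous copy, safe to upload
print(view.flags['C_CONTIGUOUS'], buf.flags['C_CONTIGUOUS'])  # False True
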
def read_gwf(filename,
             channels,
             start=None,
             end=None,
             scaled=None,
             ctype=None,
             series_class=TimeSeries):
    """Read a dict of series data from a single GWF file

    Parameters
    ----------
    filename : `str`
        the GWF path from which to read

    channels : `~gwpy.detector.ChannelList`, `list`
        a list of channels to read from the source.

    start : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
        GPS start time of required data, anything parseable by
        :func:`~gwpy.time.to_gps` is fine.

    end : `~gwpy.time.LIGOTimeGPS`, `float`, `str`, optional
        GPS end time of required data, anything parseable by
        :func:`~gwpy.time.to_gps` is fine.

    scaled : `bool`, optional
        apply slope and bias calibration to ADC data.

    ctype : `dict`, optional
        a `dict` of ``(name, channel-type)`` pairs, where ``channel-type``
        can be one of ``'adc'``, ``'proc'``, or ``'sim'``.

    series_class : `type`, optional
        the `Series` sub-type to return.

    Returns
    -------
    data : `~gwpy.timeseries.TimeSeriesDict` or similar
        a dict of ``(channel, series)`` pairs read from the GWF file.
    """
    # parse kwargs
    if not start:
        start = 0
    if not end:
        end = 0
    if ctype is None:  # allow the .get() lookups below when not given
        ctype = {}
    span = Segment(start, end)

    # open file
    stream = io_gwf.open_gwf(filename, 'r')
    nframes = stream.GetNumberOfFrames()

    # find channels
    out = series_class.DictClass()

    # loop over frames in GWF
    i = 0
    while True:
        this = i
        i += 1

        # read frame
        try:
            frame = stream.ReadFrameNSubset(this, 0)
        except IndexError:
            if this >= nframes:
                break
            raise

        # check whether we need this frame at all
        if not _need_frame(frame, start, end):
            continue

        # get epoch for this frame
        epoch = LIGOTimeGPS(*frame.GetGTime())

        # and read all the channels
        for channel in channels:
            _scaled = _dynamic_scaled(scaled, channel)
            try:
                new = _read_channel(stream,
                                    this,
                                    str(channel),
                                    ctype.get(channel, None),
                                    epoch,
                                    start,
                                    end,
                                    scaled=_scaled,
                                    series_class=series_class)
            except _Skip:  # don't need this frame for this channel
                continue
            try:
                out[channel].append(new)
            except KeyError:
                out[channel] = numpy.require(new, requirements=['O'])

        # if we have all of the data we want, stop now
        if out and all(span in out[channel].span for channel in out):
            break

    # if any channels weren't read, something went wrong
    for channel in channels:
        if channel not in out:
            msg = "Failed to read {0!r} from {1!r}".format(
                str(channel), filename)
            if start or end:
                msg += ' for {0}'.format(span)
            raise ValueError(msg)

    return out
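
The requirements=['O'] in the loop above asks numpy for an array that owns its data, i.e. a copy rather than a view into the frame library's buffer, so the stored series stays valid after the stream is discarded. A minimal sketch of the flag's effect:

import numpy as np

base = np.arange(10.0)
view = base[2:8]                  # shares base's memory
owned = np.require(view, requirements=['O'])
print(view.flags['OWNDATA'], owned.flags['OWNDATA'])  # False True
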