Example #1
def getMatrix(AllData):
  # build questions-x-subjects matrices for all the data
  numQs = len(AllData.keys())
  subjects = len(AllData[1]['RT'])
  correct = py.zeros([numQs, subjects])
  confA = py.zeros([numQs, subjects])
  confB = py.zeros([numQs, subjects])
  RTs = py.zeros([numQs, subjects])
  for i in xrange(subjects):
    # columns: one per subject
    for j in xrange(1, numQs + 1):
      # rows: questions are keyed 1..numQs
      correct[j-1, i] = AllData[j]['correct'][i]
      confA[j-1, i] = AllData[j]['confA'][i]
      confB[j-1, i] = AllData[j]['confB'][i]
      RTs[j-1, i] = AllData[j]['RT'][i]

  print(py.shape(correct), py.shape(confA), py.shape(confB), py.shape(RTs))
  return correct, confA, confB, RTs
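A minimal sketch of the input layout this function expects (a question-keyed dict of per-subject field lists); the toy values and the pylab import are assumptions, not part of the original code:

import pylab as py

AllData = {q: {'correct': [1, 0], 'confA': [3, 4],
               'confB': [2, 5], 'RT': [0.8, 1.1]}
           for q in range(1, 17)}                  # 16 questions, 2 subjects
correct, confA, confB, RTs = getMatrix(AllData)    # each array is 16 x 2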
Example #2
        def residuals(params,x,y,z):
            # offsets (xo,yo,zo), per-axis scales (xs,ys,zs) and
            # cross-axis coupling terms (xys,xzs,yzs)
            xo, xs, yo, ys, zo, zs, xys, xzs, yzs = params

            xc = empty(shape(x))
            yc = empty(shape(y))
            zc = empty(shape(z))
            for i in range(len(x)):
                _x = x[i] - xo
                _y = y[i] - yo
                _z = z[i] - zo

                xc[i] = _x * (xs + _y * xys + _z * xzs)
                yc[i] = _y * (ys + _z * yzs)
                zc[i] = _z * (zs)

            res = []
            for i in range(len(xc)):
                norm = l2norm(array([xc[i],yc[i],zc[i]])) - 1.0
                res.append(norm)

            return array(res)
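This residual measures how far each corrected point lies from the unit sphere, which is the shape expected by scipy.optimize.leastsq. A hedged, self-contained sketch of how it might be driven if the inner function were lifted to module scope; the l2norm helper and the synthetic data are assumptions, not part of the original code:

from numpy import array, empty, shape, sqrt, sin, cos, arccos, pi
from numpy.random import rand
from scipy.optimize import leastsq

def l2norm(v):
    return sqrt((v ** 2).sum())

# synthetic points on an offset, anisotropically scaled sphere
n = 200
u, v = rand(n) * 2 * pi, arccos(2 * rand(n) - 1)
x = 2.0 * sin(v) * cos(u) + 0.3
y = 1.5 * sin(v) * sin(u) - 0.1
z = 0.5 * cos(v) + 0.2

# initial guess: [xo, xs, yo, ys, zo, zs, xys, xzs, yzs]
params0 = array([0., 1., 0., 1., 0., 1., 0., 0., 0.])
params, ier = leastsq(residuals, params0, args=(x, y, z))
print(params[:6])  # offsets/scales should approach 0.3, 0.5, -0.1, 0.667, 0.2, 2.0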
Example #3
def filter2d(x, y, axes=['y'], algos=['2sigma']):
    """
    Perform 2D data filtration along selected axes.
    In:
        x : ndarray, X vector
        y : ndarray, Y vector
        axes : list, names of axes used to choose filtered values ('x', 'y' or any combination)
        algos : list, filtration algorithms, passed through to filter1d
    Out:
        xnew : ndarray, filtered X
        ynew : ndarray, filtered Y
    """
    xnew = pl.array(x, dtype='float')
    ynew = pl.array(y, dtype='float')
    mask_x = pl.ones(len(x), dtype='bool')
    mask_y = pl.ones(len(y), dtype='bool')
    if 'y' in axes:
        mask_y = filter1d(y,algos=algos)        
    if 'x' in axes:
        mask_x = filter1d(x,algos=algos)
    mask = mask_x * mask_y
    # zero out rejected samples, then drop the zeros below
    # (note: this also drops any genuine zero-valued samples)
    xnew *= mask
    ynew *= mask
    
    xnew = pl.ma.masked_equal(xnew,0)
    xnew = pl.ma.compressed(xnew)
    ynew = pl.ma.masked_equal(ynew,0)
    ynew = pl.ma.compressed(ynew)

    assert pl.shape(xnew) == pl.shape(ynew)
    return xnew, ynew
Example #4
        def residuals(params,x,y,z):
            # offsets (xo,yo,zo) and per-axis scales (xs,ys,zs)
            xo, xs, yo, ys, zo, zs = params

            xc = empty(shape(x))
            for i in range(len(x)):
                xc[i] = (x[i] - xo) * xs

            yc = empty(shape(y))
            for i in range(len(y)):
                yc[i] = (y[i] - yo) * ys

            zc = empty(shape(z))
            for i in range(len(z)):
                zc[i] = (z[i] - zo) * zs

            res = []
            for i in range(len(xc)):
                norm = l2norm(array([xc[i],yc[i],zc[i]])) - 1.0
                res.append(norm)

            return array(res)
Example #5
def g_l2_wd(x0, S, I, gamma):
    # gradient of the l2-weight-decay objective f_l2_wd with respect to the basis A
    M = shape(S)[0]
    L, batch_size = shape(I)
    A = matrix(reshape(x0,[L,M]))
    E = I - A*S
    g = -E*S.T/batch_size + gamma*A
    return g.A1
Example #6
def f_l2_wd(x0, S, I, gamma):
    # mean squared reconstruction error of I by A*S, plus l2 weight decay on A
    M = shape(S)[0]
    L, batch_size = shape(I)
    A = matrix(reshape(x0,[L,M]))
    E = I - A*S
    f = 0.5*(E.A**2).sum()/batch_size + 0.5*gamma*(A.A**2).sum()
    return f
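A quick finite-difference check that g_l2_wd (Example #5) really is the gradient of f_l2_wd; the toy shapes are made up, and both functions are assumed to live in a `from numpy import *` namespace, as the fragments suggest:

import numpy as np
np.random.seed(0)
L, M, batch_size, gamma = 4, 3, 5, 0.1
S = np.matrix(np.random.randn(M, batch_size))
I = np.matrix(np.random.randn(L, batch_size))
x0 = np.random.randn(L * M)
eps = 1e-6
num = np.array([(f_l2_wd(x0 + eps*e, S, I, gamma) -
                 f_l2_wd(x0 - eps*e, S, I, gamma)) / (2*eps)
                for e in np.eye(L * M)])
print(np.allclose(num, g_l2_wd(x0, S, I, gamma), atol=1e-5))  # expect True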
Example #7
def loadMNISTImages(filename):
  f = open(filename, 'rb')

  # Verify magic number (Python 2 only: bytes -> hex string -> big-endian int)
  s = f.read(4)
  magic = int(s.encode('hex'), 16)
  assert(magic == 2051)

  # Get Number of Images
  s = f.read(4)
  numImages = int(s.encode('hex'),16)
  s = f.read(4)
  numRows = int(s.encode('hex'),16)
  s = f.read(4)
  numCols = int(s.encode('hex'),16)

  # Get Data
  s = f.read()
  a = frombuffer(s, uint8)

  # Use 'F' to ensure that we read by column
  a = reshape(a, (numCols, numRows, numImages), order='F')
  images = transpose(a, (1, 0, 2))
  f.close()

  # Reshape to #pixels * #examples (note: reshape the transposed images,
  # not the raw buffer a)
  images = reshape(images, (shape(images)[0] * shape(images)[1], numImages),
          order='F')
  images = double(images)/255
  return images
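A hedged usage sketch; the idx filename is the conventional MNIST one and is an assumption here (the hex trick above makes this loader Python 2 only):

images = loadMNISTImages('train-images-idx3-ubyte')
print(shape(images))   # (784, 60000) for the standard training set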
Example #8
def plotEnsemble2D(ens,v1,v2,colordata=None,hess=None,\
		   size=50,labelBest=True,ensembleAlpha=0.75,contourAlpha=1.0):
	"""
	Plots a 2-dimensional projection of a given parameter
	ensemble, along given directions:
	     -- If v1 and v2 are scalars, project onto plane given by
		those two bare parameter directions.
	     -- If v1 and v2 are vectors, project onto those two vectors.
	
	When given colordata (either a single color, or an array
	of different colors the length of ensemble size), each point
	will be assigned a color based on the colordata.
	
	With labelBest set, the first point in the ensemble is
	plotted larger (to show the 'best fit' point for a usual 
	parameter ensemble).
	
	If a Hessian is given, cost contours will be plotted
	using plotContours2D.
	"""
	if pylab.shape(v1) == ():
		xdata = pylab.transpose(ens)[v1]
		ydata = pylab.transpose(ens)[v2]
		
		# label axes
		param1name, param2name = '',''
		try:
		    paramLabels = ens[0].keys()
		except AttributeError:
		    paramLabels = None
		if paramLabels is not None:
		    param1name = ' ('+paramLabels[v1]+')'
		    param2name = ' ('+paramLabels[v2]+')'
		pylab.xlabel('Parameter '+str(v1)+param1name)
		pylab.ylabel('Parameter '+str(v2)+param2name)
	else:
		xdata = pylab.dot(ens,v1)
		ydata = pylab.dot(ens,v2)

	if colordata is None:
		colordata = pylab.ones(len(xdata))
		
	if labelBest: # plot first as larger circle
		if pylab.shape(colordata) == (): # single color
		    colordata0 = colordata
		    colordataRest = colordata
		else: # specified colors
		    colordata0 = [colordata[0]]
		    colordataRest = colordata[1:]
		scatterColors(xdata[1:],ydata[1:],colordataRest,		\
				size,alpha=ensembleAlpha)
		scatterColors([xdata[0]],[ydata[0]],colordata0,			\
				size*4,alpha=ensembleAlpha)
	else:
		scatterColors(xdata,ydata,colordata,size,alpha=ensembleAlpha)
		
	if hess is not None:
		plotApproxContours2D(hess,v1,v2,pylab.array(ens[0]),	\
			alpha=contourAlpha)
Example #9
 def __mul__(self, V):
     if not self.TR:
         batchsize, v = shape(V)
         return self[0]*V + self[1] 
     else:
         batchsize, h = shape(V)
         assert(h==self.h)            
         return self[0].transpose()*V
Example #10
def objfun_l2_wd(x0, S, I, gamma):
    # value and gradient of the l2-weight-decay objective in one pass
    M = shape(S)[0]
    L, batch_size = shape(I)
    A = matrix(reshape(x0,[L,M]))
    E = I - A*S 
    f = 0.5*(E.A**2).sum()/batch_size + 0.5*gamma*(A.A**2).sum()
    g = -E*S.T/batch_size + gamma*A   
    return (f,g.A1)
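Because objfun_l2_wd returns the (value, gradient) pair, it can be fed straight to scipy.optimize.minimize with jac=True. A sketch under the toy x0, S, I, gamma, L, M from the gradient check after Example #6:

from scipy.optimize import minimize
res = minimize(objfun_l2_wd, x0, args=(S, I, gamma),
               jac=True, method='L-BFGS-B')
A_opt = res.x.reshape(L, M)   # learned basis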
Example #11
def correctBias(AllData):
  # correct for difficulty and plot each subject %correct vs confidence
  corrmatrix, confmatrix = returnConfMatrix(AllData)
  Qs, subjects = py.shape(corrmatrix)
  copts = [1,2,3,4,5]
  datamat = np.array(py.zeros([len(copts), subjects]))
  print(datamat)
  fig = py.figure()
  ax15 = fig.add_subplot(111) 
  i = 0
 
  while i < subjects:
    c1, c2, c3, c4, c5 = [],[],[],[],[]
    # get confidences for each subject
    j = 0
    while j < Qs:
      # get confidences and correct for each question
      if confmatrix[j][i] == 1:
        c1.append(corrmatrix[j][i])
      elif confmatrix[j][i] == 2:
        c2.append(corrmatrix[j][i])
      elif confmatrix[j][i] == 3:
        c3.append(corrmatrix[j][i])
      elif confmatrix[j][i] == 4:
        c4.append(corrmatrix[j][i])
      elif confmatrix[j][i] == 5:
        c5.append(corrmatrix[j][i])
      else:
        print('bad num encountered')
        
      j += 1
    print('i is %d' %i)
    minconf = [py.mean(c1), py.mean(c2), py.mean(c3),
               py.mean(c4), py.mean(c5)]
    pmin = 10
    for p in minconf:
      if p < pmin and p != 0 and not math.isnan(p):
        pmin = p
    
    print(pmin)
    datamat[0][i] = py.mean(c1)/pmin
    datamat[1][i] = py.mean(c2)/pmin
    datamat[2][i] = py.mean(c3)/pmin
    datamat[3][i] = py.mean(c4)/pmin
    datamat[4][i] = py.mean(c5)/pmin
    # print(datamat)
    print( py.shape(datamat))
    print(len(datamat[:,i]))
    ax15.plot(range(1,6), datamat[:,i], alpha=0.4, linewidth=4)
    i += 1
  
  ax15.set_ylabel('Modified Correct')
  ax15.set_xlabel('Confidence')
  ax15.set_title('All responses')
  ax15.set_xticks(np.arange(1,6))
  ax15.set_xticklabels( [1, 2, 3, 4, 5] )
  ax15.set_xlim(0,6)
Example #12
def spatio_temporal(ancl):

    os.mkdir('SpatioTemporalVels')

    print('RIGHT NOW THIS IS ONLY FOR VX!!!!!!!')

    p_arr = pl.arange(0,ancl.N)
    

    # How many cycles do we want to look at?
    how_many = 10

    var_arr = pl.array([])
    for i,j in enumerate(os.listdir('.')):
        if 'poindat.txt' not in j:
            continue
        print('working on file ' + j)
        poin_num = int(j[:j.find('p')])
        cur_file = open(j,'r')
        cur_sweep_var = float(cur_file.readline().split()[-1])
        cur_data=pl.genfromtxt(cur_file)
        cur_file.close()

        var_arr = pl.append(var_arr,cur_sweep_var)
        
        grid = cur_data[-int(how_many*2.0*pl.pi/ancl.dt):,:ancl.N]

        # in 1D, because particles never cross each other, we can order them
        # in the images to match their physical order.
        grid_ordered = pl.zeros(pl.shape(grid))
        # can just use the initial conditions to figure out where each is
        init_x = cur_data[0,ancl.N:2*ancl.N]
        sorted_x = sorted(init_x)
        for a,alpha in enumerate(sorted_x):
            for b,beta in enumerate(init_x):
                if alpha == beta:
                    grid_ordered[:,a]=grid[:,b]
        
    
        print('shape of grid_ordered: ' + str(pl.shape(grid_ordered)))
        
        fig = pl.figure()
        ax = fig.add_subplot(111)
        # form of errorbar(x,y,xerr=xerr_arr,yerr=yerr_arr)
        ax.imshow(grid_ordered,interpolation="nearest", aspect='auto')
        ax.set_xlabel('Particle',fontsize=30)
        #ax.set_aspect('equal')
        ax.set_ylabel(r'$ t $',fontsize=30)
        fig.tight_layout()
        fig.savefig('SpatioTemporalVels/%(number)04d.png'%{'number':poin_num})
        pl.close(fig)
Example #14
def approximate(x,y):
    """
    Linear approximation of y=f(x) using least square estimator.
    In:
        x : ndarray
        y : ndarray
    Out:
        a, b : float, as in a*x+b=y
    """
    assert pl.shape(x) == pl.shape(y)
    A = pl.vstack([x, pl.ones(len(x))]).T
    a, b = pl.lstsq(A, y)[0]
    return a, b
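A quick self-test with synthetic data (the slope, intercept, and noise level are arbitrary):

x = pl.linspace(0, 10, 100)
y = 2.5 * x + 1.0 + 0.1 * pl.randn(100)
a, b = approximate(x, y)
print(a, b)   # should come back close to 2.5 and 1.0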
Example #15
def choose_patches(IMAGES, L, batch_size=1000):
    sz = int(sqrt(L))
    imsz = shape(IMAGES)[0]
    num_images = shape(IMAGES)[2]
    BUFF = 4

    X = matrix(zeros([L,batch_size],'d'))
    for i in range(batch_size):
        j = int(floor(num_images * rand()))
        r = sz/2+BUFF+int(floor((imsz-sz-2*BUFF)*rand()))
        c = sz/2+BUFF+int(floor((imsz-sz-2*BUFF)*rand()))
        X[:,i] = reshape(IMAGES[r-sz/2:r+sz/2,c-sz/2:c+sz/2,j],[L,1])
    return X
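A hedged sketch: IMAGES is assumed to be an (imsz, imsz, num_images) image stack, as in the sparsenet-style code this appears to come from, living in a numpy star-import namespace; Python 2 only, since sz/2 must stay an integer:

IMAGES = rand(512, 512, 10)
X = choose_patches(IMAGES, L=64)   # 8x8 patches, one per column
print(shape(X))                    # (64, 1000)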
Example #16
File: decode.py  Project: cano3/jropack-1
def decoder(data,code,iflip):
    """convolves each data profile with the code sequence"""
    times=py.shape(data)[0]
    hts=py.shape(data)[1]
    codelength=py.shape(code)[0]
    code_rev=code[::-1] #decoding correlates with the code, i.e. convolves with the time-reversed code
    deflip=1
    #pdb.set_trace()
    for i in range (times):
        temp=py.convolve(data[i,:],code_rev)
        data[i,:]=deflip*temp[codelength-1:codelength+hts]
        deflip=deflip*iflip #call with iflip=-1 if tx has flip
        #pdb.set_trace()
    return data
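A hedged usage sketch with a 13-element Barker code; the shapes are invented, and the copy matters because the function overwrites data in place:

code = py.array([1, 1, 1, 1, 1, -1, -1, 1, 1, -1, 1, -1, 1])
data = py.randn(4, 100)                       # 4 profiles, 100 height gates
decoded = decoder(data.copy(), code, iflip=1)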
Example #17
File: postprocess.py  Project: sth/pyQCD
def calcaV(W,method = "ratio"):
    """Calculate aV"""
    if method == "ratio":
        return pl.log(pl.absolute(W/pl.roll(W,-1,axis=1)))
    else:
        aVs = pl.zeros(pl.shape(W))
        n = pl.arange(1,pl.size(W,axis=1)+1)
        f = lambda b,t,W: W - b[0] * pl.exp(-b[1] * t)
        
        for i in xrange(pl.size(W,axis=0)):
            params,result = optimize.leastsq(f,[1.,1.],args=(n,W[i]))
            aVs[i] = params[1] * pl.ones(pl.shape(W[i]))
            
        return aVs
Example #18
def pnccd_to_image(infile, outfile):
    try:
        f = h5py.File(infile, "r")
    except IOError:
        raise IOError("Can't read %s. It may not be a pnCCD file." % infile)

    i1 = f.keys().index("data")
    i2 = f.values()[i1].keys().index("data1")

    data = f.values()[i1].values()[i2].value

    img = spimage.sp_image_alloc(pylab.shape(data)[0], pylab.shape(data)[1], 1)
    img.image[:, :] = data[:, :]
    spimage.sp_image_write(img, outfile, 0)
    spimage.sp_image_free(img)
Example #19
def render_network(A):
    [L, M] = shape(A)
    sz = int(sqrt(L))
    buf = 1
    A = asarray(A)

    if floor(sqrt(M)) ** 2 != M:
        m = int(sqrt(M / 2))
        n = M / m
    else:
        m = int(sqrt(M))
        n = m

    array = -ones([buf + m * (sz + buf), buf + n * (sz + buf)], "d")

    k = 0
    for i in range(m):
        for j in range(n):
            clim = max(abs(A[:, k]))
            x_offset = buf + i * (sz + buf)
            y_offset = buf + j * (sz + buf)
            array[x_offset : x_offset + sz, y_offset : y_offset + sz] = reshape(A[:, k], [sz, sz]) / clim

            k += 1
    return array
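A hedged sketch tiling 100 random 8x8 "basis functions" into one image; it assumes the same numpy/pylab star-import namespace as the function (and Python 2 integer division):

A = rand(64, 100) - 0.5
tile = render_network(A)
imshow(tile, cmap='gray')
axis('off')
show()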
Example #20
    def Global_Stiffness(self):
        '''
        Generates Global Stiffness Matrix for the plane structure
        '''
        elem = self.element;
        B = py.zeros((6,6))
        for i in range (0,py.size(elem,0)): 
            #for each element find the stiffness matrix
            K = py.zeros((self.n_nodes*2,self.n_nodes*2))            
            el = elem[i]
            
            #nodes formatted for input            
            [node1, node2, node3] = el;
            node1x = 2*(node1);node2x = 2*(node2);node3x = 2*(node3);
            node1y = 2*(node1)+1;node2y = 2*(node2)+1;node3y = 2*(node3)+1;
            #Area, Strain Matrix and E Matrix multiplied to get element stiffness            
            [J,B] = self.B(el)
            local_k =0.5*abs(J)*py.dot(py.transpose(B),py.dot(self.E_matrix,B))
            
            if self.debug:            
                print 'K for elem', el, '\n', local_k
            #Element K-Matrix converted into Global K-Matrix format 
            K[py.ix_([node1x,node1y,node2x,node2y,node3x,node3y],[node1x,node1y,node2x,node2y,node3x,node3y])] += local_k

            #Adding contribution into Global Stiffness
            self.k_global += K
            
        if self.debug: 
                print 'Global Stiffness', '\n', self.k_global, \
                      '\n', 'size', py.shape(self.k_global), \
                      '\n Symmetry test', py.dot(py.inv(self.k_global), self.k_global)
Example #21
    def __init__(self, r_value, filename_coupling, amplitude, omega, cycles):
	"""
	Ode_function_call()

	Constructor.

	Parameters
	----------
	r_value : float, internuclear distance; the closest point on the 
	    stored R grid is used.
	filename_coupling : string, path to the HDF5 file that contains the 
	    dipole couplings of the electronic H2+ problem.
	amplitude : float, peak amplitude of the laser pulse.
	omega : float, (angular) frequency of the laser.
	cycles : number of optical cycles in the pulse.
	"""
	#Laser info.
	self.amplitude = amplitude
	self.omega = omega
	self.cycles = cycles
	self.pulse_duration = 2* pi /(self.omega) * self.cycles

	#Open files
	f = tables.openFile(filename_coupling)

	#Retrieve r value.
	r_grid = f.root.R_grid[:]
	self.r_index = argmin(abs(r_grid - r_value))	
	self.index_array = f.root.index_array[:]
	self.r = r_grid[self.r_index]

	#Retrieve Hamiltonian.
	self.H_0 = diag(f.root.E[:,self.r_index])
	self.H_1 = f.root.couplings[:,:,self.r_index]	
	
	#Close file.
	f.close()

	#Basis sizes.
	self.basis_size = shape(self.H_0)[0]
Example #22
def groupConfidenceWeight(AllData):
  """
  weights answers by confidence for different groups
  """
  # everybodygetup
  subjects = range(len(AllData[1]['correct']))
  
  distribution = np.array(py.zeros([20,len(subjects)]))
  for i in subjects:
    newdist = getIndConf(AllData, i)
    print(len(newdist))
    distribution[:,i] = newdist
  m,n = py.shape(distribution)
  for i in xrange(m):
    for j in xrange(n):
      distribution[i,j] = distribution[i,j] + py.randn(1)*0.05
  
  print(distribution)
  
  fig = py.figure()
  ax20 = fig.add_subplot(111)
  for i in xrange(n):
    ax20.hist(distribution[:,i], bins=20, color='c', alpha=0.2,
              edgecolor='none')
  ax20.set_title('Weighted answers')
  ax20.set_xlabel('Distribution')
  ax20.set_ylabel('Counts')
Example #23
    def _get_angles(steps,track_length):
        angles = pl.zeros(track_length-2)
        polar = pl.zeros(pl.shape(steps))
        for i in range(track_length-1):
            polar[i,0] = pl.norm(steps[i,:])
            polar[i,1] = pl.arctan(steps[i,0]/steps[i,1])

            if pl.isnan( polar[i,1]):
                polar[i,1] = 0

            if (steps[i,0] >= 0):
                if (steps[i,1] >= 0):
                    pass
                elif (steps[i,1] < 0):
                    polar[i,1] += 2.*pl.pi
            elif (steps[i,0] < 0):
                if (steps[i,1] >= 0):
                    polar[i,1] += pl.pi
                elif (steps[i,1] < 0):
                    polar[i,1] += pl.pi

        for i in range(track_length-2):
            angles[i] = polar[i+1,1] - polar[i,1]

        return angles
Example #24
    def probability_density(self, psi, el_indices):
	"""
	r_grid, prob = probability_density(psi, el_indices)

	Finds the vibrational probability density of <psi> on the set of electronic states 
	with indices <el_indices>.

	Parameters
	----------
	psi : 2D complex array. The wavefunction.
	el_indices : integer list. The electronic basis functions to be looked at.

	Returns
	-------
	r_grid : 1D float array. The grid on which the probability density is given.
	prob : 1D float array. The probability density.
	"""
	prob = zeros(shape(self.grid))
	
	for i in el_indices:
	    coefficients = psi[:,i]
	    
	    if self.splines is None:
		wavefunction_re = self.vib_basis.construct_function_from_bspline_expansion(
		    real(coefficients), self.grid)
		wavefunction_im = self.vib_basis.construct_function_from_bspline_expansion(
		    imag(coefficients), self.grid)
	    else:
		wavefunction_re = dot(self.splines, real(coefficients))
		wavefunction_im = dot(self.splines, imag(coefficients))
	    
	    prob += wavefunction_re**2 + wavefunction_im**2

	return self.grid, prob	
Example #25
File: analysis.py  Project: adrianq/netpyne
def plotConn():
    # Create plot
    figh = figure(figsize=(8,6))
    figh.subplots_adjust(left=0.02) # Less space on left
    figh.subplots_adjust(right=0.98) # Less space on right
    figh.subplots_adjust(top=0.96) # Less space on top
    figh.subplots_adjust(bottom=0.02) # Less space on bottom
    figh.subplots_adjust(wspace=0) # No space between subplots
    figh.subplots_adjust(hspace=0) # No space between subplots
    h = axes()
    totalconns = zeros(shape(f.connprobs))
    for c1 in range(size(f.connprobs,0)):
        for c2 in range(size(f.connprobs,1)):
            for w in range(f.nreceptors):
                totalconns[c1,c2] += f.connprobs[c1,c2]*f.connweights[c1,c2,w]*(-1 if w>=2 else 1)
    imshow(totalconns,interpolation='nearest',cmap=bicolormap(gap=0))

    # Plot grid lines
    hold(True)
    for pop in range(f.npops):
        plot(array([0,f.npops])-0.5,array([pop,pop])-0.5,'-',c=(0.7,0.7,0.7))
        plot(array([pop,pop])-0.5,array([0,f.npops])-0.5,'-',c=(0.7,0.7,0.7))

    # Make pretty
    h.set_xticks(range(f.npops))
    h.set_yticks(range(f.npops))
    h.set_xticklabels(f.popnames)
    h.set_yticklabels(f.popnames)
    h.xaxis.set_ticks_position('top')
    xlim(-0.5,f.npops-0.5)
    ylim(f.npops-0.5,-0.5)
    clim(-abs(totalconns).max(),abs(totalconns).max())
    colorbar()
Example #26
def gadget_merge_ics( outfile, filename1, filename2, offset1, offset2, voffset1=[0.,0.,0.], voffset2=[0.,0.,0.] ):
	snap1 = gadget_readsnapname( filename1 )
	snap2 = gadget_readsnapname( filename2 )

	for i in range(3):
		snap1.pos[:,i] += offset1[i]
		snap2.pos[:,i] += offset2[i]
		
	for i in range(3):
		snap1.vel[:,i] += voffset1[i]
		snap2.vel[:,i] += voffset2[i]

	npart = snap1.npart + snap2.npart
	data = {}
	data[ 'count' ] = npart
	data[ 'pos' ] = pylab.zeros( [npart, 3] )
	data[ 'pos' ][ 0:snap1.npart, : ] = snap1.pos
	data[ 'pos' ][ snap1.npart:npart, : ] = snap2.pos
	data[ 'vel' ] = pylab.zeros( [npart, 3] )
	data[ 'vel' ][ 0:snap1.npart, : ] = snap1.vel
	data[ 'vel' ][ snap1.npart:npart, : ] = snap2.vel
	data[ 'mass' ] = pylab.zeros( npart )
	data[ 'mass' ][ 0:snap1.npart ] = snap1.data["mass"]
	data[ 'mass' ][ snap1.npart:npart ] = snap2.data["mass"]
	data[ 'u' ] = pylab.zeros( npart )
	data[ 'u' ][ 0:snap1.npart ] = snap1.data["u"]
	data[ 'u' ][ snap1.npart:npart ] = snap2.data["u"]
	nxnuc = pylab.shape( snap1.data["xnuc"] )[1]
	data[ 'xnuc' ] = pylab.zeros( [npart, nxnuc] )
	data[ 'xnuc' ][ 0:snap1.npart, : ] = snap1.data["xnuc"]
	data[ 'xnuc' ][ snap1.npart:npart, : ] = snap2.data["xnuc"]

	gadget_write_ics( outfile, data, transpose=False )
	return
Example #27
def AllDataDist(AllData):
  # 
  subjects = range(len(AllData[1]['correct']))
  matrix = py.zeros([len(AllData.keys()),len(subjects)])
  
  kcount = -1
  for k in AllData.keys():
    kcount += 1
    icount = 0
    while icount < len(subjects):
      matrix[kcount][icount] = \
                               AllData[k]['correct'][icount]
      icount += 1
  
  meanmean = []
  for i in subjects[1:]:
    # create combination list
    #print(subjects[1:])
    
    perms = list(it.combinations(subjects, i))  # combinations are already unique

    for h in range(len(perms)):
      # for each combination, get the mean correct
      means = []
      for k in range(len(matrix)):
        # for each question (row of the matrix)...
        current = []
        for j in perms[h]:
          #print(perms[h])
          # get the correct for that subject, append
          current.append(matrix[k][j])
        # then take the mode
        #print(int(stats.mode(current)[0]))
        means.append( int(stats.mode(current)[0]) )
      #print(means)
    # append mean for each group size

    meanmean.append(np.mean(means))
  allsum = sum(sum(matrix))
  m, n = py.shape(matrix)
  print('Total mean is %.3f / %.3f = %.3f '
        % ( allsum, m*n, (allsum/(m*n))))
  
  subjects = subjects[1::2]
  meanmean = meanmean[1::2]
  if len(subjects) > len(meanmean):
    subjects=subjects[1:]
  elif len(subjects) < len(meanmean):
    meanmean = meanmean[1:]
  fig = py.figure()
  ax14 = fig.add_subplot(111)
  ax14.plot(subjects, meanmean, 'bo', alpha=1)
  ax14.plot(subjects, meanmean, 'b', linewidth=3, alpha=0.2)
  ax14.set_ylim(-0.2,1.2)
  ax14.set_title('Real Data Group Size: Percent Correct')
  ax14.set_xlabel('Group size')
  ax14.set_ylabel('% Correct')
  print(meanmean)
  return meanmean
Example #28
 def __init__(self, filename):
     print "Reading in file " + filename
     fitsFile = pyfits.open(filename)
     self.data = fitsFile[0].data
     self.nt, self.ny, self.nx = pylab.shape(self.data)
     # Remove the DC offset due to the spacecraft's motion
     for i in range(self.nt):
         self.data[i, :, :] -= np.mean(self.data[i, :, :])
Example #29
File: postprocess.py  Project: sth/pyQCD
def Vplot(Ws):
	"""Calculate the potential function and plot it"""

	N_bstrp = input("Please enter the number of bootstraps: ")
	N_bin = input("Please enter the bin size: ")
	style = raw_input("Please enter a linestyle: ")

	Ws = bin(Ws,N_bin)
	aVs = pl.zeros((N_bstrp,) + pl.shape(Ws)[1:])
	bs = pl.zeros((N_bstrp,3))
        
	for i in xrange(N_bstrp):
		W = pl.mean(bootstrap(Ws),axis=0)
		aVs[i] = calcaV(W,method="fit")
		bs[i] = potfit(aVs[i,:,0])

			
	r = pl.arange(1,7)
	aV = pl.mean(aVs,axis=0)
	aVerr = pl.std(aVs,axis=0)
	b = pl.mean(bs,axis=0)

	a_s = 0.5 / pl.sqrt((1.65 + bs[:,1]) / bs[:,0])
	sigmas = bs[:,0] / a_s**2
	Bs = bs[:,1]
	As = bs[:,2] / a_s

	a = pl.mean(a_s)
	aerr = pl.std(a_s)
	sigma = pl.mean(sigmas)
	sigmaerr = pl.std(sigmas)
	B = pl.mean(Bs)
	Berr = pl.std(Bs)
	A = pl.mean(As)
	Aerr = pl.std(As)

	print("Fit parameters:")
	print("sigma = %f +/- %f fm^-2 = %f +/- %f MeV^2"
		% (sigma, sigmaerr, sigma * 197**2, sigmaerr * 197**2))
	print("B = %f +/- %f" % (B, Berr))
	print("A = %f +/- %f fm^-1 = %f +/- %f MeV"
		% (A, Aerr, A*197, Aerr*197))
	print("Lattice spacing, a = %f +/- %f fm = %f +/- %f MeV^-1"
		% (a, aerr, a/197, aerr/197))
	
	r_fit = pl.arange(0.25,r[-1]+1,0.1)
	aV_fit = V(b,r_fit)
	
	handles = []
	handles.append(pl.errorbar(r,aV[:,0],yerr=aVerr[:,0],fmt='o'+style[0]))
	handles.append(pl.plot(r_fit,aV_fit,style))
	pl.ylim([0,pl.nanmax(aV)+0.25])
	pl.xlim([0,pl.nanmax(r_fit)+0.25])
	pl.xlabel("$r / a$")
	pl.ylabel("$aV(r)$")

	return aV,handles
Example #30
def variability(r_times):
    """
    Get HRV from RR occurance times array.
    In:
        r_times : ndarray, Relative time in seconds
    Out:
        time : ndarray, Relative time vector in seconds
        hrv : ndarray, HRV vector in milliseconds
    """

    time = pl.delete(r_times,-1)
    hrv = pl.zeros(len(r_times)-1, dtype='float')

    for i in range(0, len(r_times)-1):
        hrv[i] = (r_times[i+1]-r_times[i])* 1000
    
    assert pl.shape(time) == pl.shape(hrv)
    return time, hrv
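A minimal self-contained sketch: R-peak occurrence times spaced about one second apart with some jitter (the numbers are made up):

r_times = pl.cumsum(1.0 + 0.05 * pl.randn(100))
time, hrv = variability(r_times)
pl.plot(time, hrv)   # HRV in ms against time in s
pl.show()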
Example #31
def StommetGyre(f0, beta, g, H, dx, yu, yv, dt, nt, u, v, h, gamma, rho, tor0):
    """Solves the linearised SWE for a wind-driven Stommel gyre using
    forward-backward time-stepping and the C-grid"""
    
    [nx,ny] = py.shape(u)
    dy = yu[1] - yu[0]
    L = yv[-1]+dy
    torx = -tor0*py.cos(py.pi*yu/L)
    tory = 0.0    # no meridional wind stress in the Stommel problem
    
    # loop through all times
    for it in xrange(nt):
        # update old values
        uOld = u.copy()
        vOld = v.copy()
        hOld = h.copy()
        
        # alternate between calculating u or v first
        for iu in xrange(2):
            if iu%2 == 0:
                # loop over x and y directions (using i and j)
                u = uOld + dt*\
                (
                    (f0 + beta*yu)*vatu(v)
                  - g*ddxC(h,dx)
                  - gamma*u
                  + torx/(rho*H)
                )
                
            else:
                v = vOld + dt*\
                (
                   - (f0+beta*yv)*uatv(u)
                   - g*ddyC(h,dy)
                   - gamma*v
                   + tory/(rho*H)
                )

        # then calculate h using updated values of u and v
        h = hOld - dt*H*divC(u,v,dx,dy)
Example #32
def tostring(data):
    """
    :param data: object to be converted into a JSON-compatible `str`
    :type data: any
    :return: JSON-compatible `str` version of `data`
    
    Converts `data` from its native data type to a JSON-compatible `str`.
    """
    dtype = type(data).__name__
    if dtype == 'ndarray':
        if pylab.shape(data) != (): data = list(data)
        else: data = '"' + data.tostring() + '"'
    elif dtype == 'dict' or dtype == 'tuple':
        try:
            data = json.dumps(data)
        except (TypeError, ValueError):
            pass
    elif dtype == 'NoneType':
        data = ''
    elif dtype == 'str' or dtype == 'unicode':
        data = json.dumps(data)

    return str(data)
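Hedged examples of the conversions (Python 2, to match the str/unicode branch above):

print(tostring(pylab.array([1, 2, 3])))   # '[1, 2, 3]'
print(tostring({'a': 1}))                 # '{"a": 1}'
print(tostring(None))                     # ''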
Example #33
File: meshing.py  Project: JacobDowns/cslvr
    def create_contour(self, var, zero_cntr, skip_pts):
        """
        Create a contour of the data field with index <var> of <dd> provided at
        initialization.  <zero_cntr> is the value of <var> to contour, <skip_pts>
        is the number of points to skip in the contour, needed to prevent overlap.
        """
        s    = "::: creating contour from %s's \"%s\" field with skipping %i " + \
               "point(s) :::"
        print_text(s % (self.dd.name, var, skip_pts), self.color)

        skip_pts = skip_pts + 1

        # create contour :
        field = self.dd.data[var]
        fig = figure()
        self.ax = fig.add_subplot(111)
        self.ax.set_aspect('equal')
        self.c = self.ax.contour(self.x, self.y, field, [zero_cntr])

        # Get longest contour:
        cl = self.c.allsegs[0]
        ind = 0
        amax = 0
        amax_ind = 0

        for a in cl:
            if size(a) > amax:
                amax = size(a)
                amax_ind = ind
            ind += 1

        # remove skip points and last point to avoid overlap :
        self.longest_cont = cl[amax_ind]
        s = "::: contour created, length %s nodes :::"
        print_text(s % shape(self.longest_cont)[0], self.color)
        self.remove_skip_points(skip_pts)
Example #34
def bsmooth(options):
    #btable = 'day22_time_ave_split_spw0.B0_s'
    btable = options.btin
    tb.open(btable)
    tb.copy(options.btout, deep=True, valuecopy=True)
    tb.close()
    tb.open(options.btout, nomodify=False)
    C = tb.getcol('CPARAM')
    Cx = C * 1
    N = pl.shape(C)
    wl = options.wl
    for i in range(N[0]):
        for j in range(N[2]):
            Cx[i, :, j] = smooth(C[i, :, j],
                                 window_len=wl,
                                 window=options.window)[(wl - 1) / 2:N[1] +
                                                        (wl - 1) / 2]

    pl.plot(C[0, :, 14], 'ro', mec='red', alpha=0.5)
    pl.plot(Cx[0, :, 14], 'k-', lw=2)
    tb.putcol(columnname='CPARAM', value=Cx)
    tb.close()
    pl.savefig(options.btout + '.png', dpi=300)
    pl.close()
Example #35
        TotRho = mixRho * nTotRho + (1 - mixRho) * TotRho

    # Plotting bands
    fcc.ChoosePointsInFBZ(Nkplot, type=1)
    pyplot.title("Cu band structure")
    pyplot.xlabel("k-space")
    pyplot.ylabel("Energy")
    Ek = []
    for ik, k in enumerate(fcc.kp):
        (tEk, tAr, tw0, tw1, tw2,
         twi) = ComputeEigensystem(k, fcc.Km, Olap_I, Enu, logDer, RMuffinTin,
                                   fcc.Volume)
        Ek.append(tEk)
    Ek = array(Ek)

    for i in range(shape(Ek)[1]):
        if max(Ek[:, i]) - mu > plotMM[0] and min(Ek[:, i]) - mu < plotMM[1]:
            #plot(Ek[:,i]-mu, 'k-', lw=2)
            pyplot.plot(Ek[:, i] - mu, 'k-', lw=2)
    #plot([0,len(Ek)],[0,0], 'k:')  # chemical potential line
    pyplot.plot([0, len(Ek)], [0, 0], 'k:')
    #ax=axis()
    ax = pyplot.axis()
    xlabs = [p[1] for p in fcc.Points]
    labs = [p[0] for p in fcc.Points]
    #xticks(xlabs, labs)
    pyplot.xticks(xlabs, labs)
    for ix, x in enumerate(xlabs):
        #plot([x,x], [ax[2],ax[3]], 'k:')
        pyplot.plot([x, x], [ax[2], ax[3]], 'k:')
    #axis([xlabs[0], xlabs[-1], ax[2], ax[3]])
Example #36
cbar = pl.colorbar(pc, cax=ax, orientation='vertical', alpha=1)
cbar.set_label("cm s$^{-1}$")
p1.set_title('AVISO Geostrophic Velocities')

#########################################################################
print " "
print "Plotting Abrolhos series .............."
p2 = pl.subplot(222)

xmin = int(tt[0])
xmax = int(tt[-1])

pl.plot([tt[0], tt[-1]], [0, 0], 'k')
pl.grid()
pl.plot(tt, Vab, 'k', linewidth=1)
pl.plot(tt, pl.mean(Vab) * pl.ones(pl.shape(tt)), 'r', linewidth=2)
p2.set_xlim(xmin, xmax)
p2.set_ylim(-50, 50)
p2.set_ylabel("V [cm s$^{-1}$]")
p2.set_title(r"Meridional Velocity off Abrolhos Bank - 18$^\circ$S")
p2.set_axis_bgcolor('0.95')
p2.xaxis.set_major_formatter(pl.DateFormatter('%b'))
bbox_props = dict(boxstyle="round", fc="w", ec="0.5", alpha=0.9)
p2.text(dt.datetime(2008, 10, 1),
        -40,
        r"Southward = 63%",
        ha="center",
        va="center",
        size=12,
        bbox=bbox_props)
p2.text(dt.datetime(2008, 10, 1),
Example #37
def FE(M,P,x,printResults=True):
	## FE analysis to solve Maxwell's equations in 2D for either H or E polarisation (=Helmholtz's equation)
	##		using periodic boundary conditions and wave expansions at the boundaries. The approach was originally
	##		implemented following Fuchi2010; modifications from Dossou2006 (the original article) were added later
	#
	#@param M		Model object containing geometrical information
	#@param P		Physics object containing physical parameters
	#@param x		The design domain with 0 corresponding to eps1 and 1 corresponding to eps2
	#						and intermediate values to weighted averages in between. See physics.interpolate
	#@param printResults	print results to stdout after simulation
	#@return			sol is the discretised solution, r are the complex reflection coefficients
	#						and t are the complex transmission coefficients

	#Calculate A and B, as they would be given in the Helmholtz equation in Eq (1) in Friis2012.
	#	(this notation makes it easy to switch between E and H field due to duality)

	A,B = P.interpolate(x)
	Mvv = assembleMvv(A,B,M,P)
	Mrv,Mvr,Mtv,Mvt,fvn,frn = assembleMxxfx(M,P)

	frn[M.Nm,0] = -M.lx			#mode 0

	lu = splu(Mvv)
	#Mhatrr and Mhattr
	b = lu.solve(Mvr)
	Mhatrr = -Mrv*b
	_addToDiagonal(Mhatrr, M.lx)
	Mhattr = -Mtv*b

	#Mhatrt and Mhattt
	b = lu.solve(Mvt)
	Mhatrt = -Mrv*b
	Mhattt = -Mtv*b
	_addToDiagonal(Mhattt, M.lx)

	#fhatrn
	b = lu.solve(fvn)
	a = Mrv*b
	fhatrn = frn-a

	#fhattn
	b = lu.solve(fvn)
	a = Mtv*b
	fhattn = -a

	MAT = pl.bmat([[Mhatrr, Mhatrt],
						[Mhattr, Mhattt]])
	RHS = pl.bmat([ [fhatrn],[fhattn]])

	V  = pl.solve(MAT,RHS)
	r = V[:M.NM,0]
	t = V[M.NM:,0]

	#Solve the system using LU factorisation (as recommended in Dossou2006, p 130, bottom)
	sol = lu.solve(fvn-Mvr*r-Mvt*t)
	print(pl.shape(r))
	print(pl.shape(t))
	print(pl.shape(sol))

	r = r.view(pl.ndarray).ravel()
	t = t.view(pl.ndarray).ravel()
	#Cast solution into a form that matches the input model
	sol = sol.reshape(M.nelx+1,M.nelz+1,order='F')
	#Print simulation results
	if printResults:
		_printResultsToScreen(M,P,r,t)
	print("r")
	print(r)
	print(t)
	print(sol)

	results = {}
	results["solution"] = sol
	results["x"] = x
	results["r"] = r
	results["t"] = t
	results.update(M.getParameters())
	results.update(P.getParameters())
	return results
Example #38
dataset['images/test_set'] -= dataset['images/test_set'].mean((1, 2, 3),
                                                              keepdims=True)
dataset['images/test_set'] /= dataset['images/test_set'].max((1, 2, 3),
                                                             keepdims=True)
dataset['images/test_set'] = dataset['images/test_set'][:, :, 2:-2, 2:-2]

for MODEL in ['smallcnn', 'largecnn', 'resnet']:
    f = h5py.File('/mnt/drive1/rbalSpace/centroids/saved_mus_' + MODEL + '.h5',
                  'r',
                  swmr=True)
    mus = {'train': [], 'test': []}

    keys = sorted([i for i in f['train_set/minimizer']], key=float)
    print(keys)
    for key in keys[2:]:
        print(pl.shape(f['train_set/minimizer/' + key][...]))
        mus['train'].append(f['train_set/minimizer/' + key][...][[0, -1]])

    keys = sorted([i for i in f['test_set/accu']], key=float)
    for key in keys[1:]:
        mus['test'].append(f['test_set/accu/' + key][...][[0, -1]])
    f.close()

    n_layer = len(mus['train'])

    # test the distance thing
    print('model before training', MODEL)
    for layer in range(n_layer - 1):
        distribution = list()
        for batch in range(mus['test'][layer].shape[1]):
            for n in range(64):
Example #39
PSet.update({'simulated': True})
PSet.save(url=os.path.join(datafolder, '{}.pset'.format(PSet['uuid'])))

######## SAVING STUFF ########
savestuff.update(**{'EPSC': EPSC})
savestuff.update(**{'v_EPSC': v_EPSC})
savestuff.update(**{'mean_EPSC': mean_EPSC})
savestuff.update(**{'SD_EPSC': SD_EPSC})
savestuff.update(**{'LS_EPSC': LS_EPSC})
savestuff.update(**{'R2_EPSC': R2_EPSC})

f = h5py.File(os.path.join(datafolder, 'simres.h5'), 'w')
for i in xrange(len(savestuff.keys())):
    dset = f.create_dataset(savestuff.keys()[i],
                            pl.shape(savestuff.values()[i]))
    dset[...] = savestuff[savestuff.keys()[i]]

f.close()

f = file('results_tau.txt', 'a')
f.write('%s\t%s\t%.3f\t%.3f\t%.3f\t%.3f\n' %
        (psetid, morphology, v_EPSP[1], v_EPSP[2], v_EPSC[1], v_EPSC[2]))
f.close()

from initialize_simulations import get_md5s
if PSet['uuid'] in get_md5s('PS_simres_RS') + get_md5s(
        'PS_simres_FS') + get_md5s('PS_simres_P4'):
    #
    # if P_i <= 1:
    f = file(os.path.join(datafolder, 'c_savedPickle.cpickle'), 'wb')
Example #40
  def __init__(self, direc, files, flip=False, mesh=None, gen_space=True, 
               zero_edge=False, bool_data=False, req_dg=False):
    """
    The following data are used to initialize the class :
    
      direc     : Set the directory containing the input files. 
      files     : Tuple of file names.  All files are scanned for rows or 
                  columns of nans. Assume all files have the same extents.
      flip      : flip the data over the x-axis?
      mesh      : FEniCS mesh if there is one already created.
      zero_edge : Make edges of domain -0.002?
      bool_data : Convert data to boolean?
      req_dg    : Some field may require DG space?
    
    Based on thickness extents, create a rectangular mesh object.
    Also define the function space as continuous Galerkin, order 1.
    """
    self.directory  = direc
    self.data       = {}        # dictionary of converted matlab data
    self.rem_nans   = False
    self.chg_proj   = False     # change to other projection flag
    
    first = True  # initialize domain by first file's extents

    if direc == None and type(files) == dict:
      self.name = files.pop('dataset')
    elif direc != None:
      self.name = direc
    
    print "::: creating %s DataInput object :::" % self.name
    
    # process the data files :
    for fn in files:
     
      if direc == None and type(files) == dict:
        d_dict = files[fn]
    
      
      elif direc != None:
        d_dict = loadmat(direc + fn)
        d_dict['projection']     = d_dict['projection'][0]
        d_dict['standard lat']   = d_dict['standard lat'][0]
        d_dict['standard lon']   = d_dict['standard lon'][0]
        d_dict['lat true scale'] = d_dict['lat true scale'][0]
      
      d = d_dict["map_data"]
     
      # initialize extents :
      if first: 
        self.ny,self.nx = shape(d_dict['map_data'])
        self.x_min      = float(d_dict['map_western_edge'])
        self.x_max      = float(d_dict['map_eastern_edge'])
        self.y_min      = float(d_dict['map_southern_edge'])
        self.y_max      = float(d_dict['map_northern_edge'])
        self.proj       = str(d_dict['projection'])
        self.lat_0      = str(d_dict['standard lat'])
        self.lon_0      = str(d_dict['standard lon'])
        self.lat_ts     = str(d_dict['lat true scale'])
        self.x          = linspace(self.x_min, self.x_max, self.nx)
        self.y          = linspace(self.y_min, self.y_max, self.ny)
        self.good_x     = array(ones(len(self.x)), dtype=bool)      # no NaNs
        self.good_y     = array(ones(len(self.y)), dtype=bool)      # no NaNs
        first           = False
  
      # identify, but not remove the NaNs : 
      self.identify_nans(d, fn)
     
      # make edges all zero for interpolation of interior regions :
      if zero_edge:
        d[:,0] = d[:,-1] = d[0,:] = d[-1,:] = -0.002
        d[:,1] = d[:,-2] = d[1,:] = d[-2,:] = -0.002

      # convert to boolean : 
      if bool_data: d[d > 0] = 1
      
      # reflect over the x-axis :
      if flip: d = d[::-1, :]
      
      # add to the dictionary of arrays :
      self.data[fn.split('.')[0]] = d

    # remove un-needed rows/cols from data: 
    if self.rem_nans:
      self.remove_nans()
    
    if gen_space:
      # define a FEniCS Rectangle over the domain :
      if mesh == None:
        self.mesh = RectangleMesh(self.x_min, self.y_min, 
                                  self.x_max, self.y_max,
                                  self.nx,    self.ny)
      else:
        self.mesh = mesh
      
      # define the function space of the problem :
      self.func_space      = FunctionSpace(self.mesh, "CG", 1)
      
      # if DG space is needed :
      if req_dg:
        self.func_space_dg = FunctionSpace(self.mesh, "DG", 1)
    
    # create projection : 
    proj =   " +proj="   + self.proj \
           + " +lat_0="  + self.lat_0 \
           + " +lat_ts=" + self.lat_ts \
           + " +lon_0="  + self.lon_0 \
           + " +k=1 +x_0=0 +y_0=0 +no_defs +a=6378137 +rf=298.257223563" \
           + " +towgs84=0.000,0.000,0.000 +to_meter=1"
    self.p = Proj(proj)
Example #41
def xml_to_gmsh(mesh, results=None):
    """
    Iterates through mesh and results (field) data, both provided in .xml
    format, and writes an output .msh file with the field data written in as
    node data. The output file takes the name of the results file if one is
    passed, otherwise the name of the input mesh.

    :param mesh:    (.xml) mesh that is to be written to a file
    :param results: (.xml) results data
    """

    # get output file
    if results == None:
        fname = mesh.rstrip("xml") + "msh"
        output = open(fname, 'w')
    else:
        fname = results.rstrip("xml") + "msh"
        output = open(fname, 'w')

    # read files into dolfin format
    mesh = dolf.Mesh(mesh)
    Q = dolf.FunctionSpace(mesh, 'CG', 1)
    if results != None:
        field = dolf.Function(Q)
        dolf.File(results) >> field

    cell_type = mesh.type().cell_type()

    nodes = mesh.coordinates()
    n_nodes = mesh.num_vertices()

    nodes = p.hstack(
        (nodes, p.zeros((n_nodes, 3 - p.shape(mesh.coordinates())[1]))))

    cells = mesh.cells()
    n_cells = mesh.num_cells()

    output.write("$MeshFormat\n" + "2.2 0 8\n" + "$EndMeshFormat\n" +
                 "$Nodes \n" + "{0:d}\n".format(n_nodes))

    for ii, node in enumerate(nodes):
        output.write("{0:d} {1} {2} {3}\n".format(ii + 1, node[0], node[1],
                                                  node[2]))

    output.write("$EndNodes\n")

    output.write("$Elements\n" + "{0:d}\n".format(n_cells))

    for ii, cell in enumerate(cells):

        if cell_type == 1:
            output.write("{0:d} 1 0 {1:d} {2:d}\n".format(
                ii + 1, int(cell[0] + 1), int(cell[1] + 1)))

        elif cell_type == 2:
            output.write("{0:d} 2 0 {1:d} {2:d} {3:d}\n".format(
                ii + 1, int(cell[0] + 1), int(cell[1] + 1), int(cell[2] + 1)))

        elif cell_type == 3:
            output.write("{0:d} 4 0 {1:d} {2:d} {3:d} {4:d}\n".format(
                ii + 1, int(cell[0] + 1), int(cell[1] + 1), int(cell[2] + 1),
                int(cell[3] + 1)))

        else:
            print "Unknown cell type"

    output.write("$EndElements\n")

    if results != None:
        output.write("$NodeData\n" + "1\n" + "\"NodalValues\"\n" +
                     "0\n" +  # zero real tags 
                     "3\n" +  # three integer tags
                     "0\n" +  # the time step
                     "1\n" +  #1-component (scalar) field
                     "{0:d}\n".format(len(nodes)))
        for ii, node in enumerate(nodes):
            output.write("{0:d} {1:g}\n".format(ii + 1,
                                                field(node[0], node[1])))

        output.write("$EndNodeData\n")

    output.close()

    # There is some numerical precision error that prevents files created by
    # this script from being converted back into xml by dolfin-convert;
    # opening the file in gmsh and resaving fixes this.
    print 'calling gmsh...'
    s.call(['gmsh', fname, '-2'])
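A hedged call sketch; the file names are placeholders, and both files are assumed to exist in the working directory:

xml_to_gmsh('mesh.xml', results='temperature.xml')   # writes temperature.msh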
Example #42
                quit(1)

    # Get the list [of list] of spectra
    ilist = get_ilist(fname)

    # Get input parameter for each spectrum from the ilist
    dumb = get_ip(ilist)
    obslist, namelist, lbdunitlist, symlist, \
    hshiftlist, savelist, broadlist, grouplist, alplist, normlist = dumb

    #Get the wavelength range if only one spectrum
    if (len(ilist) == 1 and lbd == dft_lbd and lbdrange == dft_lbdrange
            and not lbdmin0 and not lbdmax0):
        a = get_data(obslist[0])
        print pl.shape(a)
        lbdmin0 = min(a[:, 0])
        lbdmax0 = max(a[:, 0])
        lbdrange = lbdmax0 - lbdmin0
        lbd = (lbdmin0 + lbdmax0) / 2.
    elif lbdmin0 and lbdmax0:
        if lbdmin0 > lbdmax0:
            print "Pb: -wmin > -wmax"
            quit(1)
        lbd = (lbdmin0 + lbdmax0) / 2.
        lbdrange = lbdmax0 - lbdmin0
    #Or use specified/default values
    else:
        lbdmin0 = lbd - lbdrange / 2.
        lbdmax0 = lbd + lbdrange / 2.
Example #43
    def _generate_feature_development_plots(self, important_features):
        """ This function generates the actual histogram plot"""
        # Everything is done class-wise
        for label in important_features.keys():
            # Axis limits are determined by the global maxima
            (minVal, maxVal) = (important_features[label].min(0).min(0),
                                important_features[label].max(0).max(0))
            nr_chans = pylab.shape(important_features[label])[0]

            myFig = pylab.figure()
            myFig.set_size_inches((40, nr_chans))

            for i_chan in range(nr_chans):
                ax = myFig.add_subplot(nr_chans, 1, i_chan + 1)

                # cycle line colors
                if (pylab.mod(i_chan, 2) == 0): myCol = '#000080'
                else: myCol = '#003EFF'
                # plot features and black zero-line
                pylab.plot(important_features[label][i_chan, :], color=myCol)
                pylab.plot(
                    range(len(important_features[label][i_chan, :])),
                    pylab.zeros(len(important_features[label][i_chan, :])),
                    'k--')
                pylab.ylim((minVal, maxVal))
                xmax = pylab.shape(important_features[label])[1]
                pylab.xlim((0, xmax))

                # draw vertical dashed line every 20 epochs
                for vertical_line_position in range(0, xmax + 1, 20):
                    pylab.axvline(x=vertical_line_position,
                                  ymin=0,
                                  ymax=1,
                                  color='k',
                                  linestyle='--')

                # write title above uppermost subplot
                if i_chan + 1 == 1:
                    pylab.title(
                        'Feature development: Amplitudes of %s Epochs' % label,
                        fontsize=40)
                # adjust the axes, i.e. remove upper and right,
                # shift the left to avoid overlaps,
                # and lower axis only @ bottom subplot
                if i_chan + 1 < nr_chans:
                    self._adjust_spines(ax, ['left'], i_chan)
                else:
                    self._adjust_spines(ax, ['left', 'bottom'], i_chan)
                    pylab.xlabel('Number of Epoch', fontsize=36)
                # Write feature name next to the axis
                pylab.ylabel(self.corr_important_feat_names[i_chan],
                             fontsize=20,
                             rotation='horizontal')
            # remove whitespace between subplots etc.
            myFig.subplots_adjust(bottom=0.03,
                                  left=0.08,
                                  right=0.97,
                                  top=0.94,
                                  wspace=0,
                                  hspace=0)

            self.feature_development_plot[label] = myFig
Example #44
 def test_sim_cod_data(self):
     return  # skip for now
     cf = data.get_cod_data(level=1)
     X = data.sim_cod_data(10, cf)
     assert pl.shape(X) == (10, 3)
Example #45
n_x = gp.root.n_x.read()  #240 radial points
n_field = gp.root.n_field.read()
n_kinetic = gp.root.n_kinetic.read()
nu = gp.root.nu_s.read()
gp.close()

gf = tables.openFile("gyrofine" + gtime + ".h5")
alpha = gf.root.grid.alpha.read()
R = gf.root.grid.R.read()
Z = gf.root.grid.Z.read()
fineion = gf.root.density_ion1_phi01.read()
#gf.close()

#gf=tables.openFile("gyro"+gtime+".h5")
#for now build the density array for (species, fourier number, radial loc, poloidal loc)
radial_size = pylab.shape(gf.root.densityfine_real001)[1]
poloidal_size = pylab.shape(gf.root.densityfine_real001)[2]

real_ndensity_n = pylab.zeros((n_kinetic, n_n, radial_size, poloidal_size))
imag_ndensity_n = pylab.zeros((n_kinetic, n_n, radial_size, poloidal_size))
Denmag = pylab.zeros((n_kinetic, n_n, radial_size, poloidal_size + 1))

for n in range(1, n_n + 1):
    if n < 10:
        realDataName = "densityfine_real00" + str(n)
        imagDataName = "densityfine_imag00" + str(n)
    else:
        realDataName = "densityfine_real0" + str(n)
        imagDataName = "densityfine_imag0" + str(n)
    cmd1 = "gf.root." + realDataName + ".read()"
    cmd2 = "gf.root." + imagDataName + ".read()"
Example #46
phases_all_shaftA = pl.load(os.path.join(memap_folder, 'phases_all_shaftA.npy'), mmap_mode=None)
phases_all_shaftC = pl.load(os.path.join(memap_folder, 'phases_all_shaftC.npy'), mmap_mode=None)

data = pl.load(os.path.join(memap_folder,'B14R9_raw.npy'), mmap_mode='r+')


# ----------Data generation-----------------
data = lio.read_all_csc(folder,  assume_same_fs=False, memmap=True, memmap_folder=memap_folder, save_for_spikedetekt=False, channels_to_save=None, return_sliced_data=False)
pl.save(os.path.join(memap_folder, 'B14R9_raw.npy'), data)

data_ecog = data[:64,:]
data_probe = data[64:,:]


data_probe_hp = pl.memmap(os.path.join(memap_folder,'data_probe_hp.dat'), dtype='int16', mode='w+', shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i,:] = filters.high_pass_filter(data_probe[i, :], Fsampling=f_sampling, Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)


shape_data_ss = (pl.shape(data_ecog)[0], pl.shape(data_ecog)[1]/int(f_sampling/f_subsample))
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'), dtype='int16', mode='w+', shape=shape_data_ss)
for i in pl.arange(0, pl.shape(data_ecog)[0]):
    data_ecog_lp_ss[i,:] = signal.decimate(
        filters.low_pass_filter(data_ecog[i, :], Fsampling=f_sampling, Fcutoff=f_lp_cutoff), int(f_sampling / f_subsample))
    data_ecog_lp_ss.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_ecog_lp_ss.npy'), data_ecog_lp_ss)
Example #47
    dir = dir[j1:j2:k, i1:i2:k]
    rtp = rtp[j1:j2:k, i1:i2:k]
    dissip = dissip[j1:j2:k, i1:i2:k]
    ubot = ubot[j1:j2:k, i1:i2:k]
    wlen = wlen[j1:j2:k, i1:i2:k]
    qb = qb[j1:j2:k, i1:i2:k]
    transpx = transpx[j1:j2:k, i1:i2:k]
    transpy = transpy[j1:j2:k, i1:i2:k]
    velx = velx[j1:j2:k, i1:i2:k]
    vely = vely[j1:j2:k, i1:i2:k]
    mask = mask[j1:j2:k, i1:i2:k]
    
#print('writing netcdf file')
##________netcdf writing________________

ny,nx = pl.shape(xp)

missing_value=0
nc = netCDF4.Dataset(outfile, 'w', format='NETCDF3_CLASSIC')

nc.createDimension('x', nx)
nc.createDimension('y', ny)

nc.createDimension('ocean_time',None)
timea = nc.createVariable('ocean_time','f8',('ocean_time',))
timea.units = units
timea[:]=tim

x_nc = nc.createVariable('x', 'float', ('y','x',))
x_nc.long_name = 'x positions'
x_nc[:] = xp[:]
Example #48
    def __init__(self, fname, beampwr, wls, inttime, radii=None):

        self.image_fig = pl.figure(1)
        self.imaxes = self.image_fig.add_subplot(111)

        self.xsection = pl.figure(2)
        self.xsec_axes = self.xsection.add_subplot(111)
        self.xsec_line = self.xsec_axes.plot([], [])[0]
        self.xsec_scatrlineL = self.xsec_axes.plot([], [], color='g')[0]
        self.xsec_scatrlineR = self.xsec_axes.plot([], [], color='g')[0]

        self.result_fig = pl.figure(3)
        self.result_axes = self.result_fig.add_subplot(111)
        self.result_line = self.result_axes.plot([], [])[0]

        self.center = (0, 0)
        self.inrad = 0
        self.outrad = 0
        self.rad1 = 0
        self.rad2 = 0

        self.scatr_y0 = 0
        self.scatr_y1 = 0

        self.inttime = inttime
        self.beampwr = beampwr
        self.radii = radii
        self.wls = wls
        self.wli = 0
        self.path = os.path.dirname(fname)

        self.update_image()
        self.line = self.imaxes.plot([], [], lw='3')[0]

        #reset image bounds
        xmin = 0
        xmax = pl.shape(self.imdata)[1]
        ymin = 0
        ymax = pl.shape(self.imdata)[0]
        im_bounds = [xmin, xmax, ymin, ymax]
        self.imaxes.axis(im_bounds)

        self.pwr = []
        self.pce = []

        if radii is not None:
            self.inrad = min(radii)
            self.outrad = max(radii)

        self.movering = False  # boolean indicating whether mouse clicks should redraw the ring shape
        self.clickevent = self.image_fig.canvas.mpl_connect(
            'button_press_event', self.click)
        self.keydownevent = self.image_fig.canvas.mpl_connect(
            'key_press_event', self.keydown)
        self.keyupevent = self.image_fig.canvas.mpl_connect(
            'key_release_event', self.keyup)

        self.xsec_clickevent = self.xsection.canvas.mpl_connect(
            'button_press_event', self.xsec_click)
        self.xsec_keydownevent = self.xsection.canvas.mpl_connect(
            'key_press_event', self.xsec_keydown)
Example #49
                uvd1 = uvd.data[j1:j2:k, i1:i2:k]
            #mask_rho_ref=mr.data[j1:j2:k,i1:i2:k]
            mask_rho = ncvar['mask_rho'][j1:j2:k, i1:i2:k]
            #mask_rho_uni=maskr.data[j1:j2:k,i1:i2:k]
            x1 = xmem[j1:j2:k, i1:i2:k]
            y1 = ymem[j1:j2:k, i1:i2:k]

            times = times[ntime:ntime + 1]

        #Rotate to x,y grid
        #u1,v1=okcl.rot2d(u,v,ang,inverse=True)

        #print('Writing NetCDF file')
        ##________NETCDF writing________________

        ny, nx = pl.shape(h1)

        missing_value = 0
        nc = netCDF4.Dataset(outfile, 'w', format='NETCDF3_CLASSIC')

        nc.createDimension(x_rho, nx)
        nc.createDimension(y_rho, ny)

        nc.createDimension('ocean_time', None)
        timea = nc.createVariable('ocean_time', 'f8', ('ocean_time', ))
        timea.units = ncvar[timename_sim].units
        timea[:] = times

        x_nc = nc.createVariable(x_rho, 'float', (
            y_rho,
            x_rho,
Example #50
    def _stop_training(self, debug=False):
        """
        Finish the training, i.e. for the time series plots: take the
        accumulated time series and divide by the number of samples per
        condition.
        For the
        """
        # Compute avg
        for label in self.mean_time_series.keys():
            self.mean_time_series[label] /= self.samples_per_condition[label]
            self.time_series_histo[label] = \
                pylab.array(self.time_series_histo[label])

            # Compute the error of the desired type; the numeric prefix of
            # error_type is a multiplier, e.g. '1.96StdError' means 1.96
            # times the standard error.
            if self.error_type is not None:
                if self.error_type.strip('0123456789.') == 'SampleStdDev':
                    self.error[label] = \
                        pylab.sqrt(pylab.var(self.time_series_histo[label], 0))
                elif self.error_type.strip('0123456789.') == 'StdError':
                    self.error[label] = \
                        pylab.sqrt(pylab.var(self.time_series_histo[label], 0)) / \
                        pylab.sqrt(pylab.shape(self.time_series_histo[label])[0])

                multiplier = float(''.join([
                    nr for nr in self.error_type if (nr.isdigit() or nr == ".")
                ]))
                self.error[label] = multiplier * self.error[label]

        # other plots only if features were passed
        if self.feature_vector is not None:
            self.feature_time_series = \
                convert_feature_vector_to_time_series(self.feature_vector,
                                                      self.sample_data)

            # in the alternative scaling space, the feature "importance" is
            # determined by the feature values
            # weighted by the expected difference in time series values
            # between the two classes (difference of avg std and avg target)
            # The standard P3 and LRP cases are handled separately to make
            # sure that the sign of the difference is consistent
            if self.alternative_scaling:
                if all(label_iter in ['Target', 'Standard']
                       for label_iter in self.mean_time_series.keys()):
                    self.feature_time_series *= (
                        self.mean_time_series['Target'] -
                        self.mean_time_series['Standard'])
                elif all(label_iter in ['LRP', 'NoLRP']
                         for label_iter in self.mean_time_series.keys()):
                    self.feature_time_series *= (
                        self.mean_time_series['LRP'] -
                        self.mean_time_series['NoLRP'])
                else:
                    self.feature_time_series *= (
                        self.mean_time_series[self.mean_time_series.keys()[0]] -
                        self.mean_time_series[self.mean_time_series.keys()[1]])
                    print "AverageFeatureVis (alternative_scaling): " + \
                        "Present classes don't match the standards " + \
                        "(Standard/Target or LRP/NoLRP). Used the difference " + \
                        "%s - %s" % (self.mean_time_series.keys()[0],
                                     self.mean_time_series.keys()[1]) + \
                        " for computation of the alternative scaling."

            # the greatest feature value that occurs is used for the
            # normalization of the color representation of the feature values
            self.max_feature_val = \
                (abs(self.feature_time_series)).max(0).max(0)
            self.normalizer = colors.Normalize(vmin=-self.max_feature_val,
                                               vmax=self.max_feature_val)
            cdict = {
                'red': [(0.0, 1.0, 1.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)],
                'green': [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 0.0, 0.0)],
                'blue': [(0.0, 0.0, 0.0), (0.5, 1.0, 1.0), (1.0, 1.0, 1.0)]
            }
            self.own_colormap = \
                colors.LinearSegmentedColormap('owncm', cdict, N=256)

            # sort the features with descending importance
            self.indexlist = pylab.transpose(
                self.feature_time_series.nonzero())
            indexorder = abs(self.feature_time_series[
                abs(self.feature_time_series) > self.important_feature_thresh
            ]).argsort()

            self.indexlist = self.indexlist[indexorder[-1::-1]]  #reverse order
            self.indexlist = map(
                list, self.indexlist[:len(self.feature_vector) *
                                     self.percentage_of_features / 100])

            self.histo_plot = self._generate_histo_plot()

            try:
                # try to generate a plot of the feature crosscorrelation
                # matrix. Might fail if the threshold is set such that no
                # features are left.
                for label in self.mean_time_series.keys():
                    self.labeled_corr_matrix[label] = \
                        self._generate_labeled_correlation_matrix(label)
                    self.corr_plot[label] = \
                        self._get_corr_plot(self.corr_important_feats[label],
                                            label)

                # if 2 class labels exist, also compute the difference in the
                # cross correlation between the classes.
                if len(self.corr_important_feats.keys()) == 2:
                    self.corr_plot['Diff'] = self._get_corr_plot((
                        self.corr_important_feats
                            [self.corr_important_feats.keys()[0]]
                      - self.corr_important_feats
                            [self.corr_important_feats.keys()[1]]),
                        self.corr_important_feats.keys()[0] + ' - ' + \
                            self.corr_important_feats.keys()[1])
            except TypeError:
                import warnings
                warnings.warn("\n\nFeatureVis doesn't have enough important" +
                              " features left for correlation plots..." +
                              " Check threshold.\n")

        # Compute avg time series plot anyway
        self.ts_plot = self._generate_time_series_plot()
Example #51
            start_t = float(mintic)
        data[:, ticcol] -= start_t
        data[:, toccol] -= start_t
        end_t = (toc_step - start_t) / CPU_CLOCK

        tasks = {}
        tasks[-1] = []
        for i in range(nthread * expand):
            tasks[i] = []

        # Counters for each thread when expanding.
        ecounter = [0] * nthread

        num_lines = pl.shape(data)[0]
        for line in range(num_lines):
            thread = int(data[line, threadscol])

            # Expand to cover extra lines if expanding.
            ethread = thread * expand + (ecounter[thread] % expand)
            ecounter[thread] += 1
            thread = ethread

            tasks[thread].append({})
            tasktype = TASKTYPES[int(data[line, taskcol])]
            subtype = SUBTYPES[int(data[line, subtaskcol])]
            tasks[thread][-1]["type"] = tasktype
            tasks[thread][-1]["subtype"] = subtype
            tic = int(data[line, ticcol]) / CPU_CLOCK
            toc = int(data[line, toccol]) / CPU_CLOCK
Example #52
from itertools import izip  # Python 2; on Python 3 use the built-in zip
import pylab as pl

def indices(array_like):
    shape = pl.shape(array_like)
    # 'ij' indexing makes the tuples come out in itertools.product order
    mesh = pl.meshgrid(*(pl.arange(n) for n in shape), indexing='ij')
    return izip(*(dim.flat for dim in mesh))
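
# Usage sketch: enumerate every index of a 2x3 array.
#   list(indices(pl.zeros((2, 3))))
#   -> [(0, 0), (0, 1), (0, 2), (1, 0), (1, 1), (1, 2)]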
Example #53
#     eventid = str(eventid)
#     phys_evt = eventid[-2:]
#     eventid = eventid[:-2]
#     gate = eventid[-4:]
#     eventid = eventid[:-4]
#     subrun = eventid[-4:]
#     eventid = eventid[:-4]
#     run = eventid
#     return (run, subrun, gate, phys_evt)

f = h5py.File(filename, 'r')

have_times = False
# look for x, u, v data hits
try:
    data_shp = pylab.shape(f['features'])
except KeyError:
    print("'features' does not exist.")
    data_shp = None
    sys.exit()

data_shp = (max_evts, data_shp[1], data_shp[2], data_shp[3])
# note: this zeros buffer is immediately replaced by the HDF5 read below
data = pylab.zeros(data_shp, dtype='float32')
data = f["features"][:max_evts].astype('float32')

labels_shp = (max_evts, )
labels = pylab.zeros(labels_shp, dtype='float32')

try:
    labels = f['Eng'][:max_evts].astype('float32')
except KeyError:
Example #54
   def __init__(self,data,mask_file,threshold=0,row13=False):
      self.key=[]  
      self.x=0
      self.y=0
      self.xy=[]
      self.xx=[]
      self.yy=[]
      self.data_shape = data.shape
      # cspad 2x2 shape: (2, 185, 388)
      if len(data.shape) == 3:
         lx = data.shape[0]
         ly = data.shape[1]
         lz = data.shape[2]
         data_2d = data.reshape(lx*ly, lz)
         # transpose last 2x1
         data_2d[(lx-1)*ly:lx*ly,:] = data[lx-1,::-1,::-1]
         self.data=data_2d
      else:
         self.data=data
      self.lx,self.ly=p.shape(self.data)
      self.points=[]
      for i in range(self.lx):
         for j in range(self.ly):
            self.points.append([i,j])
      self.mask_file=mask_file
      if os.path.exists(self.mask_file):
#         if 'edf' in self.mask_file:
#            mask_f=EdfFile.EdfFile(self.mask_file)
#            self.mymask=mask_f.GetData(0)
#            num_masks=mask_f.GetNumImages()
#            if num_masks==2:
#               self.automask=mask_f.GetData(1)
#               self.anisotropic_mask=0*self.mymask
#            if num_masks==3:
#               self.automask=mask_f.GetData(1)
#               self.anisotropic_mask=mask_f.GetData(2)
#            else:
#               self.automask=0*self.mymask
#               self.anisotropic_mask=0*self.mymask
#            if p.shape(self.mymask)!=p.shape(self.data):
#               self.mymask=n.zeros((self.lx,self.ly))
#         elif 'h5' in self.mask_file:
         if 'h5' in self.mask_file:
            #newfile=self.make_2mask_name()
            self.open_mask()
            self.anisotropic_mask=0*self.mymask
      else:
         self.mymask=n.zeros((self.lx,self.ly))
         self.automask=n.zeros((self.lx,self.ly))
         self.anisotropic_mask=n.zeros((self.lx,self.ly))
      self.old_mymask=self.mymask
      self.old_automask=self.automask
      self.automask[n.where(self.data<threshold)]=1
      print "automatically masking out " + str(int(self.automask.sum())) + " pixels below threshold=%s" % (threshold)
      #this is for masking out row13 of the CSPAD
      if (row13):
         print "automatically masking out row13 of CSPAD"
         col=181
         for i in range(8):
            self.automask[:,col]=1
            col+= 194
      #end of CSPAD part
      palette=p.cm.jet
      palette.set_bad('w',1.0)
      p.rc('image',origin = 'lower')
      p.rc('image',interpolation = 'nearest')
      p.figure(2)
      self.px=p.subplot(111)
      lowest_value_allowed = -0.999999
      self.data[n.where(self.data < lowest_value_allowed)] = lowest_value_allowed
      self.data=p.log(self.data+1)
      self.im=p.imshow(masked_array(self.data,self.mymask+self.automask+self.anisotropic_mask), cmap=palette)
      p.title('Select a ROI. Press m to mask it or u to unmask it. k to save/exit, q to exit without saving')
      self.lc,=self.px.plot((0,0),(0,0),'-+m',linewidth=1,markersize=8,markeredgewidth=1)
      self.lm,=self.px.plot((0,0),(0,0),'-+m',linewidth=1,markersize=8,markeredgewidth=1)
      self.px.set_xlim(0,self.ly)
      self.px.set_ylim(0,self.lx)
      self.colorbar=p.colorbar(self.im,pad=0.01)
      cidb=p.connect('button_press_event',self.on_click)
      cidk=p.connect('key_press_event',self.on_click)
      cidm=p.connect('motion_notify_event',self.on_move)
      p.show()
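
# Hypothetical usage -- the class statement is not shown in this snippet, so
# the class name `MaskWidget` is assumed:
#   mw = MaskWidget(image_data, 'run42_mask.h5', threshold=0, row13=True)
# A window opens showing log(data+1); select a ROI, press m/u to mask/unmask
# it, k to save and exit, q to exit without saving.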
Example #55
data = lio.read_all_csc(folder,
                        assume_same_fs=False,
                        memmap=True,
                        memmap_folder=memap_folder,
                        save_for_spikedetekt=False,
                        channels_to_save=None,
                        return_sliced_data=False)
pl.save(os.path.join(memap_folder, 'B14R9_raw.npy'), data)

data_ecog = data[:64, :]
data_probe = data[64:, :]

data_probe_hp = pl.memmap(os.path.join(memap_folder, 'data_probe_hp.dat'),
                          dtype='int16',
                          mode='w+',
                          shape=pl.shape(data_probe))
for i in pl.arange(0, pl.shape(data_probe)[0]):
    data_probe_hp[i, :] = filters.high_pass_filter(data_probe[i, :],
                                                   Fsampling=f_sampling,
                                                   Fcutoff=f_hp_cutoff)
    data_probe_hp.flush()
    print(i)
pl.save(os.path.join(memap_folder, 'data_probe_hp.npy'), data_probe_hp)

shape_data_ss = (pl.shape(data_ecog)[0],
                 pl.shape(data_ecog)[1] / int(f_sampling / f_subsample))
data_ecog_lp_ss = pl.memmap(os.path.join(memap_folder, 'data_ecog_lp_ss.dat'),
                            dtype='int16',
                            mode='w+',
                            shape=shape_data_ss)
for i in pl.arange(0, pl.shape(data_ecog)[0]):
Example #56
# The snippet begins mid-function; the header below is reconstructed, and the
# name `decode_eventid` is a guess (compare the commented copy in Example #53).
def decode_eventid(eventid):
    eventid = str(eventid)
    phys_evt = eventid[-2:]
    eventid = eventid[:-2]
    gate = eventid[-4:]
    eventid = eventid[:-4]
    subrun = eventid[-4:]
    eventid = eventid[:-4]
    run = eventid
    return (run, subrun, gate, phys_evt)
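
# Worked example, assuming the layout implied by the slicing
# (run | 4-digit subrun | 4-digit gate | 2-digit phys_evt):
#   decode_eventid('117000250012000502')
#   -> ('11700025', '0012', '0005', '02')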


f = h5py.File(filename, 'r')

have_times = False
# look for x, u, v data hits
try:
    data_x_shp = pylab.shape(f['hits-x'])
    xname = 'hits-x'
except KeyError:
    print("'hits-x' does not exist.")
    data_x_shp = None
try:
    data_u_shp = pylab.shape(f['hits-u'])
    uname = 'hits-u'
except KeyError:
    print("'hits-u' does not exist.")
    data_u_shp = None
try:
    data_v_shp = pylab.shape(f['hits-v'])
    vname = 'hits-v'
except KeyError:
    print("'hits-v' does not exist.")