Example n. 1
 def __call__(self, image, mode=None, image_size=None):
     if mode is None:
         mode = self._mode
     if image_size is None:
         image_size = self._image_size
     if isinstance(image_size, int):
         image_size = (image_size, image_size)
     image = cv2.resize(self.convert(image), image_size[::-1])
     image = image <= self._black_thresh
     h2 = image_size[0] // 2
     w2 = image_size[1] // 2
     # If image_size is odd, then the last row/col is ignored
     image_h = np.logical_and(image[h2:2 * h2, :], image[:h2, :][::-1, :])
     image_v = np.logical_and(image[:, w2:2 * w2], image[:, :w2][:, ::-1])
     h, w = image_size
     if mode == 'both':
         return np.asfarray(np.hstack([np.sum(image, 0) == h,
                                       np.sum(image, 1) == w,
                                       np.sum(image_h, 1) == w,
                                       np.sum(image_v, 0) == h]))
     elif mode == 'vertical':
         return np.asfarray(np.hstack([np.sum(image, 0),
                                       np.sum(image_v, 0)]) == h)
     elif mode == 'horizontal':
         return np.asfarray(np.hstack([np.sum(image, 1),
                                       np.sum(image_h, 1)]) == w)
     else:
         raise ValueError('Unknown mode[%s]' % mode)
Example n. 2
def imshow(ax, x, y, z, *args, **kwargs):
    if (np.iscomplexobj(x)):
        x = np.asfarray(x.real)
    else:
        x = np.asfarray(x)
    if (np.iscomplexobj(y)):
        y = np.asfarray(y.real)
    else:
        y = np.asfarray(y)
    assert(len(x) == z.shape[1])
    assert(len(y) == z.shape[0])
    dx = x[1] - x[0]
    dy = y[1] - y[0]
    if (np.iscomplexobj(z)):
        zabs = abs(z)
    else:
        zabs = z
    zabs = np.asfarray(zabs)
    # Use this to center pixel around (x,y) values
    extent = (x[0]-dx/2.0, x[-1]+dx/2.0, y[0]-dy/2.0, y[-1]+dy/2.0)
    # Use this to let (x,y) be the lower-left pixel location (upper-left when origin = 'lower' is not used)
    #extent = (x[0], x[-1], y[0], y[-1])
    im = ax.imshow(zabs, extent = extent, *args, **kwargs)
    imshow_show_z(ax, z, x, y)
    ax.set_xlim((x[0], x[-1]))
    ax.set_ylim((y[0], y[-1]))
    return im
Example n. 3
    def testSerialIIRFilter(self, level=1):
	""" test seris of IIR filters in neurons
	"""
	# setup net
	self.net.setInitAlgorithm(INIT_STD)
	self.net.setSimAlgorithm(SIM_FILTER)
	self.net.init()
	
	# set IIR coeffs (biquad bandpass filter)
	b = N.array(([0.5,0.,-0.5])) / 1.5
	a = N.array(([1.5,0.,0.5])) / 1.5
	B = N.ones((self.size,6))
	A = N.ones((self.size,6))
	B[:,0:3] = B[:,0:3]*b
	A[:,0:3] = A[:,0:3]*a-0.01
	B[:,3:6] = B[:,3:6]*b
	A[:,3:6] = A[:,3:6]*a-0.01
	self.net.setIIRCoeff(B,A,2)
	
	# set output weight matrix
	wout = N.random.rand(self.outs,self.size+self.ins) * 2 - 1
	wout = N.asfarray(wout, self.dtype)
	self.net.setWout( wout )
	
	## simulate network
	indata = N.asfarray(N.random.rand(self.ins,self.sim_size),self.dtype)*2-1
	outdata = N.zeros((self.outs,self.sim_size),self.dtype)
	self.net.simulate( indata, outdata )
	
	# get data to python
	W = N.zeros((self.size,self.size),self.dtype)
	self.net.getW( W )
	Win = self.net.getWin()
	Wout = self.net.getWout()
	Wback = self.net.getWback()
	x = N.zeros((self.size))
	outtest = N.zeros((self.outs,self.sim_size),self.dtype)
	
	# initial conditions for filters
	zinit1 = N.zeros((self.size,2))
	zinit2 = N.zeros((self.size,2))
	
	# recalc algorithm in python
	for n in range(self.sim_size):
		#calc new network activation
		x = N.dot( W, x )
		x += N.dot( Win, indata[:,n] )
		if n > 0:
			x += N.dot( Wback, outtest[:,n-1] )
		# calc IIR filter:
		for i in range(self.size):
			insig = N.array(([x[i],])) # hack for lfilter
			insig,zinit1[i] = scipy.signal.lfilter(B[i,0:3], \
			                 A[i,0:3], insig, zi=zinit1[i])
			x[i],zinit2[i] = scipy.signal.lfilter(B[i,3:6], \
			                 A[i,3:6], insig, zi=zinit2[i])
		#output = Wout * [x; in]
		outtest[:,n] = N.dot( Wout, N.r_[x,indata[:,n]] )
	
	assert_array_almost_equal(outdata,outtest)
Example n. 4
def scaleSignal(img, fitParams=None, backgroundToZero=False, reference=None):
    '''
    scale the image between...
    
    backgroundToZero=True -> 0 (average background) and 1 (maximum signal)
    backgroundToZero=False -> signal+-3std
    
    reference -> reference image -- scale image to fit this one
    
    returns:
    scaled image
    '''
    img = imread(img)
    if reference is not None: 
        low, high = signalRange(img, fitParams)
        low2, high2 = signalRange(reference)
        img = np.asfarray(img)
        ampl = (high2-low2)/(high-low)
        img-=low
        img *= ampl
        img += low2
        return img
    else:
        offs, div = scaleParams(img, fitParams, backgroundToZero)
        img = np.asfarray(img)  - offs 
        img /= div 
        print('offset: %s, divisor: %s' % (offs, div))
        return img
Example n. 5
    def testCopyConstructorBP(self, level=1):
	""" test if a copied bandpass ESN generates the same result """
        
	# set bandpass parameters
	self.net.setSimAlgorithm(SIM_BP)
	f1 = N.linspace(0.1, 1., self.net.getSize())
	f2 = N.linspace(0.0001, 0.5, self.net.getSize())
	self.net.init()
	self.net.setBPCutoff(f1,f2)
	
	# set output weight matrix
	trainin = N.random.rand(self.ins,self.train_size) * 2 - 1
	trainout = N.random.rand(self.outs,self.train_size) * 2 - 1
	trainin = N.asfarray(trainin, self.dtype)
	trainout = N.asfarray(trainout, self.dtype)
	self.net.train(trainin,trainout,1)
	
	# copy network
	# ATTENTION: operator= is shallow copy !
	if self.dtype == 'float32':
		netA = SingleESN(self.net)
	else:
		netA = DoubleESN(self.net)
	
	# simulate both networks separate and test result
	indata = N.random.rand(self.ins,self.sim_size)*2-1
	indata = N.asfarray(indata, self.dtype)
	outdata = N.empty((self.outs,self.sim_size),self.dtype)
	outdataA = N.empty((self.outs,self.sim_size),self.dtype)
	self.net.simulate( indata, outdata )
	netA.simulate( indata, outdataA )
	assert_array_almost_equal(outdata,outdataA)
Example n. 6
 def checkGradient(self, funcType, *args,  **kwargs):
     self._Prepare()
     if not DerApproximatorIsInstalled:
         self.err('To perform gradients check you should have DerApproximator installed, see http://openopt.org/DerApproximator')
     
     if not getattr(self.userProvided, funcType):
         self.warn("you haven't analitical gradient provided for " + funcType[1:] + ', turning derivatives check for it off...')
         return
     if len(args)>0:
         if len(args)>1 or 'x' in kwargs:
             self.err('checkd<func> funcs can have single argument x only (then x should be absent in kwargs )')
         xCheck = asfarray(args[0])
     elif 'x' in kwargs:
         xCheck = asfarray(kwargs['x'])
     else:
         xCheck = asfarray(self.x0)
     
     maxViolation = 0.01
     if 'maxViolation' in kwargs:
         maxViolation = kwargs['maxViolation']
         
     self.disp(funcType + (': checking user-supplied gradient of shape (%d, %d)' % (getattr(self, funcType[1:])(xCheck).size, xCheck.size)))
     self.disp('according to:')
     self.disp('    diffInt = ' + str(self.diffInt)) # TODO: ADD other parameters: allowed epsilon, maxDiffLines etc
     self.disp('    |1 - info_user/info_numerical| < maxViolation = '+ str(maxViolation))        
     
     check_d1(getattr(self, funcType[1:]), getattr(self, funcType), xCheck, **kwargs)
     
     # reset counters that were modified during check derivatives
     self.nEvals[funcType[1:]] = 0
     self.nEvals[funcType] = 0
Example n. 7
    def SetInitialPoints(self, x0, radius=0.05):
        """Set Initial Points with Guess (x0)

input::
    - x0: must be a sequence of length self.nDim
    - radius: generate random points within [-radius*x0, radius*x0]
        for i!=0 when a simplex-type initial guess is required"""
        x0 = asfarray(x0)
        rank = len(x0.shape)
        if rank == 0:
            x0 = asfarray([x0])
            rank = 1
        if not -1 < rank < 2:
            raise ValueError("Initial guess must be a scalar or rank-1 sequence.")
        if len(x0) != self.nDim:
            raise ValueError("Initial guess must be length %s" % self.nDim)

        #slightly alter initial values for solvers that depend on randomness
        min = x0*(1-radius)
        max = x0*(1+radius)
        numzeros = len(x0[x0==0])
        min[min==0] = asarray([-radius for i in range(numzeros)])
        max[max==0] = asarray([radius for i in range(numzeros)])
        self.SetRandomInitialPoints(min,max)
        #stick initial values in population[i], i=0
        self.population[0] = x0.tolist()
Example n. 8
def LinearB0(Xi, Yi):
    X = np.asfarray(Xi)
    Y = np.asfarray(Yi)

    #we want a function y = m * x
    fp = lambda v, x: x * v[0]

    #the error of the function e = x - y
    e = lambda v, x, y: (fp(v, x) - y)

    #the initial value of m, we choose 1, because we thought YODA would
    #have chosen 1
    v0 = [1.0]

    vr, _success = leastsq(e, v0, args=(X, Y))

    #compute an error estimate (sqrt of the sum of squared residuals over N**2)
    err = np.sqrt(sum(np.square(e([vr], X, Y))) / (len(X) * len(X)))

    #Some versions of leastsq return an array, others a scalar, so here we
    #make sure it is an array
    val = np.array([vr]).flatten()

    return val, err
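
A minimal usage sketch (made-up data; assumes numpy as np, scipy.optimize.leastsq and the LinearB0 above are in scope):

X = [1.0, 2.0, 3.0, 4.0]
Y = [2.1, 3.9, 6.2, 7.8]   # roughly y = 2*x
val, err = LinearB0(X, Y)
print(val[0])              # fitted slope m, close to 2
print(err)                 # error estimate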
Example n. 9
def numerical_partials(f, p, f0=None, pmin=None, pmax=None, prel=1.e-6,
                       pabs=1.e-9, pmask=None, args=(), kwargs=None):
    """Compute partial derivatives of f(p) wrt p by finite differences."""
    if kwargs is None:
        kwargs = {}
    p = asfarray(p)
    f0 = asfarray(f(p, *args, **kwargs)) if f0 is None else asfarray(f0)
    dp = zeros_like(p)
    prel, pabs = prel + dp, pabs + dp
    dp = maximum(prel*absolute(p), pabs)
    pfull = p.copy()
    if pmask is not None:
        p, dp = p[pmask], dp[pmask]
    else:
        pmask = ones(p.shape, dtype=bool)
    # Assume that pmin <= p <= pmax, but check p+dp.
    if pmax is not None:
        mask = p+dp > pmax
        dp[mask] *= -1
        if mask.any():
            if pmin is not None and (p+dp < pmin).any():
                raise ValueError("pmin and pmax too close together")
    dfdp = []
    for dp, p1 in zip(dp, p+diag(dp)):
        if not dp:
            raise ValueError("zero step size, check prel and pabs")
        pfull[pmask] = p1
        dfdp.append((f(pfull, *args, **kwargs) - f0)/dp)
    return array(dfdp)
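
A small sketch of checking the result against analytic partials (the model function is hypothetical; assumes numerical_partials and the bare numpy names it uses, e.g. via from numpy import *, are in scope). Note the rows of the returned array correspond to parameters, i.e. the transpose of the conventional Jacobian:

import numpy as np

def model(p):
    return np.array([np.sin(p[0]) * p[1], p[0] + np.exp(p[1])])

dfdp = numerical_partials(model, [0.3, 2.0])
print(dfdp[0])                    # numerical partials w.r.t. p[0]
print([np.cos(0.3) * 2.0, 1.0])   # analytic values for comparison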
Example n. 10
    def testRidgeRegressionVsPI(self, level=1):
	""" TODO: tests if we get the same result with Ridge Regression and
	Pseudo Inverse method, if we set the regularization parameter to 0 """
        
	# init network
	self.net.setInitParam(FB_CONNECTIVITY, 0)
	self.net.setInitParam(TIKHONOV_FACTOR, 0)
	self.net.setSimAlgorithm(SIM_STD)
	self.net.setTrainAlgorithm(TRAIN_RIDGEREG)
	self.net.init()
	
	# generate data
	washout = 2
	# test with random input:
	indata = N.random.rand(self.ins,self.train_size) * 2 - 1
	outdata = N.random.rand(self.outs,self.train_size) * 2 - 1
	indata = N.asfarray( indata, self.dtype )
	outdata = N.asfarray( outdata, self.dtype )
	
	# train with ridge regression
	self.net.train( indata, outdata, washout )
	wout_ridge = self.net.getWout().copy()
	
	# make the same with PI training
	self.net.setTrainAlgorithm(TRAIN_PI)
	self.net.resetState()
	self.net.init()
	self.net.train( indata, outdata, washout )
	wout_pi = self.net.getWout().copy()
	
	# compare: with TIKHONOV_FACTOR = 0, ridge regression should match pseudo-inverse training
	assert_array_almost_equal(wout_ridge, wout_pi, 2)
Example n. 11
    def testPI(self, level=1):
	""" test TRAIN_PI with zero input and feedback """
        
	# init network
	self.net.setSimAlgorithm(SIM_STD)
	self.net.setTrainAlgorithm(TRAIN_PI)
	self.net.init()
	
	# train network
	washout = 2
	# test with zero input:
	indata = N.zeros((self.ins,self.train_size),self.dtype)
	outdata = N.random.rand(self.outs,self.train_size) * 2 - 1
	indata = N.asfarray( indata, self.dtype )
	outdata = N.asfarray( outdata, self.dtype )
	self.net.train( indata, outdata, washout )
	wout_target = self.net.getWout().copy()
	
	# teacher forcing, collect states
	X = self._teacherForcing(indata,outdata)
	
	# restructure data
	M = N.r_[X,indata]
	M = M[:,washout:self.train_size].T
	T = outdata[:,washout:self.train_size].T
	
	# calc pseudo inverse: wout = pinv(M) * T
	wout = ( N.dot(pinv(M),T) ).T
	
	# normalize result for comparison
	wout = wout / abs(wout).max()
	wout_target = wout_target / abs(wout_target).max()
	assert_array_almost_equal(wout_target,wout,2)
Example n. 12
    def _central(self):
        ''' Return central difference function

        Member variables used
            n
            fun
            vectorized
        '''
        even_order = (np.remainder(self.n, 2) == 0)

        if self.vectorized:
            if even_order:
                f_del = lambda fun, f_x0i, x0i, h: (
                    fun(x0i + h) + fun(x0i - h)).ravel() / 2.0 - f_x0i
            else:
                f_del = lambda fun, f_x0i, x0i, h: (
                    fun(x0i + h) - fun(x0i - h)).ravel() / 2.0
        else:
            if even_order:
                f_del = lambda fun, f_x0i, x0i, h: np.asfarray(
                    [fun(x0i + h_j) + fun(x0i - h_j)
                                for h_j in h]).ravel() / 2.0 - f_x0i
            else:
                f_del = lambda fun, f_x0i, x0i, h: np.asfarray(
                    [fun(x0i + h_j) - fun(x0i - h_j)
                                        for h_j in h]).ravel() / 2.0
        return f_del
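
For illustration, the odd-order vectorized branch estimates a first derivative as f_del/h; a minimal sketch assuming numpy as np:

f_del = lambda fun, f_x0i, x0i, h: (fun(x0i + h) - fun(x0i - h)).ravel() / 2.0
h = np.asfarray([1e-5])
print(f_del(np.sin, None, 0.5, h) / h)   # ~cos(0.5) = 0.8776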
Example n. 13
def relu(x, deriv=False):
    if not deriv:
        return np.asfarray(np.maximum(0, x))
    # derivative: 1 where the activation is positive, 0 elsewhere
    der = np.asfarray(np.maximum(0, x))
    der[der > 0] = 1
    return der
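
A quick check of both branches, assuming numpy as np:

x = np.array([-2.0, -0.5, 0.0, 1.5])
print(relu(x))               # [0.  0.  0.  1.5]
print(relu(x, deriv=True))   # [0. 0. 0. 1.]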
Example n. 14
def _det(xvert, yvert):
    '''Compute twice the area of the triangle defined by the points, using the
    determinant formula.

    Input parameters:

    xvert -- A vector of nodal x-coords (array-like).
    yvert -- A vector of nodal y-coords (array-like).

    Output parameters:

    Twice the area of the triangle defined by the points.

    Notes:

    _det is positive if points define polygon in anticlockwise order.
    _det is negative if points define polygon in clockwise order.
    _det is zero if at least two of the points are coincident or if
        all points are collinear.

    '''
    xvert = np.asfarray(xvert)
    yvert = np.asfarray(yvert)
    x_prev = np.concatenate(([xvert[-1]], xvert[:-1]))
    y_prev = np.concatenate(([yvert[-1]], yvert[:-1]))
    return np.sum(yvert * x_prev - xvert * y_prev, axis=0)
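
A small sanity check of the sign convention, assuming numpy as np:

print(_det([0, 1, 0], [0, 0, 1]))   # 1.0, anticlockwise unit right triangle
print(_det([0, 0, 1], [0, 1, 0]))   # -1.0, same triangle in clockwise order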
Example n. 15
    def __init__(self,ad):
        """Load parameters from a FITS header into the fitting function such that
           we can evaluate the function.
           The header has keywords of the form COEFF_A, COEFF_B..., from low
           to high order.
           :param xmin,xmax:  Min,max range where to evaluate
           :param fitname:  Fitting function name used.
           :type fitname: {'polynomial','legendre',or 'chebyshev'}
           :param order: polynomial order used to evaluate the coeffs
           :param coeff: Coefficients from the fitting
           :type coeff: List
           :return: An array of evaluated values

           Example:
             ef = gfit.Pix2coord(ad)
             ef(1300)   # Evaluate wavelength at pixel 1300 in the middle row
             ef(1300,400)   # Evaluate wavelength at pixel 1300 at row 400
        """

        tbh = ad['WAVECAL',1].header
        pixsample = np.asfarray(tbh['pixrange'].split())
        self.fitname = tbh['fitname']
        self.order = tbh['fitorder']
        self.coeff = np.asfarray(tbh['fitcoeff'].split())
        # Set up the pix2wavelength evaluator function 
        xmin, xmax = pixsample
        self.xnorm = lambda x: 2.*(x - xmin) / (xmax-xmin) - 1

        f3dcoeff = []
        for k in ['A', 'B', 'C', 'D', 'E', 'F']:
            f3dcoeff.append(tbh['COEFF_'+k])
        self.f3dcoeff = np.asfarray(f3dcoeff)
Example n. 16
def iniGibbs(state):
  """Does the initialisation gibbs pass, where it incrimentally sets the starting topic assignments based on the documents so far fitted. Builds the count matrices/vectors in the state at the same time."""
  
  dist = numpy.empty(state.topicCount.shape[0], dtype = numpy.float_)
  boostQuant = state.alpha*(state.alphaMult-1.0)
  
  for w in range(state.state.shape[0]): # Loop over the words that constitute the state
    # Calculate the unnormalised distribution...
    dist[:] = numpy.asfarray(state.docTopicCount[state.state[w,0],:]) + state.alpha

    if state.boost[state.state[w,0]]!=-1:
      boostAmount = boostQuant
      dist[state.boost[state.state[w,0]]] += boostQuant
    else:
      boostAmount = 0.0
    
    dist /= numpy.asfarray(state.docCount[state.state[w,0]]) + state.topicCount.shape[0]*state.alpha + boostAmount
    
    dist *= numpy.asfarray(state.topicWordCount[:,state.state[w,1]]) + state.beta
    dist /= numpy.asfarray(state.topicCount) + state.topicWordCount.shape[1]*state.beta

    
    # Normalise...
    dist /= dist.sum()
    
    # Select and set the state...
    state.state[w,2] = numpy.nonzero(numpy.random.multinomial(1,dist))[0][0]
    
    # Increment the relevant counts in each of the 4 arrays...
    state.topicWordCount[state.state[w,2],state.state[w,1]] += 1
    state.topicCount[state.state[w,2]] += 1
    state.docTopicCount[state.state[w,0],state.state[w,2]] += 1
    state.docCount[state.state[w,0]] += 1
Example n. 17
def get_fasta_stats(cnarr, fa_fname):
    """Calculate GC and RepeatMasker content of each bin in the FASTA genome."""
    logging.info("Calculating GC and RepeatMasker content in %s ...", fa_fname)
    gc_rm_vals = [calculate_gc_lo(subseq)
                  for subseq in fasta_extract_regions(fa_fname, cnarr)]
    gc_vals, rm_vals = zip(*gc_rm_vals)
    return np.asfarray(gc_vals), np.asfarray(rm_vals)
Example n. 18
  def __init__(self, fn=None):
    """
    INPUT:
      fn -- filename / None -- laser log name to use for logging simulated 
          laser data. None logged if name is None
          
    ATTRIBUTES:
      tagPos -- 4x2 float array -- corners of robot tag
      laserAxis -- 2x2 float array -- two points along axis of laser
      waypoints -- dict -- maps waypoint tag numbers to 4x2 float 
          arrays of the tag corners
    """
    # Initialize dummy values into robot and arena state
    self.tagPos = asfarray(MSG_TEMPLATE[ ROBOT_TAGID[0]])
    self.laserAxis = dot([[1,1,0,0],[0,0,1,1]],self.tagPos)/2
    self.waypoints = { tid : asfarray(MSG_TEMPLATE[tid]) for tid in waypoints }

    ### Initialize internal variables
    # Two points on the laser screen
    self.laserScreen = asfarray([[-1,-1],[1,-1]])
    # Cache for simulated TagStreamer messages
    self._msg = None
    # Output for simulated laser data
    if not fn:
      self.out = None
    else:
      self.out = opengz(fn,"w")
Example n. 19
    def nllsp2nlp(self, solver, **solver_params):

        ff = lambda x: sum(asfarray(self.f(x)) ** 2)
        p = NLP.NLP(ff, self.x0)
        # p = NLP.NLP(FF, self.x0)
        self.inspire(p, sameConstraints=True)
        if self.userProvided.df:
            p.df = lambda x: dot(2 * asfarray(self.f(x)), asfarray(self.df(x, useSparse=False)))
        p.f = ff

        def nllsp_iterfcn(*args, **kwargs):
            p.primalIterFcn(*args, **kwargs)
            p.xk = self.xk
            p.fk = p.f(p.xk)
            p.rk = self.rk
            # TODO: add nNaNs
            if self.istop != 0:
                p.istop = self.istop
            elif p.istop != 0:
                self.istop = p.istop

        p.primalIterFcn, p.iterfcn = self.iterfcn, nllsp_iterfcn

        self.iprint = -1
        p.show = False

        r = p.solve(solver, **solver_params)
        # r.ff = ff(r.xf)

        return r
Example n. 20
def MakeFrameMask(data,frame):
    pixelSize = data['pixelSize']
    scalex = pixelSize[0]/1000.
    scaley = pixelSize[1]/1000.
    blkSize = 512
    Nx,Ny = data['size']
    nXBlks = (Nx-1)//blkSize+1
    nYBlks = (Ny-1)//blkSize+1
    tam = ma.make_mask_none(data['size'])
    for iBlk in range(nXBlks):
        iBeg = iBlk*blkSize
        iFin = min(iBeg+blkSize,Nx)
        for jBlk in range(nYBlks):
            jBeg = jBlk*blkSize
            jFin = min(jBeg+blkSize,Ny)                
            nI = iFin-iBeg
            nJ = jFin-jBeg
            tax,tay = np.mgrid[iBeg+0.5:iFin+.5,jBeg+.5:jFin+.5]         #bin centers not corners
            tax = np.asfarray(tax*scalex,dtype=np.float32)
            tay = np.asfarray(tay*scaley,dtype=np.float32)
            tamp = ma.make_mask_none((1024*1024))
            tamp = ma.make_mask(pm.polymask(nI*nJ,tax.flatten(),
                tay.flatten(),len(frame),frame,tamp)[:nI*nJ])^True  #switch to exclude around frame
            if tamp.shape:
                tamp = np.reshape(tamp[:nI*nJ],(nI,nJ))
                tam[iBeg:iFin,jBeg:jFin] = ma.mask_or(tamp[0:nI,0:nJ],tam[iBeg:iFin,jBeg:jFin])
            else:
                tam[iBeg:iFin,jBeg:jFin] = True
    return tam.T
Example n. 21
def findXing(a,b):
  """
  Find the crossing point of two lines, represented each by a pair of
  points on the line
  
  INPUT:
    a -- 2x2 -- two points, in rows
    b -- 2x2 -- two points, in rows
  
  OUTPUT: c -- 2 -- a point, as an array
  """
  a = asfarray(a)
  b = asfarray(b)
  # The nullspace of this matrix is the projective representation
  # of the intersection of the lines. Each column's nullspace is
  # one of the lines
  X = c_[a[1]-a[0],b[0]-b[1],a[0]-b[0]].T
  if X.ndim != 2:
    DEBUG()
  Q = svd(X)[0]
  # Last singular vector is basis for nullspace; convert back from
  # projective to Cartesian representation
  q = Q[:2,2]/Q[2,2]
  c = q[0]*(a[1]-a[0])+a[0]
  return c
Example n. 22
    def generateTrajectory(self,strokes):
	# Loop over each stroke pair
	current = self.arm.configuration
	configurations = []
	for stroke in strokes:
	    wp1 = stroke[0] # Start point of stroke
	    wp2 = stroke[1] # End point of stroke
	    # Check if current location is current waypoint (i.e. don't lift off)
	    #currentHover = np.copy(self.arm.configuration)	# Set hover over current point
	    currentHover = np.asfarray([current[0], current[1], current[2] + 2.0])
	    print('Current:', self.arm.configuration, currentHover)
	    target1 = self.paper.paperToWorld(wp1) 	# Compute world coordinates of target
	    target2 = self.paper.paperToWorld(wp2) 	# Compute world coordinates of endpoint
	    print('WORLD PTS:', target1, target2)
	    config1 = self.arm.fullIK(target1) 		# Perform full IK on 1st target
	    config1Hover = config1 + np.asfarray([0,0,-20])	# Hover configuration over 1st target
	    config1Hover[2] = config1Hover[2] - 20	# Set height to hover
	    config2 = self.arm.fullIK(target2) 		# Perform full IK on 2nd target
	    
	    print('Configs:', str(config1), str(config1Hover), str(currentHover))
	    current = current.reshape((3,))
	    currentHover = currentHover.reshape((3,))
	    config1Hover = config1Hover.reshape((3,))
	    config1 = config1.reshape((3,))
	    config2 = config2.reshape((3,))
	    print('CONFIG2\n', str(config2))
	    curHoverPts = interpolateLinear(current, currentHover, 10)
	    current = config2
	    configurations = configurations + [currentHover, config1Hover, config1, config2]
	    
	    configurations = configurations + interpolateLinear(currentHover, config1Hover, 50)
	    configurations = configurations + interpolateLinear(config1Hover, config1, 10)
	    configurations = configurations + interpolateLinear(config1, config2, 50)
	    
	return configurations
Example n. 23
def snv_on_chromosome(axis, variants, segments, genes,
                      do_trend, do_boost=False):
    # XXX only set x-limits if not already done for probes/segments
    # setup_chromosome(axis, None, segments, variants,
    #                  0.0, 1.0, "VAF")
    axis.set_ylim(0.0, 1.0)
    axis.set_ylabel("VAF")
    axis.set_xlabel("Position (Mb)")
    axis.get_yaxis().tick_left()
    axis.get_xaxis().tick_top()
    axis.tick_params(which='both', direction='out',
                     labelbottom=False, labeltop=False)

    x_mb = variants["start"] * MB
    if do_boost:
        y = variants.tumor_boost()
    else:
        y = np.asfarray(variants["alt_freq"])
    axis.scatter(x_mb, y, color=POINT_COLOR, alpha=0.3)
    # TODO - highlight genes/selection
    if segments:
        # Draw average VAF within each segment
        posns = np.asfarray(variants["start"]) # * MB
        y = np.asfarray(y)
        for v_start, v_end, v_freq in group_snvs_by_segments(posns, y,
                                                             segments):
            # ENH: color by segment gain/loss
            axis.plot([v_start * MB, v_end * MB], [v_freq, v_freq],
                      color='#C0C0C0', linewidth=2, #zorder=1,
                      solid_capstyle='round')
Example n. 24
  def calcNLL(self, doc, state):
    """Calculates the negative log likelihood of the document, given the relevant information. This is the DocState object again, but this time with the entire state object as well. Probability (Expressed as negative log likelihood.) is specificly calculated using all terms that contain a variable in the document, but none that would be identical for all documents. That is, it contains the probability of the cluster, the probability of the dp given the cluster, and the probability of the samples, which factors in both the drawing of the topic and the drawing of the word. The ordering of the samples is considered irrelevant, with both the topic and word defining uniqueness. Some subtle approximation is made - see if you can spot it in the code!"""
    self.nll = 0.0

    # Probability of drawing the cluster...
    self.nll -= math.log(state.clusterUse[doc.cluster])
    self.nll += math.log(state.clusterUse.sum()+state.clusterConc)


    # Probability of drawing the documents dp from its cluster, taking into account the abnormal entrys...
    cl = state.cluster[doc.cluster]
    logBMN = numpy.log(cl[2] / (cl[2]*numpy.asfarray(doc.behFlags)).sum())

    behInstCounts = numpy.zeros(doc.behFlags.shape[0], dtype=numpy.int32)
    instCounts = numpy.zeros(cl[0].shape[0], dtype=numpy.int32)
    for ii in range(doc.use.shape[0]):
      behInstCounts[doc.use[ii,0]] += 1
      if doc.use[ii,0]==0: instCounts[doc.use[ii,1]] += 1

    self.nll -= (logBMN * behInstCounts).sum()
    self.nll -= scipy.special.gammaln(behInstCounts.sum() + 1.0)
    self.nll += scipy.special.gammaln(behInstCounts + 1.0).sum()

    norm = cl[0][:,1].sum() + cl[1]
    self.nll -= (numpy.log(numpy.asfarray(cl[0][:,1])/norm)*instCounts).sum()
    self.nll -= scipy.special.gammaln(instCounts.sum() + 1.0) # Cancels with a term from the above - can optimise, but would rather have neat code.
    self.nll += scipy.special.gammaln(instCounts + 1.0).sum()


    # Count the numbers of word/topic instance pairs in the data structure - sum using a dictionary...
    samp_count = collections.defaultdict(int) # [instance,word]
    for s in range(doc.samples.shape[0]):
      samp_count[doc.samples[s,0],doc.samples[s,1]] += 1

    # Calculate the probability distribution of drawing each topic instance and the probability of drawing each word/topic assignment...
    inst = numpy.asfarray(doc.use[:,2])
    inst /= inst.sum() + doc.conc
    
    topicWord = numpy.asfarray(state.topicWord) + state.beta
    topicWord = (topicWord.T/topicWord.sum(axis=1)).T

    abnormTopicWord = numpy.asfarray(state.abnormTopicWord) + state.beta
    abnormTopicWord = (abnormTopicWord.T/abnormTopicWord.sum(axis=1)).T

    instLog = numpy.log(inst)
    wordLog = numpy.log(topicWord)
    abnormLog = numpy.log(abnormTopicWord)


    # Now sum into nll the probability of drawing the samples that have been drawn - gets a tad complex as includes the probability of drawing the topic from the documents dp and then the probability of drawing the word from the topic, except I've merged them such that it doesn't look like that is what is happening...
    self.nll -= scipy.special.gammaln(doc.samples.shape[0]+1.0)
    for pair, count in samp_count.items():
      inst, word = pair
      beh = doc.use[inst,0]
      if beh==0:
        topic = cl[0][doc.use[inst,1],0]
        self.nll -= count * (wordLog[topic,word] + instLog[inst])
      else:
        self.nll -= count * (abnormLog[beh,word] + instLog[inst])
      self.nll += scipy.special.gammaln(count+1.0)
Example n. 25
def find_peaks(lpix, indl, indr):
    """
      Given the left and right edges of a line list,
      calculates the peak center by a simple centroid algorithm
    """

    centres = []
    max_flux = []
    wth = []
    for i0,i1 in zip(indl,indr):
        fl = lpix[i0:i1]
        wa = np.arange(i0,i1)
        if not len(wa): continue
        try:
            ew = len(wa)*fl
            ewtot = np.sum(ew)
            wa_ewtot = np.sum(wa * ew)
            center = wa_ewtot / ewtot
        except:
            center = (i1-i0)/2.

        centres.append(center)
        try:
           if i0==i1:
              print('FNDPK00:', i0)
           max_flux.append(max(fl))
        except:
           print('FNDPK:', i0, i1)
        wth.append(abs(i1-i0))

    return np.asfarray(centres),np.asfarray(max_flux),np.asfarray(wth)
Example n. 26
 def __solver__(self, p):
     
     p.xk = p.x0.copy()
     p.fk = asfarray((p.f(p.x0)) ** 2).sum().flatten()
         
     p.iterfcn()
     if p.istop:
         p.xf, p.ff = p.xk, p.fk
         return 
     
     if p.userProvided.df:
         xf, cov_x, infodict, mesg, ier = leastsq(p.f, p.x0, Dfun=p.df, xtol = p.xtol, ftol = p.ftol, maxfev = p.maxFunEvals, full_output = 1)
     else:
         xf, cov_x, infodict, mesg, ier = leastsq(p.f, p.x0, xtol = p.xtol, maxfev = p.maxFunEvals, epsfcn = p.diffInt, ftol = p.ftol, full_output = 1)
     
     if ier == 1: p.istop = 1000
     else: p.istop = -1000
     p.msg = mesg
         
     ff = asfarray((p.f(xf)) ** 2).sum().flatten()
     p.xk = xf
     p.fk = ff
     
     p.xf = xf
     p.ff = ff        
     p.iterfcn()
Example n. 27
    def __init__(self, X=None, Y=None, Z=None, clipboard=False,
                 x_label=None, x_unit=None,
                 y_label=None, y_unit=None,
                 label=None, unit=None):
                    
        self.sp = None

        if isinstance(X, mesh1d):
            self.X = X
        else:
            self.X = mesh1d(np.asfarray(X, dtype='float64'))

        if isinstance(Y, mesh1d):
            self.Y = Y
        else:
            self.Y = mesh1d(np.asfarray(Y, dtype='float64'))

        self.label = label
        self.unit = unit
        self.Z = mesh1d(np.asfarray(Z, dtype='float64'), self.label, self.unit)  # np.array(Z)

        if clipboard is True:
            self.read_clipboard()
            self.__init_sp()
        elif self.Z.shape[0] > 1:
            self.__init_sp()
Example n. 28
def _det(xvert, yvert):
    """
    Compute twice the area of the triangle defined by points using the
    determinant formula.

    Parameters
    ----------
    xvert : array
        A vector of nodal x-coords.
    yvert : array
        A vector of nodal y-coords.

    Returns
    -------
    area : float
        Twice the area of the triangle defined by the points:
            area is positive if points define polygon in anticlockwise order.
            area is negative if points define polygon in clockwise order.
            area is zero if at least two of the points are coincident or if
            all points are collinear.

    """
    xvert = np.asfarray(xvert)
    yvert = np.asfarray(yvert)
    x_prev = np.concatenate(([xvert[-1]], xvert[:-1]))
    y_prev = np.concatenate(([yvert[-1]], yvert[:-1]))
    return np.sum(yvert * x_prev - xvert * y_prev, axis=0)
Example n. 29
    def _cdf(self, xloc, left, right, cache):
        """
        Cumulative distribution function.

        Example:
            >>> print(chaospy.Uniform().fwd([-0.5, 0.5, 1.5, 2.5]))
            [0.  0.5 1.  1. ]
            >>> print(chaospy.Add(chaospy.Uniform(), 1).fwd([-0.5, 0.5, 1.5, 2.5]))
            [0.  0.  0.5 1. ]
            >>> print(chaospy.Add(1, chaospy.Uniform()).fwd([-0.5, 0.5, 1.5, 2.5]))
            [0.  0.  0.5 1. ]
            >>> print(chaospy.Add(1, 1).fwd([-0.5, 0.5, 1.5, 2.5]))
            [0. 0. 0. 1.]
        """
        left = evaluation.get_forward_cache(left, cache)
        right = evaluation.get_forward_cache(right, cache)

        if isinstance(left, Dist):
            if isinstance(right, Dist):
                raise evaluation.DependencyError(
                    "under-defined distribution {} or {}".format(left, right))
        elif not isinstance(right, Dist):
            return numpy.asfarray(left+right <= xloc)
        else:
            left, right = right, left
        xloc = (xloc.T-numpy.asfarray(right).T).T
        output = evaluation.evaluate_forward(left, xloc, cache=cache)
        assert output.shape == xloc.shape
        return output
Example n. 30
def CVXOPT_QP_Solver(p, solverName):
    if solverName == 'native_CVXOPT_QP_Solver': solverName = None
    cvxopt_solvers.options['maxiters'] = p.maxIter
    cvxopt_solvers.options['feastol'] = p.contol
    cvxopt_solvers.options['abstol'] = p.ftol
    cvxopt_solvers.options['reltol'] = 1e-16
    if p.iprint <= 0: 
        cvxopt_solvers.options['show_progress'] = False
        cvxopt_solvers.options['MSK_IPAR_LOG'] = 0
    xBounds2Matrix(p)
    
    f = copy(p.f).reshape(-1,1)
    
    sol = cvxopt_solvers.qp(Matrix(p.H), Matrix(p.f), Matrix(p.A), Matrix(p.b), Matrix(p.Aeq), Matrix(p.beq), solverName)
    
    p.msg = sol['status']
    if p.msg == 'optimal' :  p.istop = 1000
    else: p.istop = -100
    
    
    if sol['x'] is not None:
        p.xf = xf = asfarray(sol['x']).flatten()
        p.ff = asfarray(0.5*dot(xf, p.matMultVec(p.H, xf)) + p.dotmult(p.f, xf).sum()).flatten()
        p.duals = concatenate((asfarray(sol['y']).flatten(), asfarray(sol['z']).flatten()))
    else:
        p.ff = nan
        p.xf = nan*ones(p.n)
Example n. 31
N_stage = [N, 60]
N_cum = np.cumsum(N_stage)
N_tot = np.sum(N_stage)
Tf = 3.
h_0 = float(Tf/N)
Tf_max = 10.
h_min = 0
h_max = float(Tf_max/N)
T = np.arange(0,Tf+h_0,h_0)
T = T.reshape(-1,1)
iter_max = 0
rviz_rate = 30

N_tot = np.sum(N_stage)
Tf0_list = [2, 1]
Tf0_vec = np.asfarray(np.array(Tf0_list,ndmin=2))
Tf_lb = [Tf0_list[0], 0.003]
Tf_lb = np.asfarray(np.array(Tf_lb,ndmin=2))
Tf_ub = [Tf0_list[0], 3]
Tf_ub = np.asfarray(np.array(Tf_ub,ndmin=2))

h0_vec = (Tf0_vec/N_stage).flatten().tolist()
lbh = (Tf_lb/N_stage).flatten().tolist()
ubh = (Tf_ub/N_stage).flatten().tolist()

#   INITIAL CONDITIONS (NUMERICAL)
nj = 2
nq = 3 # number of different q's --> q(t), qdot(t), qddot(t)
q_0 = [30, 130]
q_0 = np.deg2rad(q_0).tolist()
print(q_0)
Example n. 32
def find_com(feature_map):
    com_map = {}
    for digit in feature_map:
        feature_matrix = np.asfarray(feature_map[digit])
        com_map[digit] = np.mean(feature_matrix, axis=0)
    return com_map
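
A minimal usage sketch with made-up feature values:

feature_map = {0: [[1.0, 2.0], [3.0, 4.0]], 1: [[0.0, 0.0]]}
com_map = find_com(feature_map)
print(com_map[0])   # [2. 3.], the per-feature mean over the samples for digit 0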
Example n. 33
training_data_list = training_data_file.readlines()
training_data_file.close()

# train the neural network

# epochs is the number of times the training data set is used for training
epochs = 4000

for e in range(epochs):
    
    # go through all records in the training_data_list
    for record in training_data_list:
        # split the record by the ',' commas
        all_values = record.split(',')
        # scale and shift the inputs
        inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99 ) + 0.01
        # create the target output values (all 0.01 except the desired value, which is 0.99)
        targets = np.zeros(output_nodes) + 0.01
        # all_values[0] is the target label for this record
        targets[int(all_values[0])] = 0.99
        n.train(inputs, targets)
        pass
    pass
    

#Test#
###################################################################################################################################

# load test data
test_data_file = open("/Users/jonas/Desktop/neural_network/strom_train.csv", 'r')
test_data_list = test_data_file.readlines()
Example n. 34
input_nodes = 1024
hidden_nodes = 1000
output_nodes = 36
learning_rate = 0.2

neural = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# load the mnist train data CSV file
training_data = open("train_new_300.csv", 'r')
training_list = training_data.readlines()
training_data.close()

for record in training_list:
    all_values = record.split(',')
    #preprocess the pixels in order to scale them in between 0.01 and 1.00
    inputs = (numpy.asfarray(all_values[1:]) / 255 * 0.99) + 0.01  #1*1024

    #target labels. all values are 0.01 except the correct label which has a value of 0.99
    targets = numpy.zeros(output_nodes) + 0.01
    targets[int(all_values[0])] = 0.99

    #begin the training
    neural.train(inputs, targets)

# load the mnist test data CSV file
test_data = open("test2_seg.csv", 'r')
test_list = test_data.readlines()
test_data.close()

scores = []
for record in test_list:
Example n. 35
# 2018-8-19
# test
# C:\Study\github\Lookoop\Image\Python神经网络\test.py
import numpy
import matplotlib.pyplot
import scipy.special
import os
data_file = open("C:/Study/github/Lookoop/Image/Python神经网络/mnist_dataset/mnist_train_100.csv", "r")
data_list = data_file.readlines()
data_file.close()
value = data_list[0].split(',')
image_array = numpy.asfarray(value[1:]).reshape((28, 28))

matplotlib.pyplot.imshow(image_array, cmap='Greys', interpolation="None")
matplotlib.pyplot.show()
Example n. 36
def main(args):
    args = parse_args()
    # load the mnist dataset from csv files
    image_size = 28  # width and length of mnist image
    num_labels = 10  #  i.e. 0, 1, 2, 3, ..., 9
    image_pixels = image_size * image_size
    train_data = np.loadtxt("./mnist_train.csv", delimiter=",")
    test_data = np.loadtxt("./mnist_test.csv", delimiter=",")

    # rescale image from 0-255 to 0-1
    fac = 1.0 / 255
    train_imgs = np.asfarray(train_data[:50000, 1:]) * fac
    val_imgs = np.asfarray(train_data[50000:, 1:]) * fac
    test_imgs = np.asfarray(test_data[:, 1:]) * fac
    train_labels = np.asfarray(train_data[:50000, :1])
    val_labels = np.asfarray(train_data[50000:, :1])
    test_labels = np.asfarray(test_data[:, :1])
    # convert labels to one-hot-key encoding
    train_labels = get_one_hot(train_labels, num_labels)
    val_labels = get_one_hot(val_labels, num_labels)
    test_labels = get_one_hot(test_labels, num_labels)

    print(train_imgs.shape)
    print(train_labels.shape)
    print(val_imgs.shape)
    print(val_labels.shape)
    print(test_imgs.shape)
    print(test_labels.shape)

    dataset = [[train_imgs, train_labels], [val_imgs, val_labels],
               [test_imgs, test_labels]]

    # These are only examples of parameters you can start with
    # you can tune these parameters to improve the performance of your MLP
    # this is the only part you need to change in main() function
    hiddens = [200, 80]
    # activations = [Tanh(),Tanh()]
    activations = [ReLU(), ReLU()]
    # activations = [Sigmoid(), Sigmoid()]
    lr = 0.001
    num_epochs = 100
    batch_size = 32

    # build your MLP model
    mlp = MLP(input_size=image_pixels,
              output_size=num_labels,
              hiddens=hiddens,
              activations=activations,
              weight_init_fn=random_normal_weight_init,
              bias_init_fn=zeros_bias_init,
              criterion=SoftmaxCrossEntropy(),
              lr=lr)

    if (args.just_eval == 0):
        # train the neural network
        losses = get_training_stats(mlp, dataset, num_epochs, batch_size)

        # save the parameters
        mlp.save_model()

        # visualize the training and validation loss with epochs
        training_losses, training_errors, validation_losses, validation_errors = losses

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(16, 9))

        ax1.plot(training_losses, color='blue', label="training")
        ax1.plot(validation_losses, color='red', label='validation')
        ax1.set_title('Loss during training')
        ax1.set_xlabel('epoch')
        ax1.set_ylabel('loss')
        ax1.legend()

        ax2.plot(training_errors, color='blue', label="training")
        ax2.plot(validation_errors, color='red', label="validation")
        ax2.set_title('Error during training')
        ax2.set_xlabel('epoch')
        ax2.set_ylabel('error')
        ax2.legend()

        plt.savefig("p1.png")
        # plt.show()
    else:
        mlp.load_model(args.model_path)
        test_model(mlp, dataset, batch_size)
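
main() also relies on a get_one_hot helper that is not shown here; a minimal sketch of what it presumably does (hypothetical implementation, assuming numpy as np):

def get_one_hot(labels, num_classes):
    # labels: (N, 1) array of class indices -> (N, num_classes) one-hot matrix
    idx = labels.astype(int).ravel()
    one_hot = np.zeros((idx.size, num_classes))
    one_hot[np.arange(idx.size), idx] = 1.0
    return one_hot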
Example n. 37
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    return np.sum(r / np.log2(np.arange(2, r.size + 2)))
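
For example, with graded relevance scores for a ranked list (assuming numpy as np):

print(dcg_at_k([3, 2, 3, 0, 1], 3))   # 3/1 + 2/log2(3) + 3/2 = 5.7619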
Example n. 38
 if os.path.isfile(args.outFilenameStats):
     print("outFilenameStats found")
     with open(args.outFilenameStats) as f:
         for line in f:
             basename = line.split()[0]
             basename = ".".join("_".join(
                 basename.split("_")[1:]).split(".")[:-1])
             if args.expLabel is None:
                 isTrue = line.split()[1]
                 outFilenameStats_dict[basename] = isTrue
             else:
                 ExpectedProb = line.split('[')[-1]
                 ExpectedProb = ExpectedProb.split(']')[0]
                 ExpectedProb = ExpectedProb.split()
                 ExpectedProb = np.array(ExpectedProb)
                 ExpectedProb = np.asfarray(ExpectedProb, float)
                 # print(ExpectedProb)
                 # print("labels exp/true:%s, %d" % (args.expLabel, ExpectedProb.argmax()))
                 if args.threshold is None:
                     #outFilenameStats_dict[basename] = str(int(args.expLabel) == ExpectedProb.argmax())
                     tmp = args.expLabel
                     outFilenameStats_dict[basename] = str(
                         str(ExpectedProb.argmax()) in tmp.split(','))
                 else:
                     tmpT = args.threshold
                     tmpL = args.expLabel
                     tmpR = 'False'
                     tmpT = tmpT.split(',')
                     tmpL = tmpL.split(',')
                     for nL in range(len(tmpL)):
                         # print(ExpectedProb[ int(tmpL[nL]) ], float(tmpT[nL]))
Example n. 39
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    if r.size:
        return np.sum(
            np.subtract(np.power(2, r), 1) / np.log2(np.arange(2, r.size + 2)))
    return 0.
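
The same ranking under this exponential gain, which rewards highly relevant items more heavily:

print(dcg_at_k([3, 2, 3, 0, 1], 3))   # (2**3-1)/1 + (2**2-1)/log2(3) + (2**3-1)/2 = 12.3928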
Example n. 40
 def __eq__(self, other: Union[Tuple[float, ...], np.ndarray]):
     if not isinstance(other, (list, tuple, np.ndarray)):
         return NotImplemented
     return np.isclose(np.asfarray(self), np.asfarray(other)).all()
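
The same tolerance-based comparison idiom outside the class, assuming numpy as np:

a = (1.0, 2.0, 3.0)
b = np.array([1.0 + 1e-9, 2.0, 3.0])
print(np.isclose(np.asfarray(a), np.asfarray(b)).all())   # True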
Example n. 41
        point_names = [
            "x left", "y left", "x peak", "y peak", "x trough", "y trough",
            "x_right", "y_right"
        ]

        #skip if the peak is in the first 20 pixels or the signal is all negative
        if peak_px > 20 and np.max(y_selected) > 0:

            variables["hdf5_filename"].append(hdf5_filename)
            variables["temperatures"].append(temperature)
            variables["y_fit"].append(y_fit)
            variables["y"].append(y_selected)
            variables["colours"].append(colour)
            variables["point_xys"].append(point_xy)

temperatures = variables["temperatures"] = np.asfarray(
    variables["temperatures"])
colours = variables["colours"] = np.asfarray(variables["colours"])
points = variables["point_xys"] = np.asfarray(variables["point_xys"])
y_fit = variables["y_fit"] = np.asfarray(variables["y_fit"])
y = variables["y"] = np.asfarray(variables["y"])

#test initial polynomial fit on some spectra

for file_index in range(0, len(temperatures), int(len(temperatures) / 10.)):

    temperature = temperatures[file_index]
    axes3[0].plot(pixels,
                  y[file_index, :] + file_index / 100,
                  color=colours[file_index],
                  label="%0.1f" % temperature)
    axes3[0].plot(pixels,
Example n. 42
#-------------------------------
#a Python program to reverse an array 
x = np.arange(12, 38)
print("Original array:")
print(x)
print("Reverse array:")
x = x[::-1]
print(x)

#-------------------------------
#a Python program to convert an array to a float type
a = [1, 2, 3, 4]
print("Original array")
print(a)
x = np.asfarray(a)
print("Array converted to a float type:")
print(x)

#-------------------------------
#a Python program to create a 2d array with 1 on the border and 0 inside.
x = np.ones((5,5))
print("Original array:")
print(x)
print("1 on the border and 0 inside in the array")
x[1:-1,1:-1] = 0
print(x)
Example n. 43
from PIL import Image
from itertools import combinations
import cv2
import numpy as np

alphas = []
for i in range(1, 4):
    imgpath = '../pics/5-alpha%01d.png' % i
    arr = np.asfarray(Image.open(imgpath))
    alphas.append(arr)

a = list(combinations(range(3), 2))
for i in range(len(a)):
    arr = alphas[a[i][0]] / 255.0 * alphas[a[i][1]] / 255.0
    arr = arr > 0.2
    arr = np.asarray(arr * 255, dtype='uint8')
    kernel = np.ones((2, 2), np.uint8)
    #arr = cv2.erode(arr, kernel, iterations=1)
    #arr = cv2.dilate(arr, kernel, iterations=1)
    img = Image.fromarray(arr)
    #img.show()
    img.save('out%2d.png' % i)
Example n. 44
    # load the test data
    test_data_file = open('mnist_dataset/mnist_test.csv')
    test_data_list = test_data_file.readlines()
    test_data_file.close()

    # training
    epoch = 10
    for e in range(epoch):
        print('#epoch ', e)
        data_size = len(training_data_list)
        for i in range(data_size):
            if i % 1000 == 0:
                print('  train: {0:>5d} / {1:>5d}'.format(i, data_size))
            val = training_data_list[i].split(',')
            idata = (np.asfarray(val[1:]) / 255.0 * 0.99) + 0.01
            tdata = np.zeros(onodes) + 0.01
            tdata[int(val[0])] = 0.99
            nn.backprop(idata, tdata)
            pass
        pass

    # test
    scoreboard = []
    for record in test_data_list:
        val = record.split(',')
        idata = (np.asfarray(val[1:]) / 255.0 * 0.99) + 0.01
        tlabel = int(val[0])
        predict = nn.feedforward(idata)
        plabel = np.argmax(predict)
        scoreboard.append(tlabel == plabel)
Example n. 45
            #Accuracies for test and train data for epoch 0
            correctly_classified = 0
            training_accuracy(correctly_classified, weights, len(traindata))
            correctly_classified = 0
            testing_accuracy(correctly_classified, weights, len(testdata))
        else:

            correctly_classified = 0
            for ldata in traindata:  #Accessing the test data
                xi = (ldata.strip()).split(
                    ",")  # Accessing each pixels of a test data
                # First column in a test example is actual target
                target = int(xi[0])
                xi = xi[1:]
                #preprocessing
                in1 = ((np.asfarray(xi)) / 255)
                # Training the data
                updated_weights = training(in1, weights, target)
                # updated_weights
                weights = updated_weights
            #computing accuracy of both test data and train data after each epoch
            training_accuracy(correctly_classified, weights, len(traindata))
            correctly_classified = 0
            testing_accuracy(correctly_classified, weights, len(testdata))
        epochs.append(e)
    compute_confusion_matrix(weights)

    # Graph of train data and test data with accuracy and epoch in y and x axis for various learning rates is plotted
    s += 1
    plot.figure(1, figsize=(10, 8))
    plot.subplot(s)
Example n. 46
 def test_asfarray_none(self):
     # Test for changeset r5065
     assert_array_equal(np.array([np.nan]), np.asfarray([None]))
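
The behaviour under test, as a one-liner (assuming numpy as np):

print(np.asfarray([None]))   # [nan], None is cast to float nan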
Example n. 47
def quad_leja(
    order,
    dist,
    rule="fejer",
    accuracy=100,
    recurrence_algorithm="",
):
    """
    Generate Leja quadrature nodes.

    Args:
        order (int):
            The order of the quadrature.
        dist (chaospy.distributions.baseclass.Distribution):
            The distribution which density will be used as weight function.
        rule (str):
            In the case of ``lanczos`` or ``stieltjes``, defines the
            proxy-integration scheme.
        accuracy (int):
            In the case ``rule`` is used, defines the quadrature order of the
            scheme used. In practice, must be at least as large as ``order``.
        recurrence_algorithm (str):
            Name of the algorithm used to generate abscissas and weights. If
            omitted, ``analytical`` will be tried first, and ``stieltjes`` used
            if that fails.

    Returns:
        (numpy.ndarray, numpy.ndarray):
            abscissas:
                The quadrature points for where to evaluate the model function
                with ``abscissas.shape == (len(dist), N)`` where ``N`` is the
                number of samples.
            weights:
                The quadrature weights with ``weights.shape == (N,)``.

    Example:
        >>> abscissas, weights = quad_leja(3, chaospy.Normal(0, 1))
        >>> abscissas.round(4)
        array([[-2.7173, -1.4142,  0.    ,  1.7635]])
        >>> weights.round(4)
        array([0.022 , 0.1629, 0.6506, 0.1645])
    """
    if len(dist) > 1:
        if dist.stochastic_depedent:
            raise chaospy.StochasticallyDependentError(
                "Leja quadrature do not supper distribution with dependencies."
            )
        if isinstance(order, int):
            out = [quad_leja(order, _) for _ in dist]
        else:
            out = [quad_leja(order[_], dist[_]) for _ in range(len(dist))]

        abscissas = [_[0][0] for _ in out]
        weights = [_[1] for _ in out]
        abscissas = combine(abscissas).T
        weights = combine(weights)
        weights = numpy.prod(weights, -1)

        return abscissas, weights

    abscissas = [dist.lower, dist.mom(1).flatten(), dist.upper]
    for _ in range(int(order)):

        def objective(abscissas_):
            """Local objective function."""
            out = -numpy.sqrt(dist.pdf(abscissas_)) * numpy.prod(
                numpy.abs(abscissas[1:-1] - abscissas_))
            return out

        def fmin(idx):
            """Bound minimization."""
            try:
                x, fx = fminbound(objective,
                                  abscissas[idx],
                                  abscissas[idx + 1],
                                  full_output=1)[:2]
            except UnboundLocalError:
                x = abscissas[idx] + 0.5 * (3 - 5**0.5) * (abscissas[idx + 1] -
                                                           abscissas[idx])
                fx = objective(x)
            return x, fx

        opts, vals = zip(*[fmin(idx) for idx in range(len(abscissas) - 1)])
        index = numpy.argmin(vals)
        abscissas.insert(index + 1, opts[index])

    abscissas = numpy.asfarray(abscissas).flatten()[1:-1]
    weights = create_weights(abscissas, dist, rule, accuracy,
                             recurrence_algorithm)
    abscissas = abscissas.reshape(1, abscissas.size)

    return numpy.asfarray(abscissas), numpy.asfarray(weights).flatten()
Example n. 48
 def read_data(self, path):
     file_path = os.path.join(self.data_path, path)
     data = np.asfarray(Image.open(file_path))
     if self.normalize_data:
         data = (data - self.mean) / self.stddev
     return data
Example n. 49
    row = int(filename.split(
        '_r', 1)[1].split('_c')[0]) - 1  #since rows and cols are 1-based
    col = int(filename.split('_r', 1)[1].split('_c')[1].split('.')
              [0]) - 1  #since rows and cols are 1-based
    data = np.load(filename, allow_pickle=True)
    area = data['areas']
    intensity = data['intensities']
    XY = data['centroids']
    solidity = data['solidities']
    eccentricity = data['eccentricities']
    perimeter = data['perimeters']
    if XY.shape[0] > 0:
        #shift the centroids to account for the fov size
        representation = np.asfarray([
            (row, col, XY[ind, 0] + int(fovsize) * col,
             XY[ind, 1] + int(fovsize) * row, area[ind], intensity[ind],
             solidity[ind], eccentricity[ind], perimeter[ind])
            for ind in range(len(XY)) if area[ind] > 1
        ])
        RCs.append(representation[:, :2])  #only rows and cols
        XYs.append(representation[:, 2:4])  #only XY
        As.append(representation[:, 4:5])  #only area
        Is.append(representation[:, 5:6])  #only intensity
        Ss.append(representation[:, 6:7])
        Es.append(representation[:, 7:8])
        Ps.append(representation[:, 8:9])
RC = np.vstack(RCs)  # the full RC data as array
XY = np.vstack(XYs)  # the full XY data as array
A = np.vstack(As)  # the full area data as array
I = np.vstack(Is)  # the full intensity data as array
S = np.vstack(Ss)
E = np.vstack(Es)
Example n. 50
    print('Applied current = {:.3f} [mA]'.format(Amps))
    print('Number of beats = {:d}'.format(NB))
    print('Carrier frequency = {:.0f} [Hz] and Beat frequency = {:.0f} [Hz]'.format(Carrier,Beat))

    # The location where the test pulse is sent
    InjSite=0
    # The time that the test pulse is sent
    InjTime=2.5*Cycle
    print('Sending a test pulse at node {:d} and {:.2f} ms'.format(InjSite,InjTime))
    
    MyCell=model(Duration,Carrier,Beat,Amps/1000,Y/1000,InjTime,InjSite)

    Results=MyCell._recordings

    TestSite=100
    T=np.asfarray(Results['t'])
    V1=np.asfarray(Results['node['+str(InjSite)+'](0.5).v'])
    V2=np.asfarray(Results['node['+str(TestSite)+'](0.5).v'])
    plt.subplot(2,1,1)
    plt.plot(T,V1)
    plt.ylabel('Membrane potential [mV]')
    plt.title('Node '+str(InjSite))

    plt.subplot(2,1,2)
    plt.plot(T,V2)
    plt.ylabel('Membrane potential [mV]')
    plt.xlabel('Time [ms]')
    plt.title('Node '+str(TestSite))

    plt.show()
Example n. 51
def mergeDistributions(d1, d2, operation):
    #assert isinstance(d1, stochasticDistribution), 'unimplemented yet'
    is_d1_stoch = isinstance(d1, stochasticDistribution)
    is_d2_stoch = isinstance(d2, stochasticDistribution)
    
    if is_d1_stoch and type(d2) == multiarray:
        #return np.array([mergeDistributions(d1, elem, operation) for elem in np.atleast_1d(d2)]).view(multiarray)
        return np.array([mergeDistributions(d1, elem, operation) for elem in \
                         (d2 if d2.ndim > 1 else d2.reshape(-1, 1)).view(np.ndarray)]).view(multiarray)
    
    if is_d2_stoch and type(d1) == multiarray:
        #return np.array([mergeDistributions(elem, d2, operation) for elem in np.atleast_1d(d1)]).view(multiarray)
        return np.array([mergeDistributions(elem, d2, operation) for elem in \
                         (d1 if d1.ndim > 1 else d1.reshape(-1, 1)).view(np.ndarray)]).view(multiarray)
        
    
    if is_d1_stoch and is_d2_stoch:
        if not hasattr(d1, 'stochDep') or not hasattr(d2, 'stochDep'):
            distrib_err_fcn()
    
    cond_same_stoch = is_d2_stoch and is_d1_stoch and set(d1.stochDep.keys()) == set(d2.stochDep.keys())
    if not is_d1_stoch or not is_d2_stoch or cond_same_stoch:
        if not is_d2_stoch: # thus d1 is stoch
            d2 = np.asfarray(d2) if operation == operator.truediv \
            or (hasattr(operator, 'div')  and operation == operator.div) and not isinstance(d2, ooarray) else np.asanyarray(d2)
            #assert d2.size == 1, 'unimplemented for size > 1 yet'
            vals2 = d2.reshape(1, -1) if d2.size > 1 else d2
            vals1 = d1.values
            distribType = d1.distribType
        elif not is_d1_stoch: # thus d2 is stoch
            d1 = np.asfarray(d1) if operation == operator.truediv \
            or (hasattr(operator, 'div')  and operation == operator.div) and not isinstance(d1, ooarray) else np.asanyarray(d1)
            assert d1.size == 1, 'unimplemented for size > 1 yet'
            vals1 = d1.reshape(1, -1) if d1.size > 1 else d1
            vals2 = d2.values
            distribType = d2.distribType
        else:#cond_same_stoch
            vals1 = d1.values
            vals2 = d2.values
            distribType = d1.distribType if d1.distribType == d2.distribType else 'undefined'
            
        Vals = operation(vals1, vals2) 
        
        r = stochasticDistribution(Vals.flatten(), 
                                 d1.probabilities.copy() if is_d1_stoch else d2.probabilities.copy(), 
                                 distribType)
        if is_d1_stoch and is_d2_stoch:
            r.stochDep = d1.stochDep.copy()
            for key, val in d2.stochDep.items():
                if key in r.stochDep:
                    r.stochDep[key] += val
                else:
                    r.stochDep[key] = val
        elif is_d1_stoch:
            r.stochDep = d1.stochDep.copy()
        else:
            if not is_d2_stoch: raise FuncDesignerException('bug in FuncDesigner kernel')
            r.stochDep = d2.stochDep.copy()
        #!!!!!!!!!!!! TODO: getOrder (for linear probs)
        
    else:
        f = lambda D1, D2:\
            operation(
                          D1.reshape(-1, 1), 
                          D2 if operation != operator.truediv \
                          or isinstance(D2, (oofun, ooarray)) \
                          or isinstance(D1, (oofun, ooarray))  \
                          else np.asfarray(D2) \
                          ).reshape(1, -1)

        distribType = d1.distribType if d1.distribType == d2.distribType else 'undefined'
        F = f(d1.values, d2.values)
        
        if np.all(d1.probabilities == d1.probabilities[0]) and np.all(d2.probabilities == d2.probabilities[0]):
            Probabilities = np.empty(d1.probabilities.size*d2.probabilities.size)
            Probabilities.fill(d1.probabilities[0] * d2.probabilities[0])
        else:
            Probabilities = (d1.probabilities.reshape(-1, 1) * d2.probabilities.reshape(1, -1)).flatten()
        
        r = stochasticDistribution(F.flatten(), Probabilities, distribType)
        
        # adjust stochDep
        if len(set(d1.stochDep.keys()) & set(d2.stochDep.keys())) != 0 and len(set(d1.stochDep.keys()) | set(d2.stochDep.keys())) > 1:
#            print(d1.stochDep.keys())
#            print(d2.stochDep.keys())
#            print(set(d1.stochDep.keys()) | set(d2.stochDep.keys()))
            raise FuncDesignerException('''
            This stochastic function has a structure that OpenOpt Suite cannot handle yet.
            If a gradient-based solver is involved, switching to a derivative-free one
            (e.g. scipy_cobyla, de, bobyqa) can sometimes succeed.
            ''')
        stochDep = d1.stochDep.copy()
        for key, val in d2.stochDep.items():
            if key in stochDep:
                stochDep[key] += val
            else:
                stochDep[key] = val
        r.stochDep = stochDep
        
    if is_d1_stoch:
        m1 = getattr(d1, 'maxDistributionSize', 0)
    else:
        m1 = 0
    if is_d2_stoch:
        m2 = getattr(d2, 'maxDistributionSize', 0)
    else:
        m2 = 0
    N = max((m1, m2))
    if N == 0:
        s = '''
            if one of the function arguments is a stochastic distribution
            that has not been resolved into a quantified value
            (e.g. uniform(-10, 10) instead of uniform(-10, 10, 100),
            where 100 is the number of points used to emulate it),
            then you should evaluate the function
            on an oopoint with the parameter maxDistributionSize assigned'''
        raise FuncDesignerException(s)
    r = r.reduce(N)
    r.maxDistributionSize = N

    if is_d1_stoch and hasattr(d1, '_p'):
        r._p = d1._p
    elif is_d2_stoch and hasattr(d2, '_p'):
        r._p = d2._p
        
#    if operation == operator.mul:
#        r._is_product = True
#        r._product_elements = [self, other]
    return r
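# A standalone numpy sketch (no FuncDesigner types; the names vals1, probs1,
# etc. are illustrative, not part of the library) of what the
# independent-distribution branch above computes: every pairwise combination
# of values, with the corresponding probabilities multiplied.
import operator
import numpy as np

vals1, probs1 = np.array([1.0, 2.0]), np.array([0.5, 0.5])
vals2, probs2 = np.array([10.0, 20.0, 30.0]), np.array([0.2, 0.3, 0.5])
merged_vals = operator.add(vals1.reshape(-1, 1), vals2.reshape(1, -1)).flatten()
merged_probs = (probs1.reshape(-1, 1) * probs2.reshape(1, -1)).flatten()
assert np.isclose(merged_probs.sum(), 1.0)  # still a valid distribution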
Esempio n. 52
0

import numpy

# Set the path in open() according to where the data is stored
training_data_file = open('mnist/train.csv')
trainning_data_list = training_data_file.readlines()
print(len(trainning_data_list))
training_data_file.close()
# Split each record on ',' and read the fields in separately
trainning_list = trainning_data_list[900:1101]
dataArr = []
labelArr = []
for record in trainning_list:
    all_train_values = record.split(',')
    #print(all_train_values)
    inputs = numpy.sign(
        (numpy.asfarray(all_train_values[1:])) / 255.0 * 0.99)
    # Map each image to its numeric value
    # A major bug occurred here before: the data types did not match
    if int(all_train_values[0]) == 0:  # the first field is the label
        labels = 1
    else:
        labels = -1
    # Be very careful here: this is 2-D data, so always mind the row/column indices
    dataArr.append(inputs)
    labelArr.append(labels)

print(numpy.shape(dataArr)[0], numpy.shape(dataArr)[1])  # number of samples and of features
print(numpy.shape(labelArr)[0])  # number of labels
print(labelArr)
######## train on the data ########
trainDigits(dataArr, labelArr, kTup=('rbf', 10))
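# trainDigits is not defined in this snippet; kTup=('rbf', 10) suggests it
# follows the SMO/SVM digit-classification example from "Machine Learning in
# Action", where the tuple selects a Gaussian (RBF) kernel with bandwidth 10.
# A minimal sketch of such a kernel, under that assumption (the function name
# rbf_kernel_row is illustrative):
def rbf_kernel_row(X, x, sigma):
    # K_i = exp(-||X_i - x||^2 / sigma^2) for every row X_i of X
    diff = X - x
    return numpy.exp(-numpy.sum(diff * diff, axis=1) / sigma ** 2)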
Esempio n. 53
0
#create instance of neural network
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# In[3]:

#import the csv mnist dataset file for training the NeuralNetwork
training_data_file = open("mnist_dataset/mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()

#training Neural Network

#go through all records in the training data set
for record in training_data_list:
    all_values = record.split(',')
    inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01  # scale to the range 0.01-1.00
    targets = np.zeros(output_nodes) + 0.01
    targets[int(all_values[0])] = 0.99
    n.train(inputs, targets)
    pass

# In[4]:

#test neural network

#import the mnist test data set
test_data_file = open("mnist_dataset/mnist_test.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()

all_values = test_data_list[0].split(',')
Esempio n. 54
0
n = neuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

# load the mnist training data CSV file into a list
training_data_file = open("mnist_train.csv", 'r')
training_data_list = training_data_file.readlines()
training_data_file.close()

# train the neural network

# go through all records in the training data set
for record in training_data_list:
    # split the record on the ',' commas
    all_values = record.split(',')

    # scale and shift the inputs
    inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    # create the target output values (all 0.01,
    # except the desired label, which is 0.99)
    targets = np.zeros(output_nodes) + 0.01

    # all_values[0] is the target label for this record
    targets[int(all_values[0])] = 0.99
    n.train(inputs, targets)
    pass

## test the network
#load the mnist test data CSV file into a list
test_data_file = open("mnist_test_10.csv", 'r')
test_data_list = test_data_file.readlines()
test_data_file.close()
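# The test set is loaded above but never scored. A minimal sketch of the usual
# scoring loop, assuming the neuralNetwork class exposes a query() method that
# returns one output per class (as in the common "make your own neural
# network" implementation this snippet appears to follow):
scorecard = []
for record in test_data_list:
    all_values = record.split(',')
    correct_label = int(all_values[0])
    inputs = (np.asfarray(all_values[1:]) / 255.0 * 0.99) + 0.01
    outputs = n.query(inputs)
    predicted_label = np.argmax(outputs)
    scorecard.append(1 if predicted_label == correct_label else 0)
print('performance =', sum(scorecard) / len(scorecard))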
Esempio n. 55
0
import numpy as np
import cv2

img = cv2.imread('E:\\Flight_2_13_2015\\NDVI\\IMG_1817.JPG', cv2.IMREAD_COLOR)
b,g,r = cv2.split(img)
stretch = 1                       #this one stretches the bands in case they are too dim
min_sat = 0                       #used to define the minimum value for NDVI
max_sat = 0.5                     #used to define the maximum value for NDVI
NIR = np.asfarray(r)
BLU = np.asfarray(b)
NDVI = ((NIR-BLU)/(NIR+BLU))
NDVI[NDVI < min_sat] = min_sat          #filters minimum values
NDVI[NDVI > max_sat] = max_sat          #filters maximum values
NDVI = NDVI - min_sat
NDVI = stretch*(255/(max_sat-min_sat))*NDVI     #transforms the rest of the information into a 255-based system (byte)
NDVI_i = NDVI.astype('uint8')   #Needed to actually show the picture
cv2.imwrite('resultgray.png',NDVI_i) #Saving the image after operations have been conducted
cv2.imshow('image',NDVI_i)
cv2.waitKey(0)
cv2.destroyAllWindows()
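# Note, as a defensive sketch: (NIR-BLU)/(NIR+BLU) divides by zero for pixels
# that are zero in both bands. One variant that avoids this (np.errstate
# silences the warning, np.where supplies a fallback of 0 for those pixels;
# NDVI_safe is an illustrative name):
with np.errstate(divide='ignore', invalid='ignore'):
    denom = NIR + BLU
    NDVI_safe = np.where(denom != 0, (NIR - BLU) / denom, 0.0)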
Esempio n. 56
0
table3.index = (table3.index - 2) // 18

table4 = table4.drop([i for i in table4.index if (i-12)%18 != 0])
table4 = table4.drop(columns = table4.columns[0:2])
table4.index = (table4.index - 12) // 18

table5 = table5.drop([i for i in table5.index if (i-10)%18 != 0])
table5 = table5.drop(columns = table5.columns[0:2])
table5.index = (table5.index - 10) // 18

pm10 = np.array(table).reshape([260, 9])
pm25 = np.array(table2).reshape([260, 9])
co = np.array(table3).reshape([260, 9])
so2 = np.array(table4).reshape([260, 9])
rf = np.array(table5).reshape([260, 9])
pm10 = np.asfarray(pm10, float)
pm25 = np.asfarray(pm25, float)
co = np.asfarray(co, float)
so2 = np.asfarray(so2, float)
rf = np.asfarray(rf, float)
#traing6 = np.asfarray(traing6, float)
for i in range(260):
    for j in range(9):
        if pm10[i][j] == 0:
            if j == 0:
                pm10[i][j] = (pm10[i-1][8]+pm10[i-1][7])/2
            elif j == 1:
                pm10[i][j] = (pm10[i][j-1]+pm10[i-1][8])/2
            else:
                pm10[i][j] = (pm10[i][j-2]+pm10[i][j-1])/2
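# The loop above patches zero readings with the mean of the two preceding
# values. A sketch of an alternative (assuming zeros only ever mark missing
# readings) that treats them as NaN and interpolates along the flattened
# series with pandas:
import pandas as pd
flat = pd.Series(pm10.flatten()).replace(0, np.nan).interpolate(limit_direction='both')
pm10_filled = flat.to_numpy().reshape(260, 9)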
Esempio n. 57
0
def dcg_at_k(r, k):
    r = np.asfarray(r)[:k]
    if r.size:
        return r[0] + np.sum(r[1:] / np.log2(np.arange(2, r.size + 1)))
    return 0.
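# For context, a common companion sketched here (ndcg_at_k is not part of the
# snippet above): DCG normalized by the DCG of the ideally ordered relevance
# list, so the score falls in [0, 1].
def ndcg_at_k(r, k):
    dcg_max = dcg_at_k(sorted(r, reverse=True), k)
    if not dcg_max:
        return 0.
    return dcg_at_k(r, k) / dcg_max

# e.g. ndcg_at_k([3, 2, 3, 0, 1, 2], 6)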
Esempio n. 58
0
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np

qb = pd.read_csv("fantasy football - QB.csv")
wr = pd.read_csv("fantasy football - WR.csv")
te = pd.read_csv("fantasy football - TE.csv")
rb = pd.read_csv("fantasy football - RB.csv")

all_pos = [qb,wr,te,rb]
all_pos_fpts = []
names = {0:"QB",1:"WR", 2: "TE",3:"RB"}
every_ten_mean = {"QB": [],"WR": [],"TE": [],"RB": []}

for i in all_pos:
    # drop the "--" placeholders, then convert the remaining FPTS entries to float
    fpts = np.asfarray([float(x) for x in i['FPTS'] if x != "--"])
    all_pos_fpts.append(fpts)
    print(f"Length: {fpts.shape[0]}")
    print(f"Mean: {np.mean(fpts)}")
    print(f"Standard Deviation: {np.std(fpts)}")

pos_counter = 0
for pos in all_pos_fpts:
    counter = 1
    a = []
    for j in range(pos.shape[0]):
        a.append(pos[j])
        if counter % 10 == 0:
            # store the mean of each completed group of ten scores
            every_ten_mean[names[pos_counter]].append(np.mean(a))
            a = []
        counter += 1
    pos_counter += 1
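# The same grouping can be written directly with numpy; a sketch that averages
# each complete group of ten scores (any trailing partial group is dropped):
for idx, pos in enumerate(all_pos_fpts):
    n_full = (pos.shape[0] // 10) * 10
    group_means = pos[:n_full].reshape(-1, 10).mean(axis=1)
    print(names[idx], group_means)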
Esempio n. 59
0
#
#m1 = np.logical_and(mask1,mask2)
#m2 = np.logical_and(mask3,mask4)
#
#mask = np.logical_and(m1,m2)
#
#mask = np.invert(mask)
#
#data[mask] = 0
#wave[mask] = 0

########################################## PREPARING DATA ##############################################

stars = list(set(starlist)) # list of unique stars
stars = np.char.replace(stars,'Star','') # removes the text prefix so the values convert to float
stars = set(np.asfarray(stars)) # converts to a set of floats

print('\n\nStars:',sorted(stars),'\n')

ext_stars = stars - set(remove) # set algebra to remove stars
ext_stars = np.asfarray(list(ext_stars)) # convert to array

stars = np.asfarray(list(stars)) # converts to array

starlist = np.char.replace(starlist,'Star','') 
starlist = np.asfarray(starlist)

################################### PLOT SAMPLE SPECTRA ################################################

#plt.figure(figsize=[10,5])
#
Esempio n. 60
0
import numpy
from matplotlib import pyplot as plt

with open('../datasets/mnist_train_100.csv') as data_file:
    first_image_values = data_file.readline().split(',')
    print(f'Depicted number is {first_image_values[0]}')
    image_array = numpy.asfarray(first_image_values[1:]).reshape((28, 28))
    plt.imshow(image_array, cmap='Greys', interpolation='None')
    plt.show()