Example #1
def plot_sphere(radius=1, fig=None, ax=None):
    """Plots a sphere given a radius

    Args:
        radius: The radius of the sphere.

        fig: The figure onto which the axis is plotted. If None is passed a new
                figure is made.

        ax: The axis onto which the sphere is plotted. If None is passed a new
                axis is made.

    Returns:
        None
    """
    import numpy as np
    import matplotlib.pyplot as plt
    if fig is None:
        fig = plt.figure()
    if ax is None:
        ax = fig.add_subplot(111, projection='3d')
    # Source: http://matplotlib.org/examples/mplot3d/surface3d_demo2.html
    phi = np.linspace(0, 2 * np.pi, 100)
    theta = np.linspace(0, np.pi, 100)
    x = radius * np.outer(np.cos(phi), np.sin(theta))
    y = radius * np.outer(np.sin(phi), np.sin(theta))
    z = radius * np.outer(np.ones(np.size(phi)), np.cos(theta))
    ax.plot_wireframe(x, y, z, rstride=5, cstride=5, color='y')
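A minimal usage sketch (not part of the original example; it assumes matplotlib with the mplot3d toolkit is installed):

import matplotlib.pyplot as plt

plot_sphere(radius=2)  # creates its own figure and 3-D axis when none are passed
plt.show()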
Example #2
	def updateParameters(self, articlePicked, click,  userID):	
		self.counter +=1
		self.Wlong = vectorize(self.W)
		featureDimension = len(articlePicked.featureVector)
		T_X = vectorize(np.outer(articlePicked.featureVector, self.W.T[userID])) 
		self.A += np.outer(T_X, T_X)	
		self.b += click*T_X
		self.AInv = np.linalg.inv(self.A)
		self.UserTheta = matrixize(np.dot(self.AInv, self.b), len(articlePicked.featureVector)) 

		Xi_Matrix = np.zeros(shape = (featureDimension, self.userNum))
		Xi_Matrix.T[userID] = articlePicked.featureVector
		W_X = vectorize( np.dot(np.transpose(self.UserTheta), Xi_Matrix))
		self.batchGradient +=evaluateGradient(W_X, click, self.Wlong, self.lambda_, self.regu  )

		if self.counter%self.windowSize ==0:
			self.Wlong -= 1/(float(self.counter/self.windowSize)+1)*self.batchGradient
			self.W = matrixize(self.Wlong, self.userNum)
			self.W = normalize(self.W, axis=0, norm='l1')
			#print 'SVD', self.W
			self.batchGradient = np.zeros(self.userNum*self.userNum)
			# Use Ridge regression to fit W
		'''
		plt.pcolor(self.W_b)
		plt.colorbar()
		plt.show()
		'''
		if (self.W.T[userID] < 0).any() or (self.W.T[userID] > 1).any():
			print(self.W.T[userID])

		self.CoTheta = np.dot(self.UserTheta, self.W)
		self.BigW = np.kron(np.transpose(self.W), np.identity(n=len(articlePicked.featureVector)))
		self.CCA = np.dot(np.dot(self.BigW , self.AInv), np.transpose(self.BigW))
		self.BigTheta = np.kron(np.identity(n=self.userNum) , self.UserTheta)
Example #3
    def setup(self):
        surfaces = self.options['surfaces']

        system_size = 0

        for surface in self.options['surfaces']:
            mesh = surface['mesh']
            nx = mesh.shape[0]
            ny = mesh.shape[1]

            system_size += (nx - 1) * (ny - 1)

        self.system_size = system_size

        self.add_input('mtx', shape=(system_size, system_size), units='1/m')
        self.add_input('rhs', shape=system_size, units='m/s')
        self.add_output('circulations', shape=system_size, units='m**2/s')

        self.declare_partials('circulations', 'circulations',
            rows=np.outer(np.arange(system_size), np.ones(system_size, int)).flatten(),
            cols=np.outer(np.ones(system_size, int), np.arange(system_size)).flatten(),
        )
        self.declare_partials('circulations', 'mtx',
            rows=np.outer(np.arange(system_size), np.ones(system_size, int)).flatten(),
            cols=np.arange(system_size ** 2),
        )
        self.declare_partials('circulations', 'rhs', val=-1.,
            rows=np.arange(system_size),
            cols=np.arange(system_size),
        )
Example #4
def get_response_content(fs):
    # make the laplacian matrix for the graph
    weight = 1 / fs.edge_length
    n = fs.nvertices
    L = np.zeros((n,n))
    # set the diagonal
    for i in range(n):
        L[i,i] = 2 * weight
    L[0,0] = weight
    L[-1,-1] = weight
    # complete the tridiagonal
    for i in range(n-1):
        L[i+1,i] = -weight
        L[i,i+1] = -weight
    # define other matrices
    L_pinv = np.linalg.pinv(L)
    HDH = -2*L_pinv
    v = np.diag(HDH)
    e = np.ones(n)
    D = HDH - (np.outer(v, e) + np.outer(e, v))/2
    # show some matrices
    out = StringIO()
    np.set_printoptions(linewidth=300)
    print('Laplacian matrix:', file=out)
    print(L, file=out)
    print(file=out)
    print('HDH:', file=out)
    print(HDH, file=out)
    print(file=out)
    print('EDM:', file=out)
    print(D, file=out)
    print(file=out)
    return out.getvalue()
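For reference, a standalone sketch of the same tridiagonal path-graph Laplacian built with numpy band matrices (assumptions: unit edge length, n = 4 vertices; not from the original source):

import numpy as np

n, weight = 4, 1.0
L = 2 * weight * np.eye(n)                         # interior vertices have degree 2
L[0, 0] = L[-1, -1] = weight                       # endpoints have degree 1
L -= weight * (np.eye(n, k=1) + np.eye(n, k=-1))   # tridiagonal couplings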
Example #5
def plot_surface(image, center=None, size=15, output=False, ds9_indexing=False, 
                 **kwargs):
    """
    Create a surface plot from image.
    
    By default, the whole image is plotted. The 'center' and 'size' attributes
    allow cropping the image.
        
    Parameters
    ----------
    image : numpy.array
        The image as a numpy.array.
    center : tuple of 2 int (optional, default=None)
        If None, the whole image will be plotted. Otherwise, it grabs a square
        subimage at the 'center' (Y,X) from the image.
    size : int (optional, default=15)
        Side length of the square subimage grabbed around 'center'.
    output : {False, True}, bool optional
        Whether to output the grids and intensities or not.
    ds9_indexing : {False, True}, bool optional 
        If True the coordinates are in X,Y convention and in 1-indexed format.
    kwargs:
        Additional attributes are passed to the matplotlib figure() and 
        plot_surface() method.        
    
    Returns
    -------
    out : tuple of 3 numpy.array
        x and y for the grid, and the intensity
        
    """        
    if not center:
        size = image.shape[0]
        x = np.outer(np.arange(0,size,1), np.ones(size))
        y = x.copy().T 
        z = image
    else: 
        if ds9_indexing:
            center = (center[0]-1,center[1]-1) 
            cx, cy = center
        else:
            cy, cx = center
        if size % 2:            # if size is odd             
            x = np.outer(np.arange(0,size,1), np.ones(size))
        else:                   # otherwise, size is even
            x = np.outer(np.arange(0,size+1,1), np.ones(size+1))
        y = x.copy().T            
        z = image[cy-size//2:cy+size//2+1,cx-size//2:cx+size//2+1]           
    
    figure(figsize=kwargs.pop('figsize',(5,5)))
    ax = axes(projection='3d')
    ax.plot_surface(x, y, z, rstride=1, cstride=1, linewidth=0, **kwargs) 
    ax.set_xlabel('$x$')
    ax.set_ylabel('$y$')
    ax.set_zlabel('$I(x,y)$')
    ax.set_title('Data')
    show()
    
    if output:
        return (x,y,z)
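A hypothetical call (it assumes the pylab-style names figure, axes, and show used in the body are imported at module level):

import numpy as np

img = np.random.rand(40, 40)
x, y, z = plot_surface(img, center=(20, 20), size=11, output=True)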
Example #6
    def backProp(self,node,error=None):

        # Clear nodes
        node.fprop = False

        # y_hat - y
        delta5 = node.probs
        delta5[node.label] -= 1.0

        self.dbs += delta5

        # dU
        self.dWs += np.outer(delta5, node.hActs2)

        delta4 = np.dot(self.Ws.T, delta5)
        delta3 = delta4 * (node.hActs2 > 0)

        self.db2 += delta3
        self.dW2 += np.outer(delta3, node.hActs1)

        delta2 = np.dot(self.W2.T, delta3)

        if error is not None:
            delta2 += error

        if node.isLeaf:
            self.dL[node.word] += delta2
        else:
            delta1 = delta2 * (node.hActs1 > 0)
            self.db1 += delta1
            self.dW1 += np.outer(delta1, np.hstack([node.left.hActs1, node.right.hActs1]))
            delta0 = np.dot(self.W1.T, delta1)
            self.backProp(node.left, delta0[:self.wvecDim])
            self.backProp(node.right, delta0[self.wvecDim:])
Example #7
	def updateA(self, featureVector, decay=None, current_time=None):
		if decay:
			assert decay <= 1 and decay >=0
			self.DD = self.decayAverage(decay, self.DD, np.outer(featureVector, featureVector), current_time)
			self.A = self.DD + self.identityMatrix
		else:
			self.A += np.outer(featureVector, featureVector)
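The np.outer(featureVector, featureVector) term is a rank-one update of A; a tiny illustration with hypothetical values (not from the source):

import numpy as np

x = np.array([1.0, 2.0])
A = np.identity(2)
A += np.outer(x, x)  # same accumulation as the non-decayed branch above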
Example #8
    def backpropagation(self, input, output):

        '''
        Compute gradients with the back-propagation method
        inputs:
            x: vector with the (embedding) indices of the words of a
                sentence
            outputs: vector with the indices of the tags for each word of
                the sentence
        outputs:
            gradient_parameters: vector with parameter gradients
        '''

        # Get parameters and sizes
        W_e, W_x, W_h, W_y = self.parameters
        nr_steps = input.shape[0]

        log_p_y, y, h, z_e, x = self.log_forward(input)
        p_y = np.exp(log_p_y)

        # Initialize gradients with zero entries
        gradient_W_e = np.zeros(W_e.shape)
        gradient_W_x = np.zeros(W_x.shape)
        gradient_W_h = np.zeros(W_h.shape)
        gradient_W_y = np.zeros(W_y.shape)

        # ----------
        # Solution to Exercise 6.1

        # Gradient of the cost with respect to the last linear model
        I = index2onehot(output, W_y.shape[0])
        error = (p_y - I) / nr_steps

        # backward pass, with gradient computation
        error_h_next = np.zeros_like(h[0, :])
        for t in reversed(range(nr_steps)):

            # Output linear
            error_h = np.dot(W_y.T, error[t, :]) + error_h_next

            # Non-linear
            error_raw = h[t+1, :] * (1. - h[t+1, :]) * error_h

            # Hidden-linear
            error_h_next = np.dot(W_h.T, error_raw)

            # Weight gradients
            gradient_W_y += np.outer(error[t, :], h[t+1, :])
            gradient_W_h += np.outer(error_raw, h[t, :])
            gradient_W_x += np.outer(error_raw, z_e[t, :])
            gradient_W_e[x[t], :] += W_x.T.dot(error_raw)

        # End of Solution to Exercise 6.1
        # ----------

        # Normalize over sentence length
        gradient_parameters = [
            gradient_W_e, gradient_W_x, gradient_W_h, gradient_W_y
        ]

        return gradient_parameters
Example #9
def Cramer(var1, var2):
	"""
	Compute Cramer's V statistic for two Pandas series

	Parameters:
	----------
	var1, var2: Pandas series

	Returns:
	--------
	v : float
		The Cramer's V statistic of two categorical-variable series

	Status:
	-------	
	Cramer's V Implementation
	Author: Jesse Lund, [email protected]
	Date: 9/12/2015

	##Round 1##
	Comments: Thomas Roderick, [email protected]
	Date: 9/13/2015

	"""

	table = crosstab(var1,var2) #For Pandas: must have an index, can't just feed in two lists. This could be a sticking point. Might be better to do a check or roll our own crosstab implementation
	l,w = table.shape #save on a (small) function call here--reads in both outputs 
	df = min(l-1, w-1)
	colsum, rowsum = table.sum(0), table.sum(1) 
	n = float(table.sum().sum()) #grand total of observations (not the cell count l*w)
	expectmat = outer(rowsum,colsum)/n
	return sqrt((((table - expectmat)**2)/(expectmat*n*df)).sum().sum())
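Hypothetical usage (it assumes the bare names above were imported as, e.g., from pandas import crosstab and from numpy import outer, sqrt):

import pandas as pd

a = pd.Series(['x', 'x', 'y', 'y', 'x', 'y'])
b = pd.Series(['u', 'v', 'u', 'v', 'u', 'v'])
print(Cramer(a, b))  # V lies between 0 and 1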
Example #10
def unscented_func (f, sigmaPoints, meanWeight, covWeight, angleMask=None, **kwargs):
    n = sigmaPoints.shape[1]

    y = f (sigmaPoints[:,0], **kwargs)
    m = len (y)

    y = zeros ((m, n))

    for i in range (n):
        y[:,i] = f (sigmaPoints[:,i], **kwargs)

    muPrime = sum (y * meanWeight, axis=1)
    if angleMask is not None:
        for i, mask in enumerate (angleMask):
            if mask:
                muPrime[i] = circularMean (y[i,:], weights=meanWeight)
                muPrime[i] = minimizedAngle (muPrime[i])

    SigmaPrime = zeros ((m, m))
    for i in range (n):
        if angleMask is None:
            SigmaPrime += covWeight[i] * outer (y[:,i] - muPrime, (y[:,i] - muPrime))
        else:
            SigmaPrime += covWeight[i] * outer (minimizedAngle (y[:,i] - muPrime, angleMask), 
                                                minimizedAngle (y[:,i] - muPrime, angleMask))

    return muPrime, SigmaPrime
Example #11
def unscented_obs_model (f, sigmaPoints, meanWeight, covWeight, state, stateAngleMask, obsModelAngleMask, **kwargs):
    n = sigmaPoints.shape[1]
    stateLen = len (state)

    y = f (sigmaPoints[0:stateLen,0], **kwargs) + sigmaPoints[stateLen:,0]
    m = len (y)

    y = zeros ((m, n))

    for i in range (n):
        y[:,i] = f (sigmaPoints[0:stateLen,i], **kwargs) + sigmaPoints[stateLen:,i]

    muPrime = sum (y * meanWeight, axis=1)
    for i, mask in enumerate (obsModelAngleMask):
        if mask:
            muPrime[i] = circularMean (y[i,:], weights=meanWeight)
            muPrime[i] = minimizedAngle (muPrime[i])

    SigmaPrime = zeros ((m, m))
    for i in range (n):
        SigmaPrime += covWeight[i] * outer (minimizedAngle (y[:,i] - muPrime, obsModelAngleMask), 
                                            minimizedAngle (y[:,i] - muPrime, obsModelAngleMask))

    crossCov = zeros ((stateLen, m))
    for i in range (n):
        diffState = minimizedAngle (sigmaPoints[0:stateLen,i] - state, stateAngleMask)
        diffMeas = minimizedAngle (y[:,i] - muPrime, obsModelAngleMask)
        crossCov += covWeight[i] * outer (diffState, diffMeas)

    return muPrime, SigmaPrime, crossCov
Example #12
    def backProp(self,node,error=None):
        # Clear nodes
        node.fprop = False
        errorCur = node.probs - make_onehot(node.label,len(self.bs))
        self.dWs += np.outer(errorCur, node.hActs1)
        self.dbs += errorCur

        errorCur = errorCur.dot(self.Ws)
        if error is not None:
            errorCur += error

        if node.isLeaf:
            self.dL[node.word] += errorCur
            return

        errorCur = errorCur*self.df(node.hActs1)
        LR = np.hstack([node.left.hActs1, node.right.hActs1])
        self.dW += np.outer(errorCur,LR)
        self.db += errorCur

        S = np.zeros(len(LR))
        for i in range(len(self.V)):
            self.dV[i] += errorCur[i]*np.outer(LR,LR)
            S += (self.V[i]+self.V[i].T).dot(LR)*errorCur[i]
        
        errorDown = errorCur.dot(self.W) + S        
        self.backProp(node.left,errorDown[:self.wvecDim])
        self.backProp(node.right,errorDown[self.wvecDim:])
Example #13
    def _set_expected_stats(self,smoothed_mus,smoothed_sigmas,E_xtp1_xtT):
        assert not np.isnan(E_xtp1_xtT).any()
        assert not np.isnan(smoothed_mus).any()
        assert not np.isnan(smoothed_sigmas).any()

        data = self.data
        EyyT = data.T.dot(data)
        EyxT = data.T.dot(smoothed_mus)
        ExxT = smoothed_sigmas.sum(0) + smoothed_mus.T.dot(smoothed_mus)

        E_xt_xtT = \
            ExxT - (smoothed_sigmas[-1]
                    + np.outer(smoothed_mus[-1],smoothed_mus[-1]))
        E_xtp1_xtp1T = \
            ExxT - (smoothed_sigmas[0]
                    + np.outer(smoothed_mus[0], smoothed_mus[0]))

        E_xtp1_xtT = E_xtp1_xtT.sum(0)

        def is_symmetric(A):
            return np.allclose(A,A.T)

        assert is_symmetric(ExxT)
        assert is_symmetric(E_xt_xtT)
        assert is_symmetric(E_xtp1_xtp1T)

        self.E_emission_stats = np.array([EyyT, EyxT, ExxT, self.T])
        self.E_dynamics_stats = np.array([E_xtp1_xtp1T, E_xtp1_xtT, E_xt_xtT, self.T-1])
Example #14
    def learn(self, docs, alpha=0.1, tau=5):
        index = numpy.arange(len(docs))
        numpy.random.shuffle(index)
        for i in index:
            doc = docs[i]
            pre_s = [numpy.zeros(self.K)]
            pre_w = [0] # <s>
            for w in doc:
                s = 1 / (numpy.exp(- numpy.dot(self.W, pre_s[-1]) - self.U[:, pre_w[-1]]) + 1)
                z = numpy.dot(self.V, s)
                y = numpy.exp(z - z.max())
                y = y / y.sum()

                # calculate errors
                y[w] -= 1  # -e0
                eh = [numpy.dot(y, self.V) * s * (s - 1)] # eh[t]
                for t in range(min(tau, len(pre_s)-1)):
                    st = pre_s[-1-t]
                    eh.append(numpy.dot(eh[-1], self.W) * st * (1 - st))

                # update parameters
                pre_w.append(w)
                pre_s.append(s)
                self.V -= numpy.outer(y, s * alpha)
                for t in range(len(eh)):
                    self.U[:, pre_w[-1-t]] += eh[t] * alpha
                    self.W += numpy.outer(pre_s[-2-t], eh[t]) * alpha
Example #15
def generate_ODGD_spec_chirped(F1, F2, Fs, lengthOdgd=2048, Nfft=2048, \
                               Ot=0.5, t0=0.0, \
                               analysisWindowType='sinebell'):
    """
    generateODGDspecChirped:
    
    generates a waveform ODGD and the corresponding spectrum,
    using as analysis window the -optional- window given as
    argument.
    """
    
    # converting input to double:
    F1 = np.double(F1)
    F2 = np.double(F2)
    F0 = np.double(F1 + F2) / 2.0
    Fs = np.double(Fs)
    Ot = np.double(Ot)
    t0 = np.double(t0)
    
    # compute analysis window of given type:
    if analysisWindowType == 'sinebell':
        analysisWindow = sinebell(lengthOdgd)
    else:
        if analysisWindowType == 'hanning' or \
               analysisWindowType == 'hann':
            analysisWindow = hann(lengthOdgd)
    
    # maximum number of partials in the spectral comb:
    partialMax = np.floor((Fs / 2) / max(F1, F2))
    
    # Frequency numbers of the partials:
    frequency_numbers = np.arange(1,partialMax + 1)
    
    # intermediate value
    temp_array = 1j * 2.0 * np.pi * frequency_numbers * Ot
    
    # compute the amplitudes for each of the frequency peaks:
    amplitudes = F0 * 27 / 4 * \
                 (np.exp(-temp_array) \
                  + (2 * (1 + 2 * np.exp(-temp_array)) / temp_array) \
                  - (6 * (1 - np.exp(-temp_array)) \
                     / (temp_array ** 2))) \
                  / temp_array
    
    # Time stamps for the time domain ODGD
    timeStamps = np.arange(lengthOdgd) / Fs + t0 / F0
    
    # Time domain odgd:
    odgd = np.exp(2.0 * 1j * np.pi \
                  * (np.outer(F1 * frequency_numbers,timeStamps) \
                     + np.outer((F2 - F1) \
                                * frequency_numbers,timeStamps ** 2) \
                     / (2 * lengthOdgd / Fs))) \
                     * np.outer(amplitudes,np.ones(lengthOdgd))
    odgd = np.sum(odgd,axis=0)
    
    # spectrum:
    odgdSpectrum = np.fft.fft(np.real(odgd * analysisWindow), n=Nfft)
    
    return odgd, odgdSpectrum
Example #16
def pma(data,axis=0):
    """
    principal modes analysis of 2D data - expects time on the axis=0 axis
    returns:
    sorted eigvals, and eigvects, correlation matrix
    """
    i=data[0]
    
    #MD=np.einsum('i,j->ij',i,i.T)
    MD=np.outer(i,i)
    #print "entering for loop"
    for i in data[1:]:
       #MD+=np.einsum('i,j->ij',i,i.T)
        MD+=np.outer(i,i)
    #print "exiting for loop"
    
    MD/=data.shape[0]
    #print "computing eigenvalues"
    l,e=np.linalg.eigh(MD)
    #print "done"

    idxx=np.argsort(l)[::-1]
    l=l[idxx]
    e=e[:,idxx]
    return l,e,MD
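A quick smoke test with hypothetical data (time on axis 0, as the docstring requires):

import numpy as np

data = np.random.randn(200, 5)
l, e, MD = pma(data)
assert np.all(np.diff(l) <= 0)  # eigenvalues come back sorted in descending order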
Example #17
    def evolveDraw(self, r):
        """ This can contain triggers for things to be drawn, e.g. the shark."""
        if self.mode == 3 or self.mode == 4 or self.mode == 7:
            # Draw shark
            self.updateShark(r)

            self.axis.scatter(
                self.predatorLocation[:, 0],
                self.predatorLocation[:, 1],
                self.predatorLocation[:, 2],
                color="r",
                s=4 * self.length,
            )

        if self.mode == 1 or self.mode == 2 or self.mode == 3 or self.mode == 6:

            if self.noSphere:
                u = np.linspace(0, 2 * np.pi, 100)
                v = np.linspace(0, np.pi, 100)

                self.sphere_x = self.habitatSize * np.outer(np.cos(u), np.sin(v))
                self.sphere_y = self.habitatSize * np.outer(np.sin(u), np.sin(v))
                self.sphere_z = self.habitatSize * np.outer(np.ones(np.size(u)), np.cos(v))

                self.noSphere = False
            self.axis.plot_wireframe(
                self.sphere_x, self.sphere_y, self.sphere_z, rstride=13, cstride=13, color="r", alpha=0.3
            )
Example #18
    def update(self, population):
        """Update the current covariance matrix strategy from the
        *population*.

        :param population: A list of individuals from which to update the
                           parameters.
        """
        population.sort(key=lambda ind: ind.fitness, reverse=True)
        lambda_succ = sum(self.parent.fitness <= ind.fitness for ind in population)
        p_succ = float(lambda_succ) / self.lambda_
        self.psucc = (1 - self.cp) * self.psucc + self.cp * p_succ

        if self.parent.fitness <= population[0].fitness:
            x_step = (population[0] - numpy.array(self.parent)) / self.sigma
            self.parent = copy.deepcopy(population[0])
            if self.psucc < self.pthresh:
                self.pc = (1 - self.cc) * self.pc + sqrt(self.cc * (2 - self.cc)) * x_step
                self.C = (1 - self.ccov) * self.C + self.ccov * numpy.outer(self.pc, self.pc)
            else:
                self.pc = (1 - self.cc) * self.pc
                self.C = (1 - self.ccov) * self.C + self.ccov * (numpy.outer(self.pc, self.pc) + self.cc * (2 - self.cc) * self.C)

        self.sigma = self.sigma * exp(1.0 / self.d * (self.psucc - self.ptarg) / (1.0 - self.ptarg))

        # We use Cholesky since for now we have no use for the eigen decomposition.
        # Basically, Cholesky returns a matrix A such that C = A*A.T.
        # Eigen decomposition returns two matrices B and D^2 such that C = B*D^2*B.T = B*D*D*B.T,
        # so A == B*D.
        # To compute the new individual we need to multiply each vector z by A,
        # as y = centroid + sigma * A*z.
        # So Cholesky is more straightforward: we don't need to compute the
        # square root of D^2 and multiply B and D to get A; we get A directly.
        # This can't be done (without cost) with the standard CMA-ES, as the eigen
        # decomposition is used to compute the covariance matrix inverse in the
        # step-size evolutionary path computation.
        self.A = numpy.linalg.cholesky(self.C)
Example #19
def fft_shift_phasor_2d(shape, offset, grad=False):
    """Return phasor array used to shift an array (in real space) by
    multiplication in fourier space.
    
    Parameters
    ----------
    shape : (int, int)
        Length 2 iterable giving shape of array.
    offset : (float, float)
        Offset in array elements in each dimension.
    grad : bool, optional
        If True, also return the gradient of the phasor with respect to the
        offset (one component per dimension).

    Returns
    -------
    z : np.ndarray (complex; 2-d)
        Complex array with shape ``shape``.  If ``grad`` is True, an array of
        the two gradient components is returned as well.

    """
    
    ny, nx = shape
    dy, dx = offset

    yphasor = fft_shift_phasor(ny, dy, grad=grad)
    xphasor = fft_shift_phasor(nx, dx, grad=grad)

    if grad:
        res = np.outer(yphasor[0], xphasor[0])
        dres_dy = np.outer(yphasor[1], xphasor[0])
        dres_dx = np.outer(yphasor[0], xphasor[1])
        resgrad = np.concatenate((dres_dy[None, :, :], dres_dx[None, :, :]))
        return res, resgrad

    else:
        return np.outer(yphasor, xphasor)
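The 1-D helper fft_shift_phasor is not shown here; as an assumption, a common definition is the Fourier shift-theorem phasor, which makes the 2-D version exactly the outer product used above:

import numpy as np

def _phasor_1d(n, d):
    # hypothetical stand-in for fft_shift_phasor: exp(-2*pi*i*k*d/n)
    k = np.fft.fftfreq(n) * n  # integer frequencies 0..n/2-1, -n/2..-1
    return np.exp(-2j * np.pi * k * d / n)

z = np.outer(_phasor_1d(8, 1.5), _phasor_1d(8, -0.5))  # shape (8, 8)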
Example #20
    def bptt(self, x, y):
        # forward pass
        # save all hidden states and outputs at each time step
        # because we need them during back propagation.
        # add one additional row of 0s as the initial hidden state
        s = np.zeros((len(x) + 1, self.hidden_dim))
        s[-1] = np.zeros(self.hidden_dim)
        o = np.zeros((len(x), self.word_dim))
        for t in range(len(x)):
            # Note that we are indexing U by x[t]. This is the same as
            # multiplying U with a one-hot vector.
            s[t] = np.tanh(self.U[:,x[t]] + self.W.dot(s[t-1]))
            o[t] = softmax(self.V.dot(s[t]))

        # backward pass
        dLdU = np.zeros(self.U.shape)
        dLdV = np.zeros(self.V.shape)
        dLdW = np.zeros(self.W.shape)
        # dLdy = o - y
        dLdy = o
        dLdy[np.arange(len(y)), y] -= 1.
        # For each output backwards...
        for t in np.arange(len(y))[::-1]:
            dLdV += np.outer(dLdy[t], s[t].T)
            # Initial delta calculation
            dLdz = self.V.T.dot(dLdy[t]) * (1 - (s[t] ** 2))
            # Backpropagation through time (for at most self.bptt_truncate steps)
            for bptt_step in np.arange(max(0, t-self.bptt_truncate), t+1)[::-1]:
                # print "Backpropagation step t=%d bptt step=%d " % (t, bptt_step)
                dLdW += np.outer(dLdz, s[bptt_step-1])
                dLdU[:,x[bptt_step]] += dLdz
                # Update delta for next step
                dLdz = self.W.T.dot(dLdz) * (1 - s[bptt_step-1] ** 2)
        return [dLdU, dLdV, dLdW]
Example #21
    def __call__(self, mat, type="wireframe", **params):
        p = ParamOverrides(self, params)

        from mpl_toolkits.mplot3d import axes3d

        fig = plt.figure()
        ax = axes3d.Axes3D(fig)

        # Construct matrices for r and c values
        rn, cn = mat.shape
        c = np.outer(np.ones(rn), np.arange(cn * 1.0))
        r = np.outer(np.arange(rn * 1.0), np.ones(cn))

        if type == "wireframe":
            ax.plot_wireframe(r, c, mat)
        elif type == "surface":
            # Sometimes fails for no obvious reason
            ax.plot_surface(r, c, mat)
        elif type == "contour":
            # Works but not usually very useful
            ax.contour3D(r, c, mat)
        else:
            raise ValueError("Unknown plot type " + str(type))

        ax.set_xlabel('R')
        ax.set_ylabel('C')
        ax.set_zlabel('Value')

        self._generate_figure(p)
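The two outer products above build the same row/column coordinate grids that np.meshgrid produces; a small equivalence check (plain numpy, not from the source):

import numpy as np

rn, cn = 3, 4
c = np.outer(np.ones(rn), np.arange(cn * 1.0))
r = np.outer(np.arange(rn * 1.0), np.ones(cn))
r2, c2 = np.meshgrid(np.arange(rn * 1.0), np.arange(cn * 1.0), indexing='ij')
assert np.array_equal(r, r2) and np.array_equal(c, c2)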
Example #22
    def _get_H(self, debug=False):
        """
        returns H_t as defined in algorithm 2
        
        Reference:
        https://en.wikipedia.org/wiki/Limited-memory_BFGS
        http://www.ccms.or.kr/data/pdfpaper/jcms21_1/21_1_117.pdf
        https://homes.cs.washington.edu/~galen/files/quasi-newton-notes.pdf
        """
        I = np.identity(len(self.w))
        
        if min(len(self.s), len(self.y)) == 0:
            print("Warning: No second order information used!")
            return I
            
        assert len(self.s) > 0, "s cannot be empty."
        assert len(self.s) == len(self.y), "s and y must have same length"
        assert self.s[0].shape == self.y[0].shape, \
            "s and y must have same shape"
        assert abs(self.y[-1]).sum() != 0, "latest y entry cannot be 0!"
        assert 1/np.inner(self.y[-1], self.s[-1]) != 0, "!"

        I = np.identity(len(self.s[0]))
        H = np.dot((np.inner(self.s[-1], self.y[-1]) / np.inner(self.y[-1],
                   self.y[-1])), I)

        for (s_j, y_j) in zip(self.s, self.y):
            rho = 1.0/np.inner(y_j, s_j)
            V = I - np.multiply(rho, np.outer(s_j, y_j))
            H = (V).dot(H).dot(V.T)
            H += np.multiply(rho, np.outer(s_j, s_j))

        return H
Example #23
 def calculate(self, w, ionization_data, beta_rad, t_electrons, t_rad,
     beta_electron, delta_input, chi_0):
     if delta_input is None:
         if self.departure_coefficient is None:
             departure_coefficient = 1. / w
         else:
             departure_coefficient = self.departure_coefficient
         radiation_field_correction = -np.ones((len(ionization_data), len(
             beta_rad)))
         less_than_chi_0 = (
             ionization_data.ionization_energy < chi_0).values
         factor_a = (t_electrons / (departure_coefficient * w * t_rad))
         radiation_field_correction[~less_than_chi_0] = factor_a * \
             np.exp(np.outer(ionization_data.ionization_energy.values[
             ~less_than_chi_0], beta_rad - beta_electron))
         radiation_field_correction[less_than_chi_0] = 1 - np.exp(np.outer(
             ionization_data.ionization_energy.values[less_than_chi_0],
             beta_rad) - beta_rad * chi_0)
         radiation_field_correction[less_than_chi_0] += factor_a * np.exp(
             np.outer(ionization_data.ionization_energy.values[
             less_than_chi_0],beta_rad) - chi_0 * beta_electron)
     else:
         radiation_field_correction = np.ones((len(ionization_data),
             len(beta_rad))) * delta_input
     delta = pd.DataFrame(radiation_field_correction,
         columns=np.arange(len(t_rad)), index=ionization_data.index)
     return delta
Example #24
 def trainNN(self, imagesTrainSet, labelsTrainSet, etha):
     self.reset_weights()
     trainingSetSize = labelsTrainSet.shape[0]
     j = 0
     while j < 30:
         i = 0
         # print("Round: " + str(j + 1))
         while i < trainingSetSize :
             x = imagesTrainSet[i].ravel()  # Convert 28x28 pixel image into a (784,) vector
             x = np.array([ 0 if val == 0 else 1 for val in x ])
             x_a = np.insert(x, 0, values=1, axis=0)  # Augmented Feature vector
             net_hidd = np.dot(self.w1, x_a)
             y = self.signum(net_hidd)
             y_a = np.insert(y, 0, values=1, axis=0)  # Augmented Feature vector
             
             net_out = np.dot(self.w2, y_a)
             z = self.signum(net_out)
             lab = np.array([ 1 if k == self.labels[i] else 0 for k in range(10) ])
             
             J = z - lab
             J = np.sum(0.5 * J * J)
             if J < 1 and self.enableWeightDecay:
                 break
             out_sensitivity = (lab - z) * self.signum_prime(net_out)
             net_hidd_prime = self.signum_prime(net_hidd) 
             hid_sensitivity = np.dot(self.w2.T, out_sensitivity) * np.insert(net_hidd_prime, 0, 1)
             
             grad_hidd_out = etha * np.outer(out_sensitivity, y_a.T)
             grad_in_hidd = etha * np.outer(hid_sensitivity[1:] , x_a.T) 
             
             self.update_weights_bias(grad_in_hidd, grad_hidd_out)
             i += 1
         j += 1
         
     return self.w1, self.w2
Example #25
    def _create_displacement_matrix(self,
                                    disp_pairs,
                                    site_symmetry,
                                    rot_atom_map):
        rot_disp1s = []
        rot_disp2s = []
        rot_pair12 = []
        rot_pair21 = []
        rot_pair11 = []
        rot_pair22 = []

        for disp_pairs_u1 in disp_pairs:
            for rot_atom_num, ssym in zip(rot_atom_map, site_symmetry):
                ssym_c = similarity_transformation(self._lattice, ssym)
                for (u1, u2) in disp_pairs_u1[rot_atom_num]:
                    Su1 = np.dot(ssym_c, u1)
                    Su2 = np.dot(ssym_c, u2)
                    rot_disp1s.append(Su1)
                    rot_disp2s.append(Su2)
                    rot_pair12.append(np.outer(Su1, Su2).flatten() / 2)
                    rot_pair21.append(np.outer(Su2, Su1).flatten() / 2)
                    rot_pair11.append(np.outer(Su1, Su1).flatten() / 2)
                    rot_pair22.append(np.outer(Su2, Su2).flatten() / 2)
    
        ones = np.ones(len(rot_disp1s)).reshape((-1, 1))

        return np.hstack((ones, rot_disp1s, rot_disp2s,
                          rot_pair12, rot_pair21, rot_pair11, rot_pair22))
Example #26
 def TwoSampleTest(self,sample1,sample2,numShuffles=1000,method='vanilla',blockSize=20):
     """
     Compute the p-value associated to the MMD between two samples
     method determines the null approximation procedure:
     ----'vanilla': standard permutation test
     ----'block': block permutation test
     ----'wild': wild bootstrap
     ----'wild-center': wild bootstrap with empirical degeneration
     """
     n1=shape(sample1)[0]
     n2=shape(sample2)[0]
     merged = concatenate( [sample1, sample2], axis=0 )
     merged_len=shape(merged)[0]
     numBlocks = merged_len // blockSize
     K=self.kernel(merged)
     mmd = mean(K[:n1,:n1])+mean(K[n1:,n1:])-2*mean(K[n1:,:n1])
     null_samples = zeros(numShuffles)
     
     if method=='vanilla':
         for i in range(numShuffles):
             pp = permutation(merged_len)
             Kpp = K[pp,:][:,pp]
             null_samples[i] = mean(Kpp[:n1,:n1])+mean(Kpp[n1:,n1:])-2*mean(Kpp[n1:,:n1])
             
     elif method=='block':
         blocks=reshape(arange(merged_len),(numBlocks,blockSize))
         for i in range(numShuffles):
             pb = permutation(numBlocks)
             pp = reshape(blocks[pb],(merged_len))
             Kpp = K[pp,:][:,pp]
             null_samples[i] = mean(Kpp[:n1,:n1])+mean(Kpp[n1:,n1:])-2*mean(Kpp[n1:,:n1])
             
     elif method=='wild' or method=='wild-center':
         if n1!=n2:
             raise ValueError("Wild bootstrap MMD available only on the same sample sizes")
         alpha = exp(-1/float(blockSize))
         coreK = K[:n1,:n1]+K[n1:,n1:]-K[n1:,:n1]-K[:n1,n1:]
         for i in range(numShuffles):
             """
             w is a draw from the Ornstein-Uhlenbeck process
             """
             w = HelperFunctions.generateOU(n=n1,alpha=alpha)
             if method=='wild-center':
                 """
                 empirical degeneration (V_{n,2} in Leucht & Neumann)
                 """
                 w = w - mean(w)
             null_samples[i]=mean(outer(w,w)*coreK)
     elif method=='wild2':
         
         alpha = exp(-1/float(blockSize))
         for i in range(numShuffles):
             wx=HelperFunctions.generateOU(n=n1,alpha=alpha)
             wx = wx - mean(wx)
             wy=HelperFunctions.generateOU(n=n2,alpha=alpha)
             wy = wy - mean(wy)
             null_samples[i]=mean(outer(wx,wx)*K[:n1,:n1])+mean(outer(wy,wy)*K[n1:,n1:])-2*mean(outer(wx,wy)*K[:n1,n1:])
     else:
         raise ValueError("Unknown null approximation method")
     return sum(mmd<null_samples)/float(numShuffles)
Example #27
    def test_extract_signals_pca(self):
        # Prepare some testing data
        x = np.linspace(0, 2*np.pi, num=int(2*np.pi*50))
        signal0 = np.sin(2*x)
        signal1 = np.sin(3*x)
        in_weights = np.array([[0, 0.25, 0.5, 0.75, 1],
                               [1, 0.75, 0.5, 0.25, 0]])
        features = np.outer(in_weights[0], signal0)
        features += np.outer(in_weights[1], signal1)
        self.assertEqual(features.shape, (5, 314))
        # Extract the signals
        comps, weights = extract_signals_pca(spectra=features,
                                          n_components=2)
        weights = np.swapaxes(weights, 0, 1)
        # Check the results
        new_features = np.outer(weights[0], comps[0])
        new_features += np.outer(weights[1], comps[1])
        self.assertEqual(comps.shape, (2, len(x)))
        self.assertEqual(weights.shape, in_weights.shape)

        plt.plot(x, comps[0], label='c0')
        plt.plot(x, comps[1], label='c1')
        plt.plot(x, features[0], label='f0')
        plt.plot(x, features[1], label='f1')
        plt.plot(x, new_features[0], label='nf0')
        plt.plot(x, new_features[1], label='nf1')
        plt.legend()
        plt.show()
Example #28
def test_strucdamping(use_GPU):
    n_inputs = 3
    sig_len = 5

    inputs = np.outer(np.linspace(0.1, 0.9, n_inputs),
                      np.ones(sig_len))[:, :, None]
    targets = np.outer(np.linspace(0.1, 0.9, n_inputs),
                       np.linspace(0, 1, sig_len))[:, :, None]
    inputs = inputs.astype(np.float32)
    targets = targets.astype(np.float32)

    optimizer = HessianFree(CG_iter=100)

    rnn = hf.RNNet(
        shape=[1, 5, 1],
        loss_type=[hf.loss_funcs.SquaredError(),
                   hf.loss_funcs.StructuralDamping(0.1, optimizer=optimizer)],
        debug=True, use_GPU=use_GPU)

    rnn.run_epochs(inputs, targets, optimizer=optimizer,
                   max_epochs=30, print_period=None)

    outputs = rnn.forward(inputs, rnn.W)

    assert rnn.loss.batch_loss(outputs, targets) < 1e-4
Example #29
def hess(A):
    """Computes the upper Hessenberg form of A using Householder reflectors.
    input:  A, mxn array
    output: Q, orthogonal mxm array
            H, upper Hessenberg
            s.t. Q.T.dot(H).dot(Q) = A
    """
    # similar approach as the householder function.
    # again, not perfectly optimized, but good enough.
    Q = np.eye(A.shape[0]).T
    H = np.array(A, order="C")
    # initialize m and n for convenience
    m, n = H.shape
    # avoid reallocating v in the for loop
    v = np.empty(A.shape[1]-1)
    for k in range(n-2):
        # get a slice of the temporary array
        vk = v[k:]
        # fill it with corresponding values from R
        vk[:] = H[k+1:,k]
        # add in the term that makes the reflection work
        vk[0] += copysign(la.norm(vk), vk[0])
        # normalize it so it's an orthogonal transform
        vk /= la.norm(vk)
        # apply projection to H on the left
        H[k+1:,k:] -= 2 * np.outer(vk, vk.dot(H[k+1:,k:]))
        # apply projection to H on the right
        H[:,k+1:] -= 2 * np.outer(H[:,k+1:].dot(vk), vk)
        # Apply it to Q
        Q[k+1:] -= 2 * np.outer(vk, vk.dot(Q[k+1:]))
    return Q, H
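A small self-check sketch (it assumes the imports hess itself relies on, numpy as np, scipy.linalg as la, and math's copysign, are in scope; since Q accumulates the left reflections, the original matrix is recovered as Q.T @ H @ Q):

import numpy as np

A = np.random.randn(6, 6)
Q, H = hess(A)
assert np.allclose(Q.T @ H @ Q, A)       # the similarity transform recovers A
assert np.allclose(np.tril(H, -2), 0.0)  # H is upper Hessenberg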
Example #30
def Dgrid_zone(zone,Px):
    """ Distance point to zone

    A zone is a rectangular region sampled on a regular grid.

    Parameters
    ----------

    zone : dictionary
           xmin xmax Nx
           ymin ymax Ny

    Px : np.array
         point

    Build the distance matrix between Tx and points in the zone

    Notes
    -----

    use broadcasting instead

    """

    rx = np.linspace(zone['xmin'],zone['xmax'],zone['Nx'])
    ry = np.linspace(zone['ymin'],zone['ymax'],zone['Ny'])

    R_x = np.outer(np.ones(len(ry)),rx)
    R_y = np.outer(ry,np.ones(len(rx)))

    Dx = R_x - Px[0]
    Dy = R_y - Px[1]
    D = np.sqrt(Dx*Dx+Dy*Dy)
    return (D)
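Minimal usage sketch (zone keys taken from the docstring; numpy assumed imported as np at module level):

import numpy as np

zone = {'xmin': 0., 'xmax': 10., 'Nx': 5,
        'ymin': 0., 'ymax': 4., 'Ny': 3}
D = Dgrid_zone(zone, np.array([2., 1.]))  # (Ny, Nx) array of distances from (2, 1)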
Example #31
    def calculateMapSq(self, R=None, m2_uform=None):
        r"""Calculate the aperture mass statistics from the correlation function.

        .. math::

            \langle M_{ap}^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2}
            \left [ T_+\left(\frac{r}{R}\right) \xi_+(r) +
            T_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\
            \langle M_\times^2 \rangle(R) &= \int_{0}^{rmax} \frac{r dr}{2R^2}
            \left[ T_+\left(\frac{r}{R}\right) \xi_+(r) -
            T_-\left(\frac{r}{R}\right) \xi_-(r) \right]

        The ``m2_uform`` parameter sets which definition of the aperture mass to use.
        The default is to use 'Crittenden'.

        If ``m2_uform`` is 'Crittenden':

        .. math::

            U(r) &= \frac{1}{2\pi} (1-r^2) \exp(-r^2/2) \\
            Q(r) &= \frac{1}{4\pi} r^2 \exp(-r^2/2) \\
            T_+(s) &= \frac{s^4 - 16s^2 + 32}{128} \exp(-s^2/4) \\
            T_-(s) &= \frac{s^4}{128} \exp(-s^2/4) \\
            rmax &= \infty

        cf. Crittenden, et al (2002): ApJ, 568, 20

        If ``m2_uform`` is 'Schneider':

        .. math::

            U(r) &= \frac{9}{\pi} (1-r^2) (1/3-r^2) \\
            Q(r) &= \frac{6}{\pi} r^2 (1-r^2) \\
            T_+(s) &= \frac{12}{5\pi} (2-15s^2) \arccos(s/2) \\
            &\qquad + \frac{1}{100\pi} s \sqrt{4-s^2} (120 + 2320s^2 - 754s^4 + 132s^6 - 9s^8) \\
            T_-(s) &= \frac{3}{70\pi} s^3 (4-s^2)^{7/2} \\
            rmax &= 2R

        cf. Schneider, et al (2002): A&A, 389, 729

        .. note::

            This function is only implemented for Log binning.


        Parameters:
            R (array):      The R values at which to calculate the aperture mass statistics.
                            (default: None, which means use self.rnom)
            m2_uform (str): Which form to use for the aperture mass, as described above.
                            (default: 'Crittenden'; this value can also be given in the
                            constructor in the config dict.)

        Returns:
            Tuple containing

                - mapsq = array of :math:`\langle M_{ap}^2 \rangle(R)`
                - mapsq_im = the imaginary part of mapsq, which is an estimate of
                  :math:`\langle M_{ap} M_\times \rangle(R)`
                - mxsq = array of :math:`\langle M_\times^2 \rangle(R)`
                - mxsq_im = the imaginary part of mxsq, which is an estimate of
                  :math:`\langle M_{ap} M_\times \rangle(R)`
                - varmapsq = array of the variance estimate of either mapsq or mxsq
        """
        if m2_uform is None:
            m2_uform = treecorr.config.get(self.config,'m2_uform',str,'Crittenden')
        if m2_uform not in ['Crittenden', 'Schneider']:
            raise ValueError("Invalid m2_uform")
        if self.bin_type != 'Log':
            raise ValueError("calculateMapSq requires Log binning.")
        if R is None:
            R = self.rnom

        # Make s a matrix, so we can eventually do the integral by doing a matrix product.
        s = np.outer(1./R, self.meanr)
        ssq = s*s
        if m2_uform == 'Crittenden':
            exp_factor = np.exp(-ssq/4.)
            Tp = (32. + ssq*(-16. + ssq)) / 128. * exp_factor
            Tm = ssq * ssq / 128. * exp_factor
        else:
            Tp = np.zeros_like(s)
            Tm = np.zeros_like(s)
            sa = s[s<2.]
            ssqa = ssq[s<2.]
            Tp[s<2.] = 12./(5.*np.pi) * (2.-15.*ssqa) * np.arccos(sa/2.)
            Tp[s<2.] += 1./(100.*np.pi) * sa * np.sqrt(4.-ssqa) * (
                        120. + ssqa*(2320. + ssqa*(-754. + ssqa*(132. - 9.*ssqa))))
            Tm[s<2.] = 3./(70.*np.pi) * sa * ssqa * (4.-ssqa)**3.5
        Tp *= ssq
        Tm *= ssq

        # Now do the integral by taking the matrix products.
        # Note that dlogr = bin_size
        Tpxip = Tp.dot(self.xip)
        Tmxim = Tm.dot(self.xim)
        mapsq = (Tpxip + Tmxim) * 0.5 * self.bin_size
        mxsq = (Tpxip - Tmxim) * 0.5 * self.bin_size
        Tpxip_im = Tp.dot(self.xip_im)
        Tmxim_im = Tm.dot(self.xim_im)
        mapsq_im = (Tpxip_im + Tmxim_im) * 0.5 * self.bin_size
        mxsq_im = (Tpxip_im - Tmxim_im) * 0.5 * self.bin_size

        # The variance of each of these is
        # Var(<Map^2>(R)) = int_r=0..2R [1/4 s^4 dlogr^2 (T+(s)^2 + T-(s)^2) Var(xi)]
        varmapsq = (Tp**2).dot(self.varxip) + (Tm**2).dot(self.varxim)
        varmapsq *= 0.25 * self.bin_size**2

        return mapsq, mapsq_im, mxsq, mxsq_im, varmapsq
Example #32
            z[i] = np.dot(weights[i - 1], nodes[i - 1]) + bias[i - 1]
            nodes[i] = activation_func(z[i])

        delta_z[L] = cost(nodes[L], answer, True) * activation_func(z[L], True)
        '''
            delta_bias[L - 1] = np.array(delta_z[L])
            delta_weights[L - 1] = np.outer(delta_z[L], nodes[L - 1])
            
            delta_nodes[L - 1] = np.dot(delta_weights[L - 1], delta_z[L]) / LAYER_SIZE[L]
            delta_z[L - 1] = delta_nodes[L - 1] * activation_func(z[L - 1], True)
            delta_bias[L - 2]  = np.array(delta_z[L - 1])
            delta_weights[L - 2] = np.outer(delta_z[L - 1], nodes[L - 2])
        '''
        for i in range(1, L + 1):
            delta_bias[L - i] = np.array(delta_z[L + 1 - i])
            delta_weights[L - i] = np.outer(delta_z[L + 1 - i], nodes[L - i])
            delta_nodes[L - i] = np.dot(delta_weights[L - i],
                                        delta_z[L + 1 - i]) / LAYER_SIZE[L - i + 1]
            delta_z[L - i] = delta_nodes[L - i] * activation_func(z[L - i], True)

        total_delta_weights += delta_weights
        total_delta_bias += delta_bias

        #print(nodes)
        print(str(iter) + "::" + str(j) + str(nodes[L]))
    weights -= DAMP * total_delta_weights / len(inputs)
    bias -= DAMP * total_delta_bias / len(inputs)

# delta_weights[i][j][k] = - DAMP * cost_deriv[j] * activ_deriv[i + 1][j] * nodes[i][j][k]
Example #33
def cosine(mean_center_rating, **unused_kwargs):
    _filled_rating = mean_center_rating.filled(0)
    _c = np.dot(_filled_rating, _filled_rating.T)
    _diag = np.sqrt(np.diag(_c))
    _denom = np.outer(_diag, _diag)
    return _c / _denom
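Hypothetical usage: the function expects a numpy masked array of mean-centered ratings, since it calls .filled(0):

import numpy as np
import numpy.ma as ma

ratings = ma.masked_invalid([[1.0, -1.0, np.nan],
                             [0.5, np.nan, -0.5]])
sim = cosine(ratings)  # pairwise cosine similarity between rows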
Example #34
def griddata(filelist,
             pixPerBeam=3.5,
             templateHeader=None,
             gridFunction=jincGrid,
             startChannel=None,
             endChannel=None,
             rebase=None,
             rebaseorder=None,
             beamSize=None,
             flagSpatialOutlier=False,
             projection='TAN',
             outdir=None,
             outname=None,
             dtype=np.float64,
             gainDict=None,
             **kwargs):
    """Gridding code for GBT spectral scan data produced by pipeline.
    
    Parameters
    ----------
    filelist : list
        List of FITS files to be gridded into an output data cube.

    Keywords
    --------
    pixPerBeam : float
        Number of pixels per beam FWHM

    templateHeader : `Header` object
        Template header used for spatial pixel grid.

    gridFunction : function 
        Gridding function to be used.  The default `jincGrid` is a
        tapered circular Bessel function.  The function has call
        signature of func(xPixelCentre, yPixelCenter, xData, yData,
        pixPerBeam)

    startChannel : int
        Starting channel for spectrum within the original spectral data.

    endChannel : int
        End channel for spectrum within the original spectral data

    outdir : str
        Output directory name.  Defaults to current working directory.

    outname : str
        Output directory file name.  Defaults to object name in the
        original spectra.

    flagSpatialOutlier : bool
        Setting to True will remove scans with positions far outside the 
        bounding box of the regular scan pattern. Used to catch instances
        where the encoder records erroneous positions. 

    gainDict : dict 
        Dictionary that has a tuple of feed and polarization numbers
        as keys and returns the gain values for that feed.
    
    Returns
    -------
    None

    """

    eulerFlag = False
    print("Starting Gridding")
    if outdir is None:
        outdir = os.getcwd()

    if len(filelist) == 0:
        warnings.warn('There are no FITS files to process ')
        return
    # check that every file in the filelist is valid
    # If not then remove it and send warning message
    # iterate over a copy so removing entries below doesn't skip files
    for file_i in list(filelist):
        try:
            fits.open(file_i)
        except Exception:
            warnings.warn('file {0} is corrupted'.format(file_i))
            filelist.remove(file_i)

    # pull a test structure
    hdulist = fits.open(filelist[0])
    s = hdulist[1].data

    # Constants block
    sqrt2 = np.sqrt(2)
    mad2rms = 1.4826
    prefac = mad2rms / sqrt2
    c = 299792458.

    nu0 = s[0]['RESTFREQ']
    Data_Unit = s[0]['TUNIT7']

    if outname is None:
        outname = s[0]['OBJECT']

    # New Beam size measurements use 1.18 vs. 1.22 based on GBT Memo 296.

    if beamSize is None:
        beamSize = 1.18 * (c / nu0 / 100.0) * 180 / np.pi  # in degrees

    if startChannel is None:
        startChannel = 0
    if endChannel is None:
        endChannel = len(s[0]['DATA'])

    naxis3 = len(s[0]['DATA'][startChannel:endChannel])

    # Default behavior is to park the object velocity at
    # the center channel in the VRAD-LSR frame

    crval3 = s[0]['RESTFREQ'] * (1 - s[0]['VELOCITY'] / c)
    crpix3 = s[0]['CRPIX1'] - startChannel
    ctype3 = s[0]['CTYPE1']
    cdelt3 = s[0]['CDELT1']

    w = wcs.WCS(naxis=3)

    w.wcs.restfrq = nu0
    # We are forcing this conversion to make nice cubes.
    w.wcs.specsys = 'LSRK'
    w.wcs.ssysobs = 'TOPOCENT'

    if templateHeader is None:
        wcsdict = autoHeader(filelist,
                             beamSize=beamSize,
                             pixPerBeam=pixPerBeam,
                             projection=projection)
        w.wcs.crpix = [wcsdict['CRPIX1'], wcsdict['CRPIX2'], crpix3]
        w.wcs.cdelt = np.array([wcsdict['CDELT1'], wcsdict['CDELT2'], cdelt3])
        w.wcs.crval = [wcsdict['CRVAL1'], wcsdict['CRVAL2'], crval3]
        w.wcs.ctype = [wcsdict['CTYPE1'], wcsdict['CTYPE2'], ctype3]
        naxis2 = wcsdict['NAXIS2']
        naxis1 = wcsdict['NAXIS1']
        w.wcs.radesys = s[0]['RADESYS']
        w.wcs.equinox = s[0]['EQUINOX']

    else:
        w.wcs.crpix = [
            templateHeader['CRPIX1'], templateHeader['CRPIX2'], crpix3
        ]
        w.wcs.cdelt = np.array(
            [templateHeader['CDELT1'], templateHeader['CDELT2'], cdelt3])
        w.wcs.crval = [
            templateHeader['CRVAL1'], templateHeader['CRVAL2'], crval3
        ]
        w.wcs.ctype = [
            templateHeader['CTYPE1'], templateHeader['CTYPE2'], ctype3
        ]
        naxis2 = templateHeader['NAXIS2']
        naxis1 = templateHeader['NAXIS1']
        w.wcs.radesys = templateHeader['RADESYS']
        w.wcs.equinox = templateHeader['EQUINOX']
        pixPerBeam = np.abs(beamSize / w.pixel_scale_matrix[1, 1])
        if pixPerBeam < 3.5:
            warnings.warn('Template header requests {0}'.format(pixPerBeam) +
                          ' pixels per beam.')
        if (((w.wcs.ctype[0]).split('-'))[0] !=
            ((s[0]['CTYPE1']).split('-'))[0]):
            warnings.warn('Spectral data not in same frame as template header')
            eulerFlag = True

    outCube = np.zeros((int(naxis3), int(naxis2), int(naxis1)), dtype=dtype)
    outWts = np.zeros((int(naxis2), int(naxis1)), dtype=dtype)

    xmat, ymat = np.meshgrid(np.arange(naxis1),
                             np.arange(naxis2),
                             indexing='ij')
    xmat = xmat.reshape(xmat.size)
    ymat = ymat.reshape(ymat.size)
    xmat = xmat.astype(int)
    ymat = ymat.astype(int)

    ctr = 0

    for thisfile in filelist:
        print("Now processing {0}".format(thisfile))
        print("This is file {0} of {1}".format(ctr, len(filelist)))

        ctr += 1
        s = fits.open(thisfile)

        if len(s) < 2:
            warnings.warn("Corrupted file: {0}".format(thisfile))
            continue

        if len(s[1].data) == 0:
            warnings.warn("Corrupted file: {0}".format(thisfile))
            continue

        if flagSpatialOutlier:
            # Remove outliers in Lat/Lon space
            f = np.where(is_outlier(s[1].data['CRVAL2'], thresh=1.5) != True)
            s[1].data = s[1].data[f]
            f = np.where(is_outlier(s[1].data['CRVAL3'], thresh=1.5) != True)
            s[1].data = s[1].data[f]
        nuindex = np.arange(len(s[1].data['DATA'][0]))

        flagct = 0
        if eulerFlag:
            if 'GLON' in s[1].data['CTYPE2'][0]:
                inframe = 'galactic'
            elif 'RA' in s[1].data['CTYPE2'][0]:
                inframe = 'fk5'
            else:
                raise NotImplementedError
            if 'GLON' in w.wcs.ctype[0]:
                outframe = 'galactic'
            elif 'RA' in w.wcs.ctype[0]:
                outframe = 'fk5'
            else:
                raise NotImplementedError

            coords = SkyCoord(s[1].data['CRVAL2'],
                              s[1].data['CRVAL3'],
                              unit=(u.deg, u.deg),
                              frame=inframe)
            coords_xform = coords.transform_to(outframe)
            if outframe == 'fk5':
                longCoord = coords_xform.ra.deg
                latCoord = coords_xform.dec.deg
            elif outframe == 'galactic':
                longCoord = coords_xform.l.deg
                latCoord = coords_xform.b.deg
        else:
            longCoord = s[1].data['CRVAL2']
            latCoord = s[1].data['CRVAL3']

        spectra, outscan, specwts, tsys = preprocess(thisfile,
                                                     startChannel=startChannel,
                                                     endChannel=endChannel,
                                                     **kwargs)
        for i in range(len(spectra)):
            xpoints, ypoints, zpoints = w.wcs_world2pix(
                longCoord[i], latCoord[i], spectra[i]['CRVAL1'], 0)
            if (tsys[i] > 10) and (xpoints > 0) and (xpoints < naxis1) \
                    and (ypoints > 0) and (ypoints < naxis2):
                pixelWeight, Index = gridFunction(xmat, ymat, xpoints, ypoints,
                                                  pixPerBeam)
                vector = np.outer(outscan[i, :] * specwts[i, :],
                                  pixelWeight / tsys[i]**2)
                wts = pixelWeight / tsys[i]**2
                outCube[:, ymat[Index], xmat[Index]] += vector
                outWts[ymat[Index], xmat[Index]] += wts
        # Temporarily do a file write for every batch of scans.
        outWtsTemp = np.copy(outWts)
        outWtsTemp.shape = (1, ) + outWtsTemp.shape
        outCubeTemp = np.copy(outCube)
        outCubeTemp /= outWtsTemp
        hdr = fits.Header(w.to_header())

        hdr = addHeader_nonStd(hdr, beamSize, s[1].data)
        #
        hdu = fits.PrimaryHDU(outCubeTemp, header=hdr)
        hdu.writeto(outdir + '/' + outname + '.fits', overwrite=True)

    outWts.shape = (1, ) + outWts.shape
    outCube /= outWts

    # Create basic fits header from WCS structure
    hdr = fits.Header(w.to_header())
    # Add non standard fits keyword
    hdr = addHeader_nonStd(hdr, beamSize, s[1].data[0])
    hdr.add_history('Using GBTPIPE gridder version {0}'.format(__version__))
    hdu = fits.PrimaryHDU(outCube, header=hdr)
    hdu.writeto(outdir + '/' + outname + '.fits', overwrite=True)

    w2 = w.dropaxis(2)
    hdr2 = fits.Header(w2.to_header())
    hdu2 = fits.PrimaryHDU(outWts, header=hdr2)
    hdu2.writeto(outdir + '/' + outname + '_wts.fits', overwrite=True)

    if rebase:
        if rebaseorder is None:
            rebaseorder = blorder
        if 'NH3_11' in outname:
            Baseline.rebaseline(outdir + '/' + outname + '.fits',
                                windowFunction=Baseline.ammoniaWindow,
                                line='oneone',
                                blorder=rebaseorder,
                                **kwargs)

        elif 'NH3_22' in outname:
            Baseline.rebaseline(outdir + '/' + outname + '.fits',
                                windowFunction=Baseline.ammoniaWindow,
                                line='twotwo',
                                blorder=rebaseorder,
                                **kwargs)

        elif 'NH3_33' in outname:
            Baseline.rebaseline(outdir + '/' + outname + '.fits',
                                winfunc=Baseline.ammoniaWindow,
                                blorder=rebaseorder,
                                line='threethree',
                                **kwargs)
        else:
            Baseline.rebaseline(outdir + '/' + outname + '.fits',
                                blorder=rebaseorder,
                                windowFunction=Baseline.tightWindow,
                                **kwargs)
Example #35
 def applyLag(self, to, polyn):
   # 'p' is already in the digital domain;
   return np.outer( self.p**to, polyn )
Example #36
    def fit(self, **kwargs):
        """
        Estimate the EDR space.

        Parameters
        ----------
        slice_n : int
            Number of observations per slice
        """

        # Sample size per slice
        slice_n = kwargs.get("slice_n", 50)

        # Number of slices
        n_slice = self.exog.shape[0] // slice_n

        self._prep(n_slice)

        cv = [np.cov(z.T) for z in self._split_wexog]
        ns = [z.shape[0] for z in self._split_wexog]

        p = self.wexog.shape[1]

        if not self.bc:
            # Cook's original approach
            vm = 0
            for w, cvx in zip(ns, cv):
                icv = np.eye(p) - cvx
                vm += w * np.dot(icv, icv)
            vm /= len(cv)
        else:
            # The bias-corrected approach of Li and Zhu

            # \Lambda_n in Li, Zhu
            av = 0
            for c in cv:
                av += np.dot(c, c)
            av /= len(cv)

            # V_n in Li, Zhu
            vn = 0
            for x in self._split_wexog:
                r = x - x.mean(0)
                for i in range(r.shape[0]):
                    u = r[i, :]
                    m = np.outer(u, u)
                    vn += np.dot(m, m)
            vn /= self.exog.shape[0]

            c = np.mean(ns)
            k1 = c * (c - 1) / ((c - 1)**2 + 1)
            k2 = (c - 1) / ((c - 1)**2 + 1)
            av2 = k1 * av - k2 * vn

            vm = np.eye(p) - 2 * sum(cv) / len(cv) + av2

        a, b = np.linalg.eigh(vm)
        jj = np.argsort(-a)
        a = a[jj]
        b = b[:, jj]
        params = np.linalg.solve(self._covxr.T, b)

        results = DimReductionResults(self, params, eigs=a)
        return DimReductionResultsWrapper(results)
Example #37
    def calculateGamSq(self, R=None, eb=False):
        r"""Calculate the tophat shear variance from the correlation function.

        .. math::

            \langle \gamma^2 \rangle(R) &= \int_0^{2R} \frac{r dr}{R^2}
            S_+\left(\frac{r}{R}\right) \xi_+(r) \\
            \langle \gamma^2 \rangle_E(R) &= \int_0^{2R} \frac{r dr}{2 R^2}
            \left[ S_+\left(\frac{r}{R}\right) \xi_+(r) +
            S_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\
            \langle \gamma^2 \rangle_B(R) &= \int_0^{2R} \frac{r dr}{2 R^2}
            \left[ S_+\left(\frac{r}{R}\right) \xi_+(r) -
            S_-\left(\frac{r}{R}\right) \xi_-(r) \right] \\

            S_+(s) &= \frac{1}{\pi} \left(4 \arccos(s/2) - s \sqrt{4-s^2} \right) \\
            S_-(s) &= \begin{cases}
            \frac{1}{\pi s^4} \left(s \sqrt{4-s^2} (6-s^2) - 8(3-s^2) \arcsin(s/2)\right), & s \leq 2 \\
            \frac{4 (s^2-3)}{s^4}, & s \geq 2
            \end{cases}

        cf. Schneider, et al (2002): A&A, 389, 729

        The default behavior is not to compute the E/B versions.  They are calculated if
        eb is set to True.

        .. note::

            This function is only implemented for Log binning.


        Parameters:
            R (array):  The R values at which to calculate the shear variance.
                        (default: None, which means use self.rnom)
            eb (bool):  Whether to include the E/B decomposition as well as the total
                        :math:`\langle \gamma^2\rangle`.  (default: False)

        Returns:
            Tuple containing

                - gamsq = array of :math:`\langle \gamma^2 \rangle(R)`
                - vargamsq = array of the variance estimate of gamsq
                - gamsq_e  (Only if eb is True) = array of :math:`\langle \gamma^2 \rangle_E(R)`
                - gamsq_b  (Only if eb is True) = array of :math:`\langle \gamma^2 \rangle_B(R)`
                - vargamsq_e  (Only if eb is True) = array of the variance estimate of
                  gamsq_e or gamsq_b
        """
        if self.bin_type != 'Log':
            raise ValueError("calculateGamSq requires Log binning.")

        if R is None:
            R = self.rnom
        s = np.outer(1./R, self.meanr)
        ssq = s*s
        Sp = np.zeros_like(s)
        sa = s[s<2]
        ssqa = ssq[s<2]
        Sp[s<2.] = 1./np.pi * ssqa * (4.*np.arccos(sa/2.) - sa*np.sqrt(4.-ssqa))

        # Now do the integral by taking the matrix products.
        # Note that dlogr = bin_size
        Spxip = Sp.dot(self.xip)
        gamsq = Spxip * self.bin_size
        vargamsq = (Sp**2).dot(self.varxip) * self.bin_size**2

        # Stop here if eb is False
        if not eb: return gamsq, vargamsq

        Sm = np.empty_like(s)
        Sm[s<2.] = 1./(ssqa*np.pi) * (sa*np.sqrt(4.-ssqa)*(6.-ssqa)
                                                 -8.*(3.-ssqa)*np.arcsin(sa/2.))
        Sm[s>=2.] = 4.*(ssq[s>=2]-3.)/ssq[s>=2]
        # This already includes the extra ssq factor.

        Smxim = Sm.dot(self.xim)
        gamsq_e = (Spxip + Smxim) * 0.5 * self.bin_size
        gamsq_b = (Spxip - Smxim) * 0.5 * self.bin_size
        vargamsq_e = (Sp**2).dot(self.varxip) + (Sm**2).dot(self.varxim)
        vargamsq_e *= 0.25 * self.bin_size**2

        return gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_e
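A minimal usage sketch for calculateGamSq, assuming a treecorr-style
GGCorrelation with the default Log binning; the catalog file name and column
names here are illustrative placeholders:

import treecorr

cat = treecorr.Catalog('shear_cat.fits', ra_col='RA', dec_col='DEC',
                       g1_col='G1', g2_col='G2',
                       ra_units='deg', dec_units='deg')
gg = treecorr.GGCorrelation(min_sep=1., max_sep=100., bin_size=0.1,
                            sep_units='arcmin')   # bin_type='Log' is the default
gg.process(cat)
gamsq, vargamsq = gg.calculateGamSq()
gamsq, vargamsq, gamsq_e, gamsq_b, vargamsq_e = gg.calculateGamSq(eb=True)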
Example #38
0
def cal_pca(point_cloud,
            is_show=False,
            desired_num_of_feature=3,
            title="pca demo"):
    pca = PCA(n_components=desired_num_of_feature)
    pca.fit(point_cloud)
    # print("Principal vectors: ",pca.components_)
    # print("Singular values: ",pca.explained_variance_)
    if is_show:
        fig = plt.figure()
        ax = fig.add_subplot(111, projection='3d')
        ax.set_xlabel('X Label(unit:m)')
        ax.set_ylabel('Y Label(unit:m)')
        ax.set_zlabel('Z Label(unit:m)')
        plt.title(title)
        ax.scatter(point_cloud[:, 0],
                   point_cloud[:, 1],
                   point_cloud[:, 2],
                   c='y',
                   s=1)
        xm, ym, zm = get_centroid_from_pc(point_cloud)
        ax.scatter(xm, ym, zm, c='r', s=10)
        print("*" * 30)
        print("z 向量 %f ,%f ,%f" %
              (pca.components_[2, 0], pca.components_[2, 1],
               pca.components_[2, 2]))
        if (np.inner(pca.components_[2, :], [0, 0, 1]) > 0):
            print("pca_z向量與z方向同向,需要對x軸旋轉180度")
            pca.components_[2, :] = -pca.components_[2, :]
            # r = R.from_euler('x',180, degrees=True)
            # r_b_o=R.from_dcm(pca.components_.T)
            # r3=r_b_o*r
            # pca.components_=r3.as_dcm().T
        # Cross product of the y and z axes should reproduce x; compare it
        # with the first principal axis to verify the frame is correct.
        x_axis_matrix = np.outer(pca.components_[1, :], pca.components_[2, :])
        x_axis = np.asarray([
            x_axis_matrix[1, 2] - x_axis_matrix[2, 1],
            x_axis_matrix[2, 0] - x_axis_matrix[0, 2],
            x_axis_matrix[0, 1] - x_axis_matrix[1, 0]
        ])
        print("*" * 30)
        print("外積計算的x軸為:")
        print(x_axis)

        # Confirm that pca x is aligned with the x computed from cross(y, z)
        if (np.allclose(pca.components_[0, :], x_axis)):
            print("pca x is aligned with cross(y, z)")
        else:
            # Opposite direction: flip the (least important) x axis
            print("x direction is wrong; replacing it with the corrected axis")
            pca.components_[0, :] = x_axis
        if (np.inner(pca.components_[0, :], [1, 0, 0]) < 0):
            # Keep the gripper facing forward so the end effector need not rotate far
            print("pca x vector opposes +x; rotating 180 degrees about the z axis")
            r = R.from_euler('z', 180, degrees=True)
            r_b_o = R.from_dcm(pca.components_.T)
            r3 = r_b_o * r
            pca.components_ = r3.as_dcm().T
        discount = 1
        print("*" * 30)
        for length, vector in zip(pca.explained_variance_, pca.components_):
            ax.quiver(xm,
                      ym,
                      zm,
                      vector[0],
                      vector[1],
                      vector[2],
                      length=discount)
            discount /= 3

        plt.show()
    return pca.components_, pca.explained_variance_
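The block above rebuilds a cross product from the antisymmetric part of an
outer product; a quick standalone check of that identity, pure numpy:

import numpy as np

a = np.array([0., 1., 0.])
b = np.array([0., 0., 1.])
m = np.outer(a, b)
cross_via_outer = np.array([m[1, 2] - m[2, 1],
                            m[2, 0] - m[0, 2],
                            m[0, 1] - m[1, 0]])
assert np.allclose(cross_via_outer, np.cross(a, b))   # both give [1, 0, 0]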
Example #39
0
    def getDataObject(self, key, selection=None):
        """
        Parameters:
        * key: key to be read from source. It is a string
              using the following formats:

            "s.o": loads all counter values (s=scan number, o=order)
              - if ScanType==SCAN: in a 2D array (mot*cnts)
              - if ScanType==MESH: in a 3D array (mot1*mot2*cnts)
              - if ScanType==MCA: single MCA in 1D array (0:channels)

            "s.o.n": loads a single MCA in a 1D array (0:channels)
              - if ScanType==NMCA: n is the MCA number from 1 to N
              - if ScanType==SCAN+MCA: n is the scan point number (from 1)
              - if ScanType==MESH+MCA: n is the scan point number (from 1)

            "s.o.p.n": loads a single MCA in a 1D array (0:channels)
              - if ScanType==SCAN+NMCA:
                      p is the point number in the scan
                      n is the MCA device number
              - if ScanType==MESH+MCA:
                      p is first motor index
                      n is second motor index

            "s.o.MCA": loads all MCA in an array
              - if ScanType==SCAN+MCA: 2D array (pts*mca)
              - if ScanType==NMCA: 2D array (mca_det*mca)
              - if ScanType==MESH+MCA: 3D array (pts_mot1*pts_mot2*mca)
              - if ScanType==SCAN+NMCA: 3D array (pts_mot1*mca_det*mca)
              - if ScanType==MESH+NMCA:
                      creates N data page, one for each MCA device,
                      with a 3D array (pts_mot1*pts_mot2*mca)
        """
        key_type = self.__getKeyType(key)
        if key_type == "scan":
            scan_key = key
        elif key_type == "mca":
            (scan_key, mca_no) = self.__getMcaPars(key)
        if self.__source_info_cached is None:
            sourceinfo = self.getSourceInfo()
            sourcekeys = sourceinfo['KeyList']
        else:
            sourceinfo = self.__source_info_cached
            sourcekeys = sourceinfo['KeyList']
            if scan_key not in sourcekeys:
                sourceinfo = self.getSourceInfo()
                sourcekeys = sourceinfo['KeyList']
        if scan_key not in sourcekeys:
            raise KeyError("Key %s not in source keys" % key)

        mca3D = False
        if DEBUG:
            print("SELECTION = ", selection)
            print("key_type = ", key_type)
        if key_type == "scan":
            if selection is not None:
                if 'mcalist' in selection:
                    mca3D = True

        if (key_type == "scan") and (not mca3D):
            output = self._getScanData(key, raw=True)
            output.x = None
            output.y = None
            output.m = None
            output.info['selection'] = selection
            if selection is None:
                output.info['selectiontype'] = "2D"
                return output
            elif type(selection) != type({}):
                #I only understand index selections
                raise TypeError(
                    "Only selections of type {x:[],y:[],m:[]} understood")
            else:
                if 'x' in selection:
                    indexlist = []
                    for labelindex in selection['x']:
                        if labelindex != 0:
                            if 'cntlist' in selection:
                                label = selection['cntlist'][labelindex]
                            else:
                                label = output.info['LabelNames'][labelindex]
                        else:
                            label = output.info['LabelNames'][labelindex]
                        if label not in output.info['LabelNames']:
                            raise ValueError("Label %s not in scan labels" %
                                             label)
                        index = output.info['LabelNames'].index(label)
                        if output.x is None:
                            output.x = []
                        output.x.append(output.data[:, index])
                        indexlist.append(index)
                    output.info['selection']['x'] = indexlist
                if 'y' in selection:
                    indexlist = []
                    for labelindex in selection['y']:
                        if 'cntlist' in selection:
                            label = selection['cntlist'][labelindex]
                        else:
                            label = output.info['LabelNames'][labelindex]
                        if label not in output.info['LabelNames']:
                            raise ValueError("Label %s not in scan labels" %
                                             label)
                        index = output.info['LabelNames'].index(label)
                        if output.y is None:
                            output.y = []
                        output.y.append(output.data[:, index])
                        indexlist.append(index)
                    output.info['selection']['y'] = indexlist
                if 'm' in selection:
                    indexlist = []
                    for labelindex in selection['m']:
                        if 'cntlist' in selection:
                            label = selection['cntlist'][labelindex]
                        else:
                            label = output.info['LabelNames'][labelindex]
                        if label not in output.info['LabelNames']:
                            raise ValueError("Label %s not in scan labels" %
                                             label)
                        index = output.info['LabelNames'].index(label)
                        if output.m is None:
                            output.m = []
                        output.m.append(output.data[:, index])
                        indexlist.append(index)
                    output.info['selection']['m'] = indexlist
                output.info['selection']['cntlist'] = output.info['LabelNames']
                output.info['selectiontype'] = "1D"
                if output.x is not None:
                    output.info['selectiontype'] = "%dD" % len(output.x)
                output.data = None
        elif key_type == "mca":
            output = self._getMcaData(key)
            selectiontype = "1D"
            if selection is not None:
                selectiontype = selection.get('selectiontype', "1D")
            output.info['selectiontype'] = selectiontype
            if output.info['selectiontype'] not in ['2D', '3D', 'STACK']:
                ch0 = int(output.info['Channel0'])
                output.x = [
                    numpy.arange(ch0,
                                 ch0 + len(output.data)).astype(numpy.float64)
                ]
                output.y = [output.data[:].astype(numpy.float64)]
                output.m = None
                output.data = None
            else:
                output.x = None
                output.y = None
                output.m = None
                output.data = None
                npoints = output.info['NbMca'] // output.info['NbMcaDet']
                index = 0
                scan_obj = self._sourceObjectList[index].select(scan_key)
                SPECFILE = True
                if isinstance(self._sourceObjectList[index],
                              specfile.specfilewrapper):
                    SPECFILE = False
                for i in range(npoints):
                    if SPECFILE:
                        wmca_no = mca_no + output.info['NbMcaDet'] * i
                        mcaData = scan_obj.mca(wmca_no)
                    else:
                        mca_key = '%s.%d' % (scan_key, mca_no)
                        mcaData = self._getMcaData(mca_key).data
                    if i == 0:
                        nChannels = mcaData.shape[0]
                        output.data = numpy.zeros((npoints, nChannels),
                                                  numpy.float32)
                    output.data[i, :] = mcaData
                #I have all the MCA data ready for image plot
                if selectiontype == 'STACK':
                    output.data.shape = 1, npoints, -1
                    shape = output.data.shape
                    for i in range(len(shape)):
                        key = 'Dim_%d' % (i + 1, )
                        output.info[key] = shape[i]
                    output.info["SourceType"] = "SpecFileStack"
                    output.info["SourceName"] = self.sourceName
                    output.info["Size"] = shape[0] * shape[1]
                    output.info["NumberOfFiles"] = 1
                    output.info["FileIndex"] = 1
        elif (key_type == "scan") and mca3D:
            output = self._getScanData(key, raw=True)
            output.x = None
            output.y = None
            output.m = None
            #get the number of counters in the scan
            if 'cntlist' in selection:
                ncounters = len(selection['cntlist'])
            else:
                ncounters = len(output.info['LabelNames'])

            # For the time being assume only one mca can be selected
            detectorNumber = selection['y'][0] - ncounters

            #read the first mca data of the first point
            mca_key = '%s.%d.%d' % (key, 1 + detectorNumber, 1)
            mcaData = self._getMcaData(mca_key)
            ch0 = int(mcaData.info['Channel0'])
            calib = mcaData.info['McaCalib']
            nChannels = mcaData.data.shape[0]
            channels = numpy.arange(nChannels) + ch0

            #apply the calibration
            channels = calib[0] + calib[1] * channels +\
                       calib[2] * channels * channels

            ones = numpy.ones(nChannels)
            #get the different x components
            xselection = selection.get('x', [])
            if len(xselection) != 2:
                raise ValueError("You have to select two X axes")
            indexlist = []
            for labelindex in xselection:
                if labelindex != 0:
                    if 'cntlist' in selection:
                        label = selection['cntlist'][labelindex]
                    else:
                        label = output.info['LabelNames'][labelindex]
                else:
                    label = output.info['LabelNames'][labelindex]
                if label not in output.info['LabelNames']:
                    raise ValueError("Label %s not in scan labels" % label)
                index = output.info['LabelNames'].index(label)
                if output.x is None:
                    output.x = []
                output.x.append(output.data[:, index])
                indexlist.append(index)
            npoints = output.x[0].shape[0]
            output.info['selection'] = selection
            output.info['selection']['x'] = indexlist
            for i in range(len(output.x)):
                output.x[i] = numpy.outer(output.x[i], ones).flatten()
            tmp = numpy.outer(channels, numpy.ones(npoints)).flatten()
            output.x.append(tmp)
            output.y = [numpy.zeros(nChannels * npoints, numpy.float64)]
            for i in range(npoints):
                mca_key = '%s.%d.%d' % (key, 1 + detectorNumber, i + 1)
                mcaData = self._getMcaData(mca_key)
                output.y[0][(i * nChannels):((i + 1) *
                                             nChannels)] = mcaData.data[:]
            if 'm' in selection:
                indexlist = []
                for labelindex in selection['m']:
                    if 'cntlist' in selection:
                        label = selection['cntlist'][labelindex]
                    else:
                        label = output.info['LabelNames'][labelindex]
                    if label not in output.info['LabelNames']:
                        raise ValueError("Label %s not in scan labels" % label)
                    index = output.info['LabelNames'].index(label)
                    if output.m is None:
                        output.m = []
                    output.m.append(output.data[:, index])
                    indexlist.append(index)
                output.info['selection']['m'] = indexlist
                if output.m is not None:
                    output.m[0] = numpy.outer(output.m[0], ones).flatten()
            output.info['selection']['cntlist'] = output.info['LabelNames']
            output.info['selectiontype'] = "3D"
            output.info[
                'LabelNames'] = selection['cntlist'] + selection['mcalist']

            output.data = None
        return output
Example #40
0
    l[i + 1:, i] = b / l[i, i]

    sub = n - i
    simd += sub // 4 + sub % 4
    simd += sum(j // 4 + j % 4 for j in range(1, sub)) * 4

    compute += (sub - 1) * (sub - 2) // 2
    starting += sub + 12
    finish = max(finish, starting + (sub - 1) * (sub - 2) // 2)

    aa = a.copy()
    aa[i, i] = 1
    aa[i, i + 1:] = numpy.zeros(n - i - 1)
    aa[i + 1:, i] = numpy.zeros(n - i - 1)
    aa[i + 1:,
       i + 1:] = a[i + 1:, i + 1:] - numpy.outer(numpy.conj(b), b) / a[i, i]
    # Mathematically this step is L = L * l, but because of the special
    # structure of L and l we can simply copy the corresponding column into L.
    L[i:, i] = l[i:, i]
    #L = numpy.dot(L, l)
    a = aa

numpy.testing.assert_allclose(origin,
                              numpy.dot(numpy.conj(L), L.transpose()),
                              rtol=1e-4)
print("Correctness check pass!")

output.print_complex_array('ref.data', L.flatten())
print("New data generated!")

init = 12 * n
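The fragment above is one step of a right-looking Cholesky factorization: the
rank-one update numpy.outer(numpy.conj(b), b) / a[i, i] eliminates column i
from the trailing submatrix. A self-contained sketch of the same scheme (the
performance counters simd/compute/starting/finish are omitted):

import numpy as np

def cholesky_outer(a):
    # Right-looking Cholesky via rank-one outer-product updates: a = L @ L^H
    a = a.astype(complex).copy()
    n = a.shape[0]
    L = np.zeros_like(a)
    for i in range(n):
        L[i, i] = np.sqrt(a[i, i].real)
        b = a[i + 1:, i]
        L[i + 1:, i] = b / L[i, i]
        # eliminate column i from the trailing submatrix
        a[i + 1:, i + 1:] -= np.outer(b, np.conj(b)) / a[i, i]
    return L

rng = np.random.RandomState(0)
m = rng.randn(5, 5) + 1j * rng.randn(5, 5)
spd = m @ np.conj(m).T + 5 * np.eye(5)
L = cholesky_outer(spd)
np.testing.assert_allclose(spd, L @ np.conj(L).T, rtol=1e-8)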
Example #41
0
def sparsery(ops):
    rez, max_proj = get_mov(ops)
    ops['max_proj'] = max_proj
    nframes, Ly, Lx = rez.shape
    ops['Lyc'] = Ly
    ops['Lxc'] = Lx
    sdmov = get_sdmov(rez, ops)
    rez /= sdmov
    #rez *= -1

    lx = [ops['spatial_hp']]
    c1 = square_conv2(np.ones((Ly,Lx)), lx)
    movu = square_conv2(rez, lx)

    rez -= movu/c1

    LL = np.meshgrid(np.arange(Lx), np.arange(Ly))
    Lyp = np.zeros(5, 'int32')
    Lxp = np.zeros(5,'int32')
    gxy = [np.array(LL).astype('float32')]
    dmov = rez
    movu = []

    for j in range(5):
        movu.append(square_conv2(dmov, [3]))
        dmov = 2 * downsample(dmov)
        gxy0 = downsample(gxy[j], False)
        gxy.append(gxy0)
        nfr, Lyp[j], Lxp[j] = movu[j].shape
        movu[j] = np.reshape(movu[j], (nfr,-1))

    nfr, Lyc,Lxc = rez.shape
    V0 = []
    ops['Vmap']  = []
    for j in range(len(movu)):
        V0.append(np.amax(movu[j], axis=0))
        #V0.append(np.sum(movu[j]**2 * np.float32(movu[j]>Th2), axis=0)**.5)
        V0[j] = np.reshape(V0[j], (Lyp[j], Lxp[j]))
        ops['Vmap'].append(V0[j].copy())
    I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2]))
    for t in range(1,len(gxy)-1):
        gmodel = RectBivariateSpline(gxy[t][1,:,0], gxy[t][0, 0,:], ops['Vmap'][t],
                                     kx=min(3, gxy[t][1,:,0].size-1), ky=min(3, gxy[t][0,0,:].size-1))
        I[t] = gmodel.__call__(gxy[0][1,:,0], gxy[0][0, 0,:])
    I0 = np.amax(I, axis=0)
    ops['Vcorr'] = I0
    imap = np.argmax(I, axis=0).flatten()
    ipk = np.abs(I0 - maximum_filter(I0, size=(11,11))).flatten() < 1e-4
    isort = np.argsort(I0.flatten()[ipk])[::-1]
    im, nm = mode(imap[ipk][isort[:50]])
    if ops['spatial_scale'] > 0:
        im = max(1, min(4, ops['spatial_scale']))
        fstr = 'FORCED'
    else:
        fstr = 'estimated'

    if im==0:
        print('ERROR: best scale was 0, everything should break now!')
    Th2 = ops['threshold_scaling']*5*max(1,im)
    vmultiplier = max(1, np.float32(rez.shape[0])/1200)
    print('NOTE: %s spatial scale ~%d pixels, time epochs %2.2f, threshold %2.2f '%(fstr, 3*2**im, vmultiplier, vmultiplier*Th2))
    ops['spatscale_pix'] = 3*2**im

    V0 = []
    ops['Vmap']  = []
    for j in range(len(movu)):
        #V0.append(np.amax(movu[j], axis=0))
        V0.append(np.sum(movu[j]**2 * np.float32(movu[j]>Th2), axis=0)**.5)
        V0[j] = np.reshape(V0[j], (Lyp[j], Lxp[j]))
        ops['Vmap'].append(V0[j].copy())
    I = np.zeros((len(gxy), gxy[0].shape[1], gxy[0].shape[2]))
    for t in range(1,len(gxy)-1):
        gmodel = RectBivariateSpline(gxy[t][1,:,0], gxy[t][0, 0,:], ops['Vmap'][t],
                                     kx=min(3, gxy[t][1,:,0].size-1), ky=min(3, gxy[t][0,0,:].size-1))
        I[t] = gmodel.__call__(gxy[0][1,:,0], gxy[0][0, 0,:])
    I0 = np.amax(I, axis=0)
    ops['Vcorr'] = I0


    xpix,ypix,lam = [],[],[]
    rez = np.reshape(rez, (-1,Ly*Lx))
    lxs = 3 * 2**np.arange(5)
    nscales = len(lxs)

    niter = 250 * ops['max_iterations']
    Vmax = np.zeros((niter))
    ihop = np.zeros((niter))
    vrat = np.zeros((niter))
    Npix = np.zeros((niter))

    t0 = tic()

    for tj in range(niter):
        v0max = np.array([np.amax(V0[j]) for j in range(5)])
        imap = np.argmax(v0max)
        imax = np.argmax(V0[imap])
        yi, xi = np.unravel_index(imax, (Lyp[imap], Lxp[imap]))
        yi, xi = gxy[imap][1,yi,xi], gxy[imap][0,yi,xi]

        Vmax[tj] = np.amax(v0max)
        if Vmax[tj] < vmultiplier*Th2:
            break
        ls = lxs[imap]

        ihop[tj] = imap

        ypix0, xpix0, lam0 = add_square(int(yi),int(xi),ls,Ly,Lx)
        xproj = rez[:, ypix0*Lx+ xpix0] @ lam0
        goodframe = np.nonzero(xproj>Th2)[0]
        for j in range(3):
            ypix0, xpix0, lam0 = iter_extend(ypix0, xpix0, rez, Ly,Lx, goodframe)
            xproj = rez[:, ypix0*Lx+ xpix0] @ lam0
            goodframe = np.nonzero(xproj>Th2)[0]
            if len(goodframe)<1:
                break
        if len(goodframe)<1:
            break
        vrat[tj], ipack = two_comps(rez[:, ypix0*Lx+ xpix0], lam0, Th2)
        if vrat[tj]>1.25:
            lam0, xp, goodframe = ipack
            xproj[goodframe] = xp
            ix = lam0>lam0.max()/5
            xpix0 = xpix0[ix]
            ypix0 = ypix0[ix]
            lam0 = lam0[ix]
        # update residual on raw movie
        rez[np.ix_(goodframe, ypix0*Lx+ xpix0)] -= xproj[goodframe][:,np.newaxis] * lam0
        # update filtered movie
        ys, xs, lms = multiscale_mask(ypix0,xpix0,lam0, Lyp, Lxp)
        for j in range(nscales):
            movu[j][np.ix_(goodframe,xs[j]+Lxp[j]*ys[j])] -= np.outer(xproj[goodframe], lms[j])
            #V0[j][xs[j] + Lxp[j]*ys[j]] = np.amax(movu[j][:,xs[j]+Lxp[j]*ys[j]], axis=0)
            Mx = movu[j][:,xs[j]+Lxp[j]*ys[j]]
            #V0[j][xs[j] + Lxp[j]*ys[j]] = np.sum(Mx**2 * np.float32(Mx>Th2), axis=0)**.5
            V0[j][ys[j], xs[j]] = np.sum(Mx**2 * np.float32(Mx>Th2), axis=0)**.5
            #V0[j][xs[j] + Lxp[j]*ys[j]] = np.sum(movu[j][:,xs[j]+Lxp[j]*ys[j]]**2 * np.float32(movu[j][:,xs[j]+Lxp[j]*ys[j]]>Th2), axis=0)**.5

        xpix.append(xpix0)
        ypix.append(ypix0)
        lam.append(lam0)
        if tj%1000==0:
            print('%d ROIs, score=%2.2f'%(tj, Vmax[tj]))
    #print(tj, time.time()-t0, Vmax[tj])
    ops['Vmax'] = Vmax
    ops['ihop'] = ihop
    ops['Vsplit'] = vrat
    stat  = [{'ypix':ypix[n], 'lam':lam[n]*sdmov[ypix[n], xpix[n]], 'xpix':xpix[n]} for n in range(len(xpix))]

    stat = get_stat(ops, stat)
    return ops,stat
Example #42
0
def _compute_pam_sd(wcs, shape=None, blc=(1, 1), idcscale=1.0, cdscale=1.0):
    """
    Computes Pixel Area Map (PAM) using the distortion model defined in WCS
    and described through Simple Image Polynomials (SIP) by computing
    the Jacobian of the distortion model.

    This function computes the Jacobian of the distortion model using
    *symbolic differentiation* of Simple Image Polynomials.

    Parameters
    ----------

    wcs: astropy.wcs.WCS, stwcs.wcsutil.HSTWCS
        A ``WCS`` object containing the distortion model.

    shape: tuple of int, None, optional
        A tuple of two integers (ny, nx) indicating the size of the PAM image
        to be generated. When the default value is used (`None`), the size
        of the returned PAM array will be determined from ``wcs.array_shape``
        attribute of the supplied ``WCS`` object.

    blc: tuple of int or float, optional
        A tuple indicating the coordinates of the bottom-left pixel of the
        PAM array to be computed. These coordinates should be given
        in the image coordinate system defined by the input ``WCS`` (in which,
        for example, ``WCS.crpix`` is defined). The first element specifies
        the column (``"x"``-coordinate) and the second element specifies
        the row (``"y"``-coordinate).

    idcscale: float, optional
        A positive number indicating the pixel scale used in the
        "Instrument Distortion Correction" for HST instruments. For
        non-HST instruments this parameter may be set to be equal
        to ``cdscale``.

    cdscale: float, optional
        A positive number indicating the pixel scale as computed from the
        CD matrix. For HST instruments, the CD matrix includes linear
        distortion terms.

    Returns
    -------
    PAM: numpy.ndarray
        Pixel area map.

    """
    if shape is None:
        shape = wcs.array_shape

    # rescale factor:
    rf = (cdscale / idcscale)**2

    # distortion does not exist or is linear:
    if wcs.sip is None or wcs.sip.a_order < 1 or wcs.sip.b_order < 1 or \
       (wcs.sip.a_order == 1 and wcs.sip.b_order == 1):
        return rf * np.ones(shape, dtype=np.float64)

    # prepare coordinates:
    x = np.arange(shape[1], dtype=float) - wcs.sip.crpix[0] + float(blc[0])
    y = np.arange(shape[0], dtype=float) - wcs.sip.crpix[1] + float(blc[1])

    ar = np.arange(wcs.sip.a_order + 1)
    br = np.arange(wcs.sip.b_order + 1)

    ones_a = np.ones(wcs.sip.a_order + 1)
    ones_b = np.ones(wcs.sip.b_order + 1)

    # "coordinate vectors" (e.g., (1, x, x**2, x**3, ...)) used in
    # distortion bilinear forms:
    ax = np.outer(x, ones_a)**ar
    ay = np.outer(y, ones_a)**ar
    bx = np.outer(x, ones_b)**br
    by = np.outer(y, ones_b)**br

    # derivatives of the "coordinate vectors" with regard to x & y:
    adx = np.roll(ax, 1, 1) * ar
    ady = np.roll(ay, 1, 1) * ar
    bdx = np.roll(bx, 1, 1) * br
    bdy = np.roll(by, 1, 1) * br

    # derivatives of the bilinear forms:
    A = wcs.sip.a.T
    B = wcs.sip.b.T
    dadx = 1.0 + np.tensordot(ay.T, np.tensordot(A, adx, (1, 1)), (0, 0))
    dady = np.tensordot(ady.T, np.tensordot(A, ax, (1, 1)), (0, 0))
    dbdx = np.tensordot(by.T, np.tensordot(B, bdx, (1, 1)), (0, 0))
    dbdy = 1.0 + np.tensordot(bdy.T, np.tensordot(B, bx, (1, 1)), (0, 0))

    # compute rescaled Jacobian
    jacobian = rf * np.abs(dadx * dbdy - dady * dbdx)

    return jacobian
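The "coordinate vectors" assembled above with np.outer are just Vandermonde
rows (1, x, x**2, ...); a quick standalone check of that equivalence:

import numpy as np

x = np.arange(5, dtype=float)
order = 3
powers = np.arange(order + 1)
ax = np.outer(x, np.ones(order + 1))**powers   # same construction as above
assert np.allclose(ax, np.vander(x, order + 1, increasing=True))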
Example #43
0

    #set_num_threads(3)

    if ex_c:
        print('\n'+20*'-'+'2x2 lattice'+'-'*20+'\n')

        # Initial conditions for Monte Carlo simulation
        max_cycles = 1e7     # Max MC cycles
        L          = 2       # Number of spins
        temp       = 1       # [kT/J] Temperature
        J          = 1       # binding constant

        log_scale = np.logspace(2, int(np.log10(max_cycles)),\
                               (int(np.log10(max_cycles))-1), endpoint=True)
        MC_runs   = np.outer(log_scale, [1,5]).flatten() # taking the outer product
        MC_runs   = MC_runs[1:-1]                        # removing first and last value

        n_sim = len(MC_runs)

        print(f'Running {n_sim} simulations, max Monte-Carlo cycles:\n{MC_runs}')

        # Analytic solutions
        A_E, A_Cv, A_M, A_X, A_MAbs = Analytical_2x2(J, L, temp)
        Analyticals  = DataFrameSolution(A_E, A_Cv, A_M, A_X, A_MAbs)

        # Numerical solutions
        list_num_dfs = twoXtwo(L, temp, MC_runs)
        Numericals   = pd.concat(list_num_dfs)

        print('\nTable of Analytical Solutions of 2x2 Ising-Model:','\n'+'-'*49+'\n')
Example #44
0
import matplotlib
matplotlib.rc('xtick', labelsize=25) 
matplotlib.rc('ytick', labelsize=25) 

# %% low-rank network test
N = 30
dt = 0.1
T = 1000
lt = int(T/dt)
n = np.random.randn(N)
m = np.random.randn(N)
g= .1
tau = 0.1
noise = 0.
J = np.outer(n,m)/N + g**2*np.random.randn(N,N)/N
xs = np.zeros((N,lt))
rs = np.zeros((N,lt))
Is = np.ones((N,lt))#np.random.randn(N,lt)
for tt in range(lt-1):
    rs[:,tt] = np.tanh(xs[:,tt])
    xs[:,tt+1] = xs[:,tt] +  dt*(1/tau)*(-xs[:,tt] + J @ rs[:,tt] + n/N*Is[:,tt] + noise*np.random.randn(N)*np.sqrt(dt))
    
plt.figure()
plt.imshow(rs,aspect='auto')

# %%
def LowD(Is, J, dt, T, noise=0):
    N = J.shape[0]
    lt = int(T/dt)
    xs = np.zeros((N,lt))
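    # (The source is truncated here; a plausible completion, assuming LowD
    # mirrors the Euler loop above, reuses the global tau defined there, and
    # expects the input Is to already include any n/N projection.)
    rs = np.zeros((N,lt))
    for tt in range(lt-1):
        rs[:,tt] = np.tanh(xs[:,tt])
        xs[:,tt+1] = xs[:,tt] + dt*(1/tau)*(-xs[:,tt] + J @ rs[:,tt] + Is[:,tt]
                                            + noise*np.random.randn(N)*np.sqrt(dt))
    rs[:,-1] = np.tanh(xs[:,-1])
    return xs, rs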
Example #45
0
    def calculate_BLOSUM(self):
        '''
        I'm using amino acid substitution matrices from protein blocks, that is, the original
        BLOSUM paper. Since coverage varies from amino acid to amino acid, each amino acid is
        considered its own block within the language used in the paper. This means w = 1, and the
        number of blocks equals the number of unique position identifiers within the variability table.
        https://www.ncbi.nlm.nih.gov/pmc/articles/PMC50453/pdf/pnas01096-0363.pdf, and a good
        explanation can be found here:
        http://www.cs.columbia.edu/4761/assignments/assignment1/reference1.pdf

        Q is the matrix of qij elements in the first equation of the paper
        P is the matrix of pij elements in the second equation of the paper
        '''

        # for debugging
        #self.items = ["A", "S"]
        #self.master = pd.DataFrame(np.array([[9, 1], [0, 0]]), columns=self.items, index=self.items)

        # CALCULATING Q
        # ===========

        for first in self.items:
            for second in self.items:
                # this condition ensures no double counting, i.e j <= i in equation 1
                if self.items.index(second) > self.items.index(first):
                    self.master[first + second] = 0
                    continue
                if first == second:
                    self.master[first + second] = self.master[first] * (
                        self.master[first] - 1) / 2.0
                else:
                    self.master[
                        first +
                        second] = self.master[first] * self.master[second]

        # The frequency table has redundancy, i.e. accounts for AB and BA
        frequency_table = np.asarray(
            self.master.loc[:, self.items[0] + self.items[0]:self.items[-1] +
                            self.items[-1]].sum(axis=0))

        # just what equation 1 is
        self.Q = frequency_table.reshape((len(self.items), len(self.items)))
        self.Q = self.Q / np.sum(self.Q)

        # CALCULATING P
        # ===========

        # this one's a thinker
        self.P = np.sum(self.Q, axis=1) / 2 + np.sum(self.Q, axis=0) / 2

        # CALCULATING E
        # ===========

        # this one's even more of a thinker
        outer_product = np.outer(self.P, self.P)
        self.E = np.tril(2 * outer_product -
                         np.identity(len(self.P)) * self.P**2)

        # CALCULATING S
        # ===========

        self.S = np.round(2 * np.log2(self.Q / self.E))
        for i in range(len(self.items)):
            for j in range(i):
                self.S[j, i] = self.S[i, j]
        self.S = pd.DataFrame(self.S, columns=self.items, index=self.items)
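A tiny standalone rendition of the same equations for a two-letter alphabet
(pure numpy; the pair counts below are illustrative, not real data):

import numpy as np

# unordered pair counts f_ij (j <= i), as in the variability table
f = np.array([[10., 0.],
              [ 4., 6.]])
Q = f / f.sum()                               # eq. 1: observed pair frequencies
P = Q.sum(axis=1) / 2 + Q.sum(axis=0) / 2     # p_i = q_ii + sum_{j != i} q_ij / 2
E = np.tril(2 * np.outer(P, P) - np.identity(len(P)) * P**2)   # expected freqs
with np.errstate(divide='ignore', invalid='ignore'):
    S = np.round(2 * np.log2(Q / E))          # BLOSUM log-odds (lower triangle)
print(S)   # for these counts: S[0, 0] = 1, S[1, 0] = -3, S[1, 1] = 2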
Example #46
0
def CalculateCov(arguments):

    noe = arguments.noe
    nsamples = arguments.nos
    fpath = arguments.fpath
    spath = arguments.spath

    NOE = 0
    SX = np.zeros(2000, dtype=np.float64)
    SXI = np.zeros(2000, dtype=np.float64)
    SXX = np.zeros((2000, 2000), dtype=np.float64)
    SI = np.zeros(1, dtype=np.float64)
    SI2 = np.zeros(1, dtype=np.float64)

    fileslist = glob.glob(fpath + 'SStofPeaks_part*.mat')
    nfiles = len(fileslist)

    count = 0
    for i in range(0, nfiles):

        if i % (size) != rank:
            continue  # different ranks look at different file

        specstruct = sio.loadmat(fileslist[i])

        tofs = specstruct['peaks']

        for k in range(0, len(tofs[0, :])):

            peaksarray = tofs[0, k]
            if len(peaksarray) == 0:
                continue
            tof, edges = np.histogram(np.concatenate(peaksarray),
                                      bins=np.linspace(0, 8000, 2001))

            tmpI = tof.sum()

            SXX += np.outer(tof, tof)
            SXI += tof * tmpI
            SI += tmpI
            SI2 += tmpI**2
            SX += tof

        count += 1

        if i % (size) == 0:
            print('%i/%i' % (i, np.round(nfiles / 4.0)))

    NOE = noe * count
    # Save the data in text files.
    try:
        if not os.path.exists(spath):
            os.makedirs(spath)
    except OSError:
        print('Tried to create folder simultaneously for several nodes')

    savenameNOE = 'NOE_part%i_%s.dat' % (rank, str(count).zfill(4))
    savenameSX = 'SX_part%i_%s.dat' % (rank, str(count).zfill(4))
    savenameSXI = 'SXI_part%i_%s.dat' % (rank, str(count).zfill(4))
    savenameSXX = 'SXX_part%i_%s.dat' % (rank, str(count).zfill(4))
    savenameSI = 'SI_part%i_%s.dat' % (rank, str(count).zfill(4))
    savenameSI2 = 'SI2_part%i_%s.dat' % (rank, str(count).zfill(4))

    #np.savetxt(spath+savenameNOE,NOE)
    np.savetxt(spath + savenameSX, SX)
    np.savetxt(spath + savenameSXI, SXI)
    np.savetxt(spath + savenameSXX, SXX)
    np.savetxt(spath + savenameSI, SI)
    np.savetxt(spath + savenameSI2, SI2)
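The partial sums saved above are presumably reduced into a covariance map in a
later step; a minimal sketch of that combination (normalizing by the total
number of events NOE is an assumption about the downstream convention):

import numpy as np

def combine_cov(SX, SXX, NOE):
    # cov(X) = E[X X^T] - E[X] E[X]^T, built from the accumulated sums
    mean = SX / NOE
    return SXX / NOE - np.outer(mean, mean)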
Example #47
0
	def Der(self, ydat, Time_Initial):
		ydat = np.array(ydat, float)
		self.ydatM = np.outer(ydat, ydat)
		return np.sum(self.Coef1D*ydat, axis=1) + np.sum(np.sum(self.ydatM*self.Coef2D, axis=1), axis=1) + self.Synth
Example #48
0
def SDP(n, npoints, convex, niter, printOut = False):
    #Keep time and a running total of the RMSE to average later on
    start = time.time()
    totalRMSE = 0
    for iteration in range(niter):
        if convex:
            (a, p, adjacency) = generate(n, npoints)
        else:
            (a, p, adjacency) = generate_inside_hull(n, npoints)

        # Compute the Euclidean distances to the anchor points
        adjSize = len(p) + len(a)
        asize = len(a)
        d = []

        #Retrieve the distances
        for i in range(adjSize):
            for j in range(adjSize):
                if(j > i and adjacency[i][j] > 0 and i < asize):
                    d.append((adjacency[i][j], j - asize, i, True))
                elif(j > i and adjacency[i][j] > 0):
                    d.append((adjacency[i][j], i - asize, j - asize, False))

        #Dimension of our adjacency matrix and our z matrix
        T = n + npoints

        z = cvx.Variable((T, T), PSD=True)   # cvx.Semidef(T) in legacy cvxpy

        #The following code constructs all the constraints of the SDP
        #problem.
        eyeConstraint = []
        anchorConstraints = []
        pointConstraints = []

        for i in range(n):
            temp = np.zeros((T,T))
            temp[i][i] = 1
            eyeConstraint.append(temp)

        temp = np.zeros((T,T))
        for i in range(n):
            for j in range(n):
                temp[i][j] = 1
        eyeConstraint.append(temp)

        for (distance, i, j, truth) in d:
            if truth:
                temp = np.zeros(npoints)
                temp[i] = -1.
                anchorConstraints.append((np.outer(np.append(a[j], temp), \
                                          np.append(a[j], temp)), distance))
            else:
                tempi = np.zeros(npoints)
                tempj = np.zeros(npoints)
                tempi[i] = 1.
                tempj[j] = 1.
                temp = tempi - tempj
                corner = np.zeros(n)
                temp = np.append(corner, temp)
                pointConstraints.append((np.outer(temp,temp), distance))

        #Another empty states list
        states = []

        #Construct the cost and constraints
        cost = cvx.norm(0)
        constr = []

        for i, mat in enumerate(eyeConstraint):
            if i < len(eyeConstraint) - 1:
                constr.append(cvx.sum(cvx.multiply(mat, z)) == 1)
            else:
                constr.append(cvx.sum(cvx.multiply(mat, z)) == n)

        for mat in anchorConstraints:
            constr.append(cvx.sum(cvx.multiply(mat[0], z)) == mat[1] ** 2)

        for mat in pointConstraints:
            constr.append(cvx.sum(cvx.multiply(mat[0], z)) == mat[1] ** 2)
        #Force the matrix to be SDP
        constr.append(z >> 0)

        #Add the constraints and cost function
        states.append(cvx.Problem(cvx.Minimize(cost), constr))

        #Solve the SDP relaxation problem
        prob = sum(states)
        prob.solve()

        #Compute this trials RMSE
        RMSE = 0
        for i in range(npoints):
            soln1 = z.value[0:n, i + n]   # z.value is an ndarray in modern cvxpy
            point1 = p[i]
            if printOut:
                print("Sensor " + str(i) + " is located at " + str(soln1) + " and the actual value is " + str(point1))
            RMSE += np.linalg.norm(np.asarray(soln1) - np.asarray(point1)) ** 2

        #Add the trial's RMSE to the running total
        totalRMSE += RMSE
    #Compute the total RMSE
    end = time.time()
    print "Total Time Elapsed: ", end - start
    print "Average RMSE: ", math.sqrt(totalRMSE / niter)
Example #49
0
import numpy as np
import torch

from numpy.testing import assert_array_equal, assert_array_almost_equal
from scipy.signal import triang
from pylops.signalprocessing import Convolve1D

from pylops_gpu.utils.backend import device
from pylops_gpu.utils import dottest
from pylops_gpu.signalprocessing import Convolve1D as gConvolve1D
from pylops_gpu.optimization.cg import cg

# filters
nfilt = (5, 7)
h1 = torch.from_numpy(triang(nfilt[0], sym=True).astype(np.float32))
h2 = torch.from_numpy(
    np.outer(triang(nfilt[0], sym=True), triang(nfilt[1],
                                                sym=True)).astype(np.float32))

par1_1d = {
    'nz': 21,
    'ny': 51,
    'nx': 51,
    'offset': nfilt[0] // 2,
    'dir': 0
}  # zero phase, first direction
par2_1d = {
    'nz': 21,
    'ny': 51,
    'nx': 51,
    'offset': 2,
    'dir': 0
}  # non-zero phase, first direction
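h2 above is a separable 2-D filter: the outer product of two 1-D triangular
windows. Convolving with it equals convolving each axis with its 1-D window in
turn, which a quick scipy check confirms:

import numpy as np
from scipy.signal import convolve2d, triang

h1d_a = triang(nfilt[0], sym=True)
h1d_b = triang(nfilt[1], sym=True)
h2d = np.outer(h1d_a, h1d_b)
img = np.random.randn(20, 20)
full = convolve2d(img, h2d)
sep = convolve2d(convolve2d(img, h1d_a[:, None]), h1d_b[None, :])
assert np.allclose(full, sep)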
Example #50
0
sigma = 600

a = pd.read_csv('pj2.dat', sep=r'\s+', header=None)
a = a.values.T
if (timemax > len(a[0])):
    timemax = len(a[0])

time = a[0, :timemax] * myload.args['mapdtime']
# and add Gauss smooth
real = a[1, :timemax] * np.exp(-time * time /
                               (2 * sigma**2))  # real part of <phi|U|Psi>
imag = a[2, :timemax] * np.exp(-time * time /
                               (2 * sigma**2))  # imaginary part of <phi|U|Psi>

E = np.linspace(-0.03, -0.01, Ndivide + 1)

Et = np.outer(E, time)
expiEt_r = np.cos(Et)
expiEt_i = np.sin(Et)

realpart = (np.dot(expiEt_r, real) -
            np.dot(expiEt_i, imag)) * myload.args['mapdtime']
imagpart = (np.dot(expiEt_r, imag) +
            np.dot(expiEt_i, real)) * myload.args['mapdtime']

plt.xlabel('E [a.u.]')
plt.ylabel(' [Im Arbitrary] ')
plt.plot(E, imagpart, 'b-', linewidth=0.5)
plt.savefig('espec.png')
plt.show()
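The Et = np.outer(E, time) block above evaluates the Fourier transform of the
smoothed signal at each energy by a plain Riemann sum. A minimal self-check of
the cos/sin split, assuming the exp(+iEt) sign convention used above:

import numpy as np

t = np.linspace(0.0, 50.0, 2001)
dtau = t[1] - t[0]
f = np.exp(-0.5 * (t - 25.0)**2)                 # toy real signal
E = np.linspace(-2.0, 2.0, 101)
spec = np.exp(1j * np.outer(E, t)) @ f * dtau    # complex form of the same trick
spec_r = np.cos(np.outer(E, t)) @ f * dtau       # the expiEt_r path
spec_i = np.sin(np.outer(E, t)) @ f * dtau       # the expiEt_i path
assert np.allclose(spec.real, spec_r) and np.allclose(spec.imag, spec_i)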
Example #51
0
def admm(X, y, max_iter=3000):
    # solve the soft-margin SVM dual with ADMM
    m, n = X.shape
    X = np.column_stack((X, np.ones((m, 1))))
    y = y.astype(np.float64)
    data_num = len(y)
    C = 1.0
    kernel = np.dot(X, np.transpose(X))
    p = np.matrix(np.multiply(kernel,np.outer(y, y))) 
    e = np.matrix(np.ones([data_num, 1], np.float64))

    bounds = (0, C)    


    low, up = bounds    
    x = np.ones((m,1))
    tau = 1.618
    sigma = 1

    # initial 
    u = np.ones((m, 1))
    t = x
    A = p + sigma * np.eye(m)
    I = np.eye(m)
    invA = cg(A, I)
    for it in range(max_iter):
        # update x
        b = e + u + sigma * t
        x = invA * b
        
        # update t
        t = x - (1/sigma)*u
        t[t < low] = low
        t[t > up] = up
                    
        # update u
        u = u - tau*sigma*(x-t)

#----bug----
#dual = -(0.5*x.T*(p*x) - e.T*x)
        dual = -(0.5*x.T*(1.3811429435906217*p*x) - e.T*x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)      
        tmp = np.maximum(1-np.multiply(y1, X*w),0)
        primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        primal = primal.item()

        # stop criteria            
        if np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)) < 1e-12:
            break

        # print(t, np.linalg.norm(gradient))
        # print(np.min(x), np.max(x))
        # print(np.sum(x < -1e-4), np.sum(x>1+1e-4))
        # print(np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)))

    y1 = np.reshape(y, (-1, 1))
    alpha1 = x
    lambda1 = np.multiply(y1,alpha1)   
    w = np.dot(X.T, lambda1)
    w = np.array(w).reshape(-1)
    b = w[n]
    w = w[0:n]

    return w, b
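A hypothetical usage sketch for admm above. The snippet's cg(A, I) helper is
assumed to return the inverse of A (e.g. via conjugate gradient); a dense
stand-in is provided here so the call runs end to end:

import numpy as np

def cg(A, I):
    # stand-in for the CG-based inverse helper assumed by admm
    return np.matrix(np.linalg.solve(A, I))

rng = np.random.RandomState(0)
X = np.vstack([rng.randn(20, 2) + 2.0, rng.randn(20, 2) - 2.0])
y = np.hstack([np.ones(20), -np.ones(20)])
w, b = admm(X, y)
print(w, b)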
Example #52
0
    def setup(self):
        surfaces = self.options['surfaces']

        system_size = 0

        # Loop through the surfaces to compute the total number of panels;
        # the system_size
        for surface in surfaces:
            mesh = surface['mesh']
            nx = mesh.shape[0]
            ny = mesh.shape[1]
            system_size += (nx - 1) * (ny - 1)

        self.system_size = system_size

        self.add_input('freestream_velocities', shape=(system_size, 3), units='m/s')
        self.add_output('mtx', shape=(system_size, system_size), units='1/m')
        self.add_output('rhs', shape=system_size, units='m/s')

        # Set up index arrays for sparse Jacobians
        vel_indices = np.arange(system_size * 3).reshape((system_size, 3))
        mtx_indices = np.arange(system_size * system_size).reshape((system_size, system_size))
        rhs_indices = np.arange(system_size)

        self.declare_partials('rhs', 'freestream_velocities',
            rows=np.einsum('i,j->ij', rhs_indices, np.ones(3, int)).flatten(),
            cols=vel_indices.flatten()
        )

        ind_1 = 0
        ind_2 = 0

        # Loop through each surface to add inputs and set up derivatives.
        # We keep track of the surface's indices within the total system's
        # indices to access the matrix in the correct locations for the derivs.
        # This is because the AIC linear system has information for all surfaces
        # together.
        for surface in surfaces:
            mesh = surface['mesh']
            nx = mesh.shape[0]
            ny = mesh.shape[1]
            name = surface['name']
            num = (nx - 1) * (ny - 1)

            ind_2 += num

            # Get the correct names for each vel_mtx and normals, then
            # add them to the component
            vel_mtx_name = '{}_{}_vel_mtx'.format(name, 'coll_pts')
            normals_name = '{}_normals'.format(name)

            self.add_input(vel_mtx_name,
                shape=(system_size, nx - 1, ny - 1, 3), units='1/m')
            self.add_input(normals_name, shape=(nx - 1, ny - 1, 3))

            velocities_indices = np.arange(system_size * num * 3).reshape(
                (system_size, nx - 1, ny - 1, 3)
            )
            normals_indices = np.arange(num * 3).reshape((num, 3))

            # Declare each set of partials based on the indices, ind_1 and ind_2
            self.declare_partials('mtx', vel_mtx_name,
                rows=np.einsum('ij,k->ijk', mtx_indices[:, ind_1:ind_2], np.ones(3, int)).flatten(),
                cols=velocities_indices.flatten(),
            )
            self.declare_partials('mtx', normals_name,
                rows=np.einsum('ij,k->ijk', mtx_indices[ind_1:ind_2, :], np.ones(3, int)).flatten(),
                cols=np.einsum('ik,j->ijk', normals_indices, np.ones(system_size, int)).flatten(),
            )
            self.declare_partials('rhs', normals_name,
                rows=np.outer(rhs_indices[ind_1:ind_2], np.ones(3, int)).flatten(),
                cols=normals_indices.flatten(),
            )

            ind_1 += num

        self.mtx_n_n_3 = np.zeros((system_size, system_size, 3))
        self.normals_n_3 = np.zeros((system_size, 3))
        self.set_check_partial_options(wrt='*', method='fd', step=1e-5)
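The rows/cols patterns handed to declare_partials above (built with np.outer,
or equivalently np.einsum) simply enumerate the nonzero entries of a dense
Jacobian block row by row; a tiny pure-numpy check of the pattern:

import numpy as np

m, k = 3, 2   # a dense (m x k) Jacobian block
rows = np.outer(np.arange(m), np.ones(k, int)).flatten()
cols = np.outer(np.ones(m, int), np.arange(k)).flatten()
assert np.array_equal(rows, np.einsum('i,j->ij', np.arange(m), np.ones(k, int)).flatten())
print(list(zip(rows, cols)))   # (0,0) (0,1) (1,0) (1,1) (2,0) (2,1)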
Example #53
0
def _l1qc_newton(x0, u0, A, b, epsilon, tau, newtontol, newtonmaxiter, cgtol, cgmaxiter, verbose, use_CG):
    # line search parameters
    alpha = 0.01
    beta = 0.5
    AtA = np.dot(A.T,A)
    x = x0.flatten()
    u = u0.flatten()
    r = np.dot(A, x).flatten() - b.flatten()
    fu1 = x - u
    fu2 = -x - u
    fe = 0.5*(float(np.dot(r.T, r)) - epsilon**2)   # np.asscalar was removed in NumPy 1.23
    f = np.sum(u) - (1.0/tau) * (np.sum(np.log(-fu1)) + np.sum(np.log(-fu2)) + np.log(-fe))

    niter = 0
    done = 0
    while (not(done)):

      atr = np.dot(A.T, r)

      ntgz = 1.0/fu1 - 1.0/fu2 + 1.0/fe * atr
      ntgu = -tau - 1.0/fu1 - 1.0/fu2
      gradf = - (1.0/tau) * np.hstack([ntgz, ntgu])

      sig11 = 1.0/fu1**2 + 1.0/fu2**2
      sig12 = -1.0/fu1**2 + 1.0/fu2**2
      sigx = sig11 - sig12**2/sig11

      w1p = ntgz - sig12/sig11 *ntgu

      H11p = np.diag(sigx.reshape(len(sigx))) - (1.0/fe) * AtA + (1.0/fe)**2 * np.outer(atr,atr)
      if use_CG:
          dx, cgres, cgiter =  _CG_solve(H11p, w1p, cgmaxiter, cgtol)
      else:
          dx = np.linalg.solve(H11p, w1p).flatten()
          cgres = np.linalg.norm(np.dot(H11p, dx).flatten() - w1p.flatten()) / np.linalg.norm(w1p)
          cgiter = -1
      if (cgres > 0.5):
          if verbose:
              print("cgres = " + str(cgres) )
              print('Cannot solve system.  Returning previous iterate.' )
          xp = x.flatten()
          up = u.flatten()
          return xp, up, 0
      Adx = np.dot(A,dx).flatten()


      du = (1.0/sig11) * ntgu - (sig12/sig11)*dx

      # minimum step size that stays in the interior
      aqe = np.dot(Adx.T, Adx)
      bqe = 2.0*np.dot(r.T, Adx)
      cqe = float(np.dot(r.T, r)) - epsilon**2

      smax = np.min(np.hstack([ 1.0,np.min(np.hstack([-fu1[(dx-du) > 0] / (dx[(dx-du) > 0] - du[(dx-du) > 0]),\
        -fu2[(-dx-du) > 0] / (-dx[(-dx-du) > 0] - du[(-dx-du) > 0]), \
        np.reshape((-bqe + np.sqrt(bqe**2 - 4 * aqe * cqe)) / (2.0*aqe), (1,)) ] ))]))
      s = (0.99) * smax

      # backtracking line search
      suffdec = 0
      backiter = 0
      while not(suffdec):
        xp = x + s*dx
        up = u + s*du
        rp = r + s*Adx

        fu1p = xp - up
        fu2p = -xp - up

        fep = 0.5 * (np.linalg.norm(rp)**2 - epsilon**2)
        fp = np.sum(up) - (1.0/tau) * (np.sum(np.log(-fu1p)) + np.sum(np.log(-fu2p)) + np.log(-fep))

        flin = f + alpha * s * (np.dot(gradf.T, np.hstack([dx, du])))

        suffdec = (fp <= flin)
        s = beta * s
        backiter = backiter + 1
        if (backiter > 32):
          if verbose:
              print('Stuck on backtracking line search, returning previous iterate.')
          xp = x.copy()
          up = u.copy()
          return xp,up,niter

      # set up for next iteration
      x = xp.copy()
      u = up.copy()
      r = rp.copy()
      fu1 = fu1p.copy()
      fu2 = fu2p.copy()
      fe = fep.copy()
      f = fp.copy()

      lambda2 = -(np.dot(gradf, np.hstack([dx, du])))
      stepsize = s*np.linalg.norm(np.hstack([dx, du]))
      niter = niter + 1
      done = (lambda2/2 < newtontol) | (niter >= newtonmaxiter)
      if verbose:
          print('Newton iter = ' + str(niter) + ', Functional = ' + str(f) + ', Newton decrement = ' + str(lambda2/2) + ', Stepsize = ' + str(stepsize))
          print('                CG Res = ' + str(cgres) + ', CG Iter = ' + str(cgiter))
    return xp, up, niter
Example #54
0
    contours_points = (prim_n - np.array([int(x_center), int(y_center)]))*np.array([1, -1])
    # print(contours_points, contours_points.shape)

    c_3d = max(ellipsis_out[1]) / 2
    a_3d = ellipsis_out[1][0] / 2
    b_3d = ellipsis_out[1][1] / 2

    Z_3 = c_3d*(np.abs(1-contours_points[:, :, 1]**2/b_3d**2-contours_points[:, :, 0]**2/a_3d**2))**0.5
    # print(Z_3, Z_3.shape, Z_3.min(), Z_3.max())
    # # ############# plotting ##############
    with plt.style.context('ggplot'):
        fig = plt.figure()
        ax = Axes3D(fig)
        u = np.linspace(0, 2 * np.pi, 100)
        v = np.linspace(0, np.pi, 100)
        x_3 = a_3d * np.outer(np.cos(u), np.sin(v))
        y_3 = b_3d * np.outer(np.sin(u), np.sin(v))
        z_3 = c_3d * np.outer(np.ones(np.size(u)), np.cos(v))
        ax.plot_surface(x_3, y_3, z_3, cmap=cm.gray)
        ax.scatter(contours_points[:, :, 0].ravel(), contours_points[:, :, 1].ravel(), Z_3.ravel(), s=1, c='k', cmap='coolwarm')
        ax.axis('off')
        fig.savefig('F:/wangqianwen/transient/'+str(dir_ind)+"_"+str(ctr_o)+"_"+str(bgt_o)+'3d.png')

        fig2 = plt.figure()
        plt.scatter(contours_points[:, :, 0].ravel(), contours_points[:, :, 1].ravel(), s=1)
        fig2.savefig('F:/wangqianwen/transient/'+str(dir_ind)+"_"+str(ctr_o)+"_"+str(bgt_o)+'3d_Re.png')

        fig3 = plt.figure()
        plt.scatter(cell_info(contours_out)[0],  cell_info(contours_out)[1], s=1)
        fig3.savefig('F:/wangqianwen/transient/' + str(dir_ind) + "_" + str(ctr_o) + "_" + str(bgt_o) + 'area-length.png')
Example #55
0
def plot_CNMF_results(pathMouse,session,extent):
  
  f = loadmat(pathMouse + '/matching/results_matching_multi_std=0_thr=70_w=33_OnACID.mat')
  assignments = f['assignments']
  
  path = pathMouse + 'Session%02d/results_OnACID.mat'%session[0]
  f = loadmat(path);
  Cn0 = f['Cn']
  
  for s in range(session[0],session[1]+1):
    path = pathMouse + 'Session%02d/results_OnACID.mat'%s
    f = loadmat(path);
    
    dims = f['Cn'].shape
    d1,d2 = dims
    A = f['A']
    
    if 'ndarray' not in str(type(extent)):
      extent = np.array(extent)
    
    Coor = np.matrix([np.outer(np.ones(d2), np.arange(d1)).ravel(),
                            np.outer(np.arange(d2), np.ones(d1)).ravel()], dtype=A.dtype)
    Anorm = scipy.sparse.vstack([a/a.sum() for a in A.T]).T
    cm = np.array((Coor * Anorm).T)
    
    cmap='viridis';
    
    level = 0.98
    fig = pl.figure(figsize=(2.5,2.5),frameon=False)
    ax = fig.add_axes([0, 0, 1, 1])
    pl.rcParams['pdf.fonttype'] = 42
    font = {'family': 'Myriad Pro',
            'weight': 'regular',
            'size': 10}
    pl.rc('font', **font)
    
    lp, hp = np.nanpercentile(f['Cn'], [5, 95])
    ax.imshow(f['Cn'], vmin=lp, vmax=hp, cmap=cmap)
    
    C = np.fft.fftshift(np.real(np.fft.ifft2(np.fft.fft2(Cn0) * np.fft.fft2(np.rot90(f['Cn'],2)))))
            
    max_pos = np.where(C==np.max(C))
    x_shift = (max_pos[0] - (dims[0]/2-1)).astype(int)
    y_shift = (max_pos[1] - (dims[1]/2-1)).astype(int)
    
    #also load assignments and distinguish between matched and non-matched (for those sessions) - plot in red (matched) vs white (non-matched)
    for n in range(f['A'].shape[1]):
      
          
      if (cm[n,:] > extent[:,0]-10).all() and (cm[n,:] < extent[:,1]+10).all():
        
        c = np.where(assignments[:,s-1]==n)[0]
        if c.size:
          nMatch = sum(sum(~np.isnan(assignments[c,session[0]-1:session[1]])));
          if nMatch == 3:
            #print(c)
            #print(assignments[c,session[0]-1:session[1]])
            col = 'r'
            lw = 2
          elif nMatch == 2:
            col = 'y'
            lw = 1
          else:
            col = 'r'
            lw = 0.8
          
          ax.contour(norm_nrg(np.reshape(f['A'][:,n].transpose().toarray(),dims,order='F')), levels=[level], colors=col, linewidths=lw)
      
    
    #[pl.contour(norm_nrg(np.reshape(mm.toarray(),dims,order='F')), levels=[level], colors=col, linewidths=1) for mm in f['A'][:,:500].transpose()]
    
    #if s == session[0]:
    text_str = "Session %02d"%s
    y_pos = extent[0,1]+y_shift-7
    #else:
      #text_str = "Session %02d \nShift: (%d,%d)"%(s,x_shift,y_shift)
      #y_pos = extent[0,1]+y_shift-13
    
    
    #ax.plot([210,230],[60,70],'k','LineWidth',5)
    ax.plot([extent[1,0],extent[1,0]]+x_shift,[extent[0,0],extent[0,1]]+y_shift,'k',linewidth=12)
    ax.plot([extent[1,1],extent[1,1]]+x_shift,[extent[0,0],extent[0,1]]+y_shift,'k',linewidth=12)
    ax.plot([extent[1,0],extent[1,1]]+x_shift,[extent[0,0],extent[0,0]]+y_shift,'k',linewidth=12)
    ax.plot([extent[1,0],extent[1,1]]+x_shift,[extent[0,1],extent[0,1]]+y_shift,'k',linewidth=12)
    
    ax.text(extent[1,0]+x_shift+4,y_pos,text_str,fontsize=16,bbox=dict(facecolor='w', alpha=0.8))
    ax.set_xlim(extent[1,:]+x_shift)
    ax.set_ylim(extent[0,:]+y_shift)
    
    #pl.title('Matches')
    #ax.axis('off')
    #pl.draw()
    pl.show(block=False)
    
    pl.savefig("/media/wollex/Analyze_AS3/Data/879/Figures/ROIs_s=%02d.png"%s)
Example #56
0
def inner_point(X, y, max_iter=5000):
    m, n = X.shape
    X = np.column_stack((X, np.ones((m, 1))))
    y = y.astype(np.float64)
    data_num = len(y)
    C = 1.0
    kernel = np.dot(
        X, np.transpose(X)) + np.diag(np.ones(data_num, np.float64)) * .5 / C
    p = np.matrix(np.multiply(kernel, np.outer(y, y)))
    q = np.matrix(-np.ones([data_num, 1], np.float64))

    bounds = (0, np.inf)

    low, up = bounds
    x = np.random.normal(size=(m, 1))
    l = 0.001

    for k in range(max_iter * 5):  # heavy on matrix operations
        g0 = p * x + q
        # saving previous x
        x = x - l * g0
        x[x < low] = low
        x[x > up] = up

        dual = -(0.5 * x.T * (p * x) + q.T * x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)
        tmp = np.maximum(1 - np.multiply(y1, X * w), 0)
        primal = 0.5 * np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        primal = primal.item()

        #if k % 1000 == 0:
        #    print('GD:', np.abs(dual - primal) / (1 + np.abs(dual) + np.abs(primal)))

    for k in range(500):  # heavy on matrix operations
        for i in range(m):
            tmpx = x.copy()
            tmpx[i, 0] = 0
            temp = (p[i, :] * tmpx) + q[i]
            # if temp > 0 and x[i] == 0:
            # continue
            temp = temp.item()
            if p[i, i] > 0:
                xi = -(temp / p[i, i]).item()
                xi = np.maximum(low, xi)
            elif p[i, i] < 0:
                #----bug----
                #xi = -1
                xi = +1
                #print('error')
            else:
                # keep the current value when there is no descent direction,
                # so xi is always defined before the assignment below
                xi = low if temp > 0 else x[i, 0]
            x[i, 0] = xi


        dual = -(0.5 * x.T * (p * x) + q.T * x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)
        tmp = np.maximum(1 - np.multiply(y1, X * w), 0)
        primal = 0.5 * np.linalg.norm(w)**2 + 1 * np.sum(np.square(tmp))
        primal = primal.item()

        # stop criteria
        #if k % 1000 == 0:
        #    print('CD:', np.abs(dual - primal) / (1 + np.abs(dual) + np.abs(primal)))
        if np.abs(dual - primal) / (1 + np.abs(dual) + np.abs(primal)) < 1e-12:
            #print('success')
            break

    return w
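A minimal usage sketch (toy data of my own, not from the original source): inner_point expects labels in {-1, +1}, and since a column of ones is appended to X internally, the returned weight vector carries the bias as its last component.

import numpy as np

np.random.seed(0)
X = np.vstack((np.random.randn(20, 2) + 2.0,   # hypothetical positive cluster
               np.random.randn(20, 2) - 2.0))  # hypothetical negative cluster
y = np.array([1.0] * 20 + [-1.0] * 20)

w_full = np.asarray(inner_point(X, y, max_iter=100)).ravel()
w, b = w_full[:-1], w_full[-1]
print('train accuracy:', np.mean(np.sign(X @ w + b) == y))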
Example #57
0
def printObstacleObbSphere(name, ax):
    import math
    import numpy as np

    print(name)

    # "点云" is Chinese for "point cloud"
    file_obj = open("E:\\graduateDesignTxt\\点云\\" + name + ".txt",
                    encoding='UTF-8')

    x = []
    y = []
    z = []
    for line in file_obj.readlines():
        line = line.rstrip("\n")
        arr = line.split(",")
        x.append(float(arr[0]))
        y.append(float(arr[1]))
        z.append(float(arr[2]))
    points = np.column_stack((x, y, z))

    # axis-aligned extents of the point cloud
    maxL, maxW, maxH = points.max(axis=0)
    minL, minW, minH = points.min(axis=0)

    pointA = [(maxL + minL) / 2, (maxW + minW) / 2, (maxH + minH) / 2]

    halfLengthA = [(maxL - minL) / 2, (maxW - minW) / 2, (maxH - minH) / 2]
    # vector = [[1, 0, 0], [0, 1, 0], [0, 0, 1]]
    # printOct(halfLengthA, pointA, vector, ax)
    r = 0
    for point in points:
        r = max(
            r,
            math.sqrt(
                pow(pointA[0] - point[0], 2) + pow(pointA[1] - point[1], 2) +
                pow(pointA[2] - point[2], 2)))

    center = pointA
    radius = r
    # note: this prints 2*pi*r^2, which is half the surface area of the sphere (4*pi*r^2)
    print(2 * math.pi * r * r)
    # parametrize the sphere: outer products of the angular grids give the surface mesh
    u = np.linspace(0, 2 * np.pi, 100)
    v = np.linspace(0, np.pi, 100)
    x1 = radius * np.outer(np.cos(u), np.sin(v)) + center[0]
    y1 = radius * np.outer(np.sin(u), np.sin(v)) + center[1]
    z1 = radius * np.outer(np.ones(np.size(u)), np.cos(v)) + center[2]

    # wire-frame plot of the bounding sphere over the raw points
    ax.plot_wireframe(x1, y1, z1, rstride=10, cstride=10)
    ax.scatter(x, y, z, s=1)
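A hypothetical call (assumes a text file of comma-separated "x,y,z" lines exists under the hard-coded directory; "obstacle01" is a made-up file name):

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401, registers the '3d' projection

fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
printObstacleObbSphere("obstacle01", ax)
plt.show()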
Example #58
0
    def reset(self):
        """Reset the environment to its original state. Must be called before the first step.

        Returns
        -------
        users : OrderedDict
            The initial users where the key represents the user id and the value represents
            the visible features associated with the user.
        items : OrderedDict
            The initial items where the key represents the item id and the value represents
            the visible features associated with the item.
        ratings : dict
            The initial ratings where the key is a tuple whose first element is the user
            id and the second element is the item id. The value represents the features
            associated with the setting in which the rating was made.

        """
        # Initialize the state of the environment.
        self._timestep = -1
        self._reset_state()
        self._user_histories = collections.defaultdict(list)
        num_users = len(self._users)
        num_items = len(self._items)
        self._user_prob = self._get_user_prob()

        # We will lazily compute dense ratings.
        self._dense_ratings = None

        # Sample initial observed user-item pairs.
        if isinstance(self._initial_sampling, str):
            item_idx = np.random.permutation(num_items)
            if self._initial_sampling == "uniform":
                item_probs = np.ones(num_items) / num_items
            elif self._initial_sampling == "powerlaw":
                # Sample according to a powerlaw-like beta distribution,
                # parameters were fit to MovieLens 100k.
                item_probs = scipy.stats.beta.pdf(item_idx,
                                                  a=0.75431,
                                                  b=3.22225,
                                                  loc=-1,
                                                  scale=num_items + 1)
            else:
                raise ValueError(
                    "Unrecognized initial_sampling: %s" % self._initial_sampling)
            # Normalize the sampled pdf values so they form a valid discrete distribution.
            item_probs /= item_probs.sum()
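            # np.outer(user_prob, item_probs) is the joint probability over all
            # (user, item) pairs under independent user and item draws; the
            # flat index is decoded below with // and %.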
            idx_1d = self._init_random.choice(
                num_users * num_items,
                self._num_init_ratings,
                replace=False,
                p=np.outer(self._user_prob, item_probs).flatten(),
            )
            user_ids = idx_1d // num_items
            item_ids = idx_1d % num_items
        else:
            if len(self._initial_sampling) > 0:
                user_ids, item_ids = zip(*self._initial_sampling)
            else:
                user_ids, item_ids = [], []

            user_ids, item_ids = np.array(user_ids), np.array(item_ids)

        # Fill the rating dict with initial data.
        self._ratings = {}
        for user_id, item_id in zip(user_ids, item_ids):
            # TODO: This is a hack, but I don't think we should necessarily put the burden
            # of having to implement a version of _rate_item that knows whether it's being called
            # in reset or not on people deriving from this class. Need to think of a better way
            # than doing this though.
            temp_random = self._dynamics_random
            self._dynamics_random = self._init_random
            self._ratings[user_id, item_id] = (
                self._rate_items(user_id, np.array([item_id]))[0],
                self._rating_context(user_id),
            )
            self._dynamics_random = temp_random

        # Finally, set the users that will be online for the first step.
        self._online_users = self._select_online_users()

        self._timestep += 1
        return self._users.copy(), self._items.copy(), self._ratings.copy()
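A minimal interaction sketch under stated assumptions: SomeRecommenderEnv is a hypothetical concrete subclass of the class this method belongs to; only reset is exercised here.

env = SomeRecommenderEnv()
users, items, ratings = env.reset()
print(len(users), 'users,', len(items), 'items,', len(ratings), 'initial ratings')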
Example #59
0
def parallel_axis_theorem(I, m, R):
    import numpy as np
    # shift an inertia tensor I about the center of mass to an axis displaced by R:
    # I' = I + m * (R.R * E3 - outer(R, R))
    return I + m * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
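A quick sanity check (my own example, not from the source): a point mass has zero inertia about its own center, so shifting by R = (d, 0, 0) should yield m*d**2 about the two axes perpendicular to R and zero about R itself.

import numpy as np

m, d = 2.0, 3.0
I_cm = np.zeros((3, 3))  # point mass: no inertia about its own center
I_shifted = parallel_axis_theorem(I_cm, m, np.array([d, 0.0, 0.0]))
print(I_shifted)  # expect diag(0, m*d**2, m*d**2) = diag(0, 18, 18)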
Example #60
0
    def track(self, img):
        w_z = self.size[0] + CONTEXT_AMOUNT * np.sum(self.size)
        h_z = self.size[1] + CONTEXT_AMOUNT * np.sum(self.size)
        s_z = np.sqrt(w_z * h_z)
        scale_z = EXEMPLAR_SIZE / s_z

        score_size = (INSTANCE_SIZE - EXEMPLAR_SIZE) // ANCHOR_STRIDE + 1 + BASE_SIZE
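        # np.outer(hanning, hanning) builds a 2-D cosine window from two 1-D
        # Hanning windows; tiled per anchor, it later damps scores far from
        # the previous target position (see the window penalty below)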
        hanning = np.hanning(score_size)
        window = np.outer(hanning, hanning)
        window = np.tile(window.flatten(), self.anchor_num)
        anchors = self.generate_anchor(score_size)

        s_x = s_z * (INSTANCE_SIZE / EXEMPLAR_SIZE)
        x_crop = self.get_subwindow(img, self.center_pos,
                                    INSTANCE_SIZE,
                                    round(s_x), self.channel_average)
        
        outputs = self.model.track(x_crop)

        score = self._convert_score(outputs['cls'])
        pred_bbox = self._convert_bbox(outputs['loc'], anchors)

        def change(r):
            # max(r, 1/r): growth and shrinkage by the same factor are penalized equally
            return np.maximum(r, 1. / r)

        def sz(w, h):
            pad = (w + h) * 0.5
            return np.sqrt((w + pad) * (h + pad))

        # scale penalty
        s_c = change(sz(pred_bbox[2, :], pred_bbox[3, :]) /
                     (sz(self.size[0]*scale_z, self.size[1]*scale_z)))

        # aspect ratio penalty
        r_c = change((self.size[0]/self.size[1]) /
                     (pred_bbox[2, :]/pred_bbox[3, :]))
        penalty = np.exp(-(r_c * s_c - 1) * PENALTY_K)
        pscore = penalty * score

        # window penalty
        pscore = pscore * (1 - WINDOW_INFLUENCE) + \
                 window * WINDOW_INFLUENCE

        # keep the best-scoring candidate, plus up to 16 near-best candidates
        # whose boxes and learning rates feed the *16 state below
        best_idx = np.argmax(pscore)
        best_score = pscore[best_idx]

        best_idx16 = np.argsort(pscore)[::-1][:16] 
        best_idx16 = best_idx16[pscore[best_idx16] > pscore[best_idx]*0.95].tolist()
  
        bbox = pred_bbox[:, best_idx16] / scale_z
        lr = penalty[best_idx16] * score[best_idx16] * LR
        
        # get position and size
        if best_score >= 0.65:
            cx = bbox[0,0] + self.center_pos[0]
            cy = bbox[1,0] + self.center_pos[1]
            width = self.size[0] * (1 - lr[0]) + bbox[2,0] * lr[0]
            height = self.size[1] * (1 - lr[0]) + bbox[3,0] * lr[0]

            self.cx16 = bbox[0,:] + self.center_pos[0]
            self.cy16 = bbox[1,:] + self.center_pos[1]
            self.width16 = self.size[0] * (1 - lr) + bbox[2,:] * lr
            self.height16 = self.size[1] * (1 - lr) + bbox[3,:] * lr
        else:
            cx = self.center_pos[0]
            cy = self.center_pos[1]
            width = self.size[0]
            height = self.size[1]

            self.cx16 = np.array([cx])
            self.cy16 = np.array([cy])
            self.width16 = np.array([width])
            self.height16 = np.array([height])

        # clip boundary
        cx, cy, width, height = self._bbox_clip(cx, cy, width, height, img.shape[:2])

        # update state
        self.center_pos = np.array([cx, cy])
        self.size = np.array([width, height])

        bbox = [cx - width / 2,
                cy - height / 2,
                width,
                height]
        
        bbox16 = [self.cx16 - self.width16 / 2,
                  self.cy16 - self.height16 / 2,
                  self.width16,
                  self.height16]

        return {
                'bbox': bbox,
                'bbox16': bbox16,
                'best_score': best_score,
               }