def plot_histogram(X, Y, w, b):
    ''' Plots a histogram of classifier outputs (w^T X) for each class with pl.hist 
    The title of the histogram is the accuracy of the classification
    Accuracy = #correctly classified points / N 
    
    Definition:     plot_histogram(X, Y, w, b)
    Input:          X       -  DxN array of N data points with D features
                    Y       -  1D array of length N of class labels
                    w       -  1D array of length D, weight vector 
                    b       -  bias term for linear classification   
    
    '''
    # classifier outputs and correctly classified points
    output = w.T.dot(X) - b
    correct = (sp.sign(output) == Y).nonzero()[0]

    # accuracy in percent and the outputs split by class label
    acc = float(correct.shape[0]) / float(output.shape[0]) * 100.
    target = output[Y == 1]
    non_target = output[Y == -1]

    # plot one histogram per class and report the accuracy in the title
    pl.hist(non_target, bins=10, histtype='bar', color='b', rwidth=0.4, label='non-target')
    pl.hist(target, bins=10, histtype='bar', color='g', rwidth=0.4, label='target')
    pl.xlabel('w^T X')
    pl.title('Acc %d%%' % acc)
    pl.legend()
    pl.legend()
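
A minimal usage sketch for the function above, on hypothetical toy data (assumes an older SciPy/pylab where the sp.* and pl.* aliases used in the snippet still exist):

import scipy as sp
import pylab as pl

sp.random.seed(0)
X = sp.randn(2, 100)                              # 2 features, 100 points
Y = sp.sign(X[0, :] + 0.5 * sp.randn(100))        # labels loosely tied to the first feature
w = sp.array([1.0, 0.0])                          # guessed weight vector
b = 0.0
plot_histogram(X, Y, w, b)
pl.show()
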
Example #2
 def rpropUpdate(self, w):
     """ edit the update vector according to the rprop mechanism. """
     n = self.xdim
     self.wStored.append(w.copy())
     self.rpropPerformance.append(self.fx[0])
     self.oldParams.append(self.combineParams(self.alpha, self.x, self.factorSigma))            
     if self.generation > 0: 
         neww = zeros(len(w))
         self.delta.append(zeros((self.mu*(n*(n+1)/2+n+1))))            
         for i in range(len(w)-1):
             self.delta[self.generation][i] = self.delta[self.generation - 1][i]
             assert len(self.wStored[self.generation]) == len(self.wStored[self.generation-1])
             if self.wStored[self.generation][i] * self.wStored[self.generation-1][i] > 0.0:
                 self.delta[self.generation][i] = min(self.delta[self.generation-1][i] * self.etaPlus, self.rpropMaxUpdate)
                 if self.rpropUseGradient:
                     neww[i] = self.wStored[self.generation][i] * self.delta[self.generation][i]
                 else:
                     neww[i] = sign(self.wStored[self.generation][i]) * self.delta[self.generation][i]
             elif self.wStored[self.generation][i] * self.wStored[self.generation-1][i] < 0.0:
                 self.delta[self.generation][i] = max(self.delta[self.generation - 1][i] * self.etaMin, self.rpropMinUpdate)
                 if self.rpropPerformance[self.generation] < self.rpropPerformance[self.generation - 1]:                    
                     # undo the last update
                     neww[i] = self.oldParams[self.generation-1][i] - self.oldParams[self.generation][i]                        
                 self.wStored[self.generation][i] = 0.0
             elif self.wStored[self.generation][i] * self.wStored[self.generation - 1][i] == 0.0:
                 if self.rpropUseGradient:
                     neww[i] = self.wStored[self.generation][i] * self.delta[self.generation][i]
                 else:
                     neww[i] = sign(self.wStored[self.generation][i]) * self.delta[self.generation][i]
         self.updateVariables(neww)              
def train_perceptron(X,Y,iterations=200,eta=.1):
    ''' Trains a linear perceptron
    Definition:  w, b  = train_perceptron(X,Y,iterations=200,eta=.1)
    Input:       X       -  DxN array of N data points with D features
                 Y       -  1D array of length N of class labels {-1, 1}
                 iter    -  optional, number of iterations, default 200
                 eta     -  optional, learning rate, default 0.1
    Output:      w       -  1D array of length D, weight vector
                 b       -  bias term for linear classification
    '''
    #include the bias term by adding a row of ones to X
    X = sp.concatenate((sp.ones((1,X.shape[1])), X))
    #initialize weight vector
    weights = sp.ones((X.shape[0]))/X.shape[0]
    for it in sp.arange(iterations):
        # indices of misclassified data
        wrong = (sp.sign(weights.dot(X)) != Y).nonzero()[0]
        if wrong.shape[0] > 0:
            # pick a random misclassified data point
            m = wrong[sp.random.random_integers(0, wrong.shape[0]-1)]
            #update weight vector (use variable learning rate (eta/(1.+it)) )
            weights = weights  + (eta/(1.+it)) * X[:, m] * Y[m];
            # compute accuracy
            wrong = (sp.sign(weights.dot(X)) != Y).nonzero()[0]
    b = -weights[0]
    w = weights[1:]
    return w,b
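
A quick sanity check on linearly separable toy data (hypothetical; relies on the legacy sp.* aliases the function itself uses):

import scipy as sp

sp.random.seed(1)
X = sp.hstack((sp.randn(2, 50) + 2.0, sp.randn(2, 50) - 2.0))   # two Gaussian blobs
Y = sp.concatenate((sp.ones(50), -sp.ones(50)))
w, b = train_perceptron(X, Y, iterations=100, eta=.1)
print("training accuracy: %0.2f" % sp.mean(sp.sign(w.dot(X) - b) == Y))
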
Example #4
    def predict(self, Xtest):

        print "starting predict with:",Xtest.shape[0]," samples : ",datetime.datetime.now()
        num_batches = Xtest.shape[0] / float(self.n_pred_samples)

        test_ids = []

        for i in range(0, int(num_batches)):
            test_ids.append(range((i) * self.n_pred_samples, (i + 1) * self.n_pred_samples))

        if (num_batches - int(num_batches)) > 0:
            test_ids.append(range(int(num_batches) * self.n_pred_samples, Xtest.shape[0]))

        # values = [(test_id, exp_ids) for test_id in test_ids for exp_ids in self.X_exp_ids]
        yhattotal = []
        for i in range(0,len(test_ids)):
            # print "computing result with batches:",i," of:",len(test_ids)
            yraw = Parallel(n_jobs=self.workers)(delayed(svm_predict_raw_batches)(self.X, Xtest[test_ids[i]], \
                                                           self.w, v, self.gamma) for v in self.X_exp_ids)
            yhat = sp.sign(sp.vstack(yraw).mean(axis=0))
            yhattotal.append(yhat)

        yhattotal = [item for sublist in yhattotal for item in sublist]
        print "stopping predict:", datetime.datetime.now()
        return sp.sign(yhattotal)
def crossvalidate(X,Y, f=5, trainfun=train_ncc):
    '''
    Test generalization performance of a linear classifier by crossvalidation
    Definition:     crossvalidate(X,Y, f=5, trainfun=train_ncc)
    Input:      X        -  DxN array of N data points with D features
                Y        -  1D array of length N of class labels
                f        - number of cross-validation folds
                trainfun - function for linear classification training
    Output:     acc_train - (f,) array of accuracies in each training fold
                acc_test  - (f,) array of accuracies in each test fold
    '''
    N = f*(X.shape[-1]/f)
    idx = sp.reshape(sp.arange(N),(f,N/f))
    acc_train = sp.zeros((f))
    acc_test = sp.zeros((f))

    for ifold in sp.arange(f):
        testidx = sp.zeros((f),dtype=bool)
        testidx[ifold] = 1
        test = idx[testidx,:].flatten()
        train = idx[~testidx,:].flatten()
        w,b = trainfun(X[:,train],Y[train])
        acc_train[ifold] = sp.sum(sp.sign(w.dot(X[:,train])-b)==Y[train])/sp.double(train.shape[0])
        acc_test[ifold] = sp.sum(sp.sign(w.dot(X[:,test])-b)==Y[test])/sp.double(test.shape[0])

    # pdb.set_trace()
    return acc_train,acc_test
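
train_ncc is not shown here, but any trainer returning (w, b) fits the interface, e.g. the perceptron above. A hypothetical run on shuffled toy data (the snippet relies on Python 2 integer division and legacy sp.* aliases):

import scipy as sp

sp.random.seed(2)
X = sp.hstack((sp.randn(2, 50) + 2.0, sp.randn(2, 50) - 2.0))
Y = sp.concatenate((sp.ones(50), -sp.ones(50)))
perm = sp.random.permutation(X.shape[1])          # shuffle so each fold contains both classes
X, Y = X[:, perm], Y[perm]
acc_train, acc_test = crossvalidate(X, Y, f=5, trainfun=train_perceptron)
print("train acc: %0.2f, test acc: %0.2f" % (acc_train.mean(), acc_test.mean()))
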
Example #6
 def f(self,xarr,t):
     x0dot = -self.coef*pl.exp(-self.k*(1.0+abs(xarr[3]-1.0)))*pl.sin(self.w*t-self.k*xarr[2])
     x1dot = pl.sign(xarr[3]-1.0)*self.coef*pl.exp(-self.k*(1.0+abs(xarr[3]-1.0)))*pl.cos(self.w*t-self.k*xarr[2]) -\
             pl.sign(xarr[3]-1.0)*9.8
     x2dot = xarr[0]
     x3dot = xarr[1]
     return [x0dot,x1dot,x2dot,x3dot]
    def zero_crossing_rate(self, frames):
        nf = len(frames)              # number of frames
        zcr = np.zeros(nf)           # initialize
        for k in range(nf):
            x_sub = frames[k]
            x_sub1 = x_sub[:-1]                   
            x_sub2 = x_sub[1:]
            zcr[k] = np.sum(np.abs(scipy.sign(x_sub1) - scipy.sign(x_sub2))) / 2 / len(x_sub1)

        return zcr
def dsekl_test_predict(dname='sonar', num_test=1000, maxN=1000):
    print "started loading:", datetime.datetime.now()
    Xtotal, Ytotal = load_realdata(dname)

    print "loading data done!", datetime.datetime.now()
    # decrease dataset size
    N = Xtotal.shape[0]
    if maxN > 0:
        N = sp.minimum(Xtotal.shape[0], maxN)

    Xtotal = Xtotal[:N + num_test]
    Ytotal = Ytotal[:N + num_test]

    # randomize datapoints
    print "randomization", datetime.datetime.now()
    sp.random.seed(0)
    idx = sp.random.permutation(Xtotal.shape[0])
    print idx
    Xtotal = Xtotal[idx]
    Ytotal = Ytotal[idx]

    # divide test and train
    print "dividing in train and test", datetime.datetime.now()
    Xtest = Xtotal[N:N+num_test]
    Ytest = Ytotal[N:N+num_test]
    Xtrain = Xtotal[:N]
    Ytrain = Ytotal[:N]

    print "densifying", datetime.datetime.now()
    # unit variance and zero mean
    Xtrain = Xtrain.todense()
    Xtest = Xtest.todense()

    if not sp.sparse.issparse(Xtrain):
        scaler = StandardScaler()
        print "fitting scaler", datetime.datetime.now()
        scaler.fit(Xtrain)  # Don't cheat - fit only on training data
        print "transforming data train", datetime.datetime.now()
        Xtrain = scaler.transform(Xtrain)
        print "transforming data test", datetime.datetime.now()
        Xtest = scaler.transform(Xtest)
    else:
        scaler = StandardScaler(with_mean=False)
        scaler.fit(Xtrain)
        Xtrain = scaler.transform(Xtrain)
        Xtest = scaler.transform(Xtest)

    DS = pickle.load(file("DS","rb"))

    res_hl = DS.predict_support_hardlimits(Xtest)
    res_hl = sp.mean(sp.sign(res_hl) != Ytest)
    print "res_hl",res_hl
    res_perc = DS.predict_support_percentiles(Xtest)
    res_perc = sp.mean(sp.sign(res_perc) != Ytest)
    print "res_perc",res_perc
 def performAstroActions(self):
     for j, active in enumerate(self.astro_statuses):
         assert active in [-1, 0, 1]
         if active == 1:
             assert sign(self.remaining_active_durs[j]) == 1
             self.neur_in_ws[:,j] += self.neur_in_ws[:,j] * self.incr_percent
             self.remaining_active_durs[j] -= 1
         elif active == -1:
             assert sign(self.remaining_active_durs[j]) == -1
             self.neur_in_ws[:,j] += self.neur_in_ws[:,j] * -self.decr_percent
             self.remaining_active_durs[j] += 1
 def performAstroActions(self):
     for j, active in enumerate(self.astro_statuses):
         assert active in [-1, 0, 1]
         if active == 1:
             # need to check output weights are modified in the correct way
             assert sign(self.remaining_active_durs[j]) == 1
             self.in_ws[:,j] += self.in_ws[:,j] * self.incr_percent
             self.remaining_active_durs[j] -= 1
         elif active == -1:
             assert sign(self.remaining_active_durs[j]) == -1
             self.in_ws[:,j] += self.in_ws[:,j] * -self.decr_percent
             self.remaining_active_durs[j] += 1
def svdInverse(mat,maxEig=1e10,minEig=1e-10): #1e10,1e-10
    u,w,vt = scipy.linalg.svd(mat)
    if any(w==0.):
        raise ZeroDivisionError, "Singular matrix."
    wInv = w ** -1
    largeIndices = pylab.find( abs(wInv) > maxEig )
    if len(largeIndices) > 0: print "svdInverse:",len(largeIndices),"large singular values out of",len(w)
    wInv[largeIndices] = maxEig*scipy.sign(wInv[largeIndices])
    smallIndices = pylab.find( abs(wInv) < minEig )
    if len(smallIndices) > 0: print "svdInverse:",len(smallIndices),"small singular values out of",len(w)
    wInv[smallIndices] = minEig*scipy.sign(wInv[smallIndices])
    return scipy.dot( scipy.dot(vt.T,scipy.diag(wInv)), u.T )
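
A hypothetical check on a small, well-conditioned matrix (the function itself assumes legacy scipy namespaces and pylab.find):

import scipy

mat = scipy.array([[2., 1.],
                   [1., 3.]])
matInv = svdInverse(mat)
print(scipy.dot(mat, matInv))   # should be close to the 2x2 identity
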
Example #12
def expectation_prop_inner(m0,V0,Y,Z,F,z,needed):
    # expectation propagation on a multivariate Gaussian for a soft inequality constraint
    # m0, V0 are the mean vector and covariance before EP
    # Y is the inequality value, Z is the sign (1 for geq, -1 for leq), F is the softness variance
    # z is the number of EP rounds to run
    # returns mt, Vt: the mean and variance of the observations created by EP
    m0=sp.array(m0).flatten()
    V0=sp.array(V0)
    n = V0.shape[0]
    print "expectation prpagation running on "+str(n)+" dimensions for "+str(z)+" loops:"
    mt =sp.zeros(n)
    Vt= sp.eye(n)*float(1e10)
    m = sp.empty(n)
    V = sp.empty([n,n])
    conv = sp.empty(z)
    for i in xrange(z):
        
        # compute m, V given the EP observations
        m,V = gaussian_fusion(m0,mt,V0,Vt)
        mtprev=mt.copy()
        Vtprev=Vt.copy()
        for j in [k for k in xrange(n) if needed[k]]:
            print [i,j]
            #the cavity dist at index j
            tmp = 1./(Vt[j,j]-V[j,j])
            v_ = (V[j,j]*Vt[j,j])*tmp
            m_ = tmp*(m[j]*Vt[j, j]-mt[j]*V[j, j])
            alpha = sp.sign(Z[j])*(m_-Y[j]) / (sp.sqrt(v_+F[j]))
            pr = PhiR(alpha)
            
            
            if sp.isnan(pr):
                
                pr = -alpha
            beta = pr*(pr+alpha)/(v_+F[j])
            kappa = sp.sign(Z[j])*(pr+alpha) / (sp.sqrt(v_+F[j]))
            
            #print [alpha,beta,kappa,pr]
            mt[j] = m_+1./kappa
            #mt[j] = min(abs(mt[j]),1e5)*sp.sign(mt[j])
            Vt[j,j] = min(1e10,1./beta - v_)
        #print sp.amax(mtprev-mt)
        #print sp.amax(sp.diagonal(Vtprev)-sp.diagonal(Vt))
        #TODO make this a ratio instead of absolute
        delta = max(sp.amax(mtprev-mt),sp.amax(sp.diagonal(Vtprev)-sp.diagonal(Vt)))
        conv[i]=delta
    print "EP finished with final max deltas "+str(conv[-3:])
    V = V0.dot(spl.solve(V0+Vt,Vt))
    m = V.dot((spl.solve(V0,m0)+spl.solve(Vt,mt)).T)
    return mt, Vt
 def featurex(self):
     """
     feature X: (c1, r1, c2, r2, c3, r3, ..., #correctness)
     """
     self.user=set()
     for sample in self.train_data:
         self.user.add(sample['user'])
     self.user=list(self.user)
     self.user_inv={}
     for n in xrange(0,len(self.user)):
         self.user_inv[self.user[n]]=n
     self.X=sp.zeros((len(self.user),len(self.questions.category)*2+1),dtype=float)
     
     for sample in self.train_data:
         ratio=(float(sample['position']))/len(self.questions.questions[sample['question']][3])
         user_idx=self.user_inv[sample['user']]
         ques_idx=self.questions.category_inv[self.questions.questions[sample['question']][2]]
         self.X[user_idx,ques_idx*2]+=1.0
         self.X[user_idx,ques_idx*2+1]+=abs(ratio)
         self.X[user_idx,-1]+=sp.sign(ratio)
     #normalization
     for n in xrange(0,len(self.X)):
         for m in xrange(0,len(self.questions.category)):
             if self.X[n,m*2]>0.5:
                 self.X[n,m*2+1]/=(self.X[n,m*2])
         self.X[n,-1]/=sum(self.X[n,0:-1:2])
         self.X[n,0:-1:2]/=sum(self.X[n,0:-1:2])
def plot_histogram(X, Y, w, b):
    ''' Plots a histogram of classifier outputs (w^T X) for each class with pl.hist 
    The title of the histogram is the accuracy of the classification
    Accuracy = #correctly classified points / N 
    
    Definition:     plot_histogram(X, Y, w, b)
    Input:          X       -  DxN array of N data points with D features
                    Y       -  1D array of length N of class labels
                    w       -  1D array of length D, weight vector 
                    b       -  bias term for linear classification   
    
    '''
    # calculate the correctly classified points
    correct = (sp.sign(w.dot(X) - b) == Y).nonzero()[0]

    #class labels 1
    target = w.dot(X[:, (Y == 1)])
    # class labels -1
    non_target = w.dot(X[:, (Y == -1)])

    pl.title("Acc %0.0f%%" % (float(correct.shape[0]) / X.shape[1] * 100,))
    pl.xlabel('w^T X')
    pl.hist(non_target)
    pl.hist(target)
    pl.legend(["non target", "target"], loc=0)
    pl.show()
Example #15
def calc_modal_vector(atoms1,atoms2):
    """
        Calculate the 'modal vector', i.e. the difference vector between the two configurations.
        The minimum image convention is applied!
    """
    from scipy.linalg import inv
    from scipy        import array,dot
    from scipy        import sign,floor
    cell1 = atoms1.get_cell()
    cell2 = atoms2.get_cell()

    # The cells need to be the same (otherwise the whole process won't make sense)
    if (cell1 != cell2).any():
        raise ValueError("Encountered different cells in atoms1 and atoms2. Those need to be the same.")
    cell = cell1

    icell = inv(cell)
                                            
    frac1 = atoms1.get_scaled_positions()
    frac2 = atoms2.get_scaled_positions()
    modal_vector_frac = frac1 - frac2
    for i in range(modal_vector_frac.shape[0]):
        for j in range(modal_vector_frac.shape[1]):
            if abs(modal_vector_frac[i,j]) > .5:
                value = modal_vector_frac[i,j]
                vsign = sign(modal_vector_frac[i,j])
                absvalue = abs(value)
                modal_vector_frac[i,j] = value - vsign*floor(absvalue+.5)
    return dot(modal_vector_frac,cell)
def north_direction(lat):
    '''get the north direction relative to image positive y coordinate'''
    dlatdx = nd.filters.sobel(lat,axis=1,mode='constant',cval=sp.nan) #gradient in x-direction
    dlatdy = nd.filters.sobel(lat,axis=0,mode='constant',cval=sp.nan)
    ydir = lat[-1,0] -lat[0,0] # check if latitude is ascending or descending in y axis
    # same step might have to be done with x direction.
    return sp.arctan2(dlatdx,dlatdy*sp.sign(ydir) )*180/sp.pi
Example #17
def PlotEigenvectors(eigVects, net = None, title = None):
    nEv = 3
    nOv = len(eigVects[:,0])
    for jj in range(nEv):
        subplot(nEv, 1, jj+1)
        if jj == 0 and title is not None:
            title(title)

        bar(range(nOv), eigVects[:,jj]/scipy.linalg.norm(eigVects[:,jj]))
        axis([-1, nOv] + axis()[2:])

        if net is not None:
            mags = zip(abs(eigVects[:,jj]), range(nOv), eigVects[:,jj])
            mags.sort()
            mags.reverse()
            for mag, index, val in mags[:5]:
                name = net.optimizableVars[index].name
                if name is None:
                    name = net.optimizableVars[index].id
                text(index, val + scipy.sign(val)*0.05, 
                           name,
                           horizontalalignment='center',
                           verticalalignment='center')

        a = list(axis())
        a[0:2] = [-.03*nOv, nOv*1.03]
        a[2] -= 0.1
        a[3] += 0.1
        axis(a)
Example #18
    def to_bool_array(self):
        """
        Returns a dense matrix representation of the underlying
        `scipy.sparse.csr_matrix`, in which rows correspond to the spike
        trains and columns to the binned index positions of spikes in a
        spike train. An entry is **True** for a spike and **False** for
        no spike.

        Returns
        -------
        bool matrix : numpy.ndarray
            Dense matrix representation of the sparse matrix, with
            **True** indicating a spike and **False** indicating no spike.
            The **True** entries in a row give the binned index positions
            of the spikes in that spike train; the rows correspond to the
            individual spike trains.

        Examples
        --------
        >>> import elephant.conversion as conv
        >>> import neo as n
        >>> import quantities as pq
        >>> a = n.SpikeTrain([0.5, 0.7, 1.2, 3.1, 4.3, 5.5, 6.7] * pq.s, t_stop=10.0 * pq.s)
        >>> x = conv.BinnedSpikeTrain(a, num_bins=10, binsize=1 * pq.s, t_start=0 * pq.s)
        >>> print(x.to_bool_array())
        [[ True  True False  True  True  True  True False False False]]

        See also
        --------
        scipy.sparse.csr_matrix
        scipy.sparse.csr_matrix.toarray
        """
        return abs(scipy.sign(self.to_array())).astype(bool)
Example #19
	def classify(self, xL, xR):
		a1L, a1R, a2L, a2LR, a2R, a3, z1Lb, z1LRb, z1Rb, z2b, xLb, xRb = self.forward_pass(xL, xR)
		if self.k == 2 :
			classif = sp.sign(a3);
		else :
			classif = sp.argmax(a3,axis=0);
		return a3, classif
 def performAstrocyteActions(self):
     i = len(self.neuronal_input_connection.params)/self.dim
     for j, active in enumerate(self.astrocyte_statuses):
         J = j*i
         assert active in [-1, 0, 1]
         if active == 1:
             # NEED TO CHECK _setParameters and _params method
             assert sign(self.remaining_active_durations[j]) == 1
             self.neuronal_input_connection.params[J:J+i] += \
               self.neuronal_input_connection.params[J:J+i]*self.increment_percent
             self.remaining_active_durations[j] -= 1
         elif active == -1:
             assert sign(self.remaining_active_durations[j]) == -1
             self.neuronal_input_connection.params[J:J+i] += \
               self.neuronal_input_connection.params[J:J+i]*-self.decrement_percent
             self.remaining_active_durations[j] += 1
Example #21
    def predict(self, Xtest,number_of_redraws=1000):
        yraw = Parallel(n_jobs=8)(delayed(svm_predict_raw)(self.X, Xtest, \
                                                            self.w, self.n_expand_samples, self.gamma, i) for i in
                                   range(number_of_redraws))

        yhat = sp.sign(sp.vstack(yraw).mean(axis=0))
        return yhat
Example #22
    def __call__(self, gradient, error):
        products = self.previous_gradient * gradient
        signs = sign(gradient)

        # For positive gradient parts.
        positive = (products > 0).astype('int8')
        pos_step = self.step * self.upfactor * positive
        clip(pos_step, -self.bound, self.bound)
        pos_update = self.values - signs * pos_step

        # For negative gradient parts.
        negative = (products < 0).astype('int8')
        neg_step = self.step * self.downfactor * negative
        clip(neg_step, -self.bound, self.bound)
        if error <= self.previous_error:
            # If the error has decreased, do nothing.
            neg_update = zeros(gradient.shape)
        else:
            # If it has increased, move back 2 steps.
            neg_update = self.more_prev_values
        # Set all negative gradients to zero for the next step.
        gradient *= positive

        # Bookkeeping.
        self.previous_gradient = gradient
        self.more_prev_values = self.prev_values
        self.prev_values = self.values.copy()
        self.previous_error = error

        # Updates.
        self.step[:] = pos_step + neg_step
        self.values[:] = positive * pos_update + negative * neg_update

        return self.values
    def bin_correlation_nu(self, lag_inds, freq_diffs, norms=False, cross_power=False):
        """"bin the correlation function in frequency only"""  
        nf = len(self.freq_inds)
        n_diffs = len(freq_diffs)
        # Allocate memory.
        corrf = sp.zeros((n_diffs, len(lag_inds)))
        countsf = sp.zeros(n_diffs, dtype=int)
        for ii in range(nf):
            for jj in range(nf):
                if norms:
                    thiscorr = (self.corr[ii, jj, lag_inds] *
                                self.norms[ii, jj, sp.newaxis])
                else:
                    thiscorr = self.corr[ii, jj, lag_inds]
                df = abs(self.freq1[ii] - self.freq2[jj])
                for kk in range(1, n_diffs - 1):
                    if (abs(freq_diffs[kk] - df) <= abs(freq_diffs[kk - 1] - df)
                        and abs(freq_diffs[kk] - df) < abs(freq_diffs[kk + 1] - df)):
                        d_ind = kk
                if abs(freq_diffs[0] - df) < abs(freq_diffs[1] - df):
                    d_ind = 0
                if abs(freq_diffs[-1] - df) <= abs(freq_diffs[-2] - df):
                    d_ind = n_diffs - 1
                corrf[d_ind, :] += thiscorr
                countsf[d_ind] += 1
        corrf /= countsf[:, sp.newaxis]

        if cross_power:
            pdat = corrf * 1e3
        else:
            pdat = sp.sign(corrf) * sp.sqrt(abs(corrf)) * 1e3

        return pdat
Example #24
 def classify_output(self, y):
     """Classify SVM output"""
     cl = int(s.sign(y))
     if cl == 0:
         import random
         print "Warning, class = 0, assigning label 1..."
         cl = 1
     return cl
Example #25
 def classify(self, x):
     """Classify the input (one point) as +1 or -1"""
     output = self.compute_layers_output(x)
     output_class = int(s.sign(output))
     if output_class == 0: 
         print >> sys.stderr, "DATA CLASSIFIED WITH 0; deciding to put class 1"
         output_class = 1
     return output_class
def fit_svm_dskl_emp(X,Y,Xtest,Ytest,its=100,eta=1.,C=.1,nPredSamples=10,nExpandSamples=10, kernel=(GaussianKernel,(1.))):
    Wemp = sp.randn(len(Y))
    Eemp = []

    for it in range(1,its+1):
        Wemp = step_dskl_empirical(X,Y,Wemp,eta/it,C,kernel,nPredSamples,nExpandSamples)
        Eemp.append(sp.mean(Ytest != sp.sign(predict_svm_emp(X,Xtest,Wemp,kernel)))) 
    return Eemp
Example #27
 def __init__(self, *args, **kwargs):
     MultiModalFunction.__init__(self, *args, **kwargs)
     self._mu0 = 2.5
     self._s = 1 - 1 / (2 * sqrt(self.xdim + 20) - 8.2)
     self._mu1 = -sqrt((self._mu0 ** 2 - 1) / self._s)
     self._signs = sign(randn(self.xdim))
     self._R1 = orth(rand(self.xdim, self.xdim))
     self._R2 = orth(rand(self.xdim, self.xdim))
     self._diags = generateDiags(100, self.xdim)
Example #28
def softThreshold(coeffs,thresh):
	new_coeffs = []
	for j in coeffs:
		new_coeffs.append(sp.copy(j))
	for j in xrange(1,len(new_coeffs)):
		for i in new_coeffs[j]:
			i[sp.absolute(i)<thresh] = 0
			i[sp.absolute(i)>=thresh] -= (sp.sign(i[sp.absolute(i)>=thresh]))*thresh
	return new_coeffs
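
A small hypothetical demo of the soft-thresholding above, with an approximation level followed by one detail level; only the detail coefficients are shrunk (legacy sp namespace and Python 2 xrange assumed, as in the function):

import scipy as sp

coeffs = [sp.array([4.0, -3.0, 2.5]),                          # approximation, left untouched
          sp.array([[0.2, -1.4, 0.05], [0.9, -0.1, 2.0]])]     # detail coefficients
shrunk = softThreshold(coeffs, thresh=0.5)
print(shrunk[1])   # small values zeroed, large ones shrunk towards zero by 0.5
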
def hyperparameter_search_dskl(reps=2,dname='sonar',maxN=1000,num_test=10000):
    Xtotal, Ytotal = load_realdata(dname)

    if maxN > 0:
        N = sp.minimum(Xtotal.shape[0], maxN)
    else:
        N = Xtotal.shape[0]

    params_dksl = {
        'n_pred_samples': [300],#[int(N/2*0.01)],#,int(N/2*0.02),int(N/2*0.03)],
        'n_expand_samples': [300],#[int(N/2*0.01)],#,int(N/2*0.02),int(N/2*0.03)],
        'n_its': [10000],
        'eta': [0.999],
        'C': 10. ** sp.arange(-30., -19., 1.),#2
        'gamma': [10.], #** sp.arange(0., 4., 1.),#2
        'workers': [7],
    }

    print "checking parameters:\n",params_dksl

    Eemp = []

    for irep in range(reps):
        print "repetition:",irep," of ",reps
        #idx = sp.random.randint(low=0,high=Xtotal.shape[0],size=N + num_test)
        Xtrain = Xtotal[:N,:]#idx[:N],:]
        Ytrain = Ytotal[:N]#idx[:N]]
        # TODO: check whether we have enough data here.
        Xtest = Xtotal[N:N+num_test,:]#idx[N:N+num_test],:]
        Ytest = Ytotal[N:N+num_test]#idx[N:N+num_test]]

        # Xtrain = Xtrain.todense()
        # Xtest = Xtest.todense()
        if not sp.sparse.issparse(Xtrain):
            scaler = StandardScaler()
            scaler.fit(Xtrain)  # Don't cheat - fit only on training data
            Xtrain = scaler.transform(Xtrain)
            Xtest = scaler.transform(Xtest)
        else:
            scaler = StandardScaler(with_mean=False)
            scaler.fit(Xtrain)
            Xtrain = scaler.transform(Xtrain)
            Xtest = scaler.transform(Xtest)
        print "Training empirical"
        # clf = GridSearchCV(DSEKL(),params_dksl,n_jobs=-1,verbose=1,cv=2).fit(Xtrain,Ytrain)
        clf = GridSearchCV(DSEKLBATCH(),params_dksl,n_jobs=-1,verbose=1,cv=2).fit(Xtrain,Ytrain)
        print clf.best_estimator_.get_params()
        Eemp.append(sp.mean(sp.sign(clf.best_estimator_.transform(Xtest))!=Ytest))
        print "Emp: %0.2f"%(Eemp[-1])
        fname = custom_data_home + "clf_scmallscale_" + dname + "_nt" + str(N) + "_" + str(irep) + str(datetime.datetime.now())
        f = open(fname,'wb')
        print "saving to file:", fname
        pickle.dump(clf, f, pickle.HIGHEST_PROTOCOL)
    print "***************************************************************"
    print "Data set [%s]: Emp_avg: %0.2f+-%0.2f"%(dname,sp.array(Eemp).mean(),sp.array(Eemp).std())
    print "***************************************************************"
def run_realdata(reps=2,dname='sonar',maxN=1000):
    Xtotal,Ytotal = load_realdata(dname)

    params_dksl = {
            'n_pred_samples': [500,1000],
            'n_expand_samples': [500,1000],
            'n_its':[1000],
            'eta':[1.],
            'C':10.**sp.arange(-8.,4.,2.),#**sp.arange(-8,-6,1),#[1e-6],#
            'gamma':10.**sp.arange(-4.,4.,2.),#**sp.arange(-1.,2.,1)#[10.]#
            'workers':[500,1000]
            }
    
    params_batch = {
            'C':10.**sp.arange(-8.,4.,2.),
            'gamma':10.**sp.arange(-4.,4.,2.)
            }
 
    if maxN > 0:
        N = sp.minimum(Xtotal.shape[0],maxN)
    else:
        N = Xtotal.shape[0]

    Eemp,Ebatch = [],[]
    num_train = int(0.9*N)
    for irep in range(reps):
        print "repetition:",irep," of ",reps
        idx = sp.random.randint(low=0,high=Xtotal.shape[0],size=N)
        Xtrain = Xtotal[idx[:num_train],:]
        Ytrain = Ytotal[idx[:num_train]]
        Xtest = Xtotal[idx[num_train:],:]
        Ytest = Ytotal[idx[num_train:]]

        # Xtrain = Xtrain.todense()
        # Xtest = Xtest.todense()
        if not sp.sparse.issparse(Xtrain):
            scaler = StandardScaler()
            scaler.fit(Xtrain)  # Don't cheat - fit only on training data
            Xtrain = scaler.transform(Xtrain)
            Xtest = scaler.transform(Xtest)
        else:
            scaler = StandardScaler(with_mean=False)
            scaler.fit(Xtrain)
            Xtrain = scaler.transform(Xtrain)
            Xtest = scaler.transform(Xtest)
        print "Training empirical"
        clf = GridSearchCV(DSEKL(),params_dksl,n_jobs=10,verbose=1,cv=2).fit(Xtrain,Ytrain)
        Eemp.append(sp.mean(sp.sign(clf.best_estimator_.transform(Xtest))!=Ytest))
        clf_batch = GridSearchCV(svm.SVC(),params_batch,n_jobs=1000,verbose=1,cv=3).fit(Xtrain,Ytrain)
        Ebatch.append(sp.mean(clf_batch.best_estimator_.predict(Xtest)!=Ytest))
        print "Emp: %0.2f - Batch: %0.2f"%(Eemp[-1],Ebatch[-1])
        print clf.best_estimator_.get_params()
        print clf_batch.best_estimator_.get_params()
    print "***************************************************************"
    print "Data set [%s]: Emp_avg: %0.2f+-%0.2f - Ebatch_avg: %0.2f+-%0.2f"%(dname,sp.array(Eemp).mean(),sp.array(Eemp).std(),sp.array(Ebatch).mean(),sp.array(Ebatch).std())
    print "***************************************************************"
Example #31
def metastable_u0s(k, includeNegativeu0=True):
    # compute all the values of u0 corresponding to metastable boundaries
    u0List = [0]

    if k < 0:
        raise NotImplementedError(
            'Metastable boundaries for negative k not implemented')

    # compute metastable bc for n positive
    n = 0
    err = lambda u0: k * u0 - abs(sin(u0 / 2))
    while sign(err(2 * pi * (n + .5))) != sign(err(2 * pi * (n + 1))):
        u0 = brentq(err, 2 * pi * (n + .5), 2 * pi * (n + 1))
        u0List.append(u0)

        if includeNegativeu0:
            # add negative metastable u0
            u0List.insert(0, -u0)

        n += 1

    return np.array(u0List)
Example #32
    def getBetaSNPste(self):
        """
        get standard errors on betas

        Returns
        -------
        beta_ste : ndarray
        """
        beta = self.getBetaSNP()
        pv = self.getPv()
        z = sp.sign(beta) * sp.sqrt(st.chi2(1).isf(pv))
        ste = beta / z
        return ste
Example #33
def construct_co_occurrence_matrix(sparse_tokens):
    """
    Calculates a log-scaled co-occurrence matrix between all elements of a count matrix using a dot product.
    :param sparse_tokens: A sparse matrix of counts
    :return: Log-scaled co-occurrence matrix
    """
    # compute normalized log co-occurrence counts
    co_occur = (sparse_tokens.T).dot(sparse_tokens).todense()
    co_occur_log_matrix = sp.special.xlogy(
        sp.sign(co_occur), co_occur
    ) - np.log(co_occur.sum())
    np.fill_diagonal(co_occur_log_matrix, 0)
    return co_occur_log_matrix
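
A tiny hypothetical document-term count matrix to exercise the function (it expects sp to be scipy with scipy.special available and np to be numpy):

import numpy as np
from scipy.sparse import csr_matrix

counts = csr_matrix(np.array([[1, 0, 2],
                              [0, 3, 1],
                              [1, 1, 0]]))
log_cooc = construct_co_occurrence_matrix(counts)
print(log_cooc.shape)   # (3, 3): one row and column per vocabulary term
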
Example #34
def centralityEigenvector(G, max_iter=50, tol=0):
    M = nx.to_scipy_sparse_matrix(G,
                                  nodelist=list(G),
                                  weight=None,
                                  dtype=float)
    eigenvalue, eigenvector = linalg.eigs(M.T,
                                          k=1,
                                          which='LR',
                                          maxiter=max_iter,
                                          tol=tol)
    largest = eigenvector.flatten().real
    norm = sp.sign(largest.sum()) * sp.linalg.norm(largest)
    return dict(zip(G, largest / norm))
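
A hypothetical call on a small built-in graph (assumes an older networkx that still provides nx.to_scipy_sparse_matrix, with linalg being scipy.sparse.linalg and sp the legacy scipy namespace):

import networkx as nx

G = nx.karate_club_graph()
centrality = centralityEigenvector(G, max_iter=100)
print(sorted(centrality, key=centrality.get, reverse=True)[:3])   # three most central nodes
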
Example #35
 def initializer(self, shape):
     """通过随机正交矩阵进行LU分解初始化
     """
     import scipy as sp
     import scipy.linalg
     random_matrix = sp.random.randn(shape[-1], shape[-1])
     random_orthogonal = sp.linalg.qr(random_matrix)[0]
     p, l, u = sp.linalg.lu(random_orthogonal)
     u_diag_sign = sp.sign(sp.diag(u))
     u_diag_abs_log = sp.log(abs(sp.diag(u)))
     l_mask = 1 - sp.tri(shape[-1]).T  # mask for l: lower-triangular ones (zero diagonal)
     u_mask = 1 - sp.tri(shape[-1])  # mask for u: upper-triangular ones (zero diagonal)
     return p, l, u, u_diag_sign, u_diag_abs_log, l_mask, u_mask
Example #36
def signal_to_noise(probability_of_detection, probability_of_false_alarm,
                    number_of_pulses, swerling_type):
    """
    Calculate the single pulse signal to noise for non-coherent integration.
    :param probability_of_detection: The probability of detection.
    :param probability_of_false_alarm: The probability of false alarm.
    :param number_of_pulses: The number of pulses to be non-coherently integrated.
    :param swerling_type: The Swerling target type.
    :return: The single pulse signal to noise.
    """
    # First parameter, based on Swerling type
    if swerling_type == 'Swerling 0':
        k = sys.float_info.max
    elif swerling_type == 'Swerling 1':
        k = 1
    elif swerling_type == 'Swerling 2':
        k = number_of_pulses
    elif swerling_type == 'Swerling 3':
        k = 2
    else:
        k = 2 * number_of_pulses

    # Second parameter, based on number of pulses
    if number_of_pulses < 40:
        alpha = 0
    else:
        alpha = 0.25

    # Calculated parameters
    eta = sqrt(-0.8 * log(4 * probability_of_false_alarm * (1.0 - probability_of_false_alarm))) \
          + sign(probability_of_detection - 0.5) * sqrt(-0.8 * log(4 * probability_of_detection * (1.0 - probability_of_detection)))
    x = eta * (eta + 2.0 * sqrt(0.5 * number_of_pulses + alpha - 0.25))

    # Constants
    c1 = (((17.7006 * probability_of_detection - 18.4496) *
           probability_of_detection + 14.5339) * probability_of_detection -
          3.525) / k
    c2 = (1.0 / k) * (exp(27.31 * probability_of_detection - 25.14) +
                      (probability_of_detection - 0.8) *
                      (0.7 * log(1e-5 / probability_of_false_alarm) +
                       (2.0 * number_of_pulses - 2)) / 80)

    if 0.1 <= probability_of_detection <= 0.872:
        cdb = c1
    elif 0.872 <= probability_of_detection <= 0.99:
        cdb = c1 + c2

    c = 10.0**(cdb / 10.0)

    # Signal to noise ratio
    return 10.0 * log10(c * x / number_of_pulses)
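
A hypothetical call (the math helpers sqrt, log, exp, sign and log10 are assumed to be imported from scipy or math in the source module):

snr_db = signal_to_noise(probability_of_detection=0.9,
                         probability_of_false_alarm=1e-6,
                         number_of_pulses=10,
                         swerling_type='Swerling 1')
print("required single-pulse SNR: %0.2f dB" % snr_db)
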
def fit_svm_dskl_comparison(X,Y,its=100,eta=1.,C=.1,nPredSamples=10,nExpandSamples=10, kernel=(GaussianKernel,(1.))):
    
    # split into train and test
    Xtest = X[:,:len(Y)/2]
    Ytest = Y[:len(Y)/2]
    X = X[:,(len(Y)/2):]
    Y = Y[(len(Y)/2):]

    # random gaussian for rks
    Zrks = sp.randn(len(Y),X.shape[0]) / (kernel[1]**2)
    Wrks = sp.randn(len(Y))

    Wemp = sp.randn(len(Y))

    Erks = []
    Eemp = []

    for it in range(1,its+1):
        Wrks = step_dskl_rks(X,Y,Wrks,Zrks,eta/it,C,nPredSamples,nExpandSamples)
        Wemp = step_dskl_empirical(X,Y,Wemp,eta/it,C,kernel,nPredSamples,nExpandSamples)
        Erks.append(sp.mean(Ytest != sp.sign(predict_svm_rks(Xtest,Wrks,Zrks))))
        Eemp.append(sp.mean(Ytest != sp.sign(predict_svm_emp(X,Xtest,Wemp,kernel)))) 
    return Eemp,Erks
def fit_svm_kernel(X,Y,its=100,eta=1.,C=.1,kernel=(GaussianKernel,(1.)),nPredSamples=10,nExpandSamples=10):
    D,N = X.shape[0],X.shape[1]
    X = sp.vstack((sp.ones((1,N)),X))
    W = sp.randn(len(Y))
    for it in range(its):
        print "Iteration %4d Accuracy %0.3f"%(it,sp.mean(Y==sp.sign(kernel[0](X,X,kernel[1]).dot(W))))
        rnpred = sp.random.randint(low=0,high=N,size=nPredSamples)
        rnexpand = sp.random.randint(low=0,high=N,size=nExpandSamples)
        # compute gradient 
        G = compute_gradient(Y[rnpred],X[:,rnpred],X[:,rnexpand],W[rnexpand],kernel,C)
        # update 
        W[rnexpand] -= eta/(it+1.) * G
	
    return W
Example #39
 def _bess(npts, x1, x2, x1err, x2err, cerr):
     """
     Do the entire regression calculation for 4 slopes:
       OLS(Y|X), OLS(X|Y), bisector, orthogonal
     """
     # calculate sigma's for datapoints using length of confidence
     # intervals
     sig11var = sum(x1err**2) / npts
     sig22var = sum(x2err**2) / npts
     sig12var = sum(cerr) / npts
     # calculate means and variances
     x1av = scipy.average(x1)
     x1var = scipy.std(x1)**2
     x2av = scipy.average(x2)
     x2var = scipy.std(x2)**2
     covar_x1x2 = sum((x1 - x1av) * (x2 - x2av)) / npts
     # compute the regression slopes for OLS(X2|X1), OLS(X1|X2),
     # bisector and orthogonal
     b = scipy.zeros(4)
     b[0] = (covar_x1x2 - sig12var) / (x1var - sig11var)
     b[1] = (x2var - sig22var) / (covar_x1x2 - sig12var)
     b[2] = (b[0] * b[1] - 1 + scipy.sqrt((1 + b[0] ** 2) * \
            (1 + b[1] ** 2))) / (b[0] + b[1])
     b[3] = 0.5 * ((b[1] - 1 / b[0]) + scipy.sign(covar_x1x2) * \
            scipy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))
     # compute intercepts for above 4 cases:
     a = x2av - b * x1av
     # set up variables to calculate standard deviations of slope
     # and intercept
     xi = []
     xi.append(((x1 - x1av) * \
                (x2 - b[0] * x1 - a[0]) + b[0] * x1err ** 2) / \
               (x1var - sig11var))
     xi.append(((x2 - x2av) * (x2 - b[1] * x1 - a[1]) + x2err ** 2) / \
               covar_x1x2)
     xi.append((xi[0] * (1 + b[1] ** 2) + xi[1] * (1 + b[0] ** 2)) / \
               ((b[0] + b[1]) * \
                scipy.sqrt((1 + b[0] ** 2) * (1 + b[1] ** 2))))
     xi.append((xi[0] / b[0] ** 2 + xi[1]) * b[3] / \
               scipy.sqrt(4 + (b[1] - 1 / b[0]) ** 2))
     zeta = []
     for i in xrange(4):
         zeta.append(x2 - b[i] * x1 - x1av * xi[i])
     # calculate  variance for all a and b
     bvar = scipy.zeros(4)
     avar = scipy.zeros(4)
     for i in xrange(4):
         bvar[i] = scipy.std(xi[i])**2 / npts
         avar[i] = scipy.std(zeta[i])**2 / npts
     return a, b, avar, bvar, xi, zeta
Example #40
    def draw_weighted_adjacency_matrix(self, inmatrix, labels=None, pos=None):
        # remove connections with low values ...
        if self.threshold:
            inmatrix = self.apply_threshold(inmatrix, self.threshold)
        adjmat = S.sign(inmatrix)
        dgr = nx.from_numpy_matrix(adjmat, create_using=nx.DiGraph())
        if pos is None:
            pos = nx.graphviz_layout(dgr)

        edgewidth = []
        for (u, v) in dgr.edges():
            flow = inmatrix[u][v]
            # normalize flow by dividing by some number
            flow = flow / self.flow_scaler
            # now done above
            # set flow = 0 when less than some value ...
            # so flow originally less than 100
            # if flow < 0.5: flow = 0
            edgewidth.append(flow)
        selfflows = []
        # normalize size of nodes by dividing by arbitrary constants
        for nn in dgr.nodes():
            flowtoself = inmatrix[nn][nn] / self.total_scaler
            selfflows.append(flowtoself)
        # assume nodes are enumerated in right order
        totalflows = inmatrix.sum(axis=1) / self.total_scaler
        nx.draw_networkx_edges(dgr,
                               pos,
                               width=edgewidth,
                               edge_color='b',
                               alpha=0.5,
                               node_size=0)
        nx.draw_networkx_nodes(dgr,
                               pos,
                               node_size=totalflows,
                               node_color='k',
                               alpha=1.0)
        nx.draw_networkx_nodes(dgr,
                               pos,
                               node_size=selfflows,
                               node_color='y',
                               alpha=1.0)
        nx.draw_networkx_labels(dgr,
                                pos,
                                labels=labels,
                                font_size=self.font_size,
                                font_color='r')
        return (dgr, pos)
Example #41
    def __init__(self, documents, tfidf):
        self.tfidf = tfidf
        self.vector_tfidf = tfidf.fit_transform(documents)
        tfidf.use_idf = False
        tfidf.norm = None
        self.count_tf = tfidf.fit_transform(documents)
        self.matrix_tfidf = self.vector_tfidf
        self.matrix_binary = csr_matrix(scipy.sign(self.count_tf.toarray()))

        #self.XT = self.matrix_binary.T * self.matrix_binary
        #self.XD = self.matrix_binary * self.matrix_binary.T

        self.KT = np.dot(self.matrix_tfidf.T, self.matrix_tfidf)
        #self.KD = self.matrix_tfidf * self.matrix_tfidf.T

        self.terms = self.tfidf.get_feature_names()
Example #42
    def captive_system_func(self, v, t, v_cap):
        """
        Function governing the captive trajectory system dynamics, i.e., f(v,t) where

        dv/dt = f(v,t).

        Note: in this case v is ignored and v_cap is used, as the forces remain
        constant between realtime system updates. The system is simulated by
        running an odeint integration for the system evolution between every
        realtime system update.
        """
        # Get mass and damping
        m = param.sub_body_mass + param.sub_mount_mass
        # Compute function value
        val = -scipy.sign(v_cap) * self.drag_func(v_cap) / m + self.force_func(
            v_cap, t) / m
        return val
Example #43
    def train(self):
        total_batch = int(self.n_items / self.batch_size)
        idxs = np.random.permutation(self.n_items)  # shuffled ordering
        loss = []
        for i in range(total_batch):
            batch_set_idx = idxs[i * self.batch_size: (i + 1) * self.batch_size]

            _, loss_ = self.sess.run(
                [self.optimizer, self.loss],
                feed_dict={
                    self.rating_matrix: self.train_matrix[:, batch_set_idx].toarray(),
                    self.rating_matrix_mask: scipy.sign(self.train_matrix[:, batch_set_idx].toarray()),
                    self.keep_rate_net: 1  # 0.95
                })

            loss.append(loss_)
        return np.mean(loss)
def train_perceptron(X, Y, iterations=200, eta=.1):
    ''' Trains a linear perceptron
    Definition:  w, b, acc  = train_perceptron(X,Y,iterations=200,eta=.1)
    Input:       X       -  DxN array of N data points with D features
                 Y       -  1D array of length N of class labels {-1, 1}
                 iter    -  optional, number of iterations, default 200
                 eta     -  optional, learning rate, default 0.1
    Output:      w       -  1D array of length D, weight vector 
                 b       -  bias term for linear classification                          
                 acc     -  1D array of length iter, contains classification accuracies 
                            after each iteration  
                            Accuracy = #correctly classified points / N 
    '''
    acc = sp.zeros((iterations))
    #include the bias term by adding a row of ones to X
    X = sp.concatenate((sp.ones((1, X.shape[1])), X))
    #initialize weight vector
    weights = sp.ones((X.shape[0])) / X.shape[0]
    for it in sp.arange(iterations):
        # indices of misclassified data
        wrong = (sp.sign(weights.dot(X)) != Y).nonzero()[0]
        # compute accuracy acc[it]
        acc[it] = 1 - (
            wrong.shape[0] / float(X.shape[1])
        )  # convert one operand to float to get a float division
        if wrong.shape[0] > 0:
            # pick a random misclassified data point
            xidx = sp.random.choice(wrong)
            x = X[:, xidx]
            y = Y[xidx]
            # update weight vector (use variable learning rate (eta/(1.+it)) )
            weights = weights + (eta / (1. + it)) * x * y
            if it % 20 == 0:
                print "Iteration %d: " % it + "Accuracy %0.2f" % acc[it]
    b = -weights[0]
    w = weights[1:]
    #return weight vector, bias and accuracies
    return w, b, acc
Example #45
def load_bci_data(fname):
    ''' Loads BCI data (one subject, copy-spelling experiment) from <fname> 
    Definition:  X, Y = load_bci_data(fname)
    Input:       fname   - string
    Output:      X       -  DxN array with N images with D pixels
                 Y       -  1D array of length N of class labels 
                            (1- target, -1 - non-target)                         
    '''
    # load the data
    data = io.loadmat(fname)
    # extract time-electrode features and labels
    X = data['X']
    Y = data['Y']
    # collapse the time-electrode dimensions
    X = sp.reshape(X, (X.shape[0] * X.shape[1], X.shape[2]))
    # transform the labels to (-1,1)
    Y = sp.sign((Y[0, :] > 0) - .5)
    return X, Y
def guess_initial_parameters(mean, phi0, omega):
    mean_max = mean.max()
    mean_min = mean.min()
    offset = (mean_max + mean_min) / 2.
    amplitude = (mean_max - mean_min) / 2.
    if phi0 is None or omega is None:
        y = mean - offset
        y2 = savgol_filter(y, 11, 1, mode="nearest")
        if phi0 is None:
            cos_phi0 = clip(y2[0] / amplitude, -1., 1.)
            if y2[1] > y2[0]:
                phi0 = -arccos(cos_phi0)
            else:
                phi0 = arccos(cos_phi0)
        if omega is None:
            zero_crossings = scipy.where(scipy.diff(scipy.sign(y2)))[0]
            omega = pi / scipy.average(scipy.diff(zero_crossings))
    return offset, amplitude, phi0, omega
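
A hypothetical oscillation trace to illustrate the initial guesses (the function assumes savgol_filter, clip, arccos, pi and the legacy scipy helpers are imported in its module; the recovered omega is in radians per sample):

import numpy as np

samples = 2.0 + 1.5 * np.cos(0.06 * np.arange(300) + 0.4)
offset, amplitude, phi0, omega = guess_initial_parameters(samples, phi0=None, omega=None)
print(offset, amplitude, phi0, omega)   # roughly 2.0, 1.5, 0.4 and 0.06
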
Example #47
    def getF(self):
        """
        returns F=RB_{\Phi}(\Psi), often calculated for grad-shafranov
        solutions. Not implemented on LIUQE

        Returns:
            F (Array): [nt,npsi] array of F=RB_{\Phi}(\Psi)
            Not stored on LIUQE nodes
        Raises:
            ValueError: if module cannot retrieve data from MDS tree.
        """
        if self._fpol is None:
            try:
                fluxFFNode = self._MDSTree.getNode(self._root+'::ttpr_coeffs')
                # In LIUQE the data are divided by mu_0; following the way
                # CHEASE works, we normalize for mu_0/B0, i.e. we normalize
                # appropriately as done in read_results_for_chease
                _zb0 = self.getBtVac()[:, None]
                _zb0Sign = scipy.sign(_zb0.mean())
                duData = (fluxFFNode.data() *
                          scipy.constants.mu_0)
                # then we build an appropriate grid
                nPsi = self.getRmidPsi().shape[1]
                psiV = scipy.linspace(1, 0, nPsi)
                rad = [psiV]
                for i in range(duData.shape[1]-1):
                    rad += [rad[-1]*psiV*(i+1)/(i+2)]
                rad = scipy.vstack(rad)
                _du = scipy.dot(duData, rad)
                self._fpol = _zb0Sign*scipy.sqrt(
                    scipy.reshape(
                        self.getFluxAxis(),
                        (self.getFluxAxis().size, 1))*2 *
                    scipy.subtract(_du.transpose(),
                                   _du[:, -1].transpose()).transpose() +
                    (_zb0*0.88)**2)

                self._defaultUnits['_fpol'] = 'T*m'
            except TreeException:
                raise ValueError('data retrieval failed.')
        return self._fpol.copy()
    def __call__(self, gradient, error=None):
        """ calculates parameter change based on given gradient and returns updated parameters """
        # check if gradient has correct dimensionality, then make array
        if len(gradient) != len(self.values):
            raise Exception("{} is not equal to {}".format(
                str(gradient), str(self.values)))

        gradient_arr = asarray(gradient)

        if self.rprop:
            rprop_theta = self.rprop_theta

            # update parameters
            self.values += sign(gradient_arr) * rprop_theta

            # update rprop meta parameters
            dirSwitch = self.lastgradient * gradient_arr
            rprop_theta[dirSwitch > 0] *= self.etaplus
            idx = dirSwitch < 0
            rprop_theta[idx] *= self.etaminus
            gradient_arr[idx] = 0

            # upper and lower bound for both matrices
            rprop_theta = rprop_theta.clip(min=self.deltamin,
                                           max=self.deltamax)

            # save current gradients to compare with in next time step
            self.lastgradient = gradient_arr.copy()

            self.rprop_theta = rprop_theta

        else:
            # update momentum vector (momentum = 0 clears it)
            self.momentumvector *= self.momentum

            # update parameters (including momentum)
            self.momentumvector += self.alpha * gradient_arr
            self.alpha *= self.alphadecay

            # update parameters
            self.values += self.momentumvector

        return self.values
Example #49
    def sort_reduce_tfidf(self, n):

        # Converte para Matrix
        mtx = self.vector_tfidf.toarray()

        # Converte matrix espaca em array para ordenar por TFIDF
        s = [np.sum(x) for x in mtx.T]
        sorted_s = sorted(s)
        sorted_s = np.array(sorted_s)
        if (not n): n = len(s)

        s_id_sorted = sorted(range(len(s)), key=lambda k: s[k],
                             reverse=True)[:n]

        # print(s_id_sorted)
        self.matrix_tfidf = mtx.T[s_id_sorted].T
        self.matrix_binary = scipy.sign(self.matrix_tfidf)
        self.terms = np.array(self.tfidf.get_feature_names())[s_id_sorted]
        return
def get_e_in(theta, s, x, y, u=None):
    e_in = 0
    correct = []
    incorrect = []
    if u is None:
        n = len(x)
    else:
        n = sum(u)

    for i in range(len(x)):
        if s * sign(x[i] - theta) != y[i]:
            incorrect.append(i)
            if u is None:
                e_in += 1 / n
            else:
                e_in += u[i] / n
        else:
            correct.append(i)
    return e_in, correct, incorrect
def hess(A):
    """Computes the upper Hessenberg form of A using Householder reflectors.
	input:  A, mxn array
	output: Q, orthogonal mxm array
			H, upper Hessenberg
			s.t. QHQ' = A
	"""
    H = A.copy()
    m, n = H.shape
    Q = sp.eye(m, m)
    for k in sp.arange(n - 2):
        v = H[k + 1:m, k].copy()
        v[0] += sp.sign(v[0]) * la.norm(v)
        v = v / la.norm(v)
        v = v.reshape(m - k - 1, 1)
        P = sp.eye(m, m)
        P[k + 1:m, k + 1:m] -= 2 * sp.dot(v, v.T)
        Q = sp.dot(P, Q)
        H = sp.dot(P, sp.dot(H, P.T))
    return Q.T, H
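
A quick hypothetical check of the similarity transform (legacy sp/la aliases as in the function itself):

import scipy as sp
import scipy.linalg as la

sp.random.seed(0)
A = sp.randn(5, 5)
Q, H = hess(A)
print(la.norm(sp.dot(Q, sp.dot(H, Q.T)) - A))   # should be ~0: Q H Q' reconstructs A
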
def hqr(A):
    """Finds the QR decomposition of A using Householder reflectors.
	input: 	A, mxn array with m>=n
	output: Q, orthogonal mxm array
	        R, upper triangular mxn array
	        s.t QR = A
	"""
    R = A.copy()
    m, n = R.shape
    Q = sp.eye(m, m)
    for k in sp.arange(n - 1):
        v = R[k:m, k].copy()
        v[0] += sp.sign(v[0]) * la.norm(v)
        v = v / la.norm(v)
        v = v.reshape(m - k, 1)
        P = sp.eye(m, m)
        P[k:m, k:m] -= 2 * sp.dot(v, v.T)
        Q = sp.dot(P, Q)
        R = sp.dot(P, R)
    return Q.T, R
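
And a corresponding hypothetical check for the QR factorization (square input, so the n-1 reflections in the loop suffice):

import scipy as sp
import scipy.linalg as la

sp.random.seed(0)
A = sp.randn(4, 4)
Q, R = hqr(A)
print(la.norm(sp.dot(Q, R) - A))                # should be ~0
print(la.norm(sp.dot(Q.T, Q) - sp.eye(4)))      # Q is orthogonal
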
Example #53
def Horizontal(lat_sites, lon_sites, lat_events, lon_events, lengths, azimuths,
               widths, dips, depths, depths_to_top, projection,
               trace_start_lat, trace_start_lon, rupture_centroid_x,
               rupture_centroid_y):
    """Distance function that calculates Horizontal.

    Horizontal is the shortest horizontal distance (km) from a site to the line
    defined
    by extending the surface rupture trace to infinity.

                 ^ north
                /
               /\azimuth
        start 0======---+--------
               \        |
                \       |
                 \      |
                  \     | Rx
                   \    |
                    \   |
                     \  |
                      \ |
                       \|
                        .
                      site

    We get Horizontal by using ll2xy() to convert start/site positions to x and y
    in the coordinates relative to start and with axes shown in fig 3.1
    of the manual.  Rx is therefore the y value.
    """

    # get correct dimensionality
    lat_sites = lat_sites[:, newaxis]
    lon_sites = lon_sites[:, newaxis]

    # get x,y position of sites w.r.t. origin 'start'
    (_, Rx) = ll2xy(lat_sites, lon_sites, trace_start_lat, trace_start_lon,
                    azimuths)

    # limit distance to 1.0km minimum
    return where(abs(Rx) < DISTANCE_LIMIT, sign(Rx) * DISTANCE_LIMIT, Rx)
Example #54
    def scale_DLogistic(self, steepness):
        '''  Scale the video with a double logistic of steepness.
          Double Logistic Definition (bottom):
               http://en.wikipedia.org/wiki/Logistic_function

          Use a double logistic to normalize the 1-band video array in Va and 
          then rescale output to the 0:1 range

          Some asserts that are required.
          1. Video is float32 already

          TODO: Inplace operation should be supported.  But, the sp.sign and 
          sp.exp don't seem to allow out= keywords even though their doc says 
          they do.
        '''

        assert (self.V.dtype == sp.float32)
        Va = self.V
        Va = sp.sign(Va) * (1.0 - sp.exp(-(Va / steepness) * (Va / steepness)))
        Va = (Va + 1.0) / 2.0

        return asvideo(Va)
Example #55
def create_my_plot(ad_time, gd_time, ad_score):
    FF = lambda a, b: sc.sign(a - b) * a / b

    ad_time[ad_time == 0] = 9
    gd_time[gd_time == 0] = 9
    print("ad_time".format(np.mean(ad_time, axis=0)))
    print("".format(np.mean(gd_time, axis=0)))

    plt.imshow(sc.mean(FF(ad_time, gd_time), axis=0),
               cmap=plt.cm.get_cmap('RdBu'),
               vmin=-9,
               vmax=9)
    cb = plt.colorbar()
    tick_locator = ticker.MaxNLocator(nbins=9)
    cb.locator = tick_locator
    cb.update_ticks()
    cb.ax.set_yticklabels(
        ['1/8', '1/6', '1/4', '1/2', '1', '2', '4', '6', '8'])
    plt.xticks(np.arange(6), ('200', '500', '1000', '2000', '5000', '10000'))
    plt.yticks(np.arange(7),
               ('30000', '20000', '10000', '5000', '2500', '1000', '200'))

    plt.show()

    GG = lambda a, b: sc.log(a / b)

    plt.figure()
    plt.imshow(sc.mean(sc.log(ad_score), axis=0),
               cmap=plt.cm.get_cmap('Reds'),
               vmin=-18,
               vmax=0)
    cb = plt.colorbar()
    tick_locator = ticker.MaxNLocator(nbins=5)
    cb.locator = tick_locator
    cb.update_ticks()
    cb.ax.set_yticklabels(['1e-8', '1e-6', '1e-4', '1e-2', '1'])
    plt.xticks(np.arange(6), ('200', '500', '1000', '2000', '5000', '10000'))
    plt.yticks(np.arange(7),
               ('30000', '20000', '10000', '5000', '2500', '1000', '200'))
Example #56
    def bin_correlation_nu(self,
                           lag_inds,
                           freq_diffs,
                           norms=False,
                           cross_power=False):
        """"bin the correlation function in frequency only"""
        nf = len(self.freq_inds)
        n_diffs = len(freq_diffs)
        # Allocate memory.
        corrf = sp.zeros((n_diffs, len(lag_inds)))
        countsf = sp.zeros(n_diffs, dtype=int)
        for ii in range(nf):
            for jj in range(nf):
                if norms:
                    thiscorr = (self.corr[ii, jj, lag_inds] *
                                self.norms[ii, jj, sp.newaxis])
                else:
                    thiscorr = self.corr[ii, jj, lag_inds]
                df = abs(self.freq1[ii] - self.freq2[jj])
                for kk in range(1, n_diffs - 1):
                    if (abs(freq_diffs[kk] - df) <=
                            abs(freq_diffs[kk - 1] - df)
                            and abs(freq_diffs[kk] - df) <
                            abs(freq_diffs[kk + 1] - df)):
                        d_ind = kk
                if abs(freq_diffs[0] - df) < abs(freq_diffs[1] - df):
                    d_ind = 0
                if abs(freq_diffs[-1] - df) <= abs(freq_diffs[-2] - df):
                    d_ind = n_diffs - 1
                corrf[d_ind, :] += thiscorr
                countsf[d_ind] += 1
        corrf /= countsf[:, sp.newaxis]

        if cross_power:
            pdat = corrf * 1e3
        else:
            pdat = sp.sign(corrf) * sp.sqrt(abs(corrf)) * 1e3

        return pdat
Example #57
def showfullmulti( matfile ):
  """
  Author: Disa Mhembere
  contact: [email protected]
    1. False color graph from a mat file
    2. Mono-coloring for binarized mat file
    Both full graphs not upper/lower triangular
  """
  matcontents = loadmat ( matfile )

  graphcsc = matcontents[ "fibergraph" ]
  graphdata = np.array ( graphcsc.todense() )
  graphdata = graphdata + graphdata.T
  print graphdata [ 0:4, 0:4 ]
  fig = matplotlib.pyplot.pcolormesh( graphdata[:,:] )
  matplotlib.pyplot.colorbar()
  matplotlib.pyplot.show ()

  bw_graphdata = scipy.sign(graphdata)
  fig2 = matplotlib.pyplot.pcolormesh( bw_graphdata[ :,: ], cmap='Greys')#, interpolation='nearest' )
  matplotlib.pyplot.colorbar()
  matplotlib.pyplot.show ()
Example #58
def plot_eigvect(vect, labels=None, bottom=0, num_label=5, label_offset=0.15):
    """
    Plot a given eigenvector.

    If a list of labels is passed in, the largest (in magnitude) num_label bars
      will be labeled on the plot. label_offset controls how much the labels
      are shifted from the top of the bars for clarity.
    bottom controls where the bar plot is centered along the y axis. This is 
      useful for plotting several e'vectors on the same axes.
    """
    # The 0.4 centers the bars on their numbers, accounting for the default
    #  bar width of 0.8
    vect = scipy.real(vect)
    max_index = scipy.argmax(abs(vect))
    if vect[max_index] < 0:
        vect = -vect
    bar(scipy.arange(len(vect)) - 0.4,
        vect / scipy.linalg.norm(vect),
        bottom=bottom)
    a = list(axis())
    a[0:2] = [-.03 * len(vect) - 0.4, (len(vect) - 1) * 1.03 + 0.4]

    if labels is not None:
        mags = zip(abs(vect), range(len(vect)), vect)
        mags.sort()
        mags.reverse()
        for mag, index, val in mags[:num_label]:
            name = labels[index]
            text(index,
                 val + scipy.sign(val) * label_offset,
                 name,
                 horizontalalignment='center',
                 verticalalignment='center')

        a[2] -= 0.1
        a[3] += 0.1

    axis(a)
Example #59
    def get_updates(self, params, constraints, loss):
        """ calculates parameter change based on given gradient and returns
            updated parameters
            check if gradient has correct dimensionality, then make array """
        gradient_arr = asarray(gradient)

        if self.rprop:
            rprop_theta = self.rprop_theta

            # update parameters
            self.values += sign(gradient_arr) * rprop_theta

            # update rprop meta parameters
            dirSwitch = self.lastgradient * gradient_arr
            rprop_theta[dirSwitch > 0] *= self.etaplus
            idx =  dirSwitch < 0
            rprop_theta[idx] *= self.etaminus
            gradient_arr[idx] = 0

            # upper and lower bound for both matrices
            rprop_theta = rprop_theta.clip(min=self.deltamin, max=self.deltamax)

            # save current gradients to compare with in next time step
            self.lastgradient = gradient_arr.copy()

            self.rprop_theta = rprop_theta

        else:
            # update momentum vector (momentum = 0 clears it)
            self.momentumvector *= self.momentum
            # update parameters (including momentum)
            self.momentumvector += self.alpha * gradient_arr
            self.alpha *= self.alphadecay

            # update parameters
            self.values += self.momentumvector

        return self.values
    def kMeans(img,k):
        '''
        Applies the k-means method to an image in order to segment it
        @param img: image to process (created beforehand with "imread()")
        @param k: number of clusters
        @return: the image after processing
        '''
        # convert to np.float32
        res = img.reshape((-1, 3))
        res = np.float32(res)

        # define criteria, number of clusters(K) and apply kmeans()
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
        ret, label, center = cv2.kmeans(res, k, None, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)

        # Now convert back into uint8, and make original image
        center = np.uint8(center)
        res = center[label.flatten()]
        res = (res/np.min(res))-1 # normalize to 0
        res = scipy.sign(res) # binarize
        res = res * 255 # to display in black and white
        res2 = res.reshape((img.shape))
        return res2