Example #1
def negSamplingCostAndGradient(y1, target, w2, K=10):
    """ Negative sampling cost function for word2vec models """
    ###################################################################
    # Implement the cost and gradients for one predicted word vector  #
    # and one target word vector as a building block for word2vec     #
    # models, using the negative sampling technique. K is the sample  #
    # size. You might want to use dataset.sampleTokenIdx() to sample  #
    # a random word index.                                            #
    # Input/Output Specifications: same as softmaxCostAndGradient     #
    # We will not provide starter code for this function, but feel    #
    # free to reference the code you previously wrote for this        #
    # assignment!                                                     #
    ###################################################################
    indexes = list(set([dataset.sampleTokenIdx() for i in range(K)]))
    if(target in indexes):
        indexes.remove(target)
    K1 = len(indexes)
    w2sampled = np.zeros((K1+1, np.shape(w2)[1]))
    w2sampled[K1] = w2[target]
    for i in range(K1):
        w2sampled[i] = w2[indexes[i]]
    out = np.dot(w2sampled,y1)
    cost =  -math.log(sigmoid(out[K1]))-sum(np.log(sigmoid(-out)))+np.log(sigmoid(-out[K1]))
    d2Sampled = sigmoid(out)
    d2Sampled[K1] -= 1.0
    grad2 = np.dot(np.row_stack(d2Sampled),np.column_stack(y1))
    d1 = np.dot(np.transpose(w2sampled), np.row_stack(d2Sampled))
    grad2A = np.zeros((np.shape(w2)[0], np.shape(w2)[1]))
    for i in range(K1):
        grad2A[indexes[i]] += grad2[i]
    grad2A[target] = grad2[K1]
    return cost, d1, grad2A
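A minimal, self-contained sketch (not from the original project) of the row_stack/column_stack outer-product pattern this snippet uses for its gradient; `sigmoid` is assumed to be the usual logistic function and the shapes are illustrative.

import numpy as np

def sigmoid(x):
    return 1.0 / (1.0 + np.exp(-x))

y1 = np.array([0.1, -0.2, 0.3])              # predicted word vector, shape (3,)
w2sampled = np.random.randn(4, 3)            # K1+1 sampled output vectors
out = np.dot(w2sampled, y1)                  # scores, shape (4,)
d2 = sigmoid(out)
d2[-1] -= 1.0                                # target row gets (sigmoid - 1)
grad2 = np.dot(np.row_stack(d2), np.column_stack(y1))
print(grad2.shape)                           # (4, 3): one gradient row per sampled vector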
Example #2
def plot_attention_loss(train_plot, test_plot):

    testing_freq = int((test_plot["iter"][1] - test_plot["iter"][0]) / (train_plot["iter"][1] - train_plot["iter"][0]))  # integer count needed by np.tile
    loss_y = train_plot["iter"]

    pl.figure(1)
    test_loss = test_plot["dir_loss"]
    train_loss = train_plot["dir_loss"]
    test_loss = numpy.row_stack(test_loss)
    test_loss = numpy.tile(test_loss, (1, testing_freq))
    test_loss = list(test_loss.flatten())
    test_loss += [test_loss[-1]] * max(0, len(train_loss) - len(test_loss))
    test_loss = test_loss[: len(train_loss)]
    pl.plot(loss_y, train_loss, "k-", label="Train", linewidth=0.75, marker="x")
    pl.plot(loss_y, test_loss, "r-", label="Test", linewidth=0.75)
    pl.legend(loc="best")
    pl.xlabel("Iter")
    pl.title("Direction Loss")

    pl.figure(2)
    test_loss = test_plot["cls_loss"]
    train_loss = train_plot["cls_loss"]
    test_loss = numpy.row_stack(test_loss)
    test_loss = numpy.tile(test_loss, (1, testing_freq))
    test_loss = list(test_loss.flatten())
    test_loss += [test_loss[-1]] * max(0, len(train_loss) - len(test_loss))
    test_loss = test_loss[: len(train_loss)]
    pl.plot(loss_y, train_loss, "k-", label="Train", linewidth=0.75, marker="x")
    pl.plot(loss_y, test_loss, "r-", label="Test", linewidth=0.75)
    pl.legend(loc="best")
    pl.xlabel("Iter")
    pl.title("Classification Loss")
    pl.show()
Example #3
def mirrorpad(img, width):                            
    """Return the image resulting from padding width amount of pixels on each                                                  
    sides of the image img.  The padded values are mirror image with respect to                                                
    the borders of img.  Width can be an integer or a tuple of the form (north,                                                
    south, east, west).                                                                                                        
    """                                                                                                                        
    n, s, e, w = _get4widths(width)                                                                                            
    row = N.shape(img)[0]                                                                                                      
                                                                                                                                                               
    col = N.shape(img)[1]                                                                                                      
    
    if n != 0:                                                                                                                 
        north = img[:n,:]                                                                                                      
        img = N.row_stack((north[::-1,:], img))                                                                                
                                                                                       
    if s != 0:                                                                                                                 
        south = img[-s:,:]                                                                                                     
        img = N.row_stack((img, south[::-1,:]))                                                                                
        
    if e != 0:                                                                                                                 
        east = img[:,-e:]                                                                                                      
        img = N.column_stack((img, east[:,::-1]))                                                                              
        
    if w != 0:
        west = img[:,:w]
        img = N.column_stack((west[:,::-1], img))

    return img
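A small usage-style sketch (hypothetical values, independent of _get4widths): mirroring one border by hand with the same row_stack pattern mirrorpad applies on the north side.

import numpy as np

img = np.arange(12).reshape(3, 4)
n = 1                                   # pad one row on the north side
north = img[:n, :]
padded = np.row_stack((north[::-1, :], img))
print(padded.shape)                     # (4, 4): original rows plus one mirrored row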
Example #4
 def solveMartix(self):
     MNA1 = np.column_stack((self.C, self.D))
     MNA2 = np.column_stack((self.G, self.B))
     MNA = np.row_stack((MNA2, MNA1))
     RHS = np.row_stack((self.U, self.I))
     x = np.linalg.lstsq(MNA, RHS)
     self.x = x[0]
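A minimal sketch (assumed 2x2 blocks) of how column_stack and row_stack assemble the block matrix [[G, B], [C, D]] the way solveMartix does.

import numpy as np

G = np.eye(2); B = np.zeros((2, 2))
C = np.ones((2, 2)); D = 2 * np.eye(2)
MNA1 = np.column_stack((C, D))          # bottom block row
MNA2 = np.column_stack((G, B))          # top block row
MNA = np.row_stack((MNA2, MNA1))        # full 4x4 system matrix
print(MNA.shape)                        # (4, 4)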
Example #5
def plot_attention_loss(train_plot, test_plot) :
	
	testing_freq = (test_plot['iter'][1]-test_plot['iter'][0]) / (train_plot['iter'][1]-train_plot['iter'][0])
	loss_y = train_plot['iter']
	
	pl.figure(1)
	test_loss = test_plot['dir_loss']
	train_loss = train_plot['dir_loss']
	test_loss = numpy.row_stack(test_loss)
	test_loss = numpy.tile(test_loss, (1, testing_freq))
	test_loss = list(test_loss.flatten())
	test_loss += [test_loss[-1]] * max(0,len(train_loss) - len(test_loss))
	test_loss = test_loss[:len(train_loss)]
	pl.plot(loss_y, train_loss, 'k-', label='Train', linewidth=0.75, marker='x')
	pl.plot(loss_y, test_loss,  'r-', label='Test' , linewidth=0.75)
	pl.legend(loc='best')
	pl.xlabel('Iter')
	pl.title('Direction Loss')

	pl.figure(2)
	test_loss = test_plot['cls_loss']
	train_loss = train_plot['cls_loss']
	test_loss = numpy.row_stack(test_loss)
	test_loss = numpy.tile(test_loss, (1, testing_freq))
	test_loss = list(test_loss.flatten())
	test_loss += [test_loss[-1]] * max(0,len(train_loss) - len(test_loss))
	test_loss = test_loss[:len(train_loss)]
	pl.plot(loss_y, train_loss, 'k-', label='Train', linewidth=0.75, marker='x')
	pl.plot(loss_y, test_loss,  'r-', label='Test' , linewidth=0.75)
	pl.legend(loc='best')
	pl.xlabel('Iter')
	pl.title('Classification Loss')
	pl.show()
Example #6
    def __call__(self, event):
        if event.button == 1:
            print 'You pressed button 1 to zoom; to mask, use the right button'
        if event.button >=2:
            if event.inaxes!=self.graph.axes: return
#            self.bt.append(event.button)
            self.xs.append(event.xdata)
            
            if len(self.xs)%2 == 0:  # act on every second click (a pair of x positions)
                if event.button == 3:
                    self.wei.append(0)
                    print 'You pressed button 3 to mask; masked region is', self.xs[-2],self.xs[-1],self.wei[-1]
                    self.tmp=np.column_stack((self.xs[-2],self.xs[-1],self.wei[-1]))
                    if (self.masked[0][0])==0:
                         self.masked=np.row_stack((self.tmp))
                    else:
                        self.masked=np.row_stack((self.masked,self.tmp))
                    plot(self.xso[(self.xso>=self.xs[-2]) & (self.xso<=self.xs[-1])],self.yso[(self.xso>=self.xs[-2]) & (self.xso<=self.xs[-1])],color='red',lw='2')
                    self.graph.figure.canvas.draw()
                if event.button == 2:
                    self.wei.append(2)
                    print 'You pressed button 2: attributed weight 2 to this region', self.xs[-2],self.xs[-1],self.wei[-1]
                    self.tmp=np.column_stack((self.xs[-2],self.xs[-1],self.wei[-1]))
                    if (self.masked[0][0])==0:
                         self.masked=np.row_stack((self.tmp))
                    else:
                        self.masked=np.row_stack((self.masked,self.tmp))
                    plot(self.xso[(self.xso>=self.xs[-2]) & (self.xso<=self.xs[-1])],self.yso[(self.xso>=self.xs[-2]) & (self.xso<=self.xs[-1])],color='green',lw='2')                   
                    self.graph.figure.canvas.draw()
Example #7
def getstats_base(X, linds):
    sval = {}
    for l_ind in linds:
        print(l_ind, X['model_state']['layers'][l_ind]['name'])
        layer = X['model_state']['layers'][l_ind]
        w = layer['weights'][0]
        karray = stats.kurtosis(w)
        kall = stats.kurtosis(w.ravel())
        cf0 = np.corrcoef(w)
        cf0t = np.corrcoef(w.T)
        wmean = w.mean(1)
        w2mean = (w**2).mean(1)
        lname = X['model_state']['layers'][l_ind]['name']
        sval[lname] = {'karray': karray, 'kall': kall, 'corr0': cf0, 'corr0_t': cf0t,
                            'wmean': wmean, 'w2mean': w2mean}

        if 'filterSize' in X['model_state']['layers'][l_ind]:
            fs = X['model_state']['layers'][l_ind]['filterSize'][0]
            ws = w.shape
            w = w.reshape((ws[0] // (fs**2), fs, fs, ws[1]))  # integer division so the shape stays integral
            mat = np.row_stack([np.row_stack([w[i, j, :, :] for i in range(w.shape[0])]).T for j in range(w.shape[1])] )
            cf = np.corrcoef(mat.T)
            cft = np.corrcoef(mat)
            mat2 = np.row_stack([np.row_stack([w[i, :, :, j] for i in range(w.shape[0])]).T for j in range(w.shape[3])] )
            cf2 = np.corrcoef(mat2.T)
            cf2t = np.corrcoef(mat2)
            sval[lname].update({'corr': cf, 'corr2': cf2, 'corr_t': cft, 'corr2_t': cf2t})

    return sval
Example #8
File: hdr.py Project: arfu2016/hdr
    def train(self, training_data_array): 
        data = training_data_array[0] #dict
            # Step 2: Forward propagation
        a1 = np.mat(data['y0']).T
            # 400*1 matrix
        z2 = np.dot(self.theta1, np.row_stack((1, a1)))
            # num_hidden_nodes*1 matrix
        a2 = self.sigmoid(z2)

        z3 = np.dot(self.theta2, np.row_stack((1, a2)))
            # 10*1 matrix
        a3 = self.sigmoid(z3)

            # Step 3: Back propagation
        y = [0] * 10 # y is a python list for easy initialization and is later turned into an np matrix (2 lines down).
        y[data['label']] = 1
            # 1*10 list
                      
        delta3 = a3 - np.mat(y).T
            # 10*1 matrix
        z2plus = np.row_stack((0, z2))
            # (num_hidden_nodes+1)*1 matrix
        delta2 = np.multiply(np.dot(self.theta2.T, delta3), self.sigmoid_prime(z2plus))
            # (num_hidden_nodes+1)*1 matrix
        delta2 = delta2[1:,0]
            # num_hidden_nodes*1 matrix

            # Step 4: Update weights
        self.theta1 -= self.LEARNING_RATE * np.dot(delta2, np.row_stack((1, a1)).T)
        self.theta2 -= self.LEARNING_RATE * np.dot(delta3, np.row_stack((1, a2)).T)
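A minimal sketch of what np.row_stack((1, a1)) does in the snippet: it prepends a bias entry to a column matrix before the affine transform.

import numpy as np

a1 = np.mat([[0.2], [0.7], [0.1]])      # 3x1 activations
with_bias = np.row_stack((1, a1))       # 4x1: bias entry stacked on top
print(with_bias.shape)                  # (4, 1)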
Example #9
def conf2yap(conf_fname, yap_filename):
    print("Yap file : ", yap_filename)
    positions, radii, meta = clff.read_conf_file(conf_fname)
    positions[:, 0] -= float(meta['lx'])/2
    positions[:, 1] -= float(meta['ly'])/2
    positions[:, 2] -= float(meta['lz'])/2

    if 'np_fixed' in meta:
        # for conf with fixed particles
        split_line = len(positions) - int(meta['np_fixed'])
        pos_mobile, pos_fixed = np.split(positions, [split_line])
        rad_mobile, rad_fixed = np.split(radii, [split_line])
        yap_out = pyp.layer_switch(3)
        yap_out = pyp.add_color_switch(yap_out, 3)
        yap_out = np.row_stack((yap_out,
                                particles_yaparray(pos_mobile, rad_mobile)))
        yap_out = pyp.add_layer_switch(yap_out, 4)
        yap_out = pyp.add_color_switch(yap_out, 4)
        yap_out = np.row_stack((yap_out,
                                particles_yaparray(pos_fixed, rad_fixed)))
    else:
        yap_out = pyp.layer_switch(3)
        yap_out = pyp.add_color_switch(yap_out, 3)
        yap_out = np.row_stack((yap_out,
                                particles_yaparray(positions, radii)))

    pyp.savetxt(yap_filename, yap_out)
Example #10
def find_goto_point_surface_1(grid,pt,display_list=None):
    ''' returns p_erratic,p_edge,surface_height
        p_erratic - point where the erratic's origin should go to. (in thok0 frame)
        p_edge - point on the edge closest to pt.
        p_erratic,p_edge are 3x1 matrices.
        surface_height - in thok0 coord frame.
    '''
    pt_thok = pt
    
    # everything is happening in the thok coord frame.
    close_pt,approach_vector = find_approach_direction(grid,pt_thok,display_list)
    if close_pt is None:
        return None,None,None

    # move perpendicular to approach direction.
#    lam = -(close_pt[0:2,0].T*approach_vector)[0,0]
#    lam = min(lam,-0.3) # atleast 0.3m from the edge.
    lam = -0.4
    goto_pt = close_pt[0:2,0] + lam*approach_vector # this is where I want the utm
                                                    # to be
    err_to_thok = tr.erraticTglobal(tr.globalTthok0(np.matrix([0,0,0]).T))
    goto_pt_erratic = -err_to_thok[0,0]*approach_vector + goto_pt # this is NOT
                # general. It uses info about the two frames. If frames move, bad
                # things can happen.

    if display_list != None:
        display_list.append(pu.CubeCloud(np.row_stack((goto_pt,close_pt[2,0])),color=(0,250,250), size=(0.012,0.012,0.012)))
        display_list.append(pu.Line(np.row_stack((goto_pt,close_pt[2,0])).A1,close_pt.A1,color=(255,20,0)))

    p_erratic = np.row_stack((goto_pt_erratic,np.matrix((close_pt[2,0]))))
    print 'p_erratic in thok0:', p_erratic.T

    return p_erratic,close_pt,close_pt[2,0]
Example #11
File: hdr.py Project: arfu2016/hdr
    def _nnCostFunction(self, the_thetas, *args):
        
        the_thetas = np.mat(the_thetas)
        theta1 = np.reshape(the_thetas[0,0:self.num_hidden_nodes*401], (400+1, self.num_hidden_nodes)).T
        theta2 = np.reshape(the_thetas[0,self.num_hidden_nodes*401:], (self.num_hidden_nodes+1, 10)).T
        training_data_array = args
        
        J=0        
        for data in training_data_array:
            a1 = np.mat(data.fig).T
            # 400*1 matrix
            z2 = np.dot(theta1, np.row_stack((1, a1)))
            # num_hidden_nodes*1 matrix
            a2 = self.sigmoid(z2)

            z3 = np.dot(theta2, np.row_stack((1, a2)))
            # 10*1 matrix
            a3 = self.sigmoid(z3)

            y = [0] * 10 # y is a python list for easy initialization and is later turned into an np matrix (3 lines down).
            y[data.label] = 1
            # 1*10 list          
            
            for j in range(10):
                J = J + np.mat(y).T[j,0]*math.log(a3[j,0])+(1-np.mat(y).T[j,0])*math.log(1-a3[j,0])
                # numerically a3[j,0] could be smaller than 0 or larger than 1 a bit

        J = -J/self.sample_num + self.LAMBDA/(2*self.sample_num)*(np.multiply(theta1[:,1:], theta1[:,1:]).sum()+np.multiply(theta2[:,1:], theta2[:,1:]).sum())
        # print J 
        
        return J
Example #12
    def __init__(self,d,l, dtype=None):

        self.d = d
        self.l = l

        [self.smolyak_points, self.smolyak_indices] = smolyak_grids(d,l)

        self.u_grid = array( self.smolyak_points.T, order='C')

        self.isup = max(max(self.smolyak_indices))
        self.n_points = len(self.smolyak_points)
        #self.grid = self.real_gri

        Ts = chebychev( self.smolyak_points.T, self.n_points - 1 )
        C = []
        for comb in self.smolyak_indices:
            p = reduce( mul, [Ts[comb[i],i,:] for i in range(self.d)] )
            C.append(p)
        C = numpy.row_stack( C )

        # C is such that :  X = theta * C
        self.__C__ = C
        self.__C_inv__ = numpy.linalg.inv(C)  # would be better to store preconditioned matrix

        self.bounds = numpy.row_stack([(0,1)]*d)
Example #13
File: mf.py Project: NicozRobin/ml
    def test(self, ratings):
        L = ratings.kv_dict.items()
        R = np.matrix([I[1] for I in L]).T 
        sqr_err = list()
       
        au = len(ratings.rows) - len(self.user_factor)
        self.user_factor = np.row_stack([self.user_factor, np.matrix(np.zeros([au, self.factor]))])
        self.user_bias = np.row_stack([self.user_bias, np.matrix(np.zeros([au, 1]))])
 
        ai = len(ratings.cols) - len(self.item_factor)
        self.item_factor = np.row_stack([self.item_factor, np.matrix(np.zeros([ai, self.factor]))])
        self.item_bias = np.row_stack([self.item_bias, np.matrix(np.zeros([ai, 1]))])
 
        for s in range(0, len(L), self.batch_size):
            mini_batch = L[s : s + self.batch_size]
            r = R[s : s + self.batch_size]
            
            uid = [I[0][0] for I in mini_batch]
            iid = [I[0][1] for I in mini_batch]
 
            base_line = self.mu + self.user_bias[uid] + self.item_bias[iid]
            r_pred = base_line + np.sum(np.multiply(self.user_factor[uid], self.item_factor[iid]), 1)

            err = r - r_pred
            sqr_err.append(float(err.T * err) / self.batch_size)
        
        rmse = math.sqrt(np.mean(np.matrix(sqr_err)))
        # sys.stderr.write('RMSE: %f\n' % (rmse))
        
        return rmse
Example #14
	def save_Ian_E_H_Yen(self,folder):
		if self.B_lower is not None:
			print ('self.B_lower is not None, you should convert your problem with convertToOnesideInequalitySystem first')
			raise ValueError('B_lower is not None')
		if not np.all(self.lowerbounds==0):
			print ('lower bound constraints on variables should be at 0')
			raise ValueError('lower bounds on variables must be 0')
		
		import os		
		Aeq=self.Aequalities.tocoo()
		tmp=np.row_stack(([Aeq.shape[0],Aeq.shape[1],0.0],np.column_stack((Aeq.row+1,Aeq.col+1,Aeq.data))))
		np.savetxt(os.path.join(folder,'Aeq'),tmp,fmt='%d %d %f')
		np.savetxt(os.path.join(folder,'beq'),self.Bequalities,fmt='%f')
		np.savetxt(os.path.join(folder,'c'),self.costsvector,fmt='%f')
		nbvariables=self.costsvector.size
		upperbounded=np.nonzero(~np.isinf(self.upperbounds))[0]
		nbupperbounded=len(upperbounded)
		Aineq2=sparse.coo_matrix((np.ones(nbupperbounded),(np.arange(nbupperbounded),upperbounded)),(nbupperbounded,nbvariables))
		Aineq=scipy.sparse.vstack((self.Ainequalities,Aineq2)).tocoo()
		bupper=np.hstack((self.B_upper,self.upperbounds[upperbounded]))
		tmp=np.row_stack(([Aineq.shape[0],Aineq.shape[1],0.0],np.column_stack((Aineq.row+1,Aineq.col+1,Aineq.data))))
		np.savetxt(os.path.join(folder,'A'),tmp,fmt='%d %d %f')	
		np.savetxt(os.path.join(folder,'b'),bupper,fmt='%f')	
	
		with open(os.path.join(folder,'meta'), 'w') as f:
			f.write('nb	%d\n'%nbvariables)
			f.write('nf	%d\n'%0)
			f.write('mI	%d\n'%Aineq.shape[0])
			f.write('mE	%d\n'%Aeq.shape[0])
Example #15
def softmaxCostAndGradient(y1, target, w2):
    """ Softmax cost function for word2vec models """
    ###################################################################
    # Implement the cost and gradients for one predicted word vector  #
    # and one target word vector as a building block for word2vec     #
    # models, assuming the softmax prediction function and cross      #
    # entropy loss.                                                   #
    # Inputs:                                                         #
    #   - predicted: numpy ndarray, predicted word vector (\hat{r} in #
    #           the written component)                                #
    #   - target: integer, the index of the target word               #
    #   - outputVectors: "output" vectors for all tokens              #
    # Outputs:                                                        #
    #   - cost: cross entropy cost for the softmax word prediction    #
    #   - gradPred: the gradient with respect to the predicted word   #
    #           vector                                                #
    #   - grad: the gradient with respect to all the other word       # 
    #           vectors                                               #
    # We will not provide starter code for this function, but feel    #
    # free to reference the code you previously wrote for this        #
    # assignment!                                                     #
    ###################################################################
    out = np.dot(w2,y1)
    y2 = softmaxactual(out)
    cost = -math.log(y2[target])
    y2[target] -= 1.0
    d2= y2
    grad2 = np.dot(np.row_stack(d2),np.column_stack(y1))
    d1 = np.dot(np.transpose(w2), np.row_stack(d2))
    return cost, d1, grad2
Example #16
    def add_margins(self, matrix, regions=2):
        nrow, ncol = np.shape(matrix)

        assert regions<=2
        for i in range(regions):
            nc = ncol/regions
            nr = nrow

            if i==0:
                m = matrix[:, 0:nc]
            else:
                m = matrix[:, nc:]

            left_col = np.ones((nr+2,1))
            bottom_row = np.ones((1, nc))
            top_row = np.reshape([[0,1] for _ in range(nc/2)], (1, nc))
            right_row = np.reshape([[0,1] for _ in range(nr/2+1)], (nr+2, 1))
            m = np.row_stack((top_row, m))
            m = np.row_stack((m, bottom_row))
            m = np.column_stack((left_col, m))
            m = np.column_stack((m, right_row))
            if i == 0:
                ans = m
            else:
                ans = np.column_stack((ans, m))
        return ans
Example #17
def multi_scale_test(image, max_im_shrink):
    # shrink detecting and shrink only detect big face
    st = 0.5 if max_im_shrink >= 0.75 else 0.5 * max_im_shrink
    det_s = detect_face(image, st)
    if max_im_shrink > 0.75:
        det_s = np.row_stack((det_s,detect_face(image,0.75)))
    index = np.where(np.maximum(det_s[:, 2] - det_s[:, 0] + 1, det_s[:, 3] - det_s[:, 1] + 1) > 30)[0]
    det_s = det_s[index, :]
    # enlarge one times
    bt = min(2, max_im_shrink) if max_im_shrink > 1 else (st + max_im_shrink) / 2
    det_b = detect_face(image, bt)

    # enlarge small image x times for small face
    if max_im_shrink > 1.5:
        det_b = np.row_stack((det_b,detect_face(image,1.5)))
    if max_im_shrink > 2:
        bt *= 2
        while bt < max_im_shrink: # and bt <= 2:
            det_b = np.row_stack((det_b, detect_face(image, bt)))
            bt *= 2

        det_b = np.row_stack((det_b, detect_face(image, max_im_shrink)))

    # enlarge only detect small face
    if bt > 1:
        index = np.where(np.minimum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1) < 100)[0]
        det_b = det_b[index, :]
    else:
        index = np.where(np.maximum(det_b[:, 2] - det_b[:, 0] + 1, det_b[:, 3] - det_b[:, 1] + 1) > 30)[0]
        det_b = det_b[index, :]

    return det_s, det_b
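A minimal sketch (made-up boxes) of the accumulation pattern multi_scale_test relies on: np.row_stack concatenates per-scale detection arrays, one box per row.

import numpy as np

det_a = np.array([[10., 10., 50., 60., 0.9]])         # x1, y1, x2, y2, score
det_b = np.array([[12., 14., 48., 58., 0.8],
                  [100., 90., 140., 150., 0.7]])
dets = np.row_stack((det_a, det_b))
print(dets.shape)                                      # (3, 5)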
Example #18
File: live.py Project: tdent/pycbc
def _build_series(series, dim_names, comment, delta_name, delta_unit):
    from glue.ligolw import array as ligolw_array
    Attributes = ligolw.sax.xmlreader.AttributesImpl
    elem = ligolw.LIGO_LW(
            Attributes({u"Name": unicode(series.__class__.__name__)}))
    if comment is not None:
        elem.appendChild(ligolw.Comment()).pcdata = comment
    elem.appendChild(ligolw.Time.from_gps(series.epoch, u"epoch"))
    elem.appendChild(ligolw_param.Param.from_pyvalue(u"f0", series.f0,
                                                     unit=u"s^-1"))
    delta = getattr(series, delta_name)
    if numpy.iscomplexobj(series.data.data):
        data = numpy.row_stack((numpy.arange(len(series.data.data)) * delta,
                             series.data.data.real, series.data.data.imag))
    else:
        data = numpy.row_stack((numpy.arange(len(series.data.data)) * delta,
                                series.data.data))
    a = ligolw_array.Array.build(series.name, data, dim_names=dim_names)
    a.Unit = str(series.sampleUnits)
    dim0 = a.getElementsByTagName(ligolw.Dim.tagName)[0]
    dim0.Unit = delta_unit
    dim0.Start = series.f0
    dim0.Scale = delta
    elem.appendChild(a)
    return elem
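A minimal sketch (synthetic data, no ligolw involved) of the complex branch: the sample times and the real/imaginary parts are stacked into one 3xN array.

import numpy

data = numpy.exp(1j * numpy.linspace(0, 1, 5))
delta = 0.25
rows = numpy.row_stack((numpy.arange(len(data)) * delta,
                        data.real, data.imag))
print(rows.shape)                                      # (3, 5)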
Example #19
def setup_data(db,h,w):
    corners = np.array([[0,0],[0,h-1],[w-1,0],[w-1,h-1]])
    im1_pts = np.array(db_unpack(db)[0])
    im1_pts = np.row_stack([im1_pts,corners])
    im2_pts = np.array(db_unpack(db)[1])
    im2_pts = np.row_stack([im2_pts,corners])
    return im1_pts,im2_pts
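A minimal sketch (hypothetical points) of what setup_data does with np.row_stack: the four image corners are appended to the correspondence points of each image.

import numpy as np

h, w = 4, 6
corners = np.array([[0, 0], [0, h - 1], [w - 1, 0], [w - 1, h - 1]])
pts = np.array([[2, 3], [1, 5]])
pts = np.row_stack([pts, corners])
print(pts.shape)                        # (6, 2)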
Example #20
def predict_test(batch, model, params,  **kwparams):
   """ some code duplication here with forward pass, but I think we want the freedom in future """
   F = np.row_stack(x['image']['feat'] for x in batch)
   lda_enabled = params.get('lda',0)
   L = np.zeros((params.get('image_encoding_size',128),lda_enabled))
   if lda_enabled:
      L = np.row_stack(x['image']['topics'] for x in batch)
   We = model['We']
   if lda_enabled:
     Wlda = model['Wlda']
     lda = L.dot(Wlda)
   be = model['be']
   Xe = F.dot(We) + be # Xe becomes N x image_encoding_size
   #print('L shape', L.shape)
   #print('Wlda shape', Wlda.shape)
   generator_str = params['generator']
   Generator = decodeGenerator(generator_str)
   Ys = []
   guide_input = params.get('guide',None)
   for i,x in enumerate(batch):
     Xi = Xe[i,:]
     if guide_input=='cca':
       guide = get_guide(guide_input,F[i,:],kwparams.get('ccaweights'))
     else:
       guide = get_guide(guide_input,F[i,:],L=L[i,:])
     if (lda_enabled and guide_input=="image") or (lda_enabled and not guide_input):
       guide = lda[i,:]
       print 'guide = lda'
     gen_Y = Generator.predict(Xi, guide, model, model['Ws'], params, **kwparams)
     Ys.append(gen_Y)
   return Ys
Example #21
 def printMartix(self):
     MNA1 = np.column_stack((self.C, self.D))
     MNA2 = np.column_stack((self.G, self.B))
     MNA = np.row_stack((MNA2, MNA1))
     RHS = np.row_stack((self.U, self.I))
     print "MNA", MNA
     print "RHS", RHS
Example #22
    def get_scan(self, avoid_duplicate=False, avg=1, remove_graze=True):
        ''' avoid_duplicate - prevent duplicate scans which will happen if get_scan is
            called at a rate faster than the scanning rate of the hokuyo.
            avoid_duplicate == True, get_scan will block till new scan is received.
            (~.2s for urg and 0.05s for utm)
        '''
        l = []
        l2 = []
        for i in range(avg):
            hscan = self.hokuyo.get_scan(avoid_duplicate)
            l.append(hscan.ranges)
            l2.append(hscan.intensities)

        ranges_mat = np.row_stack(l)
        ranges_mat[np.where(ranges_mat==0)] = -1000. # make zero points very negative
        ranges_avg = (ranges_mat.sum(0)/avg)
        if self.flip:
            ranges_avg = np.fliplr(ranges_avg)
        hscan.ranges = ranges_avg
    
        intensities_mat = np.row_stack(l2)
        if self.hokuyo_type == 'utm':
            hscan.intensities = (intensities_mat.sum(0)/avg)

        if remove_graze:
            if self.hokuyo_type=='utm':
                hp.remove_graze_effect_scan(hscan)
            else:
                print 'hokuyo_scan.Hokuyo.get_scan: hokuyo type is urg, but remove_graze is True'

        return hscan
Example #23
    def test_smolyak(self):

        import numpy

        f = lambda x: numpy.row_stack([
            x[0,:] * x[1,:]**0.5,
            x[1,:] * x[1,:] - x[0,:] * x[0,:]
        ])

        bounds = numpy.row_stack([
            [0.5,0.1],
            [2,3]
        ])

        from dolo.numeric.smolyak import SmolyakGrid
        sg = SmolyakGrid(bounds,3)

        values = f(sg.grid)
        sg.fit_values(values)
        theta_0 = sg.theta.copy()

        def fobj(theta):
            sg.theta = theta
            return sg(sg.grid)

        fobj(theta_0)
Example #24
    def __init__(self, grid, start, orientation='up', conf='white'):
        """
        Initialize parameters
        """
        self.rows = grid[0]
        self.cols = grid[1]
        self.orientation = orientation
        self.conf = conf
        self.current = start

        if conf == 'white':
            self.grid = np.zeros((self.rows, self.cols), dtype=int)

        elif conf == 'black':
            self.grid = np.ones((self.rows, self.cols), dtype=int)

        elif conf == 'stripes':
            re = np.r_[ 41 * [0, 0] ]
            ro = np.r_[ 41 * [1, 1] ]
            self.grid = np.row_stack(51 * (re, ro))[:-1]

        elif conf == 'checker':
            re = np.r_[ 41 * [0, 1] ]
            ro = np.r_[ 41 * [1, 0] ]
            self.grid = np.row_stack(51 * (re, ro))[:-1]

        elif conf == 'random':
            a = np.random.random((self.rows, self.cols)) + 0.5
            self.grid = a.astype(int)
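A minimal sketch of the stripes/checker construction, scaled down: np.row_stack(k * (re, ro)) repeats the pair of rows k times.

import numpy as np

re = np.r_[2 * [0, 1]]                  # 0 1 0 1
ro = np.r_[2 * [1, 0]]                  # 1 0 1 0
grid = np.row_stack(2 * (re, ro))       # (re, ro, re, ro) stacked row-wise
print(grid)                             # a 4x4 checkerboard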
Example #25
 def array_to_ij(self, array):
     assert array.shape == (self._iilen, self._jjlen), "{} != {}, {}".format(array.shape, self._iilen, self._jjlen)
     #arange in the same order (and starting pos) as gchem grid
     array = np.roll(array, 6, 0)
     lons_regridded = [regrid(array[:,j], sum, 10) for j in range(self._jjlen)]
     cols = map(lambda a: regrid_lats(np.array(a)), zip(*lons_regridded))
     regridded = np.row_stack(cols)
     return np.row_stack(((i+1,j+1,regridded[i,j]) for i,j in it.product(range(72),range(46))))
Example #26
def absbound(mat):
	mat1=np.zeros((2,N))
	mat=np.row_stack((mat1,mat))
	mat=np.row_stack((mat,mat1))
	mat2=np.zeros((N+4,2))
	mat=np.column_stack((mat2,mat))
	mat=np.column_stack((mat,mat2))
	return mat
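A minimal sketch of absbound's padding pattern (N assumed to be the grid size defined elsewhere in that module): two zero rows and columns are added around the matrix.

import numpy as np

N = 3
mat = np.ones((N, N))
zr = np.zeros((2, N))
mat = np.row_stack((zr, mat, zr))       # two zero rows above and below
zc = np.zeros((N + 4, 2))
mat = np.column_stack((zc, mat, zc))    # two zero columns left and right
print(mat.shape)                        # (7, 7)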
Example #27
def prepare_data( batch, wordtoix, maxlen=None,sentTagMap=None,ixw = None):
    """Create the matrices from the datasets.

    This pad each sequence to the same length: the lenght of the
    longest sequence or maxlen.

    if maxlen is set, we will cut all sequence to this maximum
    length.

    This swap the axis!
    """
    seqs = []
    xI = np.row_stack(x['image']['feat'] for x in batch)

    for ix,x in enumerate(batch):
      seqs.append([0] + [wordtoix[w] for w in x['sentence']['tokens'] if w in wordtoix] + [0])

    # x: a list of sentences
    lengths = [len(s) for s in seqs]

    if maxlen is not None:
        new_seqs = []
        new_labels = []
        new_lengths = []
        for l, s, y in zip(lengths, seqs, labels):
            if l < maxlen:
                new_seqs.append(s)
                new_labels.append(y)
                new_lengths.append(l)
        lengths = new_lengths
        labels = new_labels
        seqs = new_seqs

        if len(lengths) < 1:
            return None, None, None

    n_samples = len(seqs)
    maxlen = np.max(lengths)

    xW = np.zeros((maxlen, n_samples)).astype('int64')
    x_mask = np.zeros((maxlen, n_samples)).astype(theano.config.floatX)
    for idx, s in enumerate(seqs):
        xW[:lengths[idx], idx] = s
        x_mask[:lengths[idx], idx] = 1.
        if sentTagMap != None:
            for i,sw in enumerate(s):
                if sentTagMap[batch[idx]['sentence']['sentid']].get(ixw[sw],'') == 'JJ':
                    x_mask[i,idx] = 2 
                

    inp_list = [xW, xI, x_mask]

    if 'aux_inp' in batch[0]['image']:
      xAux = np.row_stack(x['image']['aux_inp'] for x in batch)
      #xAux = np.tile(xAux,[maxlen,1,1])
      inp_list.append(xAux)

    return inp_list, (np.sum(lengths) - n_samples)
Example #28
def plot_srhel():
    print("    SR Helicity")
    # Set Figure Size (1000 x 800)
    plt.figure(figsize=(width,height),frameon=False)
    P = nc.variables['P']
    PB = nc.variables['PB']
    UU = nc.variables['U']
    VV = nc.variables['V']
    PH = nc.variables['PH']
    PHB = nc.variables['PHB']
    
    # Need pressures, temps and mixing ratios
    PR = P[time] + PB[time]
    PHT = np.add(PH[time],PHB[time])
    ZH = np.divide(PHT, 9.81)
    U = UU[time]
    V = VV[time]
    

    for j in range(len(U[1,:,1])):
        curcol_c = []
        curcol_Umo = []
        curcol_Vmo = []
        for i in range(len(V[1,1,:])):
            sparms = severe.SRHEL_CALC(U[:,j,i], V[:,j,i], ZH[:,j,i], PR[:,j,i])
            curcol_c.append(sparms[0])
            curcol_Umo.append(sparms[1])
            curcol_Vmo.append(sparms[2])
        np_curcol_c = np.array(curcol_c)
        np_curcol_Umo = np.array(curcol_Umo)
        np_curcol_Vmo = np.array(curcol_Vmo)

        if j == 0:
            srhel = np_curcol_c
            U_srm = np_curcol_Umo
            V_srm = np_curcol_Vmo
        else:
            srhel = np.row_stack((srhel, np_curcol_c))
            U_srm = np.row_stack((U_srm, np_curcol_Umo))
            V_srm = np.row_stack((V_srm, np_curcol_Vmo))

    #print "       SRHEL: ", np.shape(srhel)

    # Now plot
    SRHEL_LEVS = range(50,800,50)
    srhel = np.nan_to_num(srhel)
    SRHEL=plt.contourf(x,y,srhel,SRHEL_LEVS)

    u_mo_kts = U_srm * 1.94384449
    v_mo_kts = V_srm * 1.94384449
    plt.barbs(x_th,y_th,u_mo_kts[::thin,::thin],\
		v_mo_kts[::thin,::thin], length=5,\
		sizes={'spacing':0.2},pivot='middle')
    title = '0-3 km SRHelicity, Storm Motion (kt)'
    prodid = 'hlcy'
    units = "m" + u'\u00B2' + '/s' + u'\u00B2'

    drawmap(SRHEL, title, prodid, units) 	
Example #29
def _extract_grid_lines(LINES):
    point_list = []
    line_list  = []
    for (x, y, z) in LINES:
        assert x.ndim == y.ndim == z.ndim
        assert x.shape == y.shape, "Arrays x and y must have same shape."
        assert y.shape == z.shape, "Arrays y and z must have same shape."
        #
        xyz = [x.ravel(), y.ravel(), z.ravel()]
        points = np.column_stack(xyz).ravel()
        points.shape = (-1, 3)
        lines = np.zeros((0, 2), dtype='l')
        #
        grid = np.arange(x.size, dtype='l').reshape(x.shape)
        if x.ndim == 1:
            p0 = grid[:-1].ravel()
            p1 = grid[+1:].ravel()
            verts = [p0,p1]
            lines = np.column_stack(verts).ravel()
            lines.shape = (-1, 2)
        elif x.ndim == 2:
            p0 = grid[:-1, :-1].ravel()
            p1 = grid[+1:, :-1].ravel()
            p2 = grid[+1:, +1:].ravel()
            p3 = grid[:-1, +1:].ravel()
            verts = [p0,p1,p2,p3]
            polys = np.column_stack(verts).ravel()
            polys.shape = (-1, 4)
            lines = polys[:,[0,1,1,2,2,3,3,0]].ravel()
            lines.shape = (-1, 2)
        elif x.ndim == 3:
            p0 = grid[:-1, :-1, :-1].ravel()
            p1 = grid[+1:, :-1, :-1].ravel()
            p2 = grid[+1:, +1:, :-1].ravel()
            p3 = grid[:-1, +1:, :-1].ravel()
            p4 = grid[:-1, :-1, +1:].ravel()
            p5 = grid[+1:, :-1, +1:].ravel()
            p6 = grid[+1:, +1:, +1:].ravel()
            p7 = grid[:-1, +1:, +1:].ravel()
            verts = [p0,p1,p2,p3,
                     p4,p5,p6,p7,
                     p0,p1,p5,p4,
                     p2,p6,p7,p3,
                     p0,p3,p7,p4,
                     p1,p2,p6,p5]
            polys = np.column_stack(verts).ravel()
            polys.shape = (-1, 4)
            lines = polys[:,[0,1,1,2,2,3,3,0]].ravel()
            lines.shape = (-1, 2)
        point_list.append(points)
        line_list.append(lines)
    offset = 0
    for points, lines in zip(point_list, line_list):
        lines += offset
        offset += len(points)
    return (np.row_stack(point_list),
            np.row_stack(line_list))
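A minimal sketch (tiny synthetic blocks) of the return step: per-block point and connectivity arrays are offset and then concatenated with np.row_stack.

import numpy as np

pts_a = np.zeros((3, 3)); lines_a = np.array([[0, 1], [1, 2]])
pts_b = np.ones((2, 3));  lines_b = np.array([[0, 1]]) + len(pts_a)
points = np.row_stack([pts_a, pts_b])     # (5, 3)
lines = np.row_stack([lines_a, lines_b])  # (3, 2), indices into `points`
print(points.shape, lines.shape)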
Example #30
def xyzToHomogenous(v, floating_vector=False):
    """ convert 3XN matrix, to 4XN matrix in homogeneous coords
    """
#   v = np.matrix(v)
#   v = v.reshape(3,1)
    if floating_vector == False:
        return np.row_stack((v, np.ones(v.shape[1])))
    else:
        return np.row_stack((v, np.zeros(v.shape[1])))
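A minimal usage sketch: appending a row of ones with np.row_stack turns a 3xN point matrix into homogeneous 4xN coordinates, which is all xyzToHomogenous does.

import numpy as np

v = np.random.rand(3, 5)                # five 3-D points, one per column
vh = np.row_stack((v, np.ones(v.shape[1])))
print(vh.shape)                         # (4, 5)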
Example #31
def quick_color_check(target_matrix, source_matrix, num_chips):
    """ Quickly plot target matrix values against source matrix values to determine
    over saturated color chips or other issues.

    Inputs:
    source_matrix      = an nrowsXncols matrix containing the avg red, green, and blue values for each color chip
                            of the source image
    target_matrix      = an nrowsXncols matrix containing the avg red, green, and blue values for each color chip
                            of the target image
    num_chips          = number of color card chips included in the matrices (integer)

    :param source_matrix: numpy.ndarray
    :param target_matrix: numpy.ndarray
    :param num_chips: int
    """
    # Imports
    from plotnine import ggplot, geom_point, geom_smooth, theme_seaborn, facet_grid, geom_label, scale_x_continuous, \
        scale_y_continuous, scale_color_manual, aes
    import pandas as pd

    # Extract and organize matrix info
    tr = target_matrix[:num_chips, 1:2]
    tg = target_matrix[:num_chips, 2:3]
    tb = target_matrix[:num_chips, 3:4]
    sr = source_matrix[:num_chips, 1:2]
    sg = source_matrix[:num_chips, 2:3]
    sb = source_matrix[:num_chips, 3:4]

    # Create columns of color labels
    red = []
    blue = []
    green = []
    for i in range(num_chips):
        red.append('red')
        blue.append('blue')
        green.append('green')

    # Make a column of chip numbers
    chip = np.arange(0, num_chips).reshape((num_chips, 1))
    chips = np.row_stack((chip, chip, chip))

    # Combine info
    color_data_r = np.column_stack((sr, tr, red))
    color_data_g = np.column_stack((sg, tg, green))
    color_data_b = np.column_stack((sb, tb, blue))
    all_color_data = np.row_stack((color_data_b, color_data_g, color_data_r))

    # Create a dataframe with headers
    dataset = pd.DataFrame({
        'source': all_color_data[:, 0],
        'target': all_color_data[:, 1],
        'color': all_color_data[:, 2]
    })

    # Add chip numbers to the dataframe
    dataset['chip'] = chips
    dataset = dataset.astype({
        'color': str,
        'chip': str,
        'target': float,
        'source': float
    })

    # Make the plot
    p1 = ggplot(dataset, aes(x='target', y='source', color='color', label='chip')) + \
        geom_point(show_legend=False, size=2) + \
        geom_smooth(method='lm', size=.5, show_legend=False) + \
        theme_seaborn() + facet_grid('.~color') + \
        geom_label(angle=15, size=7, nudge_y=-.25, nudge_x=.5, show_legend=False) + \
        scale_x_continuous(limits=(-5, 270)) + scale_y_continuous(limits=(-5, 275)) + \
        scale_color_manual(values=['blue', 'green', 'red'])

    # Autoincrement the device counter
    params.device += 1

    # Reset debug
    if params.debug is not None:
        if params.debug == 'print':
            p1.save(os.path.join(params.debug_outdir, 'color_quick_check.png'))
        elif params.debug == 'plot':
            print(p1)
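A minimal sketch (made-up chip values) of how quick_color_check builds its table: per-channel (source, target, label) columns are stacked, then the channels are concatenated row-wise.

import numpy as np

sr = np.array([[10.], [20.]]); tr = np.array([[12.], [19.]])
sb = np.array([[30.], [40.]]); tb = np.array([[33.], [41.]])
red = ['red', 'red']; blue = ['blue', 'blue']
color_data_r = np.column_stack((sr, tr, red))
color_data_b = np.column_stack((sb, tb, blue))
all_color_data = np.row_stack((color_data_b, color_data_r))
print(all_color_data.shape)             # (4, 3)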
Example #32
def train_controller(Controller, Controller_optim, rnd_fix, rnd_learn,
                     rnd_learn_optim, trainloader, testloader, model,
                     optimizer, criterion, hyperparams):
    """
    The controller is updated with a score function gradient estimator
    (i.e., REINFORCE), with the reward being c/valid_ppl, where valid_ppl
    is computed on a minibatch of validation data.

    A moving average baseline is used.

    The controller is trained for 2000 steps per epoch.
    """
    logger = Logger(os.path.join(hyperparams['checkpoint'], 'search_log.txt'),
                    title='')
    logger.set_names([
        'Loss', 'Baseline', 'Reward', 'Action', 'Binary', 'Valid Loss',
        'Valid Acc.', 'Sparse', 'rnd_loss', 'p'
    ])

    logger_all = Logger(os.path.join(hyperparams['checkpoint'],
                                     'search_log_all_sampled.txt'),
                        title='')
    logger_all.set_names([
        'Loss', 'Baseline', 'Reward', 'Action', 'Binary', 'Valid Loss',
        'Valid Acc.', 'Sparse'
    ])

    model_fc = Controller
    model_fc.train()

    model_rnd_fix = rnd_fix
    model_rnd_fix.eval()
    model_rnd_learn = rnd_learn
    model_rnd_learn.train()

    baseline = None
    total_loss = 0
    buffer = ReplayBuffer(30)
    buffer_sparse = ReplayBuffer(30)
    update_mode = 'online'

    for step in range(hyperparams['controller_max_step']):
        print('************************* (' + str(step + 1) + '/' +
              str(hyperparams['controller_max_step']) + ')******')
        adjust_learning_rate(optimizer, step, args, hyperparams)
        actions, log_probs = model_fc.sample(replay=None)

        #sample N connection for val (for updating theta)
        actions_validate, log_probs_validate = model_fc.sample(
            batch_size=args.validate_size)

        decimal_code_all = []
        binary_code_all = []

        # get the action code (binary to decimal)
        actions_code_1, binary_code_1 = get_action_code(actions)
        decimal_code_all.append(actions_code_1)
        binary_code_all.append(binary_code_1)

        for i in range(actions_validate[0].size(0)):
            binary_code = ''
            for action in actions_validate:
                binary_code = binary_code + str(action[i].item())
            decimal_code = int(binary_code, 2)
            decimal_code_all.append(decimal_code)
            binary_code_all.append(binary_code)

        #get reward (train one "step")
        rewards_org, val_acc, val_loss, sparse_portion = get_reward(
            None, testloader, model, optimizer, criterion, actions, step,
            hyperparams)
        rewards_validate, val_acc_validate, val_loss_validate, sparse_portion_validate = get_reward(
            None, testloader, model, optimizer, criterion, actions_validate,
            step, hyperparams)

        val_acc = np.row_stack((val_acc, val_acc_validate))
        val_loss = np.row_stack((val_loss, val_loss_validate))
        #buf_rewards
        rewards = np.row_stack((rewards_org, rewards_validate))

        # #buf_action
        temp_action = torch.cat(actions).view(1, -1)
        temp_actions_validate = torch.cat(actions_validate).view(
            args.validate_size, -1)
        buf_action = torch.cat((temp_action, temp_actions_validate), 0)

        # #buf_prob
        temp_prob = torch.exp(log_probs).detach()
        temp_prob_val = torch.exp(log_probs_validate).detach()
        buf_prob = torch.cat((temp_prob, temp_prob_val), 0)

        # #store - buffer
        buffer.add_new(buf_action, rewards, buf_prob, step)

        #cal rnd
        val_fix = model_rnd_fix(buf_action.float())
        val_learn = model_rnd_learn(buf_action.float())
        reward_rnd = ((val_fix - val_learn)**2).sum()
        rewards = rewards + reward_rnd.detach().cpu().data.numpy()
        # moving average baseline
        if baseline is None:
            baseline = rewards.mean()
        else:
            decay = hyperparams['ema_baseline_decay']
            baseline = decay * baseline + (1 - decay) * rewards.mean()

        adv = rewards - baseline
        log_probs = torch.cat((log_probs, log_probs_validate), 0)
        loss = -log_probs * tools.get_variable(adv, True, requires_grad=False)

        loss = loss.mean(dim=0, keepdim=True)

        loss = loss.sum()

        # update
        Controller_optim.zero_grad()
        rnd_learn_optim.zero_grad()
        loss.backward()
        reward_rnd.mean().backward()
        if hyperparams['controller_grad_clip'] > 0:
            torch.nn.utils.clip_grad_norm(model_fc.parameters(),
                                          hyperparams['controller_grad_clip'])
        rnd_learn_optim.step()
        Controller_optim.step()



        log = 'Step: {step}| Loss: {loss:.4f}| Action: {act} |Baseline: {base:.4f}| ' \
              'Reward {re:.4f}| Valid Acc {acc:.4f}'.format(loss=loss.item(), base=baseline, act = binary_code,
                                                            re=rewards[0].item(), acc=val_acc[0].item(), step=step)
        print(log)
        logger.append([loss.item(), baseline, rewards[0].item(), actions_code_1, binary_code_1, \
                            val_loss[0].item(), val_acc[0].item(), (binary_code.count('0')/len(binary_code)), reward_rnd.item(), torch.exp(log_probs).mean().item()])
        for i in range(len(binary_code_all)):
            logger_all.append([
                loss.item(), baseline, rewards[i].item(), decimal_code_all[i],
                binary_code_all[i], val_loss[i].item(), val_acc[i].item(),
                (binary_code_all[i].count('0') / len(binary_code_all[i]))
            ])

        save_checkpoint(
            {
                'iters': step + 1,
                'state_dict': model_fc.state_dict(),
                'optimizer': Controller_optim.state_dict(),
            },
            checkpoint=hyperparams['checkpoint'])
        save_checkpoint(
            {
                'state_dict': model.state_dict(),
                'optimizer': optimizer.state_dict()
            },
            checkpoint=hyperparams['checkpoint'],
            filename='model.pth.tar')

        if step >= 10:

            # use old data (buff)
            replay = buffer.sample(num=1)
            log_probs, replay_rewards, prob_ratio = model_fc.sample(
                replay=replay)
            adv = replay_rewards - baseline
            eps_clip = 0.1

            temp_surr1 = (prob_ratio.detach() * log_probs).mm(
                tools.get_variable(adv, True, requires_grad=False))
            temp_surr2 = (
                torch.clamp(prob_ratio.detach(), 1 - eps_clip, 1 + eps_clip) *
                log_probs).mm(
                    tools.get_variable(adv, True, requires_grad=False))
            surr1 = temp_surr1.sum() / (args.validate_size + 1)
            surr2 = temp_surr2.sum() / (args.validate_size + 1)

            loss = 0.1 * -torch.min(surr1, surr2)

            # update
            Controller_optim.zero_grad()
            loss.backward()

            if hyperparams['controller_grad_clip'] > 0:
                torch.nn.utils.clip_grad_norm(
                    model_fc.parameters(), hyperparams['controller_grad_clip'])
            Controller_optim.step()
Example #33
def plot_decision_rule(model,
                       dr,
                       state,
                       plot_controls=None,
                       bounds=None,
                       n_steps=10,
                       s0=None,
                       **kwargs):
    """
    Plots decision rule

    Parameters:
    -----------

    model:
        "fg" or "fga" model

    dr:
        decision rule

    state:
        state variable that is supposed to vary

    plot_controls: string, list or None
        - if None, return a pandas dataframe
        - if a string denoting a control variable, plot this variable as a
          function of the state
        - if a list of strings, plot several variables

    bounds: array_like
        the state variable varies from bounds[0] to bounds[1]. By default,
        boundaries are looked for in the decision rule, then in
        the model.

    n_steps: int
        number of points to be plotted

    s0: array_like or None
        value for the state variables, that remain constant. Defaults to
        `model.calibration['states']`

    Returns:
    --------

    dataframe or plot, depending on the value of `plot_controls`

    """

    states_names = model.symbols['states']
    controls_names = model.symbols['controls']
    index = states_names.index(str(state))

    if bounds is None:
        if hasattr(dr, 'a'):
            bounds = [dr.a[index], dr.b[index]]
        else:
            approx = model.options['approximation_space']
            bounds = [approx['a'][index], approx['b'][index]]

    values = numpy.linspace(bounds[0], bounds[1], n_steps)
    if s0 is None:
        s0 = model.calibration['states']
    svec = numpy.row_stack([s0] * n_steps)
    svec[:, index] = values
    xvec = dr(svec)

    l = [svec, xvec]
    series = model.symbols['states'] + model.symbols['controls']

    if 'auxiliary' in model.functions:

        p = model.calibration['parameters']
        pp = numpy.row_stack([p] * n_steps)
        avec = model.functions['auxiliary'](svec, xvec, pp)
        l.append(avec)
        series.extend(model.symbols['auxiliaries'])

    tb = numpy.concatenate(l, axis=1)
    df = pandas.DataFrame(tb, columns=series)

    if plot_controls is None:
        return df
    else:
        if isinstance(plot_controls, str):
            cn = plot_controls
            pyplot.plot(values, df[cn], **kwargs)
        else:
            for cn in plot_controls:
                pyplot.plot(values, df[cn], label=cn, **kwargs)
            pyplot.legend()
        pyplot.xlabel('state = {}'.format(state))
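A minimal sketch of the replication step plot_decision_rule uses: numpy.row_stack([s0] * n_steps) repeats the calibrated state vector row-wise, then one column is swept over the grid of values.

import numpy

s0 = numpy.array([1.0, 2.0, 3.0])
n_steps = 4
svec = numpy.row_stack([s0] * n_steps)
svec[:, 1] = numpy.linspace(0.0, 1.0, n_steps)
print(svec.shape)                       # (4, 3)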
Example #34
def solve_risky_ss(model, X_bar, X_s, verbose=False):

    import numpy
    from dolo.compiler.compiling import compile_function
    import time
    from dolo.compiler.compiler_global import simple_global_representation

    [y, x, parms] = model.read_calibration()
    sigma = model.read_covariances()

    sgm = simple_global_representation(model, substitute_auxiliary=True)

    states = sgm['states']
    controls = sgm['controls']
    shocks = sgm['shocks']
    parameters = sgm['parameters']
    f_eqs = sgm['f_eqs']
    g_eqs = sgm['g_eqs']

    g_args = [s(-1) for s in states] + [c(-1) for c in controls] + shocks
    f_args = states + controls + [v(1)
                                  for v in states] + [v(1) for v in controls]
    p_args = parameters

    g_fun = compile_function(g_eqs, g_args, p_args, 2, return_function=True)
    f_fun = compile_function(f_eqs, f_args, p_args, 3, return_function=True)

    epsilons_0 = np.zeros((sigma.shape[0]))

    from numpy import dot
    from dolo.numeric.tensor import sdot, mdot

    def residuals(X, sigma, parms, g_fun, f_fun):

        import numpy

        dummy_x = X[0:1, 0]
        X_bar = X[1:, 0]
        S_bar = X[0, 1:]
        X_s = X[1:, 1:]

        [n_x, n_s] = X_s.shape

        n_e = sigma.shape[0]

        xx = np.concatenate([S_bar, X_bar, epsilons_0])

        [g_0, g_1, g_2] = g_fun(xx, parms)
        [f_0, f_1, f_2,
         f_3] = f_fun(np.concatenate([S_bar, X_bar, S_bar, X_bar]), parms)

        res_g = g_0 - S_bar

        # g is a first order function
        g_s = g_1[:, :n_s]
        g_x = g_1[:, n_s:n_s + n_x]
        g_e = g_1[:, n_s + n_x:]
        g_se = g_2[:, :n_s, n_s + n_x:]
        g_xe = g_2[:, n_s:n_s + n_x, n_s + n_x:]

        # S(s,e) = g(s,x,e)
        S_s = g_s + dot(g_x, X_s)
        S_e = g_e
        S_se = g_se + mdot(g_xe, [X_s, numpy.eye(n_e)])

        # V(s,e) = [ g(s,x,e) ; x( g(s,x,e) ) ]
        V_s = np.row_stack([S_s, dot(X_s, S_s)])  # ***

        V_e = np.row_stack([S_e, dot(X_s, S_e)])

        V_se = np.row_stack([S_se, dot(X_s, S_se)])

        # v(s) = [s, x(s)]
        v_s = np.row_stack([numpy.eye(n_s), X_s])

        # W(s,e) = [xx(s,e); yy(s,e)]
        W_s = np.row_stack([v_s, V_s])

        #return

        nn = n_s + n_x
        f_v = f_1[:, :nn]
        f_V = f_1[:, nn:]
        f_1V = f_2[:, :, nn:]
        f_VV = f_2[:, nn:, nn:]
        f_1VV = f_3[:, :, nn:, nn:]

        #        E = lambda v: np.tensordot(v, sigma,  axes=((2,3),(0,1)) ) # expectation operator

        F = f_0 + 0.5 * np.tensordot(
            mdot(f_VV, [V_e, V_e]), sigma, axes=((1, 2), (0, 1)))

        F_s = sdot(f_1, W_s)
        f_see = mdot(f_1VV, [W_s, V_e, V_e]) + 2 * mdot(f_VV, [V_se, V_e])
        F_s += 0.5 * np.tensordot(f_see, sigma, axes=(
            (2, 3), (0, 1)))  # second order correction

        resp = np.row_stack(
            [np.concatenate([dummy_x, res_g]),
             np.column_stack([F, F_s])])

        return resp

    #    S_bar = s_fun_init( numpy.atleast_2d(X_bar).T ,parms).flatten()
    #    S_bar = S_bar.flatten()
    S_bar = [
        y[i] for i, v in enumerate(model.variables)
        if v in model['variables_groups']['states']
    ]
    S_bar = np.array(S_bar)

    X0 = np.row_stack(
        [np.concatenate([np.zeros(1), S_bar]),
         np.column_stack([X_bar, X_s])])

    from dolo.numeric.solver import solver

    fobj = lambda X: residuals(X, sigma, parms, g_fun, f_fun)

    if verbose:
        val = fobj(X0)
        print('val')
        print(val)

    #    exit()

    t = time.time()

    sol = solver(fobj,
                 X0,
                 method='lmmcp',
                 verbose=verbose,
                 options={
                     'preprocess': False,
                     'eps1': 1e-15,
                     'eps2': 1e-15
                 })

    if verbose:
        print('initial guess')
        print(X0)
        print('solution')
        print(sol)
        print('initial residuals')
        print(fobj(X0))
        print('residuals')
        print(fobj(sol))
        s = time.time()

    if verbose:
        print('Elapsed : {0}'.format(s - t))
        #sol = solver(fobj,X0, method='fsolve', verbose=True, options={'preprocessor':False})

    norm = lambda x: numpy.linalg.norm(x, numpy.inf)
    if verbose:
        print("Initial error: {0}".format(norm(fobj(X0))))
        print("Final error: {0}".format(norm(fobj(sol))))

        print("Solution")
        print(sol)

    X_bar = sol[1:, 0]
    S_bar = sol[0, 1:]
    X_s = sol[1:, 1:]

    # compute transitions
    n_s = len(states)
    n_x = len(controls)
    [g, dg, junk] = g_fun(np.concatenate([S_bar, X_bar, epsilons_0]), parms)
    g_s = dg[:, :n_s]
    g_x = dg[:, n_s:n_s + n_x]

    P = g_s + dot(g_x, X_s)

    if verbose:
        eigenvalues = numpy.linalg.eigvals(P)
        print(eigenvalues)
        eigenvalues = [abs(e) for e in eigenvalues]
        eigenvalues.sort()
        print(eigenvalues)

    return [S_bar, X_bar, X_s, P]
Example #35
            points_virtual = []
            if pos_camera == "left":
                for key in list(project_keyboard.black_keys())[:3]:
                    pixel_corner_img = find_black_key_corner(key)
                    bbox_virtual3d = project_keyboard.bounding_box(key)
                    pixel_corner_virtual = np.append(bbox_virtual3d[-2,:], 1)
                    points_virtual.append(pixel_corner_virtual)
                    points_img.append(pixel_corner_img)
            else:
                for key in list(project_keyboard.black_keys())[-3:]:
                    pixel_corner_img = find_black_key_corner(key)
                    bbox_virtual3d = project_keyboard.bounding_box(key)
                    pixel_corner_virtual = np.append(bbox_virtual3d[-1,:], 1)
                    points_virtual.append(pixel_corner_virtual)
                    points_img.append(pixel_corner_img)
            points_virtual = np.row_stack(points_virtual)
            points_img = np.row_stack(points_img)

            # Find 3d projection with black keys
            T_virtual3d_to_img = project_keyboard.perspective_transformation_3d(T_virtual_to_img, points_virtual, points_img)
            T_virtual3d_to_virtual = T_img_to_virtual.dot(T_virtual3d_to_img)
            points_img_flat = points_virtual.dot(T_virtual3d_to_img.T)
            points_img_flat /= points_img_flat[:,-1,np.newaxis]
            points_virtual_flat = points_img_flat.dot(T_img_to_virtual.T)
            points_virtual_flat /= points_virtual_flat[:,-1,np.newaxis]
            points_virtual_flat = points_virtual_flat.astype(np.int32)

            cv2.circle(img_virtual, tuple(points_virtual_flat[0,:2][::-1]), 10, (0,0,255), 3)
            cv2.circle(img_virtual, tuple(points_virtual_flat[1,:2][::-1]), 10, (0,0,255), 3)
            cv2.circle(img_virtual, tuple(points_virtual_flat[2,:2][::-1]), 10, (0,0,255), 3)
Example #36
file_names = [
    f for f in listdir('./Labels/001') if isfile(join(r'./Labels/001', f))
]
if file_names[0] == '.DS_Store':
    del file_names[0]
#print file_names
start_matrix = [['xmin', 'ymin', 'xmax', 'ymax', 'image_name', 'class']]
for i in range(len(file_names)):
    if file_names[i] != 'test.txt':
        textfile_name = file_names[i]
        label_matrix = np.loadtxt(os.path.join('./Labels/001', textfile_name),
                                  dtype='str')
        dim = label_matrix.shape
        if dim[0] == 0:
            continue
        if len(dim) == 1:
            label_matrix = label_matrix.reshape((1, dim[0]))
        print(textfile_name)
        dim_new = label_matrix.shape
        column_vec = np.zeros(shape=(dim_new[0], )).astype(str)
        #image_name = textfile_name[0:-3] + 'jpg'
        column_vec[:, ] = textfile_name
        new_matrix = np.zeros(shape=(dim_new[0], 6)).astype(str)
        new_matrix[:, 0:4] = label_matrix[:, 0:4]
        new_matrix[:, 4] = column_vec
        new_matrix[:, 5] = label_matrix[:, 4]
        start_matrix = np.row_stack((start_matrix, new_matrix))
print(start_matrix)
fmt = '%s %s %s %s %s %s'
np.savetxt('label.txt', start_matrix, fmt=fmt)
Ejemplo n.º 37
0
    def extend_row(self, dim2arr):
        row = np.zeros(dim2arr.shape[1])
        dim2arr = np.row_stack((dim2arr, row))
        return dim2arr
Ejemplo n.º 38
0
def findMainDirectionEMA(lines):
    #FINDMAINDIRECTION compute vp from set of lines
    #   Detailed explanation goes here
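    # (Assumed from the code below, not stated in the source: each row of
    # `lines` stores a unit plane normal in columns 0:3 and the segment's
    # angular length, in radians, in column 6.)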
    print('Computing vanishing point:\n')

    # arcList = [];
    # for i = 1:length(edge)
    #     panoLst = edge(i).panoLst;
    #     if size(panoLst,1) == 0
    #         continue;
    #     end
    #     arcList = [arcList; panoLst];
    # end

    ## initial guess
    segNormal = lines[:, 0:3]
    segLength = lines[:, 6]
    segScores = np.ones([lines.shape[0], 1])
    #lines(:,8);

    shortSegValid = segLength < 5 * np.pi / 180
    segNormal = segNormal[~shortSegValid, :]
    segLength = segLength[~shortSegValid]
    segScores = segScores[~shortSegValid]

    numLinesg = segNormal.shape[0]
    [candiSet, tri] = icosahedron2sphere.icosahedron2sphere(3)
    ang = np.arccos(np.sum(
        candiSet[tri[0, 0], :] * candiSet[tri[0, 1], :])) / np.pi * 180
    binRadius = ang / 2
    [initXYZ, score, angle] = sphereHoughVote(segNormal, segLength, segScores,
                                              2 * binRadius, 2, candiSet, True)

    if len(initXYZ) == 0:
        print('Initial Failed')
        mainDirect = []
        return [mainDirect, score, angle]

    print('Initial Computation: %d candidates, %d line segments' %
          (candiSet.shape[0], numLinesg))
    print('direction 1: %f %f %f\ndirection 2: %f %f %f\ndirection 3: %f %f %f' %
          (initXYZ[0, 0], initXYZ[0, 1], initXYZ[0, 2], initXYZ[1, 0],
           initXYZ[1, 1], initXYZ[1, 2], initXYZ[2, 0], initXYZ[2, 1],
           initXYZ[2, 2]))
    ## iterative refine
    iter_max = 3
    [candiSet, tri] = icosahedron2sphere.icosahedron2sphere(5)
    numCandi = candiSet.shape[0]
    angD = np.arccos(np.sum(
        candiSet[tri[0, 0], :] * candiSet[tri[0, 1], :])) / np.pi * 180
    binRadiusD = angD / 2
    curXYZ = initXYZ
    tol = np.linspace(4 * binRadius, 4 * binRadiusD, iter_max)
    # shrink down #ls and #candi
    for iter in np.arange(iter_max):
        dot1 = np.abs(np.sum(segNormal * repmat(curXYZ[0, :], numLinesg, 1),
                             1))
        dot2 = np.abs(np.sum(segNormal * repmat(curXYZ[1, :], numLinesg, 1),
                             1))
        dot3 = np.abs(np.sum(segNormal * repmat(curXYZ[2, :], numLinesg, 1),
                             1))
        valid1 = dot1 < np.cos((90 - tol[iter]) * np.pi / 180)
        valid2 = dot2 < np.cos((90 - tol[iter]) * np.pi / 180)
        valid3 = dot3 < np.cos((90 - tol[iter]) * np.pi / 180)
        valid = valid1 | valid2 | valid3

        if (sum(valid) == 0):
            print('ZERO line segment for voting\n')
            break

        subSegNormal = segNormal[valid, :]
        subSegLength = segLength[valid]
        subSegScores = segScores[valid]

        dot1 = np.abs(np.sum(candiSet * repmat(curXYZ[0, :], numCandi, 1), 1))
        dot2 = np.abs(np.sum(candiSet * repmat(curXYZ[1, :], numCandi, 1), 1))
        dot3 = np.abs(np.sum(candiSet * repmat(curXYZ[2, :], numCandi, 1), 1))
        valid1 = dot1 > np.cos(tol[iter] * np.pi / 180)
        valid2 = dot2 > np.cos(tol[iter] * np.pi / 180)
        valid3 = dot3 > np.cos(tol[iter] * np.pi / 180)
        valid = valid1 | valid2 | valid3

        if (sum(valid) == 0):
            print('ZERO candidate for voting\n')
            break

        subCandiSet = candiSet[valid, :]

        [tcurXYZ, _,
         _] = sphereHoughVote(subSegNormal, subSegLength, subSegScores,
                              2 * binRadiusD, 2, subCandiSet, True)

        if (len(tcurXYZ) == 0):
            print('NO answer found!\n')
            break

        curXYZ = tcurXYZ

        print('%d-th iteration: %d candidates, %d line segments' %
              (iter, subCandiSet.shape[0], len(subSegScores)))

    print('direction 1: %f %f %f\ndirection 2: %f %f %f\ndirection 3: %f %f %f' %
          (curXYZ[0, 0], curXYZ[0, 1], curXYZ[0, 2], curXYZ[1, 0], curXYZ[1, 1],
           curXYZ[1, 2], curXYZ[2, 0], curXYZ[2, 1], curXYZ[2, 2]))
    mainDirect = curXYZ

    mainDirect[0, :] = mainDirect[0, :] * np.sign(mainDirect[0, 2])
    mainDirect[1, :] = mainDirect[1, :] * np.sign(mainDirect[1, 2])
    mainDirect[2, :] = mainDirect[2, :] * np.sign(mainDirect[2, 2])

    uv = CoordsTransform.xyz2uvN(mainDirect, 0)
    I1 = np.argmax(uv[1, :])
    J = np.setdiff1d([
        0,
        1,
        2,
    ], I1)
    I2 = np.argmin(np.abs(np.sin(uv[0, J])))
    I2 = J[I2]
    I3 = np.setdiff1d([0, 1, 2], [I1, I2])
    mainDirect = np.row_stack(
        (mainDirect[I1, :], mainDirect[I2, :], mainDirect[I3, :]))

    mainDirect[0, :] = mainDirect[0, :] * np.sign(mainDirect[0, 2])
    mainDirect[1, :] = mainDirect[1, :] * np.sign(mainDirect[1, 1])
    mainDirect[2, :] = mainDirect[2, :] * np.sign(mainDirect[2, 0])

    mainDirect = np.row_stack((mainDirect, -mainDirect))

    # score = 0;

    return [mainDirect, score, angle]
Ejemplo n.º 39
0
def get_neighbour_info(source_geo_def, target_geo_def, radius_of_influence,
                       neighbours=8, epsilon=0, reduce_data=True,
                       nprocs=1, segments=None):
    """Returns neighbour info

    Parameters
    ----------
    source_geo_def : object
        Geometry definition of source
    target_geo_def : object
        Geometry definition of target
    radius_of_influence : float
        Cut off distance in meters
    neighbours : int, optional
        The number of neighbours to consider for each grid point
    epsilon : float, optional
        Allowed uncertainty in meters. Increasing uncertainty
        reduces execution time
    reduce_data : bool, optional
        Perform initial coarse reduction of source dataset in order
        to reduce execution time
    nprocs : int, optional
        Number of processor cores to be used
    segments : int or None
        Number of segments to use when resampling.
        If set to None an estimate will be calculated

    Returns
    -------
    (valid_input_index, valid_output_index,
    index_array, distance_array) : tuple of numpy arrays
        Neighbour resampling info
    """

    if source_geo_def.size < neighbours:
        warnings.warn('Searching for %s neighbours in %s data points' %
                      (neighbours, source_geo_def.size))

    if segments is None:
        cut_off = 3000000
        if target_geo_def.size > cut_off:
            segments = int(target_geo_def.size / cut_off)
        else:
            segments = 1

    # Find reduced input coordinate set
    valid_input_index, source_lons, source_lats = _get_valid_input_index(source_geo_def, target_geo_def,
                                                                         reduce_data,
                                                                         radius_of_influence,
                                                                         nprocs=nprocs)

    # Create kd-tree
    try:
        resample_kdtree = _create_resample_kdtree(source_lons, source_lats,
                                                  valid_input_index,
                                                  nprocs=nprocs)
    except EmptyResult:
        # Handle if all input data is reduced away
        valid_output_index, index_array, distance_array = \
            _create_empty_info(source_geo_def, target_geo_def, neighbours)
        return (valid_input_index, valid_output_index, index_array,
                distance_array)

    if segments > 1:
        # Iterate through segments
        for i, target_slice in enumerate(geometry._get_slice(segments,
                                                             target_geo_def.shape)):

            # Query on slice of target coordinates
            next_voi, next_ia, next_da = \
                _query_resample_kdtree(resample_kdtree, source_geo_def,
                                       target_geo_def,
                                       radius_of_influence, target_slice,
                                       neighbours=neighbours,
                                       epsilon=epsilon,
                                       reduce_data=reduce_data,
                                       nprocs=nprocs)

            # Build result iteratively
            if i == 0:
                # First iteration
                valid_output_index = next_voi
                index_array = next_ia
                distance_array = next_da
            else:
                valid_output_index = np.append(valid_output_index, next_voi)
                if neighbours > 1:
                    index_array = np.row_stack((index_array, next_ia))
                    distance_array = np.row_stack((distance_array, next_da))
                else:
                    index_array = np.append(index_array, next_ia)
                    distance_array = np.append(distance_array, next_da)
    else:
        # Query kd-tree with full target coordinate set
        full_slice = slice(None)
        valid_output_index, index_array, distance_array = \
            _query_resample_kdtree(resample_kdtree, source_geo_def,
                                   target_geo_def,
                                   radius_of_influence, full_slice,
                                   neighbours=neighbours,
                                   epsilon=epsilon,
                                   reduce_data=reduce_data,
                                   nprocs=nprocs)

    # Check if number of neighbours is potentially too low
    if neighbours > 1:
        if not np.all(np.isinf(distance_array[:, -1])):
            warnings.warn(('Possible more than %s neighbours '
                           'within %s m for some data points') %
                          (neighbours, radius_of_influence))

    return valid_input_index, valid_output_index, index_array, distance_array
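
# A minimal usage sketch (an assumption, not from the source): it presumes this
# function lives in a pyresample-style kd_tree module whose private helpers
# (_get_valid_input_index, _create_resample_kdtree, ...) are in scope, and the
# SwathDefinition coordinates below are purely illustrative.
import numpy as np
from pyresample import geometry

lons = np.array([10.0, 10.1, 10.2])
lats = np.array([50.0, 50.1, 50.2])
source_def = geometry.SwathDefinition(lons=lons, lats=lats)
target_def = geometry.SwathDefinition(lons=lons, lats=lats)
valid_in, valid_out, index_arr, dist_arr = get_neighbour_info(
    source_def, target_def, radius_of_influence=50000, neighbours=1)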
Ejemplo n.º 40
0
def combineEdgesN(edges):
    #COMBINEEDGES Combine some small line segments, should be very conservative
    #   lines: combined line segments
    #   ori_lines: original line segments
    #   line format: [nx ny nz projectPlaneID umin umax LSfov score]
    arcList = []
    for i in np.arange(edges.shape[0]):
        panoLst = edges[i].panoLst
        if panoLst[0].shape[0] == 0:
            continue

        if (len(arcList)) == 0:
            arcList = panoLst
        else:
            arcList = np.row_stack((arcList, panoLst))

    ## ori lines
    numLine = arcList.shape[0]
    ori_lines = np.zeros((numLine, 8))
    areaXY = np.abs(
        np.sum(arcList[:, 0:3] * repmat([0, 0, 1], arcList.shape[0], 1), 1))
    areaYZ = np.abs(
        np.sum(arcList[:, 0:3] * repmat([1, 0, 0], arcList.shape[0], 1), 1))
    areaZX = np.abs(
        np.sum(arcList[:, 0:3] * repmat([0, 1, 0], arcList.shape[0], 1), 1))

    vec = [areaXY, areaYZ, areaZX]

    #[_, planeIDs] = np.max(vec,  1); # 1:XY 2:YZ 3:ZX
    planeIDs = np.argmax(vec, 0)

    for i in np.arange(numLine):
        ori_lines[i, 0:3] = arcList[i, 0:3]
        ori_lines[i, 3] = planeIDs[i]
        coord1 = arcList[i, 3:6]
        coord2 = arcList[i, 6:9]

        uv = CoordsTransform.xyz2uvN(np.row_stack((coord1, coord2)),
                                     planeIDs[i])
        umax = np.max(uv[0, :]) + np.pi
        umin = np.min(uv[0, :]) + np.pi
        if umax - umin > np.pi:
            ori_lines[i, 4:6] = np.column_stack((umax, umin)) / 2 / np.pi
    #         ori_lines(i,7) = umin + 1 - umax;
        else:
            ori_lines[i, 4:6] = np.column_stack((umin, umax)) / 2 / np.pi
    #         ori_lines(i,7) = umax - umin;

        ori_lines[i, 6] = np.arccos(
            np.sum(coord1 * coord2) /
            (np.linalg.norm(coord1, 2) * np.linalg.norm(coord2, 2)))
        ori_lines[i, 7] = arcList[i, 9]

    # valid = ori_lines(:,3)<0;
    # ori_lines(valid,1:3) = -ori_lines(valid,1:3);

    ## additive combination
    lines = ori_lines
    # panoEdge = paintParameterLine( lines, 1024, 512);
    # figure; imshow(panoEdge);
    for iter in np.arange(3):
        numLine = lines.shape[0]
        valid_line = np.ones([numLine], dtype=bool)
        for i in np.arange(numLine):
            #         fprintf('#d/#d\n', i, numLine);
            if valid_line[i] == False:
                continue

            dotProd = np.sum(lines[:, 0:3] * repmat(lines[i, 0:3], numLine, 1),
                             1)
            valid_curr = (np.abs(dotProd) > np.cos(
                1 * np.pi / 180)) & valid_line
            valid_curr[i] = False
            valid_ang = np.where(valid_curr)
            for j in valid_ang[0]:
                range1 = lines[i, 4:6]
                range2 = lines[j, 4:6]
                valid_rag = intersection(range1, range2)
                if valid_rag == False:
                    continue

                # combine
                I = np.argmax(np.abs(lines[i, 0:3]))
                if lines[i, I] * lines[j, I] > 0:
                    nc = lines[i, 0:3] * lines[i, 6] + lines[j, 0:3] * lines[j,
                                                                             6]
                else:
                    nc = lines[i, 0:3] * lines[i, 6] - lines[j, 0:3] * lines[j,
                                                                             6]

                nc = nc / np.linalg.norm(nc, 2)

                if insideRange(range1[0], range2):
                    nrmin = range2[0]
                else:
                    nrmin = range1[0]

                if insideRange(range1[1], range2):
                    nrmax = range2[1]
                else:
                    nrmax = range1[1]

                u = np.array([nrmin, nrmax]) * 2 * np.pi - np.pi
                v = CoordsTransform.computeUVN(nc, u, lines[i, 3])
                xyz = CoordsTransform.uv2xyzN(np.column_stack((u, v)),
                                              lines[i, 3])
                length = np.arccos(np.sum(xyz[0, :] * xyz[1, :]))
                scr = (lines[i, 6] * lines[i, 7] +
                       lines[j, 6] * lines[j, 7]) / (lines[i, 6] + lines[j, 6])

                nc = np.append(nc, lines[i, 3])
                nc = np.append(nc, nrmin)
                nc = np.append(nc, nrmax)
                nc = np.append(nc, length)
                nc = np.append(nc, scr)
                newLine = nc
                lines[i, :] = newLine
                valid_line[j] = False

        lines = lines[valid_line, :]
        print('iter: %d, before: %d, after: %d' %
              (iter, len(valid_line), sum(valid_line)))

    return [lines, ori_lines]
Ejemplo n.º 41
0
    def attack(self,
               target,
               n_perturbations=None,
               direct_attack=True,
               structure_attack=True,
               feature_attack=False,
               n_influencers=5,
               ll_constraint=True,
               ll_cutoff=0.004,
               disable=False):

        super().attack(target, n_perturbations, direct_attack,
                       structure_attack, feature_attack)

        if feature_attack and not is_binary(self.x):
            raise RuntimeError(
                "Attacks on the node features are currently only supported for binary attributes."
            )

        if ll_constraint and self.allow_singleton:
            raise RuntimeError(
                '`ll_constraint` is not supported when `allow_singleton=True`; please set `attacker.allow_singleton=False`.'
            )

        logits_start = self.compute_logits()
        best_wrong_class = self.strongest_wrong_class(logits_start)

        if structure_attack and ll_constraint:
            # Setup starting values of the likelihood ratio test.
            degree_sequence_start = self.degree
            current_degree_sequence = self.degree.astype('float64')
            d_min = 2
            S_d_start = np.sum(
                np.log(degree_sequence_start[degree_sequence_start >= d_min]))
            current_S_d = np.sum(
                np.log(
                    current_degree_sequence[current_degree_sequence >= d_min]))
            n_start = np.sum(degree_sequence_start >= d_min)
            current_n = np.sum(current_degree_sequence >= d_min)
            alpha_start = compute_alpha(n_start, S_d_start, d_min)
            log_likelihood_orig = compute_log_likelihood(
                n_start, alpha_start, S_d_start, d_min)

        if len(self.influence_nodes) == 0:
            if not direct_attack:
                # Choose influencer nodes
                infls, add_infls = self.get_attacker_nodes(
                    n_influencers, add_additional_nodes=True)
                self.influence_nodes = np.concatenate(
                    (infls, add_infls)).astype("int")
                # Potential edges are all edges from any attacker to any other node, except the respective
                # attacker itself or the node being attacked.
                self.potential_edges = np.row_stack([
                    np.column_stack(
                        (np.tile(infl, self.n_nodes - 2),
                         np.setdiff1d(np.arange(self.n_nodes),
                                      np.array([self.target, infl]))))
                    for infl in self.influence_nodes
                ])
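                # Illustrative note (an assumption, not from the source): with
                # n_nodes = 4, target = 0 and influence_nodes = [1, 2], the
                # row_stack above yields [[1, 2], [1, 3], [2, 1], [2, 3]]:
                # every (attacker, other-node) pair that excludes both the
                # target and the attacker itself.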
            else:
                # direct attack
                influencers = [self.target]
                self.potential_edges = np.column_stack(
                    (np.tile(self.target, self.n_nodes - 1),
                     np.setdiff1d(np.arange(self.n_nodes), self.target)))
                self.influence_nodes = np.array(influencers)

        self.potential_edges = self.potential_edges.astype("int32")

        for _ in tqdm(range(self.n_perturbations),
                      desc='Perturbing Graph',
                      disable=disable):
            if structure_attack:
                # Do not consider edges that, if removed, result in singleton edges in the graph.
                if not self.allow_singleton:
                    filtered_edges = filter_singletons(self.potential_edges,
                                                       self.modified_adj)
                else:
                    filtered_edges = self.potential_edges

                if ll_constraint:
                    # Update the values for the power law likelihood ratio test.
                    deltas = 2 * (1 - self.modified_adj[tuple(
                        filtered_edges.T)].A.ravel()) - 1
                    d_edges_old = current_degree_sequence[filtered_edges]
                    d_edges_new = current_degree_sequence[
                        filtered_edges] + deltas[:, None]
                    new_S_d, new_n = update_Sx(current_S_d, current_n,
                                               d_edges_old, d_edges_new, d_min)
                    new_alphas = compute_alpha(new_n, new_S_d, d_min)
                    new_ll = compute_log_likelihood(new_n, new_alphas, new_S_d,
                                                    d_min)
                    alphas_combined = compute_alpha(new_n + n_start,
                                                    new_S_d + S_d_start, d_min)
                    new_ll_combined = compute_log_likelihood(
                        new_n + n_start, alphas_combined, new_S_d + S_d_start,
                        d_min)
                    new_ratios = -2 * new_ll_combined + 2 * (
                        new_ll + log_likelihood_orig)

                    # Do not consider edges that, if added/removed, would lead to a violation of the
                    # likelihood ratio Chi_square cutoff value.
                    powerlaw_filter = filter_chisquare(new_ratios, ll_cutoff)
                    filtered_edges = filtered_edges[powerlaw_filter]

                # Compute new entries in A_hat_square_uv
                a_hat_uv_new = self.compute_new_a_hat_uv(filtered_edges)
                # Compute the struct scores for each potential edge
                struct_scores = self.struct_score(a_hat_uv_new,
                                                  self.compute_XW())
                best_edge_ix = struct_scores.argmin()
                best_edge_score = struct_scores.min()
                best_edge = filtered_edges[best_edge_ix]

            if feature_attack:
                # Compute the feature scores for each potential feature perturbation
                feature_ixs, feature_scores = self.feature_scores()
                best_feature_ix = feature_ixs[0]
                best_feature_score = feature_scores[0]

            if structure_attack and feature_attack:
                # decide whether to choose an edge or feature to change
                if best_edge_score < best_feature_score:
                    change_structure = True
                else:
                    change_structure = False

            elif structure_attack:
                change_structure = True
            elif feature_attack:
                change_structure = False

            if change_structure:
                # perform edge perturbation
                u, v = best_edge
                modified_adj = self.modified_adj.tolil(copy=False)
                modified_adj[(u, v)] = modified_adj[(
                    v, u)] = 1 - modified_adj[(u, v)]
                self.modified_adj = modified_adj.tocsr(copy=False)
                self.adj_norm = normalize_adj(modified_adj)
                self.structure_flips.append((u, v))

                if ll_constraint:
                    # Update likelihood ratio test values
                    current_S_d = new_S_d[powerlaw_filter][best_edge_ix]
                    current_n = new_n[powerlaw_filter][best_edge_ix]
                    current_degree_sequence[best_edge] += deltas[
                        powerlaw_filter][best_edge_ix]
            else:

                modified_x = self.modified_x.tolil(copy=False)
                modified_x[tuple(
                    best_feature_ix)] = 1 - modified_x[tuple(best_feature_ix)]
                self.modified_x = modified_x.tocsr(copy=False)
                self.feature_flips.append(tuple(best_feature_ix))
Ejemplo n.º 42
0
    all_landmarks[i, :] = (all_landmarks[i, :] - row_min) / (row_max - row_min)
# Convert the numerical traits to be binary classes
for i in range(491):
    row_mean = np.mean(all_traits_value[i, :])
    all_traits_value[i, :] = all_traits_value[i, :] - row_mean

images = [cv2.imread(file) for file in glob.glob('*/data/img/*.jpg')]

# Calculate the HOG features for all 491 images and combine them with the landmarks
Hogs, hogImage = feature.hog(images[0], orientations=8, pixels_per_cell=(32, 32),
                             cells_per_block=(2, 2), transform_sqrt=True,
                             block_norm="L1", visualise=True)
for im in images:
    h, _ = feature.hog(im, orientations=8, pixels_per_cell=(32, 32),
                       cells_per_block=(2, 2), transform_sqrt=True,
                       block_norm="L1", visualise=True)
    Hogs = np.row_stack((Hogs, h))
Hogs = Hogs[1:492, :]
hog_dat = np.column_stack((all_landmarks, Hogs))  # 491x6432


# Min-Max Normalization
def min_max(dat):
    for i in range(dat.shape[0]):
        row_min = min(dat[i, :])
        row_max = max(dat[i, :])
        dat[i, :] = (dat[i, :] - row_min) / (row_max - row_min)
    return (dat)
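
# A minimal usage sketch (an assumption, not from the source): row-wise
# min-max scaling of a small 2x3 array into the [0, 1] range.
demo = np.array([[1.0, 2.0, 3.0], [10.0, 20.0, 30.0]])
print(min_max(demo))  # each row becomes [0.0, 0.5, 1.0]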


# Function to extract Hog Features
def extract_hog_features(path):
Ejemplo n.º 43
0
    ax1.scatter(umur, ketebalan[i], s=400)  # scatter plot

# primary axis label position
ax1.set_xlabel("age (in millions of years)", fontsize=20)
ax1.xaxis.set_label_position('top')
ax1.set_ylabel("depth", fontsize=20)
ax1.set_ylim(bottom=0, top=maximum)

ax1.invert_yaxis()

ax1.tick_params(axis="x", labelsize=20)
ax1.tick_params(axis="y", labelsize=20)
ax2.tick_params(axis="y", labelsize=20)

np.savetxt("csv/file_name.csv",
           np.row_stack((umur, ketebalan)),
           delimiter=",",
           fmt='%s')
np.savetxt("csv/temperature.csv",
           np.row_stack((umur, temperature)),
           delimiter=",",
           fmt='%s')

# np.savetxt("csv/tt_index.csv", TTI, delimiter=",", fmt='%s')

plt.title("Lopatin Burial History", fontsize=20)
ax1.legend(bbox_to_anchor=(1, 0), ncol=4, prop={'size': 20})  # show the legend
fig.tight_layout()
plt.xlim(min(umur), max(umur))
ax1.invert_xaxis()
Ejemplo n.º 44
0
#     # img.axs[1][1].errorbar(g2, new_pdf_result2[i*sub_row], new_pdf_result2[i*sub_row + 1])
#
# img.axs[0][0].errorbar(g1, pdf_result[0], pdf_result[1], fmt=" ", capsize=img.cap_size, label="g1 ori_PDF", marker="v")
# img.axs[0][1].errorbar(g2, pdf_result[3], pdf_result[4], fmt=" ", capsize=img.cap_size, label="g2 ori_PDF", marker="v")
# img.axs[0][0].legend()
# img.axs[0][1].legend()
# img.show_img()



img = Image_Plot(cap_size=4, xpad=0.2, ypad=0.2)
img.subplots(1, 2)
shear_row_idx = [i*sub_row for i in range(iters)]
shear_sig_row_idx = [i*sub_row+1 for i in range(iters)]
print(shear_row_idx)
new_pdf_g1 = numpy.row_stack((pdf_result[0], new_pdf_result1[shear_row_idx]))
new_pdf_g1_sig = numpy.row_stack((pdf_result[1], new_pdf_result1[shear_sig_row_idx]))
new_pdf_g2 = numpy.row_stack((pdf_result[3], new_pdf_result2[shear_row_idx]))
new_pdf_g2_sig = numpy.row_stack((pdf_result[4], new_pdf_result2[shear_sig_row_idx]))
# print(new_pdf_g1)
# print(new_pdf_result1[shear_row_idx])
for i in range(shear_num):
    pass
    # img.axs[0][0].errorbar(range(iters+1), new_pdf_g1[:,i]-g1[i], new_pdf_g1_sig[:,i],
    #                        capsize=img.cap_size, label="$\delta$ g1-%d"%i, marker="s")
    # img.axs[0][1].errorbar(range(iters+1), new_pdf_g2[:,i]-g2[i], new_pdf_g1_sig[:,i],
    #                        capsize=img.cap_size, label="$\delta$g2-%d"%i, marker="s")

for i in range(iters+1):
    img.axs[0][0].errorbar(g1, new_pdf_g1[i]-g1, new_pdf_g1_sig[i],
                           capsize=img.cap_size, label="$\delta$ g1 iter-%d"%i, fmt=" ",marker="s",mfc="none")
Ejemplo n.º 45
0
def stackplot(axes,
              x,
              *args,
              labels=(),
              colors=None,
              baseline='zero',
              **kwargs):
    """
    Draw a stacked area plot.

    Parameters
    ----------
    x : 1d array of dimension N

    y : 2d array (dimension MxN), or sequence of 1d arrays (each dimension 1xN)

        The data is assumed to be unstacked. Each of the following
        calls is legal::

            stackplot(x, y)               # where y is MxN
            stackplot(x, y1, y2, y3, y4)  # where y1, y2, y3, y4 are all 1xN

    baseline : {'zero', 'sym', 'wiggle', 'weighted_wiggle'}
        Method used to calculate the baseline:

        - ``'zero'``: Constant zero baseline, i.e. a simple stacked plot.
        - ``'sym'``:  Symmetric around zero and is sometimes called
          'ThemeRiver'.
        - ``'wiggle'``: Minimizes the sum of the squared slopes.
        - ``'weighted_wiggle'``: Does the same but weights to account for
          size of each layer. It is also called 'Streamgraph'-layout. More
          details can be found at http://leebyron.com/streamgraph/.

    labels : Length N sequence of strings
        Labels to assign to each data series.

    colors : Length N sequence of colors
        A list or tuple of colors. These will be cycled through and used to
        colour the stacked areas.

    **kwargs
        All other keyword arguments are passed to `Axes.fill_between()`.


    Returns
    -------
    list : list of `.PolyCollection`
        A list of `.PolyCollection` instances, one for each element in the
        stacked area plot.
    """

    y = np.row_stack(args)

    labels = iter(labels)
    if colors is not None:
        axes.set_prop_cycle(color=colors)

    # Assume data passed has not been 'stacked', so stack it here.
    # We'll need a float buffer for the upcoming calculations.
    stack = np.cumsum(y, axis=0, dtype=np.promote_types(y.dtype, np.float32))

    if baseline == 'zero':
        first_line = 0.

    elif baseline == 'sym':
        first_line = -np.sum(y, 0) * 0.5
        stack += first_line[None, :]

    elif baseline == 'wiggle':
        m = y.shape[0]
        first_line = (y * (m - 0.5 - np.arange(m)[:, None])).sum(0)
        first_line /= -m
        stack += first_line

    elif baseline == 'weighted_wiggle':
        total = np.sum(y, 0)
        # multiply by 1/total (or zero) to avoid infinities in the division:
        inv_total = np.zeros_like(total)
        mask = total > 0
        inv_total[mask] = 1.0 / total[mask]
        increase = np.hstack((y[:, 0:1], np.diff(y)))
        below_size = total - stack
        below_size += 0.5 * y
        move_up = below_size * inv_total
        move_up[:, 0] = 0.5
        center = (move_up - 0.5) * increase
        center = np.cumsum(center.sum(0))
        first_line = center - 0.5 * total
        stack += first_line

    else:
        errstr = "Baseline method %s not recognised. " % baseline
        errstr += "Expected 'zero', 'sym', 'wiggle' or 'weighted_wiggle'"
        raise ValueError(errstr)

    # Color between x = 0 and the first array.
    color = axes._get_lines.get_next_color()
    coll = axes.fill_between(x,
                             first_line,
                             stack[0, :],
                             facecolor=color,
                             label=next(labels, None),
                             **kwargs)
    coll.sticky_edges.y[:] = [0]
    r = [coll]

    # Color between array i-1 and array i
    for i in range(len(y) - 1):
        color = axes._get_lines.get_next_color()
        r.append(
            axes.fill_between(x,
                              stack[i, :],
                              stack[i + 1, :],
                              facecolor=color,
                              label=next(labels, None),
                              **kwargs))
    return r
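
# A minimal usage sketch (an assumption, not from the source): the helper above
# is called with an explicit Axes object, and baseline='wiggle' selects the
# variant that minimizes the sum of squared slopes described in the docstring.
import numpy as np
import matplotlib.pyplot as plt

x = np.arange(10)
y1, y2, y3 = np.random.rand(3, 10)
fig, ax = plt.subplots()
stackplot(ax, x, y1, y2, y3, labels=('a', 'b', 'c'), baseline='wiggle')
ax.legend(loc='upper left')
plt.show()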
Ejemplo n.º 46
0
        self.sum_data = sum_data
        self.sum_all = sum_all

        return delta

    def predict(self, X):
        delta = self.calc_delta(X)
        return np.argmax(delta, axis=1)


#Test on synthetic data
alpha1 = 10, 1
points1 = np.random.dirichlet(alpha1, size=(100))

alpha2 = 1, 10
points2 = np.random.dirichlet(alpha2, size=(100))

alpha3 = 30, 30
points3 = np.random.dirichlet(alpha3, size=(100))

x = np.row_stack((points1, points2, points3))

#Fit Model

dmm = DirichletMixture(n_clusters=3, max_iter=100, threshold=0.001)
dmm.fit(x)
cluster = dmm.predict(x)
plt.figure()
plt.scatter(x[:, 0], x[:, 1], c=cluster, cmap='plasma')
plt.show()
Ejemplo n.º 47
0
def train(model_name, fold, run=None, resume_epoch=-1):
    global optimizer, scheduler
    model_str = build_model_str(model_name, fold, run)

    model_info = MODELS[model_name]

    checkpoints_dir = f'{BaseConfig.checkpoints_dir}/{model_str}'
    tensorboard_dir = f'{BaseConfig.tensorboard_dir}/{model_str}'
    oof_dir = f'{BaseConfig.oof_dir}/{model_str}'
    os.makedirs(checkpoints_dir, exist_ok=True)
    os.makedirs(tensorboard_dir, exist_ok=True)
    os.makedirs(oof_dir, exist_ok=True)
    print('\n', model_name, '\n')

    logger = SummaryWriter(log_dir=tensorboard_dir)

    model = model_info.factory(**model_info.args)
    model = model.cuda()

    # try:
    #     torchsummary.summary(model, (8, 400, 400))
    #     print('\n', model_name, '\n')
    # except:
    #     raise
    #     pass

    model = torch.nn.DataParallel(model).cuda()
    model = model.cuda()

    dataset_train = dataset_3d_v2.IntracranialDataset(
        csv_file='5fold-rev3.csv',
        folds=[f for f in range(BaseConfig.nb_folds) if f != fold],
        random_slice=True,
        preprocess_func=albumentations.Compose([
            albumentations.ShiftScaleRotate(shift_limit=16. / 256,
                                            scale_limit=0.05,
                                            rotate_limit=30,
                                            interpolation=cv2.INTER_LINEAR,
                                            border_mode=cv2.BORDER_REPLICATE,
                                            p=0.75),
            albumentations.Flip(),
            albumentations.RandomRotate90(),
            albumentations.pytorch.ToTensorV2()
        ]),
        **model_info.dataset_args)

    dataset_valid = dataset_3d_v2.IntracranialDataset(
        csv_file='5fold.csv',
        folds=[fold],
        random_slice=False,
        return_all_slices=True,
        preprocess_func=albumentations.pytorch.ToTensorV2(),
        **model_info.dataset_args)

    model.train()
    if model_info.optimiser == 'radam':
        optimizer = radam.RAdam(model.parameters(), lr=model_info.initial_lr)
    elif model_info.optimiser == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(),
                                    lr=model_info.initial_lr,
                                    momentum=0.95,
                                    nesterov=True)
    elif model_info.optimiser == 'adabound':
        optimizer = adabound.AdaBound(model.parameters(),
                                      lr=model_info.initial_lr,
                                      final_lr=0.1)

    milestones = [32, 48, 64]
    if model_info.optimiser_milestones:
        milestones = model_info.optimiser_milestones

    if model_info.scheduler == 'steps':
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=milestones,
                                                   gamma=0.2)
    elif model_info.scheduler == 'cos_restarts':
        scheduler = CosineAnnealingLRWithRestarts(optimizer=optimizer,
                                                  T_max=8,
                                                  T_mult=1.2)

    print(
        f'Num training images: {len(dataset_train)} validation images: {len(dataset_valid)}'
    )

    if resume_epoch > -1:
        checkpoint = torch.load(f'{checkpoints_dir}/{resume_epoch:03}.pt')
        model.module.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])

    data_loaders = {
        'train':
        DataLoader(dataset_train,
                   num_workers=16,
                   shuffle=True,
                   drop_last=True,
                   batch_size=model_info.batch_size),
        'val':
        DataLoader(dataset_valid, shuffle=False, num_workers=4, batch_size=1)
    }

    class_weights = torch.tensor([1.0, 1.0, 1.0, 1.0, 1.0, 2.0]).cuda()

    def criterium(y_pred, y_true):
        y_pred = y_pred.reshape(-1, 6)
        y_true = y_true.reshape(-1, 6)
        cw = class_weights.repeat(y_pred.shape[0], 1)
        return F.binary_cross_entropy_with_logits(y_pred, y_true, cw)

    # fit new layers first:
    if resume_epoch == -1 and model_info.is_pretrained:
        model.train()
        model.module.freeze_encoder()
        data_loader = data_loaders['train']
        pre_fit_steps = len(dataset_train) // model_info.batch_size // 8
        data_iter = tqdm(enumerate(data_loader), total=pre_fit_steps)
        epoch_loss = []
        initial_optimizer = torch.optim.Adam(model.parameters(), lr=2e-5)
        # initial_optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9)
        for iter_num, data in data_iter:
            if iter_num > pre_fit_steps:
                break
            with torch.set_grad_enabled(True):
                img = data['image'].float().cuda()
                labels = data['labels'].float().cuda()
                pred = model(img)
                loss = criterium(pred, labels)
                # loss.backward()
                (loss / model_info.accumulation_steps).backward()
                if (iter_num + 1) % model_info.accumulation_steps == 0:
                    torch.nn.utils.clip_grad_norm_(model.parameters(), 100.0)
                    initial_optimizer.step()
                    initial_optimizer.zero_grad()
                epoch_loss.append(float(loss))

                data_iter.set_description(
                    f'Loss: Running {np.mean(epoch_loss[-100:]):1.4f} Avg {np.mean(epoch_loss):1.4f}'
                )
        del initial_optimizer
    model.module.unfreeze_encoder()

    phase_period = {'train': 1, 'val': 2}

    for epoch_num in range(resume_epoch + 1, 80):
        for phase in ['train', 'val']:
            if epoch_num % phase_period[phase] == 0:
                model.train(phase == 'train')
                epoch_loss = []
                epoch_labels = []
                epoch_predictions = []
                epoch_sample_paths = []

                if 'on_epoch' in model.module.__dir__():
                    model.module.on_epoch(epoch_num)

                data_loader = data_loaders[phase]
                data_iter = tqdm(enumerate(data_loader),
                                 total=len(data_loader))
                for iter_num, data in data_iter:
                    img = data['image'].float().cuda()
                    labels = data['labels'].float().cuda()

                    with torch.set_grad_enabled(phase == 'train'):
                        pred = model(img)
                        loss = criterium(pred, labels)

                        if phase == 'train':
                            (loss / model_info.accumulation_steps).backward()
                            if (iter_num +
                                    1) % model_info.accumulation_steps == 0:
                                torch.nn.utils.clip_grad_norm_(
                                    model.parameters(), 32.0)
                                optimizer.step()
                                optimizer.zero_grad()

                        epoch_loss.append(float(loss))

                        epoch_labels.append(
                            np.row_stack(labels.detach().cpu().numpy()))
                        epoch_predictions.append(
                            np.row_stack(
                                torch.sigmoid(pred).detach().cpu().numpy()))

                        # print(labels.shape, epoch_labels[-1].shape, pred.shape, epoch_predictions[-1].shape)
                        epoch_sample_paths += data['path']

                    data_iter.set_description(
                        f'{epoch_num} Loss: Running {np.mean(epoch_loss[-100:]):1.4f} Avg {np.mean(epoch_loss):1.4f}'
                    )

                epoch_labels = np.row_stack(epoch_labels)
                epoch_predictions = np.row_stack(epoch_predictions)
                if phase == 'val':
                    # recalculate loss as depth dimension is variable
                    epoch_loss_mean = float(
                        F.binary_cross_entropy(
                            torch.from_numpy(epoch_predictions).cuda(),
                            torch.from_numpy(epoch_labels).cuda(),
                            class_weights.repeat(epoch_labels.shape[0], 1)))
                    print(epoch_loss_mean)
                    logger.add_scalar(f'loss_{phase}', epoch_loss_mean,
                                      epoch_num)
                else:
                    logger.add_scalar(f'loss_{phase}', np.mean(epoch_loss),
                                      epoch_num)
                logger.add_scalar('lr', optimizer.param_groups[0]['lr'],
                                  epoch_num)  # scheduler.get_lr()[0]
                try:
                    log_metrics(logger=logger,
                                phase=phase,
                                epoch_num=epoch_num,
                                y=epoch_labels,
                                y_hat=epoch_predictions)
                except Exception:
                    pass

                if phase == 'val':
                    torch.save(
                        {
                            'epoch': epoch_num,
                            'sample_paths': epoch_sample_paths,
                            'epoch_labels': epoch_labels,
                            'epoch_predictions': epoch_predictions,
                        }, f'{oof_dir}/{epoch_num:03}.pt')

            logger.flush()

            if phase == 'val':
                scheduler.step(epoch=epoch_num)
            else:
                # print(f'{checkpoints_dir}/{epoch_num:03}.pt')
                torch.save(
                    {
                        'epoch': epoch_num,
                        'model_state_dict': model.module.state_dict(),
                        'optimizer_state_dict': optimizer.state_dict(),
                    }, f'{checkpoints_dir}/{epoch_num:03}.pt')
Ejemplo n.º 48
0
    def calc_detector_projection(self):
        h_mat = self.predict_miller_indices(
        )  # get predicted set of miller indices
        h_mat = np.array(h_mat)
        cell_mat = np.array([
            self.xparamdict['a_axis'], self.xparamdict['b_axis'],
            self.xparamdict['c_axis']
        ]).transpose()
        rlp = np.linalg.inv(cell_mat)  # (a*, b*, c*) matrix

        # incident beam vector
        s0 = np.array(self.xparamdict['incident_beam'])
        beam_vector = s0 / np.linalg.norm(s0) / self.xparamdict['wavelength']

        rot_vector = np.array(
            self.xparamdict['gonio_coord'])  # obtained from rotation axis
        rot = np.empty((3, 3), dtype=float)
        rot[:, 1] = rot_vector / np.linalg.norm(rot_vector)
        rot[:, 0] = np.cross(rot[:, 1], beam_vector)
        rot[:, 0] /= np.linalg.norm(rot[:, 0])
        rot[:, 2] = np.cross(rot[:, 0], rot[:, 1])
        # detector coordinate system
        d_mat = np.array([
            self.xparamdict['det_axis1'], self.xparamdict['det_axis2'],
            self.xparamdict['det_axis3']
        ])
        d_mat = d_mat.transpose()

        # 1st predicted spot as vector on Ewald sphere
        p0star = np.dot(h_mat, rlp)
        p0star_gonio = np.dot(p0star,
                              rot)  # same predicted spot on gonio coordinate
        beam_gonio = np.dot(rot.transpose(),
                            beam_vector)  # incident beam on gonio coordinate
        print(beam_gonio.shape)
        p0star_sq_dist = np.sum(
            p0star**2,
            axis=1)  # sq distance of predicted spot from the rotation axis

        pstar_gonio = np.empty(p0star_gonio.shape)  # a 3x3 matrix
        pstar_gonio[:,
                    2] = (-0.5 * p0star_sq_dist -
                          beam_gonio[1] * p0star_gonio[:, 1]) / beam_gonio[2]
        pstar_gonio[:, 1] = p0star_gonio[:, 1]
        pstar_gonio[:,
                    0] = p0star_sq_dist - pstar_gonio[:,
                                                      1]**2 - pstar_gonio[:,
                                                                          2]**2  # sqrt taken at later stage

        sel_index = pstar_gonio[:,
                                0] > 0  # exclude blind region with this criteria, filters the matrices

        h_mat = h_mat[sel_index]
        p0star_gonio = p0star_gonio[sel_index]
        pstar_gonio = pstar_gonio[sel_index]

        pstar_gonio[:, 0] = np.sqrt(pstar_gonio[:, 0])

        projected_hkl = np.empty((0, 3), dtype=int)  # h,k,l as row vector
        position_on_detector = np.empty(
            (0, 4),
            dtype=float)  # Find detector projection, x, y, phi, zeta

        for sign in (+1, -1):
            pstar_gonio[:, 0] *= sign
            phi = np.arctan2((p0star_gonio[:, 2] * pstar_gonio[:, 0] -
                              p0star_gonio[:, 1] * pstar_gonio[:, 2]),
                             (p0star_gonio[:, 1] * pstar_gonio[:, 1] +
                              p0star_gonio[:, 2] * pstar_gonio[:, 2]))

            refl_std = np.cross(pstar_gonio, beam_gonio)
            refl_std /= np.linalg.norm(refl_std, axis=1).reshape(
                refl_std.shape[0], 1)  # convert into column vector
            zeta = refl_std[:, 1]

            Svector = beam_vector + np.dot(pstar_gonio, rot.transpose())
            Svec_on_det = np.dot(Svector, d_mat)
            sel_index = self.xparamdict['detZ'] * Svec_on_det[:, 2] > 0
            Svec_on_det, h_mat, phi, zeta = Svec_on_det[sel_index], h_mat[
                sel_index], phi[sel_index], zeta[sel_index]

            det_pos_x = self.xparamdict['beamX'] + self.xparamdict['detZ']*Svec_on_det[:, 0] / Svec_on_det[:, 2] \
                        / self.xparamdict['pixelsize']

            det_pos_y = self.xparamdict['beamY'] + self.xparamdict['detZ'] * Svec_on_det[:, 1] / Svec_on_det[:, 2] \
                        / self.xparamdict['pixelsize']

            projected_hkl = np.row_stack([projected_hkl, h_mat])
            position_on_detector = np.row_stack([
                position_on_detector,
                np.column_stack([det_pos_x, det_pos_y, phi, zeta])
            ])

        return projected_hkl, position_on_detector
Ejemplo n.º 49
0
def ref_pts(Lico_right, Lico_left, Lico_bot, Lico_up, Lico_top, Lico_side):

    eq_A_right = [-Lico_right[0], 1]
    eq_A_left = [-Lico_left[0], 1]
    eq_A_bot = [-Lico_bot[0], 1]
    eq_A_up = [-Lico_up[0], 1]
    eq_A_top = [-Lico_top[0], 1]
    eq_A_side = [-Lico_side[0], 1]

    eq_b_right = Lico_right[1]
    eq_b_left = Lico_left[1]
    eq_b_bot = Lico_bot[1]
    eq_b_up = Lico_up[1]
    eq_b_top = Lico_top[1]
    eq_b_side = Lico_side[1]

    mat_A_up_left = np.row_stack((eq_A_up, eq_A_left))
    mat_b_up_left = np.row_stack((eq_b_up, eq_b_left))
    mat_A_up_right = np.row_stack((eq_A_up, eq_A_right))
    mat_b_up_right = np.row_stack((eq_b_up, eq_b_right))
    mat_A_up_side = np.row_stack((eq_A_up, eq_A_side))
    mat_b_up_side = np.row_stack((eq_b_up, eq_b_side))
    mat_A_bot_left = np.row_stack((eq_A_bot, eq_A_left))
    mat_b_bot_left = np.row_stack((eq_b_bot, eq_b_left))
    mat_A_bot_right = np.row_stack((eq_A_bot, eq_A_right))
    mat_b_bot_right = np.row_stack((eq_b_bot, eq_b_right))
    mat_A_bot_side = np.row_stack((eq_A_bot, eq_A_side))
    mat_b_bot_side = np.row_stack((eq_b_bot, eq_b_side))
    mat_A_top_side = np.row_stack((eq_A_top, eq_A_side))
    mat_b_top_side = np.row_stack((eq_b_top, eq_b_side))
    mat_A_top_left = np.row_stack((eq_A_top, eq_A_left))
    mat_b_top_left = np.row_stack((eq_b_top, eq_b_left))
    mat_A_top_right = np.row_stack((eq_A_top, eq_A_right))
    mat_b_top_right = np.row_stack((eq_b_top, eq_b_right))

    up_left = np.linalg.solve(mat_A_up_left, mat_b_up_left)
    up_right = np.linalg.solve(mat_A_up_right, mat_b_up_right)
    up_side = np.linalg.solve(mat_A_up_side, mat_b_up_side)
    bot_left = np.linalg.solve(mat_A_bot_left, mat_b_bot_left)
    bot_right = np.linalg.solve(mat_A_bot_right, mat_b_bot_right)
    bot_side = np.linalg.solve(mat_A_bot_side, mat_b_bot_side)
    top_side = np.linalg.solve(mat_A_top_side, mat_b_top_side)
    top_left = np.linalg.solve(mat_A_top_left, mat_b_top_left)
    top_right = np.linalg.solve(mat_A_top_right, mat_b_top_right)

    pt_up_left = (int(up_left[0]), int(up_left[1]))
    pt_up_right = (int(up_right[0]), int(up_right[1]))
    pt_up_side = (int(up_side[0]), int(up_side[1]))
    pt_bot_left = (int(bot_left[0]), int(bot_left[1]))
    pt_bot_right = (int(bot_right[0]), int(bot_right[1]))
    pt_bot_side = (int(bot_side[0]), int(bot_side[1]))
    pt_top_side = (int(top_side[0]), int(top_side[1]))
    pt_top_left = (int(top_left[0]), int(top_left[1]))
    pt_top_right = (int(top_right[0]), int(top_right[1]))

    return pt_up_left, pt_up_right, pt_up_side, pt_bot_left, pt_bot_right, \
           pt_bot_side, pt_top_side, pt_top_left, pt_top_right
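
# A minimal usage sketch (an assumption, not from the source): each Lico_*
# argument appears to be a (slope, intercept) pair describing a line
# y = slope*x + intercept, and the six pairwise non-parallel lines below are
# arbitrary illustrative values.
pts = ref_pts(Lico_right=(2.0, -5.0), Lico_left=(-2.0, 15.0),
              Lico_bot=(0.1, 1.0), Lico_up=(-0.1, 9.0),
              Lico_top=(0.05, 12.0), Lico_side=(3.0, -20.0))
pt_up_left = pts[0]  # integer (x, y) intersection of the 'up' and 'left' lines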
Ejemplo n.º 50
0
def state_data_processing(v3_state, num_processing_steps):
    while len(v3_state) < num_processing_steps + 1:
        v3_state = np.row_stack((v3_state, v3_state[len(v3_state) - 1]))
    return v3_state
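
# A minimal usage sketch (an assumption, not from the source): a 2-row state
# array is padded up to num_processing_steps + 1 rows by repeating its last row.
import numpy as np

v3 = np.array([[1.0, 2.0], [3.0, 4.0]])
padded = state_data_processing(v3, num_processing_steps=3)
print(padded.shape)  # (4, 2): the last two rows are copies of [3.0, 4.0]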
Ejemplo n.º 51
0
def find_corners(img, pos_camera=None, mark_img=True, show_img=False, img_white_keys=None):
    # Convert to HSV color space
    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)

    # Equalize V channel
    img_hsv[:,:,2] = clahe.apply(img_hsv[:,:,2])

    if show_img:
        imshow(np.hstack((img_hsv[:,:,0], img_hsv[:,:,1], img_hsv[:,:,2])), 3)

    # Threshold white keys
    img_w = cv2.inRange(img_hsv, np.array([20, 0, 200]), np.array([160, 60, 255]))
    img_h = cv2.inRange(img_hsv, np.array([20, 0, 0]), np.array([160, 255, 255]))
    img_s = cv2.inRange(img_hsv, np.array([0, 0, 0]), np.array([255, 60, 255]))
    img_v = cv2.inRange(img_hsv, np.array([0, 0, 200]), np.array([255, 255, 255]))
    if show_img:
        imshow(np.hstack((img_h, img_s, img_v)), 3)

    # Fill white keys with watershed
    img_w[:,:10] = 255
    img_w[:,-10:] = 255
    img_w[:200,:] = 255
    img_w[-200:,:] = 255
    cv2.floodFill(img_w, np.zeros((img_w.shape[0]+2, img_w.shape[1]+2), dtype=np.uint8), (0,0), 0)
    if show_img:
        imshow(img_w, 3)
    img_fg = cv2.morphologyEx(img_w, cv2.MORPH_ERODE, np.ones((10, 10)))
    img_bg = cv2.morphologyEx(img_w, cv2.MORPH_DILATE, np.ones((100, 100)))
    img_bg[:,:10] = 0
    img_bg[:,-10:] = 0
    img_bg[:200,:] = 0
    img_bg[-200:,:] = 0
    img_bg = 255 - img_bg
    markers = np.zeros(img_w.shape, dtype=np.int32)
    markers[img_fg > 0] = 1
    markers[img_bg > 0] = 2
    markers = cv2.watershed(img, markers)
    img_w = (255 * (markers == 1)).astype(np.uint8)
    if show_img:
        imshow(img_w, 3)
    cv2.floodFill(img_w, np.zeros((img_w.shape[0]+2, img_w.shape[1]+2), dtype=np.uint8), (0,0), 0)

    # Find orientation of largest connected component
    _, contours, _ = cv2.findContours(img_w, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    areas = np.array([cv2.contourArea(c) for c in contours])
    vx, vy, x, y = cv2.fitLine(contours[areas.argmax()], cv2.DIST_L2, 0, 0.01, 0.01)
    V = np.vstack((vx, vy))

    # Determine camera position from keyboard orientation
    if pos_camera is None:
        if V[0] * V[1] > 0:
            pos_camera = "right"
        else:
            pos_camera = "left"

    # Dilate image with keyboard-aligned kernel and extract largest connected component
    theta = np.arctan2(V[0], V[1])
    kernel = 255 * np.round(scipy.ndimage.rotate(np.ones((150, 2)), theta * 180/np.pi)).astype(np.uint8)
    img_cc = cv2.morphologyEx(img_w, cv2.MORPH_DILATE, kernel)
    _, contours, _ = cv2.findContours(img_cc, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    areas = np.array([cv2.contourArea(c) for c in contours])
    contour = np.squeeze(contours[areas.argmax()], axis=1)
    img_cc.fill(0)
    cv2.drawContours(img_cc, contours, areas.argmax(), 255, -1)
    img_w = 255 * np.logical_and(img_w>0, img_cc>0).astype(np.uint8)

    if show_img:
        imshow(img_cc, 3)

    # Find combined contour of keyboard segmentations
    _, contours, _ = cv2.findContours(img_w, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    img_w.fill(0)
    if mark_img:
        for i in range(len(contours)):
            cv2.drawContours(img, contours, i, (0,0,255), 3)
    for i in range(len(contours)):
        cv2.drawContours(img_w, contours, i, 255, -1)
    areas = np.array([cv2.contourArea(c) for c in contours])
    contours = [np.squeeze(contour, axis=1) for contour in contours]
    contour = np.vstack(contours)
    contour = np.column_stack((contour, np.ones(contour.shape[0])))

    if show_img:
        imshow(img_w, 3)

    # Find corners
    corner_left = contour[contour[:,0].argmin()].astype(np.int32)
    corner_right = contour[contour[:,0].argmax()].astype(np.int32)
    corner_bottom = contour[contour.shape[0]-1-contour[:,1][::-1].argmax()].astype(np.int32)
    corner_top = contour[contour[:,1].argmin()].astype(np.int32)

    # Push up bottom corner to the key's surface
    num_white = 0
    for i in range(40):
        vec_w = img_w[corner_bottom[1]-i,corner_bottom[0]-20:corner_bottom[0]+20]>0
        num_white_next = vec_w.sum()
        if num_white_next - num_white > 3:
            corner_bottom[1] -= i - 1
            break
        num_white = num_white_next

    # Refine corner with subpixel search
    corners = np.row_stack((corner_left[:2], corner_right[:2], corner_bottom[:2], corner_top[:2]))
    criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 100, 0.001)
    corners = cv2.cornerSubPix(img_v,np.float32(corners.astype(np.float64)),(10,10),(-1,-1),criteria)
    corner_left = np.round(np.append(corners[0], 1)).astype(np.int32)
    corner_right = np.round(np.append(corners[1], 1)).astype(np.int32)
    corner_bottom = np.round(np.append(corners[2], 1)).astype(np.int32)
    corner_top = np.round(np.append(corners[3], 1)).astype(np.int32)

    # Determine front and back sides of the keyboard
    if pos_camera == "right":
        corner_back = corner_right
        corner_front = corner_left
        contour_back = img_w.shape[1] - 1 - np.argmax(img_w[corner_top[1]:corner_back[1]+1,::-1]>0, axis=1)
    else:
        corner_back = corner_left
        corner_front = corner_right
        contour_back = np.argmax(img_w[corner_top[1]:corner_back[1]+1]>0, axis=1)

    # Find back contour
    idx = np.logical_and(contour_back<img_w.shape[1]-1, contour_back>0)
    contour_back = np.column_stack((contour_back, \
                                    np.arange(corner_top[1], corner_back[1]+1),
                                    np.ones((contour_back.shape[0],), dtype=np.int32)))
    contour_back = contour_back[idx]
    contour_back_origin = contour_back - corner_back
    cv2.polylines(img, np.int32([contour_back[:,:2]]), False, (0,255,255), 5)
    corner_top = contour_back[contour_back[:,1].argmin()]

    # Rotate line from vertical position until it hits the back contour
    num_hit = 0
    if pos_camera == "right":
        sign_theta = 1
    else:
        sign_theta = -1
    for theta in np.linspace(0,np.pi/2,90):
        line_back = [sign_theta * np.cos(theta), -np.sin(theta), 0]
        num_hit_new = np.sum(np.dot(contour_back_origin, line_back)>0)
        # Stop when the gradient of hit pixels spikes
        if num_hit_new - num_hit > contour_back.shape[0] / 30 and theta > 0:
            break
        num_hit = num_hit_new
    line_back[-1] = -np.dot(corner_back, line_back)

    # Update contour to include only points close to the line
    contour_back = contour_back[np.abs(np.dot(contour_back, line_back))<10,:]

    if mark_img:
        cv2.polylines(img, np.int32([contour_back[:,:2]]), False, (0,255,0), 5)
        dir_line_back = np.array([line_back[1], -line_back[0]])
        points_line_back = np.array([2000, -2000])[:,np.newaxis] * dir_line_back[np.newaxis,:] + corner_back[np.newaxis,:2]
        cv2.line(img, tuple(points_line_back[0].astype(np.int32)), tuple(points_line_back[1].astype(np.int32)), (255,0,255), 5)

    # Fit least squares line to back contour
    # U, S, VT = np.linalg.svd(contour_back[:,:2] - corner_back[:2])
    U, S, VT = np.linalg.svd(contour_back[:,:2] - contour_back[:,:2].mean(axis=0))
    line_back = np.append(VT[-1], 0)
    line_back[-1] = -np.dot(corner_back, line_back)

    if mark_img:
        dir_line_back = np.array([line_back[1], -line_back[0]])
        points_line_back = np.array([2000, -2000])[:,np.newaxis] * dir_line_back[np.newaxis,:] + corner_back[np.newaxis,:2]
        cv2.line(img, tuple(points_line_back[0].astype(np.int32)), tuple(points_line_back[1].astype(np.int32)), (255,0,0), 5)

    # Find intersection between back and top lines
    corner_top_mid = corner_top
    line_top = np.cross(corner_top, corner_front).astype(np.float32)
    line_top /= np.linalg.norm(line_top)
    corner_top = np.cross(line_back, line_top)
    corner_top /= corner_top[-1]
    corner_top = np.round(corner_top).astype(np.int32)

    # Plot corners
    if mark_img:
        cv2.circle(img, tuple(corner_top[:2]), 10, (0,255,0), 3)
        cv2.circle(img, tuple(corner_right[:2]), 10, (255,255,0), 3)
        cv2.circle(img, tuple(corner_bottom[:2]), 10, (255,0,0), 3)
        cv2.circle(img, tuple(corner_left[:2]), 10, (255,0,255), 3)
        cv2.circle(img, tuple(corner_top_mid[:2]), 10, (255,0,255), 3)

    # Collect corners
    if pos_camera == "left":
        corners = np.row_stack((corner_left, corner_top, corner_right, corner_bottom))
    else:
        corners = np.row_stack((corner_top, corner_right, corner_bottom, corner_left))

    if img_white_keys is not None:
        img_white_keys[:,:] = img_w
    return corners, pos_camera
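# --- Hedged illustration (not part of the original source): the two geometric
# tools used above, shown on toy data.  The back line is fit by total least
# squares (the last right-singular vector of the centred points is the line
# normal), and a corner is recovered as the cross product of two lines written
# in homogeneous coordinates.
import numpy as np

pts = np.array([[0.0, 0.2], [1.0, 1.1], [2.0, 1.9], [3.0, 3.0]])
centred = pts - pts.mean(axis=0)
_, _, VT = np.linalg.svd(centred)
normal = VT[-1]                                  # unit normal of the best-fit line
line_a = np.append(normal, -normal.dot(pts.mean(axis=0)))  # [a, b, c] with ax + by + c = 0

line_b = np.array([0.0, 1.0, -2.0])              # the line y = 2
corner = np.cross(line_a, line_b)                # homogeneous intersection point
corner = corner[:2] / corner[2]                  # back to Cartesian coordinates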
x = np.arange(0, len(dis_plt))
plt.figure(figsize=(12, 6))
plt.plot(x, dis_plt, 'g--')
plt.xlabel('Iteration step')
plt.ylabel("Distance")
plt.title("Distance over iterations")
plt.savefig("路程迭代曲线.pdf")
plt.show()

# Plot the optimal route
location_plt = location.copy()
for i in range(0, 30):
    location_plt[i][0] = location[path[i]][0]
    location_plt[i][1] = location[path[i]][1]
row = np.array([location[path[0]][0], location[path[0]][1]])
location_plt = np.row_stack((location_plt, row))
x = location_plt[:, 0]
y = location_plt[:, 1]
plt.figure(figsize=(8, 8))
plt.plot(x, y, 'r-o')
plt.xlabel('Longitude')
plt.ylabel("Latitude")
plt.title("Hebei route map")
m = location[:, 0]  # new plotting coordinates (m, n)
n = location[:, 1]
name = []
name_file = open("河北城市名称.txt", 'r', encoding='UTF-8')  # file of Hebei city names
while True:
    city_name = name_file.readline()
    name.append(city_name)
    if not city_name:
        break
Ejemplo n.º 53
0
Prot = 0
Jp1 = PTrans(Px, Pz1, Prot)[0:2, :]
Jp2 = PTrans(Px, Pz2, Prot)[0:2, :]
Jp = np.vstack((Jp1, Jp2, Jp2, Jp1))

# Substep b: Calculate pusher wrench
# See figure in Table 1 for reference
FP1 = np.array([Fn, -Ft])
FP2 = np.array([Fn, Ft])
Fp = np.hstack((FP1, FP1, FP2, FP2))

W1 = Jp[0:2, :].T.dot(Fp[0:2])
W2 = Jp[2:4, :].T.dot(Fp[2:4])
W3 = Jp[4:6, :].T.dot(Fp[4:6])
W4 = Jp[6:, :].T.dot(Fp[6:])
W = np.row_stack((W1, W2, W3, W4))

# STEP 3: CALCULATE OBJECT VELOCITY
# Calculate Js
Sx = .1
Sz = 0
Srot = 0
Js = PTrans(
    Sx, Sz, Srot
)  # Jacobian that maps v_obj (in object frame) to v_s (in support frame)

# Calculate gravity wrench
w_gravity = np.array([0, m * g, 0])

# Define B, wrench on a unit limit surface
B = np.diag((1, 1, k**(-2)))
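# --- Hedged sketch (assumption; PTrans itself is not shown here): each W_i
# above is a per-contact Jacobian transpose applied to that contact's 2-vector
# force, giving a planar wrench, and the wrenches are then row-stacked.  The
# toy Jacobian below uses one common convention (torque = px*fz - pz*fx) and is
# not claimed to match PTrans.
import numpy as np

def toy_wrench(px, pz, f):
    JpT = np.array([[1.0, 0.0],
                    [0.0, 1.0],
                    [-pz, px]])       # plays the role of Jp_i.T above
    return JpT.dot(f)                 # wrench [fx, fz, tau]

contacts = [(0.05, 0.0), (-0.05, 0.0), (-0.05, 0.0), (0.05, 0.0)]
forces = [np.array([1.0, -0.3])] * 2 + [np.array([1.0, 0.3])] * 2
W_toy = np.row_stack([toy_wrench(px, pz, f)
                      for (px, pz), f in zip(contacts, forces)])   # shape (4, 3)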
Ejemplo n.º 54
0
                              how='left')

print "make all train dataset ........."
print "make train_noWeekend dataset .........."
#train = pd.concat([final_train1, final_train2, final_train3, final_train4], axis=0, ignore_index=True)
train1_noWeekend_matrix = final_train1_noWeekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train2_noWeekend_matrix = final_train2_noWeekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train3_noWeekend_matrix = final_train3_noWeekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train4_noWeekend_matrix = final_train4_noWeekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
#final_train_matrix = train.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
final_train_noWeekend_matrix = np.row_stack(
    (train1_noWeekend_matrix, train2_noWeekend_matrix, train3_noWeekend_matrix,
     train4_noWeekend_matrix))
train_noWeekend_X = final_train_noWeekend_matrix[:, :-1]
train_noWeekend_Y = final_train_noWeekend_matrix[:, -1]
print "make train_Weekend dataset .........."
#train = pd.concat([final_train1, final_train2, final_train3, final_train4], axis=0, ignore_index=True)
train1_Weekend_matrix = final_train1_Weekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train2_Weekend_matrix = final_train2_Weekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train3_Weekend_matrix = final_train3_Weekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
train4_Weekend_matrix = final_train4_Weekend.drop(
    ['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
#final_train_matrix = train.drop(['user_id', 'day_of_week', 'record_date'], axis=1).as_matrix()
final_train_Weekend_matrix = np.row_stack(
    (train1_Weekend_matrix, train2_Weekend_matrix, train3_Weekend_matrix,
     train4_Weekend_matrix))
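# --- Hedged note (not from the original code): DataFrame.as_matrix() was
# removed in pandas 1.0; on current pandas the same matrices can be built with
# .to_numpy() instead, e.g.
# train1_Weekend_matrix = final_train1_Weekend.drop(
#     ['user_id', 'day_of_week', 'record_date'], axis=1).to_numpy()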
Ejemplo n.º 55
0
                    future_reward = sess.run(output, feed_dict={x: temp_Map})

                    reward = reward + np.amax(future_reward) * future_Discount
                    Q_value = np.append(Q_value, reward)

                temp_Map = train_data_element[0:n][0:m]
                temp_Map = np.array(temp_Map)
                temp_Map[curr_Pos[0], curr_Pos[1]] = 2
                #Q_temp = sess.run(output, feed_dict={x:temp_Map})
                #Q_temp = Q_temp[np.array([ACTIONS.index(curr_action)])]
                #Q_value_curr_step = np.append([Q_value_curr_step], [Q_temp])
                temp_Map = np.reshape(temp_Map, [1, n, m, 1])
                train_map = np.append(train_map, temp_Map, axis=0)
                temp = np.zeros([1, 4])
                temp[0, ACTIONS.index(curr_action)] = 1
                action_data = np.row_stack((action_data, temp))

            _, tra_loss = sess.run([train_optimizer, train_loss],
                                   feed_dict={
                                       x: train_map,
                                       action: action_data,
                                       y: Q_value
                                   })
            #print(tra_loss)

            if not is_Terminal:
                curr_Map[next_Pos[0], next_Pos[1]] = 0
                curr_Pos = np.copy(next_Pos)

        temp_ob = sess.run(output, feed_dict={x: ob})
        print(temp_ob)
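# --- Hedged aside (not from the original): the loop above grows the one-hot
# action batch one row at a time with np.row_stack; an equivalent batch can be
# built in a single shot, which avoids repeated reallocation.  ACTIONS and the
# chosen actions below are placeholders.
import numpy as np

ACTIONS = ['up', 'down', 'left', 'right']
chosen = ['up', 'left', 'left', 'down']
action_batch = np.zeros((len(chosen), len(ACTIONS)))
action_batch[np.arange(len(chosen)),
             [ACTIONS.index(a) for a in chosen]] = 1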
Ejemplo n.º 56
0
import scipy
import numpy as np
import itertools
import matplotlib.pyplot as plt

# TODO: Run this cell to generate the data
num_samples = 400
cov = np.array([[1., .7], [.7, 1.]]) * 10
mean_1 = [.1, .1]
mean_2 = [6., .1]

x_class1 = np.random.multivariate_normal(mean_1, cov, num_samples // 2)
x_class2 = np.random.multivariate_normal(mean_2, cov, num_samples // 2)
xy_class1 = np.column_stack((x_class1, np.zeros(num_samples // 2)))
xy_class2 = np.column_stack((x_class2, np.ones(num_samples // 2)))
data_full = np.row_stack([xy_class1, xy_class2])
np.random.shuffle(data_full)
data = data_full[:, :2]
labels = data_full[:, 2]

# TODO: Make a scatterplot for the data points showing the true cluster assignments of each point
plt.scatter(x_class1[:, 0], x_class1[:, 1], marker="x")  # first class, x shape
plt.scatter(x_class2[:, 0], x_class2[:, 1],
            marker="o")  # second class, circle shape
plt.show()


def cost(data, R, Mu):
    N, D = data.shape
    K = Mu.shape[1]
    J = 0
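# --- Hedged sketch (assumption): cost() above is cut off here; a standard
# K-means objective consistent with its signature (R as an (N, K) assignment
# matrix, Mu as a (D, K) matrix of centres) would be
#     J = sum_n sum_k R[n, k] * || data[n] - Mu[:, k] ||^2
# which vectorises as:
def cost_sketch(data, R, Mu):
    diffs = data[:, :, None] - Mu[None, :, :]       # (N, D, K)
    return np.sum(R * np.sum(diffs ** 2, axis=1))   # scalar J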
Ejemplo n.º 57
0
def surf(*args, **kwargs):
    """ surf(..., axesAdjust=True, axes=None)
    
    Shaded surface plot.
    
    Usage
    -----
      * surf(Z) - create a surface using the given image with z coordinates.
      * surf(Z, C) - also supply a texture image to map.
      * surf(X, Y, Z) - give x, y and z coordinates.
      * surf(X, Y, Z, C) - also supply a texture image to map.
    
    Parameters
    ----------
    Z : A MxN 2D array
    X : A length N 1D array, or a MxN 2D array
    Y : A length M 1D array, or a MxN 2D array
    C : A MxN 2D array, or an AxBx3 3D array
        If 2D, C specifies a colormap index for each vertex of Z.  If
        3D, C gives an RGB image to be mapped over Z.  In this case, the
        sizes of C and Z need not match.
    
    Keyword arguments
    -----------------
    axesAdjust : bool
        If axesAdjust==True, this function will call axes.SetLimits(), and set
        the camera type to 3D. If daspectAuto has not been set yet,
        it is set to False.
    axes : Axes instance
        Display the image in this axes, or the current axes if not given.
    
    Also see grid()
    
    """
    def checkZ(z):
        if z.ndim != 2:
            raise ValueError('Z must be a 2D array.')

    # Parse input
    if len(args) == 1:
        z = np.asanyarray(args[0])
        checkZ(z)
        y = np.arange(z.shape[0])
        x = np.arange(z.shape[1])
        c = None
    elif len(args) == 2:
        z, c = map(np.asanyarray, args)
        checkZ(z)
        y = np.arange(z.shape[0])
        x = np.arange(z.shape[1])
    elif len(args) == 3:
        x, y, z = map(np.asanyarray, args)
        checkZ(z)
        c = None
    elif len(args) == 4:
        x, y, z, c = map(np.asanyarray, args)
        checkZ(z)
    else:
        raise ValueError(
            'Invalid number of arguments.  Must pass 1-4 arguments.')

    # Parse kwargs
    axes = None
    if 'axes' in kwargs:
        axes = kwargs['axes']
    axesAdjust = True
    if 'axesAdjust' in kwargs:
        axesAdjust = kwargs['axesAdjust']

    # Set y vertices
    if y.shape == (z.shape[0], ):
        y = y.reshape(z.shape[0], 1).repeat(z.shape[1], axis=1)
    elif y.shape != z.shape:
        raise ValueError(
            'Y must have same shape as Z, or be 1D with length of rows of Z.')

    # Set x vertices
    if x.shape == (z.shape[1], ):
        x = x.reshape(1, z.shape[1]).repeat(z.shape[0], axis=0)
    elif x.shape != z.shape:
        raise ValueError(
            'X must have same shape as Z, or be 1D with length of columns of Z.'
        )

    # Set vertices
    vertices = np.column_stack((x.ravel(), y.ravel(), z.ravel()))

    # Create texcoords
    if c is None or c.shape == z.shape:
        # No texture -> colormap on the z value
        # Grayscale texture -> color mapping
        texcoords = (c if c is not None else z).ravel()

    elif c.ndim == 3:
        # color texture -> use texture mapping
        U, V = np.meshgrid(np.linspace(0, 1, z.shape[1]),
                           np.linspace(0, 1, z.shape[0]))
        texcoords = np.column_stack((U.ravel(), V.ravel()))

    else:
        raise ValueError('C must have same shape as Z, or be 3D array.')

    # Create faces
    w = z.shape[1]
    i = np.arange(z.shape[0] - 1)
    faces = np.row_stack(
        [np.column_stack((j + w * i, j + 1 + w * i, j + 1 + w * (i + 1),
                          j + w * (i + 1))) for j in range(w - 1)])

    ## Visualize

    # Get axes
    if axes is None:
        axes = vv.gca()

    # Create mesh
    m = vv.Mesh(axes, vertices, faces, values=texcoords, verticesPerFace=4)

    # Should we apply a texture?
    if c is not None and c.ndim == 3:
        m.SetTexture(c)
    else:
        m.clim = m.clim  # trigger correct limits

    # Adjust axes
    if axesAdjust:
        if axes.daspectAuto is None:
            axes.daspectAuto = False
        axes.cameraType = '3d'
        axes.SetLimits()

    # Return
    axes.Draw()
    return m
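# --- Hedged usage sketch (assumes the visvis package, which the function above
# targets through vv.gca() / vv.Mesh):
# import numpy as np
# import visvis as vv
# app = vv.use()
# zz = np.fromfunction(lambda i, j: np.sin(i / 10.0) * np.cos(j / 10.0), (60, 80))
# m = surf(zz)      # z only; x and y default to index grids
# app.Run()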
import numpy as np
from matplotlib import pyplot as plt

fnx = lambda: np.random.randint(5, 50, 10)
y = np.row_stack((fnx(), fnx(), fnx()))
x = np.arange(10)

y1, y2, y3 = fnx(), fnx(), fnx()

fig, ax = plt.subplots()
ax.stackplot(x, y)
plt.show()

fig, ax = plt.subplots()
ax.stackplot(x, y1, y2, y3)
plt.show()
Ejemplo n.º 59
0
def findSplitAndThresh(resp, Xsrc, Xtar, F2, splitevaltype, lamdda, minChild,
                       kappa):
    F1 = resp.shape[0]
    rerr = float("inf")
    fid = 1
    thr = float("inf")
    Ft = Xtar.shape[0]
    Fs = Xsrc.shape[0]
    #special treatment for random tree growing
    if splitevaltype == 'random':
        F1 = 1
        F2 = 1
    for s in range(F1):
        #get thresholds to evaluate
        if F2 == 0:
            tthrs = np.array([[np.median(resp[s, :])]])  # keep 2D so len() / tthrs[t][0] below work
        else:
            respmin = min(resp[s, :])
            respmax = max(resp[s, :])
            tthrs = np.zeros((F2 + 1, 1), np.float32)
            tthrs[0:-1] = np.random.rand(
                F2, 1) * 0.95 * (respmax - respmin) + respmin
            tthrs[-1] = np.median(resp[s, :])
        for t in range(len(tthrs)):
            tthr = tthrs[t][0]
            left = resp[s, :] < tthr
            right = ~left
            nl = len(np.nonzero(left)[0])
            nr = len(np.nonzero(right)[0])
            '''
            nl=0
            nr=0
            for i in range(len(resp)):
                if left[i]==1:
                    nl=nl+1
                else:
                    nr=nr+1
            '''
            #            left=left.astype('int')
            #            right=right.astype('int')
            if nl < minChild or nr < minChild:
                continue
#  mat0 = dataSet[nonzero(dataSet[:,feature] > value)[0],:]  # nonzero drops rows whose feature value is missing
#  mat1 = dataSet[nonzero(dataSet[:,feature] <= value)[0],:]
            XsrcL = Xsrc[:, left]
            XsrcR = Xsrc[:, right]
            XtarL = Xtar[:, left]
            XtarR = Xtar[:, right]
            if splitevaltype == 'random':
                trerr = 0
            elif splitevaltype == 'banlanced':
                trerr = np.square(nl - nr)
            elif splitevaltype == 'variance':
                trerrL = sum(np.var(XtarL, axis=1, ddof=1)) / Ft
                trerrR = sum(np.var(XtarR, axis=1, ddof=1)) / Ft
                if kappa > 0:
                    trerrLsrc = sum(np.var(XsrcL, axis=1, ddof=1)) / Fs
                    trerrRsrc = sum(np.var(XsrcR, axis=1, ddof=1)) / Fs
                    trerrL = (trerrL + kappa * trerrLsrc) / 2
                    trerrR = (trerrR + kappa * trerrRsrc) / 2
                trerr = (nl * trerrL + nr * trerrR) / (nl + nr)
            elif splitevaltype == 'reconstruction':
                XsrcL = np.row_stack((XsrcL, np.ones(XsrcL.shape[1])))
                TL = XtarL.dot(
                    np.linalg.lstsq(((XsrcL.dot(XsrcL.T)) +
                                     lamdda * np.eye(XsrcL.shape[0])),
                                    XsrcL)[0].T)
                XsrcR = np.row_stack((XsrcR, np.ones(XsrcR.shape[1])))
                TR = XtarR.dot(
                    np.linalg.lstsq(((XsrcR.dot(XsrcR.T)) +
                                     lamdda * np.eye(XsrcR.shape[0])),
                                    XsrcR)[0].T)
                trerrL = np.sqrt(sum(sum((XtarL - TL.dot(XsrcL))**2)) / nl)
                trerrR = np.sqrt(sum(sum((XtarR - TR.dot(XsrcR))**2)) / nr)
                if kappa > 0:
                    trerrLsrc = sum(np.var(XsrcL, axis=1, ddof=1)) / Fs
                    trerrRsrc = sum(np.var(XsrcR, axis=1, ddof=1)) / Fs
                    trerrL = (trerrL + kappa * trerrLsrc) / 2
                    trerrR = (trerrR + kappa * trerrRsrc) / 2
                trerr = (nl * trerrL + nr * trerrR) / (nl + nr)
            else:
                raise ValueError('Unknown split evaluation type')
            if trerr < rerr:
                rerr = trerr
                thr = tthr
                fid = s
    return fid, thr, rerr
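# --- Hedged illustration (not from the original): the 'reconstruction' branch
# above fits a ridge-regularised least-squares map T from augmented source
# features to target features and scores a split by the RMS reconstruction
# error; that step in isolation looks like this (names are placeholders):
import numpy as np

def ridge_map(Xsrc, Xtar, lam):
    Xa = np.row_stack((Xsrc, np.ones(Xsrc.shape[1])))        # append a bias row
    A = Xa.dot(Xa.T) + lam * np.eye(Xa.shape[0])
    T = Xtar.dot(np.linalg.lstsq(A, Xa, rcond=None)[0].T)    # (Ft, Fs + 1)
    return T, Xa

Xsrc_demo = np.random.rand(3, 20)
Xtar_demo = np.random.rand(2, 20)
T, Xa = ridge_map(Xsrc_demo, Xtar_demo, 1e-2)
rms_err = np.sqrt(((Xtar_demo - T.dot(Xa)) ** 2).sum() / Xsrc_demo.shape[1])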
Ejemplo n.º 60
0
def get_surface_distance(surf,
                         dlabel=None,
                         medial=None,
                         medial_labels=None,
                         drop_labels=None,
                         use_wb=False,
                         n_proc=1,
                         verbose=False):
    """
    Calculates surface distance for vertices in `surf`

    Parameters
    ----------
    surf : str or os.PathLike
        Path to surface file on which to calculate distance
    dlabel : str or os.PathLike, optional
        Path to file with parcel labels for provided `surf`. If provided will
        calculate parcel-parcel distances instead of vertex distances. Default:
        None
    medial : str or os.PathLike, optional
        Path to file containing labels for vertices corresponding to medial
        wall. If provided (and `use_wb=False`), will disallow calculation of
        surface distance along the medial wall. Default: None
    medial_labels : list of str, optional
        List of parcel names that comprise the medial wall and through which
        travel should be disallowed (if `dlabel` provided and `use_wb=False`).
        Will supersede `medial` if both are provided. Default: None
    drop_labels : list of str, optional
        List of parcel names that should be dropped from the final distance
        matrix (if `dlabel` is provided). If not specified, will ignore all
        parcels commonly used to reference the medial wall (e.g., 'unknown',
        'corpuscallosum', '???', 'Background+FreeSurfer_Defined_Medial_Wall').
        Default: None
    use_wb : bool, optional
        Whether to use calls to `wb_command -surface-geodesic-distance` for
        computation of the distance matrix; this will involve significant disk
        I/O. If False, all computations will be done in memory using the
        :func:`scipy.sparse.csgraph.dijkstra` function. Default: False
    n_proc : int, optional
        Number of processors to use for parallelizing distance calculation. If
        negative, will use max available processors plus 1 minus the specified
        number. Default: 1 (no parallelization)
    verbose : bool, optional
        Whether to print a progress bar while distances are calculated. Default:
        False

    Returns
    -------
    distance : (N, N) numpy.ndarray
        Surface distance between vertices/parcels on `surf`

    Notes
    -----
    The distance matrix computed with `use_wb=False` will have slightly lower
    values than when `use_wb=True` due to known estimation errors. These will
    be fixed at a later date.
    """

    if drop_labels is None:
        drop_labels = [
            'unknown', 'corpuscallosum', '???',
            'Background+FreeSurfer_Defined_Medial_Wall'
        ]
    if medial_labels is None:
        medial_labels = []

    # convert to paths, if necessary
    surf, dlabel, medial = pathify(surf), pathify(dlabel), pathify(medial)

    # wb_command requires gifti files so convert if we receive e.g., a FS file
    # also return a "remove" flag that will be used to delete the temporary
    # gifti file at the end of this process
    surf, remove_surf = _surf_to_gii(surf)
    n_vert = len(nib.load(surf).agg_data()[0])

    # check if dlabel / medial wall files were provided
    labels, mask = None, np.zeros(n_vert, dtype=bool)
    dlabel, remove_dlabel = _labels_to_gii(dlabel, surf)
    medial, remove_medial = _labels_to_gii(medial, surf)

    # get data from dlabel / medial wall files if they provided
    if dlabel is not None:
        labels = nib.load(dlabel).agg_data()
    if medial is not None:
        mask = nib.load(medial).agg_data().astype(bool)

    # determine which parcels should be ignored (if they exist)
    delete, uniq_labels = [], np.unique(labels)
    if (len(drop_labels) > 0 or len(medial_labels) > 0) and labels is not None:
        # get vertex labels
        n_labels = len(uniq_labels)

        # get parcel labels and reverse dict to (name : label)
        table = nib.load(dlabel).labeltable.get_labels_as_dict()
        table = {v: k for k, v in table.items()}

        # generate dict mapping label to array indices (since labels don't
        # necessarily start at 0 / aren't contiguous)
        idx = dict(zip(uniq_labels, np.arange(n_labels)))

        # get indices of parcel distance matrix to be deleted
        for lab in set(table) & set(drop_labels):
            lab = table.get(lab)
            delete.append(idx.get(lab))

        for lab in set(table) & set(medial_labels):
            lab = table.get(lab)
            mask[labels == lab] = True

    # calculate distance from each vertex to all other parcels
    parallel = Parallel(n_jobs=n_proc, max_nbytes=None)
    if use_wb:
        parfunc = delayed(_get_workbench_distance)
        graph = surf
    else:
        parfunc = delayed(_get_graph_distance)
        graph = make_surf_graph(*nib.load(surf).agg_data(), mask=mask)
    bar = trange(n_vert, verbose=verbose, desc='Calculating distances')
    dist = np.row_stack(parallel(parfunc(n, graph, labels) for n in bar))

    # average distance for all vertices within a parcel + set diagonal to 0
    if labels is not None:
        dist = np.row_stack(
            [dist[labels == lab].mean(axis=0) for lab in uniq_labels])
        dist[np.diag_indices_from(dist)] = 0

    # remove distances for parcels that we aren't interested in
    if len(delete) > 0:
        for axis in range(2):
            dist = np.delete(dist, delete, axis=axis)

    # if we created gifti files then remove them
    if remove_surf:
        surf.unlink()
    if remove_dlabel:
        dlabel.unlink()
    if remove_medial:
        medial.unlink()

    return dist
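# --- Hedged usage sketch (the file names below are hypothetical placeholders,
# not taken from the original):
# dist = get_surface_distance('lh.pial.surf.gii',
#                             dlabel='lh.aparc.label.gii',
#                             medial='lh.medialwall.label.gii',
#                             use_wb=False, n_proc=4)
# # dist is vertex-by-vertex, or parcel-by-parcel when `dlabel` is given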