Example #1
    def _projTGraph(self,g):

        sentry = TH1AddDirSentry()

        y      = numpy.ndarray( (g.GetN(),),dtype=numpy.double, buffer=g.GetY() )
        ysplit = numpy.hsplit(y,self._nfold)
        p_y    = numpy.sum(ysplit,axis=self._pax)

        eyh = numpy.ndarray( (g.GetN(),),dtype=numpy.double, buffer=g.GetEYhigh() )
        eyh2_split = numpy.hsplit( (eyh**2) ,self._nfold)
        p_eyh      = numpy.sqrt( numpy.sum(eyh2_split,axis=self._pax) )

        eyl = numpy.ndarray( (g.GetN(),),dtype=numpy.double, buffer=g.GetEYlow() )
        eyl2_split = numpy.hsplit( (eyl**2) ,self._nfold)
        p_eyl      = numpy.sqrt( numpy.sum(eyl2_split,axis=self._pax) )

        x = array.array('d',[0]*self._nbins)
        exh = array.array('d',[0]*self._nbins)
        exl = array.array('d',[0]*self._nbins)
        for i in xrange(self._nbins):
            x[i]            = (self._axdef[i+1]+self._axdef[i])/2.
            exh[i] = exl[i] = (self._axdef[i+1]-self._axdef[i])/2.

        p_g = ROOT.TGraphAsymmErrors(self._nbins, x, p_y, exl, exh, p_eyl, p_eyh)
        p_g.SetNameTitle('%s_proj_%s' % (g.GetName(),self._proj),'%s proj %s' % (g.GetTitle(),self._proj))

        ROOT.TAttLine.Copy(g,p_g)
        ROOT.TAttFill.Copy(g,p_g)
        ROOT.TAttMarker.Copy(g,p_g)
        return p_g
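# The numpy idiom at the heart of _projTGraph (values summed across folds,
# asymmetric errors combined in quadrature) can be tried without ROOT.
# A minimal sketch; the fold count and projection axis here are made up:
import numpy as np

nfold, pax = 3, 0
y = np.arange(12, dtype=float)      # 12 points -> 3 folds of 4 bins each
eyh = np.full(12, 0.5)              # per-point upper errors

p_y = np.sum(np.hsplit(y, nfold), axis=pax)                    # summed values
p_eyh = np.sqrt(np.sum(np.hsplit(eyh ** 2, nfold), axis=pax))  # quadrature

print(p_y)    # [12. 15. 18. 21.]
print(p_eyh)  # [0.8660254 0.8660254 0.8660254 0.8660254]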
Example #2
def extract_t5(filename, t):
    '''Extract data at timeslice t from the propagator.

    Converts the raw data into complex numbers.
    '''
    # Loop structure: s c r/i s c t z y x.
    # Pluck out bits at t. Store in tmp.
    tmp = []
    for i in range(2*nc*nc*ns*ns):
        with open(filename, "rb") as f:  # Inefficient?
            f.seek(i*8*nt*V + 8*t*V, 0)
            data = np.fromfile(f, dtype='>d', count=V)
            tmp.append(data)
    tmp = ar(tmp, dtype=np.float).reshape((-1,))
    # Convert to complex numbers.  Store in ctmp.
    ctmp_re = []
    ctmp_im = []
    for chunk in np.hsplit(tmp, ns*nc):
        chunk_re, chunk_im = np.hsplit(chunk, 2)
        ctmp_re.append(chunk_re)
        ctmp_im.append(chunk_im)
    ctmp_re = ar(ctmp_re, dtype=np.float).reshape((-1,))
    ctmp_im = ar(ctmp_im, dtype=np.float).reshape((-1,))
    ctmp = ctmp_re + 1j*ctmp_im
        
    return ctmp
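# The "Inefficient?" comment above points at reopening the file on every pass.
# A sketch of the same seek/read pattern with a single open; the lattice
# constants (nc, ns, nt, V) are assumptions standing in for the module globals:
import numpy as np

def extract_t5_single_open(filename, t, nc=3, ns=4, nt=64, V=16 ** 3):
    tmp = []
    with open(filename, "rb") as f:
        for i in range(2 * nc * nc * ns * ns):
            f.seek(i * 8 * nt * V + 8 * t * V, 0)
            tmp.append(np.fromfile(f, dtype='>d', count=V))
    return np.asarray(tmp).reshape(-1)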
Example #3
def ostu_algorithm(img, blursize=3):
    blur = cv2.GaussianBlur(img, (blursize, blursize), 0)
    hist = cv2.calcHist([blur], [0], None, [256], [0, 256])
    hist_norm = hist.ravel() / hist.max()
    Q = hist_norm.cumsum()
    bins = np.arange(256)
    fn_min = np.inf
    thresh = -1
    for i in xrange(1, 256):
        p1, p2 = np.hsplit(hist_norm, [i])  # probabilities
        q1, q2 = Q[i], Q[255] - Q[i]  # cum sum of classes
        b1, b2 = np.hsplit(bins, [i])  # weights

        if q1 == 0:
            continue
        if q2 == 0:
            continue
        m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
        v1, v2 = np.sum(((b1 - m1) ** 2) * p1) / q1, np.sum(((b2 - m2) ** 2) * p2) / q2
        fn = v1 * q1 + v2 * q2

        if fn < fn_min:
            fn_min = fn
            thresh = i
    _, otsu = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)

    return otsu
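# Note that the loop's hand-computed thresh is only there for comparison; the
# returned image comes from OpenCV's built-in Otsu. A usage sketch on a
# synthetic bimodal image:
import numpy as np
import cv2

np.random.seed(1)
img = np.concatenate([np.random.normal(60, 10, (64, 64)),
                      np.random.normal(180, 10, (64, 64))]).clip(0, 255).astype(np.uint8)
binary = ostu_algorithm(img)
print(np.unique(binary))   # [  0 255]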
Example #4
def plotDensityPCA(args, pcaMatrix):
    outName = args.outputFileName
    outName = outName + '_PCAdensity_' + str(NBINS)
    pcaXCoord = numpy.hsplit(pcaMatrix, [1])[0]
    pcaXCoord = pcaXCoord.reshape(1, len(pcaXCoord))[0]
    pcaXCoord = pcaXCoord.real
    pcaYCoord = numpy.hsplit(pcaMatrix, [1])[1]
    pcaYCoord = pcaYCoord.reshape(1, len(pcaYCoord))[0]
    pcaYCoord = pcaYCoord.real
    #fig2.set_title('Density plot of main PCA components')
    H, edgeX, edgeY = numpy.histogram2d(pcaXCoord, pcaYCoord, bins = NBINS)
    H = numpy.rot90(H)
    H = numpy.flipud(H)
    # mask zeroes
    maskedH = numpy.ma.masked_where(H==0, H)
    #Plot the histogram
    fig2 = matplotlib.pyplot.figure()
    plt.pcolormesh(edgeX, edgeY, maskedH)
    plt.xlabel('Principal Component 1')
    plt.ylabel('Principal Component 2') 
    cbar = plt.colorbar()
    cbar.ax.set_ylabel('Counts')
    fig2.savefig(outName, format='png')
    #show()
    return fig2
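# A sketch of the column extraction at the top of plotDensityPCA: hsplit at
# [1] separates the first PCA component from the rest, and .real drops any
# imaginary residue. The input matrix here is hypothetical:
import numpy as np

pcaMatrix = np.random.randn(100, 2) + 0j     # pretend complex PCA output
pcaXCoord = np.hsplit(pcaMatrix, [1])[0].reshape(-1).real
pcaYCoord = np.hsplit(pcaMatrix, [1])[1].reshape(-1).real
print(pcaXCoord.shape, pcaYCoord.shape)      # (100,) (100,)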
Example #5
    def fit(self, features, classes):
        # TODO implement the above algorithm to build a random forest of decision trees
        self.trees = [] #list of root nodes
        esr = self.example_subsample_rate
        asr = self.attr_subsample_rate
        means = np.mean(features, axis=0)

        for i in xrange(self.num_trees):
            # a) Subsample the examples provided (with replacement) in accordance with a example subsampling rate.
            features_np = np.asarray(features)
            classes_np = np.asarray([classes]).transpose()
            merged = np.concatenate((features_np, classes_np), axis=1)
            merged_rand = np.random.permutation(merged)
            merged_rand_esr = merged_rand[0:int(esr*len(features_np))]
            split_rand = np.hsplit(merged_rand_esr, np.array([4, 6]))
            features_split = split_rand[0]
            classes_split = split_rand[1]

            # b) From the sample in a), choose attributes at random to learn on, in accordance with an attribute subsampling rate.
            num_attrs = int(asr * features_np.shape[1])
            rand_attrs = np.random.choice(features_np.shape[1], num_attrs, replace=False)  # sample attribute indices without replacement
            attr_features_split = features_split[:, rand_attrs]

            # c) Fit a decision tree to the subsample of data we've chosen (to a certain depth)
            

            leaf0 = DecisionNode(None, None, None, class_label=0)
            leaf1 = DecisionNode(None, None, None, class_label=1)
            nodeA1 = DecisionNode(leaf1, leaf0, lambda x: 1 if x[0]<means[0] else 0)
            root_node = nodeA1

            self.trees.append(root_node)
Example #6
def calculate(db):
    rep = "1"
    i = 0
    while (rep == "y" or rep == "Y" or rep == "1"):
        search = str(raw_input("Substance: "))
        k = i
    
        with open(db,'r') as dbfile:
            for line in dbfile:
                if (search == line.split()[0]):
                    mass = float(input("Mass (g): "))
                    prop = mass/100*numpy.array([line.split()[1:]], dtype=float)
                    i = i + 1
                    if (rep == "1"):
                        propall = prop
                    else:
                        propall = numpy.vstack([propall, prop])
        
        if (i == k):
            print ("Substance "+search+" not found!")
        rep = str(raw_input("Repeat [y/n]: "))
    
    if (i != 0):
        prot = sum(numpy.hsplit(propall,  (0, 1))[1])
        lip = sum(numpy.hsplit(propall,  (1, 2))[1])
        carb = sum(numpy.hsplit(propall,  (2, 3))[1])
        ccal = sum(numpy.hsplit(propall,  (3, 4))[1])
        glyc = sum(numpy.hsplit(propall,  (4, 5))[1])

        print ("\nProteins: "+str(round(prot, 2))+"\nLipids: "+str(round(lip, 2))+"\nCarbohydrates: "+str(round(carb, 2))+"\nccal: "+str(round(ccal, 2))+"\nGlycemic index: "+str(round(glyc, 2)))
Example #7
def run(name, source, quick=False):
    print time.asctime(time.localtime()), "Filling BDT Branches"  

    branch_names = joblib.load("pickle/variables.pkl")
    
    if quick == True:
        signal = joblib.load('pickle/all_signalq.pkl')   
        clf = joblib.load("pickle/" + name + "quick.pkl")     
    else:
        signal = joblib.load('pickle/all_signal.pkl')
        clf = joblib.load("pickle/" + name + ".pkl")

    # predict and write probability of each MC event being signal
    bdt_MC_predicted = clf.predict_proba(signal)
    bdt_MC_predicted.dtype = [('GradBoost_prob', np.float64)]
    array2root((np.hsplit(bdt_MC_predicted,2)[1]), "/net/storage03/data/users/dlafferty/NTuples/SignalMC/2012/combined/Bs2phiphi_MC_2012_combined_corrected_TupleA_BDT.root", "DecayTree")

    # predict and write probability of every data event being signal
    all_data = root2array("/net/storage03/data/users/dlafferty/NTuples/data/2012/combined/Bs2phiphi_data_2012_corrected_TupleA_BDT.root", "DecayTree", branch_names)
    all_data = rec2array(all_data)

    bdt_data_predicted = clf.predict_proba(all_data)
    bdt_data_predicted.dtype = [('GradBoost_prob', np.float64)]
    array2root((np.hsplit(bdt_data_predicted,2)[1]), "/net/storage03/data/users/dlafferty/NTuples/data/2012/combined/Bs2phiphi_data_2012_corrected_TupleA_BDT.root", "DecayTree")
        
    print time.asctime(time.localtime()), "Branches Filled!"
Example #8
def main():
   global args
   args = parse()
   # Run through desc stats for file 1
   prefix = args.files[0].split(".")[0] 
   fn = prefix+".txt"
   outfile = prefix+"_results.txt"
   data = np.genfromtxt(fn)
#   ligindex = [4,5,6,7,8,27,28,29,30,31,32,33,34] # Can be used to analyse a subset of ligands
#   data = data[ligindex]
#   print data
   data = np.hsplit(data,[1]) # Split expt values apart
   expt = data[0]
   comput = data[1]
   anova = np.copy(comput) # Beginning of array for anova
   origdata = np.hstack((expt,np.reshape(np.mean(comput,axis=1),(47,1)))) # Reshape then stack, now each ligand has an entry of length 2
   write_data(outfile,origdata)
   # Now do stats and t test for others
   for argfile in args.files[1:]:
      prefix = argfile.split(".")[0] 
      fn = prefix+".txt"
      outfile = prefix+"_results_vs_file1.txt"
      data = np.genfromtxt(fn)
#      data = data[ligindex]
      data = np.hsplit(data,[1]) # Split expt values apart
      expt = data[0]
      comput = data[1]
      anova = np.hstack((anova,comput)) # Add data to Anova array
      currdata = np.hstack((expt,np.reshape(np.mean(comput,axis=1),(47,1)))) # Reshape then stack, now each ligand has an entry of length 2
      write_data(outfile,currdata)
      t_test_diffs(origdata,currdata,outfile)
   # Now do Anova      
   calc_anova(anova,"Anovas.txt")
Example #9
 def logistic_test(self, X, Y, train_results, predict_with_intercept=True,
                   predict_with_fixed_effects=True, use_prior_beta_split=True):
     
     training_betas = train_results.params
     print training_betas
     
     # please add fixed effects BEFORE intercept, for now! 
     if self.fixed_effects_set:
         if not predict_with_fixed_effects:
             if use_prior_beta_split:
                 print np.shape(X), self.prior_beta_split, len(training_betas)
                 X = np.hsplit(X, [len(self.subject_indices.keys())])[1]
                 training_betas = training_betas[self.prior_beta_split:]
                 print np.shape(X), len(training_betas)
             else:
                 X = np.hsplit(X, [len(self.subject_indices.keys())])[1]
                 training_betas = training_betas[len(self.subject_indices.keys()):]
     
     
     if self.intercept_set:
         if not predict_with_intercept:
             X = np.hsplit(X, [1])[1]  # split off the intercept column
             training_betas = training_betas[1:]
             
     
     test_eta = np.dot(X, training_betas)
     test_p = np.exp(test_eta) / (1. + np.exp(test_eta))
     test_predict = (test_p > 0.5)
     return (Y == test_predict).sum()*1. / Y.shape[0]
Example #10
	def __init__(self,data=list(),Lambda=.1, gamma =.5, theta=None ):
	# SVM Class
	#
	# @param data		[Nxd] array of observations where N is the number of observations and d is the dimensionality of the abstract space
	# @param Lambda		Regularizer to control Smoothness / Accuracy.  Preliminary experimental results show the range 0-1 controls this parameter.
	# @param gamma		List of gamma values which define the kernel smoothness
	
		try:
			self.N,self.d = data.shape
		except ValueError:
			self.N,self.d = (len(data),1)
			self.X = data.reshape([ self.N, self.d ])
		else:
			self.X = data

		self.Lambda = Lambda
		self.gamma = gamma
		
		self.t = np.hsplit(self.X,[1])[0]
		self.offset = np.tile( np.hsplit(self.X,[1])[0], len(theta) )
		self.theta = np.repeat( np.array(theta), self.N )
		
		self.D = self._K( self.X.reshape([self.N,1,self.d]) - self.X.T.reshape([1,self.N,self.d]) )
		self.S = np.array( [ [ subset(self.X,self.D, t, theta ) for t in self.t ] for theta in self.theta ] ).flatten()
		
		self.SV = None			# X value array of SV
		self.NSV = None			# cardinality of SV
		self.alpha = None			# the full weight array for all observations
		self.beta = None			# weight array for SV
		self.K = None				# precomputed kernel matrix
		
		self._compute()
Example #11
    def initialize(self, cloud_file=""):
        """Configure the cloud information.

        This function gets the appropriate database file and creates the cloud information
        from it. The default behavior is to use the module stored database. However, an
        alternate database file can be provided. The alternate database file needs to have a
        table called *Cloud* with the following columns:

        cloudId
            int : A unique index for each cloud entry.
        c_date
            int : The time (units=seconds) since the start of the simulation for the cloud observation.
        cloud
            float : The cloud coverage in 8ths of the sky.

        Parameters
        ----------
        cloud_file : str, optional
            The full path to an alternate cloud database.
        """
        if cloud_file != "":
            self.cloud_db = cloud_file
        else:
            self.cloud_db = os.path.join(os.path.dirname(__file__), self.CLOUD_DB)

        with sqlite3.connect(self.cloud_db) as conn:
            cur = conn.cursor()
            query = "select c_date, cloud from Cloud;"
            cur.execute(query)
            results = numpy.array(cur.fetchall())
            self.cloud_dates = numpy.hsplit(results, 2)[0].flatten()
            self.cloud_values = numpy.hsplit(results, 2)[1].flatten()
            cur.close()
Example #12
    def compute_candidate_connections(self, paf, cand_a, cand_b, img_len, params):
        candidate_connections = []
        for joint_a in cand_a:
            for joint_b in cand_b:  # each joint is an (x, y) coordinate
                vector = joint_b[:2] - joint_a[:2]
                norm = np.linalg.norm(vector)
                if norm == 0:
                    continue

                ys = np.linspace(joint_a[1], joint_b[1], num=params['n_integ_points'])
                xs = np.linspace(joint_a[0], joint_b[0], num=params['n_integ_points'])
                integ_points = np.stack([ys, xs]).T.round().astype('i')  # points on the segment joining joint_a and joint_b: [[y1, x1], [y2, x2], ...]
                paf_in_edge = np.hstack([paf[0][np.hsplit(integ_points, 2)], paf[1][np.hsplit(integ_points, 2)]])
                unit_vector = vector / norm
                inner_products = np.dot(paf_in_edge, unit_vector)

                integ_value = inner_products.sum() / len(inner_products)
                # penalize when the length of vector exceeds the reference value
                integ_value_with_dist_prior = integ_value + min(params['limb_length_ratio'] * img_len / norm - params['length_penalty_value'], 0)

                n_valid_points = sum(inner_products > params['inner_product_thresh'])
                if n_valid_points > params['n_integ_points_thresh'] and integ_value_with_dist_prior > 0:
                    candidate_connections.append([int(joint_a[3]), int(joint_b[3]), integ_value_with_dist_prior])
        candidate_connections = sorted(candidate_connections, key=lambda x: x[2], reverse=True)
        return candidate_connections
Example #13
    def ChangeSize(self,n_nodes):
        self.masses.resize((1,n_nodes),refcheck=False)
        #self.masses=np.resize(self.masses,(1,n_nodes))
        #self.masses[0][-1] #bug in resize??
        self.initDisp.resize((1,n_nodes),refcheck=False)
        self.initVel.resize((1,n_nodes),refcheck=False)
        #self.initDisp=np.resize(self.initDisp,(1,n_nodes))
        #self.initVel=np.resize(self.initVel,(1,n_nodes))
        

        if n_nodes>self.n_nodes:
            #Take care of 2D array manipulation
            delta=n_nodes-self.n_nodes
            hor=np.zeros((self.n_nodes,delta))
            ver=np.zeros((delta,n_nodes))
            self.springs=np.vstack((np.hstack((self.springs,hor)),ver))
            self.dampers=np.vstack((np.hstack((self.dampers,hor)),ver))
            # Take care of displacement and forces list
            print self.n_nodes,n_nodes
            for i in range(0,n_nodes-self.n_nodes):
                #print i
                self.displacements.append(-1)
                self.forces.append(-1)
            #addArray=[0 for x in range(self.syst.n_nodes,n_nodes)]
        elif n_nodes<self.n_nodes:
            self.springs=np.hsplit(np.vsplit(self.springs,(n_nodes,n_nodes))[0],(n_nodes,n_nodes))[0]
            self.dampers=np.hsplit(np.vsplit(self.dampers,(n_nodes,n_nodes))[0],(n_nodes,n_nodes))[0]
            self.displacements=self.displacements[0:n_nodes]
            self.forces=self.forces[0:n_nodes]
        self.n_nodes=n_nodes
Example #14
def zero_directions(zero_vec, tf, e=0.0001):
    """
    Parameters: zero_vec => a vector containing all the transmission zeros of a system
                tf       => the transfer function G(s) of the system
                e        => this avoids possible divide by zero errors in G(z)
    Returns:    zero_dir => zero directions in the form:
                            (zero, input direction, output direction)
    Notes: this method is going to give dubious answers if the function G has pole zero cancellation...
    """
    zero_dir = []
    for z in zero_vec:
        num, den = cn.tfdata(tf)
        rows, cols = np.shape(num)

        G = np.empty(shape=(rows, cols))

        for x in range(rows):
            for y in range(cols):
                top = np.polyval(num[x][y], z)
                bot = np.polyval(den[x][y], z)
                if bot == 0.0:
                    bot = e

                entry = float(top) / bot
                G[x][y] = entry

        U, S, V = np.linalg.svd(G)
        V = np.transpose(np.conjugate(V))
        u_rows, u_cols = np.shape(U)
        v_rows, v_cols = np.shape(V)
        yz = np.hsplit(U, u_cols)[-1]
        uz = np.hsplit(V, v_cols)[-1]
        zero_dir.append((z, uz, yz))
    return zero_dir
Example #15
def scale_up(a, x=2, y=2, num_z=None):
    """Scale the input array repeating the array values up by the
    x and y factors.

    Requires:
    --------
    a : array
        An ndarray, 1D arrays will be upcast to 2D
    x, y : numbers
        Factors to scale the array in x (col) and y (row).  Scale factors
        must be at least 1
    num_z : number
        For 3D, produces the 3rd dimension, ie. if num_z = 3 with the
        defaults, you will get an array with shape=(3, 6, 6).  If
        num_z != None or 0, then the options are 'repeat', 'random'.
        With 'repeat' the extras are kept the same and you can add random
        values to particular slices of the 3rd dimension, or multiply them.

    Returns:
    -------
    >>> a = np.array([[0, 1, 2], [3, 4, 5]])
    >>> b = scale_up(a, x=2, y=2)
    array([[0, 0, 1, 1, 2, 2],
           [0, 0, 1, 1, 2, 2],
           [3, 3, 4, 4, 5, 5],
           [3, 3, 4, 4, 5, 5]])

    Notes:
    -----
    >>> a = np.arange(2*2).reshape(2,2)
    array([[0, 1],
           [2, 3]])
    >>> f_(scale_up(a, x=2, y=2, num_z=2))
    Array... shape (3, 4, 4), ndim 3, not masked
    0, 0, 1, 1    0, 0, 1, 1    0, 0, 1, 1
    0, 0, 1, 1    0, 0, 1, 1    0, 0, 1, 1
    2, 2, 3, 3    2, 2, 3, 3    2, 2, 3, 3
    2, 2, 3, 3    2, 2, 3, 3    2, 2, 3, 3
    sub (0)       sub (1)       sub (2)

    """
    if (x < 1) or (y < 1):
        print("x or y scale < 1... \n{}".format(scale_up.__doc__))
        return None
    a = np.atleast_2d(a)
    z0 = np.tile(a.repeat(x), y)  # repeat for x, then tile
    z1 = np.hsplit(z0, y)         # split into y parts horizontally
    z2 = np.vstack(z1)            # stack them vertically
    if a.shape[0] > 1:            # if there are more, repeat
        z3 = np.hsplit(z2, a.shape[0])
        z3 = np.vstack(z3)
    else:
        z3 = np.vstack(z2)
    if num_z not in (0, None):
        d = [z3]
        for i in range(num_z):
            d.append(z3)
        z3 = np.dstack(d)
        z3 = np.rollaxis(z3, 2, 0)
    return z3
Example #16
    def internal_processing(self, X, y, X_test):
        """
        """  
        Xs = np.hsplit(X, 5)
        Xts = np.hsplit(X_test, 5)
        Xts_cal = []
        
        for i in range(len(Xs)):           
            Xts_cal.append(calibrate(Xs[i], y, Xts[i]))
         
        XX_test = np.hstack(Xts_cal)   
        
        ec = EC(n_preds=5)
        ec.fit(X, y)
        y_ens = ec.predict_proba(XX_test)
#        y_pred = ec.predict_proba(X_test)
        
        #validation
        yv = ec.predict_proba(X)
        print 'Weights: %s' %(ec.w)
        print 'Validation log-loss: %s' %(logloss_mc(y, yv))
        
        cc = CalibratedClassifierCV(base_estimator=EC(n_preds=5), 
                                    method='isotonic', cv=10)
                                    
        cc.fit(X, y)
        y_cal = cc.predict_proba(XX_test)
        
        y_pred = (y_ens + y_cal)/2.
         
        return y_pred       
Пример #17
0
def valid():
    rows = np.vsplit(puzzle, 9)
    cols = np.hsplit(puzzle, 9)
    grids = [grid for h in np.hsplit(puzzle, 3) for grid in np.vsplit(h, 3)]

    units = rows + cols + grids
    return all(np.max(np.bincount(unit[unit != 0])) == 1 for unit in units)
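# A quick self-check for valid(): it reads the 9x9 `puzzle` from the enclosing
# scope, so define one next to it. The grid below is the classic shift-pattern
# construction, which is a legal solution:
import numpy as np

puzzle = np.array([[(i * 3 + i // 3 + j) % 9 + 1 for j in range(9)]
                   for i in range(9)])
print(valid())                # True

puzzle[0, 0] = puzzle[0, 1]   # introduce a duplicate in row 0
print(valid())                # False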
Пример #18
0
def cube_array_search(k_face_array,k_faces):
    """
    Find the row indices (of k_face_array) corresponding to the
    cubes stored in the rows of k_faces.
    It is assumed that the rows of k_face_array are sorted in
    lexicographical order.

    Example:

      k_face_array = array([[0,0,0],[0,0,1],[0,1,0],[1,0,1]])
      k_faces = array([[0,1,0],[0,0,1]])
      cube_array_search(k_face_array,k_faces)

    Returns:

      array([2,1])

    """
    if rank(k_face_array) != 2 or rank(k_faces) != 2:
        raise ValueError,'expected rank 2 arrays'

    if k_face_array.shape[1] != k_faces.shape[1]:
        raise ValueError,'number of columns must agree'

    # a dense array used to lookup k_face_array row indices 
    lookup_grid_dimensions = k_face_array.max(axis=0) + 1
    
    lookup_grid = empty(lookup_grid_dimensions,dtype=k_faces.dtype)
    lookup_grid[:] = -1
    lookup_grid[hsplit(k_face_array,k_face_array.shape[1])] = arange(k_face_array.shape[0],dtype=k_faces.dtype).reshape((-1,1))
    row_indices = lookup_grid[hsplit(k_faces,k_faces.shape[1])].reshape((-1))

    return row_indices
Пример #19
0
def readdat():
    with open("hw2_lssvm_all.dat", "r") as f:
        data = np.array([[float(i.strip()) for i in line.split()] for line in f.readlines()])
        X = np.hsplit(data, [len(data[0])-1, len(data)])[0]
        y = np.hsplit(data, [len(data[0])-1, len(data)])[1].reshape(np.shape(X)[0])

    return X, y
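# The double split above works because a split index past the last column just
# produces an empty trailing piece, so [0] and [1] are the feature block and
# the label column. A single split at the last column does the same thing:
import numpy as np

data = np.arange(12.0).reshape(3, 4)          # 3 samples, label in last column
X, y = np.hsplit(data, [data.shape[1] - 1])
y = y.reshape(np.shape(X)[0])
print(X.shape, y.shape)                       # (3, 3) (3,)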
Пример #20
0
 def __init__(self, train_file="./data/traindata.txt", test_file="./data/testdata.txt"):
     self.train = np.hsplit(np.loadtxt(train_file), np.array([0, 9]))
     self.test = np.hsplit(np.loadtxt(test_file), np.array([0, 9]))
     self.train_sample = self.train[1]
     self.train_label = 2 * self.train[2] - 3
     self.test_sample = self.test[1]
     self.test_label = 2 * self.test[2] - 3
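# What the [0, 9] split above produces: an empty leading block, columns 0-8
# (the samples), and the remaining columns (the labels). The mapping 2*x - 3
# then sends labels {1, 2} to {-1, +1}, assuming 1/2-coded labels in the files.
# A shape sketch:
import numpy as np

data = np.arange(20.0).reshape(2, 10)
parts = np.hsplit(data, np.array([0, 9]))
print([p.shape for p in parts])   # [(2, 0), (2, 9), (2, 1)]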
Example #21
def test(training_file, test_file, default_kernel="rbf"):
    print "testing..."
    sys.stdout.write("%s:training... " % (strftime("%Y-%m-%d %H:%M:%S", gmtime())))
    sys.stdout.flush()
    classifier = SVMClassifier(training_file, default_kernel)
    print "(%s) DONE." % (strftime("%Y-%m-%d %H:%M:%S", gmtime()))
    # train_classifier(training_file)

    my_data = genfromtxt(test_file, delimiter="\t", skip_header=0)
    n_col = my_data.shape[1]
    n_features = n_col - 1  # assuming that the last column
    # contains the outputs
    # for testing
    X = preprocessing.scale(np.hsplit(my_data, [n_features, n_col])[0])
    Y = np.squeeze(np.asarray(np.hsplit(my_data, [n_features, n_col])[1]))

    predictions = classifier.predict(X)

    # compute classification accuracy
    if np.unique(Y).size == 2:
        # auc and roc for binary classification
        fpr, tpr, thresholds = metrics.roc_curve(Y, predictions)
        print "auc/roc report: "
        print fpr, tpr, metrics.auc(fpr, tpr), thresholds
        print "full classification report: "
        print metrics.classification_report(Y, predictions)
        print "report for the rarest class: "
        print metrics.classification_report(Y, predictions, labels=[1])
    else:
        # precision for multi-class (results between 0-1)
        print "precision score: " + str(metrics.precision_score(Y, predictions, None, None, average="weighted"))
        print "full classification report: "
        print metrics.classification_report(Y, predictions)
        print "report for the rarest class: "
        print metrics.classification_report(Y, predictions, labels=[1])
Example #22
def Encoding(data, general_matrix=None):
    encoder = LabelBinarizer()
    count = 0
    # encoding
    for i in range(data.shape[1]):
        if type(data[0, i]) == str:
            count += 1
            col = data[:, i]
            unique = np.unique(col if general_matrix is None else general_matrix[:, i])

            try:
                encoder.fit(unique)
            except:
                pass

            new_col = encoder.transform(col)

            # split at i and i + 1
            before, removed, after = np.hsplit(data, [i, i + 1])
            # concatenate
            data = np.concatenate((before, new_col, after), axis=1)
            if general_matrix is not None:
                before, removed, after = np.hsplit(general_matrix, [i, i + 1])
                general_matrix = np.concatenate((before, encoder.transform(general_matrix[:, i]), after), axis=1)

    print "count : %d" % count
    # return data
    return data
Example #23
def cross_validate(xmat, ymat, dimensions):
    stacked = numpy.hstack((xmat, ymat))
    #numpy.random.shuffle(stacked)
    array = numpy.vsplit(stacked, 2)
    training_set = numpy.hsplit(array[0], 2)
    testing_set = numpy.hsplit(array[1], 2)
    train_x = training_set[0]
    train_y = training_set[1]
    test_x = testing_set[0]
    test_y = testing_set[1]
    count = 0
    testoverall = 0
    trainoverall = 0
    test_array = numpy.ones(dimensions)
    train_array = numpy.ones(dimensions)
    best_fit_funcs_array = []
    while count < dimensions:
        print "Calculating dimension: " + str(count)
        best_fit_func = best_fit(k_dimensionify(train_x, count), train_y)
        test = mean_error(test_x, test_y, best_fit_func)
        train = mean_error(train_x, train_y, best_fit_func)
        print "Test  Mean Error: " + str(test)
        print "Train Mean Error: " + str(train)
        test_array[count] = test
        train_array[count] = train
        best_fit_funcs_array.append(best_fit_func)
        count += 1
        testoverall += test
        trainoverall += train
    test_min = test_array.argmin(axis=0)
    best_fit_func = best_fit_funcs_array[test_min]
    return [test_array, train_array, best_fit_func]
Example #24
def otsuthresh(hist):
    #http://docs.opencv.org/master/d7/d4d/tutorial_py_thresholding.html
    # find normalized_histogram, and its cumulative distribution function
    hist_norm = old_div(hist.astype("float").ravel(),hist.max())
    Q = hist_norm.cumsum()

    bins = np.arange(len(hist_norm))

    fn_min = np.inf
    thresh = -1

    for i in range(1,len(hist_norm)):
        p1,p2 = np.hsplit(hist_norm,[i]) # probabilities
        q1,q2 = Q[i],Q[len(hist_norm)-1]-Q[i] # cum sum of classes
        b1,b2 = np.hsplit(bins,[i]) # weights
        if q1 == 0 or q2 == 0: # skip empty classes to avoid division by zero
            continue

        # finding means and variances
        m1,m2 = old_div(np.sum(p1*b1),q1), old_div(np.sum(p2*b2),q2)
        v1,v2 = old_div(np.sum(((b1-m1)**2)*p1),q1),old_div(np.sum(((b2-m2)**2)*p2),q2)

        # calculates the minimization function
        fn = v1*q1 + v2*q2
        if fn < fn_min:
            fn_min = fn
            thresh = i

    return thresh
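# Usage sketch for otsuthresh: build a 256-bin histogram of a synthetic
# bimodal image (np.histogram standing in for cv2.calcHist) and recover a
# threshold between the two modes:
import numpy as np

np.random.seed(0)
img = np.concatenate([np.random.normal(60, 10, 5000),
                      np.random.normal(180, 10, 5000)]).clip(0, 255).astype(np.uint8)
hist, _ = np.histogram(img, bins=256, range=(0, 256))
print(otsuthresh(hist))   # somewhere around 120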
Example #25
def phiSub(Q, k1, k2):
    """
    Calculate initial vector for any subset.

    Parameters
    ----------
    Q : ndarray
        The Q (rate) matrix to be analysed.
    k1, k2 : int
        State indices bounding the subset.

    Returns
    -------
    phi : ndarray, shape (kA)
    Q22c : ndarray
    """

    u = np.ones((k2 - k1 + 1, 1))
    p = pinf(Q)
    p1, p2, p3 = np.hsplit(p,(k1, k2+1))
    p1c = np.hstack((p1, p3))

    #Q = Q.copy()
    Q1, Q2, Q3 = np.hsplit(Q,(k1, k2+1))
    Q21, Q22, Q23 = np.hsplit(Q2.transpose(),(k1, k2+1))
    Q22c = Q22.copy()
    Q12 = np.vstack((Q21.transpose(), Q23.transpose()))

    nom = np.dot(p1c, Q12)
    denom = np.dot(nom,u)
    phi = nom / denom
    return phi, Q22c
Example #26
    def embedPayload(self, payload, override=False):
        #check the type
        if not isinstance(payload, Payload):
            raise TypeError

        #check the size of carrier larger than paypload
        xml_list = payload.xml.split('\n')
        pay_info = xml_list[1].split('"')
        pay_type = pay_info[1]
        row,column = pay_info[3].split(',')
        if(pay_type =='Color'):
            pay_total = int(row)*int(column)*3
        else:
            pay_total = int(row)*int(column)
        #check size for carrier
        if(len(self.img.shape)==3):
            row_ca,column_ca,_= self.img.shape
            ca_total = int(row_ca)*int(column_ca)*3
        else:
            row_ca,column_ca = self.img.shape
            ca_total = int(row_ca)*int(column_ca)
        if(pay_total*8 > ca_total):
            raise ValueError

        if(override is False):
            if(self.payloadExists()):
                raise Exception


        #embed process
        #row , column are for payload, pay_type is for payload
        xml_toembed = [ord(c) for c in payload.xml]

        #carrier flat
        if(len(self.img.shape)==3):
            redlist = (self.img[:,:,0])
            greenlist = (self.img[:,:,1])
            bluelist = (self.img[:,:,2])
            whole_list = numpy.concatenate((redlist,greenlist,bluelist))
            whole_list = numpy.concatenate(whole_list)  # flatten row by row
        else:
            whole_list = self.img.flatten()
        xml_toembed = numpy.asarray(xml_toembed, dtype=numpy.uint8)
        unpack = numpy.unpackbits(xml_toembed,axis=0)
        unpack = numpy.append(unpack, whole_list[len(unpack):])
        new_list = numpy.copy(whole_list)
        new_list.fill(~1)
        whole_list = numpy.bitwise_and(whole_list,new_list)
        whole_list = numpy.bitwise_or(whole_list[:len(unpack)], unpack)
        if(len(self.img.shape)==3):
            a,b,c = numpy.hsplit(whole_list,3)
            whole_list = numpy.dstack((a,b,c))
            d=numpy.hsplit(whole_list,row_ca)
            whole_list = numpy.vstack(d)
        else:
            d=numpy.hsplit(whole_list,row_ca)
            whole_list = numpy.vstack(d)
        return whole_list
Example #27
 def _split(self, X, Xs):
     indices = np.cumsum(self.input_dims)
     X_split = np.hsplit(X, indices)
     if Xs is not None:
         Xs_split = np.hsplit(Xs, indices)
     else:
         Xs_split = [None] * len(X_split)
     return X_split, Xs_split
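# Note the indices from np.cumsum include the total width, so hsplit returns
# a trailing zero-width piece. A sketch with hypothetical input_dims = [2, 3]:
import numpy as np

X = np.arange(10.0).reshape(2, 5)
pieces = np.hsplit(X, np.cumsum([2, 3]))      # split at columns [2, 5]
print([p.shape for p in pieces])              # [(2, 2), (2, 3), (2, 0)]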
Example #28
def create_dataset(name, test, train):
    training_set = load(train, converters[name])
    testing_set = load(test, converters[name])
    train_x, train_y = np.hsplit(training_set, [training_set[0].size-1])
    test_x, test_y = np.hsplit(testing_set, [testing_set[0].size-1])
    # this splits the dataset on the last column, so your label must
    # be the last column in the dataset
    return train_x, train_y, test_x, test_y
Example #29
 def from_image(cls, image):
     assert cls.check_image_shape(image.shape[1], image.shape[0]) == 0
     # decompose 3x2 mosaic into cube map faces
     (pos, neg) = np.vsplit(image, 2)
     (xp, yp, zp) = np.hsplit(pos, 3)
     (xn, yn, zn) = np.hsplit(neg, 3)
     array = np.stack([xp, xn, yp, yn, zp, zn])
     return CubeMap(array)
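# The 3x2 mosaic layout from_image expects can be checked with a toy array
# (2x2 faces here; only the split/stack bookkeeping is exercised, not CubeMap):
import numpy as np

faces = np.arange(6)[:, None, None] * np.ones((2, 2), dtype=int)  # six labeled faces
image = np.vstack([np.hstack(faces[:3]), np.hstack(faces[3:])])   # shape (4, 6)

pos, neg = np.vsplit(image, 2)
xp, yp, zp = np.hsplit(pos, 3)
xn, yn, zn = np.hsplit(neg, 3)
print(np.stack([xp, xn, yp, yn, zp, zn]).shape)   # (6, 2, 2)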
Example #30
def import_file_table(  path_name_ext, 
                        has_vert_axis=0, 
                        has_hor_axis=0, 
                        transpose=0):
    """
    This function imports a file that has a table view with two axis (so a time 
        and a frequency axis, for example). An example is the 2D setup. The 
        alternative is a list view, as used in the 3D setup.
    INPUT:
        path_name_ext: the path, filename and extension (extension and existence will be checked)
        has_vert_axis=1: this will remove the vertical axis from the output
        has_hor_axis=1: this will remove the horizontal axis from the output
        transpose=1: you can transpose the data. 
        IMPORTANT: the has_vert_axis and has_hor_axis will work on the data as it is in the file. So if you have a file with the frequencies on the first row and you transpose it, you still have to set has_hor_axis=1.
         
    OUTPUT:
        data: the data
        hor_axis: what was on the horizontal axis of the file, independent of transpose
        vert_axis: what was on the vertical axis of the file, independent of transpose
        If an axis is missing, it will give False.
        If the file was not found or something went wrong, data will be False
    
    """

    try:
        path_name_ext = check_for_extension(path_name_ext, extension=".dat")
        
        data = np.loadtxt(path_name_ext)
        
        if transpose == 1:
            data = data.T
            has_vert_axis, has_hor_axis = has_hor_axis, has_vert_axis

        if has_hor_axis == 1:
            hor_axis, data = np.vsplit(data, [1])
            hor_axis = hor_axis[0]
        else:
            hor_axis = False
            
        if has_vert_axis == 1:
            vert_axis, data = np.hsplit(data, [1])
            vert_axis = (vert_axis.T)[0]     # to make it a list, instead of a list with 1D-list
            
            if has_hor_axis == 1:
                dump, hor_axis = np.hsplit(hor_axis, [1])
        else:
            vert_axis = False
            
        if transpose == 1:
            hor_axis, vert_axis = vert_axis, hor_axis
            
    except IOError:
        print("ERROR (imports, import_file_table): Unable to load file", path_name_ext)
        data = False
        hor_axis = False
        vert_axis = False
        
    return data, hor_axis, vert_axis
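# Round trip for import_file_table: write a small table whose first row is the
# horizontal axis and first column the vertical axis, then read it back.
# (Assumes the module's check_for_extension helper accepts an existing path.)
import os, tempfile
import numpy as np

table = np.array([[0.0, 10.0, 20.0],
                  [1.0,  0.1,  0.2],
                  [2.0,  0.3,  0.4]])
path = os.path.join(tempfile.mkdtemp(), "demo.dat")
np.savetxt(path, table)

data, hor_axis, vert_axis = import_file_table(path, has_vert_axis=1, has_hor_axis=1)
print(hor_axis)    # [10. 20.]
print(vert_axis)   # [1. 2.]
print(data)        # [[0.1 0.2] [0.3 0.4]]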
Example #31
        pixels_alpha_list = []
        for p in pixels_list:
            pixels_alpha_list.append(p[3])

        x1, y1, x2, y2 = 0, 0, 0, 0

        if not 0 in pixels_alpha_list:
            print("该图片不是存在透明底色的png格式图片")

        else:
            pixels_alpha_numpy_array = numpy.array(pixels_alpha_list).reshape(
                img.height, img.width)

            # split by column
            columns_list = numpy.hsplit(pixels_alpha_numpy_array, img.width)
            columns_transparent_list = [0 for i in range(img.height)]
            # from left to right
            for i, arr in enumerate(columns_list):
                if not (arr.flatten() == columns_transparent_list).all():
                    x1 = i
                    break
            # from right to left
            for i, arr in enumerate(reversed(columns_list)):
                if not (arr.flatten() == columns_transparent_list).all():
                    x2 = img.width - i
                    break

            # split by row
            rows_list = numpy.vsplit(pixels_alpha_numpy_array, img.height)
            rows_transparent_list = numpy.array([0 for i in range(img.width)])
Example #32
    # numpy.split(ary, indices_or_sections, axis): splits an array into subarrays along a given axis
    # # ary: the input array to be split
    # # indices_or_sections: an integer gives the number of equal-sized subarrays to create; a 1-D array gives the points at which to split
    # # axis: defaults to 0
    a = np.arange(9)
    # split the array into three equal-sized subarrays
    b = np.split(a, 3)
    print(b, "\n")
    # split the array at the positions given in a 1-D array:
    b = np.split(a, [4, 7])
    print(b, "\n")

    # numpy.hsplit is a special case of split() with axis=1: a horizontal split, whatever the dimensions of the input array
    a = np.arange(16).reshape(4, 4)
    # horizontal split
    b = np.hsplit(a, 2)
    print(b, '\n')

    # numpy.vsplit is a special case of split() with axis=0: a vertical split, whatever the dimensions of the input array
    # vertical split
    b = np.vsplit(a, 2)
    print(b, "\n")
    # adding/removing elements
    # numpy.resize(arr, shape): returns a new array of the given size; if the new size is larger than the original, it contains repeated copies of the original's elements
    # # arr: the input array to resize
    # # shape: the new shape of the returned array
    a = np.array([[1, 2, 3], [4, 5, 6]])
    b = np.resize(a, (3, 2))
    print(b, '\n')
    b = np.resize(a, (3, 3))
    print(b, '\n')
Example #33
    def CountingCC(im_in):
        # Threshold: set values above 120 to 0 and values at or below 120 to 255 (inverse binary).
        th, im_th = cv2.threshold(im_in, 120, 255, cv2.THRESH_BINARY_INV)

        def CC(img):
            nlabels, labels, stats, centroids = cv2.connectedComponentsWithStats(
                img)
            label_hue = np.uint8(179 * labels / np.max(labels))
            blank_ch = 255 * np.ones_like(label_hue)
            labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
            labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
            labeled_img[label_hue == 0] = 0
            return labeled_img, nlabels, labels, stats, centroids

        # fixing the image
        kernel = np.ones((5, 5), np.uint8)
        erosion = cv2.erode(im_th, kernel, iterations=3)
        dilation = cv2.dilate(erosion, kernel, iterations=2)
        components, nlabels, labels, stats, centroids = CC(dilation)

        # creating the matrices
        a = np.hsplit(stats, 5)
        horizontal = a[2]
        vertical = a[3]
        area = a[4]
        b = np.hsplit(centroids, 2)
        x_centr = b[0]
        y_centr = b[1]
        horizontalNEW = np.zeros(nlabels)
        verticalNEW = np.zeros(nlabels)
        TotalAreaNEW = np.zeros(nlabels)
        NEW_dimensions = np.zeros((nlabels, 6))

        # Logic check if something is DROPLET or NOT
        d = 0
        droplet_counter = 0
        Not_Droplet = np.empty(nlabels, dtype=object)
        for i in range(nlabels):
            d = ((horizontal[i] + vertical[i]) / 2)
            d1 = 0.785 * d * d
            if abs(area[i] -
                   (d1)) > 6000 or horizontal[i] < 20 or vertical[i] < 20:
                Not_Droplet[i] = "NOT a droplet"
            else:
                Not_Droplet[i] = "ok"
                droplet_counter = droplet_counter + 1

        # building the new final dimensions matrix
        for row in range(nlabels):
            for column in range(6):  # NEW_dimensions has 6 columns
                if column == 0:
                    NEW_dimensions[row, column] = (row + 1)
                elif column == 1:
                    NEW_dimensions[row, column] = x_centr[row]
                elif column == 2:
                    NEW_dimensions[row, column] = y_centr[row]
                elif column == 3:
                    if horizontal[row] < 100:
                        NEW_dimensions[row, column] = horizontal[row] + 20
                    else:
                        NEW_dimensions[row, column] = horizontal[row] + 40
                elif column == 4:
                    if vertical[row] < 100:
                        NEW_dimensions[row, column] = vertical[row] + 20
                    else:
                        NEW_dimensions[row, column] = vertical[row] + 40
                elif column == 5:
                    NEW_dimensions[row, column] = (
                        (NEW_dimensions[row][3]) +
                        (NEW_dimensions[row][4])) * 3.14 * 0.25 * (
                            (NEW_dimensions[row][3]) +
                            (NEW_dimensions[row][4]))
            column = column + 1
        row = row + 1
        plt.show()

        # here we have to build the surface area difference
        TotalArea_Frame = 956771  # i am not sure about this number for this image - but we dont care about it now
        TotalArea_Droplets = 0
        TotalArea_Background = 0
        d3 = 0
        droplet_counter_2 = 0
        # Not_Droplet = np.empty(nlabels, dtype=object)
        for i in range(nlabels):
            d3 = ((horizontal[i] + vertical[i]) / 2)
            d4 = 0.785 * d3 * d3
            if abs(area[i] -
                   (d4)) > 2000 or horizontal[i] < 10 or vertical[i] < 10:
                pass
            else:
                droplet_counter_2 = droplet_counter_2 + 1
                TotalArea_Droplets = int(TotalArea_Droplets +
                                         (NEW_dimensions[i][5]))

        TotalArea_Background = TotalArea_Frame - TotalArea_Droplets
        print(f'The total area is : {TotalArea_Frame}. '
              f' // The droplets area is: {TotalArea_Droplets}. '
              f' // The free area is : {TotalArea_Background}.'
              f' // The droplets measured here are : {droplet_counter_2}')

        # here we draw the circles, the boxes and the numbers
        XCENTER = []
        r = []

        YCENTER = []
        image = components
        i = 0
        out = image.copy()
        for row in range(1, nlabels, 1):
            for column in range(5):
                if Not_Droplet[row] == "ok":
                    # print(Not_Droplet[row])
                    XCENTER.append((int(x_centr[row])))
                    YCENTER.append((int(y_centr[row])))
                    X = XCENTER[i]
                    Y = YCENTER[i]
                    cv2.rectangle(out, (int(X) - 3, int(Y) - 3),
                                  (int(X) + 3, int(Y) + 3), (0, 0, 0))
                    r.append(
                        (math.sqrt(NEW_dimensions[row][5] * 0.31830988618) *
                         0.5))
                    P = r[i]
                    cv2.circle(out, (int(X), int(Y)), int(P), (255, 255, 0, 4))
                    cv2.putText(out, ('%d' % (row + 1)), (int(X), int(Y)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (255, 255, 255),
                                2)
                    i = i + 1
                else:
                    pass

            column = column + 1

        row = row + 1
        cv2.putText(out, ('%d droplets' % droplet_counter), (5, 30),
                    cv2.FONT_ITALIC, 1.2, (220, 220, 220), 2)

        # here we will build the MatrixA

        # 1st column: Average rate of growth of each droplet in 2 minutes
        # to find the average growth you need the area and the centroid of each droplet
        # DONE!!! 2nd column: Average number of droplets in 2 minutes
        # DONE!!! 3rd column: Average  surface area of empty background in 2 minutes
        MatrixA = np.zeros((nlabels, 3))
        for row in range(nlabels):
            for column in range(0, 3, 1):
                if column == 0:
                    MatrixA[row, column] = 1
                elif column == 1:
                    MatrixA[row, column] = droplet_counter
                elif column == 2:
                    MatrixA[row, column] = TotalArea_Background
            column = column + 1
        row = row + 1

        # save the new MatrixA to a csv file
        # mypath = '/Users/georgedamoulakis/PycharmProjects/Droplets/working'
        # onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]
        # df_M_A = pd.DataFrame(MatrixA)  # converting it to a pandas
        # df_M_A.columns = [ 'Rate of Growth', 'Number of Droplets', 'Background Area']
        # df_M_A.to_csv(f'MatrixA for image: {onlyfiles}.csv', index=False)  # save as csv

        # show the images
        cv2.imshow("Initial", im_in)
        cv2.imshow("Final", out)
        cv2.waitKey(0)
        cv2.destroyAllWindows()
        return r, XCENTER, YCENTER, out
Example #34
    def got_image(self, rgb_msg, depth_msg):
        depth_image = self._bridge.imgmsg_to_cv2(depth_msg,
                                                 desired_encoding="32FC1")

        color_image = self._bridge.imgmsg_to_cv2(rgb_msg)

        final_bbox = None

        if self._is_first_frame and self._inital_bbox is not None:
            rospy.loginfo("Initializing tracker")
            current_bbox = self._inital_bbox
            bbox_center = self.calculate_bbox_center(current_bbox)

            if depth_image[bbox_center[1]][bbox_center[0]] > 0:
                if self._original_distance == -1:
                    self._original_distance = depth_image[bbox_center[1]][
                        bbox_center[0]]

                current_distance = depth_image[bbox_center[1]][bbox_center[0]]

            self._tracker.init(color_image, current_bbox)
            self._is_first_frame = False

            final_bbox = current_bbox

        elif not self._is_first_frame:
            width = int(color_image.shape[1] * self._scale)
            height = int(color_image.shape[0] * self._scale)

            color_image = cv2.resize(color_image, (width, height))
            depth_image = cv2.resize(depth_image, (width, height))

            if self._has_scale_changed:
                self._has_scale_changed = False

                self._tracker = cv2.TrackerCSRT_create()

                if self._scale == self._fallback_scale:
                    bbox_scaled = tuple(
                        [self._scale * i for i in self._last_bbox])

                    self._inital_bbox = self.scale_bbox(
                        self._inital_bbox, self._scale)

                    self._tracker.init(color_image, bbox_scaled)
                    self.tracker_suggested_bbox = bbox_scaled
                else:
                    bbox_scaled = tuple([
                        self._scale / self._fallback_scale * i
                        for i in self._last_bbox
                    ])

                    self._inital_bbox = self.scale_bbox(
                        self._inital_bbox, self._scale / self._fallback_scale)

                    self._tracker.init(color_image, bbox_scaled)
                    self.tracker_suggested_bbox = bbox_scaled

                ok = True

            else:
                ok, self.tracker_suggested_bbox = self._tracker.update(
                    color_image)

            if ok:
                bbox_center = self.calculate_bbox_center(
                    self.tracker_suggested_bbox)

                split_array = np.array([
                    int(self.tracker_suggested_bbox[0]),
                    int(self.tracker_suggested_bbox[0] +
                        self.tracker_suggested_bbox[2]),
                ])
                split_array = [0 if x < 0 else x for x in split_array]
                bbox_content = np.hsplit(
                    depth_image[int(self.tracker_suggested_bbox[1]
                                    ):int(self.tracker_suggested_bbox[1] +
                                          self.tracker_suggested_bbox[3])],
                    split_array,
                )[1]

                mask = (bbox_content >=
                        (self.min_depth * self.depth_scale_factor)) & (
                            bbox_content <=
                            (self.max_depth * self.depth_scale_factor)) & (
                                bbox_content != 9.0)

                median = np.nanmedian(bbox_content[mask])
                if self.publish_median_depth:
                    bbox_depth_msg = Float32()
                    bbox_depth_msg.data = median / self.depth_scale_factor
                    self._pub_median_depth.publish(bbox_depth_msg)

                if median > 0:
                    if self._original_distance == -1:
                        self._original_distance = median

                    current_distance = median

                    depth_scale = self._original_distance / current_distance
                    tracker_scale = self.get_bbox_scale(
                        self._inital_bbox, self.tracker_suggested_bbox)

                    scaled_tracker_bbox = self.scale_bbox(
                        self.tracker_suggested_bbox,
                        depth_scale / tracker_scale)

                    scaled_tracker_bbox = tuple(
                        [int(x) for x in scaled_tracker_bbox])

                    final_bbox = scaled_tracker_bbox

                elif np.isnan(median):
                    self._current_status = 0

                self.tracker_suggested_bbox = [
                    int(x) for x in self.tracker_suggested_bbox
                ]
                self.tracker_suggested_bbox = (
                    self.tracker_suggested_bbox[0],
                    self.tracker_suggested_bbox[1],
                    self.tracker_suggested_bbox[2],
                    self.tracker_suggested_bbox[3],
                )

                if final_bbox is None:
                    final_bbox = self.tracker_suggested_bbox

        if final_bbox is not None:
            self._last_bbox = final_bbox

            width_ratio = float(final_bbox[2]) / float(color_image.shape[1])
            height_ratio = float(final_bbox[3]) / float(color_image.shape[0])

            if (width_ratio > self.max_bbox_ratio
                    or height_ratio > self.max_bbox_ratio
                ) and self._scale != self._fallback_scale:
                rospy.loginfo("Scaling down...")

                self._scale = self._fallback_scale
                self._has_scale_changed = True
            elif (width_ratio < self.max_bbox_ratio and height_ratio <
                  self.max_bbox_ratio) and self._scale == self._fallback_scale:
                rospy.loginfo("Scaling back up...")

                self._scale = 1.0
                self._has_scale_changed = True

            center = self.calculate_bbox_center(final_bbox)

            if self.check_point_oob(center, color_image, oob_threshold):
                self._current_status = 0

            bbox_message = BoundingBox2D()

            bbox_message.size_x = final_bbox[2]
            bbox_message.size_y = final_bbox[3]

            bbox_message.center.theta = 0
            bbox_message.center.x = final_bbox[0] + final_bbox[2] / 2
            bbox_message.center.y = final_bbox[1] + final_bbox[3] / 2

            self._pub_bbox.publish(bbox_message)

            status_message = Int8()
            status_message.data = self._current_status
            self._pub_status.publish(status_message)

            if self.publish_result_img:
                final_bbox = tuple([int(i) for i in final_bbox])

                if self._current_status == 1:
                    cv2.rectangle(color_image, final_bbox, (0, 0, 255), 2)
                else:
                    cv2.rectangle(color_image, final_bbox, (255, 0, 0), 2)

                cv2.circle(color_image, center, 3, (255, 0, 0), 2)
                imgmsg = self._bridge.cv2_to_imgmsg(color_image,
                                                    encoding="rgb8")

                self._pub_result_img.publish(imgmsg)
Example #35
def simulation_differential_function(t: float, y_1d: np.ndarray,
                                     pass_through_state: PhysicalState,
                                     masses: np.ndarray,
                                     artificials: np.ndarray) -> np.ndarray:
    """
    y_1d =
     [X, Y, VX, VY, Heading, Spin, Fuel, Throttle, LandedOn, Broken] +
     SRB_time_left + time_acc (these are both single values) +
     [ComponentData, CoolantLoopData, RadiatorData]
    returns the derivative of y_1d, i.e.
    [VX, VY, AX, AY, Spin, 0, Fuel consumption, 0, 0, 0] + -constant + 0
    (zeroed-out fields are changed elsewhere)

    !!!!!!!!!!! IMPORTANT !!!!!!!!!!!
    This function should return a DERIVATIVE. The scipy.integrate.solve_ivp function
    will do the rest of the work of the simulation, this function just
    describes how things _move_.
    At its most basic level, this function takes in the _position_ of
    everything (plus some random stuff), and returns the _velocity_ of
    everything (plus some random stuff).
    Essentially, scipy.integrate.solve_ivp does this calculation:
    new_positions_of_system = t_delta * _derive(
                                            current_t_of_system,
                                            current_y_of_system)

    Any arguments after `t` and `y_1d` are just extra constants and
    pass-through state, which should remain constant during normal simulation.
    They are passed in to speed up computation, since this function is the most
    performance-sensitive part of the orbitx codebase(!!!) 
    """

    # Note: we create this y as a PhysicsState for convenience, but if you
    # set any values of y, the changes will be discarded! The only way they
    # will be propagated out of this function is by numpy using the return
    # value of this function as a derivative, as explained above.
    # If you want to set values in y, look at _reconcile_entity_dynamics.
    y = PhysicsState(y_1d, pass_through_state)
    acc_matrix = calc.grav_acc(y.X, y.Y, masses, y.Fuel)
    zeros = np.zeros(y._n)
    fuel_cons = np.zeros(y._n)

    # Engine thrust and fuel consumption
    for artif_index in artificials:
        if y[artif_index].fuel > 0 and y[artif_index].throttle > 0:
            # We have fuel remaining, calculate thrust
            entity = y[artif_index]
            capability = common.craft_capabilities[entity.name]

            fuel_cons[artif_index] = \
                -abs(capability.fuel_cons * entity.throttle)
            eng_thrust = (capability.thrust * entity.throttle *
                          calc.heading_vector(entity.heading))
            mass = entity.mass + entity.fuel

            if entity.name == AYSE and \
                    y[HABITAT].landed_on == AYSE:
                # It's bad that this is hardcoded, but it's also the only
                # place that this comes up so IMO it's not too bad.
                hab = y[HABITAT]
                mass += hab.mass + hab.fuel

            eng_acc = eng_thrust / mass
            acc_matrix[artif_index] += eng_acc

    # And SRB thrust
    srb_usage = 0
    try:
        if y.srb_time >= 0:
            hab_index = y._name_to_index(HABITAT)
            hab = y[hab_index]
            srb_acc = common.SRB_THRUST / (hab.mass + hab.fuel)
            srb_acc_vector = srb_acc * calc.heading_vector(hab.heading)
            acc_matrix[hab_index] += srb_acc_vector
            srb_usage = -1
    except PhysicsState.NoEntityError:
        # The Habitat doesn't exist.
        pass

    # Engineering values
    R = y.engineering.components.Resistance()
    I = y.engineering.components.Current()  # noqa: E741

    # Eventually radiators will affect the temperature.
    T_deriv = common.eta * np.square(I) * R
    R_deriv = common.alpha * T_deriv
    # Voltage we set to be constant. Since I = V/R, I' = V'/R' = 0/R' = 0
    V_deriv = I_deriv = np.zeros(_N_COMPONENTS)

    # Drag effects
    craft = y.craft
    if craft is not None:
        craft_index = y._name_to_index(y.craft)
        drag_acc = calc.drag(y)
        acc_matrix[craft_index] -= drag_acc

    # Centripetal acceleration to keep landed entities glued to each other.
    landed_on = y.LandedOn
    for landed_i in landed_on:
        lander = y[landed_i]
        ground = y[landed_on[landed_i]]

        centripetal_acc = (lander.pos - ground.pos) * ground.spin**2
        acc_matrix[landed_i] = \
            acc_matrix[landed_on[landed_i]] - centripetal_acc

    # Sets velocity and spin of a couple more entities.
    # If you want to set the acceleration of an entity, do it above and
    # keep that logic in _derive. If you want to set the velocity and spin
    # or any other fields that an Entity has, you should put that logic in
    # this _reconcile_entity_dynamics helper.
    y = helpers._reconcile_entity_dynamics(y)

    return np.concatenate(
        (
            y.VX,
            y.VY,
            np.hsplit(acc_matrix, 2),
            y.Spin,
            zeros,
            fuel_cons,
            zeros,
            zeros,
            zeros,
            np.array([srb_usage, 0]),
            # component connection state doesn't change here
            np.zeros(_N_COMPONENTS),
            T_deriv,
            R_deriv,
            V_deriv,
            I_deriv,
            # coolant loop/radiator connection state doesn't change here
            np.zeros(_N_COMPONENTS),
            np.zeros(_N_COMPONENTS),
            np.zeros(_N_COMPONENTS),
            np.zeros(_N_COOLANT_LOOPS * _N_COOLANT_FIELDS),
            np.zeros(_N_RADIATORS * _N_RADIATOR_FIELDS)),
        axis=None)
Example #36
    gx = cv2.Sobel(img, cv2.CV_32F, 1, 0)
    gy = cv2.Sobel(img, cv2.CV_32F, 0, 1)
    mag, ang = cv2.cartToPolar(gx, gy)
    bins = np.int32(bin_n * ang /
                    (2 * np.pi))  # quantizing binvalues in (0...16)
    bin_cells = bins[:10, :10], bins[10:, :10], bins[:10, 10:], bins[10:, 10:]
    mag_cells = mag[:10, :10], mag[10:, :10], mag[:10, 10:], mag[10:, 10:]
    hists = [
        np.bincount(b.ravel(), m.ravel(), bin_n)
        for b, m in zip(bin_cells, mag_cells)
    ]
    hist = np.hstack(hists)  # hist is a 64-dimensional vector (4 cells x 16 bins)
    return hist


img = cv2.imread('digits.png', 0)
cells = [np.hsplit(row, 100) for row in np.vsplit(img, 50)]

# First half is trainData, remaining is testData
train_cells = [i[:50] for i in cells]
test_cells = [i[50:] for i in cells]

deskewed = [list(map(deskew, row)) for row in train_cells]  # deskew comes from the full tutorial
hogdata = [list(map(hog, row)) for row in deskewed]  # map() is lazy in Python 3, so materialize it

trainData = np.float32(hogdata).reshape(-1, 64)
responses = np.repeat(np.arange(10), 250)[:, np.newaxis]

svm = cv2.ml.SVM_create()
svm.setKernel(cv2.ml.SVM_LINEAR)
svm.setType(cv2.ml.SVM_C_SVC)
svm.setC(2.67)
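
The example is cut off after setC; in the OpenCV digits tutorial this snippet follows, training and evaluation typically continue along these lines (a sketch with the tutorial's gamma value, not the original continuation):

svm.setGamma(5.383)
svm.train(trainData, cv2.ml.ROW_SAMPLE, responses)

deskewed_test = [list(map(deskew, row)) for row in test_cells]
hogdata_test = [list(map(hog, row)) for row in deskewed_test]
testData = np.float32(hogdata_test).reshape(-1, 64)
result = svm.predict(testData)[1]  # predict() returns (retval, results)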
Пример #37
0
    elif i == "New York":
        a.append(0.5)
        b.append(1.0)

ar = np.array(a)
br = np.array(b)
arr = ar.reshape(50, 1)
brr = br.reshape(50, 1)
xx = np.array(x)
xd = np.delete(xx, (3), axis=1)
Array = np.concatenate((xd, arr), axis=1)
#print(Array)
YAR = np.array(Y)
y = YAR.reshape(50, 1)

arr1, arr2, arr3, arr4 = np.hsplit(Array, 4)
#print(arr1)
#print(arr3)

arri = (arr1 - np.mean(arr1)) / (np.max(arr1) - np.min(arr1))
#print(arri)
arrii = (arr2 - np.mean(arr2)) / (np.max(arr2) - np.min(arr2))

arriii = (arr3 - np.mean(arr3)) / (np.max(arr3) - np.min(arr3))
array = np.concatenate((arri, arrii, arriii, arr4), axis=1)
#print(array)

#y= y1 + arr1 + arr2 + arr4
#print(y)

from sklearn.model_selection import train_test_split
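
The snippet stops at the import; a typical continuation, assuming array holds the scaled features and y the targets (a sketch, not the original code):

X_train, X_test, y_train, y_test = train_test_split(
    array, y, test_size=0.2, random_state=0)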
Пример #38
0
"""
    Infos
"""

feature_file_name = "pop_feat.pandas"
"""
    #####################
"""
print(feature_file_name)

featureFrame = pd.read_pickle(
    os.path.join(path_to_popularities, feature_file_name))

flength = len(featureFrame.columns)

split = np.hsplit(featureFrame.values, np.array([flength - 1, flength]))

features = split[0]
targets = np.array(split[1].T)[0]

learning_features, holdback_features, learning_targets, holdback_targets = train_test_split(
    features, targets, test_size=0.4, random_state=42, shuffle=True)

best_para = 0
best_score = 0

reg_paras = list()

for i in range(1, 10):
    reg_paras.append(i / 10)
for i in range(0, 10):
Пример #39
0
import numpy as np
import math

# load data set
train_data = np.loadtxt("hw3_train.dat.txt")
test_data = np.loadtxt("hw3_test.dat.txt")

def sign(x):
    if x > 0:
        return 1
    else:
        return -1

X, Y = np.hsplit(train_data, [10])
X_test, Y_test = np.hsplit(test_data, [10])
X_0 = np.ones((X.shape[0], 1))       # bias column of ones
X = np.hstack((X_0, X))
X_0 = np.ones((X_test.shape[0], 1))
X_test = np.hstack((X_0, X_test))

X = np.mat(X)
Y = np.mat(Y)
X_hat = np.dot(np.linalg.inv(np.dot(X.T, X)), X.T)
W_LIN = np.dot(X_hat, Y)

Y_p = np.dot(X, W_LIN)

Ein = 0
num = 0
for i in range(Y_p.shape[0]):
    if sign(Y_p[i]) != Y[i]:
Пример #40
0
import numpy as np
import cv2
#from matplotlib import pyplot as plt

img = cv2.imread('digits.png')
cv2.imshow("img", img)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

# Now we split the image into 5000 cells, each 20x20 pixels
cells = [np.hsplit(row, 100) for row in np.vsplit(gray, 50)]

# Make it into a Numpy array. Its size will be (50,100,20,20)
x = np.array(cells)

# Now we prepare train_data and test_data.
train = x[:, :50].reshape(-1, 400).astype(np.float32)  # Size = (2500,400)
test = x[:, 50:100].reshape(-1, 400).astype(np.float32)  # Size = (2500,400)

# Create labels for train and test data
k = np.arange(10)
train_labels = np.repeat(k, 250)[:, np.newaxis]
test_labels = train_labels.copy()

# Initiate kNN, train the data, then test it with test data for k=1
knn = cv2.ml.KNearest_create()

#knn.train(npaFlattenedImages, cv2.ml.ROW_SAMPLE, npaClassifications)

knn.train(train, cv2.ml.ROW_SAMPLE, train_labels)  # the cv2.ml API requires the sample layout
ret, result, neighbours, dist = knn.findNearest(test, k=5)  # find_nearest is the legacy name
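
A minimal accuracy check in the style of the OpenCV kNN tutorial (a sketch; result and test_labels come from the code above):

matches = result == test_labels
correct = np.count_nonzero(matches)
accuracy = correct * 100.0 / result.size
print(accuracy)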
# https://morvanzhou.github.io/tutorials/data-manipulation/np-pd/2-7-np-split/
import numpy as np

A = np.arange(12).reshape((3, 4))
print(A)
# print(np.split(A, 2, axis=1)) # split along columns
# print(np.split(A, 3, axis=0)) # split along rows
# print(np.array_split(A, 3, axis=1)) # unequal split along columns
print(np.vsplit(A, 3)) # vertical split (by rows)
print(np.hsplit(A, 2)) # horizontal split (by columns)
    def stiff_matrix(self, cfun=None):
        area = self.smspace.cellmeasure

        def f(x):
            x[0, :] = 0
            return x

        p = self.p
        G = self.G
        D = self.D
        PI1 = self.PI1

        cell2dof, cell2dofLocation = self.cell_to_dof()
        NC = len(cell2dofLocation) - 1
        cd = np.hsplit(cell2dof, cell2dofLocation[1:-1])
        DD = np.vsplit(D, cell2dofLocation[1:-1])

        if p == 1:
            tG = np.array([(0, 0, 0), (0, 1, 0), (0, 0, 1)])
            if cfun is None:

                def f1(x):
                    M = np.eye(x[1].shape[1])
                    M -= x[0] @ x[1]
                    N = x[1].shape[1]
                    A = np.zeros((N, N))
                    idx = np.arange(N)
                    A[idx, idx] = 2
                    A[idx[:-1], idx[1:]] = -1
                    A[idx[1:], idx[:-1]] = -1
                    A[0, -1] = -1
                    A[-1, 0] = -1
                    return x[1].T @ tG @ x[1] + M.T @ A @ M

                #f1 = lambda x: x[1].T@tG@x[1] + (np.eye(x[1].shape[1]) - x[0]@x[1]).T@(np.eye(x[1].shape[1]) - x[0]@x[1])
                K = list(map(f1, zip(DD, PI1)))
            else:
                cellbarycenter = self.smspace.cellbarycenter
                k = cfun(cellbarycenter)
                f1 = lambda x: (x[1].T @ tG @ x[1] +
                                (np.eye(x[1].shape[1]) - x[0] @ x[1]).T
                                @ (np.eye(x[1].shape[1]) - x[0] @ x[1])) * x[2]
                K = list(map(f1, zip(DD, PI1, k)))
        else:
            tG = list(map(f, G))
            if cfun is None:
                f1 = lambda x: (x[1].T @ x[2] @ x[1] +
                                (np.eye(x[1].shape[1]) - x[0] @ x[1]).T
                                @ (np.eye(x[1].shape[1]) - x[0] @ x[1]))
                K = list(map(f1, zip(DD, PI1, tG)))
            else:
                cellbarycenter = self.smspace.cellbarycenter
                k = cfun(cellbarycenter)
                f1 = lambda x: (x[1].T @ x[2] @ x[1] +
                                (np.eye(x[1].shape[1]) - x[0] @ x[1]).T
                                @ (np.eye(x[1].shape[1]) - x[0] @ x[1])) * x[3]
                K = list(map(f1, zip(DD, PI1, tG, k)))

        f2 = lambda x: np.repeat(x, x.shape[0])
        f3 = lambda x: np.tile(x, x.shape[0])
        f4 = lambda x: x.flatten()

        I = np.concatenate(list(map(f2, cd)))
        J = np.concatenate(list(map(f3, cd)))
        val = np.concatenate(list(map(f4, K)))
        gdof = self.number_of_global_dofs()
        A = csr_matrix((val, (I, J)), shape=(gdof, gdof), dtype=np.float64)
        return A
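
The np.hsplit(cell2dof, cell2dofLocation[1:-1]) call above is a ragged per-cell split; a minimal sketch with toy data (assumed shapes):

import numpy as np

cell2dof = np.array([0, 1, 2, 2, 3, 4, 5])        # flattened dof indices of all cells
cell2dofLocation = np.array([0, 3, 7])            # cell i owns slice [loc[i], loc[i+1])
cd = np.hsplit(cell2dof, cell2dofLocation[1:-1])  # -> [array([0, 1, 2]), array([2, 3, 4, 5])]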
    ax2.plot(x1, y1)
    ax2.set_title('Gradient Descent')
    ax2.set_xlabel('Iterations')
    ax2.set_ylabel('Cost Function Value')


if __name__ == '__main__':
    data = data_leading_in()
    print(data, data.shape)
    # m is the number of rows, n the number of columns
    m, n = data.shape
    draw_data()
    # set the parameters
    iterations, alpha, theta = set_parameter()
    # extract X_without_x0 and y from data
    X_without_x0 = copy.deepcopy(np.hsplit(data, (n - 1, n))[0])
    X_without_x0 = X_without_x0.astype(float)
    y = np.hsplit(data, n)[n - 1]
    # # normal equation
    # normal_equation()
    # feature scaling and mean normalization
    X_without_x0, mean_X_list, std_X_list = feature_scaling()
    # prepend the x0 column to X_without_x0 to get X
    X = np.hstack((np.ones((m, 1)), X_without_x0))
    # gradient descent
    x1, y1 = gradient_descent()
    # # plot gradient descent to check the number of iterations
    draw_gradient_descent()
    # adjust theta to undo the effect of feature scaling on the plot
    adjust_theta()
    plt.show()
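
The feature_scaling() helper is not shown in this excerpt; mean normalization as described by the comments usually looks like this (a sketch, not the original helper):

def feature_scaling_sketch(X):
    # scale each column to zero mean and unit spread; keep the statistics
    # so predictions on new inputs can be scaled the same way
    mean_X = X.mean(axis=0)
    std_X = X.std(axis=0)
    return (X - mean_X) / std_X, mean_X, std_X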
y = np.array([[99],
             [99]])
np.hstack([grid, y])

# np.split, np.hsplit, np.vsplit
x = [1, 2, 3, 99, 99, 3, 2, 1]
x1, x2, x3 = np.split(x, [3, 5])
print(x1, x2, x3)

grid = np.arange(16).reshape((4, 4))
grid
upper, lower = np.vsplit(grid, [2])
upper
lower
left, right = np.hsplit(grid, [2])
left
right

# vectorized, universal functions, ufuncs
np.random.seed(0)
np.empty(3)
np.empty(5)
values = np.random.randint(1, 10, size=5)
len(values)
np.empty(len(values))

def compute_reciprocals(values):
    output = np.empty(len(values)) # room for return values
    for i in range(len(values)):
        output[i] = 1.0/values[i]
    return output  # the return statement was cut off in this excerpt
Пример #45
0
print(np.exp((np.arange(3))))
print(np.sqrt(4))
print('------------------------------------30')

# concatenation and splitting operations
matrix4 = np.floor(np.random.random((2, 4)) * 10)
print(matrix4)

matrix5 = np.floor(np.random.random((2, 4)) * 10)
print(matrix5)
print('------------------------------------31')

print(np.hstack((matrix4, matrix5)))
print(np.vstack((matrix4, matrix5)))

print(np.hsplit(matrix4, 2))
print(np.vsplit(matrix4, 2))
print('------------------------------------32')

# position of the largest element along each axis
matrix6 = np.floor(np.random.random((2, 4)) * 10)
print(matrix6)
print(matrix6.argmax(axis=1))
print(matrix6.argmax(axis=0))
print('------------------------------------33')

# tiling (expanding) a matrix
print(np.tile(npArr0, (2, 3)))

# sorting a matrix
print(npArr0)
Пример #46
0
def solve_equation(matrix):
    left_side, right_side = np.hsplit(matrix, [len(matrix)])
    result = linalg.solve(left_side, right_side)
    print(result)
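
A hedged usage sketch: solve_equation expects an augmented matrix [A | b], which np.hsplit separates at column len(matrix) (the linalg import is assumed to be scipy's):

import numpy as np
from scipy import linalg

m = np.array([[3., 1., 9.],
              [1., 2., 8.]])  # 3x + y = 9 and x + 2y = 8
solve_equation(m)             # prints [[2.], [3.]]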
c = np.stack((a, b), 0)
print(c.shape, c)

d = np.stack((a, b), 1)
print(d.shape, d)

e = np.stack((a, b), 2)
print(e.shape, e)

ee = np.stack((a, b), -1)
print(ee.shape)

a = np.arange(12)
print(a)
print(np.hsplit(a, 3))
print(np.hsplit(a, [3, 6]))
print(np.hsplit(a, [3, 6, 9]))
print(np.split(a, 3, 0))
print(np.split(a, [3, 6, 9], 0))

b = np.arange(12).reshape(4, 3)
print(b)
print(np.vsplit(b, 2))
print(np.split(b, 2, 0))
print(np.hsplit(b, [1]))
print(np.split(b, [1], 1))

a = np.arange(10, 20)
print(a)
Пример #48
0
    def simulateCallback(frame):
        global g_initFlag
        global forceShowFrame
        global forceApplyFrame
        global JsysPre
        global JsupPreL
        global JsupPreR
        global JsupPre
        global softConstPoint
        global stage

        motionModel.update(motion[frame])

        Kt, Kk, Kl, Kh, Ksc, Bt, Bl, Bh, Bsc = viewer.GetParam()

        Dt = 2 * (Kt**.5)
        Dk = 2 * (Kk**.5)
        Dl = 2 * (Kl**.5)
        Dh = 2 * (Kh**.5)
        Dsc = 2 * (Ksc**.5)

        if Bsc == 0.0:
            viewer.doc.showRenderer('softConstraint', False)
            viewer.motionViewWnd.update(1, viewer.doc)
        else:
            viewer.doc.showRenderer('softConstraint', True)
            renderer1 = viewer.doc.getRenderer('softConstraint')
            renderer1.rc.setLineWidth(0.1 + Bsc * 3)
            viewer.motionViewWnd.update(1, viewer.doc)

        # tracking
        th_r = motion.getDOFPositions(frame)
        th = controlModel.getDOFPositions()
        dth_r = motion.getDOFVelocities(frame)
        dth = controlModel.getDOFVelocities()
        ddth_r = motion.getDOFAccelerations(frame)
        ddth_des = yct.getDesiredDOFAccelerations(th_r, th, dth_r, dth, ddth_r,
                                                  Kt, Dt)
        ddth_c = controlModel.getDOFAccelerations()

        ype.flatten(ddth_des, ddth_des_flat)
        ype.flatten(dth, dth_flat)

        ype.flatten(ddth_c, ddth_c_flat)

        # jacobian
        refFootL = motionModel.getBodyPositionGlobal(supL)
        refFootR = motionModel.getBodyPositionGlobal(supR)

        positionFootL = [None] * footPartNum
        positionFootR = [None] * footPartNum
        for i in range(footPartNum):
            positionFootL[i] = controlModel.getBodyPositionGlobal(
                indexFootL[i])
            positionFootR[i] = controlModel.getBodyPositionGlobal(
                indexFootR[i])

        linkPositions = controlModel.getBodyPositionsGlobal()
        linkVelocities = controlModel.getBodyVelocitiesGlobal()
        linkAngVelocities = controlModel.getBodyAngVelocitiesGlobal()
        linkInertias = controlModel.getBodyInertiasGlobal()

        jointPositions = controlModel.getJointPositionsGlobal()
        jointAxeses = controlModel.getDOFAxeses()

        CM = yrp.getCM(linkPositions, linkMasses, totalMass)
        dCM = yrp.getCM(linkVelocities, linkMasses, totalMass)
        CM_plane = copy.copy(CM)
        CM_plane[1] = 0.
        dCM_plane = copy.copy(dCM)
        dCM_plane[1] = 0.

        linkPositions_ref = motionModel.getBodyPositionsGlobal()
        CM_ref = yrp.getCM(linkPositions_ref, linkMasses, totalMass)
        CM_plane_ref = copy.copy(CM_ref)
        CM_plane_ref[1] = 0.

        P = ymt.getPureInertiaMatrix(TO, linkMasses, linkPositions, CM,
                                     linkInertias)
        dP = ymt.getPureInertiaMatrixDerivative(dTO, linkMasses,
                                                linkVelocities, dCM,
                                                linkAngVelocities,
                                                linkInertias)

        yjc.computeJacobian2(Jsys, DOFs, jointPositions, jointAxeses,
                             linkPositions, allLinkJointMasks)
        yjc.computeJacobianDerivative2(dJsys, DOFs, jointPositions,
                                       jointAxeses, linkAngVelocities,
                                       linkPositions, allLinkJointMasks)

        if g_initFlag == 0:
            softConstPoint = controlModel.getBodyPositionGlobal(constBody)
            softConstPoint[1] -= .3
            g_initFlag = 1

        yjc.computeJacobian2(jFootL[0], DOFs, jointPositions, jointAxeses,
                             [positionFootL[0]], jointMasksFootL[0])
        yjc.computeJacobianDerivative2(dJFootL[0], DOFs, jointPositions,
                                       jointAxeses, linkAngVelocities,
                                       [positionFootL[0]], jointMasksFootL[0],
                                       False)

        yjc.computeJacobian2(jFootR[0], DOFs, jointPositions, jointAxeses,
                             [positionFootR[0]], jointMasksFootR[0])
        yjc.computeJacobianDerivative2(dJFootR[0], DOFs, jointPositions,
                                       jointAxeses, linkAngVelocities,
                                       [positionFootR[0]], jointMasksFootR[0],
                                       False)

        yjc.computeAngJacobian2(jAngFootL[0], DOFs, jointPositions,
                                jointAxeses, [positionFootL[0]],
                                jointMasksFootL[0])
        yjc.computeAngJacobianDerivative2(dJAngFootL[0], DOFs, jointPositions,
                                          jointAxeses, linkAngVelocities,
                                          [positionFootL[0]],
                                          jointMasksFootL[0], False)

        yjc.computeAngJacobian2(jAngFootR[0], DOFs, jointPositions,
                                jointAxeses, [positionFootR[0]],
                                jointMasksFootR[0])
        yjc.computeAngJacobianDerivative2(dJAngFootR[0], DOFs, jointPositions,
                                          jointAxeses, linkAngVelocities,
                                          [positionFootR[0]],
                                          jointMasksFootR[0], False)

        bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(
            bodyIDsToCheck, mus, Ks, Ds)
        CP = yrp.getCP(contactPositions, contactForces)

        for i in range(len(bodyIDsToCheck)):
            controlModel.SetBodyColor(bodyIDsToCheck[i], 0, 0, 0)

        contactFlagFootL = [0] * footPartNum
        contactFlagFootR = [0] * footPartNum

        for i in range(len(bodyIDs)):
            controlModel.SetBodyColor(bodyIDs[i], 255, 105, 105)
            index = controlModel.id2index(bodyIDs[i])
            for j in range(len(indexFootL)):
                if index == indexFootL[j]:
                    contactFlagFootL[j] = 1
                    if j != 0:
                        yjc.computeJacobian2(jFootL[j], DOFs, jointPositions,
                                             jointAxeses, [positionFootL[j]],
                                             jointMasksFootL[j])
                        yjc.computeJacobianDerivative2(
                            dJFootL[j], DOFs, jointPositions, jointAxeses,
                            linkAngVelocities, [positionFootL[j]],
                            jointMasksFootL[j], False)
                    break
            for j in range(len(indexFootR)):
                if index == indexFootR[j]:
                    contactFlagFootR[j] = 1
                    if j != 0:
                        yjc.computeJacobian2(jFootR[j], DOFs, jointPositions,
                                             jointAxeses, [positionFootR[j]],
                                             jointMasksFootR[j])
                        yjc.computeJacobianDerivative2(
                            dJFootR[j], DOFs, jointPositions, jointAxeses,
                            linkAngVelocities, [positionFootR[j]],
                            jointMasksFootR[j], False)
                    break

        for j in range(len(indexFootL)):
            yjc.computeAngJacobian2(jAngFootL[j], DOFs, jointPositions,
                                    jointAxeses, [positionFootL[j]],
                                    jointMasksFootL[j])
            yjc.computeAngJacobianDerivative2(dJAngFootL[j], DOFs,
                                              jointPositions, jointAxeses,
                                              linkAngVelocities,
                                              [positionFootL[j]],
                                              jointMasksFootL[j], False)
            yjc.computeAngJacobian2(jAngFootR[j], DOFs, jointPositions,
                                    jointAxeses, [positionFootR[j]],
                                    jointMasksFootR[j])
            yjc.computeAngJacobianDerivative2(dJAngFootR[j], DOFs,
                                              jointPositions, jointAxeses,
                                              linkAngVelocities,
                                              [positionFootR[j]],
                                              jointMasksFootR[j], False)
        '''
        if frame < 100 :
            if stage == POWERFUL_BALANCING:
            #if stage != MOTION_TRACKING:
                footCenterL = controlModel.getBodyPositionGlobal(supL)        
                footCenterR = controlModel.getBodyPositionGlobal(supR)
            else:
                footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])        
                footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
        else:
        '''
        if footPartNum == 1:
            footCenterL = controlModel.getBodyPositionGlobal(supL)
            footCenterR = controlModel.getBodyPositionGlobal(supR)
        else:
            if ((contactFlagFootL[3] == 1 or contactFlagFootL[4] == 1)
                    and contactFlagFootL[0] == 0) or (
                        (contactFlagFootR[3] == 1 or contactFlagFootR[4] == 1)
                        and contactFlagFootR[0] == 0):
                footCenterL = (
                    controlModel.getBodyPositionGlobal(supL) +
                    controlModel.getBodyPositionGlobal(indexFootL[1])) / 2.0
                footCenterR = (
                    controlModel.getBodyPositionGlobal(supR) +
                    controlModel.getBodyPositionGlobal(indexFootR[1])) / 2.0
                #footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
                #footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])
            else:
                footCenterL = (
                    controlModel.getBodyPositionGlobal(supL) +
                    controlModel.getBodyPositionGlobal(indexFootL[1])) / 2.0
                footCenterR = (
                    controlModel.getBodyPositionGlobal(supR) +
                    controlModel.getBodyPositionGlobal(indexFootR[1])) / 2.0
                #footCenterL = controlModel.getBodyPositionGlobal(indexFootL[1])
                #footCenterR = controlModel.getBodyPositionGlobal(indexFootR[1])

        footCenter = footCenterL + (footCenterR - footCenterL) / 2.0
        footCenter[1] = 0.

        footCenter_ref = refFootL + (refFootR - refFootL) / 2.0
        #footCenter_ref[1] = 0.
        #
        if checkAll(contactFlagFootL, 0) == 1 and checkAll(
                contactFlagFootR, 0) == 1:
            footCenter = footCenter  # neither foot in contact: keep the midpoint from above
        elif checkAll(contactFlagFootL, 0) == 1:
            footCenter = footCenterR
        elif checkAll(contactFlagFootR, 0) == 1:
            footCenter = footCenterL
        footCenter[1] = 0.

        desForeSupLAcc = [0, 0, 0]
        desForeSupRAcc = [0, 0, 0]

        totalNormalForce = [0, 0, 0]

        for i in range(len(contactForces)):
            totalNormalForce[0] += contactForces[i][0]
            totalNormalForce[1] += contactForces[i][1]
            totalNormalForce[2] += contactForces[i][2]

        # linear momentum
        CM_ref_plane = footCenter
        dL_des_plane = Kl * totalMass * (CM_ref_plane -
                                         CM_plane) - Dl * totalMass * dCM_plane

        # angular momentum
        CP_ref = footCenter

        timeStep = 30.  # frames per second; dt = 1 / timeStep
        if CP_old[0] is None or CP is None:
            dCP = None
        else:
            dCP = (CP - CP_old[0]) / (1 / timeStep)
        CP_old[0] = CP

        if CP is not None and dCP is not None:
            ddCP_des = Kh * (CP_ref - CP) - Dh * (dCP)
            CP_des = CP + dCP * (1 / timeStep) + .5 * ddCP_des * (
                (1 / timeStep)**2)
            dH_des = np.cross(
                (CP_des - CM),
                (dL_des_plane + totalMass * mm.s2v(wcfg.gravity)))
            #dH_des = np.cross((CP_des - CM_plane), (dL_des_plane + totalMass*mm.s2v(wcfg.gravity)))
        else:
            dH_des = None

        # momentum matrix
        RS = np.dot(P, Jsys)
        R, S = np.vsplit(RS, 2)

        rs = np.dot((np.dot(dP, Jsys) + np.dot(P, dJsys)), dth_flat)
        r_bias, s_bias = np.hsplit(rs, 2)

        ##############################
        # soft point constraint

        P_des = softConstPoint
        P_cur = controlModel.getBodyPositionGlobal(constBody)
        dP_des = [0, 0, 0]
        dP_cur = controlModel.getBodyVelocityGlobal(constBody)
        ddP_des1 = Ksc * (P_des - P_cur) - Dsc * (dP_cur - dP_des)

        r = P_des - P_cur
        I = np.vstack(([1, 0, 0], [0, 1, 0], [0, 0, 1]))
        Z = np.hstack((I, mm.getCrossMatrixForm(-r)))

        yjc.computeJacobian2(Jconst, DOFs, jointPositions, jointAxeses,
                             [softConstPoint], constJointMasks)
        JL, JA = np.vsplit(Jconst, 2)
        Q1 = np.dot(Z, Jconst)

        q1 = np.dot(JA, dth_flat)
        q2 = np.dot(mm.getCrossMatrixForm(q1),
                    np.dot(mm.getCrossMatrixForm(q1), r))

        yjc.computeJacobianDerivative2(dJconst, DOFs, jointPositions,
                                       jointAxeses, linkAngVelocities,
                                       [softConstPoint], constJointMasks,
                                       False)
        q_bias1 = np.dot(np.dot(Z, dJconst), dth_flat) + q2

        ##############################

        flagContact = True
        if dH_des is None or np.any(np.isnan(dH_des)):
            flagContact = False
            viewer.doc.showRenderer('rd_grf_des', False)
            viewer.motionViewWnd.update(1, viewer.doc)
        else:
            viewer.doc.showRenderer('rd_grf_des', True)
            viewer.motionViewWnd.update(1, viewer.doc)
        '''
        0 : initial
        1 : contact
        2 : fly
        3 : landing
        '''

        #MOTION = FORWARD_JUMP
        if mit.MOTION == mit.FORWARD_JUMP:
            frame_index = [136, 100]
            #frame_index = [100000, 100000]
        elif mit.MOTION == mit.TAEKWONDO:
            frame_index = [130, 100]
            #frame_index = [100000, 100000]
        elif mit.MOTION == mit.TAEKWONDO2:
            frame_index = [130 + 40, 100]
        else:
            frame_index = [1000000, 1000000]

        #MOTION = TAEKWONDO
        #frame_index = [135, 100]
        '''
        if frame > 300 :
            if stage != DYNAMIC_BALANCING:
                print("#", frame,"-DYNAMIC_BALANCING")
            stage = DYNAMIC_BALANCING
            Kk = Kk*1
            Dk = 2*(Kk**.5)        
        '''
        if frame > frame_index[0]:
            if stage != POWERFUL_BALANCING:
                print("#", frame, "-POWERFUL_BALANCING")
            stage = POWERFUL_BALANCING
            Kk = Kk * 2
            Dk = 2 * (Kk**.5)
        elif frame > frame_index[1]:
            if stage != MOTION_TRACKING:
                print("#", frame, "-MOTION_TRACKING")
            stage = MOTION_TRACKING

        trackingW = w

        if stage == MOTION_TRACKING:
            trackingW = w2
            Bt = Bt * 2

        # optimization

        mot.addTrackingTerms(problem, totalDOF, Bt, trackingW, ddth_des_flat)

        mot.addSoftPointConstraintTerms(problem, totalDOF, Bsc, ddP_des1, Q1,
                                        q_bias1)

        if flagContact:
            if stage != MOTION_TRACKING + 10:
                mot.addLinearTerms(problem, totalDOF, Bl, dL_des_plane, R,
                                   r_bias)
                mot.addAngularTerms(problem, totalDOF, Bh, dH_des, S, s_bias)

        a_sup_2 = [None]
        Jsup_2 = [None]
        dJsup_2 = [None]

        ##############################
        # Hard constraint
        if stage != MOTION_TRACKING:
            Kk2 = Kk * 2.0
        else:
            Kk2 = Kk * 1.5

        Dk2 = 2 * (Kk2**.5)
        '''
        desLinearAccL, desPosL = getDesFootLinearAcc(motionModel, controlModel, supL, ModelOffset, CM_ref, CM, Kk2, Dk2) 
        desLinearAccR, desPosR = getDesFootLinearAcc(motionModel, controlModel, supR, ModelOffset, CM_ref, CM, Kk2, Dk2) 

        desAngularAccL = getDesFootAngularAcc(motionModel, controlModel, supL, Kk2, Dk2)
        desAngularAccR = getDesFootAngularAcc(motionModel, controlModel, supR, Kk2, Dk2)
        '''

        if stage != MOTION_TRACKING:
            idx = 0  #LEFT/RIGHT_TOES

            desLinearAccL, desPosL = getDesFootLinearAcc(
                motionModel, controlModel, indexFootL[idx], ModelOffset,
                CM_ref, CM, Kk2, Dk2)
            desLinearAccR, desPosR = getDesFootLinearAcc(
                motionModel, controlModel, indexFootR[idx], ModelOffset,
                CM_ref, CM, Kk2, Dk2)

            desAngularAccL = getDesFootAngularAcc(motionModel, controlModel,
                                                  indexFootL[idx], Kk2, Dk2)
            desAngularAccR = getDesFootAngularAcc(motionModel, controlModel,
                                                  indexFootR[idx], Kk2, Dk2)

            a_sup_2 = np.hstack((np.hstack((desLinearAccL, desAngularAccL)),
                                 np.hstack((desLinearAccR, desAngularAccR))))

            Jsup_2 = np.vstack((jFootL[idx], jFootR[idx]))
            dJsup_2 = np.vstack((dJFootL[idx], dJFootR[idx]))

            rd_DesPosL[0] = desPosL.copy()
            rd_DesPosR[0] = desPosR.copy()
        else:
            if footPartNum == 5:
                idx = 3
                desAngularAccL = getDesFootAngularAcc(motionModel,
                                                      controlModel,
                                                      indexFootL[idx], Kk2,
                                                      Dk2)
                desAngularAccR = getDesFootAngularAcc(motionModel,
                                                      controlModel,
                                                      indexFootR[idx], Kk2,
                                                      Dk2)

                a_sup_2 = np.hstack((desAngularAccL, desAngularAccR))

                Jsup_2 = np.vstack((jAngFootL[idx], jAngFootR[idx]))
                dJsup_2 = np.vstack((dJAngFootL[idx], dJAngFootR[idx]))
            else:
                idx = 1
                desAngularAccL = getDesFootAngularAcc(motionModel,
                                                      controlModel,
                                                      indexFootL[idx], Kk2,
                                                      Dk2)
                desAngularAccR = getDesFootAngularAcc(motionModel,
                                                      controlModel,
                                                      indexFootR[idx], Kk2,
                                                      Dk2)

                a_sup_2 = np.hstack((desAngularAccL, desAngularAccR))

                Jsup_2 = np.vstack((jAngFootL[idx], jAngFootR[idx]))
                dJsup_2 = np.vstack((dJAngFootL[idx], dJAngFootR[idx]))

        ##############################

        ##############################
        # Additional constraint

        if stage != MOTION_TRACKING:
            #Kk2 = Kk * 2.5
            Kk2 = Kk * 2.5
            Dk2 = 2 * (Kk2**.5)
            desForePosL = [0, 0, 0]
            desForePosR = [0, 0, 0]
            desRearPosL = [0, 0, 0]
            desRearPosR = [0, 0, 0]

            for i in range(1, footPartNum):
                if contactFlagFootL[i] == 1:
                    desLinearAccL, desForePosL = getDesFootLinearAcc(
                        motionModel, controlModel, indexFootL[i], ModelOffset,
                        CM_ref, CM, Kk2, Dk2)
                    desAngularAccL = getDesFootAngularAcc(
                        motionModel, controlModel, indexFootL[i], Kk2, Dk2)
                    a_sup_2 = np.hstack(
                        (a_sup_2, np.hstack((desLinearAccL, desAngularAccL))))
                    Jsup_2 = np.vstack((Jsup_2, jFootL[i]))
                    dJsup_2 = np.vstack((dJsup_2, dJFootL[i]))
                if contactFlagFootR[i] == 1:
                    desLinearAccR, desForePosR = getDesFootLinearAcc(
                        motionModel, controlModel, indexFootR[i], ModelOffset,
                        CM_ref, CM, Kk2, Dk2)
                    desAngularAccR = getDesFootAngularAcc(
                        motionModel, controlModel, indexFootR[i], Kk2, Dk2)
                    a_sup_2 = np.hstack(
                        (a_sup_2, np.hstack((desLinearAccR, desAngularAccR))))
                    Jsup_2 = np.vstack((Jsup_2, jFootR[i]))
                    dJsup_2 = np.vstack((dJsup_2, dJFootR[i]))

            rd_DesForePosL[0] = desForePosL
            rd_DesForePosR[0] = desForePosR
            rd_DesRearPosL[0] = desRearPosL
            rd_DesRearPosR[0] = desRearPosR
        ##############################

        mot.setConstraint(problem, totalDOF, Jsup_2, dJsup_2, dth_flat,
                          a_sup_2)

        r = problem.solve()
        problem.clear()
        ype.nested(r['x'], ddth_sol)

        rootPos[0] = controlModel.getBodyPositionGlobal(selectedBody)
        localPos = [[0, 0, 0]]

        for i in range(stepsPerFrame):
            # apply penalty force
            bodyIDs, contactPositions, contactPositionLocals, contactForces = vpWorld.calcPenaltyForce(
                bodyIDsToCheck, mus, Ks, Ds)

            vpWorld.applyPenaltyForce(bodyIDs, contactPositionLocals,
                                      contactForces)

            extraForce[0] = viewer.GetForce()
            if (extraForce[0][0] != 0 or extraForce[0][1] != 0
                    or extraForce[0][2] != 0):
                forceApplyFrame += 1
                #vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
                controlModel.applyBodyForceGlobal(selectedBody, extraForce[0])
                applyedExtraForce[0] = extraForce[0]

            if forceApplyFrame * wcfg.timeStep > 0.1:
                viewer.ResetForce()
                forceApplyFrame = 0

            controlModel.setDOFAccelerations(ddth_sol)

            controlModel.solveHybridDynamics()
            '''
            extraForce[0] = viewer.GetForce()
            if (extraForce[0][0] != 0 or extraForce[0][1] != 0 or extraForce[0][2] != 0) :
                forceApplyFrame += 1
                vpWorld.applyPenaltyForce(selectedBodyId, localPos, extraForce)
                applyedExtraForce[0] = extraForce[0]
            
            if forceApplyFrame*wcfg.timeStep > 0.1:
                viewer.ResetForce()
                forceApplyFrame = 0            
            '''
            vpWorld.step()

        # rendering
        rd_footCenter[0] = footCenter

        rd_CM[0] = CM.copy()

        rd_CM_plane[0] = CM_plane.copy()

        rd_footCenter_ref[0] = footCenter_ref
        rd_CM_plane_ref[0] = CM_ref.copy()
        rd_CM_ref[0] = CM_ref.copy()
        rd_CM_ref_vec[0] = (CM_ref - footCenter_ref) * 3.
        rd_CM_vec[0] = (CM - footCenter) * 3

        #rd_CM_plane[0][1] = 0.

        if CP is not None and dCP is not None:
            rd_CP[0] = CP
            rd_CP_des[0] = CP_des

        rd_dL_des_plane[0] = dL_des_plane
        rd_dH_des[0] = dH_des

        rd_grf_des[0] = totalNormalForce - totalMass * mm.s2v(
            wcfg.gravity)  #dL_des_plane - totalMass*mm.s2v(wcfg.gravity)

        rd_exf_des[0] = applyedExtraForce[0]
        rd_root_des[0] = rootPos[0]

        rd_CMP[0] = softConstPoint

        rd_soft_const_vec[0] = controlModel.getBodyPositionGlobal(
            constBody) - softConstPoint

        if (forceApplyFrame == 0):
            applyedExtraForce[0] = [0, 0, 0]
Пример #49
0
    def load_regular(self):

        self.directorypath = self.directorypathchoose.text()
        if self.directorypath.endswith('/'):
            self.directorypath = self.directorypath[:-1]

        try:
            #load intensities
            intensity_df = pd.read_csv(self.directorypath + '/Intensities' +
                                       '_' + self.datechoose.text() + '_' +
                                       self.timechoose.text() + '.csv')
        except Exception:
            self.msgbox.setText(
                'Loading Intensity data failed, check the Date and Time have been written in the same way as\n when you saved the files'
            )
            self.msgbox.exec_()

            return

        uncleaned_intensity_dict = intensity_df.to_dict(orient='list')

        self.data_deposit.analyser.bg_sub_intensity_trace = self.strip_dict_of_nan(
            uncleaned_intensity_dict)

        for key in self.data_deposit.analyser.bg_sub_intensity_trace.keys():
            print(type(self.data_deposit.analyser.bg_sub_intensity_trace[key]))

        #load areas

        try:
            area_df = pd.read_csv(self.directorypath + '/Areas' + '_' +
                                  self.datechoose.text() + '_' +
                                  self.timechoose.text() + '.csv')

        except Exception:
            self.msgbox.setText(
                'Loading Area data failed, check the Date and Time have been written in the same way as\n when you saved the files'
            )
            self.msgbox.exec_()

            return

        uncleaned_area_dict = area_df.to_dict(orient='list')

        self.data_deposit.analyser.areatrace = self.strip_dict_of_nan(
            uncleaned_area_dict)

        #load trap_positions
        self.labelled_traps = np.loadtxt(
            self.directorypath + '/trap_positions' + '_' +
            self.datechoose.text() + '_' + self.timechoose.text() + '.csv',
            delimiter=',')

        labels_traps_split = np.hsplit(self.labelled_traps, [1, 3])

        self.data_deposit.analyser.trapgetter.trap_positions = labels_traps_split[
            1].astype(int)

        self.data_deposit.analyser.trapgetter.labels = labels_traps_split[
            0].reshape((labels_traps_split[0].shape[0], )).astype(int)

        #load detected vesicle centres

        file = open(
            self.directorypath + '/Centres' + '_' + self.datechoose.text() +
            '_' + self.timechoose.text() + '.txt', 'rb')

        self.data_deposit.analyser.centres = pickle.load(file)

        #load bookend times for experiment length

        t0_tmax = np.loadtxt(self.directorypath + '/bookendtimes' + '_' +
                             self.datechoose.text() + '_' +
                             self.timechoose.text() + '.csv',
                             delimiter=',')

        self.data_deposit.reloadt0 = int(t0_tmax[0])
        self.data_deposit.reloadtmax = int(t0_tmax[1])

        self.done_load_sig.emit()
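
The np.hsplit(self.labelled_traps, [1, 3]) call above splits the loaded array by columns; a toy sketch of the assumed row layout (label, x, y):

import numpy as np

labelled = np.array([[7., 10., 20.],
                     [8., 30., 40.]])
labels, positions, rest = np.hsplit(labelled, [1, 3])
# labels is the (2, 1) label column, positions the (2, 2) x/y block,
# and rest is an empty (2, 0) remainder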
Пример #50
0
data2 = np.expand_dims(data,axis = 0 )
data3 = np.expand_dims(data,axis = 1 )
# combining and splitting arrays
a1 = np.arange(9).reshape(3,3)
b1 = np.arange(12).reshape(3,4)
c4 = np.arange(15).reshape(3,5)
np.hstack((a1,b1)) #水平组合
np.hstack((a1,b1,c4))
np.concatenate((a1,b1),axis=1)
m = a1 * 3
a1.shape == m.shape
np.stack((a1,m),axis=1)
# splitting arrays
a2 = np.arange(24).reshape((4,6))
t2 = np.split(a2,2,axis=1)
t3 = np.split(a2,2,axis=0)
t4 = np.hsplit(a2,2)
t5 = np.vsplit(a2,2)

Пример #51
0
#flatten the dataset
print(new_cyclist_trials.ravel())
x = new_cyclist_trials.ravel()
x
y = x.reshape(3, 4)
y
z = y.reshape(4, 3)
z

#reshaping dataset
print(new_cyclist_trials.reshape(3, 4))
print(new_cyclist_trials.reshape(4, 3))
print(new_cyclist_trials.reshape(2, 6))
new_cyclist_trials

#resize
new_cyclist_trials.resize(6, 2)
new_cyclist_trials

#split array into 2
new_cyclist_trials = np.array([[10, 15, 17, 26, 13, 19],
                               [12, 13, 14, 24, 14, 23]])
new_cyclist_trials
print(np.hsplit(new_cyclist_trials, 2))

new_cyclist_1 = [10, 15, 17, 26, 13, 19]
new_cyclist_2 = [12, 13, 14, 24, 14, 23]

#stack the arrays together
print(np.hstack([new_cyclist_1, new_cyclist_2]))
Пример #52
0
    h2 = F.relu(self.l2(h1))
    h3 = F.relu(self.l3(h2))
    y = self.l4(h3)
    return y

# set up the data
with open('train_data.txt', 'r') as f :
  lines = f.readlines()

data = []
for l in lines :
  d = l.strip().split()
  data.append(list(map(int, d)))

data = np.array(data, dtype = np.int32)
trainx, trainy = np.hsplit(data, [2])
trainy = trainy[:, 0]
trainx = np.array(trainx, dtype = np.float32)
traint = np.array(trainy, dtype = np.int32)

train = chainer.datasets.TupleDataset(trainx, traint)  # use the int32 labels prepared above
test = chainer.datasets.TupleDataset(trainx, traint)

# Chainer setup
# register the neural network
model = L.Classifier(MyChain(), lossfun = F.softmax_cross_entropy)
optimizer = chainer.optimizers.Adam()
optimizer.setup(model)

# define the iterator
batchsize = 4
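
The example is cut off after batchsize; a typical Chainer training loop continues roughly like this (a sketch, not the original code):

from chainer import iterators, training
from chainer.training import extensions

train_iter = iterators.SerialIterator(train, batchsize)
updater = training.StandardUpdater(train_iter, optimizer)
trainer = training.Trainer(updater, (20, 'epoch'))
trainer.extend(extensions.LogReport())
trainer.run()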
Пример #53
0
        continue

    fig = plt.figure(1)
    plt.clf()

    ax = fig.add_subplot(221, projection="3d")

    segment = segments[i]
    segment = segment - np.min(segment, axis=0)

    # Maintain aspect ratio on xy scale
    ax.set_xlim(0, np.max(segment[:, :]))
    ax.set_ylim(0, np.max(segment[:, :]))
    ax.set_zlim(0, np.max(segment[:, :]))

    x, y, z = np.hsplit(segment, segment.shape[1])

    ax.scatter(x, y, z, c=z.reshape(z.shape[0], ))

    ax = fig.add_subplot(222)
    ax.scatter(x, y)
    ax.set_xlim(0, np.max(segment[:, :]))
    ax.set_ylim(0, np.max(segment[:, :]))

    ax = fig.add_subplot(223)
    ax.scatter(x, z)
    ax.set_xlim(0, np.max(segment[:, :]))
    ax.set_ylim(0, np.max(segment[:, :]))

    ax = fig.add_subplot(224)
    ax.scatter(y, z)
import cv2
import numpy as np
from matplotlib import pyplot as plt

img = cv2.imread('C:/python27/code/thesis/data/iris1.png', 0)
blur = cv2.GaussianBlur(img, (5, 5), 0)
# find normalized_histogram, and its cumulative distribution function
hist = cv2.calcHist([blur], [0], None, [256], [0, 256])
hist_norm = hist.ravel() / hist.max()
Q = hist_norm.cumsum()
bins = np.arange(256)
fn_min = np.inf
thresh = -1
for i in range(1, 256):  # xrange is Python 2 only
    p1, p2 = np.hsplit(hist_norm, [i])  # probabilities
    q1, q2 = Q[i], Q[255] - Q[i]  # cum sum of classes
    if q1 == 0 or q2 == 0:  # skip degenerate splits to avoid division by zero
        continue
    b1, b2 = np.hsplit(bins, [i])  # weights
    # finding means and variances
    m1, m2 = np.sum(p1 * b1) / q1, np.sum(p2 * b2) / q2
    v1, v2 = np.sum(((b1 - m1)**2) * p1) / q1, np.sum(((b2 - m2)**2) * p2) / q2
    # calculates the minimization function
    fn = v1 * q1 + v2 * q2
    if fn < fn_min:
        fn_min = fn
        thresh = i
# find otsu's threshold value with the OpenCV function (once, outside the loop)
ret, otsu = cv2.threshold(blur, 0, 255,
                          cv2.THRESH_BINARY + cv2.THRESH_OTSU)
print(thresh, ret)
    def _viterbi(signal, trans, dists):
        '''
        Apply the Viterbi algorithm to the observations provided in 'signal'.
        Note: `signal` is a SINGLE observation sequence.

        Returns the maximum likelihood hidden state sequence as well as the
        log-likelihood of that sequence.

        Note that this function may behave strangely if the provided sequence
        is impossible under the model - e.g. if the transition model requires
        more observations than provided in the signal.

        Parameters
        ----------
        signal : (d,n) ndarray
            Signal for which the optimal state sequence is to be calculated.
            d is the dimension of each observation (number of features)
            n is the number of observations

        trans : (n_states+1,n_states+1) ndarray
            The transition probability table.  The rightmost column contains probability
            of transitioning to final state, and the last row the initial state's
            transition probabilities.   Note that all the rows need to add to 1.

        dists: (n_states,) list
            A list of Gaussian objects defining the emitting pdf's, one object for each
            emitting  state.

        Return
        ------
        seq : (n,) ndarray
            The optimal state sequence for the signal (excluding non-emitting states)

        ll : float
            The log-likelihood associated with the sequence
        '''

        n_states = trans.shape[0] - 1
        T = signal.shape[1]  # T - number of observations
        lltable = np.zeros(
            (n_states + 1, T + 1))  # table containing LLs of ML state sequence
        seq = np.zeros(T, dtype="int")
        backtable = np.zeros(
            (n_states + 1, T + 1),
            dtype="int")  # Back pointers for ML state sequence (0 is start)

        # Prepare time -1 column - overwritten with time n stuff later
        lltable[-1, -1] = 0
        for state in range(n_states):
            lltable[state, -1] = float("-inf")
        emissionLLs = np.array([[s.logf(x.flatten()) for s in dists]
                                for x in np.hsplit(signal, signal.shape[1])])

        for time in range(T):
            lltable[n_states, time] = float("-inf")
            for state in range(n_states):
                bestpred = np.argmax(
                    np.log(trans[:, state]) + lltable[:, time - 1]
                )  # Underflow issue with log 0 - np.seterr to avoid
                lltable[state, time] = np.log(
                    trans[bestpred, state]) + lltable[bestpred, time - 1]
                lltable[state, time] += emissionLLs[time, state]
                backtable[state, time] = bestpred
        for state in range(
                n_states +
                1):  # Last time step (t = n+1) - transition to final state
            bestpred = np.argmax(np.log(trans[:, state]) + lltable[:, T - 1])
            lltable[state, T] = lltable[bestpred, T - 1] + np.log(
                trans[bestpred, state])  # No emission prob
            backtable[state, T] = bestpred
        seq[T - 1] = backtable[n_states, T]
        for i in range(T - 2, -1, -1):
            seq[i] = backtable[seq[i + 1], i + 1]
        return seq, lltable[n_states, T]
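
The emission table above relies on np.hsplit(signal, signal.shape[1]) to iterate over observations one column at a time; a minimal sketch:

import numpy as np

signal = np.arange(6.).reshape(2, 3)       # d=2 features, n=3 observations
cols = np.hsplit(signal, signal.shape[1])  # three (2, 1) column vectors
print([c.flatten() for c in cols])         # [array([0., 3.]), array([1., 4.]), array([2., 5.])]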
Пример #56
0
       [ 4,  5],
       [ 6,  7],
       [ 8,  9],
       [10, 11]])
>>> np.hstack((a,b))
array([[ 0,  1,  6,  7],
       [ 2,  3,  8,  9],
       [ 4,  5, 10, 11]])
>>> a=np.arange(30).reshape(2,15)
>>> a
array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14],
       [15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]])
>>> np.hsplit(a,3)
[array([[ 0,  1,  2,  3,  4],
       [15, 16, 17, 18, 19]]), array([[ 5,  6,  7,  8,  9],
       [20, 21, 22, 23, 24]]), array([[10, 11, 12, 13, 14],
       [25, 26, 27, 28, 29]])]
>>> result=np.hsplit(a,3)
>>> result[0]
array([[ 0,  1,  2,  3,  4],
       [15, 16, 17, 18, 19]])
>>> result[2]
array([[10, 11, 12, 13, 14],
       [25, 26, 27, 28, 29]])
>>> r=np.vsplit(a,2)
>>> r[0]
array([[ 0,  1,  2,  3,  4,  5,  6,  7,  8,  9, 10, 11, 12, 13, 14]])
>>> r[1]
Пример #57
0
#print(n)

#m = numpy.asarray([123,12,123,12,33],[],[])

import cv2
# imread flag: 0 loads grayscale, 1 loads color (color is used here)
im_g = cv2.imread("C:\\Users\\Bertug\\Desktop\\smallgray.png",1)

#print(im_g)
# to create a new image
#cv2.imwrite("newsmall.png",im_g)

#print(im_g[0:2,2:4])

# im_g.T takes the transpose.
# .flat gathers the elements into a single flat sequence
#for i in im_g.flat:
#    print(i)
#horizontal stack
#ims = numpy.hstack((im_g,im_g))
#print(ims)

#vertical stack
ims = numpy.vstack((im_g,im_g))


lst_h = numpy.hsplit(ims,5)  # 5 column blocks (this result was previously overwritten)

lst_v = numpy.vsplit(ims,3)  # 3 row blocks

print(lst_h)
print(lst_v)
Пример #58
0
import numpy as np

A = np.arange(12).reshape((3,4))
print(A)

print(np.split(A,3,axis=0))

print(np.array_split(A,3,axis=1))

print(np.vsplit(A,3))
print(np.hsplit(A,2))
Пример #59
0
# coding=utf-8
"""
不等量水平分割
"""

import numpy as np

a = np.arange(24).reshape(2, 12)
print(a)
"""
等量水平分割3分"""
print(np.hsplit(a, 3))
"""
不等量
从列索引3,4分隔开"""
print(np.hsplit(a, (3, 5)))
Пример #60
0
hsplit_np = []
# use pandas to convert the timestamps to a standard format
dateparse = lambda dates: pd.to_datetime(dates, format='%d/%m/%Y %H:%M')  # pd.datetime is deprecated
# merge the time columns and convert them to the standard time format
# rawdata = pd.read_csv('53787 气站数据.csv',parse_dates={'timeline':['date','(UTC)']},date_parser=dateparse)
rawdata = pd.read_csv(
    r'C:\Users\Administrator\PycharmProjects\untitled\files\53787.csv',
    header=None,
    names=[
        '区站号', '纬度', '经度', '观测场海拔高度', '年', '月', '日', '平均风速', '最大风速', '最大风速的风向',
        '极大风速', '极大风速的风向', 'other1', 'other2', 'other3', 'other4', 'other5'
    ])

longdate_np = np.array(rawdata)
print(longdate_np.shape)
hsplit_np = np.hsplit(longdate_np, (4, 7))

test = re.sub('[\[\]]', '', np.array_str(hsplit_np[1][0]))

print(test)

# date_number = datetime.datetime.strptime(str(hsplit_np[1][0]&'-'&hsplit_np[2][0]&'-'&hsplit_np[3][0])
#                                          ,'%Y-%m-%d').timestamp()
#
# print(datetime.datetime.utcfromtimestamp(date_number),'------>>>>>>',date_number)

# # define a function that converts a time string to a number; s is a string
# def datestr2num(s):
#     # toordinal() converts the parsed time string to an ordinal number
#     return datetime.datetime.strptime(s,'%Y-%m-%d %H:%M:%S').toordinal()
#