Example #1
0
    def stabilize(self, A, t, node, selected):
        """
        check that the given node can be solved with a stable linear system
        """
        C = linalg.inv(np.dot(A.T, A))

        best_idx = 0
        c_min = self.theta+1
        a_idx = -1

        if node.dim == x_dim:
            for candidate in np.setdiff1d(np.nonzero(self.y_computed)[0], selected):
                (c, _tau) = self.compute_lcn_extend(A, t, C, self.y[candidate,:], node.idx, candidate)
                if c < c_min:
                    a_star = self.y[candidate,:]
                    a_idx = candidate
                    c_min = c 
                    tau = _tau
        else:
            for candidate in np.setdiff1d(np.nonzero(self.x_computed)[0], selected):
                (c, _tau) = self.compute_lcn_extend(A, t, C, self.x[candidate,:], candidate, node.idx)
                if c < c_min:
                    a_star = self.x[candidate,:]
                    a_idx = candidate
                    c_min = c
                    tau = _tau

        if c_min < self.theta:
            log.debug('c_min : %f' % c_min)
            return a_star, a_idx

        return None
Example #2
0
	def pick_parents(self):

		# compute fitness
		w = self.calc_fitness()
		# sample parents proportional to their fitness
		males = np.array(np.nonzero(self.sex == 0)).flatten()
		females = np.array(np.nonzero(self.sex == 1)).flatten()
		w_male = w[males]/np.sum(w[males])
		w_female = w[females]/np.sum(w[females])
		males = np.random.choice(males, size = self.size//2, p = w_male)
		females = np.random.choice(females, size = self.size//2, p = w_female)
		assert(len(males) == len(females))

		idx = []
		sexes = []
		for i in range(0, len(males)):
			idx.append(males[i])
			idx.append(females[i])
			sexes.append(0)
			sexes.append(1)

		# make a copy of current population
		newpop = copy.deepcopy(self)
		newpop.sex = np.array(sexes)

		# now copy chromosomes from current population to new one
		for j in range(0, len(self.chrlen)):
			for i in range(0, self.size):
				newpop.tr[i] = self.tr[ idx[i] ]
				newpop.chroms[j][2*i] = self.chroms[j][ 2*idx[i] ]
				newpop.chroms[j][2*i+1] = self.chroms[j][ 2*idx[i]+1 ]

		return newpop
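A minimal standalone sketch of the same selection idiom, using hypothetical fitness and sex arrays (not part of the example above): np.nonzero splits the population by sex and np.random.choice samples parents in proportion to fitness.

import numpy as np

# hypothetical toy population: one fitness weight and one 0/1 sex label per individual
rng = np.random.default_rng(0)
fitness = rng.random(10)
sex = np.array([0, 1] * 5)

males = np.nonzero(sex == 0)[0]                    # indices of males
females = np.nonzero(sex == 1)[0]                  # indices of females
w_male = fitness[males] / fitness[males].sum()
w_female = fitness[females] / fitness[females].sum()

# draw the same number of parents of each sex, proportional to fitness
n_pairs = 3
chosen_males = rng.choice(males, size=n_pairs, p=w_male)
chosen_females = rng.choice(females, size=n_pairs, p=w_female)
print(chosen_males, chosen_females)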
Example #3
0
def calc_cluster_im(self,indices):
    screens = np.copy(self.screens[indices])
    if self.game_id  == 2: #pacman
        for s in screens:

            enemies_map = 1 * (s[:,:,0] == 180) + \
                          1 * (s[:,:,0] == 149) + \
                          1 * (s[:,:,0] == 212) + \
                          1 * (s[:,:,0] == 128) + \
                          1 * (s[:,:,0] == 232) + \
                          1 * (s[:,:,0] == 204)

            enemies_mask = np.ones((210,160),dtype=bool)
            enemies_mask[20:28,6:10] = 0
            enemies_mask[140:148,6:10] = 0
            enemies_mask[20:28,150:154] = 0
            enemies_mask[140:148,150:154] = 0
            enemies_map = enemies_map * enemies_mask
            r_ch = s[:,:,0]
            g_ch = s[:,:,1]
            b_ch = s[:,:,2]
            r_ch[np.nonzero(enemies_map)] = 45
            g_ch[np.nonzero(enemies_map)] = 50
            b_ch[np.nonzero(enemies_map)] = 184
    meanscreen=np.mean(screens,axis=0)

    return meanscreen
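A small standalone sketch of the recoloring idiom above, on a made-up 4x4 RGB array: a boolean map marks the target pixel values and np.nonzero returns their coordinates.

import numpy as np

# toy 4x4 RGB "screen"; recolor every pixel whose red channel matches a target value
screen = np.zeros((4, 4, 3), dtype=np.uint8)
screen[1, 2, 0] = 180
screen[3, 0, 0] = 149

target_map = (screen[:, :, 0] == 180) | (screen[:, :, 0] == 149)
rows, cols = np.nonzero(target_map)    # coordinates of the matching pixels
screen[rows, cols] = (45, 50, 184)     # paint them a fixed color
print(rows, cols)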
Example #4
0
    def polyhedra(self, wm):
        '''Iterates through the polyhedra that make up the closest volume to a certain vertex'''
        for p, facerow in enumerate(self.connected):
            faces = facerow.indices
            pts, polys = _ptset(), _quadset()
            if len(faces) > 0:
                poly = np.roll(self.polys[faces[0]], -np.nonzero(self.polys[faces[0]] == p)[0][0])
                assert pts[wm[p]] == 0
                assert pts[self.pts[p]] == 1
                pts[wm[poly[[0, 1]]].mean(0)]
                pts[self.pts[poly[[0, 1]]].mean(0)]

                for face in faces:
                    poly = np.roll(self.polys[face], -np.nonzero(self.polys[face] == p)[0][0])
                    a = pts[wm[poly].mean(0)]
                    b = pts[self.pts[poly].mean(0)]
                    c = pts[wm[poly[[0, 2]]].mean(0)]
                    d = pts[self.pts[poly[[0, 2]]].mean(0)]
                    e = pts[wm[poly[[0, 1]]].mean(0)]
                    f = pts[self.pts[poly[[0, 1]]].mean(0)]

                    polys((0, c, a, e))
                    polys((1, f, b, d))
                    polys((1, d, c, 0))
                    polys((1, 0, e, f))
                    polys((f, e, a, b))
                    polys((d, b, a, c))

            yield pts.points, np.array(list(polys.triangles))
Example #5
0
	def init_from_ms(self, ms_run):
		'''
		Initialize the population from the results of an ms run to avoid having to do
		lengthy 'burn-in' phase at beginning of each simulation.
		'''

		if not isinstance(ms_run, ms.MsReader):
			raise TypeError("Argument 'ms_run' must be an MsReader object.")

		sys.stderr.write("Initializing population from ms sample with header:\n{}\n".format(ms_run.header))

		# read in a simulation
		ms_sample = ms_run.next()

		# initialize position of mutations
		chrlen = self.chrlen[0]
		pos = np.array(ms_sample.positions, dtype = np.float32)*chrlen # scale by chromosome length

		assert(len(ms_sample.samples) >= 2*self.size)

		for i in range(0, self.size):
			alleles = np.array([ int(x) for x in ms_sample.samples[i] ])
			derived = np.nonzero(alleles)[0]
			if len(derived):
				self.chroms[0][i] = pos[ np.nonzero(alleles)[0] ]
			else:
				self.chroms[0][i] = np.ndarray((0,), dtype = np.float32)
Example #6
0
 def compute_sketch(self):
     start_time = time.time()
     if self.sketch is not None:
         return self.sketch
     mat_b = np.zeros([self.l, self.mat.shape[1]])
     # compute zero valued row list
     zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
     # repeat inserting each row of matrix A 
     percent = 0
     for i in range(0, self.mat.shape[0]):
         # insert a row into matrix B
         mat_b[zero_rows[0], :] = self.mat[i, :]
         # remove zero valued row from the list
         zero_rows.remove(zero_rows[0])
         # if there is no more zero valued row
         if len(zero_rows) == 0:
             # compute SVD of matrix B, we want to find the first l
             if self.mat.shape[0] / i > percent:
                 percent = self.mat.shape[0] / i
                 print i
             self._sketch_func(mat_b)
             # update the zero valued row list
             zero_rows = np.nonzero([round(s, 7) == 0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
     # why do we need this here? 
     # do we need to do a sketch one last time at the end? 
     self._sketch_func(mat_b)
     # get rid of extra non-zero rows when we return 
     self.sketch = mat_b[:self.l, :]
     self.sketching_time = time.time() - start_time
     return self.sketch
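A standalone sketch (toy matrix, not the class above) of how np.nonzero is used here to track the free, all-zero rows of the sketch buffer.

import numpy as np

# toy buffer with one occupied row
mat_b = np.zeros((4, 3))
mat_b[1] = [1.0, 2.0, 3.0]

# indices of rows whose entries are all zero, i.e. free slots in the buffer
zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b, axis=1)])[0].tolist()
# equivalent (and robust to sign cancellation): np.where(~mat_b.any(axis=1))[0]
print(zero_rows)   # [0, 2, 3]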
Example #7
0
 def compute_sketch(self):
     start_time = time.time()
     if self.sketch is not None:
         return self.sketch
     # basically, want to init an empty csr matrix 
     if self.randomized and (self.m > 100 * self.l):
         print "using sparse sketch"
         # lets use the sparse version of randomized sketch here 
         return self.compute_sparse_sketch()
     else:
         self._sketch_func = self._fast_rand_sketch
     # what do we do differently here? we need to iterate over the nzrow_inds,
     mat_b = np.zeros([self.l + self.b_size, self.m])
     # other way: np.where(~mat_b.any(axis=1))[0]
     # zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b, axis = 1)])[0].tolist()
     zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b[:self.l, :], axis = 1)])[0]
     zero_rows = np.hstack((zero_rows, np.arange(self.l, self.l + self.b_size))).tolist()
     # iterate through the nzrow_inds
     for i in self.nzrow_inds:
         mat_b[zero_rows[0], :] = self.mat.getrow(i).todense()
         zero_rows.remove(zero_rows[0])
         if len(zero_rows) == 0:
             print "sketching ", i
             self._sketch_func(mat_b)
             zero_rows = np.nonzero([round(s, 7) == 0.0 for s in np.sum(mat_b[:self.l, :], axis = 1)])[0]
             zero_rows = np.hstack((zero_rows, np.arange(self.l, self.l + self.b_size))).tolist()
     self._sketch_func(mat_b)
     self.sketch = mat_b[:self.l, :]
     self.sketching_time = time.time() - start_time 
     return self.sketch
Example #8
0
    def assign_nearest_nbh(self, query_doc):

        block_id, query_words, doc_words = query_doc
        query_vector = self.vectorize(query_words)
        doc_vector = self.vectorize(doc_words)
        #distance = emd(query_vector, doc_vector, self.distance_matrix)
        #return block_id, distance

        doc_indices = np.nonzero(doc_vector)[0]
        query_indices = np.nonzero(query_vector)[0]

        query_weights = [self.word_level_idf.get(q_i, 0) for q_i in query_indices]
        doc_weights = [self.word_level_idf.get(d_i, 0) for d_i in doc_indices]

        doc_centroid = np.average([self.embedding.model[self.reverse_vocab[i]] for i in doc_indices], axis=0,
                                  weights=doc_weights)
        query_centroid = np.average([self.embedding.model[self.reverse_vocab[i]] for i in query_indices], axis=0,
                                    weights=query_weights)

        # sklearn euclidean distances may not be a symmetric matrix, so taking
        # average of the two entries
        dist_arr = np.array([[(self.distance_matrix[w_i, q_j] + self.distance_matrix[q_j, w_i]) / 2
                              for w_i in doc_indices] for q_j in query_indices])

        label_assignment = np.argmin(dist_arr, axis=1)
        label_assignment = [(index, l) for index, l in enumerate(label_assignment)]

        distances = [dist_arr[(i,e)] * self.word_level_idf.get(query_indices[i], 1) for i, e in label_assignment]

        distance = (1 - self.alpha) * np.sum(distances) + \
                   self.alpha * sp.spatial.distance.cosine(doc_centroid,query_centroid)
        return block_id, distance
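A standalone sketch of the vector handling above, with a made-up bag-of-words vector, weights, and random embeddings: np.nonzero yields the indices of the words present, which then select rows for a weighted centroid.

import numpy as np

# hypothetical bag-of-words count vector over a 6-word vocabulary
doc_vector = np.array([0, 2, 0, 1, 0, 3])
doc_indices = np.nonzero(doc_vector)[0]            # -> array([1, 3, 5])

# idf-like weights and made-up word embeddings for the present words
weights = np.array([0.5, 1.0, 2.0])
embeddings = np.random.default_rng(1).random((6, 4))
centroid = np.average(embeddings[doc_indices], axis=0, weights=weights)
print(doc_indices, centroid.shape)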
Example #9
0
 def _get_ind_under_point(self, event):
     'get the index of the vertex under point if within epsilon tolerance'
     try:
         x, y = zip(*self._poly.xy)
         
         # display coords
         xt, yt = self._poly.get_transform().numerix_x_y(x, y)
         d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
         indseq = np.nonzero(np.equal(d, np.amin(d)))
         ind = indseq[0]
     
         if d[ind]>=self._epsilon:
             ind = None
     
         return ind
     except:
         # display coords
         xy = np.asarray(self._poly.xy)
         xyt = self._poly.get_transform().transform(xy)
         xt, yt = xyt[:, 0], xyt[:, 1]
         d = np.sqrt((xt-event.x)**2 + (yt-event.y)**2)
         indseq = np.nonzero(np.equal(d, np.amin(d)))[0]
         ind = indseq[0]
         
         if d[ind]>=self._epsilon:
             ind = None
         
         return ind
Example #10
0
def find_clique_index(variables, polynomial, clique_set):
    support = np.any(get_support(variables, polynomial), axis=0)
    support[np.nonzero(support)[0]] = 1
    for i, clique in enumerate(clique_set):
        if np.dot(support, clique) == len(np.nonzero(support)[0]):
            return i
    return -1
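A standalone sketch of the containment test above: the support is contained in a clique exactly when the dot product of the two indicator vectors equals the number of non-zero support entries.

import numpy as np

# support of a hypothetical polynomial over 5 variables, plus two clique indicators
support = np.array([0, 1, 0, 1, 0])
cliques = [np.array([1, 1, 0, 1, 0]),   # contains variables 1 and 3
           np.array([1, 0, 1, 0, 1])]   # does not

n_support = len(np.nonzero(support)[0])
matches = [i for i, c in enumerate(cliques) if np.dot(support, c) == n_support]
print(matches)   # [0]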
Example #11
0
def _generate_clique_alt(variables, obj, inequalities, equalities):
    n_dim = len(variables)
    rmat = spmatrix(1.0, range(n_dim), range(n_dim))
    for support in get_support(variables, obj):
        nonzeros = np.nonzero(support)[0]
        value = random.random()
        for i in nonzeros:
            for j in nonzeros:
                rmat[i, j] = value
    for polynomial in flatten([inequalities, equalities]):
        support = np.any(get_support(variables, polynomial), axis=0)
        nonzeros = np.nonzero(support)[0]
        value = random.random()
        for i in nonzeros:
            for j in nonzeros:
                rmat[i, j] = value
    rmat = rmat + 5*n_dim*spmatrix(1.0, range(n_dim), range(n_dim))
    # compute symbolic factorization using AMD ordering
    symb = cp.symbolic(rmat, p=amd.order)
    ip = symb.ip
    # symb = cp.symbolic(rmat)
    # ip = range(n_dim)
    cliques = symb.cliques()
    R = np.zeros((len(cliques), n_dim))
    for i, clique in enumerate(cliques):
        for j in range(len(clique)):
            R[i, ip[cliques[i][j]]] = 1
    return R
Example #12
0
def CCML(ccpart,ccmat,mu,r,nu,s,row_alpha,col_alpha):
	lp = []
	
	ccmat = np.array(ccmat)

	state = ccpart.states[1]

	# loop through the states
	for state in ccpart.states:
		all_cols = state['col_parts']
		all_rows = state['row_parts']

		K = max(all_cols)+1
				
		lp_temp = lcrp(all_cols,col_alpha)
		for view in range(K):
			row_part = all_rows[view,:]
			lp_temp += lcrp(row_part,row_alpha)
			cols_view = np.nonzero(all_cols==view)[0]
			for col in cols_view:
				for cat in range(row_part.max()+1):
					X = ccmat[np.nonzero(row_part==cat)[0],col]
					lp_temp += NGML(X,mu,r,nu,s)

		lp.append(lp_temp);

	# return the normalized probabilities
	return lp-sp.misc.logsumexp(lp)
Example #13
0
def rect(time, t0, t1, height=1, noise = 0.0):
	"""Rectangular signal of given height and width t1-t0

    Parameters
    ----------
        time : np.ndarray of shape (N,)
        	time instants (equidistant)
        t0 : float
        	time instant of rect lhs
        t1 : float
        	time instant of rect rhs
        height : float
         	signal maximum
        noise : float, optional
        	std of simulated signal noise

    Returns
    -------
        x : np.ndarray of shape (N,)
         	signal amplitudes at time instants
    """

	x = np.zeros((len(time),))
	x[np.nonzero(time > t0)] = height
	x[np.nonzero(time > t1)] = 0.0

	if noise > 0:
		x = x + np.random.randn(len(time)) * noise
	return x
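A short usage sketch, assuming the rect function above is in scope: np.nonzero recovers the sample indices inside the (noise-free) pulse.

import numpy as np

time = np.linspace(0, 1, 101)            # 1 s sampled at 100 Hz
x = rect(time, t0=0.2, t1=0.6, height=2.0)

idx = np.nonzero(x)[0]                   # samples inside the pulse
print(time[idx[0]], time[idx[-1]])       # ~0.21 and 0.6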
Example #14
0
def x_input_to_states(xinput, CORR_VAL_OUT=0, PARALLEL = False):
    sinput = np.zeros(xinput.shape)
    num_samples = xinput.shape[0]
    num_sensors = xinput.shape[1]
    if num_samples < num_sensors:
        print '[WARN] number of samples is smaller than number of sensors'

    print 'Mapping', xinput.shape, ' matrix to discrete states '

    for k, samples in enumerate(xinput.T):
        obs = samples[:,np.newaxis]
        label, opt_num_cluster, model, score, score_err_sum = state_retrieval(obs, max_num_cluster=6, est_method='kmean', PARALLEL=PARALLEL)
        high_peak_label_idx = np.argmax(model.cluster_centers_)
        low_peak_label_idx = np.argmin(model.cluster_centers_)
        high_peak_idx = np.nonzero(label == high_peak_label_idx)[0]
        sinput[high_peak_idx,k] = 1
        low_peak_idx = np.nonzero(label == low_peak_label_idx)[0]
        sinput[low_peak_idx, k] = -1

    corr_state_val = []
    if CORR_VAL_OUT == 1:
        print 'Compute Correlation Score....'
        for k,(row1, row2) in enumerate(zip(sinput.T, xinput.T)):
            corr_state_val.append(round(stats.pearsonr(row1, row2)[0],3))
    corr_state_val = np.array(corr_state_val)
    return sinput, corr_state_val
Example #15
0
def intersubjectconsensus():
    """Compute inter-subjects clustering consensus.

    """
    base_dir = r'/nfs/h1/workingshop/huanglijie/uni_mul_analysis'
    db_dir = os.path.join(base_dir, 'multivariate', 'detection', 'mvpcluster')

    n_clusters = 60

    mask_file = os.path.join(base_dir, 'multivariate', 'detection',
                             'mask.nii.gz')
    mask = nib.load(mask_file).get_data()

    for n in range(1, n_clusters):
        n += 1
        merged_file = os.path.join(db_dir, 'merged_cluster_'+str(n)+'.nii.gz')
        merged_data = nib.load(merged_file).get_data()
        n_subjs = merged_data.shape[3]
        mtx = np.zeros((n_subjs, n_subjs))
        for i in range(n_subjs):
            for j in range(n_subjs):
                data_i = merged_data[..., i]
                data_j = merged_data[..., j]
                vtr_i = data_i[np.nonzero(mask)]
                vtr_j = data_j[np.nonzero(mask)]
                tmp = metrics.adjusted_mutual_info_score(vtr_i, vtr_j)
                mtx[i, j] = tmp
        outfile = os.path.join(db_dir, 'consensus_'+str(n)+'.csv')
        np.savetxt(outfile, mtx, delimiter=',')
Example #16
0
def FindBigStuff(data,xsd =3,sd_method = 'Quian'):
    
    #s = np.std(data,0) * xsd
    
    #print s
    spikelist = np.array([0,0,0])[None,...]
    m,n = data.shape
    s = np.zeros(n)
    for i in range(n):
        
        x = data[:,i]
        if sd_method == 'Quian':
            s[i] = xsd * np.median(np.abs(x)) / 0.6745
        elif sd_method == 'STD':
            s[i] = np.std(x) * xsd
        taux = np.diff(np.where(abs(x)>s[i],1,0))
        times = np.nonzero(taux==1)[0]
        times2 = np.nonzero(taux==-1)[0]
        if len(times) !=0:
            if len(times)-1 == len(times2):
                times2 = np.append(times2,m)
            elif len(times) == len(times2)-1:
                times = np.append(0,times)
            chs = np.ones(times.shape)*i
            aux = np.append(chs[...,None],times[...,None],1)   
            aux = np.append(aux,times2[...,None],1)  
            spikelist = np.append(spikelist,aux,0)
    return np.delete(spikelist, (0), axis=0),s
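A standalone sketch of the threshold-crossing idiom above on a toy trace: np.diff on a 0/1 mask marks transitions, and np.nonzero turns them into start and end indices.

import numpy as np

x = np.array([0.1, 0.2, 3.0, 3.5, 0.1, -4.0, -0.2])
thr = 1.0
above = np.where(np.abs(x) > thr, 1, 0)

starts = np.nonzero(np.diff(above) == 1)[0] + 1    # first sample above threshold
ends = np.nonzero(np.diff(above) == -1)[0] + 1     # first sample back below
print(starts, ends)   # [2 5] [4 6]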
Example #17
0
def aff_cercle_visi(lon, lat, dlon, dlat, col, fig):
    """
    Display the visibility circles.

    lon, lat : user position (float)
    dlon, dlat : vectors
    col : display parameter for the circles
    """
    from params import CRD

    s = len(dlon)
    lon_vis = np.zeros((2, s))
    lon_vis[0, :] = lon + dlon
    lon_vis[1, :] = lon - dlon
    #if min(lon_vis < 0) or max(lon_vis > 2 * np.pi):

    dlat2 = dlat[(s-1)::-1]  # np.array (250,)

    #lon_vis_array = np.array([lon_vis[0], lon_vis[1], lon_vis[0, 0]]) * CRD
    #dlat_array = (np.array([dlat, dlat2, dlat[0]]) + lat) * CRD
    #plt.plot(lon_vis_array, dlat_array, col)

    latv = dlat + lat

    ## borders
    ## top of the half-circle
    indlat = np.nonzero(latv > np.pi / 2)[0]  # 1 dimension
    latv[indlat] = np.pi - latv[indlat]
    lon_vis[:, indlat] = lon_vis[:, indlat] + np.pi
    #plt.plot(lon_vis[0, indlat]*CRD, latv[indlat]*CRD, 'g-')
    #toto = np.nonzero(latv >= np.pi)[0].size
    #if toto:
    #    print("*"*5, toto)

    ## bottom of the half-circle
    indlat = np.nonzero(latv < -np.pi / 2)[0]  # 1 dimension
    latv[indlat] = -np.pi - latv[indlat]
    lon_vis[:, indlat] = lon_vis[:, indlat] + np.pi
    lon_vis[1, :] = lon_vis[1, (s-1)::-1]
    #plt.plot(lon_vis[1, indlat]*CRD, latv[indlat]*CRD, 'b-')
    #toto = np.nonzero(latv <= -np.pi)[0].size
    #if toto:
    #    print(toto, "*"*5)

    ## sides of the half-circle
    lon_vis = lon_vis + 2 * np.pi
    lon_vis = lon_vis % (2 * np.pi)

    latv2 = latv[(s-1)::-1]

    #plt.plot(lon_vis[0, :]*CRD, latv*CRD, col)
    #plt.plot(lon_vis[1, :]*CRD, latv2*CRD, col)
    cercled = lon_vis[0, :]*CRD, latv*CRD
    cercleg = lon_vis[1, :]*CRD, latv2*CRD

    #plt.plot(np.array([lon_vis[0, 0], lon_vis[1, 0]])*CRD,
    #         np.array([latv2[0], latv2[0]])*CRD, col)

    #plt.show()
    return cercleg, cercled
Example #18
0
def evaluateSpeakerDiarization(flags, flagsGT):

	minLength = min( flags.shape[0], flagsGT.shape[0] )
	flags = flags[0:minLength]
	flagsGT = flagsGT[0:minLength]

	uFlags = numpy.unique(flags)
	uFlagsGT = numpy.unique(flagsGT)	

	# compute contigency table:
	cMatrix = numpy.zeros(( uFlags.shape[0], uFlagsGT.shape[0] ))
	for i in range(minLength):
		cMatrix[ int(numpy.nonzero(uFlags==flags[i])[0]), int(numpy.nonzero(uFlagsGT==flagsGT[i])[0]) ] += 1.0

	Nc, Ns = cMatrix.shape;
	N_s = numpy.sum(cMatrix,axis=0);
	N_c = numpy.sum(cMatrix,axis=1);
	N   = numpy.sum(cMatrix);

	purityCluster = numpy.zeros( (Nc,) )
	puritySpeaker = numpy.zeros( (Ns,) )
	# compute cluster purity:
	for i in range(Nc):
		purityCluster[i] = numpy.max( (cMatrix[i,:]) )/ (N_c[i]);

	for j in range(Ns):
		puritySpeaker[j] = numpy.max( (cMatrix[:,j]) )/ (N_s[j]);

	purityClusterMean = numpy.sum(purityCluster*N_c) / N;
	puritySpeakerMean = numpy.sum(puritySpeaker*N_s) / N;
	
	return purityClusterMean, puritySpeakerMean
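A standalone sketch of the contingency-table construction above, using np.nonzero to map each label to its row/column, followed by a simplified (unweighted) cluster purity.

import numpy as np

# toy clustering flags vs. ground-truth speaker labels
flags = np.array([0, 0, 1, 1, 1])
flags_gt = np.array([2, 2, 2, 3, 3])
u_flags, u_gt = np.unique(flags), np.unique(flags_gt)

c_matrix = np.zeros((u_flags.size, u_gt.size))
for f, g in zip(flags, flags_gt):
    c_matrix[np.nonzero(u_flags == f)[0][0], np.nonzero(u_gt == g)[0][0]] += 1.0

cluster_purity = (c_matrix.max(axis=1) / c_matrix.sum(axis=1)).mean()
print(c_matrix, round(cluster_purity, 3))   # -> 0.833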
Example #19
0
File: batchtps.py Project: rll/lfd
def check_update(ctx, b):
    ctx.tps_params[0] = ctx.default_tps_params.copy()
    ctx.update_ptrs()
    xt = ctx.pts_t[0].get()
    p_mat = ctx.proj_mats[b][0].get()
    o_mat = ctx.offset_mats[b][0].get()
    true_res = np.dot(p_mat, xt) + o_mat
    ctx.set_tps_params(ctx.offset_mats[b])
    o_gpu = ctx.tps_params[0].get()
    if not np.allclose(o_gpu, o_mat):
        print "setting tps params failed"
        diff = np.abs(o_mat - o_gpu)
        nz = np.nonzero(diff)
        print nz
        ipy.embed()
        sys.exit(1)
    ctx.update_transform(b)
    p1 = ctx.tps_params[0].get()
    if not np.allclose(true_res, p1):
        print "p1 and true res differ"
        print p1[:3]
        diff = np.abs(p1 - true_res)
        print np.max(diff)
        amax = np.argmax(diff)
        print amax
        nz = np.nonzero(diff)
        print nz[0]
        ipy.embed()
        sys.exit(1)
Example #20
0
def dup_idx(arr):
    """
    Return the indices of all duplicated array elements.

    Parameters
    ----------

    arr : array-like object

    Returns
    -------

    idx : NumPy array
        An array containing the indices of the duplicated elements

    Examples
    --------

    >>> from root_numpy import dup_idx
    >>> dup_idx([1, 2, 3, 4, 5])
    array([], dtype=int64)
    >>> dup_idx([1, 2, 3, 4, 5, 5])
    array([4, 5])
    >>> dup_idx([1, 2, 3, 4, 5, 5, 1])
    array([0, 4, 5, 6])

    """
    _, b = np.unique(arr, return_inverse=True)
    return np.nonzero(np.logical_or.reduce(
        b[:, np.newaxis] == np.nonzero(np.bincount(b) > 1),
        axis=1))[0]
Example #21
0
def mask_ts(x, ts):
    '''
    A function to mask the data matrix with a time series containing zero values.
    The data matrix is T x V, where T is the number of scans and V is the
    number of within-mask voxels. The time series ts, a 1D array of length T,
    may have some zeros. This function eliminates time points from the data
    matrix where ts=0. Moreover, the data matrix is multiplied by the non-zero
    values of ts.

    Input Parameters:
          x:        An array of size T x V, where T is the number of
                    time points and V is the number of voxels.
          ts:       A 1D array of length T. Some elements may be zero.

    Returns:
          y:        An array of size U x V, where U <= T. This is
                    the data array masked by the time series ts. Time
                    points corresponding to ts=0 are eliminated. The remaining
                    time points are weighted by the value of ts.
    '''
    # first, eliminate zero elements from x and ts
    tsNZ = ts[np.nonzero(ts)]
    xNZ = x[np.nonzero(ts)[0],:]
    # multiplying the data with ts by broadcasting
    y = xNZ * np.array([tsNZ]).T  
    # and returning the masked data
    return y
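A standalone usage-style sketch of the same masking, with a tiny made-up data matrix: the result equals what mask_ts(x, ts) would return.

import numpy as np

x = np.arange(12, dtype=float).reshape(4, 3)   # 4 time points, 3 voxels
ts = np.array([0.0, 1.0, 0.0, 2.0])

keep = np.nonzero(ts)[0]                       # indices of non-zero time points
y = x[keep, :] * ts[keep, np.newaxis]          # same as mask_ts(x, ts)
print(y)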
Example #22
0
def output_wiggle(bins, binsize, norm_factor, by_strand, name, extra_trackline = ""):
    """write all non-empty bins to bedgraph format strings; always includes
    minimal track line; Output is in 1-based wiggle format."""
    if not by_strand:
        print "track type=wiggle_0 alwaysZero=on visibility=full maxHeightPixels=100:80:50 " \
                + ("name='%s'" % name) + extra_trackline
        for chrom in sorted(bins.keys()):
            print "variableStep chrom=%s span=%d" % (chrom, binsize)
            non_zero_bins = numpy.nonzero(bins[chrom] > 0)
            result = numpy.column_stack((non_zero_bins[0] * binsize + 1,
                bins[chrom][non_zero_bins] * norm_factor))
            numpy.savetxt(sys.stdout, result, "%d\t%.8f")
    else:
        for strand in (0, 1):
            if strand == 0:
                nf = norm_factor
            else:
                nf = -norm_factor
            print "track type=wiggle_0 alwaysZero=on visibility=full maxHeightPixels=100:80:50 " \
                    + ("name='%s[%s]'" % (name, strand and '-' or '+')) + extra_trackline
            for chrom in sorted(bins.keys()):
                print "variableStep chrom=%s span=%d" % (chrom, binsize)
                non_zero_bins = numpy.nonzero(bins[chrom][strand] > 0)
                result = numpy.column_stack((non_zero_bins[0] * binsize + 1,
                    bins[chrom][strand][non_zero_bins] * nf))
                numpy.savetxt(sys.stdout, result, "%d\t%.8f")
Example #23
0
def decTY1(raw_8, raw_16=None, raw_32=None):
    """
    Modified byte offset decompressor used in Oxford Diffraction images
    @param raw_8,raw_16,raw_32: strings containing raw data with integers of the given size
    @return numpy.ndarray 
    """
    data = numpy.fromstring(raw_8, dtype="uint8").astype(int)
    data -= 127
    if raw_32 is not None:
        int32 = numpy.fromstring(raw_32, dtype="int32").astype(int)
        exception32 = numpy.nonzero(data == 128)
    if raw_16 is not None:
        int16 = numpy.fromstring(raw_16, dtype="int16").astype(int)
        exception16 = numpy.nonzero(data == 127)
        data[exception16] = int16
    if raw_32:
        data[exception32] = int32
    summed = data.cumsum()
    smax = summed.max()
    if (smax > (2 ** 31 - 1)):
        bytecode = "int64"
    elif (smax > (2 ** 15 - 1)):
        bytecode = "int32"
    elif (smax > (2 ** 7 - 1)):
        bytecode = "int16"
    else:
        bytecode = "int8"
    return summed.astype(bytecode)
Example #24
0
    def _hessian_main(self, params):
        params_infl = params[:self.k_inflate]
        params_main = params[self.k_inflate:]

        y = self.endog
        w = self.model_infl.predict(params_infl)
        w = np.clip(w, np.finfo(float).eps, 1 - np.finfo(float).eps)
        score = self.score(params)
        zero_idx = np.nonzero(y == 0)[0]
        nonzero_idx = np.nonzero(y)[0]

        mu = self.model_main.predict(params_main)

        hess_arr = np.zeros((self.k_exog, self.k_exog))

        coeff = (1 + w[zero_idx] * (np.exp(mu[zero_idx]) - 1))

        #d2l/dp2
        for i in range(self.k_exog):
            for j in range(i, -1, -1):
                hess_arr[i, j] = ((
                    self.exog[zero_idx, i] * self.exog[zero_idx, j] *
                    mu[zero_idx] * (w[zero_idx] - 1) * (1 / coeff -
                    w[zero_idx] * mu[zero_idx] * np.exp(mu[zero_idx]) /
                    coeff**2)).sum() - (mu[nonzero_idx] * self.exog[nonzero_idx, i] *
                    self.exog[nonzero_idx, j]).sum())

        return hess_arr
Example #25
0
def cleaningSineTracks(tfreq, minTrackLength=3):
	"""
	Delete short fragments of a collection of sinusoidal tracks 
	tfreq: frequency of tracks
	minTrackLength: minimum duration of tracks in number of frames
	returns tfreqn: output frequency of tracks
	"""

	if tfreq.shape[1] == 0:                                 # if no tracks return input
		return tfreq
	nFrames = tfreq[:,0].size                               # number of frames
	nTracks = tfreq[0,:].size                               # number of tracks in a frame
	for t in range(nTracks):                                # iterate over all tracks
		trackFreqs = tfreq[:,t]                               # frequencies of one track
		trackBegs = np.nonzero((trackFreqs[:nFrames-1] <= 0)  # beginning of track contours
								& (trackFreqs[1:]>0))[0] + 1
		if trackFreqs[0]>0:
			trackBegs = np.insert(trackBegs, 0, 0)
		trackEnds = np.nonzero((trackFreqs[:nFrames-1] > 0)   # end of track contours
								& (trackFreqs[1:] <=0))[0] + 1
		if trackFreqs[nFrames-1]>0:
			trackEnds = np.append(trackEnds, nFrames-1)
		trackLengths = 1 + trackEnds - trackBegs              # lengths of track contours
		for i,j in zip(trackBegs, trackLengths):              # delete short track contours
			if j <= minTrackLength:
				trackFreqs[i:i+j] = 0
	return tfreq
Example #26
0
def petscKron(A,B):
    dim = A.shape[0]*B.shape[0] # length of resulting matrix
    
    # Used to get indexes where values are non-zero
    Br,Bc = np.nonzero(B)
    Ar,Ac = np.nonzero(A)

    # Need to have values on first axis
    Ar = np.asarray(Ar).ravel(); Ac = np.asarray(Ac).ravel()
    Br = np.asarray(Br).ravel(); Bc = np.asarray(Bc).ravel()

    # Distance between each 'block'
    n = B.shape[1]
    
    # create petsc resulting matrix
    K = PETSc.Mat().createAIJ([dim,dim])
    K.setFromOptions(); K.setUp()
    start,end = K.getOwnershipRange()

    for i in xrange(len(Ar)): # Go through each non-zero value in A
        # br,bc are used to track which 'block' we're in (in result matrix)
        br,bc = n*Ar[i], n*Ac[i]

        for j in xrange(len(Br)): # Go through non-zero values in B
            # kr,kc used to see where to put the number in K (the indexs)
            kr = (Br[j]+br).astype(np.int32)
            kc = (Bc[j]+bc).astype(np.int32)

            if start <= kr < end: # Make sure we're in the correct processor
                K[kr, kc] = A[Ar[i],Ac[i]] * B[Br[j],Bc[j]]

    K.assemble()
    return K
Example #27
0
def truncate_hist1( self, xmin, xmax ):
    buf   = get_buffer_hist1( self )
    sbuf  = get_err_buffer_hist1( self )
    edges, fixed = get_bin_edges_axis( self.GetXaxis(), type=True )

    e1 = numpy.fabs(edges[:-1]-xmin)<1.e-9
    e2 = numpy.fabs(edges[1:]-xmax)<1.e-9
    assert numpy.any( e1 ) and numpy.any( e2 ), 'Invalid new histogram limits'
    i1 = numpy.nonzero( e1 )[0][0]
    i2 = numpy.nonzero( e2 )[0][-1]+1

    if fixed:
        newhist = self.__class__( self.GetName(), self.GetTitle(), i2-i1, xmin, xmax )
    else:
        newhist = self.__class__( self.GetName(), self.GetTitle(), i2-i1, edges[i1:i2] )

    newbuf = get_buffer_hist1( newhist )
    if sbuf is None:
        newsbuf = None
    else:
        newhist.Sumw2()
        newsbuf = get_err_buffer_hist1( newhist )

    newbuf[:] = buf[i1:i2]
    if not sbuf is None:
        newsbuf[:] = sbuf[i1:i2]

    newhist.SetEntries( newhist.Integral() )

    return newhist
Example #28
0
def d2init(p):

    # useful indexing for 2nd derivs

    # for difference terms
    n = (p - 2) * (p - 1) // 2
    Id = zeros((n, 2), dtype=int)
    n = -1
    for i in range(p - 1):
        for j in range(i + 1, p - 1):
            n += 1
            Id[n, :] = [i, j]

    # for permutation terms
    Ip = loop(zeros(p - 3, dtype=int), p, 0)
    loop(reset=True)

    # indexing for sums and products
    Jd = zeros((p - 1, p - 2), dtype=int)  # Jd[i,:] lists the rows of Id with i
    Jp = zeros((p - 1, p - 2), dtype=int)  # Jp[i,:] lists the rows of Ip without i
    mask = zeros(Ip.shape, dtype=int)
    for i in range(p - 1):
        (Jd[i, :], x) = nonzero(Id == i)
        (Jp[i, :],) = nonzero((Ip == i).choose(mask, 1).sum(axis=1) == 0)

    return Id, Jd, Ip, Jp
Example #29
0
def zpeaki(source,order=1,fpeak=fhigh):
    '''
        Find n-th order highs/lows.
        Returns the high/low series together with, for each high/low, the index of the
        farthest future sample needed to compute it.
        order defaults to 1; values below 1 are treated as 1.
        The first return value sh is non-zero at the highs/lows and zero elsewhere;
                the second, si, holds the index of the farthest future sample for each high/low.
                Thus sh[np.nonzero(sh)] is the series of highs/lows, si[np.nonzero(sh)] the index series,
                and sif.time[si[np.nonzero(sh)]] the corresponding times at which those indices take effect.
    '''
    tsx1 = fpeak(source)
    sx1 = np.select([tsx1!=0],[source],0)
    icovered = rollx(np.arange(len(source)),-1)
    if order <= 1:
        return sx1,np.select([tsx1],[icovered],0)
    icursx = np.nonzero(tsx1)[0]
    for i in xrange(1,order):   # the loop is always entered here (order > 1)
        sxx = source[icursx]
        tsxx = fpeak(sxx)
        icovered[icursx] = rollx(icovered[icursx],-1)   # coverage of the current highs/lows: previous peaks' coverage shifted left by one (non-peaks dropped)
        icursx = icursx[np.nonzero(tsxx)[0]]
    osx = np.zeros_like(source)
    osx[icursx] = source[icursx]
    iz = np.zeros_like(source)
    iz[icursx] = icovered[icursx]   # keep only the needed entries of icovered
    return osx,iz
Example #30
0
def main():
	rfile = sys.argv[1]

	csvfile = open(rfile, 'rb')
	dat = csv.reader(csvfile, delimiter=',')

	X  = []
	Y = []

	for i, row in enumerate(dat):
		if i > 0:
			X.append(float(row[0]))
			Y.append(int(row[1]))

	X = np.array(X)
	Y = np.array(Y)

	class1 = np.array(X[np.nonzero(Y == 1)[0]])
	class2 = np.array(X[np.nonzero(Y == 2)[0]])

	print("computing...")
	#build GMM for two classes
	model1 = build_models1(class1)
	#model2 = build_models2(class2)

	print("Here are the models!")

	print(model1[0])
	#print(model2[0])


	plt.plot(range(1,20+1), model1[1], 'ro')
	#plt.plot(range(1,20+1), model2[1], 'ro')
	plt.show() 
Example #31
0
normalize = True
sim_matrix = np.zeros((609 + 1, 609 + 1))
cases_matrix = np.zeros((609 + 1, 609 + 1))
not_found = -1 if normalize else 0
start = time.time()
for uid1 in range(610):
    for uid2 in range(uid1, 610):
        # Comparing user to itself.
        if (uid1 == uid2):
            sim_matrix[uid1, uid2] = not_found
            cases_matrix[uid1, uid2] = 0
            continue

        u1_ratings = mat_movie_ratings.getrow(uid1).toarray().ravel()
        u2_ratings = mat_movie_ratings.getrow(uid2).toarray().ravel()
        common_ratings = np.nonzero(u1_ratings * u2_ratings)
        u1_ratings = u1_ratings[common_ratings]
        u2_ratings = u2_ratings[common_ratings]

        pc = pearson_correlation(u1_ratings, user_means[uid1], u2_ratings,
                                 user_means[uid2])
        if (np.isnan(pc)):
            pc = not_found

        sim_matrix[uid1, uid2] = pc
        cases_matrix[uid1, uid2] = u1_ratings.shape[0]

    print("User {} ready".format(uid1))
print("Run time = {} min".format(round((time.time() - start) / 60.0, 1)))

# Normalize sim matrix
Example #32
0
    def objective_function(self,
                           variables,
                           subs,
                           generation,
                           annuity_scalar=1):
        """ Generates the full objective function, including the optimization variables.

        Args:
            variables (Dict): dictionary of variables being optimized
            subs (DataFrame): table of load data for the optimization windows
            generation (list, Expression): the sum of generation within the system
            annuity_scalar (float): a scalar value to be multiplied by any yearly cost or benefit that helps capture the cost/benefit over
                        the entire project lifetime (only to be set iff sizing)

        Returns:
            The expression of the objective function that it affects. This can be passed into the cvxpy solver.

        """
        # pandas converts the billing period lists to ints if there are only one
        # per time step. This checks for that and handles appropriately
        sub_billing_period = self.billing_period.loc[subs.index]

        demand_charges = 0
        # generation_tot = np.array(subs.loc[:, "generation"])
        load_array = np.array(subs.loc[:, "load"])
        net_load = load_array - variables['dis'] + variables['ch'] - generation

        # if mask contains more than a month, then determine dcterm monthly
        months = sub_billing_period.index.to_period('M')
        for mo in months.unique():
            # array of booleans; true for the selected month
            monthly_mask = (sub_billing_period.index.month == mo.month)

            # select the month's billing period data
            month_billing_period = sub_billing_period.loc[monthly_mask]

            # set of unique billing periods in the selected month
            pset = {
                int(item)
                for sublist in month_billing_period for item in sublist
            }

            # determine the index that has the first True value in the array of booleans
            # (the index of the timestep that corresponds to day 1 and hour 0 of the month)
            first_true = np.nonzero(monthly_mask)[0][0]

            for per in pset:
                # Add demand charge calculation for each applicable billing period (PER) within the selected month

                # get an array that is True only for the selected month
                billing_per_mask = monthly_mask.copy()

                for i in range(first_true,
                               first_true + len(month_billing_period)):
                    # loop through only the values that are 'True' (which should all be in order because
                    # the array should be sorted by datetime index) as they represent a month

                    # reassign the value at I to be whether PER applies to the time corresponding to I
                    # (reassign each 'True' value to 'False' if the billing period does not apply to that timestep)
                    billing_per_mask[i] = per in sub_billing_period.iloc[i]

                # add a demand charge for each billing period in a month (for every month being optimized)
                if np.all(billing_per_mask):
                    demand_charges += self.tariff.loc[
                        per, 'Value'] * annuity_scalar * cvx.max(net_load)
                else:
                    demand_charges += self.tariff.loc[
                        per, 'Value'] * annuity_scalar * cvx.max(
                            net_load[billing_per_mask])

        return {self.name: demand_charges}
Example #33
0
def min_distance_to_segment(seglons, seglats, lons, lats):
    """
    This function computes the shortest distance to a segment in a 2D reference
    system.

    :parameter seglons:
        A list or an array of floats specifying the longitude values of the two
        vertexes delimiting the segment.
    :parameter seglats:
        A list or an array of floats specifying the latitude values of the two
        vertexes delimiting the segment.
    :parameter lons:
        A list or a 1D array of floats specifying the longitude values of the
        points for which the calculation of the shortest distance is requested.
    :parameter lats:
        A list or a 1D array of floats specifying the latitude values of the
        points for which the calculation of the shortest distance is requested.
    :returns:
        An array of the same shape as lons which contains for each point
        defined by (lons, lats) the shortest distance to the segment.
        Distances are negative for those points that stay on the 'left side'
        of the segment direction and whose projection lies within the segment
        edges. For all other points, distance is positive.
    """

    # Check the size of the seglons, seglats arrays
    assert len(seglons) == len(seglats) == 2

    # Compute the azimuth of the segment
    seg_azim = azimuth(seglons[0], seglats[0], seglons[1], seglats[1])

    # Compute the azimuth of the direction obtained
    # connecting the first point defining the segment and each site
    azimuth1 = azimuth(seglons[0], seglats[0], lons, lats)

    # Compute the azimuth of the direction obtained
    # connecting the second point defining the segment and each site
    azimuth2 = azimuth(seglons[1], seglats[1], lons, lats)

    # Find the points inside the band defined by the two lines perpendicular
    # to the segment direction passing through the two vertexes of the segment.
    # For these points the closest distance is the distance from the great arc.
    idx_in = numpy.nonzero(
        (numpy.cos(numpy.radians(seg_azim-azimuth1)) >= 0.0) &
        (numpy.cos(numpy.radians(seg_azim-azimuth2)) <= 0.0))

    # Find the points outside the band defined by the two line perpendicular
    # to the segment direction passing through the two vertexes of the segment.
    # For these points the closest distance is the minimum of the distance from
    # the two point vertexes.
    idx_out = numpy.nonzero(
        (numpy.cos(numpy.radians(seg_azim-azimuth1)) < 0.0) |
        (numpy.cos(numpy.radians(seg_azim-azimuth2)) > 0.0))

    # Find the indexes of points 'on the left of the segment'
    idx_neg = numpy.nonzero(numpy.sin(numpy.radians(
        (azimuth1-seg_azim))) < 0.0)

    # Now let's compute the distances for the two cases.
    dists = numpy.zeros_like(lons)
    if len(idx_in[0]):
        dists[idx_in] = distance_to_arc(
            seglons[0], seglats[0], seg_azim, lons[idx_in], lats[idx_in])
    if len(idx_out[0]):
        dists[idx_out] = min_geodetic_distance(
            seglons, seglats, lons[idx_out], lats[idx_out])

    # Finally we correct the sign of the distances in order to make sure that
    # the points on the right semispace defined using as a reference the
    # direction defined by the segment (i.e. the direction defined by going
    # from the first point to the second one) have a positive distance and
    # the others a negative one.
    dists = abs(dists)
    dists[idx_neg] = - dists[idx_neg]

    return dists
Example #34
0
def _optimal_substemma(ms_id, explain_matrix, combinations, mode):
    """Do an exhaustive search for the combination among a given set of ancestors
    that best explains a given manuscript.

    """

    ms_id = ms_id - 1  # numpy indices start at 0
    val = current_app.config.val

    b_defined = val.def_matrix[ms_id]
    # remove variants where the inspected ms is undefined
    b_common = np.logical_and(val.def_matrix, b_defined)

    explain_equal_matrix = val.mask_matrix[ms_id]

    # The mss x passages boolean matrix that is TRUE whenever the inspected ms.
    # agrees with the potential source ms.
    b_equal = np.bitwise_and(val.mask_matrix, explain_equal_matrix) > 0
    b_equal = np.logical_and(b_equal, b_common)

    # The mss x passages boolean matrix that is TRUE whenever the inspected ms.
    # agrees with the potential source ms. or is posterior to it.
    b_post = np.bitwise_and(val.mask_matrix, explain_matrix) > 0
    b_post = np.logical_and(b_post, b_common)

    for comb in combinations:
        # how many passages does this combination explain?
        # pylint: disable=no-member
        b_explained_equal = np.logical_or.reduce(b_equal[comb.vec])
        b_explained_post = np.logical_or.reduce(b_post[comb.vec])
        b_explained_post = np.logical_and(b_explained_post,
                                          np.logical_not(b_explained_equal))
        b_explained = np.logical_or(b_explained_equal, b_explained_post)

        comb.n_explained_equal = np.count_nonzero(b_explained_equal)
        comb.n_explained_post = np.count_nonzero(b_explained_post)

        unexplained_matrix = np.copy(explain_matrix)
        unexplained_matrix[np.logical_not(b_defined)] = 0
        unexplained_matrix[b_explained] = 0
        b_unknown = np.bitwise_and(unexplained_matrix, 0x1) > 0
        unexplained_matrix[b_unknown] = 0
        b_open = unexplained_matrix > 0

        comb.n_unknown = np.count_nonzero(b_unknown)
        comb.n_open = np.count_nonzero(b_open)

        if mode == 'detail':
            comb.open_indices = tuple(
                int(n + 1) for n in np.nonzero(b_open)[0])
            comb.unknown_indices = tuple(
                int(n + 1) for n in np.nonzero(b_unknown)[0])

    if mode == 'search':
        # add the 'hint' column
        def key_len(c):
            return c.len

        def key_explained(c):
            return -c.explained()

        for _k, g in itertools.groupby(sorted(combinations, key=key_len),
                                       key=key_len):
            sorted(g, key=key_explained)[0].hint = True
Example #35
0
                pass

        return image, all_objs


if __name__ == "__main__":
    import os
    import utils

    os.chdir(os.path.abspath(".."))

    anchors = [1.22, 1.56, 2.95, 2.87, 4.63, 5.66, 7.31, 8.02, 10.66, 8.77]
    generator_cfg = {
        'IMAGE_H': 416,
        'IMAGE_W': 416,
        'GRID_H': 13,
        'GRID_W': 13,
        'BOX': 5,
        'LABELS': utils.label_names_as_list(),
        'CLASS': 1,
        'ANCHORS': anchors,
        'BATCH_SIZE': 4,
        'TRUE_BOX_BUFFER': 3,
        "JITTER": 0
    }
    imgs_annots = utils.images_with_annots()
    train_batch = BatchGenerator(imgs_annots, generator_cfg)
    for batch in train_batch:
        print(np.nonzero(batch[1]))
        exit(1)
Example #36
0
    def run(self, tqdm_desc_it="iterations", tqdm_desc_alg="algorithms", tqdm_desc_rep="repetitions", tqdm_leave=False, tqdm_disable=False, prev_draw=True):

        # Initialize Rewards and History of selected Actions (3d matrices [t x j x i])
        X = np.zeros((self.n, self.m, self.tau), dtype=float)
        R = np.zeros((self.n, self.m, self.tau), dtype=float)
        H = np.zeros((self.n, self.m, self.tau), dtype=int)

        # For each repetition
        #for i in tqdm(range(self.n), desc=tqdm_desc_rep, leave=(tqdm_leave and self.m == 1), disable=(tqdm_disable or self.n == 1)):
        for i in tqdm(range(self.n), desc=tqdm_desc_rep, leave=tqdm_leave, disable=(tqdm_disable or self.n == 1)):

            # Draw
            if prev_draw:
                X_prev_a = np.array([a.draw_nparray((self.tau, self.n)) for a in self.A])	

            # For each algorithm
            #for j, g in enumerate(tqdm(self.G, desc=tqdm_desc_alg, leave=tqdm_leave, disable=(tqdm_disable or self.m == 1))):
            for j, g in enumerate(self.G):

                # Initialize
                g.startGame()

                # Loop on time
                for t in tqdm(self.T, desc=tqdm_desc_it, leave=tqdm_leave, disable=(tqdm_disable or self.n > 1 or self.m > 1) ):
                    # The algorithm chooses the arm to play
                    a_t = g.choice()
                    # The arm played gives a reward
                    if prev_draw:
                        x_t = X_prev_a[a_t, t, i]
                    else:
                        x_t = self.A[a_t].draw()
                    # The reward is returned to the algorithm
                    g.getReward(a_t, x_t)
                    # Save both
                    X[i, j, t] = x_t
                    H[i, j, t] = a_t

        R = X * self.delta_r + self.min_r
        
        #actions history, with initial action index being 1, not 0
        H1 = H+1

        #actions map (bool 4d matrix)
        H_a = np.array([[[[True if (H[i,j,t]==a) else False for t in self.T] for a in self.K] for j in range(self.m)] for i in range(self.n)], dtype='bool')

        #progressive actions count (int 4d matrix [t x j x i x a])
        N_a = np.cumsum(H_a, axis=3)

        #averaged progressive actions count (float 3d matrix [t x j x a]) #averaged over repetitions
        self.MN_a = np.mean(N_a, axis=0)		

        #progressive actions frequency (float 4d matrix [t x j x i x a])
        F_a = N_a / self.T1

        #averaged progressive actions frequency (float 3d matrix [t x j x a]) #averaged over repetitions
        self.MF_a = np.mean(F_a, axis=0)

        if (self.win is not None):

            #window count (int 4d matrix [t x j x i x a])
            NW_a = np.concatenate((N_a[:,:,:,:self.win], N_a[:,:,:,self.win:] - N_a[:,:,:,:-self.win]), axis=3)

            #averaged window count (float 3d matrix [t x j x a]) #averaged over repetitions
            self.MNW_a = np.mean(NW_a, axis=0)		

            #window frequency (float 4d matrix [t x j x i x a])
            FW_a = np.concatenate((N_a[:,:,:,:self.win] / np.arange(1,self.win+1, dtype='float'), (N_a[:,:,:,self.win:] - N_a[:,:,:,:-self.win]) / float(self.win)), axis=3) 

            #averaged window frequency (float 3d matrix [t x j x a]) #averaged over repetitions
            self.MFW_a = np.mean(FW_a, axis=0)		

        #final arm pull count (int 3d matrix [j x i x a])
        n_a = N_a[:,:,:,self.tau-1]

        #averaged final arm pull count (float 2d matrix [j x a]) #averaged over repetitions
        self.mn_a = np.mean(n_a, axis=0)

        #final arm pull frequency (float 3d matrix [j x i x a])
        f_a = F_a[:,:,:,self.tau-1]

        #averaged final arm pull frequency (float 2d matrix [j x a]) #averaged over repetitions
        self.mf_a = np.mean(f_a, axis=0)

        #progressive cumulative rewards (float 3d matrix [t x j x i])
        SR = np.cumsum(R, axis=2, dtype='float')

        #averaged progressive cumulative rewards (float 2d matrix [t x j]) #averaged over repetitions
        self.MSR = np.mean(SR, axis=0)

        #final rewards (float 2d matrix [j x i])
        sr = SR[:,:,self.tau-1]

        #averaged final rewards (float 1d matrix [j]) #averaged over repetitions
        self.msr = np.mean(sr, axis=0)
        #and standard deviation
        self.dsr = np.std(sr, axis=0)

        #progressive average rewards (float 3d matrix [t x j x i]) #averaged over time
        MR = SR / self.T1

        #averaged progressive average rewards (float 2d matrix [t x j]) #averaged over time and repetitions
        self.MMR = np.mean(MR, axis=0)

        #regret (float 3d matrix [t x j x i])
        L = self.mu_star - R

        #averaged regret (float 2d matrix [t x j])
        #self.ML = np.mean(L, axis=0)
        #progressive average regret (float 3d matrix [t x j x i]) #averaged over time
        ML = self.mu_star - MR

        #averaged average regret (float 2d matrix [t x j]) #averaged over time and repetitions
        self.MML = np.mean(ML, axis=0)

        #cumulated regret (float 3d matrix [t x j x i])
        SL = np.cumsum(L, axis=2, dtype='float')

        #averaged cumulated regret (float 2d matrix [t x j]) #averaged over repetitions
        self.MSL = np.mean(SL, axis=0)

        #final cumulated regret (float 2d matrix [j x i])
        sl = SL[:,:,self.tau-1]

        #averaged final cumulated regret (float 1d matrix [j]) #averaged over repetitions
        self.msl = np.mean(sl, axis=0)
        #and standard deviation
        self.dsl = np.std(sl, axis=0)
        
        #rewards map (float 4d matrix [t x j x i x a])
        R_a = np.array([[[[R[i,j,t] if (H[i,j,t]==a) else 0.0 for t in self.T] for a in self.K] for j in range(self.m)] for i in range(self.n)], dtype='float')

        #averaged rewards map (float 3d matrix [t x j x a]) #averaged over repetitions
        self.MR_a = np.mean(R_a, axis=0)

        #progressive rewards map (int 4d matrix [t x j x i x a])
        SR_a = np.cumsum(R_a, axis=3)

        #averaged progressive rewards map (float 3d matrix [t x j x a]) #averaged over repetitions
        self.MSR_a = np.mean(SR_a, axis=0)

        #final rewards per action (float 3d matrix [j x i x a])
        sr_a = SR_a[:,:,:,self.tau-1]

        #averaged final rewards per action (float 2d matrix [j x a]) #averaged over repetitions
        self.msr_a = np.mean(sr_a, axis=0)

        #reward proportion per action (float 3d matrix [j x i x a])
        fr_a = sr_a / SR[:,:,self.tau-1,np.newaxis]

        #averaged proportion per action (float 2d matrix [j x a]) #averaged over repetitions
        self.mfr_a = np.mean(fr_a, axis=0)

        #progressive budget (float 3d matrix [t x j x i])
        # i.e. the progressive cumulative rewards plus initial budget
        B = SR + self.b_0

        #progressive on negative counter of episodes (float 3d matrix [t x j])
        # i.e. the number of episodes where, at each time t, alg j is running on negative budget
        N = np.sum(B >= 0, axis=0)
        
        #averaged progressive budget (float 2d matrix [t x j]) #averaged over repetitions
        #self.MB = np.mean(B, axis=0)
        self.MB = self.MSR + self.b_0

        #final budget (float 2d matrix [j x i])
        b = B[:,:,self.tau-1]

        #averaged final budget (float 1d matrix [j]) #averaged over repetitions
        self.mb = np.mean(b, axis=0)

        #time map on negative budget (int 3d matrix [t x j x i])
        TNB = np.array([[[1 if(v<0) else 0 for v in B_ij] for B_ij in B_i] for B_i in B])

        #time dead map (int 3d matrix [t x j x i])
        TD = np.maximum.accumulate(TNB, axis=2)

        #progressive survival counter of episodes (float 3d matrix [t x j])
        self.A = 1 - np.mean(TD, axis=0)
        
        #time map of the averaged budget on negative (int 2d matrix [t x j])
        self.TNMB = np.array([[1 if(v<0) else 0 for v in MB_j] for MB_j in self.MB])

        #survival time (before ruin or end) (int 2d matrix [j x i])
        Z = np.reshape(np.ones(self.n*self.m, dtype='int'), [self.n, self.m, 1]) #add 1 at the end		
        TNBZ = np.block([TNB, Z])
        self.TTNB = np.array([[np.nonzero(v_tj==1)[0][0] for v_tj in v_t] for v_t in TNBZ])		

        #averaged survival time (before ruin or end) (int 1d matrix [j])
        self.MTTNB = np.mean(self.TTNB, axis=0)
        #and std dev
        self.DTTNB = np.std(self.TTNB, axis=0)

        #cumulated time progression on negative budget
        STNB = np.cumsum(TNB, axis=2)
        self.STNMB = np.cumsum(self.TNMB, axis=1) 
        #self.MSTNB = np.mean(self.STNB, axis=0)

        #final cumulated time on negative budget
        stnb = STNB[:,:,self.tau-1]

        self.stnmb = self.STNMB[:,self.tau-1]

        #averaged final cumulated time on negative budget
        self.mstnb = np.mean(stnb, axis=0)
        #and std dev
        self.dstnb = np.std(stnb, axis=0)

        #ruin episodes (int 1d matrix [j])
        self.senb = np.count_nonzero(stnb, axis=0) 
        #rate
        self.renb = 1.0 - self.senb / self.n

        #negative budget progression
        NB = np.array([[[v if(v<0) else 0 for v in B_ij] for B_ij in B_i] for B_i in B])

        #average negative budget progression
        self.NMB = np.array([[v if(v<0) else 0 for v in MB_j] for MB_j in self.MB])

        #cumulated negative budget progression
        SNB = np.cumsum(NB, axis=2, dtype='float')

        #self.MSNB = np.mean(SNB, axis=0)

        #cumulated negative budget progression on average
        self.SNMB = np.cumsum(self.NMB, axis=1, dtype='float') 

        #final cumulated negative budget
        snb = SNB[:,:,self.tau-1]

        self.snmb = self.SNMB[:,self.tau-1]

        #final cumulated negative budget (float 1d matrix [j]) #averaged over repetitions
        self.msnb = np.mean(snb, axis=0)
        #and its std deviation
        self.dsnb = np.std(snb, axis=0)

        if(not self.save_only_means):
            self.R = R
            self.H = H
            self.H1 = H1
            self.H_a = H_a
            self.R_a = R_a
            self.N_a = N_a
            self.F_a = F_a
            self.n_a = n_a
            self.f_a = f_a
            self.NW_a = NW_a
            self.SR = SR
            self.sr = sr
            self.MR = MR
            self.L = L
            self.ML = ML
            self.SL = SL
            self.B = B
            self.b = b
            self.TNB = TNB
            self.STNB = STNB
            self.NB = NB
            self.SNB = SNB
            self.snb = snb
Example #37
0
            mask_annul = (DAOannul.to_mask(method='center'))[star_ID]
            mask_apert = (DAOapert.to_mask(method='center'))[star_ID]
            # CAUTION!! YOU MUST USE 'center', NOT 'exact'!!!
    
            cutimg = mask_annul.cutout(img_uint16)
            df_cutimg = pd.DataFrame(cutimg)
            df_cutimg.to_csv('{0!s}_DAOstarfinder_starID_{1:04}_Star_Area_pixel_value.csv'\
                             .format(f_name[:-4], star_ID))
            
            apert_apply = mask_apert.multiply(img_uint16)  # change from 'sky_apply  = mask_annul.apply(img)'
            df_apert_apply = pd.DataFrame(apert_apply)
            df_apert_apply.to_csv('{0!s}_DAOstarfinder_starID_{1:04}_Aperture_area_pixel_value.csv'\
                                  .format(f_name[:-4], star_ID))

            apert_non0   = np.nonzero(apert_apply)
            apert_pixel  = apert_apply[apert_non0]
            apert_nan = apert_apply.copy()
            apert_nan[apert_nan == 0] = np.nan
            #print(apert_nan)
            
            sky_apply = mask_annul.multiply(img_uint16)  # change from 'sky_apply  = mask_annul.apply(img)'
            df_sky_apply = pd.DataFrame(sky_apply)
            df_sky_apply.to_csv('{0!s}_DAOstarfinder_starID_{1:04}_Sky_Annulus_pixel_value.csv'\
                                .format(f_name[:-4], star_ID))
            
            sky_non0   = np.nonzero(sky_apply)
            sky_pixel  = sky_apply[sky_non0]
            sky_nan = sky_apply.copy()
            sky_nan[sky_nan == 0] = np.nan
            
Example #38
0
ri = np.random.rand(Ni, 1)

a = np.vstack((0.02 * np.ones((Ne, 1)), 0.02 + 0.08 * ri))
b = np.vstack((0.2 * np.ones((Ne, 1)), 0.25 - 0.05 * ri))
c = np.vstack((-65 + 15 * re**2, -65 * np.ones((Ni, 1))))
d = np.vstack((8 - 6 * re**2, 2 * np.ones((Ni, 1))))
S = np.hstack(
    (0.5 * np.random.rand(Ne + Ni, Ne), -np.random.rand(Ne + Ni, Ni)))

# Initial values of u and v
v = -65 * np.ones((Ne + Ni, 1))
u = b * v
firings = np.empty((0, 2))

for t in range(1000):
    # Thalamic Input
    I = np.vstack((5 * np.random.randn(Ne, 1), 2 * np.random.randn(Ni, 1)))
    fired = np.nonzero(v >= 30)[0]  # Indices of spikes
    temp = np.array([t + 0 * fired, fired])
    firings = np.vstack((firings, temp.T))
    v[fired] = c[fired]
    u[fired] = u[fired] + d[fired]

    I = I + np.sum(S[:, fired], axis=1, keepdims=True)
    v = v + 0.5 * (0.04 * v**2 + 5 * v + 140 - u + I)  # step 0.5 ms
    v = v + 0.5 * (0.04 * v**2 + 5 * v + 140 - u + I)  # for numerical
    u = u + a * (np.multiply(b, v) - u)  # stability

plt.scatter(firings[:, 0], firings[:, 1])
plt.show()
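As a small follow-up (not in the original script), the recorded spike list can be summarized into a mean firing rate; with a 1000 ms simulation, spikes per neuron equals Hz:

# firings has one row per spike over the 1000 ms (= 1 s) simulation
mean_rate_hz = firings.shape[0] / float(Ne + Ni)
print('mean firing rate: %.1f Hz' % mean_rate_hz)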
Example #39
0
def create_hitor_calibration(output_filename, plot_pixel_calibrations=False):
    '''Generate a HitOr calibration file (_calibration.h5) from a raw data file and plot the calibration data.

    Parameters
    ----------
    output_filename : string
        Name of the input raw data file; also used to derive the output calibration file name.
    plot_pixel_calibrations : bool, iterable
        If True, generate additional per-pixel calibration plots. If a list of (column, row) tuples (from 1 to 80 / 336), plot the selected pixels.

    Returns
    -------
    nothing
    '''
    logging.info('Analyze HitOR calibration data and plot results of %s',
                 output_filename)

    with AnalyzeRawData(raw_data_file=output_filename, create_pdf=True
                        ) as analyze_raw_data:  # Interpret the raw data file
        analyze_raw_data.create_occupancy_hist = False  # too many scan parameters to do in ram histogramming
        analyze_raw_data.create_hit_table = True
        analyze_raw_data.create_tdc_hist = True
        analyze_raw_data.align_at_tdc = True  # align events at TDC words, first word of event has to be a tdc word
        analyze_raw_data.interpret_word_table()
        analyze_raw_data.interpreter.print_summary()
        analyze_raw_data.plot_histograms()

        n_injections = analyze_raw_data.n_injections  # use later
        meta_data = analyze_raw_data.out_file_h5.root.meta_data[:]
        scan_parameters_dict = get_scan_parameter(meta_data)
        inner_loop_parameter_values = scan_parameters_dict[next(
            reversed(
                scan_parameters_dict))]  # inner loop parameter name is unknown
        scan_parameter_names = scan_parameters_dict.keys()
        #         col_row_combinations = get_unique_scan_parameter_combinations(analyze_raw_data.out_file_h5.root.meta_data[:], scan_parameters=('column', 'row'), scan_parameter_columns_only=True)

        meta_data_table_at_scan_parameter = get_unique_scan_parameter_combinations(
            meta_data, scan_parameters=scan_parameter_names)
        scan_parameter_values = get_scan_parameters_table_from_meta_data(
            meta_data_table_at_scan_parameter, scan_parameter_names)
        event_number_ranges = get_ranges_from_array(
            meta_data_table_at_scan_parameter['event_number'])
        event_ranges_per_parameter = np.column_stack(
            (scan_parameter_values, event_number_ranges))
        if analyze_raw_data.out_file_h5.root.Hits.nrows == 0:
            raise AnalysisError("Found no hits.")
        hits = analyze_raw_data.out_file_h5.root.Hits[:]
        event_numbers = hits['event_number'].copy(
        )  # create contigous array, otherwise np.searchsorted too slow, http://stackoverflow.com/questions/15139299/performance-of-numpy-searchsorted-is-poor-on-structured-arrays

        output_filename = os.path.splitext(output_filename)[0]
        with tb.open_file(output_filename + "_calibration.h5",
                          mode="w") as calibration_data_file:
            logging.info('Create calibration')
            calibration_data = np.full(
                shape=(80, 336, len(inner_loop_parameter_values), 4),
                fill_value=np.nan,
                dtype='f4'
            )  # result of the calibration is a histogram with col_index, row_index, plsrDAC value, mean discrete tot, rms discrete tot, mean tot from TDC, rms tot from TDC

            progress_bar = progressbar.ProgressBar(
                widgets=[
                    '',
                    progressbar.Percentage(), ' ',
                    progressbar.Bar(marker='*', left='|', right='|'), ' ',
                    progressbar.AdaptiveETA()
                ],
                maxval=len(event_ranges_per_parameter),
                term_width=80)
            progress_bar.start()

            for index, (actual_scan_parameter_values, event_start,
                        event_stop) in enumerate(event_ranges_per_parameter):
                if event_stop is None:  # happens for the last chunk
                    event_stop = hits[-1]['event_number'] + 1
                array_index = np.searchsorted(
                    event_numbers, np.array([event_start, event_stop]))
                actual_hits = hits[array_index[0]:array_index[1]]
                for item_index, item in enumerate(scan_parameter_names):
                    if item == "column":
                        actual_col = actual_scan_parameter_values[item_index]
                    elif item == "row":
                        actual_row = actual_scan_parameter_values[item_index]
                    elif item == "PlsrDAC":
                        plser_dac = actual_scan_parameter_values[item_index]
                    else:
                        raise ValueError("Unknown scan parameter %s" % item)

                # Only pixel of actual column/row should be in the actual data chunk but since FIFO is not cleared for each scan step due to speed reasons and there might be noisy pixels this is not always the case
                n_wrong_pixel = np.count_nonzero(
                    np.logical_or(actual_hits['column'] != actual_col,
                                  actual_hits['row'] != actual_row))
                if n_wrong_pixel != 0:
                    logging.warning(
                        '%d hit(s) from other pixels for scan parameters %s',
                        n_wrong_pixel, ', '.join([
                            '%s=%s' % (name, value)
                            for (name,
                                 value) in zip(scan_parameter_names,
                                               actual_scan_parameter_values)
                        ]))

                actual_hits = actual_hits[np.logical_and(
                    actual_hits['column'] == actual_col, actual_hits['row'] ==
                    actual_row)]  # Only take data from selected pixel
                actual_tdc_hits = actual_hits[
                    (actual_hits['event_status'] & 0b0000111110011100) ==
                    0b0000000100000000]  # only take hits from good events (one TDC word only, no error)
                actual_tot_hits = actual_hits[
                    (actual_hits['event_status'] & 0b0000100010011100) ==
                    0b0000000000000000]  # only take hits from good events for tot
                tot, tdc = actual_tot_hits['tot'], actual_tdc_hits['TDC']

                if tdc.shape[0] < n_injections:
                    logging.info(
                        '%d of %d expected TDC hits for scan parameters %s',
                        tdc.shape[0], n_injections, ', '.join([
                            '%s=%s' % (name, value)
                            for (name,
                                 value) in zip(scan_parameter_names,
                                               actual_scan_parameter_values)
                        ]))
                if tot.shape[0] < n_injections:
                    logging.info(
                        '%d of %d expected hits for scan parameters %s',
                        tot.shape[0], n_injections, ', '.join([
                            '%s=%s' % (name, value)
                            for (name,
                                 value) in zip(scan_parameter_names,
                                               actual_scan_parameter_values)
                        ]))

                inner_loop_scan_parameter_index = np.where(
                    plser_dac == inner_loop_parameter_values
                )[0][
                    0]  # translate the scan parameter value to an index for the result histogram
                # numpy mean and std return nan if array is empty
                calibration_data[actual_col - 1, actual_row - 1,
                                 inner_loop_scan_parameter_index,
                                 0] = np.mean(tot)
                calibration_data[actual_col - 1, actual_row - 1,
                                 inner_loop_scan_parameter_index,
                                 1] = np.mean(tdc)
                calibration_data[actual_col - 1, actual_row - 1,
                                 inner_loop_scan_parameter_index,
                                 2] = np.std(tot)
                calibration_data[actual_col - 1, actual_row - 1,
                                 inner_loop_scan_parameter_index,
                                 3] = np.std(tdc)

                progress_bar.update(index)
            progress_bar.finish()

            calibration_data_out = calibration_data_file.create_carray(
                calibration_data_file.root,
                name='HitOrCalibration',
                title='Hit OR calibration data',
                atom=tb.Atom.from_dtype(calibration_data.dtype),
                shape=calibration_data.shape,
                filters=tb.Filters(complib='blosc',
                                   complevel=5,
                                   fletcher32=False))
            calibration_data_out[:] = calibration_data
            calibration_data_out.attrs.dimensions = scan_parameter_names
            calibration_data_out.attrs.scan_parameter_values = inner_loop_parameter_values
            calibration_data_out.flush()
            #             with PdfPages(output_filename + "_calibration.pdf") as output_pdf:
            plot_scurves(calibration_data[:, :, :, 0],
                         inner_loop_parameter_values,
                         "ToT calibration",
                         "ToT",
                         15,
                         "Charge [PlsrDAC]",
                         filename=analyze_raw_data.output_pdf)
            plot_scurves(calibration_data[:, :, :, 1],
                         inner_loop_parameter_values,
                         "TDC calibration",
                         "TDC [ns]",
                         None,
                         "Charge [PlsrDAC]",
                         filename=analyze_raw_data.output_pdf)
            tot_mean_all_pix = np.nanmean(calibration_data[:, :, :, 0],
                                          axis=(0, 1))
            tot_error_all_pix = np.nanstd(calibration_data[:, :, :, 0],
                                          axis=(0, 1))
            tdc_mean_all_pix = np.nanmean(calibration_data[:, :, :, 1],
                                          axis=(0, 1))
            tdc_error_all_pix = np.nanstd(calibration_data[:, :, :, 1],
                                          axis=(0, 1))
            plot_tot_tdc_calibration(
                scan_parameters=inner_loop_parameter_values,
                tot_mean=tot_mean_all_pix,
                tot_error=tot_error_all_pix,
                tdc_mean=tdc_mean_all_pix,
                tdc_error=tdc_error_all_pix,
                filename=analyze_raw_data.output_pdf,
                title="Mean charge calibration of %d pixel(s)" %
                np.count_nonzero(
                    ~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2)))
            # plotting individual pixels
            if plot_pixel_calibrations is True:
                # selecting pixels with non-nan entries
                col_row_non_nan = np.nonzero(
                    ~np.all(np.isnan(calibration_data[:, :, :, 0]), axis=2))
                plot_pixel_calibrations = np.dstack(col_row_non_nan)[0]
            elif plot_pixel_calibrations is False:
                plot_pixel_calibrations = np.array([], dtype=np.int)
            else:  # assuming list of column / row tuples
                plot_pixel_calibrations = np.array(plot_pixel_calibrations) - 1
            # generate index array
            pixel_indices = np.arange(plot_pixel_calibrations.shape[0])
            plot_n_pixels = 10  # number of pixels at the beginning, center and end of the array
            np.random.seed(0)
            # select random pixels
            if pixel_indices.size - 2 * plot_n_pixels >= 0:
                random_pixel_indices = np.sort(
                    np.random.choice(
                        pixel_indices[plot_n_pixels:-plot_n_pixels],
                        min(plot_n_pixels,
                            pixel_indices.size - 2 * plot_n_pixels),
                        replace=False))
            else:
                random_pixel_indices = np.array([], dtype=np.int)
            selected_pixel_indices = np.unique(
                np.hstack([
                    pixel_indices[:plot_n_pixels], random_pixel_indices,
                    pixel_indices[-plot_n_pixels:]
                ]))
            # plotting individual pixels
            for (column,
                 row) in plot_pixel_calibrations[selected_pixel_indices]:
                logging.info("Plotting charge calibration for pixel column " +
                             str(column + 1) + " / row " + str(row + 1))
                tot_mean_single_pix = calibration_data[column, row, :, 0]
                tot_std_single_pix = calibration_data[column, row, :, 2]
                tdc_mean_single_pix = calibration_data[column, row, :, 1]
                tdc_std_single_pix = calibration_data[column, row, :, 3]
                plot_tot_tdc_calibration(
                    scan_parameters=inner_loop_parameter_values,
                    tot_mean=tot_mean_single_pix,
                    tot_error=tot_std_single_pix,
                    tdc_mean=tdc_mean_single_pix,
                    tdc_error=tdc_std_single_pix,
                    filename=analyze_raw_data.output_pdf,
                    title="Charge calibration for pixel column " +
                    str(column + 1) + " / row " + str(row + 1))
Example #40
0
def mu_idx(_grid, r0, r1):
    grid_idx_ = np.logical_and(
        _grid[:, :, 4] >= r0,
        _grid[:, :, 4] < r1)
    return _grid[:, :, 0][np.nonzero(grid_idx_)].mean(), grid_idx_
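A hypothetical usage sketch (the grid layout is an assumption: channel 4 holds a radius per cell and channel 0 the quantity being averaged in the bin [r0, r1)):

import numpy as np

grid = np.random.rand(32, 32, 5)             # toy grid
mean_val, bin_mask = mu_idx(grid, 0.2, 0.4)  # average channel 0 where 0.2 <= r < 0.4
print(mean_val, int(bin_mask.sum()), 'cells in the bin')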
Example #41
0
import numpy as np
import torch
from torch.autograd import Variable

# word_list, context_size, embed_size, xmax and alpha are assumed to be defined
# earlier in the original script (the snippet starts mid-file)
vocab = np.unique(word_list)
w_list_size = len(word_list)
vocab_size = len(vocab)
w_to_i = {word: ind for ind, word in enumerate(vocab)}

comat = np.zeros((vocab_size, vocab_size))
for i in range(w_list_size):
    for j in range(1, context_size + 1):
        ind = w_to_i[word_list[i]]
        if i - j > 0:
            lind = w_to_i[word_list[i - j]]
            comat[ind, lind] += 1.0 / j
        if i + j < w_list_size:
            rind = w_to_i[word_list[i + j]]
            comat[ind, rind] += 1.0 / j
coocs = np.transpose(np.nonzero(comat))


def wf(x):
    if x < xmax:
        return (x / xmax)**alpha
    return 1


l_embed, r_embed = [[
    Variable(torch.from_numpy(np.random.normal(0, 0.01, (embed_size, 1))),
             requires_grad=True) for j in range(vocab_size)
] for i in range(2)]
l_biases, r_biases = [[
    Variable(torch.from_numpy(np.random.normal(0, 0.01, 1)),
             requires_grad=True) for j in range(vocab_size)
] for i in range(2)]
Example #42
0
import numpy as np
import scipy.sparse as sp


def extract_state(model, buffer=None):
  """
    Compute a bipartite graph representation of the solver. In this
    representation, the variables and constraints of the MILP are the
    left- and right-hand side nodes, and an edge links two nodes iff the
    variable is involved in the constraint. Both the nodes and edges carry
    features.
    Parameters
    ----------
    model : pyscipopt.scip.Model
        The current model.
    buffer : dict
        A buffer to avoid re-extracting redundant information from the solver
        each time.
    Returns
    -------
    variable_features : dictionary of type {'names': list, 'var_names': list, 'values': np.ndarray}
        The features associated with the variable nodes in the bipartite graph.
    edge_features : dictionary of type {'names': list, 'indices': np.ndarray, 'values': np.ndarray}
        The features associated with the edges in the bipartite graph.
        This is given as a sparse matrix in COO format.
    constraint_features : dictionary of type {'names': list, 'values': np.ndarray}
        The features associated with the constraint nodes in the bipartite graph.
    """
  if buffer is None or model.getNNodes() == 1:
    buffer = {}

  # update state from buffer if any
  s = model.getState(buffer['scip_state'] if 'scip_state' in buffer else None)
  buffer['scip_state'] = s

  if 'state' in buffer:
    obj_norm = buffer['state']['obj_norm']
  else:
    obj_norm = np.linalg.norm(s['col']['coefs'])
    obj_norm = 1 if obj_norm <= 0 else obj_norm

  row_norms = s['row']['norms']
  row_norms[row_norms == 0] = 1

  # Column features
  n_cols = len(s['col']['types'])

  if 'state' in buffer:
    col_feats = buffer['state']['col_feats']
  else:
    col_feats = {}
    col_feats['type'] = np.zeros(
        (n_cols, 4))  # BINARY INTEGER IMPLINT CONTINUOUS
    col_feats['type'][np.arange(n_cols), s['col']['types']] = 1
    col_feats['coef_normalized'] = s['col']['coefs'].reshape(-1, 1) / obj_norm

  col_feats['has_lb'] = ~np.isnan(s['col']['lbs']).reshape(-1, 1)
  col_feats['has_ub'] = ~np.isnan(s['col']['ubs']).reshape(-1, 1)
  col_feats['sol_is_at_lb'] = s['col']['sol_is_at_lb'].reshape(-1, 1)
  col_feats['sol_is_at_ub'] = s['col']['sol_is_at_ub'].reshape(-1, 1)
  col_feats['sol_frac'] = s['col']['solfracs'].reshape(-1, 1)
  col_feats['sol_frac'][s['col']['types'] ==
                        3] = 0  # continuous have no fractionality
  col_feats['basis_status'] = np.zeros((n_cols, 4))  # LOWER BASIC UPPER ZERO
  col_feats['basis_status'][np.arange(n_cols), s['col']['basestats']] = 1
  col_feats['reduced_cost'] = s['col']['redcosts'].reshape(-1, 1) / obj_norm
  col_feats['age'] = s['col']['ages'].reshape(-1, 1) / (s['stats']['nlps'] + 5)
  col_feats['sol_val'] = s['col']['solvals'].reshape(-1, 1)
  col_feats['inc_val'] = s['col']['incvals'].reshape(-1, 1)
  col_feats['avg_inc_val'] = s['col']['avgincvals'].reshape(-1, 1)

  col_feat_names = [[
      k,
  ] if v.shape[1] == 1 else [f'{k}_{i}' for i in range(v.shape[1])]
                    for k, v in col_feats.items()]
  col_feat_names = [n for names in col_feat_names for n in names]
  col_feat_vals = np.concatenate(list(col_feats.values()), axis=-1)

  # var_names = [
  #     model.getLPColsData()[i].getVar().name
  #     for i in range(len(model.getLPColsData()))
  # ]
  # Strip the prefix 't_' added by scip (str.lstrip('t_') would strip leading
  # characters, not the prefix, so check for it explicitly)
  var_names = [i.name[2:] if i.name.startswith('t_') else i.name
               for i in model.getVars(transformed=True)]

  variable_features = {
      'names': col_feat_names,
      'var_names': var_names,
      'values': col_feat_vals,
  }

  # Row features
  if 'state' in buffer:
    row_feats = buffer['state']['row_feats']
    has_lhs = buffer['state']['has_lhs']
    has_rhs = buffer['state']['has_rhs']
  else:
    row_feats = {}
    has_lhs = np.nonzero(~np.isnan(s['row']['lhss']))[0]
    has_rhs = np.nonzero(~np.isnan(s['row']['rhss']))[0]
    row_feats['obj_cosine_similarity'] = np.concatenate(
        (-s['row']['objcossims'][has_lhs],
         +s['row']['objcossims'][has_rhs])).reshape(-1, 1)
    row_feats['bias'] = np.concatenate(
        (-(s['row']['lhss'] / row_norms)[has_lhs],
         +(s['row']['rhss'] / row_norms)[has_rhs])).reshape(-1, 1)

  row_feats['is_tight'] = np.concatenate(
      (s['row']['is_at_lhs'][has_lhs],
       s['row']['is_at_rhs'][has_rhs])).reshape(-1, 1)

  row_feats['age'] = np.concatenate(
      (s['row']['ages'][has_lhs], s['row']['ages'][has_rhs])).reshape(
          -1, 1) / (s['stats']['nlps'] + 5)

  # # redundant with is_tight
  # tmp = s['row']['basestats']  # LOWER BASIC UPPER ZERO
  # tmp[s['row']['lhss'] == s['row']['rhss']] = 4  # LOWER == UPPER for equality constraints
  # tmp_l = tmp[has_lhs]
  # tmp_l[tmp_l == 2] = 1  # LHS UPPER -> BASIC
  # tmp_l[tmp_l == 4] = 2  # EQU UPPER -> UPPER
  # tmp_l[tmp_l == 0] = 2  # LHS LOWER -> UPPER
  # tmp_r = tmp[has_rhs]
  # tmp_r[tmp_r == 0] = 1  # RHS LOWER -> BASIC
  # tmp_r[tmp_r == 4] = 2  # EQU LOWER -> UPPER
  # tmp = np.concatenate((tmp_l, tmp_r)) - 1  # BASIC UPPER ZERO
  # row_feats['basis_status'] = np.zeros((len(has_lhs) + len(has_rhs), 3))
  # row_feats['basis_status'][np.arange(len(has_lhs) + len(has_rhs)), tmp] = 1

  tmp = s['row']['dualsols'] / (row_norms * obj_norm)
  row_feats['dualsol_val_normalized'] = np.concatenate(
      (-tmp[has_lhs], +tmp[has_rhs])).reshape(-1, 1)

  row_feat_names = [[
      k,
  ] if v.shape[1] == 1 else [f'{k}_{i}' for i in range(v.shape[1])]
                    for k, v in row_feats.items()]
  row_feat_names = [n for names in row_feat_names for n in names]
  row_feat_vals = np.concatenate(list(row_feats.values()), axis=-1)

  constraint_features = {
      'names': row_feat_names,
      'values': row_feat_vals,
  }

  # Edge features
  if 'state' in buffer:
    edge_row_idxs = buffer['state']['edge_row_idxs']
    edge_col_idxs = buffer['state']['edge_col_idxs']
    edge_feats = buffer['state']['edge_feats']
  else:
    coef_matrix = sp.csr_matrix(
        (s['nzrcoef']['vals'] / row_norms[s['nzrcoef']['rowidxs']],
         (s['nzrcoef']['rowidxs'], s['nzrcoef']['colidxs'])),
        shape=(len(s['row']['nnzrs']), len(s['col']['types'])))
    coef_matrix = sp.vstack(
        (-coef_matrix[has_lhs, :], coef_matrix[has_rhs, :])).tocoo(copy=False)

    edge_row_idxs, edge_col_idxs = coef_matrix.row, coef_matrix.col
    edge_feats = {}
    edge_feats['coef_normalized'] = coef_matrix.data.reshape(-1, 1)

  edge_feat_names = [[
      k,
  ] if v.shape[1] == 1 else [f'{k}_{i}' for i in range(v.shape[1])]
                     for k, v in edge_feats.items()]
  edge_feat_names = [n for names in edge_feat_names for n in names]
  edge_feat_indices = np.vstack([edge_row_idxs, edge_col_idxs])
  edge_feat_vals = np.concatenate(list(edge_feats.values()), axis=-1)

  edge_features = {
      'names': edge_feat_names,
      'indices': edge_feat_indices,
      'values': edge_feat_vals,
  }

  if 'state' not in buffer:
    buffer['state'] = {
        'obj_norm': obj_norm,
        'col_feats': col_feats,
        'row_feats': row_feats,
        'has_lhs': has_lhs,
        'has_rhs': has_rhs,
        'edge_row_idxs': edge_row_idxs,
        'edge_col_idxs': edge_col_idxs,
        'edge_feats': edge_feats,
    }
  return constraint_features, edge_features, variable_features
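A minimal sketch of consuming the returned dictionaries (assumed usage, not part of the source): the edge_features entry is a COO triplet, so it can be reassembled into a scipy sparse constraint/variable matrix.

import scipy.sparse as sp

# constraint_features, edge_features, variable_features = extract_state(model)
rows, cols = edge_features['indices']              # shape (2, nnz)
vals = edge_features['values'][:, 0]               # normalized coefficients
A_coo = sp.coo_matrix((vals, (rows, cols)),
                      shape=(constraint_features['values'].shape[0],
                             variable_features['values'].shape[0]))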
Example #43
0
    def process_online(self, activations, reset=True, **kwargs):
        """
        Detect the beats in the given activation function with the forward
        algorithm.

        Parameters
        ----------
        activations : numpy array
            Beat activation for a single frame.
        reset : bool, optional
            Reset the DBNBeatTrackingProcessor to its initial state before
            processing.

        Returns
        -------
        beats : numpy array
            Detected beat position [seconds].

        """
        # reset to initial state
        if reset:
            self.reset()
        # use forward path to get best state
        fwd = self.hmm.forward(activations, reset=reset)
        # choose the best state for each step
        states = np.argmax(fwd, axis=1)
        # decide which time steps are beats
        beats = self.om.pointers[states] == 1
        # the positions inside the beats
        positions = self.st.state_positions[states]
        # visualisation stuff (only when called frame by frame)
        if self.visualize and len(activations) == 1:
            beat_length = 80
            display = [' '] * beat_length
            display[int(positions * beat_length)] = '*'
            # activation strength indicator
            strength_length = 10
            self.strength = int(max(self.strength, activations * 10))
            display.append('| ')
            display.extend(['*'] * self.strength)
            display.extend([' '] * (strength_length - self.strength))
            # reduce the displayed strength every couple of frames
            if self.counter % 5 == 0:
                self.strength -= 1
            # beat indicator
            if beats:
                self.beat_counter = 3
            if self.beat_counter > 0:
                display.append('| X ')
            else:
                display.append('|   ')
            self.beat_counter -= 1
            # display tempo
            display.append('| %5.1f | ' % self.tempo)
            sys.stderr.write('\r%s' % ''.join(display))
            sys.stderr.flush()
        # forward path often reports multiple beats close together, thus report
        # only beats more than the minimum interval apart
        beats_ = []
        for frame in np.nonzero(beats)[0]:
            cur_beat = (frame + self.counter) / float(self.fps)
            next_beat = self.last_beat + 60. / self.max_bpm
            # FIXME: this skips the first beat, but maybe this has a positive
            #        effect on the overall beat tracking accuracy
            if cur_beat >= next_beat:
                # update tempo
                self.tempo = 60. / (cur_beat - self.last_beat)
                # update last beat
                self.last_beat = cur_beat
                # append to beats
                beats_.append(cur_beat)
        # increase counter
        self.counter += len(activations)
        # return beat(s)
        return np.array(beats_)
Example #44
0
import math

import cv2
import numpy as np


def find_ball(img, truth, keepout, mount_offset_deg, hfov=68):

    img_h, img_w = img.shape[0:2]
    vfov = hfov * img_h / img_w

    img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
    truth_hsv = cv2.cvtColor(truth, cv2.COLOR_BGR2HSV)
    truth_hsv = cv2.blur(truth_hsv, (10, 10))

    img_sample = cv2.cvtColor(truth_hsv, cv2.COLOR_HSV2BGR)
    cv2.imshow('sample', img_sample)
    cv2.waitKey(0)

    target_hue = np.percentile(truth_hsv[:, :, 0], (5, 95))
    # mean_hue = np.mean(truth_hsv[0])
    # target_hue = [mean_hue*0.9, mean_hue*1.1]

    # target_sat = [0, 255]
    target_sat = np.percentile(truth_hsv[:, :, 1], (5, 95))
    # mean_sat = np.mean(truth_hsv[1])
    # target_sat = [mean_sat*0.9, mean_sat*1.1]

    # target_val = [0, 255]
    target_val = np.percentile(truth_hsv[:, :, 2], (5, 95))

    print(target_hue, target_sat, target_val)

    candidate_mask = cv2.inRange(img_hsv,
                                 (target_hue[0], target_sat[0], target_val[0]),
                                 (target_hue[1], target_sat[1], target_val[1]))

    # zero out keepout zone
    candidate_mask[keepout[0]:keepout[1], keepout[2]:keepout[3]] = 0

    kernel = np.ones((10, 10), np.uint8)
    candidate_mask = cv2.dilate(candidate_mask, kernel)
    candidate_mask = cv2.erode(candidate_mask, kernel)

    hot_indices = np.transpose(np.nonzero(candidate_mask))
    c, r = cv2.minEnclosingCircle(hot_indices)

    # reject ball candidates that are larger than 10pct of horizontal FoV
    # if(r > 0.1*img_h):
    #     print("FP suppressed")
    #     return img, candidate_mask

    prior_r_cm = 3.2
    baseline_cm = (img_w / r) * prior_r_cm
    distance_cm = (baseline_cm / 2) / math.tan(math.radians(hfov / 2))

    # reject ball candidates closer than 1 m
    if distance_cm < 100:
        print("FP suppressed")
        return 0, -1

    c_px = (int(c[0]), int(c[1]))
    img_annot = cv2.circle(img, c_px, int(r), (0, 0, 255), thickness=3)

    # get normalized x position with respect to hfov, in range [-0.5, 0.5]
    screen_coord_x = (c[1] - (img_w / 2)) / img_w
    # multiply by the known fov to get the heading angle in degrees
    heading = (screen_coord_x * hfov) + mount_offset_deg

    return heading, distance_cm
Example #45
0
import numpy as np

a = (2 * 25.4 + 2) / 1000
y = np.arange(-7.5 * a, 8.5 * a, a)

ws_list = np.genfromtxt('/SNS/users/rwp/corelli/tube_calibration/list',
                        delimiter=',',
                        dtype=[('runs', '|S11'), ('banks', '5i8'),
                               ('height', 'i8')])

a_max = 0
s = None

for run, banks, height in ws_list:
    banks = np.asarray(banks)
    banks = banks[np.nonzero(banks)]
    bank_names = ','.join('bank' + str(b) for b in banks)
    print(run)
    print(banks)
    print('CORELLI_' + run)
    for bank in banks:
        for tube in range(16):
            filename = 'COR_{}_{}_{}'.format(run, bank, tube + 1)
            centers = np.genfromtxt(filename + '.peaks',
                                    skip_header=1,
                                    skip_footer=1,
                                    usecols=2)
            if (centers < 10).any() or (centers > 250).any():
                print("skipping run {}, bank {}, tube {}".format(
                    run, bank, tube))
                continue
Example #46
0
    def process_offline(self, activations, **kwargs):
        """
        Detect the beats in the given activation function with Viterbi
        decoding.

        Parameters
        ----------
        activations : numpy array
            Beat activation function.

        Returns
        -------
        beats : numpy array
            Detected beat positions [seconds].

        """
        # init the beats to return and the offset
        beats = np.empty(0, dtype=np.int)
        first = 0
        # use only the activations > threshold
        if self.threshold:
            activations, first = threshold_activations(activations,
                                                       self.threshold)
        # return no beats if no activations given / remain after thresholding
        if not activations.any():
            return beats
        # get the best state path by calling the viterbi algorithm
        path, _ = self.hmm.viterbi(activations)
        # also return no beats if no path was found
        if not path.any():
            return beats
        # correct the beat positions if needed
        if self.correct:
            # for each detection determine the "beat range", i.e. states where
            # the pointers of the observation model are 1
            beat_range = self.om.pointers[path]
            # get all change points between True and False
            idx = np.nonzero(np.diff(beat_range))[0] + 1
            # if the first frame is in the beat range, add a change at frame 0
            if beat_range[0]:
                idx = np.r_[0, idx]
            # if the last frame is in the beat range, append the length of the
            # array
            if beat_range[-1]:
                idx = np.r_[idx, beat_range.size]
            # iterate over all regions
            if idx.any():
                for left, right in idx.reshape((-1, 2)):
                    # pick the frame with the highest activations value
                    peak = np.argmax(activations[left:right]) + left
                    beats = np.hstack((beats, peak))
        else:
            # just take the frames with the smallest beat state values
            from scipy.signal import argrelmin
            beats = argrelmin(self.st.state_positions[path], mode='wrap')[0]
            # recheck if they are within the "beat range", i.e. the pointers
            # of the observation model for that state must be 1
            # Note: interpolation and alignment of the beats to be at state 0
            #       does not improve results over this simple method
            beats = beats[self.om.pointers[path[beats]] == 1]
        # convert the detected beats to seconds and return them
        return (beats + first) / float(self.fps)
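The change-point bookkeeping used in the correction branch above can be illustrated in isolation (toy data, not from madmom): np.diff plus np.nonzero gives the left/right edges of every run of True values.

import numpy as np

beat_range = np.array([0, 1, 1, 0, 0, 1, 1, 1, 0], dtype=bool)
idx = np.nonzero(np.diff(beat_range))[0] + 1   # change points between False and True
if beat_range[0]:
    idx = np.r_[0, idx]
if beat_range[-1]:
    idx = np.r_[idx, beat_range.size]
print(idx.reshape((-1, 2)))                    # [[1 3] [5 8]]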
Example #47
0
 def compute_non_zero_idx(self):
     gamma = np.divide(
         np.abs(self.w_estimates[-1]), np.sqrt(self.z_estimates[-1])
     )
     tmp = np.nonzero(gamma > np.finfo(float).eps)
     self.non_zero_idx = tmp[0]
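A standalone illustration of the same selection (made-up values, not from the original class): indices are kept only where |w| / sqrt(z) is numerically non-zero.

import numpy as np

w = np.array([0.0, 1e-20, 0.3, -2.0])   # last weight estimates
z = np.array([1.0, 1.0, 4.0, 1.0])      # last z estimates
gamma = np.abs(w) / np.sqrt(z)
print(np.nonzero(gamma > np.finfo(float).eps)[0])   # -> [2 3]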
Example #48
0
    def np_new_unique(ar, return_index=False, return_inverse=False, return_counts=False):
        """
        Find the unique elements of an array.

        Returns the sorted unique elements of an array. There are three optional
        outputs in addition to the unique elements: the indices of the input array
        that give the unique values, the indices of the unique array that
        reconstruct the input array, and the number of times each unique value
        comes up in the input array.

        Parameters
        ----------
        ar : array_like
            Input array. This will be flattened if it is not already 1-D.
        return_index : bool, optional
            If True, also return the indices of `ar` that result in the unique
            array.
        return_inverse : bool, optional
            If True, also return the indices of the unique array that can be used
            to reconstruct `ar`.
        return_counts : bool, optional
            If True, also return the number of times each unique value comes up
            in `ar`.

            .. versionadded:: 1.9.0

        Returns
        -------
        unique : ndarray
            The sorted unique values.
        unique_indices : ndarray, optional
            The indices of the first occurrences of the unique values in the
            (flattened) original array. Only provided if `return_index` is True.
        unique_inverse : ndarray, optional
            The indices to reconstruct the (flattened) original array from the
            unique array. Only provided if `return_inverse` is True.
        unique_counts : ndarray, optional
            The number of times each of the unique values comes up in the
            original array. Only provided if `return_counts` is True.

            .. versionadded:: 1.9.0

        See Also
        --------
        numpy.lib.arraysetops : Module with a number of other functions for
                                performing set operations on arrays.

        Examples
        --------
        >>> np.unique([1, 1, 2, 2, 3, 3])
        array([1, 2, 3])
        >>> a = np.array([[1, 1], [2, 3]])
        >>> np.unique(a)
        array([1, 2, 3])

        Return the indices of the original array that give the unique values:

        >>> a = np.array(['a', 'b', 'b', 'c', 'a'])
        >>> u, indices = np.unique(a, return_index=True)
        >>> u
        array(['a', 'b', 'c'],
               dtype='|S1')
        >>> indices
        array([0, 1, 3])
        >>> a[indices]
        array(['a', 'b', 'c'],
               dtype='|S1')

        Reconstruct the input array from the unique values:

        >>> a = np.array([1, 2, 6, 4, 2, 3, 2])
        >>> u, indices = np.unique(a, return_inverse=True)
        >>> u
        array([1, 2, 3, 4, 6])
        >>> indices
        array([0, 1, 4, 3, 1, 2, 1])
        >>> u[indices]
        array([1, 2, 6, 4, 2, 3, 2])

        """
        ar = np.asanyarray(ar).flatten()

        optional_indices = return_index or return_inverse
        optional_returns = optional_indices or return_counts

        if ar.size == 0:
            if not optional_returns:
                ret = ar
            else:
                ret = (ar,)
                if return_index:
                    ret += (np.empty(0, np.bool),)
                if return_inverse:
                    ret += (np.empty(0, np.bool),)
                if return_counts:
                    ret += (np.empty(0, np.intp),)
            return ret

        if optional_indices:
            perm = ar.argsort(kind='mergesort' if return_index else 'quicksort')
            aux = ar[perm]
        else:
            ar.sort()
            aux = ar
        flag = np.concatenate(([True], aux[1:] != aux[:-1]))

        if not optional_returns:
            ret = aux[flag]
        else:
            ret = (aux[flag],)
            if return_index:
                ret += (perm[flag],)
            if return_inverse:
                iflag = np.cumsum(flag) - 1
                inv_idx = np.empty(ar.shape, dtype=np.intp)
                inv_idx[perm] = iflag
                ret += (inv_idx,)
            if return_counts:
                idx = np.concatenate(np.nonzero(flag) + ([ar.size],))
                ret += (np.diff(idx),)
        return ret
Example #49
0
 def analyze_text(self, window, doc_num=None):
     self._slide_window(window, doc_num)
     mask = self._uniq_words[:-1]  # to exclude none token
     if mask.any():
         self._occurrences[mask] += 1
         self._counter.update(itertools.combinations(np.nonzero(mask)[0], 2))
Example #50
0
def test_where_argwhere(test_case):
    rand_input = np.random.random_sample((11, 3, 5)).astype(np.float32)
    rand_input[np.nonzero(rand_input < 0.5)] = 0.0
    ret = _of_where_with_x_and_y_are_none(rand_input, input_shape=(11, 3, 5))
    exp_ret = np.argwhere(rand_input)
    test_case.assertTrue(np.array_equal(exp_ret, ret))
Example #51
0
def cityscape_panoptic_converter(original_folder_format, out_folder_format,
                                 out_file_format, category_file, data_split):
    """Convert the gt files in original folder to a new panoptic json file
    and convert the gt png into a new panoptic directory..

    Args:
        original_folder_format (str): The gt folder for specific data split.
        out_folder_format (str): The new panoptic folder.
        out_file_format (str): Json file in coco style to save the annotations.
        category_file (str): The category file path.
        data_split (str): The split of data, e.g., ['train', 'val', 'test']
    """
    out_folder = out_folder_format.format(data_split)
    if not osp.isdir(out_folder):
        print("Creating folder {} for panoptic segmentation PNGs".format(
            out_folder))
        os.mkdir(out_folder)

    categories = []
    for idx, el in enumerate(labels):
        if el.ignoreInEval:
            continue
        categories.append({
            'id': el.id,
            'name': el.name,
            'color': el.color,
            'supercategory': el.category,
            'isthing': 1 if el.hasInstances else 0
        })

    categories_dict = {cat['id']: cat for cat in categories}

    original_folder = original_folder_format.format(data_split)
    glob_pattern = osp.join(original_folder, '*/*_gtFine_instanceIds.png')
    file_list = sorted(glob.glob(glob_pattern))

    images = []
    annotations = []
    for working_idx, f in enumerate(file_list):
        if working_idx % 10 == 0:
            print(working_idx, len(file_list))

        original_format = np.array(Image.open(f))

        dir = f.split('/')[-2]
        file_name = f.split('/')[-1]
        image_id = file_name.rsplit('_', 2)[0]
        image_filename = '{}/{}_leftImg8bit.png'.format(dir, image_id)
        # image entry, id for image is its filename without extension
        images.append({
            "id": image_id,
            "width": original_format.shape[1],
            "height": original_format.shape[0],
            "file_name": image_filename
        })

        pan_format = np.zeros(
            (original_format.shape[0], original_format.shape[1], 3),
            dtype=np.uint8)
        id_generator = IdGenerator(categories_dict)

        unique_ids = np.unique(original_format)
        segm_info = []
        for el in unique_ids:
            if el < 1000:
                semantic_id = el
                is_crowd = 1
            else:
                semantic_id = el // 1000
                is_crowd = 0
            if semantic_id not in categories_dict:
                continue
            if categories_dict[semantic_id]['isthing'] == 0:
                is_crowd = 0
            mask = original_format == el
            segment_id, color = id_generator.get_id_and_color(semantic_id)
            pan_format[mask] = color

            area = np.sum(mask)  # segment area computation

            # bbox computation for a segment
            hor = np.sum(mask, axis=0)
            hor_idx = np.nonzero(hor)[0]
            x = hor_idx[0]
            width = hor_idx[-1] - x + 1
            vert = np.sum(mask, axis=1)
            vert_idx = np.nonzero(vert)[0]
            y = vert_idx[0]
            height = vert_idx[-1] - y + 1
            bbox = [x, y, width, height]

            segm_info.append({
                "id": int(segment_id),
                "category_id": int(semantic_id),
                "area": area,
                "bbox": bbox,
                "iscrowd": is_crowd
            })

        annotations.append({
            'image_id': image_id,
            'file_name': file_name,
            "segments_info": segm_info
        })

        Image.fromarray(pan_format).save(os.path.join(out_folder, file_name))

    d = {
        'images': images,
        'annotations': annotations,
        'categories': categories,
    }
    mmcv.dump(categories, category_file, cls=MyJsonEncoder)
    mmcv.dump(d, out_file_format.format(data_split), cls=MyJsonEncoder)
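The bbox computation used above can be pulled out into a standalone sketch (a hypothetical helper, not part of the original converter): np.nonzero on the row and column projections of a boolean mask gives its tight bounding box.

import numpy as np

def mask_to_bbox(mask):
    hor_idx = np.nonzero(mask.sum(axis=0))[0]
    vert_idx = np.nonzero(mask.sum(axis=1))[0]
    x, y = hor_idx[0], vert_idx[0]
    width = hor_idx[-1] - x + 1
    height = vert_idx[-1] - y + 1
    return [int(x), int(y), int(width), int(height)]

demo = np.zeros((8, 8), dtype=bool)
demo[2:5, 3:7] = True
print(mask_to_bbox(demo))   # [3, 2, 4, 3]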
Example #52
0
        print(ann)
        print(np.sum(np.array(counts)))
        vol[ann]=np.sum(np.array(counts))
    
    #volume in order of brains:
    vol=np.array([3359226,#hsv36,7
         3209858,#hsv36,6
         3159940,#hsv28,2
         3159940,#hsv28,4
        4883368,#prv28
        5416079,#prv36
        3373095,#prv50
        3543262,#ts04
        5007577,#ctb1
        5378468#ctb2
        ])
    #now find actual counts (nonzero pixels) in segmentations
    segs=[os.path.join(dct["dst"],xx+".tif") for xx in brains]
    counts=np.array([len(np.nonzero(tifffile.imread(seg))[0]) for seg in segs])
    scale=0.014 #um/pixel (approximate!!!)
    density=[np.round(counts[i]/(vol[i]*(scale**3)),2) for i in range(len(vol))]
    
    #export dataframe
    df=pd.DataFrame(density)
    df.index=brains
    df.columns=["density,cells/mm3"]
    df["volume(mm3)"]=counts*(scale**3)
    df["voxels"]=counts
    df=df.round(4)
    df.to_csv("/jukebox/wang/zahra/tracing_projects/mapping_paper/revision_images/short_timepoint_counts/injection.csv")
Example #53
0
# BANDS AOD_1640nm', 'AOD_1020nm', 'AOD_870nm', 'AOD_865nm', 'AOD_779nm',
#       'AOD_675nm', 'AOD_667nm', 'AOD_620nm', 'AOD_560nm', 'AOD_555nm',
#       'AOD_551nm', 'AOD_532nm', 'AOD_531nm', 'AOD_510nm', 'AOD_500nm',
#       'AOD_490nm', 'AOD_443nm', 'AOD_440nm', 'AOD_412nm', 'AOD_400nm',
#       'AOD_380nm', 'AOD_340nm'],

bands_aod = np.genfromtxt(path,
                          delimiter=',',
                          skip_header=header - 1,
                          skip_footer=len(data_aod),
                          usecols=cols,
                          dtype=str)

# Find which bands contain data (take the mean of each band and keep the indices where it is non-zero)
means = np.mean(data_aod, axis=0)
non_empty_aod = np.array(np.nonzero(means))
data_aod = data_aod[:, non_empty_aod[0]]

#########
# GRAPHS
#########
# Plot each band of aod measurements for total data
[
    plt.scatter(dates_aod,
                data_aod[:, i],
                label=bands_aod[non_empty_aod[0, i]],
                s=0.2) for i in range(non_empty_aod[0].shape[0])
]
plt.legend()
plt.show()
Example #54
0
def trim_silence(in_list, out_list, in_dimension, label_list, label_dimension, \
                                       silence_feature_index, percent_to_keep=0):
    '''
    Function to trim silence from binary label/speech files based on binary labels.
        in_list: list of binary label/speech files to trim
        out_list: trimmed files
        in_dimension: dimension of data to trim
        label_list: list of binary labels which contain trimming criterion
        label_dimension:
        silence_feature_index: index of feature in labels which is silence: 1 means silence (trim), 0 means leave.
    '''
    assert len(in_list) == len(out_list) == len(label_list)
    io_funcs = BinaryIOCollection()
    for (infile, outfile, label_file) in zip(in_list, out_list, label_list):

        data = io_funcs.load_binary_file(infile, in_dimension)
        label = io_funcs.load_binary_file(label_file, label_dimension)

        audio_label_difference = data.shape[0] - label.shape[0]
        assert math.fabs(
            audio_label_difference
        ) < 3, '%s and %s contain different numbers of frames: %s %s' % (
            infile, label_file, data.shape[0], label.shape[0])

        ## In case they are different, resize -- keep label fixed as we assume this has
        ## already been processed. (This problem only arose with STRAIGHT features.)
        if audio_label_difference < 0:  ## label is longer -- pad audio to match by repeating last frame:
            print('audio too short -- pad')
            padding = numpy.vstack([data[-1, :]] *
                                   int(math.fabs(audio_label_difference)))
            data = numpy.vstack([data, padding])
        elif audio_label_difference > 0:  ## audio is longer -- cut it
            print('audio too long -- trim')
            new_length = label.shape[0]
            data = data[:new_length, :]
        #else: -- expected case -- lengths match, so do nothing

        silence_flag = label[:, silence_feature_index]
        #         print silence_flag
        if not (numpy.unique(silence_flag) == numpy.array([0, 1])).all():
            ## if it's all 0s or 1s, that's ok:
            assert (numpy.unique(silence_flag) == numpy.array([0])).all() or \
                   (numpy.unique(silence_flag) == numpy.array([1])).all(), \
                   'dimension %s of %s contains values other than 0 and 1'%(silence_feature_index, infile)
        print('Remove %d%% of frames (%s frames) as silence... ' %
              (100 * numpy.sum(silence_flag / float(len(silence_flag))),
               int(numpy.sum(silence_flag))))
        non_silence_indices = numpy.nonzero(
            silence_flag ==
            0)  ## get the indices where silence_flag == 0 is True (i.e. != 0)
        if percent_to_keep != 0:
            assert type(percent_to_keep) == int and percent_to_keep > 0
            #print silence_flag
            silence_indices = numpy.nonzero(silence_flag == 1)
            ## nonzero returns a tuple of arrays, one for each dimension of input array
            silence_indices = silence_indices[0]
            every_nth = 100 // percent_to_keep
            silence_indices_to_keep = silence_indices[::every_nth]  ## every_nth used as step value in slice
            ## -1 due to weird error with STRAIGHT features at line 144:
            ## IndexError: index 445 is out of bounds for axis 0 with size 445
            if len(silence_indices_to_keep) == 0:
                silence_indices_to_keep = numpy.array(
                    [1])  ## avoid errors in case there is no silence
            print(
                '   Restore %s%% (every %sth frame: %s frames) of silent frames'
                % (percent_to_keep, every_nth, len(silence_indices_to_keep)))

            ## Append to end of utt -- same function used for labels and audio
            ## means that violation of temporal order doesn't matter -- will be consistent.
            ## Later, frame shuffling will disperse silent frames evenly across minibatches:
            non_silence_indices = (numpy.hstack(
                [non_silence_indices[0], silence_indices_to_keep]))
            ##  ^---- from tuple and back (see nonzero note above)

        trimmed_data = data[
            non_silence_indices, :]  ## advanced integer indexing
        io_funcs.array_to_binary_file(trimmed_data, outfile)
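A toy illustration of the index bookkeeping above (made-up flags, not from the original pipeline): non-silent frames are kept, plus every 4th silent frame for percent_to_keep=25.

import numpy as np

silence_flag = np.array([1, 1, 0, 0, 1, 0, 1, 1, 1, 0])
non_silence = np.nonzero(silence_flag == 0)[0]                 # frames to keep
kept_silence = np.nonzero(silence_flag == 1)[0][::100 // 25]   # restored silent frames
print(np.hstack([non_silence, kept_silence]))                  # surviving frame indices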
Example #55
0
def run(args, device, data):
    # Unpack data
    train_mask, val_mask, in_feats, labels, n_classes, g = data
    train_nid = th.LongTensor(np.nonzero(train_mask)[0])
    val_nid = th.LongTensor(np.nonzero(val_mask)[0])
    train_mask = th.BoolTensor(train_mask)
    val_mask = th.BoolTensor(val_mask)

    # Create sampler
    sampler = NeighborSampler(g, [int(fanout) for fanout in args.fan_out.split(',')])

    # Create PyTorch DataLoader for constructing blocks
    dataloader = DataLoader(
        dataset=train_nid.numpy(),
        batch_size=args.batch_size,
        collate_fn=sampler.sample_blocks,
        shuffle=True,
        drop_last=False,
        num_workers=args.num_workers)

    # Define model and optimizer
    model = SAGE(in_feats, args.num_hidden, n_classes, args.num_layers, F.relu, args.dropout)
    model = model.to(device)
    loss_fcn = nn.CrossEntropyLoss()
    loss_fcn = loss_fcn.to(device)
    optimizer = optim.Adam(model.parameters(), lr=args.lr)

    # Training loop
    avg = 0
    iter_tput = []
    for epoch in range(args.num_epochs):
        tic = time.time()

        # Loop over the dataloader to sample the computation dependency graph as a list of
        # blocks.
        for step, blocks in enumerate(dataloader):
            tic_step = time.time()

            # The nodes for input lies at the LHS side of the first block.
            # The nodes for output lies at the RHS side of the last block.
            input_nodes = blocks[0].srcdata[dgl.NID]
            seeds = blocks[-1].dstdata[dgl.NID]

            # Load the input features as well as output labels
            batch_inputs, batch_labels = load_subtensor(g, labels, seeds, input_nodes, device)

            # Compute loss and prediction
            batch_pred = model(blocks, batch_inputs)
            loss = loss_fcn(batch_pred, batch_labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            iter_tput.append(len(seeds) / (time.time() - tic_step))
            if step % args.log_every == 0:
                acc = compute_acc(batch_pred, batch_labels)
                gpu_mem_alloc = th.cuda.max_memory_allocated() / 1000000 if th.cuda.is_available() else 0
                print('Epoch {:05d} | Step {:05d} | Loss {:.4f} | Train Acc {:.4f} | Speed (samples/sec) {:.4f} | GPU {:.1f} MiB'.format(
                    epoch, step, loss.item(), acc.item(), np.mean(iter_tput[3:]), gpu_mem_alloc))

        toc = time.time()
        print('Epoch Time(s): {:.4f}'.format(toc - tic))
        if epoch >= 5:
            avg += toc - tic
        if epoch % args.eval_every == 0 and epoch != 0:
            eval_acc = evaluate(model, g, g.ndata['features'], labels, val_mask, args.batch_size, device)
            print('Eval Acc {:.4f}'.format(eval_acc))

    print('Avg epoch time: {}'.format(avg / (epoch - 4)))
Example #56
0
    def __init__(self, im1, im2=None, param=None):
        self.im1 = im1
        self.im2 = im2
        self.dim_im = len(self.im1.data.shape)
        self.dim_pix = 0
        self.distances = None
        self.res = ''
        self.param = param
        self.dist1_distribution = None
        self.dist2_distribution = None

        if self.dim_im == 3:
            self.orientation1 = self.im1.orientation
            if self.orientation1 != 'IRP':
                self.im1.change_orientation('IRP', generate_path=True)

            if self.im2 is not None:
                self.orientation2 = self.im2.orientation
                if self.orientation2 != 'IRP':
                    self.im2.change_orientation('IRP', generate_path=True)

        if self.param.thinning:
            self.thinning1 = Thinning(self.im1, self.param.verbose)
            self.thinning1.thinned_image.save()

            if self.im2 is not None:
                self.thinning2 = Thinning(self.im2, self.param.verbose)
                self.thinning2.thinned_image.save()

        if self.dim_im == 2 and self.im2 is not None:
            self.compute_dist_2im_2d()

        if self.dim_im == 3:
            if self.im2 is None:
                self.compute_dist_1im_3d()
            else:
                self.compute_dist_2im_3d()

        if self.dim_im == 2 and self.distances is not None:
            self.dist1_distribution = self.distances.min_distances_1[np.nonzero(self.distances.min_distances_1)]
            self.dist2_distribution = self.distances.min_distances_2[np.nonzero(self.distances.min_distances_2)]
        if self.dim_im == 3:
            self.dist1_distribution = []
            self.dist2_distribution = []

            for d in self.distances:
                if np.nonzero(d.min_distances_1)[0].size:  # Exist non zero values
                    self.dist1_distribution.append(d.min_distances_1[np.nonzero(d.min_distances_1)])
                else:  # all values are zero
                    self.dist1_distribution.append(0)
                if np.nonzero(d.min_distances_2)[0].size:  # Exist non zero values
                    self.dist2_distribution.append(d.min_distances_2[np.nonzero(d.min_distances_2)])
                else:  # all values are zero
                    self.dist2_distribution.append(0)

            self.res = 'Hausdorff\'s distance  -  First relative Hausdorff\'s distance median - Second relative Hausdorff\'s distance median(all in mm)\n'
            for i, d in enumerate(self.distances):
                med1 = np.median(self.dist1_distribution[i])
                med2 = np.median(self.dist2_distribution[i])
                if self.im2 is None:
                    self.res += 'Slice ' + str(i) + ' - slice ' + str(i + 1) + ': ' + str(d.H * self.dim_pix) + '  -  ' + str(med1 * self.dim_pix) + '  -  ' + str(med2 * self.dim_pix) + ' \n'
                else:
                    self.res += 'Slice ' + str(i) + ': ' + str(d.H * self.dim_pix) + '  -  ' + str(med1 * self.dim_pix) + '  -  ' + str(med2 * self.dim_pix) + ' \n'

        sct.printv('-----------------------------------------------------------------------------\n' +
                   self.res, self.param.verbose, 'normal')

        if self.param.verbose == 2:
            self.show_results()
Example #57
0
 def itsTargVehsId(self, idx):
     centGrid = self.itsCentGrid(idx)
     targVehsId = centGrid[np.nonzero(centGrid)]
     return targVehsId
Example #58
0
    if not ret:
        break

    frame_normalize = cv.normalize(frame, None, 0, 255, cv.NORM_MINMAX)

    hsv_image = cv.cvtColor(frame, cv.COLOR_BGR2HSV)
    glitch_mask = cv.inRange(
        hsv_image, glitch_bottom, glitch_top)

    if not np.count_nonzero(glitch_mask):

        contour_mask = cv.inRange(
            hsv_image, hsv_color_bottom, hsv_color_top)
        # get welding puddle contour
        X, Y = np.nonzero(contour_mask)
        X = X.reshape((-1, 1))
        print(X, Y)
        # line_model = LinearRegression().fit(X, Y)
        # print(line_model.coef_, line_model.intercept_)

        cv.imshow('frame', frame)
        cv.imshow('normalize', frame_normalize)
        # cv.imshow('mask', contour_mask)
        # contour_point = get_puddle_contour(contour_mask)
        # line_mask = cv.inRange(
        #     hsv_image, line_color_bottom, line_color_top)

        # pointA = (
        #     contour_point[0],
        #     contour_point[0] * line_coef + line_intercept)
Example #59
0
import math

import numpy as np


def bnewt(A, mask=[], tol = 1e-6, delta_lower = 0.1, delta_upper = 3, fl = 0, check = 1, largemem = 0, chunk_size = 10000):
    """
    BNEWT A balancing algorithm for symmetric matrices
    X = BNEWT(A) attempts to find a vector X such that
    diag(X)*A*diag(X) is close to doubly stochastic. A must
    be symmetric and nonnegative.
    
    X0: initial guess. TOL: error tolerance.
    delta/Delta: how close/far balancing vectors can get
    to/from the edge of the positive cone.
    We use a relative measure on the size of elements.
    FL: intermediate convergence statistics on/off.
    RES: residual error, measured by norm(diag(x)*A*x - e).
    """
    # see details in Knight and Ruiz (2012)
    (n,m) = A.shape
    #np.seterr(divide='ignore')
    print('Verifying Matrix\n')
    if (n != m):
        print('Matrix must be symmetric to converge\n')
        return 'NaN'
    if (check):
        for i in range(0,n):
            for j in range(i,n):
                if (A[i][j] != A[j][i]) or (A[i][j] < 0):
                    print('Matrix must be symmetric and nonnegative to converge\n')
                    return 'NaN'
        print('Check OK\n')
    else:
        print('Check escaped\n')
  
    e        = np.ones((n,1))
    e[mask]  = 0
    #res      = np.empty((n,1))
    
    g        = 0.9
    etamax   = 0.1
    eta      = etamax
    stop_tol = tol*0.5
    x        = e #initial guess
    rt       = tol*tol
    if largemem:
        v      = x * chunking_dot(A,x,chunk_size=chunk_size)
    else:
        v      = x*np.dot(A,x)
    rk       = 1 - v
    rk[mask] = 0
    rho_km1  = np.dot(np.transpose(rk),rk)
    rout     = rho_km1
    rold     = rout
    
    MVP = 0 #matrix vector products
    i = 0
  
    while rout > rt:
        i = i+1
        k=0
        y=e
        innertol = max(eta*eta*rout,rt)
    
        while rho_km1 > innertol: #inner iteration by CG
            k = k+1
            if k==1:
                with np.errstate(invalid='ignore'):
                    Z       = rk/v
                Z[mask] = 0
                p       = Z
                rho_km1 = np.dot(np.transpose(rk),Z)
            else:
                beta = rho_km1/rho_km2
                p    =  Z + beta*p
      
            #update search direction 
            if largemem:
                w   = x*chunking_dot(A,x*p,chunk_size=chunk_size) + v*p
            else:
                w   = x*np.dot(A,x*p) + v*p
      
            alpha = rho_km1/np.dot(np.transpose(p),w)
            ap = alpha*p
      
            #test distance to boundary of cone
            ynew = y + ap
            if min(np.delete(ynew,mask)) <= delta_lower:
                if delta_lower == 0:
                    break
                ind = np.nonzero(ap < 0)
                gamma = min((delta_lower - y[ind])/ap[ind])
                y = y + gamma*ap
                break
            if max(ynew) >= delta_upper:
                ind = np.nonzero(ynew > delta_upper)
                gamma = min((delta_upper - y[ind])/ap[ind])
                y = y + gamma*ap
                break
      
            y       = ynew
            rk      = rk - alpha*w
            rho_km2 = rho_km1
            with np.errstate(invalid='ignore'):
                Z       = rk/v
            Z[mask] = 0
            rho_km1 = np.dot(np.transpose(rk),Z)
        #end inner iteration
    
        x        = x*y
        if largemem:
            v      = x * chunking_dot(A,x,chunk_size=chunk_size)
        else:
            v      = x*np.dot(A,x)
        rk       = 1-v
        rk[mask] = 0
        rho_km1  = np.dot(np.transpose(rk),rk)
        rout     = rho_km1
        MVP      = MVP + k + 1
        #print MVP,res
        #update inner iteration stopping criterion
        rat      = rout/rold
        rold     = rout
        res_norm = math.sqrt(rout)
        eta_o    = eta
        eta      = g*rat
    
        if g*eta_o*eta_o > 0.1:
            eta = max(eta,g*eta_o*eta_o)
        eta = max(min(eta,etamax),stop_tol/res_norm)
    
        if fl == 1:
            print '%3d %6d %.3f \n' % (i,k,res_norm)
      
        if MVP > 50000:
            break
    #end outer
  
    print 'Matrix vector products = %6d\n' % MVP
    #x = np.array(x)
    #x[mask] = 0
    return x
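A usage sketch for bnewt on a small symmetric nonnegative matrix (the matrix values are made up; note the function above is written with Python 2 print statements). After balancing, each row sum of diag(x)*A*diag(x) should be close to 1:

import numpy as np

A = np.array([[1.0, 2.0, 0.5],
              [2.0, 1.0, 1.0],
              [0.5, 1.0, 3.0]])

x = bnewt(A, tol=1e-8)          # balancing vector, shape (3, 1)
D = np.diagflat(x)
balanced = np.dot(np.dot(D, A), D)
print(balanced.sum(axis=1))     # each entry should be close to 1.0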
Example #60
0
    def predict_frame(self, oriImg):
        test_image = Variable(T.transpose(T.transpose(T.unsqueeze(torch.from_numpy(oriImg).float(), 0), 2, 3), 1, 2),volatile=True).cuda()
        # print('Input Image Size: ', test_image.size())

        # Multiplier: A pyramid based scaling method to evaluate image from various scales.
        multiplier = [x * self.model_['boxsize'] / oriImg.shape[0] for x in self.param_['scale_search']]
        # print('Image Scaling Multipliers: ', multiplier, '\n')

        # Heatmap and Parts Affinity Field Data Structures
        heatmap_avg = torch.zeros((len(multiplier),19,oriImg.shape[0], oriImg.shape[1])).cuda()
        paf_avg = torch.zeros((len(multiplier),38,oriImg.shape[0], oriImg.shape[1])).cuda()

        # Compute Keypoint and Part Affinity Fields
        # print('Generating Keypoint Heatmap and Parts Affinity Field Predictions...')
        for m in range(len(multiplier)):
            # Set Image Scale
            scale = multiplier[m]
            h = int(oriImg.shape[0] * scale)
            w = int(oriImg.shape[1] * scale)
            # print('[', 'Multiplier: ', scale, '-', (w, h), ']')

            # Pad Image Corresponding to Detection Stride
            pad_h = 0 if (h % self.model_['stride'] == 0) else self.model_['stride'] - (h % self.model_['stride'])
            pad_w = 0 if (w % self.model_['stride'] == 0) else self.model_['stride'] - (w % self.model_['stride'])
            new_h = h + pad_h
            new_w = w + pad_w

            # Apply Image Resize Transformation
            imageToTest = cv2.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, self.model_['stride'], self.model_['padValue'])
            imageToTest_padded = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5

            # Generate Predictions
            feed = Variable(T.from_numpy(imageToTest_padded)).cuda()
            output1, output2 = self.model(feed)

            # Scale Prediction Outputs to Corresponding Image Size
            heatmap = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output2)
            paf = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output1)

            # print('Heatmap Dim:', heatmap.size())   # (1, Joint Count, X, Y)
            # print('PAF Dim:', paf.size())           # (1, PAF Count, X, Y)
            # print()

            heatmap_avg[m] = heatmap[0].data
            paf_avg[m] = paf[0].data

        # Compute Average Values
        heatmap_avg = T.transpose(T.transpose(T.squeeze(T.mean(heatmap_avg, 0)),0,1),1,2).cuda()
        paf_avg = T.transpose(T.transpose(T.squeeze(T.mean(paf_avg, 0)),0,1),1,2).cuda()

        # Convert to Numpy Type
        heatmap_avg = heatmap_avg.cpu().numpy()
        paf_avg = paf_avg.cpu().numpy()

        '''
        # [Plotting & Visualizing Heatmap and PAF]

        # Plot Heatmap Probabilities
        # util.plot_heatmap(oriImg, heatmap_avg)
        # util.plot_joint_heatmap(oriImg, heatmap_avg, 1)

        # Plot Part-Affinity Vectors
        # util.plot_paf(oriImg, paf_avg, 4)
        '''

        # Compute Heatmap Peaks (Using Non-Maximum Suppression)
        all_peaks = []
        peak_counter = 0
        joint_pt_lookup = dict()
        for part in range(18):
            # Smooth out the heatmap with a Gaussian kernel to remove high-frequency variation.
            map_ori = heatmap_avg[:,:,part]
            map = gaussian_filter(map_ori, sigma=3)

            map_left = np.zeros(map.shape)
            map_left[1:,:] = map[:-1,:]
            map_right = np.zeros(map.shape)
            map_right[:-1,:] = map[1:,:]
            map_up = np.zeros(map.shape)
            map_up[:,1:] = map[:,:-1]
            map_down = np.zeros(map.shape)
            map_down[:,:-1] = map[:,1:]
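            # keep a pixel as a peak only if it is >= each of its four shifted neighbours (a local maximum) and above thre1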

            # Compute Peak Based on Binary Threshold
            peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > self.param_['thre1']))
            peaks = zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]) # note reverse

            # Derive Joint Keypoint Peaks with Mapped ID with Probabilities
            peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
            id = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

            # Create Joint Lookup Dictionary
            for pt in peaks_with_score_and_id:
                joint_pt_lookup[(pt[1], pt[0])] = pt[2:4]

            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        '''
        # [Plot KeyPoint (with Probabilities)]
        # util.plot_key_point(oriImg, all_peaks)
        '''
        # util.plot_all_keypoints(oriImg, all_peaks)

        # Load Joint Index and Sequences Data
        mapIdx = self.md.get_mapIdx()
        limbSeq = self.md.get_limbseq()

        # Compute Part-Affinity Fields
        connection_all = []
        special_k = []
        mid_num = 10

        for k in range(len(mapIdx)):
            score_mid = paf_avg[:,:,[x-19 for x in mapIdx[k]]]
            # print(score_mid.shape)

            candA = all_peaks[limbSeq[k][0]-1]
            candB = all_peaks[limbSeq[k][1]-1]
            # print('Limb Seq Connection: [', limbSeq[k][0]-1, ',', limbSeq[k][1]-1, ']\n')

            nA = len(candA)
            nB = len(candB)
            indexA, indexB = limbSeq[k]

            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):

                        # Compute Joint Unit Vector
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1])
                        # Check that the joint displacement norm is not zero; skip the pair otherwise.
                        if not np.any(norm):
                            #print('Exception: Norm is a zero-vector')
                            continue

                        # TODO: Save this vector!
                        vec = np.divide(vec, norm)
                        #print('Unit Vector: [',i, ', ', j, ']: ', str(vec))

                        startend = zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), np.linspace(candA[i][1], candB[j][1], num=mid_num))
                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] for I in range(len(startend))])

                        # Compute Components for Affinity Field Criterion
                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)

                        # Check PAF Criterion
                        criterion1 = len(np.nonzero(score_midpts > self.param_['thre2'])[0]) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])

                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0,5))

                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if (i not in connection[:,3] and j not in connection[:,4]):
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB): break

                connection_all.append(connection)

                #print('\nConnections:')
                #print(connection)
                #print()
            else:
                # Handle Exception for Potential Missing Part Entities
                special_k.append(k)
                connection_all.append([])

        # Build Human Pose
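        # each subset row holds 18 joint candidate ids (-1 if missing), the overall score at [-2] and the part count at [-1]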
        subset = -1 * np.ones((0, 20))
        candidate = np.array([item for sublist in all_peaks for item in sublist])

        for k in range(len(mapIdx)):
            if k not in special_k:
                partAs = connection_all[k][:,0]
                partBs = connection_all[k][:,1]
                indexA, indexB = np.array(limbSeq[k]) - 1

                for i in range(len(connection_all[k])):
                    found = 0
                    subset_idx = [-1, -1]

                    for j in range(len(subset)):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2: # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        # print "found = 2"
                        membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0: #merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else: # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                    # if partA is not found in any existing subset row, start a new one
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        # Remove skeletons with fewer than 4 detected parts or an average part score below 0.4
        deleteIdx = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:
                deleteIdx.append(i)
        subset = np.delete(subset, deleteIdx, axis=0)

        # Setup Pose Dictionary Data Structure for Prediction Return
        joints_per_skeleton = [[] for i in range(len(subset))]
        for n in range(len(subset)):
            for i in range(18):
                cidx = subset[n][i]
                if cidx != -1:
                    y = candidate[cidx.astype(int), 0]
                    x = candidate[cidx.astype(int), 1]
                    joints_per_skeleton[n].append([y, x])
                else:
                    joints_per_skeleton[n].append(None)

        return joints_per_skeleton
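A usage sketch, assuming the surrounding class (called PoseEstimator here purely for illustration; its real name, constructor and model files are not part of this excerpt) exposes predict_frame with a loaded CUDA model:

import cv2

estimator = PoseEstimator()            # hypothetical constructor, stands in for the real class
oriImg = cv2.imread('frame.jpg')       # BGR image, matching the OpenCV convention used above
skeletons = estimator.predict_frame(oriImg)

# one list per detected person: 18 entries, each a [y, x] pair (as labelled in the method) or None
for n, joints in enumerate(skeletons):
    detected = sum(1 for j in joints if j is not None)
    print('person %d: %d of 18 joints detected' % (n, detected))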