Example #1
def wavefunction(coords, mocoeffs, gbasis, volume):
    """Calculate the magnitude of the wavefunction at every point in a volume.
    
    Attributes:
        coords -- the coordinates of the atoms
        mocoeffs -- mocoeffs for one eigenvalue
        gbasis -- gbasis from a parser object
        volume -- a template Volume object (will not be altered)
    """
    bfs = getbfs(coords, gbasis)
    
    wavefn = copy.copy(volume)
    wavefn.data = numpy.zeros( wavefn.data.shape, "d")

    conversion = convertor(1,"bohr","Angstrom")
    x = numpy.arange(wavefn.origin[0], wavefn.topcorner[0]+wavefn.spacing[0], wavefn.spacing[0]) / conversion
    y = numpy.arange(wavefn.origin[1], wavefn.topcorner[1]+wavefn.spacing[1], wavefn.spacing[1]) / conversion
    z = numpy.arange(wavefn.origin[2], wavefn.topcorner[2]+wavefn.spacing[2], wavefn.spacing[2]) / conversion

    for bs in range(len(bfs)):
        data = numpy.zeros( wavefn.data.shape, "d")
        for i,xval in enumerate(x):
            for j,yval in enumerate(y):
                for k,zval in enumerate(z):
                    data[i, j, k] = bfs[bs].amp(xval,yval,zval)
        numpy.multiply(data, mocoeffs[bs], data)
        numpy.add(wavefn.data, data, wavefn.data)
    
    return wavefn
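As a note on what the triple loop computes: for a single molecular orbital this is the LCAO expansion evaluated at every grid point, where the c_mu are the entries of mocoeffs and the phi_mu are the basis functions built by getbfs (not shown here). In LaTeX notation:

    \psi(\mathbf{r}_{ijk}) = \sum_{\mu} c_{\mu}\,\phi_{\mu}(\mathbf{r}_{ijk})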
Example #2
 def train(self, inp, out, training_weight=1.):
     inp = np.mat(inp).T
     out = np.mat(out).T
     deriv = []
     val = inp
     vals = [val]
     # forward calculation of activations and derivatives
     for weight,bias in self.__weights:
         val = weight*val
         val += bias
         deriv.append(self.__derivative(val))
         val = self.__activation(val)
         vals.append(val)
     deriv = iter(reversed(deriv))
     weights = iter(reversed(self.__weights))
     errs = []
     errs.append(np.multiply(vals[-1]-out, next(deriv)))
     # backwards propagation of errors
     for (w,b),d in zip(weights, deriv):
         errs.append(np.multiply(np.dot(w.T, errs[-1]), d))
     weights = iter(self.__weights)
     for (w,b),v,e in zip(self.__weights, vals, reversed(errs)):
         e *= self.__learning_rate*training_weight
         w -= e*v.T
         b -= e
     tmp = vals[-1]-out
     return np.dot(tmp[0].T,tmp[0])*.5*training_weight
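The backward pass above follows the standard error-backpropagation recursion. With f the activation, z_l the pre-activations, a_l the stored vals, eta the learning rate and t the training_weight, the loops compute (in LaTeX notation):

    \delta_L = (a_L - y) \odot f'(z_L), \qquad \delta_l = (W_{l+1}^{\top} \delta_{l+1}) \odot f'(z_l)

and the final loop applies the updates

    W_l \leftarrow W_l - \eta\, t\, \delta_l\, a_{l-1}^{\top}, \qquad b_l \leftarrow b_l - \eta\, t\, \delta_l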
Example #3
def boton_linea():
    inicio = time.time()
    label.destroy()
    
    # Convert to grayscale
    imagen = cambiar_agrises(path_imagen_original)
    imagen.save("paso_1.jpg")
        
    h_hori = numpy.array([[-1, -1, -1],[2, 2, 2],[-1, -1, -1]])
    h_verti = numpy.array([[-1, 2, -1],[-1, 2, -1], [-1, 2, -1]])
    h_ob45 = numpy.array([[-1, -1, 2],[-1, 2, -1], [2, -1, -1]])
    h_obn45 = numpy.array([[2, -1, -1],[-1, 2, -1], [-1, -1, 2]])
    
    imagen_hori = convolucion(imagen, numpy.multiply(1.0/1.0,h_verti))
    #imagen_hori = cambiar_umbral(imagen_hori, 0.1)
    imagen_hori.save("paso_2.jpg")
    imagen_verti = convolucion(imagen, numpy.multiply(1.0/1.0,h_hori))
    #imagen_hori = cambiar_umbral(imagen_hori, 0.1)
    imagen_verti.save("paso_3.jpg")
    
    pixeles_hori = imagen_hori.load()
    pixeles_verti = imagen_verti.load()
    x, y = imagen.size
    imagen = linea(pixeles_verti, pixeles_hori, x, y, imagen)
    
    # Put the image in the window
    poner_imagen(imagen)
    
    # Elapsed time
    fin = time.time()
    tiempo = fin - inicio
    print "Tiempo que trascurrio -> " + str(tiempo)
    return tiempo
Example #4
    def reducer_pca(self, region, matrixs):
        M = pd.DataFrame(matrixs)
        M[M == ''] = float('NaN')
        M = M.astype(float)
        M = M.transpose()
        (columns,rows)= np.shape(M)
        Mean = np.mean(M, axis=1).values
        C=np.zeros([columns,columns])  
        N=np.zeros([columns,columns])

        for i in range(rows):
            row = M.iloc[:,i] - Mean
            outer = np.outer(row,row)
            valid = np.isnan(outer) == False
            C[valid] = C[valid]+ outer[valid]
            N[valid] = N[valid]+ 1
            
        valid_outer = np.multiply(1 - np.isnan(N),N>0)
        cov = np.divide(C,N)
        cov = np.multiply(cov, valid_outer)
        U, D, V = np.linalg.svd(cov)
        cum_sum = np.cumsum(D[:])/np.sum(D)
        for i in range(len(cum_sum)):
            if cum_sum[i] >= 0.99:
                ind = i 
                break
        yield region, ind
Example #5
def generate_RI_text_fast(N, RI_letters, cluster_sz, ordered, text_name, alph=alphabet):
	text_vector = np.zeros((1, N))
	text = utils.load_text(text_name)
	cluster = ''
	vector = np.ones((1,N))
	for char_num in xrange(len(text)):		
		cluster = cluster + text[char_num]
		if len(cluster) < cluster_sz:
			continue
		elif len(cluster) > cluster_sz:
			prev_letter = cluster[0]
			prev_letter_idx = alphabet.find(prev_letter)
			inverse = np.roll(RI_letters[prev_letter_idx,:], cluster_sz-1)
			vector = np.multiply(vector, inverse)
			vector = np.roll(vector, 1)
			letter = text[char_num]
			letter_idx = alphabet.find(letter)
			vector = np.multiply(vector, RI_letters[letter_idx,:])
			cluster = cluster[1:]
		else: # (len(cluster) == cluster_size), happens once
			letters = list(cluster)
			for letter in letters:
				vector = np.roll(vector,1)
				letter_idx = alphabet.find(letter)
				vector = np.multiply(vector, RI_letters[letter_idx,:])
		text_vector += vector
	return text_vector
Example #6
def Column4(df,Nlen,Tlen,misdict):
    mA = np.zeros((Nlen*Tlen,Nlen*2+3),float)
    vb = np.zeros(Nlen*Tlen)
    i = 0
    for firmid,firmgroup in df.groupby('Firmid'):
        if not firmgroup['Dprice'].isnull().values.any():
            mA[i*Tlen:(i+1)*Tlen,i] = np.ones(Tlen)
            mA[i*Tlen:(i+1)*Tlen,i+Nlen] = firmgroup['Dmarket'].values
            mA[i*Tlen:(i+1)*Tlen,2*Nlen] = firmgroup['Event'].values
            eu = firmgroup['Conc'].values
            where_are_NaNs = np.isnan(eu)
            eu[where_are_NaNs] = 0
            mis = []
            for x in firmgroup['Nace2'].values:
                print x,misdict[x]
                mis.append(misdict[x])
            # mis = firmgroup['Dumconc'].values
            # where_are_NaNs = np.isnan(mis)
            # mis[where_are_NaNs] = 0
            mA[i*Tlen:(i+1)*Tlen,1+2*Nlen] = np.multiply(eu,firmgroup['Event'].values)
            mA[i*Tlen:(i+1)*Tlen,2+2*Nlen] = np.multiply(mis,firmgroup['Event'].values)
            vb[i*Tlen:(i+1)*Tlen] = [p2f(x) for x in firmgroup['Dprice'].values]
            i += 1
    tmpp = inv(mA.T.dot(mA)).dot(mA.T)
    Xhat = tmpp.dot(vb)
    gamma = Xhat[-3:]
    print gamma
    return gamma
Example #7
def std(f):
    x = np.array(range(len(f)))
    # normalize; we do not prefer attributes with many values
    x = x / x.mean()
    xf = np.multiply(f, x)
    x2f = np.multiply(f, np.power(x, 2))
    return np.sqrt((np.sum(x2f) - np.power(np.sum(xf), 2) / np.sum(f)) / (np.sum(f) - 1))
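A minimal usage sketch; the frequency vector below is made up purely for illustration, and numpy is assumed to be imported as np (as in the function body):

f = np.array([10.0, 5.0, 1.0])   # hypothetical value counts for one attribute
print(std(f))                    # frequency-weighted std of the normalized value indices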
Example #8
def argsort_by_jet_lookup_table(rgb_color):
    # create a jet colormap like matlab
    jet_r = [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0.0625, 0.1250, 0.1875, 0.2500, 0.3125, 0.3750, 0.4375, 0.5000, 0.5625, 0.6250, 0.6875, 0.7500, 0.8125, 0.8750, 0.9375, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 0.9375, 0.8750, 0.8125, 0.7500, 0.6875, 0.6250, 0.5625, 0.5000]

    jet_g = [0,     0,      0,      0,      0,      0,      0,      0, 0.0625, 0.1250, 0.1875, 0.2500, 0.3125, 0.3750, 0.4375, 0.5000, 0.5625, 0.6250, 0.6875, 0.7500, 0.8125, 0.8750, 0.9375, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 0.9375, 0.8750, 0.8125, 0.7500, 0.6875, 0.6250, 0.5625, 0.5000, 0.4375, 0.3750, 0.3125, 0.2500, 0.1875, 0.1250, 0.0625,      0,      0,      0,      0,      0,      0,      0,      0,      0]

    jet_b = [0.5625, 0.6250, 0.6875, 0.7500, 0.8125, 0.8750, 0.9375, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 1.0000, 0.9375, 0.8750, 0.8125, 0.7500, 0.6875, 0.6250, 0.5625, 0.5000, 0.4375, 0.3750, 0.3125, 0.2500, 0.1875, 0.1250, 0.0625,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0,      0]

    # add missing purples, cyans, yellows to this jet map.    
    jet_r = jet_r + [0.8, 0.8, 0.6, 0.2, 0.8, 0.8]
    jet_g = jet_g + [0.2, 0.2, 0.2, 0.8, 0.8, 0.8]
    jet_b = jet_b + [0.6, 0.8, 0.6, 0.8, 0.4, 0.2]

    # map from 0..255 to 0..1
    rgb_color = numpy.divide(rgb_color, 255)
    
    # sort input rgb into this colormap order
    match_jet = list()
    match_dist = list()
    for col_idx in range(len(rgb_color)):
        diff_r = rgb_color[col_idx,0]-jet_r
        diff_g = rgb_color[col_idx,1]-jet_g
        diff_b = rgb_color[col_idx,2]-jet_b

        mag = numpy.sqrt(numpy.multiply(diff_r,diff_r) + numpy.multiply(diff_g,diff_g) + numpy.multiply(diff_b,diff_b) )
        match_jet.append(numpy.argmin(mag))
        match_dist.append(numpy.min(mag))
        # uncomment for testing of worst color matches
        #if numpy.min(mag) > 0.3:
        #    print numpy.min(mag), "Color matched:", rgb_color[col_idx,:], "Idx:", match_jet[col_idx], ":", jet_r[match_jet[col_idx]], jet_g[match_jet[col_idx]], jet_b[match_jet[col_idx]], "\n"
    
    # Return indices that will sort these colors (centroids) in their match order
    return(numpy.argsort(numpy.array(match_jet)))
Example #9
    def estimateMethylatedFractions(self, pos, meanVector, modMeanVector, maskPos):

        maskPos = np.array(maskPos)
        L = len(maskPos)
        if L == 0:
            res = self.bootstrap(pos, meanVector[self.post], modMeanVector[self.post])
        else:
            est = np.zeros(L)
            low = np.zeros(L)
            upp = np.zeros(L)
            res = np.zeros(3)
            wts = np.zeros(L)

            # for offset in maskPos:
            for count in range(L):
                offset = maskPos[count]
                mu0 = meanVector[self.post + offset]
                mu1 = modMeanVector[self.post + offset]
                if mu1 > mu0:
                    k = self.bootstrap((pos + offset), mu0, mu1)
                    wts[count] = k[0] * (mu1 - mu0)
                    est[count] = k[0]
                    low[count] = k[1]
                    upp[count] = k[2]

            if sum(wts) > 1e-3:
                wts = wts / sum(wts)
                res[0] = np.multiply(est, wts).sum()
                res[1] = np.multiply(low, wts).sum()
                res[2] = np.multiply(upp, wts).sum()

        print str(res)
        return res
Example #10
def ratio_err(top,bottom,top_low,top_high,bottom_low,bottom_high):
    #uses simple propagation of errors (partial derivatives)
    #note it returns errorbars, not interval

    #-make sure input is numpy arrays-
    top = np.array(top)
    top_low = np.array(top_low)
    top_high = np.array(top_high)
    bottom = np.array(bottom)
    bottom_low = np.array(bottom_low)
    bottom_high = np.array(bottom_high)

    #-calculate errorbars-
    top_errlow = np.subtract(top,top_low)
    top_errhigh = np.subtract(top_high,top)
    bottom_errlow = np.subtract(bottom,bottom_low)
    bottom_errhigh = np.subtract(bottom_high,bottom)

    #-calculate ratio_low-
    ratio_low  = np.sqrt( np.square(np.divide(top_errlow,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errlow)) )
    #-calculate ratio_high-
    ratio_high = np.sqrt( np.square(np.divide(top_errhigh,bottom)) + np.square( np.multiply(np.divide(top,np.square(bottom)),bottom_errhigh)) )
#    ratio_high = ((top_errhigh/bottom)**2.0 + (top/(bottom**2.0))*bottom_errhigh)**2.0)**0.5

    # return two vectors, err_low and err_high
    return ratio_low,ratio_high
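In LaTeX notation, both expressions are first-order error propagation for the ratio R = T/B (T = top, B = bottom, sigma the corresponding error bars), applied once with the lower error bars and once with the upper ones:

    \sigma_R = \sqrt{\left(\frac{\sigma_T}{B}\right)^{2} + \left(\frac{T\,\sigma_B}{B^{2}}\right)^{2}}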
Example #11
def run_sim(R_star, transit_duration, bodies):
    """Run 3-body sim and convert results to TTV + TDV values in [minutes]"""

    # Run 3-body sim for one full orbit of the outermost moon
    loop(bodies, orbit_duration)
    

    # Move resulting data from lists to numpy arrays
    ttv_array = numpy.array(ttv_list)
    tdv_array = numpy.array(tdv_list)

    # Zeropoint correction
    middle_point =  numpy.amin(ttv_array) + numpy.amax(ttv_array)
    ttv_array = numpy.subtract(ttv_array, 0.5 * middle_point)
    ttv_array = numpy.divide(ttv_array, 1000)  # km/s

    # Compensate for barycenter offset of planet at start of simulation:
    planet.px = 0.5 * (gravity_firstmoon + gravity_secondmoon)
    stretch_factor = 1 / ((planet.px / 1000) / numpy.amax(ttv_array))
    ttv_array = numpy.divide(ttv_array, stretch_factor)

    # Convert to time units, TTV
    ttv_array = numpy.divide(ttv_array, R_star)
    ttv_array = numpy.multiply(ttv_array, transit_duration * 60 * 24)  # minutes

    # Convert to time units, TDV
    oldspeed = (2 * R_star / transit_duration) * 1000 / 24 / 60 / 60  # m/sec
    newspeed = oldspeed - numpy.amax(tdv_array)
    difference = (transit_duration - (transit_duration * newspeed / oldspeed)) * 24 * 60
    conversion_factor = difference / numpy.amax(tdv_array)
    tdv_array = numpy.multiply(tdv_array, conversion_factor)

    return ttv_array, tdv_array
Example #12
	def derivadaCusto(self, x, y):
		# Compute the derivative with respect to W1 and W2
		self.yEstimado = self.propaga(x)
		matrix_x = np.matrix(list(x.values()))
		if(self.tamInput > 1):
			matrix_x = matrix_x.T
		matrix_y = np.matrix(list(y.values()))
		# error to be backpropagated
		ek = -np.subtract(matrix_y, self.yEstimado)
		'''
		print("Erro k:")
		print(ek.shape)
		print(ek)
		print("Derivada sigmoid YIN : ", self.derivadaSigmoide(self.yin).shape)
		print(self.derivadaSigmoide(self.yin))
		'''
		delta3 = np.multiply(ek, self.derivadaPrelu(self.yin))#self.derivadaSigmoide(self.yin))
		# Get the error to be backpropagated through each layer, multiplying by the derivative of the activation function
		# adding the regularization term to the gradient (+lambda * weights)
		dJdW2 = np.dot(delta3, self.zin) + self.lambdaVal*self.W2
		'''
		print("dJdW2 ------------ ", dJdW2.shape)
		print(dJdW2)
		print("Z shape:", self.z.shape)
		print(self.z)
		print("Derivada Z shape:", self.derivadaSigmoide(self.z).shape)
		print(self.derivadaSigmoide(self.z))
		print("W2 shape", self.W2.shape)
		print(self.W2)
		'''
		delta2 = np.multiply(np.dot(self.W2, delta3).T, self.derivadaSigmoide(self.z))
		dJdW1 = np.dot(matrix_x, delta2) + self.lambdaVal*self.W1
		return dJdW1, dJdW2
Example #13
def determine_likeliest(genotypes,num_regions,rsid_info,rsid_order,sample,result_queue):
	#q initial value is uniform across all geos
	q = [float(1)/float(num_regions)] * num_regions
	g = []
	f = []

	valid = set(['A','T','G','C'])

	#set up genotype and frequency vectors
	for ind,v in enumerate(genotypes):
		rsid = rsid_order[ind]
		ref_allele = rsid_info[rsid]["allele"]

		if v[0] in valid and v[1] in valid:
			matches = 0
			for i in v:
				if i == ref_allele:
					matches += 1

			g.append(matches)
			f.append(rsid_info[rsid]["freqs"])

	q = np.array(q)
	g = np.array(g)
	f = np.array(f)

	q_n_1 = q

	e = .01
	l_n = -1.0 * sys.maxint
	l_n_1 = compute_likelihood(g,f,q)

	c = 0
	while l_n_1 - l_n > e:
		c += 1
		q = q_n_1
		q_n_1 = [0] * len(q)

		for i,g_v in enumerate(g):
			a_denom = np.dot(q,f[i])
			b_denom = np.dot(q,1.0 - f[i])

			a = np.multiply(f[i],q) / a_denom
			b = np.multiply(1.0 - f[i],q) / b_denom

			q_n_1 += float(g_v) * a
			q_n_1 += float(2 - g_v) * b

		q_n_1 = (float(1)/float(2*len(g))) * q_n_1
		l_n = l_n_1
		l_n_1 = compute_likelihood(g,f,q_n_1)

	print "Sample: %s, Iterations: %d, Likelihood: %f" % (sample,c,l_n_1)


	result_string = [str(i) for i in q_n_1]

	result_queue.put("%s|%s\n" % (sample,"|".join(result_string)))
	
	return
Example #14
def plot_triplet(apn, idx):
  plt.subplot(1,3,1)
  plt.imshow(np.multiply(apn[idx*3+0,:,:,:],1/256))
  plt.subplot(1,3,2)
  plt.imshow(np.multiply(apn[idx*3+1,:,:,:],1/256))
  plt.subplot(1,3,3)
  plt.imshow(np.multiply(apn[idx*3+2,:,:,:],1/256))
Example #15
def resolve_collision(m):
    # Calculate relative velocity
    rv = numpy.subtract(m.b.velocity, m.a.velocity)

    # Calculate relative velocity in terms of the normal direction
    velocity_along_normal = numpy.dot(rv, m.normal)

    # Do not resolve if velocities are separating
    if velocity_along_normal > 0:
        # print("Separating:", velocity_along_normal)
        # print("  Normal:  ", m.normal)
        # print("  Vel:     ", m.b.velocity, m.a.velocity)
        return False

    # Calculate restitution
    e = min(m.a.restitution, m.b.restitution)

    # Calculate impulse scalar
    j = -(1 + e) * velocity_along_normal
    j /= 1 / m.a.mass + 1 / m.b.mass

    # Apply impulse
    impulse = numpy.multiply(j, m.normal)

    # print("Before: ", m.a.velocity, m.b.velocity)
    m.a.velocity = numpy.subtract(m.a.velocity,
                                  numpy.multiply(1 / m.a.mass, impulse))
    m.b.velocity = numpy.add(m.b.velocity,
                             numpy.multiply(1 / m.b.mass, impulse))
    # print("After:  ", m.a.velocity, m.b.velocity)
    # print("  Normal:  ", m.normal)

    return True
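In LaTeX notation, the impulse scalar and the velocity updates above are

    j = \frac{-(1+e)\,(\mathbf{v}_b - \mathbf{v}_a)\cdot\hat{\mathbf{n}}}{1/m_a + 1/m_b}, \qquad
    \mathbf{v}_a \leftarrow \mathbf{v}_a - \frac{j}{m_a}\hat{\mathbf{n}}, \qquad
    \mathbf{v}_b \leftarrow \mathbf{v}_b + \frac{j}{m_b}\hat{\mathbf{n}}

with e the coefficient of restitution and \hat{\mathbf{n}} the collision normal.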
Example #16
        def mark_lalo_anomoly(lat, lon):
            """mask pixels with abnormal values (0, etc.)
            This is found in sentinelStack multiple-swath lookup table files.
            """
            # ignore pixels with zero value
            zero_mask = np.multiply(lat != 0., lon != 0.)

            # ignore anomalous non-zero values
            # by getting the most common data range (d_min, d_max) based on the histogram
            mask = np.array(zero_mask, np.bool_)
            for data in [lat, lon]:
                bin_value, bin_edge = np.histogram(data[mask], bins=10)                
                # if there is anomaly, histogram won't be evenly distributed
                while np.max(bin_value) > np.sum(zero_mask) * 0.3:
                    # find the contiguous bins containing the largest bin --> normal data range
                    bin_value_thres = ut.median_abs_deviation_threshold(bin_value, cutoff=3)
                    bin_label = ndimage.label(bin_value > bin_value_thres)[0]
                    idx = np.where(bin_label == bin_label[np.argmax(bin_value)])[0]
                    # convert to min/max data value
                    bin_step = bin_edge[1] - bin_edge[0]
                    d_min = bin_edge[idx[0]] - bin_step / 2.
                    d_max = bin_edge[idx[-1]+1] + bin_step / 2.
                    mask *= np.multiply(data >= d_min, data <= d_max)
                    bin_value, bin_edge = np.histogram(data[mask], bins=10)
            lat[mask == 0] = 90.
            lon[mask == 0] = 0.
            return lat, lon, mask
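Note that np.multiply on boolean arrays acts as an element-wise AND, which is how zero_mask and the d_min/d_max tests are combined above. A tiny standalone check (the arrays are arbitrary illustration values):

import numpy as np
a = np.array([True, True, False])
b = np.array([True, False, False])
print(np.multiply(a, b))        # [ True False False]
print(np.logical_and(a, b))     # identical result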
Example #17
 def trans_param_to_current_array(self, quantity_dict, trans_param,
                                  model='LIF', mcnc_grouping=None,
                                  std=None):
     quantity_array = quantity_dict['quantity_array']
     quantity_rate_array = np.abs(np.gradient(quantity_array)) / DT
     if model == 'LIF':
         current_array = trans_param[0] * quantity_array +\
             trans_param[1] * quantity_rate_array + trans_param[2]
         if std is not None:
             std = 0 if std < 0 else std
             current_array += np.random.normal(
                 loc=0., scale=std, size=quantity_array.shape)
     if model == 'Lesniak':
         trans_param = np.tile(trans_param, (4, 1))
         trans_param[:, :2] = np.multiply(
             trans_param[:, :2].T, mcnc_grouping).T
         quantity_array = np.tile(quantity_array, (mcnc_grouping.size, 1)).T
         quantity_rate_array = np.tile(
             quantity_rate_array, (mcnc_grouping.size, 1)).T
         current_array = np.multiply(quantity_array, trans_param[:, 0]) +\
             np.multiply(quantity_rate_array, trans_param[:, 1]) +\
             np.multiply(np.ones_like(quantity_array), trans_param[:, 2])
         if std is not None:
             std = 0 if std < 0 else std
             current_array += np.random.normal(loc=0., scale=std,
                                               size=quantity_array.shape)
     return current_array
Example #18
    def backPropagate (self, targets):
        """Performs back propagation taking as an argument the targets
        and returns the new weights"""
        
        ##compute deltas for output layer
        if(self.cost=="MSE"): ##Mean-Squared Error as loss function
            d_error = np.array(self.xi_2.ravel()-np.array(targets)).reshape((self.n_o,1))
        else:
            #"CROSS ENTROPY as loss function"
            d_error = np.array([-1*(np.array(targets)[i]/(self.xi_2[i]))+(1-np.array(targets)[i])/(1-self.xi_2[i]) for i in range(self.n_o)]).reshape((self.n_o,1))

        
        pr_2 = self.xi_2.ravel().T *(1- self.xi_2.ravel().T)
        
        d_error = np.array(d_error.ravel()).reshape((1,self.n_o)).flatten()
        delta_i_2 = np.multiply(d_error,pr_2).reshape((self.n_o,1))

        ##deltas for hidden layer     
        err = np.dot(self.w_2,delta_i_2)
        pr_1 = 1-self.xi_1**2
        delta_i_1 = np.multiply(err,pr_1)
        
        #####Update the weights
        ######update output weights w_2
        self.w_2 = self.w_2 - self.epsilon*np.dot(self.xi_1,delta_i_2.T)
                
        ######update input weights w_1
        self.w_1 = self.w_1 - self.epsilon*np.dot(self.xi_0,delta_i_1.T[:,1:])

        return self.w_1, self.w_2
Example #19
    def prepare_regular_grid_interpolator(self):
        """Prepare aux data for RGI module"""
        # source points in regular grid
        src_length = int(self.src_metadata['LENGTH'])
        src_width = int(self.src_metadata['WIDTH'])
        self.src_pts = (np.arange(src_length), np.arange(src_width))

        # destination points
        dest_y = readfile.read(self.file, datasetName='azimuthCoord')[0]
        dest_x = readfile.read(self.file, datasetName='rangeCoord')[0]
        if 'SUBSET_XMIN' in self.src_metadata.keys():
            print('input data file was cropped before.')
            dest_y[dest_y != 0.] -= float(self.src_metadata['SUBSET_YMIN'])
            dest_x[dest_x != 0.] -= float(self.src_metadata['SUBSET_XMIN'])
        self.interp_mask = np.multiply(np.multiply(dest_y > 0, dest_y < src_length),
                                       np.multiply(dest_x > 0, dest_x < src_width))
        self.dest_pts = np.hstack((dest_y[self.interp_mask].reshape(-1, 1),
                                   dest_x[self.interp_mask].reshape(-1, 1)))

        # destination data size
        self.length = int(self.lut_metadata['LENGTH'])
        self.width = int(self.lut_metadata['WIDTH'])
        lat0 = float(self.lut_metadata['Y_FIRST'])
        lon0 = float(self.lut_metadata['X_FIRST'])
        lat_step = float(self.lut_metadata['Y_STEP'])
        lon_step = float(self.lut_metadata['X_STEP'])
        self.laloStep = (lat_step, lon_step)
        self.SNWE = (lat0 + lat_step * (self.length - 1),
                     lat0,
                     lon0,
                     lon0 + lon_step * (self.width - 1))
Example #20
    def update_weights(self, forward_layer_error_signal_factor):
        """
            Update the weights using the gradient descent algorithm.
             @forward_layer_error_signal_factor: the forward layer's error factor as a vector
                ==> dot(forward_layer.weights.T, forward_layer.gradient)
        """
        gradient_of_error_func = multiply(
            multiply(self.learning_rate, forward_layer_error_signal_factor, forward_layer_error_signal_factor),
            self.activation_function.derivative(f_of_x=self.outputs, out=self.outputs),
            out=self.outputs
        )

        back_propagation_error_factor = dot(self.weights.T, gradient_of_error_func)

        self.previous_weight_update = multiply(
            self.previous_weight_update, self.momentum, out=self.previous_weight_update
        )
        delta_weights = add(
            self.previous_weight_update,
            multiply(gradient_of_error_func.reshape((-1, 1)), self.inputs),
            out=self.previous_weight_update
        )

        self.weights += delta_weights                   # update weights.
        self.bias += gradient_of_error_func             # update bias.

        return back_propagation_error_factor            # back-propagate error factor to previous layer ...
Example #21
def calcSurfaceForce(P, x, y):
    ds = np.sqrt((x[1] - x[0])**2.0 + (y[1] - y[0])**2.0)
    [tangentVectorX, tangentVectorY, normalVectorX, normalVectorY] = calcSurfaceVectors(x, y)
    Fx = -np.multiply(normalVectorX, P) * ds
    Fy = -np.multiply(normalVectorY, P) * ds
    return (Fx, Fy)
# ---------------------------------------------------- #
Example #22
 def test_general(self):
     """
     Tests for life the universe and everything.
     """
     inputchannels = ((3.0, -4.0, 5.0),
                      (3.0, 4.0, -5.0),
                      (0.25, -0.5, 1.0),
                      (0.25, 0.5, -1.0),
                      (0.3, -0.4, 0.5),
                      (0.3, 0.4, -0.5),
                      (3.0, 4.0, 5.0),
                      (-3.0, -4.0, -5.0),
                      (0.25, 0.5, 1.0),
                      (-0.25, -0.5, -1.0),
                      (0.3, 0.4, 0.5),
                      (-0.3, -0.4, -0.5),
                      (0.0, 0.0, 0.0))
     individuallynormalizedchannels = []
     individualfactors = [0.2, 0.2, 1.0, 1.0, 2.0, 2.0, 0.2, 0.2, 1.0, 1.0, 2.0, 2.0, 1.0]
     for i in range(len(inputchannels)):
         individuallynormalizedchannels.append(tuple(numpy.multiply(inputchannels[i], individualfactors[i])))
     globalynormalizedchannels = tuple(numpy.multiply(inputchannels, 0.2))
     labels = ("The", "first", "six", "channels", "are", "labeled")
     isignal = sumpf.Signal(channels=inputchannels, samplingrate=48000, labels=labels)
     insignal = sumpf.Signal(channels=individuallynormalizedchannels, samplingrate=48000, labels=labels)
     gnsignal = sumpf.Signal(channels=globalynormalizedchannels, samplingrate=48000, labels=labels)
     norm = sumpf.modules.NormalizeSignal()
     self.assertEqual(norm.GetOutput(), sumpf.Signal())  # the default input Signal should be empty
     norm.SetSignal(isignal)
     self.assertEqual(norm.GetOutput(), gnsignal)        # by default, the input Signal's channels should not be normalized individually
     norm = sumpf.modules.NormalizeSignal(signal=isignal, individual=True)
     self.assertEqual(norm.GetOutput(), insignal)        # as specified in the constructor call, the channels should be normalized individually
     norm.SetIndividual(False)
     self.assertEqual(norm.GetOutput(), gnsignal)        # as specified in the setter method call, the channels should no longer be normalized individually
Example #23
def poly_centroid(P):
	X = P[:,0]
	Y = P[:,1]
	return 1/6.0/poly_area(P) * np.asarray([\
		np.dot(X + np.roll(X, -1), np.multiply(X, np.roll(Y, -1)) - np.multiply(np.roll(X, -1), Y)),\
		np.dot(Y + np.roll(Y, -1), np.multiply(Y, np.roll(X, -1)) - np.multiply(np.roll(Y, -1), X))\
	])
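In LaTeX notation, this is the shoelace centroid formula exactly as vectorized here, with A the signed area returned by poly_area (not shown), so the overall sign follows poly_area's orientation convention:

    C_x = \frac{1}{6A}\sum_i (x_i + x_{i+1})(x_i y_{i+1} - x_{i+1} y_i), \qquad
    C_y = \frac{1}{6A}\sum_i (y_i + y_{i+1})(y_i x_{i+1} - y_{i+1} x_i)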
Example #24
    def _get_H(self, debug=False):
        """
        returns H_t as defined in algorithm 2
        
        Reference:
        https://en.wikipedia.org/wiki/Limited-memory_BFGS
        http://www.ccms.or.kr/data/pdfpaper/jcms21_1/21_1_117.pdf
        https://homes.cs.washington.edu/~galen/files/quasi-newton-notes.pdf
        """
        I = np.identity(len(self.w))
        
        if min(len(self.s), len(self.y)) == 0:
                print "Warning: No second order information used!"
                return I
            
        assert len(self.s) > 0, "s cannot be empty."
        assert len(self.s) == len(self.y), "s and y must have same length"
        assert self.s[0].shape == self.y[0].shape, \
            "s and y must have same shape"
        assert abs(self.y[-1]).sum() != 0, "latest y entry cannot be 0!"
        assert 1/np.inner(self.y[-1], self.s[-1]) != 0, "!"

        I = np.identity(len(self.s[0]))
        H = np.dot((np.inner(self.s[-1], self.y[-1]) / np.inner(self.y[-1],
                   self.y[-1])), I)

        for (s_j, y_j) in itertools.izip(self.s, self.y):
            rho = 1.0/np.inner(y_j, s_j)
            V = I - np.multiply(rho, np.outer(s_j, y_j))
            H = (V).dot(H).dot(V.T)
            H += np.multiply(rho, np.outer(s_j, s_j))

        return H
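The loop is the BFGS inverse-Hessian recursion from the references in the docstring; in LaTeX notation, for each stored pair (s_j, y_j):

    H \leftarrow V_j H V_j^{\top} + \rho_j\, s_j s_j^{\top}, \qquad V_j = I - \rho_j\, s_j y_j^{\top}, \qquad \rho_j = \frac{1}{y_j^{\top} s_j}

starting from the scaled identity H_0 = \frac{s_k^{\top} y_k}{y_k^{\top} y_k} I built from the most recent pair.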
Example #25
def normalize_layout(l):
    """Make sure all the spots in a layout are where you can click.

    Returns a copy of the layout with all spot coordinates are
    normalized to within (0.0, 0.98).

    """
    xs = []
    ys = []
    ks = []
    for (k, (x, y)) in l.items():
        xs.append(x)
        ys.append(y)
        ks.append(k)
    minx = np.min(xs)
    maxx = np.max(xs)
    try:
        xco = 0.98 / (maxx - minx)
        xnorm = np.multiply(np.subtract(xs, [minx] * len(xs)), xco)
    except ZeroDivisionError:
        xnorm = np.array([0.5] * len(xs))
    miny = np.min(ys)
    maxy = np.max(ys)
    try:
        yco = 0.98 / (maxy - miny)
        ynorm = np.multiply(np.subtract(ys, [miny] * len(ys)), yco)
    except ZeroDivisionError:
        ynorm = np.array([0.5] * len(ys))
    return dict(zip(ks, zip(map(float, xnorm), map(float, ynorm))))
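A minimal usage sketch with made-up spot coordinates (numpy is assumed to be imported as np, as in the function body):

layout = {'kitchen': (2.0, 3.0), 'hall': (10.0, 3.0), 'cellar': (6.0, -1.0)}
print(normalize_layout(layout))   # every coordinate in the result lies within [0.0, 0.98]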
Example #26
def bin_maker(bin_size,F_matrix,summed=None):
    """
    Calculate the conditional usage as a function of the flow on the link according to bin_size
    """
    bin_max = np.ceil(max(F_matrix[:,0])/bin_size)*bin_size # round up to nearest bin_size
    nbins = int(bin_max/bin_size) # number of bins
    bin_means = np.linspace(.5*bin_size,bin_max-(.5*bin_size),nbins) # mean values of bins

    H_temp = []
    H = np.zeros((nbins,4)) # [#nodes, mean usage, min usage, max usage]
    for b in range(int(nbins)):
        for t in range(lapse):
            if b*bin_size <= F_matrix[t,0] < (b+1)*bin_size:
                H_temp.append(F_matrix[t,1])
        if len(H_temp)>0:
            H[b,0] = len(H_temp)
            H[b,1] = sum(H_temp)/len(H_temp)
            H[b,2] = min(H_temp)
            H[b,3] = max(H_temp)
        else: # no data in the bin
            H[b,0] = 0
            H[b,1] = 0
            H[b,2] = 0
            H[b,3] = 0
        H_temp=[]

    if summed:
        part_sum = np.multiply(bin_means,bin_size)
        bin_sum = sum(np.multiply(H[:,1],part_sum))
        return np.array([bin_means,H[:,1]]),bin_sum
    else:
        return bin_means,H
Example #27
 def adaBoostTrainDecisionStump(self,dataArr,classLabels,numInt=40):
     weakDecisionStumpArr = []
     m = np.shape(dataArr)[0]
     weight = np.mat(np.ones((m,1))/m)     # initialize the data weights; normally the initial weight is 1/n
     aggressionClassEst = np.mat(np.zeros((m,1)))
     for i in range(numInt): # classEst == class estimation
         bestStump,error,classEst = self.buildStump(dataArr,classLabels,weight) # D is a vector of the data's weight
         # print("D: ",weight.T)
         alpha = float(0.5 * np.log((1.0 - error)/max(error , 1e-16)))   # alpha is the weight of the weak classifier
         bestStump['alpha'] = alpha
         weakDecisionStumpArr.append(bestStump)
         exponent = np.multiply(-1* alpha * np.mat(classLabels).T , classEst) # calculate the exponent [- alpha * Y * Gm(X)]
         print("classEst :",classEst.T)
         weight = np.multiply(weight,np.exp(exponent)) # update the weight of the data, w_m = e^[- alpha * Y * Gm(X)]
         weight = weight/weight.sum()  # weight.sum() == Z_m (normalization factor), which makes sure D_(m+1) is a probability distribution
         # give every estimated class vector (the classified result of the weak classifier) a weight
         aggressionClassEst += alpha*classEst
         print("aggression classEst: ",aggressionClassEst.T)
         # aggressionClassError = np.multiply(np.sign(aggressionClassEst) != np.mat(classLabels).T, np.ones((m,1)))
         # errorRate = aggressionClassError.sum()/m
         errorRate = (np.sign(aggressionClassEst) != np.mat(classLabels).T).sum()/m # calculate the misclassification rate
         # errorRate = np.dot((np.sign(aggressionClassEst) != np.mat(classLabels).T).T,np.ones((m,1)))/m
         print("total error: ",errorRate,"\n")
         if errorRate == 0:
             break
     return weakDecisionStumpArr
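In LaTeX notation, each iteration applies the usual AdaBoost update that the code above implements:

    \alpha_m = \tfrac{1}{2}\ln\frac{1 - \varepsilon_m}{\max(\varepsilon_m, 10^{-16})}, \qquad
    w_i \leftarrow \frac{w_i \exp\big(-\alpha_m\, y_i\, G_m(x_i)\big)}{Z_m}

where Z_m is the normalization factor (weight.sum()).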
Example #28
def die(first_noun, second_noun, trans_verb):
    """Vectorize a sentence with 'noun die noun verb' = (sub, obj)."""
    noun_model = space.words.polyglot_model()
    noun_space = noun_model[0]

    die_vector = compose.train.die_cat_stored()
    ver_vector = compose.train.verb(trans_verb, noun_model)

    fst_vector = noun_space[first_noun]
    snd_vector = noun_space[second_noun]

    par_vector_sub = kron(
        csr_matrix(snd_vector), csr_matrix(ver_vector))
    par_vector_obj = kron(
        csr_matrix(snd_vector), numpy.transpose(csr_matrix(ver_vector)))

    par_vector_sub = kron(
        numpy.transpose(csr_matrix(fst_vector)), csr_matrix(par_vector_sub))
    par_vector_obj = kron(
        numpy.transpose(csr_matrix(fst_vector)), csr_matrix(par_vector_obj))

    vector_sub = numpy.multiply(csr_matrix(die_vector), par_vector_sub)
    vector_obj = numpy.multiply(csr_matrix(die_vector), par_vector_obj)

    return (vector_sub.toarray().flatten(), vector_obj.toarray().flatten())
Example #29
	def assign_weights(self,network,matTargetNeurons):
		numInput = self.dicProperties["IODim"]
		numNodesReservoir = self.dicProperties["ReservoirDim"]
		numInhib = numInput*numNodesReservoir*self.dicProperties["InhibFrac"]
		nRowLength = len(matTargetNeurons[0])
		numInhibPerRow = int(np.floor(nRowLength*self.dicProperties["InhibFrac"]))
		if self.dicProperties["Distribution"] == "Betweenness":
			if self.lstBetweenness == []:
				self.lstBetweenness = betwCentrality(network)[0].a
			rMaxBetw = self.lstBetweenness.max()
			rMinBetw = self.lstBetweenness.min()
			rMaxWeight = self.dicProperties["Max"]
			rMinWeight = self.dicProperties["Min"]
			for i in range(self.dicProperties["IODim"]):
				self.lstBetweenness = np.multiply(np.add(self.lstBetweenness,-rMinBetw+rMinWeight*rMaxBetw/(rMaxWeight-rMinWeight)),(rMaxWeight-rMinWeight)/rMaxBetw)
				self.__matConnect[i,matTargetNeurons[i]] = self.lstBetweenness[matTargetNeurons[i]] # does not take duplicate indices into account... never mind
			# generate the necessary inhibitory connections
			lstNonZero = np.nonzero(self.__matConnect)
			lstInhib = np.random.randint(0,len(lstNonZero),numInhib)
			self.__matConnect[lstInhib] = -self.__matConnect[lstInhib]
			rFactor = (self.dicProperties["Max"]-self.dicProperties["Min"])/(rMaxBetw-rMinBetw) # between 0 and Max-Min
			self.__matConnect = np.add(np.multiply(self.__matConnect,rFactor),self.dicProperties["Min"]) # between Min and Max
		elif self.dicProperties["Distribution"] == "Gaussian":
			for i in range(self.dicProperties["IODim"]):
				self.__matConnect[i,matTargetNeurons[i,:numInhibPerRow]] = -np.random.normal(self.dicProperties["MeanInhib"],self.dicProperties["VarInhib"],numInhibPerRow)
				self.__matConnect[i,matTargetNeurons[i,numInhibPerRow:]] = np.random.normal(self.dicProperties["MeanExc"],self.dicProperties["VarExc"],nRowLength-numInhibPerRow)
		elif self.dicProperties["Distribution"] == "Lognormal":
			for i in range(self.dicProperties["IODim"]):
				self.__matConnect[i,matTargetNeurons[i][:numInhibPerRow]] = -np.random.lognormal(self.dicProperties["LocationInhib"],self.dicProperties["ScaleInhib"],numInhibPerRow)
				self.__matConnect[i,matTargetNeurons[i][numInhibPerRow:]] = np.random.lognormal(self.dicProperties["LocationExc"],self.dicProperties["ScaleExc"],nRowLength-numInhibPerRow)
		else:
			None # I don't know what to do for the degree correlations yet
Example #30
def online_k_means(k,b,t,X_in):
    random_number = 11232015
    random_num = np.random.randint(X_in.shape[0], size =300 )
    rng = np.random.RandomState(random_number)
    permutation1 = rng.permutation(len(random_num))
    random_num = random_num[permutation1]
    x_input = X_in[random_num]
    c,l = mykmeansplusplus(x_input,k,t)
    v = np.zeros((k))
    for i in range(t):
        random_num = np.random.randint(X_in.shape[0], size = b)
        rng = np.random.RandomState(random_number)
        permutation1 = rng.permutation(len(random_num))
        random_num = random_num[permutation1]
        M = X_in[random_num]
        Y = cdist(M,c,metric='euclidean', p=2, V=None, VI=None, w=None)
        clust_index = np.argmin(Y,axis = 1)
        for i in range(M.shape[0]):
            c_in = clust_index[i]
            v[c_in] += 1
            ita = 1 / v[c_in]
            c[c_in] = np.add(np.multiply((1 - ita),c[c_in]),np.multiply(ita,M[i]))
    Y_l = cdist(X_in,c,metric='euclidean', p=2, V=None, VI=None, w=None)
    l = np.argmin(Y_l,axis = 1)        
    return c,l
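The inner assignment is the online (per-sample) k-means center update: with v[c] the number of points assigned to center c so far and eta = 1/v[c], in LaTeX notation

    c \leftarrow (1 - \eta)\, c + \eta\, x

which is exactly what the np.add/np.multiply line computes.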
Example #31
w_conv8=np.load('./weights_npy/conv8_weights.npy')

B_conv8=np.load('./weights_npy/conv8_bias.npy')

w_conv9=np.load('./weights_npy/conv9_weights.npy')

B_conv9=np.load('./weights_npy/conv9_bias.npy')


w_conv10=np.load('./weights_npy/conv10_weights.npy')
W_conv10[:,:,:,0:1]=w_conv10[:,:,:,:]
B_conv10=np.load('./weights_npy/conv10_bias.npy')
b_conv10[0:1]=B_conv10[0:1]
precision = 1000
W1 = np.multiply(W_conv1.flatten(),precision)
B1 = np.multiply(B_conv1.flatten(),precision)

W2 = np.multiply(w_conv2.flatten(),precision)
B2 = np.multiply(B_conv2.flatten(),precision)

W3 = np.multiply(w_conv3.flatten(),precision)
B3 = np.multiply(B_conv3.flatten(),precision)

W4 = np.multiply(w_conv4.flatten(),precision)
B4 = np.multiply(B_conv4.flatten(),precision)

W5 = np.multiply(w_conv5.flatten(),precision)
B5 = np.multiply(B_conv5.flatten(),precision)

W6 = np.multiply(w_conv6.flatten(),precision)
Example #32
def findCont(img,temp2):
    templateImage = np.multiply(img,temp2)
    ret2,img4 = cv2.threshold(templateImage,70,255,cv2.THRESH_BINARY)
    (img5,contours,hierarchy) = cv2.findContours(img4.copy(),cv2.RETR_TREE,cv2.CHAIN_APPROX_NONE)
    cnts = sorted(contours,key = cv2.contourArea,reverse=True)
    return templateImage,img4,cnts
Example #33
    def adadelta_pattern(self,n,beta):
        #pattern mode
        #update of wmk
        x_n=self.x[n,:]
        x_n=x_n[np.newaxis,:]  #1*d
        y_n=self.y[n,:]
        y_n=y_n[np.newaxis,:]
        
        
        z3_delta = self.so - y_n # w3
        a3_delta = z3_delta * self.cs['activation function derivative']['ReLU'](self.so,beta,derivative=True)
# =============================================================================
        z2_delta = np.dot(a3_delta, self.wMK.T)
        a2_delta = z2_delta * self.cs['activation function derivative'][s2](self.sh2,beta,derivative=True) # w2
# =============================================================================
        z1_delta = np.dot(a2_delta, self.wJM.T)
        a1_delta = z1_delta * self.cs['activation function derivative'][s2](self.sh1,beta,derivative=True) # w1
 
        gwMK =  np.dot(self.sh2.T, a3_delta)
        gbo =  np.sum(a3_delta, axis=0, keepdims=True)
# =============================================================================
        gwJM =  np.dot(self.sh1.T, a2_delta)
        gbh2 =  np.sum(a2_delta, axis=0)
# =============================================================================
        gwIJ =  np.dot(x_n.T, a1_delta)
        gbh1 =  np.sum(a1_delta, axis=0)
        
        gw = [gwIJ,gwJM,gwMK]
        gb = [gbh1,gbh2,gbo]
        
        self.adadelta_g.append(gw)
        self.adadelta_b.append(gb)
        if len(self.adadelta_g)>10:
            self.adadelta_g.pop(0)
        if len(self.adadelta_b)>10:
            self.adadelta_b.pop(0)
        
        r_wmk = 0.9*(np.sum(np.square([self.adadelta_g[0][2] for i in range(len(self.adadelta_g))]),axis = 0)/10)+0.1*np.square(gwMK)        
        r_wjm = 0.9*(np.sum(np.square([self.adadelta_g[0][1] for i in range(len(self.adadelta_g))]),axis = 0)/10)+0.1*np.square(gwJM) 
        r_wij = 0.9*(np.sum(np.square([self.adadelta_g[0][0] for i in range(len(self.adadelta_g))]),axis = 0)/10)+0.1*np.square(gwIJ) 

        r_wbo = 0.9*(np.sum(np.square([self.adadelta_b[0][2] for i in range(len(self.adadelta_b))]),axis = 0)/10)+0.1*np.square(gbo)   
        r_wbh2 = 0.9*(np.sum(np.square([self.adadelta_b[0][1] for i in range(len(self.adadelta_b))]),axis = 0)/10)+0.1*np.square(gbh2)         
        r_wbh1 = 0.9*(np.sum(np.square([self.adadelta_b[0][0] for i in range(len(self.adadelta_b))]),axis = 0)/10)+0.1*np.square(gbh1)         

        r_wmk = 0.00000001 + np.sqrt(r_wmk)
        r_wjm = 0.00000001 + np.sqrt(r_wjm)
        r_wij = 0.00000001 +  np.sqrt(r_wij)
        
        r_wbo  = 0.00000001 + np.sqrt(r_wbo)
        r_wbh2 = 0.00000001 +  np.sqrt(r_wbh2)
        r_wbh1 = 0.00000001 +  np.sqrt(r_wbh1)


        delta_wmk =np.multiply(self.u_wmk, np.divide(gwMK,r_wmk))
        delta_wjm =np.multiply(self.u_wjm, np.divide(gwJM,r_wjm))
        delta_wij =np.multiply(self.u_wij, np.divide(gwIJ,r_wij))
        
        delta_bo = np.multiply(self.u_wbo, np.divide(gbo,r_wbo))
        delta_bh2 = np.multiply(self.u_wbh2, np.divide(gbh2,r_wbh2))
        delta_bh1 = np.multiply(self.u_wbh1, np.divide(gbh1,r_wbh1))
        
        delta_g = [delta_wij,delta_wjm,delta_wmk]
        delta_b = [delta_bh1,delta_bh2,delta_bo]
        
        self.adadelta_deltag.append(delta_g)
        self.adadelta_deltab.append(delta_b)
        if len(self.adadelta_deltag)>10:
            self.adadelta_deltag.pop(0)
        if len(self.adadelta_deltab)>10:
            self.adadelta_deltab.pop(0)
        
        self.u_wmk = 0.00000001 + np.sqrt( 0.9*(np.sum(np.square([self.adadelta_deltag[0][2] for i in range(len(self.adadelta_deltag))]),axis = 0)/10)+0.1*np.square(delta_wmk))        
        self.u_wjm = 0.00000001 + np.sqrt(0.9*(np.sum(np.square([self.adadelta_deltag[0][1] for i in range(len(self.adadelta_deltag))]),axis = 0)/10)+0.1*np.square(delta_wjm)) 
        self.u_wij =0.00000001 + np.sqrt( 0.9*(np.sum(np.square([self.adadelta_deltag[0][0] for i in range(len(self.adadelta_deltag))]),axis = 0)/10)+0.1*np.square(delta_wij)) 

        self.u_wbo =0.00000001+np.sqrt( 0.9*(np.sum(np.square([self.adadelta_deltab[0][2] for i in range(len(self.adadelta_deltab))]),axis = 0)/10)+0.1*np.square(delta_bo))   
        self.u_wbh2 =0.00000001+np.sqrt( 0.9*(np.sum(np.square([self.adadelta_deltab[0][1] for i in range(len(self.adadelta_deltab))]),axis = 0)/10)+0.1*np.square(delta_bh2))         
        self.u_wbh1 =0.00000001+np.sqrt( 0.9*(np.sum(np.square([self.adadelta_deltab[0][0] for i in range(len(self.adadelta_deltab))]),axis = 0)/10)+0.1*np.square(delta_bh1))         

        self.wMK -= delta_wmk
        self.wJM -= delta_wjm
        self.wIJ -= delta_wij        
        self.bo -= delta_bo
        self.bh2 -= delta_bh2
        self.bh1 -= delta_bh1
Example #34
    def _compute_w(self, X, y):

        w = X.T.dot(np.multiply(y, self.alpha))

        return w
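In LaTeX notation, this recovers a primal weight vector from dual coefficients (as in a support vector machine, for example):

    \mathbf{w} = X^{\top}(\mathbf{y} \odot \boldsymbol{\alpha}) = \sum_i \alpha_i\, y_i\, \mathbf{x}_i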
Example #35
def encode_easy_training_data(root_dir_path,
                              csvfile,
                              output_path,
                              num_images=500,
                              image_extension='.jpeg',
                              training_proportion=0.9):
    '''
    Given a root directory root_dir_path, a CSV file with labels (csvfile) and an
    output path, copy images labeled 0 or 4 into training and validation subsets
    (grade 4 is relabeled as 1), with num_images per grade split by training_proportion.
    '''

    # Prepare the image path
    images_dir_path = path.join(root_dir_path, 'images')

    # Prepare the output paths
    training_data_path = path.join(output_path, 'training')
    makedirs(training_data_path, exist_ok=True)
    validation_data_path = path.join(output_path, 'validation')
    makedirs(validation_data_path, exist_ok=True)

    # Prepare a dictionary from the CSV file
    filename_labels_dictionary = initialize_dictionary_from_file(csvfile)

    # Compute the size of each subset
    print(num_images)
    print(training_proportion)
    number_of_training_images_per_grade = np.multiply(
        np.ones(2), floor(num_images * training_proportion))
    number_of_validation_images_per_grade = np.multiply(
        np.ones(2), floor(num_images * (1 - training_proportion)))

    # For each image in the input path
    image_filenames = listdir(images_dir_path)
    for current_image_filename in image_filenames:
        # Get only image name, without the extension, to look up for the label in the dictionary
        image_entry_name = current_image_filename[:current_image_filename.
                                                  rfind('.')]
        # Get current label
        current_label = int(filename_labels_dictionary[image_entry_name])
        # We will only copy those images with label = 0 or 4
        if (current_label == 0) or (current_label == 4):
            # If the label is 4, generate 1
            if current_label == 4:
                current_label = 1
            valid_copy = False
            # Check if we still need to move images to the training set
            if number_of_training_images_per_grade[current_label] > 0:
                current_output_folder = training_data_path
                number_of_training_images_per_grade[
                    current_label] = number_of_training_images_per_grade[
                        current_label] - 1
                valid_copy = True
            else:
                # Check if we still need to move images to the validation set
                if number_of_validation_images_per_grade[current_label] > 0:
                    number_of_validation_images_per_grade[
                        current_label] = number_of_validation_images_per_grade[
                            current_label] - 1
                    current_output_folder = validation_data_path
                    valid_copy = True
            # If we need to copy the file
            if valid_copy:
                # Create the corresponding folder
                current_output_folder = path.join(current_output_folder,
                                                  str(int(current_label)))
                makedirs(current_output_folder, exist_ok=True)
                # Copy the image to the target folder
                shutil.copyfile(
                    path.join(images_dir_path, current_image_filename),
                    path.join(current_output_folder, current_image_filename))
Example #36
    console_handler = logging.StreamHandler()
    console_handler.setFormatter(console_formatter)
    console_handler.setLevel(logging.INFO)
    logger.addHandler(console_handler)

    logger.info('started')

    shapes_file = '../data/rectangles.pkl'
    with open(shapes_file, 'rb') as shapes_fp:
        shapes_data = load(shapes_fp)
    logger.info('loaded %d items from %s' % (len(shapes_data), shapes_file))

    # we need to convert the data to floats and normalize to get the machinery below to work properly
    shapes_data = shapes_data.astype('float')
    shapes_data = np.multiply(shapes_data, 1.0 / shapes_data.max())
    logger.info('the training data has max %.4f and min %.4f' %
                (shapes_data.max(), shapes_data.min()))

    n_inputs = 32 * 32
    n_hidden1 = 500
    n_hidden2 = 500
    n_hidden3 = 20  # was 20
    n_hidden4 = n_hidden2
    n_hidden5 = n_hidden1
    n_outputs = n_inputs
    learning_rate = 0.001

    initializer = tf.contrib.layers.variance_scaling_initializer()
    my_dense_layer = partial(tf.layers.dense,
                             activation=tf.nn.elu,
Example #37
def admm(X, y, max_iter=5000):
    # solve the dual problem with ADMM
    m, n = X.shape
    X = np.column_stack((X, np.ones((m, 1))))
    y = y.astype(np.float64)
    data_num = len(y)
    C = 1.0
    kernel = np.dot(X, np.transpose(X))
    p = np.matrix(np.multiply(kernel,np.outer(y, y))) + np.diag(np.ones(data_num, np.float64)) * .5/C
    e = np.matrix(np.ones([data_num, 1], np.float64))

    bounds = (0, np.inf)    


    low, up = bounds    
    x = np.ones((m,1))
    tau = 1.618
    sigma = 1
    
    # initial 
    u = np.ones((m, 1))
    t = x
    A = p + sigma * np.eye(m)
    I = np.eye(m)
    invA = cg(A, I)
    for it in range(max_iter):
        # update x
        b = e + u + sigma * t
        x = invA * b
        
        # update t (projection onto the box constraints)
        t = x - (1/sigma)*u
        t[t < low] = low
        t[t > up] = up
                    
        # update u
        u = u - tau*sigma*(x-t)

        dual = -(0.5*x.T*(p*x) - e.T*x)
        dual = dual.item()
        y1 = np.reshape(y, (-1, 1))
        lambda1 = np.multiply(x, y1)
        w = np.dot(X.T, lambda1)
        w = np.matrix(w).reshape(-1, 1)      
        tmp = np.maximum(1-np.multiply(y1, X*w),0)
        primal = 0.5*np.linalg.norm(w)**2 + 1 * np.sum(tmp)
        primal = primal.item()

        # stop criteria            
        if np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)) < 1e-12:
            break

        # print(t, np.linalg.norm(gradient))
        # print(np.min(x), np.max(x))
        # print(np.sum(x < -1e-4), np.sum(x>1+1e-4))
        # print(np.abs(dual-primal)/(1+np.abs(dual)+np.abs(primal)))

    y1 = np.reshape(y, (-1, 1))
    alpha1 = x
    lambda1 = np.multiply(y1,alpha1)   
    w = np.dot(X.T, lambda1)
    w = np.array(w).reshape(-1)
    b = w[n]
    w = w[0:n]

    return w, b
Example #38
def liklihood(G, Q, F):
    QF = np.matmul(Q, F)
    QF2 = np.matmul(Q, 1 - F)
    L = np.sum(np.multiply(G, np.log(QF)) + np.multiply((2. - G), np.log(QF2)))
    return L
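In LaTeX notation, assuming G holds allele counts in {0, 1, 2} (suggested by the 2 - G term), Q ancestry proportions and F allele frequencies, the function returns

    L = \sum_{i,j}\Big[\, g_{ij}\,\log\big((QF)_{ij}\big) + (2 - g_{ij})\,\log\big((Q(1-F))_{ij}\big) \Big]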
Example #39
def ilasso(cell_list, alpha, sigma, lag_len, dt, cv):
    """
    Learning temporal dependency among irregular time series ussing Lasso (or its variants)
    NOTE:Target is one variable.

    M. T. Bahadori and Yan Liu, "Granger Causality Analysis in Irregular Time Series", (SDM 2012)
    :param cell_list:one cell for each time series. Each cell is a 2xT matrix.
    First row contains the values and the second row contains SORTED time stamps.
    The first time series is the target time series which is predicted.
    :param alpha:The regularization parameter in Lasso
    :param sigma:Kernel parameter. Here Gaussian Kernel Bandwidth
    :param lag_len: Length of studied lag
    :param dt: delta t, the average length of the sampling intervals for the target time series
    :param cv: if True, evaluate on a held-out test split (cross validation)
    :return (tuple) tuple containing:
        result: The NxL coefficient matrix.
    """


    #for test
    # sigma = 0.1
    # alpha = 1e-2
    # dt = 1

    # index of last time which is less than lag_len*dt - 1
    B = np.argmax(cell_list[0][1, :] > lag_len * dt)
    assert B >= 0, " lag_len DT error"
    # number of index of time of explained variable
    N1 = cell_list[0][1].shape[0]
    # number of features
    P = len(cell_list)
    # Build the matrix elements
    Am = np.zeros((N1 - B, P * lag_len))  # explanatory variables
    bm = cell_list[0][0, B:N1 + 1].reshape((N1 - B, 1))
    # for loop for stored time stamp
    for i in range(B, N1):
        ti = np.linspace((cell_list[0][1, i] - lag_len * dt),cell_list[0][1, i] - dt , num = lag_len)
        # for loop for features
        for j in range(P):
            assert len(ti) == lag_len, str(len(ti))+str(lag_len)+"length does not match"
            """
            tij = np.broadcast_to(ti, (len(cell_list[j][1, :]), ti.size))
            tSelect = np.broadcast_to(cell_list[j][1, :],
                                      (lag_len, cell_list[j][1, :].size)).T
            ySelect = np.broadcast_to(cell_list[j][0, :],
                                      (lag_len, cell_list[j][0, :].size)).T
            """
            # reduce kernel length in order to reduce complexity of calculation
            # kernel is used as window function
            # time_match is the index of the time stamp in cell_list[j][1, :] nearest to the current target time
            time_match = np.searchsorted(cell_list[j][1, :], cell_list[0][1, i])
            kernel_length = 25 # half of kernel length
            start = time_match - kernel_length if time_match-kernel_length > 0 else 0
            end = time_match + kernel_length if time_match + kernel_length < len(cell_list[j][1, :])-1 else len(cell_list[j][1, :])
            tij = np.broadcast_to(ti, (len(cell_list[j][1, start:end]), ti.size))
            tSelect = np.broadcast_to(cell_list[j][1, start:end],
                                      (lag_len, cell_list[j][1, start:end].size)).T
            ySelect = np.broadcast_to(cell_list[j][0, start:end],
                                      (lag_len, cell_list[j][0, start:end].size)).T
            exponent = -(np.multiply((tij - tSelect), (tij - tSelect)) / (2 * sigma ** 2))
            # assert np.isfinite(exponent).all() == 1, str(exponent)
            Kernel = np.exp(exponent)
            # assert np.isfinite(Kernel).all() == 1, str(Kernel)
            with np.errstate(divide='ignore'):
                ker_sum = np.sum(Kernel, axis=0)
                numerator = np.sum(np.multiply(ySelect, Kernel), axis=0)
                # assert np.isfinite(numerator).all() ==1,str(numerator)
                # assert np.isfinite(ker_sum).all() ==1,str(ker_sum)
                tmp = np.divide(numerator,ker_sum)
                tmp[ker_sum==0] = 1
                # assert (np.isfinite(tmp)).all() == 1,str(tmp)+str(ker_sum)
            """
            if np.sum(Kernel, axis=0).any() == 0:
                print("kernel zero" + str(np.sum(Kernel, axis=0)))
                print(tmp)
            """
            Am[i - B, (j * lag_len):(j + 1) * lag_len] = tmp

    # assert (np.isfinite(Am)).all() == True,str(Am)
    # Solving Lasso using a solver; here the 'GLMnet' package
    if cv == False:
        fit = glmnet(x=Am, y=bm, family='gaussian', alpha=1,
                 lambdau=np.array([alpha]))
        weight = fit['beta']  # array of coefficient
        # Computing the BIC and AIC metrics
        bic = LA.norm(Am @ weight - bm) ** 2 - np.log(N1 - B) * np.sum(
            weight == 0) / 2
        aic = LA.norm(Am @ weight - bm) ** 2 - 2 * np.sum(weight == 0) / 2

        # weight_shape_before = weight.shape
        # weight_shape_after = weight[np.logical_not(np.isnan(weight))].shape
        # assert np.isnan(weight).all() == False

        # Reformatting the output
        result = np.zeros((P, lag_len))
        for i in range(P):
            result[i, :] = weight[i * lag_len:(i + 1) * lag_len].ravel()

        return result, aic, bic
    else :
        last_index = int((N1 - B) * 0.7)
        Am_train = Am[:last_index]
        bm_train = bm[:last_index]
        Am_test = Am[last_index:]
        bm_test = bm[last_index:]
        fit = glmnet(x=Am_train, y=bm_train, family='gaussian', alpha=1, lambdau=np.array([alpha]))
        weight = fit['beta']  # array of coefficient
        test_error = LA.norm(Am_test @ weight - bm_test) ** 2/ (N1 - B - last_index)
        # Computing the BIC and AIC metrics
        bic = LA.norm(Am_train @ weight - bm_train) ** 2 - np.log(N1 - B) * np.sum(weight == 0) / 2
        aic = LA.norm(Am_train @ weight - bm_train) ** 2 - 2 * np.sum(weight == 0) / 2
        # weight_shape_before = weight.shape
        # weight_shape_after = weight[np.logical_not(np.isnan(weight))].shape
        # assert np.isnan(weight).all() == False

        # Reformatting the output
        result = np.zeros((P, lag_len))
        for i in range(P):
            result[i, :] = weight[i * lag_len:(i + 1) * lag_len].ravel()

        return result, aic, bic, test_error
Example #40
def frechetDist(P, Q):
    ca = np.ones((len(P), len(Q)))
    ca = np.multiply(ca, -1)
    return _c(ca, len(P) - 1, len(Q) - 1, P, Q)
Example #41
def vollaths_f5(image_array) -> float:
    return float(np.sum(np.multiply(image_array, np.roll(image_array, 1, axis=1))) -
                 (len(image_array) * len(image_array[0]) * np.mean(image_array) ** 2))
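A minimal usage sketch; the image is random data for illustration only:

import numpy as np
img = np.random.rand(64, 64)   # hypothetical grayscale image
print(vollaths_f5(img))        # Vollath-style autocorrelation measure, commonly used for autofocus scoring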
Example #42
# zero mean, unit range (max - min scaling)
mu = A.mean(axis=0)
print(mu.shape, type(mu))
#print(mu)
s = A.max(axis=0) - A.min(axis=0)       # axis=0
X = (A - mu) / s
print('X:', X, type(s), sep='\n')
# covariance matrix of the features of X
# SIGMA = X.T * X        # same final result as the next line; the intermediate SIGMA and S differ, but U and V are the same
SIGMA = np.mat(np.cov(X, rowvar=0))
print('SIGMA:----', SIGMA, sep='\n')
# singular value decomposition
U, S, V = np.linalg.svd(SIGMA)
print(type(U), '---')
print('U:', U, sep='\n')
print('S:', S, sep='\n')
# [1.2589664  0.08825582]  1.26 is the larger value, so pick the first feature U[:,0] as the principal component
print('V:', V, sep='\n')
# principal-component feature matrix; the leading columns of U are the principal components
U_reduce = U[:, 0]
print('U_reduce:', U_reduce, sep='\n')
# dimension-reduced samples
Z = X * U_reduce
print('Z:', Z, sep='\n')

# recover the values from before the mean/range transformation
X_approx = Z * U_reduce.T
print('X_approx:', X_approx, sep='\n')
# recover the original samples
A_approx = np.multiply(X_approx, s) + mu    # element-wise multiplication
print('A_approx:', A_approx, A, sep='\n')
Example #43
										int_img_df.iloc[-1, 0] = corrected_value
									except IndexError:
										print 'No previous image exists'

									redo_loop = False

								except ValueError:
									print ('Invalid Entry -- ' +
										'input should be a single digit or float of format X.X')
						else:
							print ('Invalid Entry -- ' +
								'input should be a single digit or float of format X.X')
				plt.close()
				dial_ID += 1

				pixel_data = np.multiply(dial_img, 1.0 / 255.0)
				# reshape into an array with height 1 so it can fit in single row 
				pixel_data = pixel_data.reshape(h*w, 1, l)
				new_row = pd.Series([img_value, pixel_data.tolist()], index=int_df_headers)
				sys.exit()

# group_results.to_csv(results_dir + test_name + '_' + group.replace(' ', '_')  + '_averages.csv')
# 				print ('   Saving result file for ' + group)

# # next step will be to add fully test command line functionality and write code
# # such that for the 4 different videos, various frames are studied and
# # training_data.csv is created and stores resized, cropped values. Rotated image is strictly for viewing.
# # flip image for viewing, but store upside down since that is how the original video is

# # multiple threads?
 
Example #44
0
def entropy_histogram(image_array) -> float:
    num_elements = np.size(image_array)
    histogram, _ = np.histogram(image_array, 65535)
    probabilities = histogram / num_elements
    # out=zeros keeps the zero-probability bins at 0; without it, np.log2 with
    # where= leaves those entries uninitialized and NaNs can leak into the sum
    log_p = np.log2(probabilities, out=np.zeros_like(probabilities), where=probabilities > 0)
    return float(-np.sum(np.multiply(probabilities, log_p)))
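# A minimal usage sketch for entropy_histogram on synthetic 16-bit-style data
# (the 65535-bin histogram above suggests 16-bit input); the arrays here are
# made up purely for illustration. A single grey level gives ~0 bits, while a
# spread-out histogram gives a much larger entropy.
import numpy as np

rng = np.random.default_rng(0)
flat_img = np.full((64, 64), 1000, dtype=np.uint16)                 # one grey level
noisy_img = rng.integers(0, 65535, size=(64, 64), dtype=np.uint16)  # spread-out histogram
print(entropy_histogram(flat_img) < entropy_histogram(noisy_img))   # expected: True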
Example #45
0
	def infer(
		self,
		test_x,
		test_x_len,
		test_x_base_names,
		test_epoch,
		model_path='model',
		out_type='y',
		gain='mmse-lsa',
		out_path='out',
		n_filters=40,
		saved_data_path=None,
		):
		"""
		Deep Xi inference. The specified 'out_type' is saved.

		Argument/s:
			test_x - noisy-speech test batch.
			test_x_len - noisy-speech test batch lengths.
			test_x_base_names - noisy-speech base names.
			test_epoch - epoch to test.
			model_path - path to model directory.
			out_type - output type (see deepxi/args.py).
			gain - gain function (see deepxi/args.py).
			out_path - path to save output files.
			n_filters - number of filters in the mel-scale filter bank (used for the 'subband_ibm_hat' output).
			saved_data_path - path to saved data necessary for enhancement.
		"""
		out_path_base = out_path
		if not isinstance(test_epoch, list): test_epoch = [test_epoch]
		if not isinstance(gain, list): gain = [gain]

		# The mel-scale filter bank is to compute an ideal binary mask (IBM)
		# estimate for log-spectral subband energies (LSSE).
		if out_type == 'subband_ibm_hat':
			mel_filter_bank = self.mel_filter_bank(n_filters)

		for e in test_epoch:
			if e < 1: raise ValueError("test_epoch must be greater than 0.")
			for g in gain:

				out_path = out_path_base + '/' + self.ver + '/' + 'e' + str(e) # output path.
				if out_type == 'xi_hat': out_path = out_path + '/xi_hat'
				elif out_type == 'gamma_hat': out_path = out_path + '/gamma_hat'
				elif out_type == 's_STPS_hat': out_path = out_path + '/s_STPS_hat'
				elif out_type == 'y':
					if self.inp_tgt_type == 'MagIRM': out_path = out_path + '/y'
					else: out_path = out_path + '/y/' + g
				elif out_type == 'deepmmse': out_path = out_path + '/deepmmse'
				elif out_type == 'ibm_hat': out_path = out_path + '/ibm_hat'
				elif out_type == 'subband_ibm_hat': out_path = out_path + '/subband_ibm_hat'
				elif out_type == 'cd_hat': out_path = out_path + '/cd_hat'
				else: raise ValueError('Invalid output type.')
				if not os.path.exists(out_path): os.makedirs(out_path)


				self.model.load_weights(model_path + '/epoch-' + str(e-1) +
					'/variables/variables' )

				print("Processing observations...")
				inp_batch, supplementary_batch, n_frames = self.observation_batch(test_x, test_x_len)

				print("Performing inference...")
				tgt_hat_batch = self.model.predict(inp_batch, batch_size=1, verbose=1)

				print("Saving outputs...")
				batch_size = len(test_x_len)
				for i in tqdm(range(batch_size)):
					base_name = test_x_base_names[i]
					inp = inp_batch[i,:n_frames[i],:]
					tgt_hat = tgt_hat_batch[i,:n_frames[i],:]

					# if tf.is_tensor(supplementary_batch):
					supplementary = supplementary_batch[i,:n_frames[i],:]

					if saved_data_path is not None:
						saved_data = read_mat(saved_data_path + '/' + base_name + '.mat')
						supplementary = (supplementary, saved_data)

					if out_type == 'xi_hat':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', xi_hat, 'xi_hat')
					elif out_type == 'gamma_hat':
						gamma_hat = self.inp_tgt.gamma_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', gamma_hat, 'gamma_hat')
					elif out_type == 's_STPS_hat':
						s_STPS_hat = self.inp_tgt.s_stps_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', s_STPS_hat, 's_STPS_hat')
					elif out_type == 'y':
						y = self.inp_tgt.enhanced_speech(inp, supplementary, tgt_hat, g).numpy()
						save_wav(out_path + '/' + base_name + '.wav', y, self.inp_tgt.f_s)
					elif out_type == 'deepmmse':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						d_PSD_hat = np.multiply(np.square(inp), gfunc(xi_hat, xi_hat+1.0,
							gtype='deepmmse'))
						save_mat(out_path + '/' + base_name + '.mat', d_PSD_hat, 'd_psd_hat')
					elif out_type == 'ibm_hat':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						ibm_hat = np.greater(xi_hat, 1.0).astype(bool)
						save_mat(out_path + '/' + base_name + '.mat', ibm_hat, 'ibm_hat')
					elif out_type == 'subband_ibm_hat':
						xi_hat = self.inp_tgt.xi_hat(tgt_hat)
						xi_hat_subband = np.matmul(xi_hat, mel_filter_bank.transpose())
						subband_ibm_hat = np.greater(xi_hat_subband, 1.0).astype(bool)
						save_mat(out_path + '/' + base_name + '.mat', subband_ibm_hat,
							'subband_ibm_hat')
					elif out_type == 'cd_hat':
						cd_hat = self.inp_tgt.cd_hat(tgt_hat)
						save_mat(out_path + '/' + base_name + '.mat', cd_hat, 'cd_hat')
					else: raise ValueError('Invalid output type.')
Example #46
0
def vollaths_f4(image_array) -> float:
    return float(np.sum(np.multiply(image_array, np.roll(image_array, 1, axis=1))) -
                 np.sum(np.multiply(image_array, np.roll(image_array, 2, axis=1))))
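# A toy comparison of the two Vollath autocorrelation focus measures defined
# above (vollaths_f4 and vollaths_f5): a fine-grained texture versus a heavily
# smoothed copy of the same field. The synthetic data is only meant to
# illustrate that both measures shrink as the image gets smoother.
import numpy as np
from scipy.ndimage import gaussian_filter

rng = np.random.default_rng(1)
noise = rng.random((128, 128))
sharp = gaussian_filter(noise, sigma=1)    # fine-grained texture
smooth = gaussian_filter(noise, sigma=4)   # much smoother version
print(vollaths_f4(sharp) > vollaths_f4(smooth))  # expected: True
print(vollaths_f5(sharp) > vollaths_f5(smooth))  # expected: True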
X_valid = X[50000:]
T_valid = T[50000:]
Wij = np.random.normal(loc=0.0, scale=1.0/28, size=(J-1,785))
Wjk = np.random.normal(loc=0.0, scale=(J-1)**(-0.5), size=(10,J))
Gij = np.zeros((J, 785))
Gjk = np.zeros((10,J))
Rij = np.ones(J-1) # learning rate
Rjk = np.ones(10) # learning rate
maxima = 0
iterations = 0
result = [['Iterations', 'Train Accuracy', 'Valid Accuracy', 'Test Accuracy']]
while True:
    Z_train = Fij(X_train,Wij)
    Y_train = Fjk(Z_train,Wjk)
    del_k = T_train-Y_train
    del_j = np.multiply(del_k.dot(Wjk),Fij_prime(X_train,Wij))
    prevGjk = Gjk
    prevGij = Gij
    Gjk = -del_k.T.dot(Z_train)/len(X_train) - prevGjk*0
    Gij = -del_j.T.dot(X_train)/len(X_train) - prevGij*0
    Wjk = Wjk - np.multiply(Gjk,Rjk[:,np.newaxis])
    Wij = Wij - np.multiply(Gij[:J-1],Rij[:,np.newaxis])
    validAccuracy = accuracy(X_valid,T_valid,Wij,Wjk)
    iterations = iterations + 1
    if validAccuracy >= maxima:
        maxima = validAccuracy
        Wij_final = Wij
        Wjk_final = Wjk
        flag = 0
    elif flag<10: # can be increased to get better results
        flag = flag + 1
npCC[1:-1] = 0.01 * np.exp(-(xCC - xb)**2)
ECF = -Eb * np.ones(len(xCF))
ECC[1:-1] = 0.5 * (ECF[:-1] + ECF[1:])

#Initialize ghost cells
neCC[0] = -neCC[1]
neCC[-1] = neCC[-2]
npCC[0] = -npCC[1]
npCC[-1] = npCC[-2]
#What about the electric field?

t = 0.0
iiter = 1
while t < Tfinal:
    #Computing the advective flux. East flux- flux on the right cell face & West flux- flux on the left cell face
    eastFlux = np.multiply(neCC[1:-1], ECC[1:-1])
    westFlux = np.multiply(neCC[:-2], ECC[:-2])
    #Ghalf- flux terms
    Ghalf = (eastFlux - westFlux) / dx + (D / dx**2) * (
        neCC[2:] - 2.0 * neCC[1:-1] + neCC[:-2])
    #Shalf- source term
    Shalf = np.multiply(
        np.multiply(neCC[1:-1], np.abs(ECC[1:-1])),
        np.exp(-np.divide(np.ones(len(ECC[1:-1])), np.abs(ECC[1:-1]))))
    #Computing the densities at half time step (t_n/2)
    neCChalf[1:-1] = neCC[1:-1] + dt * (Ghalf + Shalf)
    npCChalf[1:-1] = npCC[1:-1] + dt * (Shalf)
    ECFhalf = np.append(
        ECF[1:] + dx * (neCChalf[1:-1] - npCChalf[1:-1]), -Eb
    )  #This is a simple numerical integration- usually we have to solve the Poisson's equation
    ECChalf[1:-1] = 0.5 * (ECFhalf[:-1] + ECFhalf[1:])
Example #49
0
    def generate_dec_batch(self, batch_size=32, pad=True,
                           max_len=SEQ_MAX_LEN, shuffle=True):
        """

        :param batch_size:
        :param pad:
        :param max_len:
        :param shuffle:
        :return:
        """
        # only train role==bot dialogue
        dataset = self.bot_dial
        if shuffle:
            id_list = np.random.permutation(len(dataset))
        else:
            id_list = list(range(len(dataset)))

        batch = []
        for item in id_list:
            raw_item = dataset[item]
            try:
                token_list, segment_list, response, res_range_pos, next_goal, next_goal_pos = self.get_dec_item(raw_item)
            except:
                continue

            # convert to id
            token_id_list, segment_id_list, pos_list, seq_len = self.convert_seg_token_into_id(
                token_list, segment_list, pad, max_len)
            response_id_list, _ = self.convert_token_to_id(response, pad=False)
            next_goal_id = self.goaltype2id.get(next_goal)

            # get decoder self attention mask
            slf_attn_mask_arr = self.dec_self_attn_mask(seq_len, len(response)+1, SEQ_MAX_LEN)

            # convert to array
            token_id_arr = np.array([token_id_list], dtype=np.int64)
            segment_id_arr = np.array([segment_id_list], dtype=np.int64)
            pos_arr = np.array([pos_list], dtype=np.int64)

            # get LM mask and response length
            res_range_pos = np.array(res_range_pos,dtype=np.int64)
            res_lm_mask, response_length = self._get_lm_mask(res_range_pos)
            res_lm_label = np.multiply(token_id_arr, self._get_lm_mask(res_range_pos + 1)[0])
            res_lm_label = np.array(res_lm_label, dtype=np.int64)


            batch.append((token_id_arr, segment_id_arr, pos_arr, slf_attn_mask_arr, # basic input
                          res_lm_mask, response_length, res_lm_label, # response
                          (next_goal_pos, next_goal_id)))

            if len(batch) == batch_size:
                # basic input
                token_id_arr = np.concatenate([item[0] for item in batch], axis=0)
                segment_id_arr = np.concatenate([item[1] for item in batch], axis=0)
                pos_arr = np.concatenate([item[2] for item in batch], axis=0)
                slf_attn_mask_arr = np.concatenate([item[3] for item in batch], axis=0)

                # response input
                lm_mask_arr = np.concatenate([item[4] for item in batch], axis=0)
                response_length_arr = np.array([item[5] for item in batch], dtype=np.float32)
                res_lm_label_arr = np.concatenate([item[6] for item in batch], axis=0)

                # goal type
                goal_list = [item[-1] for item in batch]
                goal_pos = []
                goal_type_list = []
                for batch_id, (pos, goal_type) in enumerate(goal_list):
                    goal_pos.append([batch_id, pos])
                    goal_type_list.append(goal_type)

                goal_pos = np.array(goal_pos, dtype=np.int64)
                goal_type_list = np.array(goal_type_list, dtype=np.int64)

                yield token_id_arr, segment_id_arr, pos_arr, slf_attn_mask_arr, \
                      lm_mask_arr, response_length_arr, res_lm_label_arr,\
                      goal_pos, goal_type_list
                batch = []
def Fij_prime(X,Wij): # Sigmoid derivative
    sigmoid = Fij(X,Wij)
    return np.multiply(sigmoid,1-sigmoid)
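# A quick numerical check of the identity Fij_prime relies on:
# d/dz sigmoid(z) = sigmoid(z) * (1 - sigmoid(z)). Fij itself is assumed to be
# an element-wise logistic sigmoid; only the derivative identity is verified
# here, on made-up inputs.
import numpy as np

def _sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

z_demo = np.linspace(-4.0, 4.0, 9)
eps = 1e-6
numeric = (_sigmoid(z_demo + eps) - _sigmoid(z_demo - eps)) / (2 * eps)  # central difference
analytic = _sigmoid(z_demo) * (1 - _sigmoid(z_demo))                     # same form as Fij_prime
print(np.allclose(numeric, analytic))                                    # expected: True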
Example #51
0
def deramp(data, mask_in, ramp_type='linear', metadata=None, max_num_sample=1e6):
    '''Remove ramp from input data matrix based on pixels marked by mask.
    Ignores data with NaN or zero value.
    Parameters: data      : 2D / 3D np.ndarray, data to be deramped
                            If 3D, it is of size (num_date, length, width)
                mask_in   : 2D np.ndarray, mask of pixels used for ramp estimation
                ramp_type : str, name of ramp to be estimated.
                metadata  : dict, containing reference pixel info, REF_Y/X
                max_num_sample : int, maximum number of pixels sampled for the estimation
    Returns:    data_out  : 2D / 3D np.ndarray, data after deramping
                ramp      : 2D / 3D np.ndarray, estimated ramp
    '''
    dshape = data.shape
    length, width = dshape[-2:]
    num_pixel = length * width

    # prepare input data
    if len(dshape) == 3:
        data = np.moveaxis(data, 0, -1)        #reshape to (length, width, numDate)
        data = data.reshape(num_pixel, -1)
        dmean = np.mean(data, axis=-1).flatten()
    else:
        data = data.reshape(-1, 1)
        dmean = np.array(data).flatten()

    ## mask

    # 1. default
    if mask_in is None:
        mask_in = np.ones((length, width), dtype=np.float32)
    mask = (mask_in != 0).flatten()
    del mask_in

    # 2. ignore pixels with NaN or zero data value
    mask *= np.multiply(~np.isnan(dmean), dmean != 0.)
    del dmean

    # 3. for big dataset: uniformly sample the data for ramp estimation
    if max_num_sample and np.sum(mask) > max_num_sample:
        step = int(np.ceil(np.sqrt(np.sum(mask) / max_num_sample)))
        if step > 1:
            sample_flag = np.zeros((length, width), dtype=np.bool_)
            sample_flag[int(step/2)::step,
                        int(step/2)::step] = 1
            mask *= sample_flag.flatten()
            del sample_flag

    # design matrix
    xx, yy = np.meshgrid(np.arange(0, width),
                         np.arange(0, length))
    xx = np.array(xx, dtype=np.float32).reshape(-1, 1)
    yy = np.array(yy, dtype=np.float32).reshape(-1, 1)
    ones = np.ones(xx.shape, dtype=np.float32)
    if ramp_type == 'linear':
        G = np.hstack((yy, xx, ones))
    elif ramp_type == 'quadratic':
        G = np.hstack((yy**2, xx**2, yy*xx, yy, xx, ones))
    elif ramp_type == 'linear_range':
        G = np.hstack((xx, ones))
    elif ramp_type == 'linear_azimuth':
        G = np.hstack((yy, ones))
    elif ramp_type == 'quadratic_range':
        G = np.hstack((xx**2, xx, ones))
    elif ramp_type == 'quadratic_azimuth':
        G = np.hstack((yy**2, yy, ones))
    else:
        raise ValueError('un-recognized ramp type: {}'.format(ramp_type))

    # estimate ramp
    X = np.dot(np.linalg.pinv(G[mask, :], rcond=1e-15), data[mask, :])
    ramp = np.dot(G, X)
    ramp = np.array(ramp, dtype=data.dtype)
    del X

    # reference in space if metadata
    if metadata and all(key in metadata.keys() for key in ['REF_X','REF_Y']):
        ref_y, ref_x = int(metadata['REF_Y']), int(metadata['REF_X'])
        ref_idx = ref_y * width + ref_x
        ramp -= ramp[ref_idx, :]

    # do not change pixel with original zero value
    ramp[data == 0] = 0

    data_out = data - ramp
    if len(dshape) == 3:
        ramp = np.moveaxis(ramp, -1, 0)
        data_out = np.moveaxis(data_out, -1, 0)
    ramp = ramp.reshape(dshape)
    data_out = data_out.reshape(dshape)
    return data_out, ramp
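# A minimal usage sketch for deramp(): a synthetic 2-D field made of a known
# linear ramp plus small noise is deramped with the default mask. Shapes and
# coefficients are arbitrary toy values.
import numpy as np

length_demo, width_demo = 50, 80
yy_demo, xx_demo = np.mgrid[0:length_demo, 0:width_demo].astype(np.float32)
rng = np.random.default_rng(0)
data_demo = (0.02 * yy_demo - 0.01 * xx_demo + 3.0
             + 0.001 * rng.standard_normal((length_demo, width_demo)).astype(np.float32))

data_flat, ramp_est = deramp(data_demo, mask_in=None, ramp_type='linear')
print(np.abs(data_flat).max() < 0.01)   # expected: True, residual is noise-level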
Example #52
0
def score_anomalies(y, y_hat, critic, index, score_window=10, critic_smooth_window=None,
                    error_smooth_window=None, smooth=True, rec_error_type="point", comb="mult",
                    lambda_rec=0.5):
    """Compute an array of anomaly scores.

    Anomaly scores are calculated using a combination of reconstruction error and critic score.

    Args:
        y (ndarray):
            Ground truth.
        y_hat (ndarray):
            Predicted values. Each timestamp has multiple predictions.
        critic (ndarray):
            Critic score. Each timestamp has multiple critic scores.
        index (ndarray):
            time index for each y (start position of the window).
        score_window (int):
            Optional. Size of the window over which the scores are calculated.
            If not given, 10 is used.
        critic_smooth_window (int):
            Optional. Size of window over which smoothing is applied to critic.
            If not given, 1% of the series length is used.
        error_smooth_window (int):
            Optional. Size of window over which smoothing is applied to error.
            If not given, 1% of the series length is used.
        smooth (bool):
            Optional. Indicates whether errors should be smoothed.
            If not given, `True` is used.
        rec_error_type (str):
            Optional. The method to compute reconstruction error. Can be one of
            `["point", "area", "dtw"]`. If not given, 'point' is used.
        comb (str):
            Optional. How to combine critic and reconstruction error. Can be one
            of `["mult", "sum", "rec"]`. If not given, 'mult' is used.
        lambda_rec (float):
            Optional. Used if `comb="sum"` as a lambda weighted sum to combine
            scores. If not given, 0.5 is used.

    Returns:
        tuple:
            Array of anomaly scores, the corresponding time index, the ground
            truth values, and the predictions.
    """

    critic_smooth_window = critic_smooth_window or math.trunc(y.shape[0] * 0.01)
    error_smooth_window = error_smooth_window or math.trunc(y.shape[0] * 0.01)

    step_size = 1  # expected to be 1

    true_index = index  # no offset

    true = [item[0] for item in y.reshape((y.shape[0], -1))]

    for item in y[-1][1:]:
        true.extend(item)

    critic_extended = list()
    for c in critic:
        critic_extended.extend(np.repeat(c, y_hat.shape[1]).tolist())

    critic_extended = np.asarray(critic_extended).reshape((-1, y_hat.shape[1]))

    critic_kde_max = []
    pred_length = y_hat.shape[1]
    num_errors = y_hat.shape[1] + step_size * (y_hat.shape[0] - 1)

    for i in range(num_errors):
        critic_intermediate = []

        for j in range(max(0, i - num_errors + pred_length), min(i + 1, pred_length)):
            critic_intermediate.append(critic_extended[i - j, j])

        if len(critic_intermediate) > 1:
            discr_intermediate = np.asarray(critic_intermediate)
            try:
                critic_kde_max.append(discr_intermediate[np.argmax(
                    stats.gaussian_kde(discr_intermediate)(critic_intermediate))])
            except np.linalg.LinAlgError:
                critic_kde_max.append(np.median(discr_intermediate))
        else:
            critic_kde_max.append(np.median(np.asarray(critic_intermediate)))

    # Compute critic scores
    critic_scores = _compute_critic_score(critic_kde_max, critic_smooth_window)

    # Compute reconstruction scores
    rec_scores, predictions = reconstruction_errors(
        y, y_hat, step_size, score_window, error_smooth_window, smooth, rec_error_type)

    rec_scores = stats.zscore(rec_scores)
    rec_scores = np.clip(rec_scores, a_min=0, a_max=None) + 1

    # Combine the two scores
    if comb == "mult":
        final_scores = np.multiply(critic_scores, rec_scores)

    elif comb == "sum":
        final_scores = (1 - lambda_rec) * (critic_scores - 1) + lambda_rec * (rec_scores - 1)

    elif comb == "rec":
        final_scores = rec_scores

    else:
        raise ValueError(
            'Unknown combination specified {}, use "mult", "sum", or "rec" instead.'.format(comb))

    true = [[t] for t in true]
    return final_scores, true_index, true, predictions
Example #53
0
rand_col_test = 100*np.random.rand(len(x_test))
x_verif = np.insert(x_test, 2, rand_col_test, axis=1)

#3 nested loops - generate scaled weights
lin_combos = []
for i in range(0, 11):
    for j in range(0, 11):
        for k in range(0, 11):
            combo = [float(i/10), float(j/10), float(k/10)]
            lin_combos.append(combo)
lin_combos = np.array(lin_combos)


for combo in lin_combos:
    #Scale input data
    scaled_x_train = np.multiply(x_train, combo)
    #Scale verification data
    scaled_x_verif = np.multiply(x_verif, combo)

    #Method 1
    reg = KNNRegressor(scaled_x_train, y_train, 5)
    neighbors = reg.find_all_neighbors(scaled_x_verif)
    nbh_std = reg.find_neighborhood_std(neighbors)

    print("Combo = ", combo, "\tnbh_std = ", nbh_std)
    #y_pred = reg.predict(x_test)
    #print(train_filename)
    #print(test_filename)
    #print(nbh_std)
    #print(mean_squared_error(y_test, y_pred)**0.5)
Example #54
0
def ecc_plot(aryMean, vecEccBin, strPathOut):
    """
    Plot results for eccentricity & cortical depth analysis.

    This version plots the values using two separate colourmaps for negative
    and positive values.

    Plots statistical parameters (e.g. parameter estimates) by cortical depth
    (x-axis) and pRF eccentricity (y-axis). This function is part of a tool for
    analysis of cortical-depth-dependent fMRI responses at different
    retinotopic eccentricities.
    """
    # Number of eccentricity bins:
    varEccNum = vecEccBin.shape[0]

    # Font type:
    strFont = 'Liberation Sans'

    # Font colour:
    vecFontClr = np.array([17.0/255.0, 85.0/255.0, 124.0/255.0])

    # Find minimum and maximum correlation values:
    varMin = np.percentile(aryMean, 2.5)
    varMax = np.percentile(aryMean, 97.5)

    # Round:
    varMin = (np.floor(varMin * 0.1) / 0.1)
    varMax = (np.ceil(varMax * 0.1) / 0.1)

    # Same scale for negative and positive colour bar:
    if np.greater(np.absolute(varMin), varMax):
        varMax = np.absolute(varMin)
    else:
        varMin = np.multiply(-1.0, np.absolute(varMax))

    # Fixed axis limits for comparing plots across conditions/ROIs:
    # varMin = -400.0
    # varMax = 400.0

    # Create main figure:
    fig01 = plt.figure(figsize=(4.0, 3.0),
                       dpi=200.0,
                       facecolor=([1.0, 1.0, 1.0]),
                       edgecolor=([1.0, 1.0, 1.0]))

    # Big subplot in the background for common axes labels:
    axsCmn = fig01.add_subplot(111)

    # Turn off axis lines and ticks of the big subplot:
    axsCmn.spines['top'].set_color('none')
    axsCmn.spines['bottom'].set_color('none')
    axsCmn.spines['left'].set_color('none')
    axsCmn.spines['right'].set_color('none')
    axsCmn.tick_params(labelcolor='w',
                       top=False,
                       bottom=False,
                       left=False,
                       right=False)

    # Set and adjust common axes labels:
    axsCmn.set_xlabel('Cortical depth',
                      alpha=1.0,
                      fontname=strFont,
                      fontweight='normal',
                      fontsize=7.0,
                      color=vecFontClr,
                      position=(0.5, 0.0))
    axsCmn.set_ylabel('pRF eccentricity',
                      alpha=1.0,
                      fontname=strFont,
                      fontweight='normal',
                      fontsize=7.0,
                      color=vecFontClr,
                      position=(0.0, 0.5))
    axsCmn.set_title('fMRI signal change',
                     alpha=1.0,
                     fontname=strFont,
                     fontweight='bold',
                     fontsize=10.0,
                     color=vecFontClr,
                     position=(0.5, 1.1))

    # Create colour-bar axis:
    axsTmp = fig01.add_subplot(111)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # Number of colour increments:
    varNumClr = 20

    # Colour values for the first colormap (used for negative values):
    aryClr01 = plt.cm.PuBu(np.linspace(0.1, 1.0, varNumClr))

    # Invert the first colour map:
    aryClr01 = np.flipud(np.array(aryClr01, ndmin=2))

    # Colour values for the second colormap (used for positive values):
    aryClr02 = plt.cm.OrRd(np.linspace(0.1, 1.0, varNumClr))

    # Combine negative and positive colour arrays:
    aryClr03 = np.vstack((aryClr01, aryClr02))

    # Create new custom colormap, combining two default colormaps:
    objCustClrMp = colors.LinearSegmentedColormap.from_list('custClrMp',
                                                            aryClr03)

    # Lookup vector for negative colour range:
    vecClrRngNeg = np.linspace(varMin, 0.0, num=varNumClr)

    # Lookup vector for positive colour range:
    vecClrRngPos = np.linspace(0.0, varMax, num=varNumClr)

    # Stack lookup vectors:
    vecClrRng = np.hstack((vecClrRngNeg, vecClrRngPos))

    # 'Normalize' object, needed to use custom colour maps and lookup table
    # with matplotlib:
    objClrNorm = colors.BoundaryNorm(vecClrRng, objCustClrMp.N)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # Plot correlation coefficients of current depth level:
    pltTmpCorr = plt.imshow(aryMean,
                            interpolation='nearest',  # 'none',  # 'bicubic',
                            origin='lower',
                            norm=objClrNorm,
                            cmap=objCustClrMp)

    # Position of labels for the x-axis:
    vecXlblsPos = np.array([0, (aryMean.shape[1] - 1)])
    # Set position of labels for the x-axis:
    axsTmp.set_xticks(vecXlblsPos)
    # Create list of strings for labels:
    lstXlblsStr = ['WM', 'CSF']
    # Set the content of the labels (i.e. strings):
    axsTmp.set_xticklabels(lstXlblsStr,
                           alpha=0.9,
                           fontname=strFont,
                           fontweight='bold',
                           fontsize=8.0,
                           color=vecFontClr)

    # Position of labels for the y-axis:
    vecYlblsPos = np.arange(-0.5, (varEccNum - 0.5), 1.0)
    # Set position of labels for the y-axis:
    axsTmp.set_yticks(vecYlblsPos)
    # Create list of strings for labels:
    # lstYlblsStr = map(str,
    #                   np.around(vecEccBin, decimals=1)
    #                   )
    lstYlblsStr = [str(x) for x in np.around(vecEccBin, decimals=1)]
    # Set the content of the labels (i.e. strings):
    axsTmp.set_yticklabels(lstYlblsStr,
                           alpha=0.9,
                           fontname=strFont,
                           fontweight='bold',
                           fontsize=8.0,
                           color=vecFontClr)

    # Turn off ticks:
    axsTmp.tick_params(labelcolor=([0.0, 0.0, 0.0]),
                       top=False,
                       bottom=False,
                       left=False,
                       right=False)

    # We create invisible axes for the colour bar slightly to the right of the
    # position of the last data-axes. First, retrieve position of last
    # data-axes:
    objBbox = axsTmp.get_position()
    # We slightly adjust the x-position of the colour-bar axis, by shifting
    # them to the right:
    vecClrAxsPos = np.array([(objBbox.x0 * 7.5),
                             objBbox.y0,
                             objBbox.width,
                             objBbox.height])
    # Create colour-bar axis:
    axsClr = fig01.add_axes(vecClrAxsPos,
                            frameon=False)

    # Add colour bar:
    pltClrbr = fig01.colorbar(pltTmpCorr,
                              ax=axsClr,
                              fraction=1.0,
                              shrink=1.0)

    # The values to be labeled on the colour bar:
    # vecClrLblsPos01 = np.arange(varMin, 0.0, 10)
    # vecClrLblsPos02 = np.arange(0.0, varMax, 100)
    vecClrLblsPos01 = np.linspace(varMin, 0.0, num=3)
    vecClrLblsPos02 = np.linspace(0.0, varMax, num=3)
    vecClrLblsPos = np.hstack((vecClrLblsPos01, vecClrLblsPos02))

    # The labels (strings):
    # vecClrLblsStr = map(str, vecClrLblsPos)
    vecClrLblsStr = [str(x) for x in vecClrLblsPos]

    # Set labels on colour bar:
    pltClrbr.set_ticks(vecClrLblsPos)
    pltClrbr.set_ticklabels(vecClrLblsStr)
    # Set font size of colour bar ticks, and remove the 'spines' on the right
    # side:
    pltClrbr.ax.tick_params(labelsize=8.0,
                            tick2On=False)

    # Make colour-bar axis invisible:
    axsClr.axis('off')

    # Save figure:
    fig01.savefig(strPathOut,
                  dpi=160.0,
                  facecolor='w',
                  edgecolor='w',
                  orientation='landscape',
                  bbox_inches='tight',
                  pad_inches=0.2,
                  transparent=False,
                  frameon=None)
Example #55
0
    def numpy_run(self):
        self.x.map_read()
        self.y.map_read()
        self.output.map_invalidate()
        numpy.multiply(self.x.mem, self.y.mem, self.output.mem)
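# A plain-numpy sketch of the pattern numpy_run above relies on: numpy.multiply
# can write its element-wise product into a preallocated output buffer instead
# of allocating a new array. The map_read/map_invalidate calls belong to the
# snippet's own memory-mapping wrapper and are not reproduced here.
import numpy

x_demo = numpy.arange(6.0).reshape(2, 3)
y_demo = numpy.full((2, 3), 2.0)
out_demo = numpy.empty_like(x_demo)
numpy.multiply(x_demo, y_demo, out_demo)   # product written in place into out_demo
print(out_demo)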
    def predict_frame(self, oriImg):
        test_image = Variable(T.transpose(T.transpose(T.unsqueeze(torch.from_numpy(oriImg).float(), 0), 2, 3), 1, 2),volatile=True).cuda()
        # print('Input Image Size: ', test_image.size())

        # Multiplier: A pyramid based scaling method to evaluate image from various scales.
        multiplier = [x * self.model_['boxsize'] / oriImg.shape[0] for x in self.param_['scale_search']]
        # print('Image Scaling Multipliers: ', multiplier, '\n')

        # Heatmap and Parts Affinity Field Data Structures
        heatmap_avg = torch.zeros((len(multiplier),19,oriImg.shape[0], oriImg.shape[1])).cuda()
        paf_avg = torch.zeros((len(multiplier),38,oriImg.shape[0], oriImg.shape[1])).cuda()

        # Compute Keypoint and Part Affinity Fields
        # print('Generating Keypoint Heatmap and Parts Affinity Field Predictions...')
        for m in range(len(multiplier)):
            # Set Image Scale
            scale = multiplier[m]
            h = int(oriImg.shape[0] * scale)
            w = int(oriImg.shape[1] * scale)
            # print('[', 'Multiplier: ', scale, '-', (w, h), ']')

            # Pad Image Corresponding to Detection Stride
            pad_h = 0 if (h % self.model_['stride'] == 0) else self.model_['stride'] - (h % self.model_['stride'])
            pad_w = 0 if (w % self.model_['stride'] == 0) else self.model_['stride'] - (w % self.model_['stride'])
            new_h = h + pad_h
            new_w = w + pad_w

            # Apply Image Resize Transformation
            imageToTest = cv2.resize(oriImg, (0,0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC)
            imageToTest_padded, pad = util.padRightDownCorner(imageToTest, self.model_['stride'], self.model_['padValue'])
            imageToTest_padded = np.transpose(np.float32(imageToTest_padded[:,:,:,np.newaxis]), (3,2,0,1))/256 - 0.5

            # Generate Predictions
            feed = Variable(T.from_numpy(imageToTest_padded)).cuda()
            output1, output2 = self.model(feed)

            # Scale Prediction Outputs to Corresponding Image Size
            heatmap = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output2)
            paf = nn.UpsamplingBilinear2d((oriImg.shape[0], oriImg.shape[1])).cuda()(output1)

            # print('Heatmap Dim:', heatmap.size())   # (1, Joint Count, X, Y)
            # print('PAF Dim:', paf.size())           # (1, PAF Count, X, Y)
            # print()

            heatmap_avg[m] = heatmap[0].data
            paf_avg[m] = paf[0].data

        # Compute Average Values
        heatmap_avg = T.transpose(T.transpose(T.squeeze(T.mean(heatmap_avg, 0)),0,1),1,2).cuda()
        paf_avg = T.transpose(T.transpose(T.squeeze(T.mean(paf_avg, 0)),0,1),1,2).cuda()

        # Convert to Numpy Type
        heatmap_avg = heatmap_avg.cpu().numpy()
        paf_avg = paf_avg.cpu().numpy()

        '''
        # [Plotting & Visualizing Heatmap and PAF]

        # Plot Heapmap Probabilities
        # util.plot_heatmap(oriImg, heatmap_avg)
        # util.plot_joint_heatmap(oriImg, heatmap_avg, 1)

        # Plot Part-Affinity Vectors
        # util.plot_paf(oriImg, paf_avg, 4)
        '''

        # Compute Heapmap Peaks (Using Non-Maximum Supression Method)
        all_peaks = []
        peak_counter = 0
        joint_pt_lookup = dict()
        for part in range(18):
            # Smooth out heapmap with gaussian kernel to remove high frequency variation.
            map_ori = heatmap_avg[:,:,part]
            map = gaussian_filter(map_ori, sigma=3)

            map_left = np.zeros(map.shape)
            map_left[1:,:] = map[:-1,:]
            map_right = np.zeros(map.shape)
            map_right[:-1,:] = map[1:,:]
            map_up = np.zeros(map.shape)
            map_up[:,1:] = map[:,:-1]
            map_down = np.zeros(map.shape)
            map_down[:,:-1] = map[:,1:]

            # Compute Peak Based on Binary Threshold
            peaks_binary = np.logical_and.reduce((map>=map_left, map>=map_right, map>=map_up, map>=map_down, map > self.param_['thre1']))
            peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0]))  # note reverse; list() so it can be indexed and measured

            # Derive Joint Keypoint Peaks with Mapped ID with Probabilities
            peaks_with_score = [x + (map_ori[x[1],x[0]],) for x in peaks]
            id = range(peak_counter, peak_counter + len(peaks))
            peaks_with_score_and_id = [peaks_with_score[i] + (id[i],) for i in range(len(id))]

            # Create Joint Lookup Dictionary
            for pt in peaks_with_score_and_id:
                joint_pt_lookup[(pt[1], pt[0])] = pt[2:4]

            all_peaks.append(peaks_with_score_and_id)
            peak_counter += len(peaks)

        '''
        # [Plot KeyPoint (with Probabilities)]
        # util.plot_key_point(oriImg, all_peaks)
        '''
        # util.plot_all_keypoints(oriImg, all_peaks)

        # Load Joint Index and Sequences Data
        mapIdx = self.md.get_mapIdx()
        limbSeq = self.md.get_limbseq()

        # Compute Part-Affinity Fields
        connection_all = []
        special_k = []
        mid_num = 10

        for k in range(len(mapIdx)):
            score_mid = paf_avg[:,:,[x-19 for x in mapIdx[k]]]
            # print(score_mid.shape)

            candA = all_peaks[limbSeq[k][0]-1]
            candB = all_peaks[limbSeq[k][1]-1]
            # print('Limb Seq Connection: [', limbSeq[k][0]-1, ',', limbSeq[k][1]-1, ']\n')

            nA = len(candA)
            nB = len(candB)
            indexA, indexB = limbSeq[k]

            if nA != 0 and nB != 0:
                connection_candidate = []
                for i in range(nA):
                    for j in range(nB):

                        # Compute Joint Unit Vector
                        vec = np.subtract(candB[j][:2], candA[i][:2])
                        norm = math.sqrt(vec[0]*vec[0] + vec[1]*vec[1])
                        # Assert: Check if the norm is a not a zero vector.
                        if not np.any(norm):
                            #print('Exception: Norm is a zero-vector')
                            continue

                        # TODO: Save this vector!
                        vec = np.divide(vec, norm)
                        #print('Unit Vector: [',i, ', ', j, ']: ', str(vec))

                        startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), np.linspace(candA[i][1], candB[j][1], num=mid_num)))
                        vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] for I in range(len(startend))])
                        vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] for I in range(len(startend))])

                        # Compute Components for Affinity Field Criterion
                        score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1])
                        score_with_dist_prior = sum(score_midpts)/len(score_midpts) + min(0.5*oriImg.shape[0]/norm-1, 0)

                        # Check PAF Criterion
                        criterion1 = len(np.nonzero(score_midpts > self.param_['thre2'])[0]) > 0.8 * len(score_midpts)
                        criterion2 = score_with_dist_prior > 0
                        if criterion1 and criterion2:
                            connection_candidate.append([i, j, score_with_dist_prior, score_with_dist_prior+candA[i][2]+candB[j][2]])

                connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True)
                connection = np.zeros((0,5))

                for c in range(len(connection_candidate)):
                    i, j, s = connection_candidate[c][0:3]
                    if (i not in connection[:,3] and j not in connection[:,4]):
                        connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]])
                        if len(connection) >= min(nA, nB): break

                connection_all.append(connection)

                #print('\nConnections:')
                #print(connection)
                #print()
            else:
                # Handle Exception for Potential Missing Part Entities
                special_k.append(k)
                connection_all.append([])

        # Build Human Pose
        subset = -1 * np.ones((0, 20))
        candidate = np.array([item for sublist in all_peaks for item in sublist])

        for k in range(len(mapIdx)):
            if k not in special_k:
                partAs = connection_all[k][:,0]
                partBs = connection_all[k][:,1]
                indexA, indexB = np.array(limbSeq[k]) - 1

                for i in range(len(connection_all[k])):
                    found = 0
                    subset_idx = [-1, -1]

                    for j in range(len(subset)):
                        if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]:
                            subset_idx[found] = j
                            found += 1

                    if found == 1:
                        j = subset_idx[0]
                        if subset[j][indexB] != partBs[i]:
                            subset[j][indexB] = partBs[i]
                            subset[j][-1] += 1
                            subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]
                    elif found == 2: # if found 2 and disjoint, merge them
                        j1, j2 = subset_idx
                        # print "found = 2"
                        membership = ((subset[j1]>=0).astype(int) + (subset[j2]>=0).astype(int))[:-2]
                        if len(np.nonzero(membership == 2)[0]) == 0: #merge
                            subset[j1][:-2] += (subset[j2][:-2] + 1)
                            subset[j1][-2:] += subset[j2][-2:]
                            subset[j1][-2] += connection_all[k][i][2]
                            subset = np.delete(subset, j2, 0)
                        else: # as like found == 1
                            subset[j1][indexB] = partBs[i]
                            subset[j1][-1] += 1
                            subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2]

                    # if find no partA in the subset, create a new subset
                    elif not found and k < 17:
                        row = -1 * np.ones(20)
                        row[indexA] = partAs[i]
                        row[indexB] = partBs[i]
                        row[-1] = 2
                        row[-2] = sum(candidate[connection_all[k][i,:2].astype(int), 2]) + connection_all[k][i][2]
                        subset = np.vstack([subset, row])

        # Remove Rows of Subset with the Least Parts Available
        deleteIdx = []
        for i in range(len(subset)):
            if subset[i][-1] < 4 or subset[i][-2]/subset[i][-1] < 0.4:
                deleteIdx.append(i)
        subset = np.delete(subset, deleteIdx, axis=0)

        # Setup Pose Dictionary Data Structure for Prediction Return
        joints_per_skeleton = [[] for i in range(len(subset))]
        for n in range(len(subset)):
            for i in range(18):
                cidx = subset[n][i]
                if cidx != -1:
                    y = candidate[cidx.astype(int), 0]
                    x = candidate[cidx.astype(int), 1]
                    joints_per_skeleton[n].append([y, x])
                else:
                    joints_per_skeleton[n].append(None)

        return joints_per_skeleton
        max_value = 0
        output_unit = []
        for i in range(num_top):
            feature_map = feature_maps[i][unitID]
            if max_value == 0:
                max_value = np.max(feature_map)
            feature_map = feature_map / max_value
            mask = cv2.resize(feature_map, segment_size)
            mask[mask < threshold_scale] = 0.0  # binarize the mask
            mask[mask > threshold_scale] = 1.0

            img = cv2.imread(paths[i])
            img = cv2.resize(img, segment_size)
            img = cv2.normalize(img.astype('float'), None, 0.0, 1.0,
                                cv2.NORM_MINMAX)
            img_mask = np.multiply(img, mask[:, :, np.newaxis])
            img_mask = np.uint8(img_mask * 255)
            output_unit.append(img_mask)
            output_unit.append(
                np.uint8(np.ones((segment_size[0], margin, 3)) * 255))
        montage_unit = np.concatenate(output_unit, axis=1)
        cv2.imwrite(
            os.path.join(output_folder, 'image',
                         '%s-unit%03d.jpg' % (layer, unitID)), montage_unit)
        if flag_crop == 1:
            # load the library to crop image
            import tightcrop
            montage_unit_crop = tightcrop.crop_tiled_image(
                montage_unit, margin)
            cv2.imwrite(
                os.path.join(output_folder, 'image',
Example #58
0
    def _ulogprob_vis(self, X):
        X = np.asmatrix(X)

        return self.b.T * X \
           + np.sum(np.log(1. + np.exp(self.W.T * X + self.c)), 0) \
           + np.sum(np.multiply(X, self.L * X), 0) / 2.
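# A shape-only sketch of the expression used by _ulogprob_vis above, with tiny
# random stand-in parameters: b, W, c, L are assumed to be model parameters
# (visible bias, weights, hidden bias, lateral visible-visible terms) stored as
# np.matrix, with visible units along the rows of X and samples along the columns.
import numpy as np

rng = np.random.default_rng(0)
num_vis, num_hid, num_samples = 4, 3, 5
b_demo = np.asmatrix(rng.standard_normal((num_vis, 1)))
W_demo = np.asmatrix(rng.standard_normal((num_vis, num_hid)))
c_demo = np.asmatrix(rng.standard_normal((num_hid, 1)))
L_demo = np.asmatrix(rng.standard_normal((num_vis, num_vis)))
X_demo = np.asmatrix(rng.standard_normal((num_vis, num_samples)))

ulogprob = (b_demo.T * X_demo
            + np.sum(np.log(1. + np.exp(W_demo.T * X_demo + c_demo)), 0)
            + np.sum(np.multiply(X_demo, L_demo * X_demo), 0) / 2.)
print(ulogprob.shape)   # one unnormalized log-probability per sample -> (1, 5)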
Example #59
0
def compute_multiassign_weights_(_idx2_wx, _idx2_wdist, massign_alpha,
                                 massign_sigma, massign_equal_weights):
    """
    Multi Assignment Filtering from Improving Bag of Features

    Args:
        _idx2_wx (ndarray): word indexes assigned to each descriptor (one row per descriptor).
        _idx2_wdist (ndarray): distances to the assigned words, same shape as _idx2_wx.
        massign_alpha (float): threshold ratio relative to the nearest-word distance.
        massign_sigma (float): bandwidth of the Gaussian used for soft weighting.
        massign_equal_weights (bool): Turns off soft weighting. Gives all assigned
            vectors weight 1

    Returns:
        tuple : (idx2_wxs, idx2_maws)

    References:
        (Improving Bag of Features)
        http://lear.inrialpes.fr/pubs/2010/JDS10a/jegou_improvingbof_preprint.pdf

        (Lost in Quantization)
        http://www.robots.ox.ac.uk/~vgg/publications/papers/philbin08.ps.gz

        (A Context Dissimilarity Measure for Accurate and Efficient Image Search)
        https://lear.inrialpes.fr/pubs/2007/JHS07/jegou_cdm.pdf

    Notes:
        sigma values from \cite{philbin_lost08}
        (70 ** 2) ~= 5000,
        (80 ** 2) ~= 6250,
        (86 ** 2) ~= 7500,

    Auto:
        from ibeis.algo.hots.smk import smk_index
        import utool as ut; print(ut.make_default_docstr(smk_index.compute_multiassign_weights_))
    """
    if not ut.QUIET:
        print('[smk_index.assign] compute_multiassign_weights_')
    # Valid word assignments are beyond fraction of distance to the nearest word
    massign_thresh = _idx2_wdist.T[0:1].T.copy()
    # HACK: If the nearest word has distance 0 then this threshold is too hard
    # so we should use the distance to the second nearest word.
    flag_too_close = (massign_thresh == 0)
    massign_thresh[flag_too_close] = _idx2_wdist.T[1:2].T[flag_too_close]
    # Compute the threshold fraction
    np.add(.001, massign_thresh, out=massign_thresh)
    np.multiply(massign_alpha, massign_thresh, out=massign_thresh)
    invalid = np.greater_equal(_idx2_wdist, massign_thresh)
    if ut.VERBOSE:
        _ = (invalid.size - invalid.sum(), invalid.size)
        print('[smk_index.assign] + massign_alpha = %r' % (massign_alpha,))
        print('[smk_index.assign] + massign_sigma = %r' % (massign_sigma,))
        print('[smk_index.assign] + massign_equal_weights = %r' % (massign_equal_weights,))
        print('[smk_index.assign] * Marked %d/%d assignments as invalid' % _)

    if massign_equal_weights:
        # Performance hack from jegou paper: just give everyone equal weight
        masked_wxs = np.ma.masked_array(_idx2_wx, mask=invalid)
        idx2_wxs  = list(map(ut.filter_Nones, masked_wxs.tolist()))
        #ut.embed()
        if ut.DEBUG2:
            assert all([isinstance(wxs, list) for wxs in idx2_wxs])
        idx2_maws = [np.ones(len(wxs), dtype=np.float32) for wxs in idx2_wxs]
    else:
        # More natural weighting scheme
        # Weighting as in Lost in Quantization
        gauss_numer = -_idx2_wdist.astype(np.float64)
        gauss_denom = 2 * (massign_sigma ** 2)
        gauss_exp   = np.divide(gauss_numer, gauss_denom)
        unnorm_maw = np.exp(gauss_exp)
        # Mask invalid multiassignment weights
        masked_unorm_maw = np.ma.masked_array(unnorm_maw, mask=invalid)
        # Normalize multiassignment weights from 0 to 1
        masked_norm = masked_unorm_maw.sum(axis=1)[:, np.newaxis]
        masked_maw = np.divide(masked_unorm_maw, masked_norm)
        masked_wxs = np.ma.masked_array(_idx2_wx, mask=invalid)
        # Remove masked weights and word indexes
        idx2_wxs  = list(map(ut.filter_Nones, masked_wxs.tolist()))
        idx2_maws = list(map(ut.filter_Nones, masked_maw.tolist()))
        #with ut.EmbedOnException():
        if ut.DEBUG2:
            checksum = [sum(maws) for maws in idx2_maws]
            for x in np.where([not ut.almost_eq(val, 1) for val in checksum])[0]:
                print(checksum[x])
                print(_idx2_wx[x])
                print(masked_wxs[x])
                print(masked_maw[x])
                print(massign_thresh[x])
                print(_idx2_wdist[x])
            #all([ut.almost_eq(x, 1) for x in checksum])
            assert all([ut.almost_eq(val, 1) for val in checksum]), 'weights did not break evenly'

    return idx2_wxs, idx2_maws
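# A dependency-free sketch of the soft multi-assignment weighting described in
# the docstring above (the Gaussian weighting in the style of "Lost in
# Quantization"), using made-up distances. The utool-based masking and
# None-filtering of the original is replaced here by a simple boolean mask.
import numpy as np

_demo_wdist = np.array([[3000.0, 5000.0, 9000.0],
                        [1000.0, 1500.0, 2500.0]])    # distances to the k nearest words
demo_alpha, demo_sigma = 2.0, 80.0

thresh = demo_alpha * (_demo_wdist[:, :1] + 0.001)    # fraction of the nearest-word distance
invalid = _demo_wdist >= thresh

unnorm = np.exp(-_demo_wdist / (2 * demo_sigma ** 2))  # Gaussian of distance, as in the code above
unnorm[invalid] = 0.0
maws = unnorm / unnorm.sum(axis=1, keepdims=True)      # normalized multi-assignment weights
print(np.round(maws, 3))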
def smoSimple(dataMatIn, classLabels, C, toler, maxIter):
    """
    一个简化版的SMO算法实现
    :param dataMatIn: 数据输入
    :param classLabels: 数据对应的分类标签
    :param C: 松弛变量
    :param toler: 容错率
    :param maxIter: 最大迭代次数
    :return:
    """
    # 首先将输入数据转化成numpy的mat矩阵存储,维度为(100,2)
    X = np.mat(dataMatIn)
    # 将输入标签转化成numpy的mat矩阵存储,并且转置成(100,1)
    Y = np.mat(classLabels).transpose()
    b = 0
    # 获取dataMatrix的维度,m=100,n=2
    m, n = np.shape(X)
    # 对于每一组数据都初始化一个拉格朗日乘子
    lambdas = np.mat(np.zeros((m, 1)))
    # 初始化迭代
    item_num = 0
    while item_num < maxIter:
        lambdaPairChanged = 0
        for i in range(m):
            # 步骤一:计算误差Ei
            # fxi = w^Txi+b
            fxi = float(np.multiply(lambdas, Y).T * (X * X[i, :].T)) + b
            # 误差项计算
            Ei = fxi - float(Y[i])
            # 优化alpha,设定一定的容错率
            if (Y[i] * Ei < -toler and lambdas[i] < C) or (Y[i] * Ei > toler
                                                           and lambdas[i] > 0):
                # 随机选择另一个lambdaJ组合一起进行优化
                j = utils.selectJrand(i, m)
                # 计算lambdaJ对应的误差Ej
                fxj = float(np.multiply(lambdas, Y).T * (X * X[j, :].T)) + b
                Ej = fxj - float(Y[j])
                # 保存更新前的lambda值
                lambdaIold = lambdas[i].copy()
                lambdaJold = lambdas[j].copy()
                # 步骤2:计算上下界H和L
                if Y[i] != Y[j]:
                    L = max(0, lambdas[j] - lambdas[i])
                    H = min(C, C + lambdas[j] - lambdas[i])
                else:
                    L = max(0, lambdas[j] + lambdas[i] - C)
                    H = min(C, lambdas[j] + lambdas[i])
                if L == H:
                    print("L == H")
                    continue
                # 步骤3:计算eta
                eta = X[i, :] * X[i, :].T + X[j, :] * X[j, :].T - 2.0 * X[
                    i, :] * X[j, :].T
                if eta <= 0:
                    print("eta <= 0")
                    continue
                # 步骤4:更新lambdaJ
                lambdas[j] += Y[j] * (Ei - Ej) / eta
                # 步骤5:修剪lambdaJ
                lambdas[j] = utils.clipLambda(lambdas[j], H, L)
                if abs(lambdas[j] - lambdaJold) < 0.00001:
                    print("alphaJ 变化太小了")
                    continue
                # 步骤6:更新lambdaI
                lambdas[i] += Y[j] * Y[i] * (lambdaJold - lambdas[j])
                # 步骤7:更新b1和b2
                b1 = b - Ei - Y[i] * (lambdas[i] - lambdaIold) * X[i, :] * X[i, :].T - \
                     Y[j] * (lambdas[j] - lambdaJold) * X[j, :] * X[i, :].T
                b2 = b - Ej - Y[i] * (lambdas[i] - lambdaIold) * X[i, :] * X[j, :].T - \
                     Y[j] * (lambdas[j] - lambdaJold) * X[j, :] * X[j, :].T

                # 步骤8:根据b1和b2更新b
                if 0 < lambdas[i] < C:
                    b = b1
                elif 0 < lambdas[j] < C:
                    b = b2
                else:
                    b = (b1 + b2) / 2.0
                # 统计优化次数
                lambdaPairChanged += 1
                print("第%d次迭代 样本:%d, alpha优化次数:%d" %
                      (item_num, i, lambdaPairChanged))
        if lambdaPairChanged == 0:
            item_num += 1
        else:
            item_num = 0
        print("迭代次数:%d" % item_num)
    return b, lambdas
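# A toy run of smoSimple on a small linearly separable problem. selectJrand and
# clipLambda are assumed to be the usual simplified-SMO helpers (pick a random
# second index, clip a multiplier to [L, H]); minimal stand-ins are defined here
# so the sketch is self-contained. The data and hyper-parameters are made up.
import random
import numpy as np

class utils:  # stand-in for the helper module used by smoSimple above
    @staticmethod
    def selectJrand(i, m):
        j = i
        while j == i:
            j = int(random.uniform(0, m))
        return j

    @staticmethod
    def clipLambda(lam, H, L):
        if lam > H:
            lam = H
        if lam < L:
            lam = L
        return lam

toy_data = [[1.0, 2.1], [2.0, 1.1], [1.3, 1.0], [1.0, 1.0], [2.0, 1.0],
            [7.0, 8.0], [8.0, 7.5], [7.5, 9.0], [8.2, 8.3], [9.0, 7.0]]
toy_labels = [-1.0, -1.0, -1.0, -1.0, -1.0, 1.0, 1.0, 1.0, 1.0, 1.0]

b_fit, lambdas_fit = smoSimple(toy_data, toy_labels, C=0.6, toler=0.001, maxIter=10)
# Recover the primal weight vector w = sum_i lambda_i * y_i * x_i.
w_fit = np.multiply(lambdas_fit, np.mat(toy_labels).T).T * np.mat(toy_data)
print(b_fit, w_fit)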