def get_corr_map_fast(coo1, coo2, skypos, skyrange, sec, bound, bandwidth):
  imsz = imagetools.deg2pix(skypos, skyrange, 0.0001)
  count = np.zeros(imsz)
  print(imsz)
  co_rel = np.array([[0,0]])
  len1 = coo1.shape[0]
  len2 = coo2.shape[0]
  print(len1,len2)
  wcs = imagetools.define_wcs(skypos,skyrange,width=False,height=False,verbose=0,pixsz=0.0001)
  #with open('../data/try2_%d.csv'%sec, 'wb') as csvfile:
    #writer = csv.writer(csvfile)
  if len2>len1:
    for i in range(len1):
      #print(i)
      tmp_co = coo2-coo1[i,:]
      tmp_co = tmp_co[np.absolute(tmp_co[:,0])<=bound,:]
      tmp_co = tmp_co[np.absolute(tmp_co[:,1])<=bound,:]
      co_rel = np.concatenate((co_rel, tmp_co), axis = 0)
  else:
    for i in range(len2):
      #print(i)
      tmp_co = coo2[i,:]-coo1
      tmp_co = tmp_co[np.absolute(tmp_co[:,0])<=bound,:]
      tmp_co = tmp_co[np.absolute(tmp_co[:,1])<=bound,:]
      co_rel = np.concatenate((co_rel, tmp_co), axis = 0)
  print(co_rel.shape)
  if co_rel.shape[0]>50:
    centroid = ck.find_centroid(co_rel[1:], bandwidth, 11, bound)
  else:
    return count, np.array([0.0, 0.0])

  return count, centroid
def test_matrix_assemble(dim):
    eps = 1000*DOLFIN_EPS

    (u, uu), (v, vv), (U, UU), dPP, bc = _create_dp_problem(dim)

    # Scalar assemble
    mat = assemble(u*v*U*dPP)

    # Create a numpy matrix based on the local size of the vector
    # and populate it with values from local vector
    loc_range = u.vector().local_range()
    vec_mat = np.zeros_like(mat.array())
    vec_mat[range(loc_range[1] - loc_range[0]),
            range(loc_range[0], loc_range[1])] = u.vector().get_local()

    assert np.sum(np.absolute(mat.array() - vec_mat)) < eps

    # Vector assemble
    mat = assemble((uu[0]*vv[0]*UU[0] + uu[1]*vv[1]*UU[1])*dPP)

    # Create a numpy matrix based on the local size of the vector
    # and populate it with values from local vector
    loc_range = uu.vector().local_range()
    vec_mat = np.zeros_like(mat.array())
    vec_mat[range(loc_range[1] - loc_range[0]),
            range(loc_range[0], loc_range[1])] = uu.vector().get_local()

    assert np.sum(np.absolute(mat.array() - vec_mat)) < eps
 def aggregate(self, gs):
     """Aggregate Capital, Labor in Efficiency unit and Bequest over all cohorts"""
     W, T, TS = self.W, self.T, self.TS
     """Aggregate all cohorts' capital and labor supply at each year"""
     K1, L1 = array([[0 for t in range(TS)] for i in range(2)], dtype=float)
     for t in range(TS):
         if t <= TS-T-1:
             K1[t] = sum([gs[t+y].apath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             L1[t] = sum([gs[t+y].epath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             self.Beq[t] = sum([gs[t+y].apath[-(y+1)]*self.pop[t,-(y+1)]
                                 /self.sp[t,-(y+1)]*(1-self.sp[t,-(y+1)]) for y in range(T)])
             self.C[t] = sum([gs[t+y].cpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
         else:
             K1[t] = sum([gs[-1].apath[-(y+1)]*self.pop[t][-(y+1)] for y in range(T)])
             L1[t] = sum([gs[-1].epath[-(y+1)]*self.pop[t][-(y+1)] for y in range(T)])
             self.Beq[t] = sum([gs[-1].apath[-(y+1)]*self.pop[t,-(y+1)]
                                 /self.sp[t,-(y+1)]*(1-self.sp[t,-(y+1)]) for y in range(T)])
             self.C[t] = sum([gs[-1].cpath[-(y+1)]*self.pop[t][-(y+1)] for y in range(T)])
     self.Converged = (max(absolute(K1-self.K)) < self.tol*max(absolute(self.K)))
     """ Update the economy's aggregate K and N with weight phi on the old """
     self.K = self.phi*self.K + (1-self.phi)*K1
     self.L = self.phi*self.L + (1-self.phi)*L1
     self.k = self.K/self.L
     # print "K=%2.2f," %(self.K[0]),"L=%2.2f," %(self.L[0]),"K/L=%2.2f" %(self.k[0])
     for i in range(self.TS // self.T):
         print("K=%2.2f," % (self.K[i*self.T]), "L=%2.2f," % (self.L[i*self.T]), "K/L=%2.2f" % (self.k[i*self.T]))
def paddingAnswers(answerSheet1, blankSheet1):
   numRowsA, numColsA, numBandsA, dataTypeA = ipcv.dimensions(answerSheet1)
   numRowsB, numColsB, numBandsB, dataTypeB = ipcv.dimensions(blankSheet1)
   print(numRowsB, numColsB)
   if numBandsA == 3:
      answerSheet = cv2.cvtColor(answerSheet1, cv2.COLOR_BGR2GRAY)
   elif numBandsA == 1:
      answerSheet = answerSheet1

   if numBandsB == 3:
      blankSheet = cv2.cvtColor(blankSheet1, cv2.COLOR_BGR2GRAY)
   elif numBandsB == 1:
      blankSheet = blankSheet1  

   pad = numpy.absolute(numRowsA - numColsA)//2   # integer pad width (numpy.pad requires ints)
   maxCount = numpy.max(blankSheet)

   if (numRowsA-numColsA) % 2 != 0:
      answerSheet = numpy.pad(answerSheet, ((0,0),(pad,pad+1)), 'constant', constant_values=((maxCount, maxCount),(maxCount,maxCount)))
   elif (numRowsA-numColsA) % 2 == 0:
      answerSheet = numpy.pad(answerSheet, ((0,0),(pad,pad)), 'constant', constant_values=((maxCount, maxCount),(maxCount,maxCount)))

   pad1 = numpy.absolute(numRowsB - numColsB)//2
   maxCount = numpy.max(blankSheet)

   if (numRowsB-numColsB) % 2 != 0:
      blankSheet = numpy.pad(blankSheet, ((0,0),(pad1,pad1+1)), 'constant', constant_values=((maxCount, maxCount),(maxCount,maxCount)))
   elif (numRowsB-numColsB) % 2 == 0:
      blankSheet = numpy.pad(blankSheet, ((0,0),(pad1,pad1)), 'constant', constant_values=((maxCount, maxCount),(maxCount,maxCount)))


   return answerSheet, blankSheet
    def getEnergy(self, normalized=True, mask=None):
        """
        Returns the current energy.

        Parameters:
            normalized: Flag to return the normalized energy (that is, divided
            by the total density)
        """
        if self.gpu:
            psi = self.psi.get()
            V = self.Vdt.get() / self.dt
        else:
            psi = self.psi
            V = self.Vdt / self.dt
        density = np.absolute(psi) ** 2
        gradx = np.gradient(psi)[0]
        normFactor = density.sum() if normalized else 1.0
        return (
            np.ma.array(
                -(
                    0.25 * np.gradient(np.gradient(density)[0])[0]
                    - 0.5 * np.absolute(gradx) ** 2
                    - (self.g_C * density + V) * density
                ),
                mask=mask,
            ).sum()
            / normFactor
        )
def errore_trasporto_upwind(n_iter, M, N, xmin, xmax, tinz, tfin, vel, ic):
    """Analisi errore trasporto upwind"""
    dx = zeros(n_iter)
    e1 = zeros(n_iter)
    e2 = zeros(n_iter)
    esup = zeros(n_iter)
    uexac = zeros([M + 1])

    dx[0] = (xmax - xmin) / M
    (u, x, t) = trasporto_upwind(M, N, xmin, xmax, tinz, tfin, vel, ic)
    uexac[0:M + 1] = ic(vel(x, t[-1]))

    e1[0] = dx[0] * sum(absolute(u[:, -1] - uexac))
    e2[0] = sqrt(dx[0] * sum(absolute(u[:, -1] - uexac) ** 2))
    esup[0] = max(abs(u[:, -1] - uexac))

    for i in range(1, n_iter):
        print(i)
        M *= 2
        dx[i] = (xmax - xmin) / M
        (u, x, t) = trasporto_upwind(M, N, xmin, xmax, tinz, tfin, vel, ic)
        uexac = zeros([M + 1])
        uexac[0:M + 1] = ic(vel(x, t[-1]))
        e1[i] = dx[i] * sum(absolute(u[:, -1] - uexac))
        e2[i] = sqrt(dx[i] * sum(absolute(u[:, -1] - uexac) ** 2))
        esup[i] = max(abs(u[:, -1] - uexac))

    return e1, e2, esup
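Since M doubles on every pass, dx halves, so the observed order of convergence can be read off the ratio of successive errors. A minimal post-processing sketch in plain NumPy (the commented call is illustrative, not from the source):

from numpy import asarray, log2

def convergence_rates(errors):
    # e_i ~ C * dx_i**p with dx halved each refinement, so p = log2(e_i / e_{i+1})
    e = asarray(errors, dtype=float)
    return log2(e[:-1] / e[1:])

# e1, e2, esup = errore_trasporto_upwind(5, 400, 1000, 0.0, 1.0, 0.0, 0.5, vel, ic)
# print(convergence_rates(e1))  # ~1 expected for the first-order upwind scheme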
 def plotall(self):
     real = self.z_data_raw.real
     imag = self.z_data_raw.imag
     real2 = self.z_data_sim.real
     imag2 = self.z_data_sim.imag
     fig = plt.figure(figsize=(15,5))
     fig.canvas.set_window_title("Resonator fit")
     plt.subplot(131)
     plt.plot(real,imag,label='rawdata')
     plt.plot(real2,imag2,label='fit')
     plt.xlabel('Re(S21)')
     plt.ylabel('Im(S21)')
     plt.legend()
     plt.subplot(132)
     plt.plot(self.f_data*1e-9,np.absolute(self.z_data_raw),label='rawdata')
     plt.plot(self.f_data*1e-9,np.absolute(self.z_data_sim),label='fit')
     plt.xlabel('f (GHz)')
     plt.ylabel('Amplitude')
     plt.legend()
     plt.subplot(133)
     plt.plot(self.f_data*1e-9,np.unwrap(np.angle(self.z_data_raw)),label='rawdata')
     plt.plot(self.f_data*1e-9,np.unwrap(np.angle(self.z_data_sim)),label='fit')
     plt.xlabel('f (GHz)')
     plt.ylabel('Phase')
     plt.legend()
     # plt.gcf().set_size_inches(15,5)
     plt.tight_layout()
     plt.show()
 def aggregate(self, gs):
     """Aggregate Capital, Labor in Efficiency unit and Bequest over all cohorts"""
     W, T, TS = self.W, self.T, self.TS
     """Aggregate all cohorts' capital and labor supply at each year"""
     K1, L1, H1, R1 = array([[0 for t in range(TS)] for i in range(4)], dtype=float)
     for t in range(TS):
         if t <= TS-T-1:
             K1[t] = sum([gs[t+y].apath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             L1[t] = sum([gs[t+y].epath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             H1[t] = sum([gs[t+y].hpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             R1[t] = sum([gs[t+y].rpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             self.Beq[t] = sum([gs[t+y].apath[-(y+1)]*self.pop[t,-(y+1)]
                                 /self.sp[t,-(y+1)]*(1-self.sp[t,-(y+1)]) for y in range(T)])
             self.C[t] = sum([gs[t+y].cpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
         else:
             K1[t] = sum([gs[-1].apath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             L1[t] = sum([gs[-1].epath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             H1[t] = sum([gs[-1].hpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             R1[t] = sum([gs[-1].rpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
             self.Beq[t] = sum([gs[-1].apath[-(y+1)]*self.pop[t,-(y+1)]
                                 /self.sp[t,-(y+1)]*(1-self.sp[t,-(y+1)]) for y in range(T)])
             self.C[t] = sum([gs[-1].cpath[-(y+1)]*self.pop[t,-(y+1)] for y in range(T)])
     self.Converged = (max(absolute(K1-self.K)) < self.tol*max(absolute(self.K)))
     """ Update the economy's aggregate K and N with weight phi on the old """
     self.K = self.phi*self.K + (1-self.phi)*K1
     self.L = self.phi*self.L + (1-self.phi)*L1
     self.k = self.K/self.L
     self.Hd = H1
     self.Rd = R1
     self.Hd_Hs = self.Hd - self.Hs
def add_data(N):
    if N<=37.0:
        Ns=N
    else:
        Ns=37.0
    X=Ns*pi*(f-f0)/f0
    A=sin(X)/sin(X/Ns)
    #ans=recur_exp(N, Np=37) 
    
    #A=sum([el[0]*exp(1j*2*pi*f/f0*el[1])/1.0 for el in ans])
    #A=sum([1.0*exp(1j*2*pi*f/f0*el[1])/1.0 for el in ans])

    #A=comb_A(N)
    Asq=absolute(A)**2
    #return Asq
    Ga0=2*mu**2*Y0*Ns**2
    Ga=Ga0*Asq/Ns**2
    Ba=Ga0*(sin(2*X)-2*X)/(2*X**2)
    w=2*pi*f
    S13=1j*sqrt(2*Ga*GL)/(Ga+1j*Ba+1j*w*C+GL)
#
#    if N>5:
#        Ns=N-5
#    else: 
#        Ns=1
#    X=Ns*pi*(f-f0)/f0
#    A=1*sin(X)/sin(X/Ns)
#    Asq=A**2
#    Ga0=2*mu**2*Y0*(Ns)**2
#    Ga=Ga0*Asq/Ns**2
#    Ba=Ga0*(sin(2*X)-2*X)/(2*X**2)
#    w=2*pi*f
#    S31=1j*sqrt(2*Ga*GL)/(Ga+1j*Ba+1j*w*C+GL)
#        
    return absolute(S13**2)
def mask_bad(m, badval = UNSEEN, rtol = 1.e-5, atol = 1.e-8):
    """Return a boolean array with True where m is close to badval
    and False elsewhere
    [absolute(m - badval) <= atol + rtol * absolute(badval)]"""
    atol = npy.absolute(atol)
    rtol = npy.absolute(rtol)
    return npy.absolute(m - badval) <= atol + rtol * npy.absolute(badval)
def mask_good(m, badval = UNSEEN, rtol = 1.e-5, atol = 1.e-8):
    """Return a mask with False where m is close to badval
    and True elsewhere
    [absolute(m - badval) > atol + rtol * absolute(badval)]"""
    atol = npy.absolute(atol)
    rtol = npy.absolute(rtol)
    return npy.absolute(m - badval) > atol + rtol * npy.absolute(badval)
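A minimal usage sketch for the pair above, assuming `npy` is this module's NumPy alias and `UNSEEN` is healpy's bad-pixel sentinel value:

import numpy as npy

UNSEEN = -1.6375e30  # healpy's sentinel for unobserved pixels

m = npy.arange(12.)
m[3] = UNSEEN
print(mask_bad(m))   # True only at index 3
print(mask_good(m))  # False only at index 3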
def plotCoeff(X, y, obj, featureNames, whichReg):
    """ Plot Regression's Coeff
    """
    clf = classifiers[whichReg]
    clf,_,_ = fitAlgo(clf, X,y, opt= True, param_dict = param_dist_dict[whichReg])
    if whichReg == "LogisticRegression":
    	coeff = np.absolute(clf.coef_[0])
    else:
    	coeff = np.absolute(clf.coef_)
    print coeff
    indices = np.argsort(coeff)[::-1]
    print indices
    print featureNames
    featureList = []
    # num_features = len(featureNames)
    print("Feature ranking:")
    for f in range(num_features):
        featureList.append(featureNames[indices[f]])
        print("%d. feature %s (%.2f)" % (f, featureNames[indices[f]], coeff[indices[f]]))
    fig = pl.figure(figsize=(8,6),dpi=150)
    pl.title("Feature importances",fontsize=30)
    # pl.bar(range(num_features), coeff[indices],
    #         yerr = std_importance[indices], color=paired[0], align="center",
    #         edgecolor=paired[0],ecolor=paired[1])
    pl.bar(range(num_features), coeff[indices], color=paired[0], align="center",
            edgecolor=paired[0],ecolor=paired[1])
    pl.xticks(range(num_features), featureList, size=15,rotation=90)
    pl.ylabel("Importance",size=30)
    pl.yticks(size=20)
    pl.xlim([-1, num_features])
    # fix_axes()
    pl.tight_layout()
    save_path = 'plots/'+obj+'/'+whichReg+'_feature_importances.pdf'
    fig.savefig(save_path)
def calculateMie(data):
    # Extract data
    (p, w, n_p, n_medium, th, n_theta, x_rv) = data
    # Size parameter
    # x      - size parameter = k*radius = 2pi/lambda * radius
    #          (lambda is the wavelength in the medium around the scatterers)
    x = np.pi * p / (w / n_medium)
    # Mie parameters
    (S1, S2, Qext, Qsca, Qback, gsca) = bhmie(x, n_p / n_medium, n_theta)
    # Phase function
    P = (np.absolute(S1) ** 2.0 + np.absolute(S2) ** 2.0) / \
        (Qsca * x ** 2.0)
    # Cumulative distribution
    cP = st.cumulativeDistributionTheta(P, th)
    # Normalize
    cP /= cP[-1]

    # Inverse cumulative distribution for random variable picking
    cPinv = st.invertNiceFunction(np.degrees(th), cP, x_rv)

    pD = {}
    pD['particleDiameter'] = p
    pD['sizeParameter'] = x
    pD['wavelength'] = w
    pD['crossSections'] = Qext * np.pi * (p / 2.0) ** 2.0
    pD['inverseCDF'] = cPinv
    pD['phaseFunction'] = P
    pD['cumulativePhaseFunction'] = cP

    # Return generated data
    return pD
def callback(data):
    # trouble shoot - print data from subscriber
    #rospy.loginfo(rospy.get_caller_id() + "leftx: %s, rightx: %s", data.leftx, data.rightx)    
    
    # unpack values
    global m1val
    global m2val
    global xl
    global xr
    xlnew = float(data.leftx)
    xrnew = float(data.rightx)

    if numpy.absolute(xlnew - xl) < 0.02:
        xl = xlnew
    # else:
    #     print('invalid x1, not updated')

    if numpy.absolute(xrnew - xr) < 0.02:
        xr = xrnew
    # else:
    #     print('invalid x2, not updated')

    xlcm = xl * 100
    xrcm = xr * 100
    # print('leftx: {0:.3f} cm, rightx: {1:.3f} cm.'.format(xlcm, xrcm))

    # read currents
    m1cur, m2cur = readcurrents()
def mask_good(m, badval=UNSEEN, rtol=1.0e-5, atol=1.0e-8):
    """Returns a bool array with ``False`` where m is close to badval.

    Parameters
    ----------
    m : a map (may be a sequence of maps)
    badval : float, optional
        The value of the pixel considered as bad (:const:`UNSEEN` by default)
    rtol : float, optional
        The relative tolerance
    atol : float, optional
        The absolute tolerance

    Returns
    -------
    a bool array with the same shape as the input map, ``False`` where input map is
    close to badval, and ``True`` elsewhere.

    See Also
    --------
    mask_bad, ma

    Examples
    --------
    >>> import healpy as hp
    >>> m = np.arange(12.)
    >>> m[3] = hp.UNSEEN
    >>> hp.mask_good(m)
    array([ True,  True,  True, False,  True,  True,  True,  True,  True,
            True,  True,  True], dtype=bool)
    """
    m = np.asarray(m)
    atol = np.absolute(atol)
    rtol = np.absolute(rtol)
    return np.absolute(m - badval) > atol + rtol * np.absolute(badval)
def get_peaks_cf(data, win_size):
    """
    data: audio as numpy array to be analyzed
    win_size: value in samples to create the blocks for analysis
    
    Used in calc_crest_factor, this function returns an array of peak levels
    for each window.

    return: array of peak audio levels
    """
    if len(data) == 2:
        # Separate left and right channels
        data_l = data[0,:]               
        data_r = data[1,:]

        # Buffer up the data into non-overlapping windows
        data_matrix_l = librosa.util.frame(data_l, frame_length=win_size, hop_length=win_size)
        data_matrix_r = librosa.util.frame(data_r, frame_length=win_size, hop_length=win_size)

        # Get peaks for left and right channels
        peaks_l = np.amax(np.absolute(data_matrix_l), axis=0)
        peaks_r = np.amax(np.absolute(data_matrix_r), axis=0)
        return np.maximum(peaks_l, peaks_r)

    else:
        data_matrix = librosa.util.frame(data, frame_length=win_size, hop_length=win_size)
        return np.amax(np.absolute(data_matrix), axis=0)
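A short usage sketch with synthetic mono audio (librosa must be installed; the 440 Hz tone and window size are arbitrary choices for illustration):

import numpy as np

fs = 44100
t = np.arange(fs) / fs
audio = 0.5 * np.sin(2 * np.pi * 440 * t)  # 1 s mono sine, peak amplitude 0.5

peaks = get_peaks_cf(audio, win_size=1024)
print(peaks.shape, peaks.max())  # one peak level per 1024-sample window, ~0.5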
def niceformat(x, l, s):
    """Get a nice formatted string of a number.

    Parameters:
        x: scalar
        l: boolean
            latex expression if True else ordinary
        s: boolean
            use '$' if True else don't

    Returns: expr
        expr: string
            string representing the number
            no measure unit
    """
    if l:
        pref = PREFIXES.copy()
        if s:
            pref[-6] = r'$\mu$'
        else:
            pref[-6] = r'\mu'
    else:
        pref = PREFIXES 
    xexp = 0
    while np.absolute(x) < 1 and xexp >= min(PREFIXES.keys()):
        xexp -= 3
        x *= 1000.
    while np.absolute(x) >= 1e3 and xexp <= max(PREFIXES.keys()):
        xexp += 3
        x /= 1000.
    expr = '%.4g' % x + ' ' + pref[xexp]  # avoid shadowing the boolean parameter s
    return expr
def calc_num_walls(side_length, room_positions, ap_positions):
    """
    Calculate the number of walls between each room to each AP.

    This is used to calculate the wall losses as well as the indoor
    pathloss.

    Parameters
    ----------
    side_length : float
        The side length of the square room.
    room_positions : 2D complex numpy array
        The positions of all rooms in the grid.
    ap_positions : 1D complex numpy array
        The positions of access points in the grid.

    Returns
    -------
    num_walls : 2D numpy array of ints
        The number of walls from each room to access point.
    """
    all_positions_diffs = room_positions.reshape(-1, 1) - 1.0001 * ap_positions.reshape(1, -1)

    num_walls = np.round(
        np.absolute(np.real(all_positions_diffs / side_length))
        + np.absolute(np.imag(all_positions_diffs / side_length))
    ).astype(int)

    return num_walls
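A quick usage sketch: positions are complex numbers (x + 1j*y), and the wall count is the number of room boundaries crossed along each axis. The grid below is a made-up example:

import numpy as np

side_length = 10.0
# A 2x2 grid of room centers and one AP in the corner room.
room_positions = np.array([5 + 5j, 15 + 5j, 5 + 15j, 15 + 15j])
ap_positions = np.array([5 + 5j])
print(calc_num_walls(side_length, room_positions, ap_positions))
# -> [[0], [1], [1], [2]]: walls crossed along x plus walls crossed along y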
	def motion_update(self, X_t, O1, O2):
		O_d = O2 - O1
		t = np.sqrt(np.sum(np.power(O_d[0:2],2)))
		r1 = np.arctan2(O_d[1], O_d[0]) - O1[2]
		r2 = -r1 + O_d[2]
		sig_t = np.finfo(float).eps + self.a[2] * np.absolute(t)
		sig_r1 = np.finfo(float).eps + self.a[0] * np.absolute(r1)
		sig_r2 = np.finfo(float).eps + self.a[1] * np.absolute(r2)
		# sig_t = np.finfo(float).eps + np.sqrt(self.a[2] * np.absolute(t) + self.a[3] * np.absolute(r1+r2))
		# sig_r1 = np.finfo(float).eps + np.sqrt(self.a[0] * np.absolute(r1) + self.a[1] * np.absolute(t))
		# sig_r2 = np.finfo(float).eps + np.sqrt(self.a[0] * np.absolute(r2) + self.a[1] * np.absolute(t))
		# print('heeee')		
		# print(sig_r1)
		# print(sig_r2)		
		h_t = np.reshape(t + np.random.normal(0,sig_t,len(X_t)), (len(X_t),1))
		h_r1 = r1 + np.random.normal(0,sig_r1,len(X_t))
		h_r2 = r2 + np.random.normal(0,sig_r2,len(X_t))
		th = np.reshape(X_t[:,2] + h_r1, (len(X_t),1))
		pos = X_t[:,:2] + np.concatenate((np.cos(th), np.sin(th)), axis=1) * h_t
		ang = np.reshape(X_t[:,2]+h_r1+h_r2, (len(X_t),1))
		ang = (ang + (- 2*np.pi)*(ang > np.pi) + (2*np.pi)*(ang < -np.pi))
		X_upd = np.concatenate((pos,ang), axis=1)
		map_c = np.ceil(pos/10).astype(int)
		count = 0
		for i in range(len(X_upd)):
			if tuple(map_c[i]) in self.unocc_dict:  # has_key() was removed in Python 3
				X_upd[count] = X_upd[i]
				count = count + 1
		# print(count)
		return X_upd[:count,:]
def crunchy3(offset, eta, sec, sigma=None):
    powers = []
    powers_norm = []

    y_axis = sec.get_y_axis()
    x_axis = sec.get_x_axis()
    px_y = np.absolute(y_axis[1] - y_axis[0])
    px_x = np.absolute(x_axis[1] - x_axis[0])

    if sigma is None:
        sigma = [px_y, px_x]
    if sigma[0] < px_y:
        sigma = [px_y, sigma[1]]
    if sigma[1] < px_x:
        sigma = [sigma[0], px_x]

    for yi in range(len(y_axis)):
        y = y_axis[yi]
        for xi in range(len(x_axis)):
            x = x_axis[xi]
            y_eff = y + eta * offset ** 2
            x_eff = x - offset
            this_weight = weight_function3(eta, y, x, y_eff, x_eff, sigma)
            if this_weight is None:
                powers.append(None)
                powers_norm.append(None)
            else:
                variance = 1 / this_weight
                powers.append(sec.get([yi, xi]) / variance)
                powers_norm.append(1 / variance)
    p = np.nansum(list(filter(None, powers)))
    pn = np.nansum(list(filter(None, powers_norm)))
    return offset, p / pn
def crunchy(eta, sec, hand=None, sigma=None):
    powers = []
    powers_norm = []

    y_axis = sec.get_y_axis()
    x_axis = sec.get_x_axis()
    if sigma is None:
        sigma = [np.absolute(y_axis[1] - y_axis[0]),
                 np.absolute(x_axis[1] - x_axis[0])]

    for yi in range(len(y_axis)):
        y = y_axis[yi]
        for xi in range(len(x_axis)):
            x = x_axis[xi]
            this_weight = weight_function(eta, x, y, sigma)
            if this_weight is None:
                powers.append(None)
                powers_norm.append(None)
            else:
                variance = 1 / this_weight
                powers.append(sec.get([yi, xi]) / variance)
                powers_norm.append(1 / variance)
    p = np.nansum(list(filter(None, powers)))
    pn = np.nansum(list(filter(None, powers_norm)))
    # print("eta: " + str(eta))
    # print(p)
    # print(pn)
    # print("p/pn: " + str(p/pn))
    return eta, p / pn
def det_trace_iter(A, v, E):
    """
    :param A: input matrix
    :param v: vector used for power method
    :param E: tolerance parameter
    :return: (determinant, trace, N) ->determinant, trace, number of iterations
    """
    N = 0
    err = 0
    lamda = 0
    while np.absolute(err) >= np.absolute(lamda * E) and N <= 100:
        N += 1
        temp = matrix_multiply(A, v)
        newlamda = matrix_multiply(np.transpose(v), temp)[0, 0]
        newlamda = newlamda / matrix_multiply(np.transpose(v), v)[0, 0]

        # can we use magnitude method???
        err = np.absolute(newlamda - lamda)
        v = temp
        lamda = newlamda
    # does multiple multiplication for power method!!!
    if np.absolute(err) < np.absolute(lamda * E):
        return (determinant_for_2x2(A), trace(A), N)
    else:
        #print("Uhh, failed")
        return None
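det_trace_iter relies on helpers defined elsewhere (matrix_multiply, determinant_for_2x2, trace); its core is power iteration with a Rayleigh-quotient eigenvalue estimate and a relative stopping test. A self-contained sketch of that loop in plain NumPy (hypothetical, helper-free version):

import numpy as np

def power_iteration(A, v, tol, max_iter=100):
    """Estimate the dominant eigenvalue of A; stop when the update
    falls below tol relative to the current estimate."""
    lam = 0.0
    for _ in range(max_iter):
        w = A @ v
        new_lam = np.dot(v, w) / np.dot(v, v)  # Rayleigh quotient
        if np.absolute(new_lam - lam) < np.absolute(new_lam * tol):
            return new_lam
        v, lam = w / np.linalg.norm(w), new_lam  # renormalize to avoid overflow
    return None  # did not converge

A = np.array([[2.0, 1.0], [1.0, 3.0]])
print(power_iteration(A, np.array([1.0, 0.0]), 1e-10))  # ~3.6180 (dominant eigenvalue)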
def curvature_optimisation_func_lensmaker(curv_1 = 0.002, focal_length = 100.,
                                diameter = 5., thickness = 5., ref_index = 1.5):
    """
    This function is used to minimise the RMS spread of a beam of given
    diameter, propagated through a lens of given thickness, by changing
    the curvatures of the two sides of the lens. It is meant to be used
    in conjunction with an optimisation function.
    
    This function is bounded by the lensmaker equation, so it only requires
    one curvature as an argument and the other is calculated using the
    given focal length, thickness, and refractive index.
    """
    curv_2 = lensmaker_equation(curv_1, focal_length, thickness, ref_index)
    if curv_2 == 0:
        curv_2 -= 1e-13
    if curv_1 == 0:
        curv_1 += 1e-13
    aperture_radius_1 = np.absolute(1/float(curv_1))
    aperture_radius_1 -= 1e-6*aperture_radius_1
    aperture_radius_2 = np.absolute(1/float(curv_2))
    aperture_radius_2 -= 1e-6*aperture_radius_2
    lens_front = opt.SphericalRefraction(focal_length, curv_1, 
                                        aperture_radius_1, 1., ref_index)
    lens_back = opt.SphericalRefraction(focal_length + thickness, 
                                        curv_2, aperture_radius_2, 
                                        ref_index, 1.)
    max_radius = diameter/2.
    test_beam = rt.CollimatedBeam([0,0,0], [0,0,1], 6, max_radius, 2)
    rms_spread = rms_xy_spread([lens_front, lens_back], focal_length, test_beam)
    return np.log10(rms_spread)
def compareRecon(recon1, recon2):
    ''' compare two arrays and return 1 if they are the same within specified
        precision and 0 if not.
        function was made to accompany unit test code '''
    ## FIX: make precision a input parameter
    prec = -11   # desired precision
    if recon1.shape != recon2.shape:
        print('shape is different!')
        print(recon1.shape)
        print(recon2.shape)
        return 0

    for i in range(recon1.shape[0]):
        for j in range(recon2.shape[1]):
            if numpy.absolute(recon1[i,j].real - recon2[i,j].real) > math.pow(10,-11):
                print "real: i=%d j=%d %.15f %.15f diff=%.15f" % (i, j, recon1[i,j].real, recon2[i,j].real, numpy.absolute(recon1[i,j].real-recon2[i,j].real))
                return 0
            ## FIX: need a better way to test
            # if we have many significant digits to the left of decimal we 
            #   need to be less stringent about digits to the right.
            # The code below works, but there must be a better way.
            if numpy.iscomplexobj(recon1):  # isinstance(recon1, complex) is never true for an ndarray
                if int(math.log(numpy.abs(recon1[i,j].imag), 10)) > 1:
                    prec = prec + int(math.log(numpy.abs(recon1[i,j].imag), 10))
                    if prec > 0:
                        prec = -1
                print(prec)
                if numpy.absolute(recon1[i,j].imag - recon2[i,j].imag) > math.pow(10, prec):
                    print("imag: i=%d j=%d %.15f %.15f diff=%.15f" % (i, j, recon1[i,j].imag, recon2[i,j].imag, numpy.absolute(recon1[i,j].imag-recon2[i,j].imag)))
                    return 0

    return 1
def isEqual(left, right, eps=None, masked_equal=True):
  ''' This function checks if two numpy arrays or scalars are equal within machine precision, and returns a scalar logical. '''
  diff_type = "Both arguments to function 'isEqual' must be of the same class!"
  if isinstance(left,np.ndarray):
    # ndarray
    if not isinstance(right,np.ndarray): raise TypeError(diff_type)
    if not left.dtype==right.dtype:
      right = right.astype(left.dtype) # casting='same_kind' doesn't work...
    if np.issubdtype(left.dtype, np.inexact): # also catch float32 etc
      if eps is None: return ma.allclose(left, right, masked_equal=masked_equal)
      else: return ma.allclose(left, right, masked_equal=masked_equal, atol=eps)
    elif np.issubdtype(left.dtype, np.integer) or np.issubdtype(left.dtype, np.bool_):
      return np.all( left == right ) # need to use numpy's all()
  elif isinstance(left,(float,np.inexact)):
    # numbers
    if not isinstance(right,(float,np.inexact)): raise TypeError(diff_type)
    if eps is None: eps = 100.*floateps # default
    if ( isinstance(left,float) or isinstance(right,float) ) or left.dtype.itemsize == right.dtype.itemsize: 
      return np.absolute(left-right) <= eps
    else:
      if left.dtype.itemsize < right.dtype.itemsize: right = left.dtype.type(right)
      else: left = right.dtype.type(left)
      return np.absolute(left-right) <= eps  
  elif isinstance(left,(int,bool,np.integer,np.bool_)):
    # logicals
    if not isinstance(right,(int,bool,np.integer,np.bool_)): raise TypeError(diff_type)
    return left == right
  else: raise TypeError(left)
    def _exec_loop(self, a, bd_all, mask):
        """Solves the kriging system by looping over all specified points.
        Less memory-intensive, but involves a Python-level loop."""

        npt = bd_all.shape[0]
        n = self.X_ADJUSTED.shape[0]
        zvalues = np.zeros(npt)
        sigmasq = np.zeros(npt)

        a_inv = scipy.linalg.inv(a)

        for j in np.nonzero(~mask)[0]:   # Note that this is the same thing as range(npt) if mask is not defined,
            bd = bd_all[j]               # otherwise it takes the non-masked elements.
            if np.any(np.absolute(bd) <= self.eps):
                zero_value = True
                zero_index = np.where(np.absolute(bd) <= self.eps)
            else:
                zero_index = None
                zero_value = False

            b = np.zeros((n+1, 1))
            b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
            if zero_value:
                b[zero_index[0], 0] = 0.0
            b[n, 0] = 1.0
            x = np.dot(a_inv, b)
            zvalues[j] = np.sum(x[:n, 0] * self.Z)
            sigmasq[j] = np.sum(x[:, 0] * -b[:, 0])

        return zvalues, sigmasq
    def _exec_loop_moving_window(self, a_all, bd_all, mask, bd_idx):
        """Solves the kriging system by looping over all specified points.
        Less memory-intensive, but involves a Python-level loop."""
        import scipy.linalg.lapack

        npt = bd_all.shape[0]
        n = bd_idx.shape[1]
        zvalues = np.zeros(npt)
        sigmasq = np.zeros(npt)

        for i in np.nonzero(~mask)[0]:   # Note that this is the same thing as range(npt) if mask is not defined,
            b_selector = bd_idx[i]       # otherwise it takes the non-masked elements.
            bd = bd_all[i]

            a_selector = np.concatenate((b_selector, np.array([a_all.shape[0] - 1])))
            a = a_all[a_selector[:, None], a_selector]

            if np.any(np.absolute(bd) <= self.eps):
                zero_value = True
                zero_index = np.where(np.absolute(bd) <= self.eps)
            else:
                zero_index = None
                zero_value = False
            b = np.zeros((n+1, 1))
            b[:n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
            if zero_value:
                b[zero_index[0], 0] = 0.0
            b[n, 0] = 1.0

            x = scipy.linalg.solve(a, b)

            zvalues[i] = x[:n, 0].dot(self.Z[b_selector])
            sigmasq[i] = - x[:, 0].dot(b[:, 0])

        return zvalues, sigmasq
def estimate(data):
    length=len(data)
    wave=thinkdsp.Wave(ys=data,framerate=Fs)
    spectrum=wave.make_spectrum()
    spectrum_heart=wave.make_spectrum()
    spectrum_resp=wave.make_spectrum()

    fft_mag=list(np.absolute(spectrum.hs))
    fft_length= len(fft_mag)

    spectrum_heart.high_pass(cutoff=0.8,factor=0.001)
    spectrum_heart.low_pass(cutoff=2,factor=0.001)
    fft_heart=list(np.absolute(spectrum_heart.hs))

    max_fft_heart=max(fft_heart)
    heart_sample=fft_heart.index(max_fft_heart)
    hr=heart_sample*Fs/length*60

    spectrum_resp.high_pass(cutoff=0.15,factor=0)
    spectrum_resp.low_pass(cutoff=0.4,factor=0)
    fft_resp=list(np.absolute(spectrum_resp.hs))

    max_fft_resp=max(fft_resp)
    resp_sample=fft_resp.index(max_fft_resp)
    rr=resp_sample*Fs/length*60
    
    print "Heart Rate:", hr, "BPM"
    
    if hr<10:
        print "Respiration Rate: 0 RPM"
    else:
        print "Respiration Rate:", rr, "RPM"
    
    return
    def _exec_vector(self, a, bd, mask):
        """Solves the kriging system as a vectorized operation. This method
        can take a lot of memory for large grids and/or large datasets."""

        npt = bd.shape[0]
        n = self.X_ADJUSTED.shape[0]
        zero_index = None
        zero_value = False

        a_inv = scipy.linalg.inv(a)

        if np.any(np.absolute(bd) <= self.eps):
            zero_value = True
            zero_index = np.where(np.absolute(bd) <= self.eps)

        b = np.zeros((npt, n+1, 1))
        b[:, :n, 0] = - self.variogram_function(self.variogram_model_parameters, bd)
        if zero_value:
            b[zero_index[0], zero_index[1], 0] = 0.0
        b[:, n, 0] = 1.0

        if (~mask).any():
            mask_b = np.repeat(mask[:, np.newaxis, np.newaxis], n+1, axis=1)
            b = np.ma.array(b, mask=mask_b)

        x = np.dot(a_inv, b.reshape((npt, n+1)).T).reshape((1, n+1, npt)).T
        zvalues = np.sum(x[:, :n, 0] * self.Z, axis=1)
        sigmasq = np.sum(x[:, :, 0] * -b[:, :, 0], axis=1)

        return zvalues, sigmasq
def crunchy2(pt_and_sigma, sec, hand=None):
    pt, sigma = pt_and_sigma
    py, px = pt

    powers = []
    powers_norm = []

    y_axis = sec.get_y_axis()
    x_axis = sec.get_x_axis()
    px_y = np.absolute(y_axis[1] - y_axis[0])
    px_x = np.absolute(x_axis[1] - x_axis[0])

    if sigma is None:
        sigma = [px_y, px_x]
    if sigma[0] < px_y:
        sigma = [px_y, sigma[1]]
    if sigma[1] < px_x:
        sigma = [sigma[0], px_x]

    for yi in range(len(y_axis)):
        y = y_axis[yi]
        for xi in range(len(x_axis)):
            x = x_axis[xi]
            this_weight = weight_function2(y, x, py, px, sigma)
            if this_weight is None:
                powers.append(None)
                powers_norm.append(None)
            else:
                variance = 1 / this_weight
                powers.append(sec.get([yi, xi]) / variance)
                powers_norm.append(1 / variance)
    p = np.nansum(list(filter(None, powers)))
    pn = np.nansum(list(filter(None, powers_norm)))
    return pt, p / pn
def spectrum_magnitude(frames, NFFT):
    # FFT transform
    complex_spectrum = numpy.fft.rfft(frames, NFFT)
    return numpy.absolute(complex_spectrum)
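A short usage sketch: feed one frame of a known tone and map the peak bin back to frequency (the sample rate and tone are illustrative values):

import numpy

fs, NFFT = 8000, 512
n = numpy.arange(NFFT)
frame = numpy.sin(2 * numpy.pi * 1000 * n / fs)  # one frame of a 1 kHz tone

mag = spectrum_magnitude(frame, NFFT)
peak_bin = numpy.argmax(mag)
print(peak_bin * fs / NFFT)  # 1000.0: peak bin mapped back to Hz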
def validate(ensemble,
             test,
             testout,
             test_gridday,
             frames_grid,
             margin,
             qt,
             spatial_channel=[],
             forecast_channel=[],
             calculate_channel={}):
    errorsum = 0
    averagetotalerror = 0
    cnt = 1
    channels = test.shape[-1]
    predicttotal = pd.DataFrame()
    pix = int(np.sqrt(max(frames_grid['pixno'])))
    gridpix = np.flip(
        np.array(range(1,
                       max(frames_grid['pixno']) + 1)).reshape(pix, pix), 0)
    gridpix = gridpix[margin:pix - margin, margin:pix - margin]
    errorframe = pd.DataFrame()
    minpop = min(frames_grid['norm_pop'])
    span = test_gridday[0][1]
    forecast_frames_grid = frames_grid[
        frames_grid['day'] <= max(frames_grid['day']) - span]
    forecast_frames_grid_array = []
    colnames = frames_grid.columns
    for k, (grid, span) in test_gridday.items():
        ######## for each test grid
        grid_forecast_frames_grid = pickle.loads(
            pickle.dumps(
                forecast_frames_grid[forecast_frames_grid.grid == grid], -1))
        _forecast_frames_grid = pickle.loads(
            pickle.dumps(
                grid_forecast_frames_grid[
                    grid_forecast_frames_grid['day'] == max(
                        grid_forecast_frames_grid['day'])], -1))
        track = test[k]
        totpop = track[0, ::, ::, 1]
        pix = totpop.shape[0]
        print(grid)
        popexists = pickle.loads(pickle.dumps(totpop[::, ::], -1))
        popexists[popexists > 0] = 1
        popexists_size = len(popexists[popexists > 0].flatten())
        out = testout[k]
        ######## for each prediction day
        for i in range(span):
            new_pos = ensemble.predict(track[np.newaxis, ::, ::, ::, ::])
            #new_pos = ensemble_predict(ensemble,track[np.newaxis, ::, ::, ::, ::])
            new = new_pos[::, ::, ::, ::]
            new = np.multiply(new[0, ::, ::, 0], popexists)[np.newaxis, ::, ::,
                                                            np.newaxis]
            new[new < 0] = 0
            new[new > 1] = 1
            gamma = forecast_gamma(grid_forecast_frames_grid, grid, span)
            _forecast_frames_grid = calculate_future_SIR(
                _forecast_frames_grid,
                grid,
                forecastbeta=new[0, ::, ::, 0],
                forecastgamma=gamma,
                qt=qt)
            if len(forecast_frames_grid_array) != 0:
                forecast_frames_grid_array = np.concatenate(
                    (forecast_frames_grid_array, _forecast_frames_grid.values),
                    axis=0)
            else:
                forecast_frames_grid_array = _forecast_frames_grid.values
            #print("forecast done")
            ########### append channels
            newtrack = new
            for channel in range(1, channels):
                if channel in spatial_channel:
                    channel_data = track[i, ::, ::, channel]
                    newtrack = np.concatenate(
                        (newtrack, channel_data[np.newaxis, ::, ::,
                                                np.newaxis]),
                        axis=3)
                elif channel in forecast_channel:
                    channel_data = out[i, ::, ::, channel]
                    newtrack = np.concatenate(
                        (newtrack, channel_data[np.newaxis, ::, ::,
                                                np.newaxis]),
                        axis=3)
                elif channel in calculate_channel:
                    channel2 = np.flip(
                        np.array(_forecast_frames_grid[
                            calculate_channel[channel]]).reshape(pix, pix), 0)
                    newtrack = np.concatenate(
                        (newtrack, channel2[np.newaxis, ::, ::, np.newaxis]),
                        axis=3)
            #print(channels,spatialorforecast_channel,newtrack.shape,track.shape)
            track = np.concatenate((track, newtrack), axis=0)
            predictframe = np.squeeze(new, 0)[::, ::, 0][margin:pix - margin,
                                                         margin:pix - margin]
            actualframe = out[i, ::, ::, 0][margin:pix - margin,
                                            margin:pix - margin]
            notzeroframe = pickle.loads(pickle.dumps(actualframe, -1))
            notzeroframe[notzeroframe == 0] = 1
            _errorframe = pd.DataFrame({
                'pixno':
                gridpix[totpop[margin:pix - margin,
                               margin:pix - margin] > 0].flatten(),
                'predict':
                predictframe[totpop[margin:pix - margin,
                                    margin:pix - margin] > 0].flatten(),
                'actual':
                actualframe[totpop[margin:pix - margin,
                                   margin:pix - margin] > 0].flatten()
            })
            _errorframe['day'] = i
            _errorframe['grid'] = grid
            errorframe = pd.concat([errorframe, _errorframe])  # DataFrame.append was removed in pandas 2.x
            error = np.sum(
                np.absolute((predictframe - actualframe) /
                            notzeroframe)) / (popexists_size + 1)
            averagetotalerror += np.sum(
                np.absolute(
                    (predictframe - actualframe))) / (popexists_size + 1)
            errorsum += error
            cnt += 1
    averageerror = errorsum / cnt
    averagetotalerror /= cnt
    forecast_frames_grid = pd.DataFrame(forecast_frames_grid_array)
    forecast_frames_grid.columns = colnames
    return (averageerror, averagetotalerror, forecast_frames_grid)
    def run(self):
        # Run the Q-learning algorithm.
        discrepancy = []

        self.time = _time.time()

        # initial state choice
        s = _np.random.randint(0, self.S)

        for n in range(1, self.max_iter + 1):

            # Reinitialisation of trajectories every 100 transitions
            if (n % 100) == 0:
                s = _np.random.randint(0, self.S)

            # Action choice
            pn = _np.random.random()
            if pn < (1 - self.epsilon):
                # optimal_action = self.Q[s, :].max()
                a = self.Q[s, :].argmax()
            else:
                a = _np.random.randint(0, self.A)

            self.epsilon *= self.decay

            # Simulating next state s_new and reward associated to <s,s_new,a>
            p_s_new = _np.random.random()
            p = 0
            s_new = -1
            while (p < p_s_new) and (s_new < (self.S - 1)):
                s_new = s_new + 1
                p = p + self.P[a][s, s_new]

            try:
                r = self.R[a][s, s_new]
            except IndexError:
                try:
                    r = self.R[s, a]
                except IndexError:
                    r = self.R[s]

            # Updating the value of Q
            # Decaying update coefficient (1/sqrt(n+2)) can be changed
            delta = r + self.discount * self.Q[s_new, :].max() - self.Q[s, a]

            dQ = self.alpha * delta
            self.Q[s, a] = self.Q[s, a] + dQ

            # current state is updated
            s = s_new

            # Computing and saving maximal values of the Q variation
            discrepancy.append(_np.absolute(dQ))

            # Computing means all over maximal Q variations values
            if len(discrepancy) == 1000:
                self.mean_discrepancy.append(_np.mean(discrepancy))
                discrepancy = []

            # compute the value function and the policy
            self.V = self.Q.max(axis=1)
            self.policy = self.Q.argmax(axis=1)

        self._endRun()
                          (0, 255, 0), 2)
            # print "eye " + str(ex) + " " + str(ey)
            # roi_eye = roi_face[int(1.2*ey):int(0.8*(ey+eh)), int(1.2*ex):int(0.8*(ex+ew))]
            roi_eye = roi_face[ey:ey + eh, ex:ex + ew]
            center = 0

            roi_eye = cv2.GaussianBlur(roi_eye, (3, 3), 0)
            roi_eye = cv2.addWeighted(roi_eye, 1.5, roi_eye, -0.5, 0)
            roi_eye_canny = cv2.Canny(roi_eye, 100, 200)
            cv2.imwrite('./data/canny' + str(counter) + '.png', roi_eye_canny)
            laplacian = cv2.Laplacian(roi_eye, cv2.CV_64F)
            cv2.imwrite('./data/lapla' + str(counter) + '.png', laplacian)
            # res = cv2.resize(roi_eye,(int(ew/2), int(eh/2)), interpolation = cv2.INTER_AREA)
            roi_eyex = cv2.Sobel(roi_eye, cv2.CV_64F, 1, 0, ksize=3)
            roi_eyey = cv2.Sobel(roi_eye, cv2.CV_64F, 0, 1, ksize=3)
            roi_eyex = np.absolute(roi_eyex)
            roi_eyey = np.absolute(roi_eyey)
            roi_eyex = np.uint8(roi_eyex)
            roi_eyey = np.uint8(roi_eyey)
            # sobelx64f = cv2.Sobel(img,cv2.CV_64F,1,0,ksize=5)
            # abs_sobel64f = np.absolute(sobelx64f)
            # sobel_8u = np.uint8(abs_sobel64f)

            cv2.imwrite('./data/zsobely' + str(counter) + '.png', roi_eyey)
            cv2.imwrite('./data/zsobelx' + str(counter) + '.png', roi_eyex)
            ret, tmp = cv2.threshold(roi_eyex, 0, 255, cv2.THRESH_OTSU)
            tmp = cv2.erode(tmp, kernel, iterations=1)
            cv2.imwrite('./data/zsobelxt' + str(counter) + '.png', tmp)

            mag = np.hypot(roi_eyex, roi_eyey)  # magnitude
            mag *= 255.0 / np.max(mag)  # normalize (Q&D)
print('ret is', ret)
while ret == False:
    print('')

blankFrame = np.zeros(np.shape(frame), np.uint8)
emptyFrame = blankFrame
emptyFrame32 = np.float32(blankFrame)

cv2.namedWindow('frame')
cv2.setMouseCallback('frame', setEmpty)

while(True):
    _, frame = cap.read()
    frame32 = np.float32(frame)

    diff32 = np.absolute(frame32 - emptyFrame32)

    norm32 = np.sqrt(diff32[:,:,0]**2 + diff32[:,:,1]**2 + diff32[:,:,2]**2)/np.sqrt(255**2 + 255**2 + 255**2)

    diff = np.uint8(norm32*255)
    _, thresh = cv2.threshold(diff, 100, 255, 0)
    kernel = np.ones((20,20), np.uint8)
    blobby = cv2.dilate(thresh, kernel, iterations= 4)

    # buffer
    pastBuff = currBuff
    currBuff = ( (currBuff << 1) | (np.any(blobby)) ) & buffMask
    if currBuff == buffMask:
        cv2.imshow('frame', blobby)
    else:
        cv2.imshow('frame', blankFrame)
def get_reward_ABS(state, terminated):
    "Calculates the environments reward for the next state"

    if terminated:
        return -10
    return np.absolute(0.5 - state[0])
def apply_algos(algorithms, X, Y, visualize_results=False, fig_suptitle=''):
    """Apply classification algorithms to X and Y.
    X is the positive class, Y is the negative class.
    Parameters
    ----------
    `X` (np.array) The data of class 1
    `Y` (np.array) The data of class 2
    `visualize_results` (boolean) If you want to plot classification results and ROC curves
    Returns
    ---------
    (dict) `{"SCORE", "F_Measure_+", "F_Measure_-", "TPR", "TNR", "EER", "AUC"}`
    """
    split_perc = 0.8

    # Split Class 1 to train and test
    np.random.shuffle(X)
    X_train = X[0:int(np.ceil(split_perc * len(X))), :]
    X_test = X[int(np.ceil(split_perc * len(X))):, :]

    # Split Class 2 to train and test
    np.random.shuffle(Y)
    Y_train = Y[0:int(np.ceil(split_perc * len(Y))), :]
    Y_test = Y[int(np.ceil(split_perc * len(Y))):, :]

    # Merge data, construct labels
    merged_train = np.append(X_train, Y_train, axis=0)
    labels_train_true = len(X_train) * [0] + len(Y_train) * [1]
    merged_test = np.append(X_test, Y_test, axis=0)
    labels_test_true = len(X_test) * [0] + len(Y_test) * [1]

    # Dict all results
    labels_test_pred = {}
    model = {}
    fpr_pts, tpr_pts = {}, {}
    F_Measure_Positive, F_Measure_Negative, TPR, TNR, SCORE, EER, AUC = {}, {}, {}, {}, {}, {}, {}

    # Apply algorithms
    for algorithm in algorithms:

        # Apply and fit model
        if hasattr(globals()[algorithm](), 'kernel') and hasattr(globals()[algorithm](), 'gamma'):
            model[algorithm] = globals()[algorithm](kernel='rbf', gamma='auto')
        elif hasattr(globals()[algorithm](), 'n_neighbors'):
            model[algorithm] = globals()[algorithm](n_neighbors=5)
        else:
            model[algorithm] = globals()[algorithm]()
        model[algorithm].fit(merged_train, labels_train_true)

        # Predict
        labels_test_pred[algorithm] = model[algorithm].predict(merged_test)

        # Calculate Metrics
        F_Measure_Positive[algorithm] = f1_score(
            labels_test_true, labels_test_pred[algorithm], pos_label=1)
        F_Measure_Negative[algorithm] = f1_score(
            labels_test_true, labels_test_pred[algorithm], pos_label=0)
        TPR[algorithm] = recall_score(
            labels_test_true, labels_test_pred[algorithm], pos_label=1)
        TNR[algorithm] = recall_score(
            labels_test_true, labels_test_pred[algorithm], pos_label=0)
        SCORE[algorithm] = model[algorithm].score(
            merged_test, labels_test_true)
        if hasattr(model[algorithm], 'decision_function'):
            scores = model[algorithm].decision_function(merged_test)
        elif hasattr(model[algorithm], 'predict_proba'):
            scores = model[algorithm].predict_proba(merged_test)[:, 1]
        else:
            exit('No decision_function or predict_proba for model')
        fpr_pts[algorithm], tpr_pts[algorithm], _ = roc_curve(
            labels_test_true, scores, pos_label=1)
        fnr_pts = 1 - tpr_pts[algorithm]
        EER[algorithm] = (fpr_pts[algorithm][np.nanargmin(np.absolute((fnr_pts - fpr_pts[algorithm])))] +
                          fnr_pts[np.nanargmin(np.absolute((fnr_pts - fpr_pts[algorithm])))]) / 2
        AUC[algorithm] = roc_auc_score(labels_test_true, scores)

    # Show results if needed
    # One figure for classification results, one algo in each subplot
    # One figure for ROC curves, one roc algo curve in each subplot
    if visualize_results is True:

        if len(algorithms) == 1:
            pltdim1, pltdim2 = 1, 1
        elif len(algorithms) == 2:
            pltdim1, pltdim2 = 1, 2
        else:
            pltdim1, pltdim2 = np.ceil(np.sqrt(len(algorithms))), np.ceil(
                np.sqrt(len(algorithms)))
        _3d = True if X.shape[1] == 3 else False

        # Init Figure for class results
        fig = plot.figure()
        fig.suptitle(fig_suptitle)

        for i, algorithm in enumerate(algorithms):

            # Add subplot
            ax = fig.add_subplot(
                pltdim1, pltdim2, i + 1, projection='3d') if _3d is True else fig.add_subplot(pltdim1, pltdim2, i + 1)

            # Add the separating hyperplane
            if _3d is True:
                if algorithm == 'SVC':
                    add_3d_hyperplane(model[algorithm], ax,  np.append(
                        np.append(X_train, Y_train, axis=0), merged_test, axis=0))
            else:
                add_2d_hyperplane(model[algorithm], np.append(
                    np.append(X_train, Y_train, axis=0), merged_test, axis=0))

            # Scatter X Train and Y Train
            ax_X_train = add_scatter(ax, X_train, color='red', marker='o')
            ax_Y_train = add_scatter(ax, Y_train, color='blue', marker='o')
            leg_tuple_ax = (ax_X_train, ax_Y_train)
            leg_tuple_str = ('Negative Class Training Data (%d)' % (len(X_train)),
                             'Positive Class Training Data (%d)' % (len(Y_train)))

            # Scatter TN (Xclass) and FP
            labels_X_test_pred = labels_test_pred[algorithm][0:len(X_test)]
            ax_TN, lenTN, ax_FP, lenFP = add_scatter(ax, X_test, labels=labels_X_test_pred, labels_paint={
                "colors": ['red', 'red'], "markers": ['o', 'o']})
            leg_tuple_ax += (ax_TN, ax_FP)
            leg_tuple_str += ('True Negatives (%d/%d)' % (lenTN, lenTN + lenFP),
                              'False Positives (%d/%d)' % (lenFP, lenTN + lenFP))

            # Scatter TP (Yclass) and FN
            labels_Y_test_pred = labels_test_pred[algorithm][len(X_test):]
            ax_FN, lenFN, ax_TP, lenTP = add_scatter(ax, Y_test, labels=labels_Y_test_pred, labels_paint={
                "colors": ['blue', 'blue'], "markers": ['o', 'o']})
            leg_tuple_ax += (ax_TP, ax_FN)
            leg_tuple_str += ('True Positives (%d/%d)' % (lenTP, lenTP + lenFN),
                              'False Negatives (%d/%d)' % (lenFN, lenTP + lenFN))

            # Set Title and Legend
            if algorithm == 'SVC':
                algorithm_name = 'SVM'
            elif algorithm == 'KNeighborsClassifier':
                algorithm_name = 'kNN'
            ax.set_title('Classifier: ' + algorithm_name)
            # ax.set_title('Classifier: ' + algorithm_name + '\nF-Measure_+ = %.2f, F-Measure_- = %.2f, TPR = %.2f, TNR = %.2f, EER = %.2f' %
            #              (F_Measure_Positive[algorithm], F_Measure_Negative[algorithm], TPR[algorithm], TNR[algorithm], EER[algorithm]))
            ax.legend(leg_tuple_ax, leg_tuple_str, loc=0,
                      bbox_to_anchor=(0, 0, 1, 0.93))
            # ax.legend(leg_tuple_ax, leg_tuple_str, loc='upper right')

        # Init Figure for ROCs
        fig = plot.figure()
        fig.suptitle(fig_suptitle)
        # Plot ROC Curve
        for i, algorithm in enumerate(algorithms):
            line = fig.add_subplot(pltdim1, pltdim2, i + 1)
            line.plot(fpr_pts[algorithm], tpr_pts[algorithm])
            line.plot([0, 1], [0, 1], 'k--')
            if algorithm == 'SVC':
                algorithm_name = 'SVM'
            elif algorithm == 'KNeighborsClassifier':
                algorithm_name = 'kNN'
            line.set_title(algorithm_name + '\nROC Curve, AUC = %.2f' %
                           (AUC[algorithm]))
            line.set_xlabel('FPR')
            line.set_ylabel('TPR')

        # Show dem figs
        plot.show()

    return {"SCORE": SCORE,
            "F_Measure_+": F_Measure_Positive,
            "F_Measure_-": F_Measure_Negative,
            "TPR": TPR,
            "TNR": TNR,
            "EER": EER,
            "AUC": AUC
            }
def F_integral(twiss_filename, energy, alpha_snake, Anomalous_magnetic_moment,
               delta_phi, delta_y):
    nu_mass = TwissParameter_nu_Gamma(twiss_filename)
    ParametersForSRF = ParameterForSRF(twiss_filename)

    # Parameter_W_ForSRF(ParametersForSRF)

    zeta = np.array([float(i) for i in ParametersForSRF[0]])
    S = np.array([float(i) for i in ParametersForSRF[1]])
    length = np.array([float(i) for i in ParametersForSRF[2]])
    angle = np.array([float(i) for i in ParametersForSRF[3]])

    beta_y = np.array([float(i) for i in ParametersForSRF[4]])
    alpha_y = np.array([float(i) for i in ParametersForSRF[5]])
    mu_y = np.array([float(i) for i in ParametersForSRF[6]])
    mu_y_temp_num = 2.0 * math.pi
    mu_y = array_times_number(mu_y_temp_num, mu_y)

    K1_L = np.array([float(i) for i in ParametersForSRF[7]])

    nu_y = float(nu_mass[0])
    mass = float(nu_mass[1])

    gamma = float(energy) / mass

    alpha_snake = float(alpha_snake)
    B_moment_anomaly = float(Anomalous_magnetic_moment)

    delta_phi = float(delta_phi)
    delta_y = float(delta_y)

    exp_compx_nu1 = complex_exp(2.0 * math.pi * nu_y)
    exp_compx_nu1 = exp_compx_nu1 - 1.0
    exp_compx_nu1 = 1.0 / exp_compx_nu1

    exp_compx_nu2 = complex_exp(-2.0 * math.pi * nu_y)
    exp_compx_nu2 = exp_compx_nu2 - 1.0
    exp_compx_nu2 = 1.0 / exp_compx_nu2

    fy_z = fy(beta_y, mu_y)
    fy_z_conj = fy_conjugate(fy_z)

    d_fy_z = fy_derivative(beta_y, mu_y, alpha_y)
    d_fy_z_conj = fy_conjugate(d_fy_z)

    len_dfy = len(d_fy_z)
    d_fy_z_avg = np.zeros(len_dfy, dtype=complex)
    d_fy_z_avg[0] = d_fy_z[0]
    for i in range(len_dfy - 1):
        d_fy_z_avg[i + 1] = 0.5 * (d_fy_z[i] + d_fy_z[i + 1])

    d_fy_z_conj_avg = fy_conjugate(d_fy_z_avg)

    psi = psi_z(gamma, B_moment_anomaly, zeta, angle)
    complx_exp_psi = complex_exp_psi(psi)

    G1j = np.zeros(len_dfy, dtype=complex)
    # G1j[0] = d_fy_z_avg[0] * cos(alpha_snake * ( 1 - zeta[0])) * (complx_exp_psi[0] - complx_exp_psi[0])
    for i in range(len_dfy - 1):
        G1j[i + 1] = d_fy_z_avg[i + 1] * cos(
            alpha_snake *
            (1 - zeta[i + 1])) * (complx_exp_psi[i + 1] - complx_exp_psi[i])

    SUM_G1j = np.zeros(len_dfy, dtype=complex)
    SUM_G1j[0] = G1j[0]
    for i in range(len_dfy - 1):
        SUM_G1j[i + 1] = G1j[i + 1] + SUM_G1j[i]

    G2j = np.zeros(len_dfy, dtype=complex)
    # G2j[0] = d_fy_z_conj_avg[0] * cos(alpha_snake * ( 1 - zeta[0])) * (complx_exp_psi[0] - complx_exp_psi[0])
    for i in range(len_dfy - 1):
        G2j[i + 1] = d_fy_z_conj_avg[i + 1] * cos(
            alpha_snake *
            (1 - zeta[i + 1])) * (complx_exp_psi[i + 1] - complx_exp_psi[i])

    SUM_G2j = np.zeros(len_dfy, dtype=complex)
    SUM_G2j[0] = G2j[0]
    for i in range(len_dfy - 1):
        SUM_G2j[i + 1] = G2j[i + 1] + SUM_G2j[i]

    F1j_const = exp_compx_nu1 * SUM_G1j[len_dfy - 1]
    F2j_const = exp_compx_nu2 * SUM_G2j[len_dfy - 1]

    F1j = np.zeros(len_dfy, dtype=complex)
    for i in range(len_dfy):
        F1j[i] = SUM_G1j[i] + F1j_const

    F2j = np.zeros(len_dfy, dtype=complex)
    for i in range(len_dfy):
        F2j[i] = SUM_G2j[i] + F2j_const

    F3 = np.zeros(len_dfy, dtype=complex)
    for i in range(len_dfy):
        F3[i] = gamma * B_moment_anomaly / complex(
            0.0, 2.0) * (fy_z_conj[i] * F1j[i] - fy_z[i] * F2j[i])

    F3 = np.absolute(F3)

    length_F = len(F3)

    For_dipoles = np.zeros(len_dfy)
    for i in range(len_dfy):
        For_dipoles[i] = (angle[i] * delta_phi * 0.001)**2.0

    For_quadrupoles = np.zeros(len_dfy)
    for i in range(len_dfy):
        For_quadrupoles[i] = (K1_L[i] * delta_y * 0.001)**2.0

    SUM_D_and_Q = np.zeros(len_dfy)
    for i in range(len_dfy):
        SUM_D_and_Q[i] = For_dipoles[i] + For_quadrupoles[i]

    element_of_strength = np.zeros(len_dfy)
    for i in range(len_dfy):
        element_of_strength[i] = F3[i]**2 * SUM_D_and_Q[i]

    Zero_integer_SRS = np.sum(element_of_strength)
    Zero_integer_SRS = 0.5 * math.sqrt(Zero_integer_SRS) / math.pi

    return S, F3, Zero_integer_SRS
	def calcError(self, input, target):
		output = self.run(input)
		diff = np.absolute(output-target)
		return np.average(diff)
def index_finder(phi, phi_c):
    i = np.argmin(np.absolute(phi - phi_c))
    return i
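A quick usage sketch: index_finder returns the index of the entry of phi closest to phi_c.

import numpy as np

phi = np.linspace(0.0, np.pi, 181)  # 1-degree grid in radians
i = index_finder(phi, 1.0)          # nearest sample to phi_c = 1.0 rad
print(i, phi[i])                    # 57 0.9948... (the closest grid point)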
#                 #np.take(np.take(S[i,k,1+3*j], range(dims[j]/2,dims[j]), axis=0), range(dims[j]/2), axis=1)
#                 S[i,k,1+3*j][dims[j]/2:dims[j],0:dims[j]/2]
#                 )
# # make numpy-array of t_amps, only valid for first three indices,
# # because then, the dimensions of the matrices change, generally
# t = np.array(t)
# t = np.reshape(t, (nr,nfreq,nconf))

t = np.zeros((nr, nfreq, nconf, nin_max, nin_max), dtype='complex')
for i in range(nr):
    for j in range(nfreq):
        for k in range(nconf):
            t[i,j,k,0:nins[j],0:nins[j]] = S[i,k,1+3*j][nins[j]:2*nins[j],0:nins[j]]

transmissions = np.array([[
            np.mean(np.absolute(t[i,j,:,:])) * nin_max**2 / nins[j]
            for j in range(nfreq)] for i in range(nr)])
#print transmissions

trans_mean = np.array([[
            np.mean(np.absolute(t[i,j,:,:])) * nin_max**2 / nins[j]**2
            for j in range(nfreq)] for i in range(nr)])
#print trans_mean

trans_msqr = np.array([[
            np.mean(np.absolute(t[i,j,:,:])**2) * nin_max**2 / nins[j]**2
            for j in range(nfreq)] for i in range(nr)])
#print trans_msqr

print((trans_msqr - trans_mean**2) / transmissions**2)
Exemple #42
	def calcTransformError(self, input, target):
		output = self.run(input)
		output[output >= 0] = 1
		output[output <= 0 ] = -1
		diff = np.absolute(output - target)
		return np.average(diff)
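# Note on the two masks above: the >= 0 mask runs first, so elements equal to
# 0 are already 1 by the time the <= 0 mask is applied. A one-step equivalent
# (sketch): output = np.where(output >= 0, 1, -1)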
Exemple #43
            vis = img.copy()

            '''
                finding gradient by Sobel filter
            '''
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

            useGradient = 0

            if useGradient:
                gradX = cv2.Sobel(gray, ddepth=cv2.CV_32F, dx=1, dy=0, ksize=3)

                '''
                    take absolute value of gradient to use negative gradient
                '''
                gradX = np.absolute(gradX)

                '''
                    Normalization of gradient
                '''
                (minVal, maxVal) = (np.min(gradX), np.max(gradX)) 
                if maxVal - minVal > 0:
                    gradX = (255 * ((gradX - minVal) / float(maxVal - minVal))).astype("uint8")
                else:
                    gradX  = np.zeros(gray.shape, dtype = "uint8")
            else:
                gradX = 255 - gray

            '''
                Take median filter by horizontal axis
            '''
Exemple #44
	def errorCalc(self, input, target):
		result = self.run(input)
		diff = np.absolute(result-target)
		return np.average(diff)
Exemple #45
 def test_constantFaces(self):
     face_vec = np.ones(self.mesh.nF)
     assert all(np.absolute(self.mesh.aveF2CC * face_vec - 1.) < TOL)
     assert all(np.absolute(self.mesh.aveF2CCV * face_vec - 1.) < TOL)
Exemple #46
y_pred_train = regr.predict(X_train)
y_pred_test = regr.predict(X_test)
y_pred_train2 = regr2.predict(X_train2)
y_pred_test2 = regr2.predict(X_test2)
y_pred_train3 = regr3.predict(X_train3)
y_pred_test3 = regr3.predict(X_test3)
y_pred_train4 = regr4.predict(X_train4)
y_pred_test4 = regr4.predict(X_test4)
y_pred_train5 = regr5.predict(X_train5)
y_pred_test5 = regr5.predict(X_test5)

columns = ['Model', 'Train error', 'Test error', 'Sum of Absolute Weights']
model1 = "%.2f X + %.2f" % (regr.coef_[0][0], regr.intercept_[0])
values1 = [model1, np.sqrt(mean_squared_error(y_train, y_pred_train)), np.sqrt(mean_squared_error(y_test, y_pred_test)),
           np.absolute(regr.coef_[0]).sum() + np.absolute(regr.intercept_[0])]
model2 = "%.2f X + %.2f X2 + %.2f" % (regr2.coef_[0][0], regr2.coef_[0][1], regr2.intercept_[0])
values2 = [model2, np.sqrt(mean_squared_error(y_train, y_pred_train2)),
           np.sqrt(mean_squared_error(y_test, y_pred_test2)),
           np.absolute(regr2.coef_[0]).sum() + np.absolute(regr2.intercept_[0])]
model3 = "%.2f X + %.2f X2 + %.2f X3 + %.2f" % (
regr3.coef_[0][0], regr3.coef_[0][1], regr3.coef_[0][2], regr3.intercept_[0])
values3 = [model3, np.sqrt(mean_squared_error(y_train, y_pred_train3)),
           np.sqrt(mean_squared_error(y_test, y_pred_test3)),
           np.absolute(regr3.coef_[0]).sum() + np.absolute(regr3.intercept_[0])]
model4 = "%.2f X + %.2f X2 + %.2f X3 + %.2f X4 + %.2f" % (regr4.coef_[0][0], regr4.coef_[0][1],
                                                          regr4.coef_[0][2], regr4.coef_[0][3], regr4.intercept_[0])
values4 = [model4, np.sqrt(mean_squared_error(y_train, y_pred_train4)),
           np.sqrt(mean_squared_error(y_test, y_pred_test4)),
           np.absolute(regr4.coef_[0]).sum() + np.absolute(regr4.intercept_[0])]
model5 = "%.2f X + %.2f X2 + %.2f X3 + %.2f X4 + %.2f X5 + %.2f" % (
def perception_step(Rover):
    # Perform perception steps to update Rover()
    # TODO: 
    # NOTE: camera image is coming to you in Rover.img
    # 1) Define source and destination points for perspective transform
    dst_size = 5
    bottom_offset = 6
    image = Rover.img
    source = np.float32([[14, 140], [301 ,140],[200, 96], [118, 96]])
    destination = np.float32([[image.shape[1]/2 - dst_size, image.shape[0] - bottom_offset],
                  [image.shape[1]/2 + dst_size, image.shape[0] - bottom_offset],
                  [image.shape[1]/2 + dst_size, image.shape[0] - 2*dst_size - bottom_offset], 
                  [image.shape[1]/2 - dst_size, image.shape[0] - 2*dst_size - bottom_offset],
                  ])
    # 2) Apply perspective transform
    warped, mask = perspect_transform(image, source, destination)
    # 3) Apply color threshold to identify navigable terrain/obstacles/rock samples

    color_select = color_thresh(warped, rgb_thresh=(160,160,160))
    obstacle = np.absolute(np.float32(color_select) -1) * mask
    rock = rock_thresh(warped)
    # 4) Update Rover.vision_image (this will be displayed on left side of screen)
        # Example: Rover.vision_image[:,:,0] = obstacle color-thresholded binary image
        #          Rover.vision_image[:,:,1] = rock_sample color-thresholded binary image
        #          Rover.vision_image[:,:,2] = navigable terrain color-thresholded binary image
    Rover.vision_image[:,:,0] = obstacle * 255
    Rover.vision_image[:,:,1] = rock * 255
    Rover.vision_image[:,:,2] = color_select * 255

    # 5) Convert map image pixel values to rover-centric coords
    x_pix, y_pix = rover_coords(color_select)
    obsxpix, obsypix = rover_coords(obstacle)
    rockxpix, rockypix = rover_coords(rock)
    # 6) Convert rover-centric pixel values to world coordinates
    world_size = Rover.worldmap.shape[0]
    scale = 2 * dst_size
    xpos, ypos = Rover.pos[0], Rover.pos[1]
    yaw = Rover.yaw

    xpix_world, ypix_world = pix_to_world(x_pix, y_pix, xpos, ypos, yaw, world_size, scale)
    rockxpix_world, rockypix_world = pix_to_world(rockxpix, rockypix, xpos, ypos, yaw, world_size, scale)
    obsxpix_world, obsypix_world = pix_to_world(obsxpix, obsypix, xpos, ypos, yaw, world_size, scale)

    # 7) Update Rover worldmap (to be displayed on right side of screen)
        # Example: Rover.worldmap[obstacle_y_world, obstacle_x_world, 0] += 1
        #          Rover.worldmap[rock_y_world, rock_x_world, 1] += 1
        #          Rover.worldmap[navigable_y_world, navigable_x_world, 2] += 1
    Rover.worldmap[obsypix_world, obsxpix_world, 0] += 255
    Rover.worldmap[rockypix_world, rockxpix_world, 1] += 255
    Rover.worldmap[ypix_world, xpix_world, 2] += 255

    if len(rockxpix) > 5:
        rock_dist, rock_angles = to_polar_coords(rockxpix, rockypix)
        Rover.sample_angles = rock_angles
        Rover.sample_dists = rock_dist
        Rover.sample_seen = True

    # 8) Convert rover-centric pixel positions to polar coordinates
    dist, angles = to_polar_coords(x_pix, y_pix)
    # Update Rover pixel distances and angles
        # Rover.nav_dists = rover_centric_pixel_distances
        # Rover.nav_angles = rover_centric_angles
    Rover.nav_dists = dist
    Rover.nav_angles = angles
    
    return Rover
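# perception_step relies on helpers defined elsewhere in the project; for
# reference, a sketch of the two coordinate helpers as they are conventionally
# written in this rover codebase (assumed, not taken from this file):
def rover_coords_sketch(binary_img):
    # Pixel positions shifted so the rover camera sits at the origin,
    # x pointing forward and y to the left
    ypos, xpos = binary_img.nonzero()
    x_pixel = -(ypos - binary_img.shape[0]).astype(np.float32)
    y_pixel = -(xpos - binary_img.shape[1] / 2).astype(np.float32)
    return x_pixel, y_pixel

def to_polar_coords_sketch(x_pixel, y_pixel):
    # Distance and angle of each rover-centric pixel
    dist = np.sqrt(x_pixel**2 + y_pixel**2)
    angles = np.arctan2(y_pixel, x_pixel)
    return dist, angles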
#Simple Regression Model
#Train data distribution
plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS,  color='blue')
plt.xlabel("Engine size")
plt.ylabel("Emission")
plt.show()
# Modeling
from sklearn import linear_model
regr = linear_model.LinearRegression()
train_x = np.asanyarray(train[['ENGINESIZE']])
train_y = np.asanyarray(train[['CO2EMISSIONS']])
regr.fit (train_x, train_y)
# The coefficients
print ('Coefficients: ', regr.coef_)
print ('Intercept: ',regr.intercept_)

plt.scatter(train.ENGINESIZE, train.CO2EMISSIONS,  color='blue')
plt.plot(train_x, regr.coef_[0][0]*train_x + regr.intercept_[0], '-r')
plt.xlabel("Engine size")
plt.ylabel("Emission")
# Evaluation
from sklearn.metrics import r2_score

test_x = np.asanyarray(test[['ENGINESIZE']])
test_y = np.asanyarray(test[['CO2EMISSIONS']])
test_y_hat = regr.predict(test_x)

print("Mean absolute error: %.2f" % np.mean(np.absolute(test_y_hat - test_y)))
print("Residual sum of squares (MSE): %.2f" % np.mean((test_y_hat - test_y) ** 2))
print("R2-score: %.2f" % r2_score(test_y_hat , test_y) )
    def robustProcess(
        self, numWindows: int, obs: np.ndarray, reg: np.ndarray
    ) -> Tuple[np.ndarray, np.ndarray]:
        """Robust regression processing

        Perform robust regression processing using observations and regressors for a single evaluation frequency. 

        Parameters
        ----------
        numWindows : int
            The number of windows
        obs : np.ndarray
            The observations
        reg : np.ndarray
            The regressors

        Returns
        -------
        output : np.ndarray
            The solution to the regression problem
        varOutput : np.ndarray
            The variance
        """

        # create array for output
        output = np.empty(shape=(self.outSize, self.inSize), dtype="complex")
        varOutput = np.empty(shape=(self.outSize, self.inSize), dtype="float")
        # solve
        for i in range(0, self.outSize):
            observation = obs[i, :]
            predictors = reg[i, :, :]
            # save the output
            out, resids, weights = chatterjeeMachler(
                predictors, observation, intercept=self.intercept
            )
            # out, resids, scale, weights = mmestimateModel(predictors, observation, intercept=self.intercept)

            # now take the weights, apply to the observations and predictors, stack the appropriate rows and test
            observation2 = np.zeros(shape=(self.remoteSize), dtype="complex")
            predictors2 = np.zeros(
                shape=(self.remoteSize, self.inSize), dtype="complex"
            )
            for iChan in range(0, self.remoteSize):
                # now need to have my indexing array
                indexArray = np.arange(
                    iChan, numWindows * self.remoteSize, self.remoteSize
                )
                weightsLim = weights[indexArray]
                # weightsLim = weightsLim/np.sum(weightsLim) # normalise weights to 1
                observation2[iChan] = (
                    np.sum(obs[i, indexArray] * weightsLim) / numWindows
                )
                # now for the regressors
                for j in range(0, self.inSize):
                    predictors2[iChan, j] = (
                        np.sum(reg[i, indexArray, j] * weightsLim) / numWindows
                    )
            out, resids, weights = chatterjeeMachler(
                predictors2, observation2, intercept=self.intercept
            )
            # out, resids, scale, weights = mmestimateModel(predictors2, observation2, intercept=self.intercept)

            # now calculate the variances - we have the solution (out) and the weights:
            # recalculate the residuals with the final solution,
            # estimate a robust standard deviation of the residuals,
            # then use the Chatterjee-Machler formula to estimate the variances.
            # This needs work - an empirical bootstrap would be better, but it will do for now.
            resids = np.absolute(observation - np.dot(predictors, out))
            scale = sampleMAD0(
                resids
            )  # some measure of standard deviation, rather than using the standard deviation
            residsVar = scale * scale
            varPred = np.dot(hermitianTranspose(predictors), weights * predictors)
            varPred = np.linalg.inv(varPred)  # this is a pxp matrix
            varOut = 1.91472 * residsVar * varPred
            varOut = np.diag(varOut).real  # this should be a real number

            if self.intercept:
                output[i] = out[1:]
                varOutput[i] = varOut[1:]
            else:
                output[i] = out
                varOutput[i] = varOut

        return output, varOutput
Exemple #50
 def test_constantEdges(self):
     edge_vec = np.ones(self.mesh.nE)
     assert all(np.absolute(self.mesh.aveE2CC * edge_vec - 1.) < TOL)
     assert all(np.absolute(self.mesh.aveE2CCV * edge_vec - 1.) < TOL)
def train_mfn_phantom(X_train, y_train, X_valid, y_valid, X_test, y_test, configs, mode, save_path):
	p = np.random.permutation(X_train.shape[0])
	X_train = X_train[p]
	y_train = y_train[p]

	X_train = X_train.swapaxes(0,1)
	X_valid = X_valid.swapaxes(0,1)
	X_test = X_test.swapaxes(0,1)

	d = X_train.shape[2]
	h = 128
	t = X_train.shape[0]
	output_dim = 1
	dropout = 0.5

	[config,NN1Config,NN2Config,gamma1Config,gamma2Config,outConfig] = configs

	#model = EFLSTM(d,h,output_dim,dropout)
	model = MFNPhantom(config,NN1Config,NN2Config,gamma1Config,gamma2Config,outConfig, mode)

	#optimizer = optim.SGD(model.parameters(),lr=config["lr"],momentum=config["momentum"])

	# optimizer = optim.SGD([
	#                 {'params':model.lstm_l.parameters(), 'lr':config["lr"]},
	#                 {'params':model.classifier.parameters(), 'lr':config["lr"]}
	#             ], momentum=0.9)

	device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
	model = model.to(device)

	optimizer = optim.Adam(model.parameters(),lr=config["lr"])
	scheduler = ReduceLROnPlateau(optimizer,mode='min',patience=100,factor=0.5,verbose=True)


	best_valid = 999999.0
	rand = random.randint(0,100000)



	print('model number is:', rand)
	model = torch.load('{}/mfn_phantom_{}.pt'.format(save_path, args.hparam_iter))
	model.eval()
	#model = copy.deepcopy(best_model).cpu().gpu()

	#model.calc_phantom_corr(config["batchsize"], X_valid, y_valid, 'after')

	for split in ['train', 'valid', 'test']:

		if split == 'train':
			X, y = X_train, y_train
		elif split == 'valid':
			X, y = X_valid, y_valid
		else:
			X, y = X_test, y_test

		predictions = model.predict(X)
		mae = np.mean(np.absolute(predictions-y))
		print split, "mae: ", mae
		corr = np.corrcoef(predictions,y)[0][1]
		print split, "corr: ", corr
		mult = round(sum(np.round(predictions)==np.round(y))/float(len(y)),5)
		#print split, "mult_acc: ", mult
		f_score = round(f1_score(np.round(predictions),np.round(y),average='weighted'),5)
		#print split, "mult f_score: ", f_score
		true_label = (y >= 0)
		predicted_label = (predictions >= 0)
		#print split, "Confusion Matrix :"
		#print confusion_matrix(true_label, predicted_label)
		#print split, "Classification Report :"
		#print classification_report(true_label, predicted_label, digits=5)
		#print split, "Accuracy ", accuracy_score(true_label, predicted_label)
	sys.stdout.flush()
Exemple #52
def fit_Lorentzian(data_dir, primary_bounds, seeded_bounds):

    x = np.load(data_dir + 'freqs.npy')
    y = np.load(data_dir + 'PSD_data.npy')

    peaks, _ = scipy.signal.find_peaks(y, distance=5)

    pLboundInd = np.argmin(np.absolute(x / 1.e6 - primary_bounds[0]))
    pRboundInd = np.argmin(np.absolute(x / 1.e6 - primary_bounds[1]))
    pPeakInd = np.argmax(y[pLboundInd:pRboundInd])

    sLboundInd = np.argmin(np.absolute(x / 1.e6 - seeded_bounds[0]))
    sRboundInd = np.argmin(np.absolute(x / 1.e6 - seeded_bounds[1]))
    sPeakInd = np.argmax(y[sLboundInd:sRboundInd])

    #    tP = np.linspace(x[pLboundInd],x[pRboundInd],1000)/1e6
    #    tS = np.linspace(x[sLboundInd],x[sRboundInd],1000)/1e6

    #    pBounds = ([0., 0., 0.],[np.inf, np.inf, np.inf])
    #    sBounds = ([0., 0., 0.],[np.inf, np.inf, np.inf])

    #
    bothBounds = (0., np.inf)  # Test Dual Fit

    #    p0P = [y[pLboundInd:pRboundInd][pPeakInd], 10., x[pLboundInd:pRboundInd][pPeakInd]]
    #    p0S = [y[sLboundInd:sRboundInd][sPeakInd], 10., x[sLboundInd:sRboundInd][sPeakInd]]

    #
    p0Both = [
        y[pLboundInd:pRboundInd][pPeakInd], 2.,
        x[pLboundInd:pRboundInd][pPeakInd], y[sLboundInd:sRboundInd][sPeakInd],
        2., x[sLboundInd:sRboundInd][sPeakInd], 23.
    ]  # Test Dual Fit

    #    poptP, pcovP = scipy.optimize.curve_fit(lorentzian, x[pLboundInd:pRboundInd], y[pLboundInd:pRboundInd], p0 = p0P, bounds = bothBounds)

    #    poptS, pcovS = scipy.optimize.curve_fit(lorentzian, x[sLboundInd:sRboundInd], y[sLboundInd:sRboundInd], p0 = p0S, bounds = bothBounds)

    #
    poptBoth, pcovBoth = scipy.optimize.curve_fit(
        double_lorentzian, x, y, p0=p0Both, bounds=bothBounds)  # Test Dual Fit

    #    poptP = np.array(poptP)
    #    poptS = np.array(poptS)

    #
    poptBoth = np.array(poptBoth)  # Test Dual Fit
    p_sigma = np.sqrt(np.diag(pcovBoth))

    #    poptP[1:] *= 1.e-6
    #    poptS[1:] *= 1.e-6

    #
    poptBoth[1:3] *= 1.e-6  # Test Dual Fit
    poptBoth[4:6] *= 1.e-6  # Test Dual Fit
    p_sigma[1:3] *= 1.e-6
    p_sigma[4:6] *= 1.e-6

    #    plt.plot(tP, lorentzian(tP, *poptP), c='purple')
    #    plt.plot(tS, lorentzian(tS, *poptS), c='green')
    #    plt.show(block=True)

    #
    return [
        poptBoth, p_sigma
    ]  #[poptP, pcovP], [poptS, pcovS], [poptBoth, pcovBoth] # Test Dual Fit
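# lorentzian and double_lorentzian are not shown in this snippet; a sketch
# consistent with the initial-guess vectors above (p0 = [amplitude, width,
# centre] per peak, plus a constant offset for the dual fit) might be:
def lorentzian_sketch(x, a, w, c):
    return a * (0.5 * w)**2 / ((x - c)**2 + (0.5 * w)**2)

def double_lorentzian_sketch(x, a1, w1, c1, a2, w2, c2, offset):
    return (lorentzian_sketch(x, a1, w1, c1)
            + lorentzian_sketch(x, a2, w2, c2) + offset)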
def power_spectrum(waves, n=None):
    if n is None:
        n = waves.shape[-1] 

    mag_spec = np.absolute(np.fft.rfft(waves, n))
    return 1.0 / n * np.square(mag_spec)
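# Quick sanity check (sketch): a pure sine concentrates its power in a single
# rfft bin, here bin 8 for an 8-cycle sine over 128 samples.
fs_test = 128
t_test = np.arange(fs_test) / float(fs_test)
spec_test = power_spectrum(np.sin(2.0 * np.pi * 8.0 * t_test))
print(np.argmax(spec_test))  # expected: 8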
def ecc_plot(aryMean, vecEccBin, strPathOut):
    """
    Plot results for eccentricity & cortical depth analysis.

    This version plots the values using two separate colourmaps for negative
    and positive values.

    Plots statistical parameters (e.g. parameter estimates) by cortical depth
    (x-axis) and pRF eccentricity (y-axis). This function is part of a tool for
    analysis of cortical-depth-dependent fMRI responses at different
    retinotopic eccentricities.
    """
    # Number of eccentricity bins:
    varEccNum = vecEccBin.shape[0]

    # Font type:
    strFont = 'Liberation Sans'

    # Font colour:
    vecFontClr = np.array([17.0/255.0, 85.0/255.0, 124.0/255.0])

    # Find minimum and maximum correlation values:
    varMin = np.percentile(aryMean, 2.5)
    varMax = np.percentile(aryMean, 97.5)

    # Round down / up to the nearest multiple of 10:
    varMin = (np.floor(varMin * 0.1) / 0.1)
    varMax = (np.ceil(varMax * 0.1) / 0.1)

    # Same scale for negative and positive colour bar:
    if np.greater(np.absolute(varMin), varMax):
        varMax = np.absolute(varMin)
    else:
        varMin = np.multiply(-1.0, np.absolute(varMax))

    # Fixed axis limits for comparing plots across conditions/ROIs:
    # varMin = -400.0
    # varMax = 400.0

    # Create main figure:
    fig01 = plt.figure(figsize=(4.0, 3.0),
                       dpi=200.0,
                       facecolor=([1.0, 1.0, 1.0]),
                       edgecolor=([1.0, 1.0, 1.0]))

    # Big subplot in the background for common axes labels:
    axsCmn = fig01.add_subplot(111)

    # Turn off axis lines and ticks of the big subplot:
    axsCmn.spines['top'].set_color('none')
    axsCmn.spines['bottom'].set_color('none')
    axsCmn.spines['left'].set_color('none')
    axsCmn.spines['right'].set_color('none')
    axsCmn.tick_params(labelcolor='w',
                       top=False,
                       bottom=False,
                       left=False,
                       right=False)

    # Set and adjust common axes labels:
    axsCmn.set_xlabel('Cortical depth',
                      alpha=1.0,
                      fontname=strFont,
                      fontweight='normal',
                      fontsize=7.0,
                      color=vecFontClr,
                      position=(0.5, 0.0))
    axsCmn.set_ylabel('pRF eccentricity',
                      alpha=1.0,
                      fontname=strFont,
                      fontweight='normal',
                      fontsize=7.0,
                      color=vecFontClr,
                      position=(0.0, 0.5))
    axsCmn.set_title('fMRI signal change',
                     alpha=1.0,
                     fontname=strFont,
                     fontweight='bold',
                     fontsize=10.0,
                     color=vecFontClr,
                     position=(0.5, 1.1))

    # Create colour-bar axis:
    axsTmp = fig01.add_subplot(111)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # Number of colour increments:
    varNumClr = 20

    # Colour values for the first colormap (used for negative values):
    aryClr01 = plt.cm.PuBu(np.linspace(0.1, 1.0, varNumClr))

    # Invert the first colour map:
    aryClr01 = np.flipud(np.array(aryClr01, ndmin=2))

    # Colour values for the second colormap (used for positive values):
    aryClr02 = plt.cm.OrRd(np.linspace(0.1, 1.0, varNumClr))

    # Combine negative and positive colour arrays:
    aryClr03 = np.vstack((aryClr01, aryClr02))

    # Create new custom colormap, combining two default colormaps:
    objCustClrMp = colors.LinearSegmentedColormap.from_list('custClrMp',
                                                            aryClr03)

    # Lookup vector for negative colour range:
    vecClrRngNeg = np.linspace(varMin, 0.0, num=varNumClr)

    # Lookup vector for positive colour range:
    vecClrRngPos = np.linspace(0.0, varMax, num=varNumClr)

    # Stack lookup vectors:
    vecClrRng = np.hstack((vecClrRngNeg, vecClrRngPos))

    # 'Normalize' object, needed to use custom colour maps and lookup table
    # with matplotlib:
    objClrNorm = colors.BoundaryNorm(vecClrRng, objCustClrMp.N)

    # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

    # Plot correlation coefficients of current depth level:
    pltTmpCorr = plt.imshow(aryMean,
                            interpolation='nearest',  # 'none',  # 'bicubic',
                            origin='lower',
                            norm=objClrNorm,
                            cmap=objCustClrMp)

    # Position of labels for the x-axis:
    vecXlblsPos = np.array([0, (aryMean.shape[1] - 1)])
    # Set position of labels for the x-axis:
    axsTmp.set_xticks(vecXlblsPos)
    # Create list of strings for labels:
    lstXlblsStr = ['WM', 'CSF']
    # Set the content of the labels (i.e. strings):
    axsTmp.set_xticklabels(lstXlblsStr,
                           alpha=0.9,
                           fontname=strFont,
                           fontweight='bold',
                           fontsize=8.0,
                           color=vecFontClr)

    # Position of labels for the y-axis:
    vecYlblsPos = np.arange(-0.5, (varEccNum - 0.5), 1.0)
    # Set position of labels for the y-axis:
    axsTmp.set_yticks(vecYlblsPos)
    # Create list of strings for labels:
    # lstYlblsStr = map(str,
    #                   np.around(vecEccBin, decimals=1)
    #                   )
    lstYlblsStr = [str(x) for x in np.around(vecEccBin, decimals=1)]
    # Set the content of the labels (i.e. strings):
    axsTmp.set_yticklabels(lstYlblsStr,
                           alpha=0.9,
                           fontname=strFont,
                           fontweight='bold',
                           fontsize=8.0,
                           color=vecFontClr)

    # Turn off ticks:
    axsTmp.tick_params(labelcolor=([0.0, 0.0, 0.0]),
                       top=False,
                       bottom=False,
                       left=False,
                       right=False)

    # We create invisible axes for the colour bar slightly to the right of the
    # position of the last data-axes. First, retrieve position of last
    # data-axes:
    objBbox = axsTmp.get_position()
    # We slightly adjust the x-position of the colour-bar axis, by shifting
    # them to the right:
    vecClrAxsPos = np.array([(objBbox.x0 * 7.5),
                             objBbox.y0,
                             objBbox.width,
                             objBbox.height])
    # Create colour-bar axis:
    axsClr = fig01.add_axes(vecClrAxsPos,
                            frameon=False)

    # Add colour bar:
    pltClrbr = fig01.colorbar(pltTmpCorr,
                              ax=axsClr,
                              fraction=1.0,
                              shrink=1.0)

    # The values to be labeled on the colour bar:
    # vecClrLblsPos01 = np.arange(varMin, 0.0, 10)
    # vecClrLblsPos02 = np.arange(0.0, varMax, 100)
    vecClrLblsPos01 = np.linspace(varMin, 0.0, num=3)
    vecClrLblsPos02 = np.linspace(0.0, varMax, num=3)
    vecClrLblsPos = np.hstack((vecClrLblsPos01, vecClrLblsPos02))

    # The labels (strings):
    # vecClrLblsStr = map(str, vecClrLblsPos)
    vecClrLblsStr = [str(x) for x in vecClrLblsPos]

    # Set labels on colour bar:
    pltClrbr.set_ticks(vecClrLblsPos)
    pltClrbr.set_ticklabels(vecClrLblsStr)
    # Set font size of colour bar ticks, and remove the 'spines' on the right
    # side:
    pltClrbr.ax.tick_params(labelsize=8.0,
                            tick2On=False)

    # Make colour-bar axis invisible:
    axsClr.axis('off')

    # Save figure:
    fig01.savefig(strPathOut,
                  dpi=160.0,
                  facecolor='w',
                  edgecolor='w',
                  orientation='landscape',
                  bbox_inches='tight',
                  pad_inches=0.2,
                  transparent=False,
                  frameon=None)
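# Minimal usage sketch for ecc_plot (assumed shapes: one row per eccentricity
# bin, one column per cortical depth level):
# aryMean = np.random.randn(8, 11)
# vecEccBin = np.linspace(0.5, 8.0, 8)
# ecc_plot(aryMean, vecEccBin, '/tmp/ecc_plot.png')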
def birds_eye_height_slices(
        points,
        n_slices=8,
        height_range=(-2.73, 1.27),
        side_range=(-10, 10),
        fwd_range=(-10, 10),
        res=0.1,
        time_stamp=None,
):
    """ Creates an array that is a birds eye view representation of the
        reflectance values in the point cloud data, separated into different
        height slices.

    Args:
        points:     (numpy array)
                    Nx4 array of the points cloud data.
                    N rows of points. Each point represented as 4 values,
                    x,y,z, reflectance
        n_slices :  (int)
                    Number of height slices to use.
        height_range: (tuple of two floats)
                    (min, max) heights (in metres) relative to the sensor.
                    The slices calculated will be within this range, plus
                    two additional slices for clipping all values below the
                    min, and all values above the max.
                    Default is set to (-2.73, 1.27), which corresponds to a
                    range of -1m to 3m above a flat road surface given the
                    configuration of the sensor in the Kitti dataset.
        side_range: (tuple of two floats)
                    (-left, right) in metres
                    Left and right limits of rectangle to look at.
                    Defaults to 10m on either side of the car.
        fwd_range:  (tuple of two floats)
                    (-behind, front) in metres
                    back and front limits of rectangle to look at.
                    Defaults to 10m behind and 10m in front.
        res:        (float) desired resolution in metres to use.
                    Each output pixel will represent a square region res x res
                    in size along the front and side plane.
        time_stamp: (float, optional)
                    Timestamp used to look up the nearest tracklet entry for
                    the returned bounding box.
    """
    x_points = points[:, 0]
    y_points = points[:, 1]
    z_points = points[:, 2]
    i_points = points[:, 3]  # Reflectance

    # FILTER INDICES - of only the points within the desired rectangle
    # Note left side is positive y axis in LIDAR coordinates
    ff = np.logical_and((x_points > fwd_range[0]), (x_points < fwd_range[1]))
    ss = np.logical_and((y_points > -side_range[1]),
                        (y_points < -side_range[0]))
    indices = np.argwhere(np.logical_and(ff, ss)).flatten()

    # KEEPERS - The actual points that are within the desired  rectangle
    y_points = y_points[indices]
    x_points = x_points[indices]
    z_points = z_points[indices]
    i_points = i_points[indices]

    # CONVERT TO PIXEL POSITION VALUES - Based on resolution
    x_img = (-y_points / res).astype(np.int32)  # x axis is -y in LIDAR
    y_img = (x_points / res).astype(np.int32)  # y axis is -x in LIDAR
    # direction to be inverted later
    # SHIFT PIXELS TO HAVE MINIMUM BE (0,0)
    # floor used to prevent issues with -ve vals rounding upwards
    x_img -= int(np.floor(side_range[0] / res))
    y_img -= int(np.floor(fwd_range[0] / res))

    # ASSIGN EACH POINT TO A HEIGHT SLICE
    # n_slices-1 is used because values above max_height get assigned to an
    # extra index when we call np.digitize().
    bins = np.linspace(height_range[0], height_range[1], num=n_slices - 1)
    slice_indices = np.digitize(z_points, bins=bins, right=False)

    # RESCALE THE REFLECTANCE VALUES - to be between the range 0-255
    pixel_values = scale_to_255(i_points, min=0.0, max=255.0)

    # FILL PIXEL VALUES IN IMAGE ARRAY
    # -y is used because images start from top left
    x_max = int((side_range[1] - side_range[0]) / res)
    y_max = int((fwd_range[1] - fwd_range[0]) / res)
    im = np.zeros([y_max, x_max, n_slices], dtype=np.uint8)
    im[-y_img, x_img, slice_indices] = pixel_values
    #for i in range(0,8):
    #cv2.imshow("image_raw %d"%(0), cv2.pyrDown(im[:,:,0]))

    minx = float('inf')
    count = 0
    tracklet = featurerecord.tracklet

    for i in range(1, len(tracklet)):
        if np.absolute(tracklet[i][0] - time_stamp
                       ) < minx:  #and tracklet[i][0] - time_stamp >= 0:
            minx = np.absolute(tracklet[i][0] - time_stamp)
            count = i
    tracklet = np.vstack((tracklet[:12], tracklet))
    if count < 12:
        count = 1
    #tracklet =  tracklet[12:]
    centPoint = np.array(
        [tracklet[count][1], tracklet[count][2], tracklet[count][3]])
    print(centPoint)
    #obslwh = np.array([4.2418, 1.4478, 1.5748])
    #obslwh = np.array([4.191, 1.5748, 1.524])
    obslwh = np.array([4.5212, 1.7018, 1.397])
    obslwh += np.array([1, 0.8, 0.4])
    obslwh = obslwh / 2

    x = np.array([centPoint[1] - obslwh[1], centPoint[1] + obslwh[1]])
    y = np.array([centPoint[0] - obslwh[0], centPoint[0] + obslwh[0]])
    xp = (-x / res).astype(np.int32)  # x axis is -y in LIDAR
    yp = (-y / res).astype(np.int32)  # y axis is -x in LIDAR

    xp -= int(np.floor(side_range[0] / res))
    yp -= int(np.floor(fwd_range[0] / res))

    xp = np.clip(xp, 0, x_max)
    yp = np.clip(yp, 0, y_max)

    boundingbox = np.array([
        xp[1], yp[1], xp[0], yp[0], centPoint[0], centPoint[1], centPoint[2],
        obslwh[0], obslwh[1], obslwh[2]
    ],
                           dtype=float)  # np.float is removed in newer numpy

    return im, boundingbox
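# scale_to_255 is used above but defined elsewhere; a sketch of the usual
# implementation in this kind of birds-eye-view code (assumed):
def scale_to_255_sketch(a, min, max, dtype=np.uint8):
    """Scales an array of values from [min, max] to [0, 255]."""
    return (((a - min) / float(max - min)) * 255).astype(dtype)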
Exemple #56
        newD = newD['fibergraph']
        newD = newD.todense()

        print(newD)

        #newD = np.delete(newD,0,1)
        #newD = np.delete(newD,0,0)

        oldD = genfromtxt(old_file, delimiter=' ')
        oldD = np.delete(oldD, 0, 1)
        oldD = np.delete(oldD, 0, 0)

        diff = np.subtract(newD, oldD)

        absdiff = np.absolute(diff)

        result = np.divide(absdiff, oldD)

        resultArray = np.add(diff, resultArray)

        #np.savetxt((outputDir + "analysis" + str(counter) + ".csv"), result, delimiter=",")
        #counter = counter + 1

        tempRow = list()

        subject = os.path.realpath(new_file)

        subject = subject[(subject.rfind('/') + 1):]
        subject = subject[:10]
        tempRow.append(subject)
def chapter10and11(bAwImg):

    #finds the edges in the image
    #this edge detection didn't work very well
    #too sensitive
    # edgedImg = cv2.Canny(bAwImg, 5, 250)
    # cv2.imshow("canny edge", edgedImg)

    image = bAwImg
    #image, data type, and then derivatives
    #1,0 for vertical edges
    #0,1 for horizontal edges
    #this works better for edges
    sobelX = cv2.Sobel(image, cv2.CV_64F, 1, 0)
    sobelY = cv2.Sobel(image, cv2.CV_64F, 0, 1)
    sobelX = np.uint8(np.absolute(sobelX))
    sobelY = np.uint8(np.absolute(sobelY))

    #or so that all edges are included
    comb = cv2.bitwise_or(sobelX, sobelY)
    #reverse it
    comb = cv2.bitwise_not(comb)
    cv2.imshow("edges with sobel", comb)
    cv2.waitKey()

    image = comb.copy()

    count1 = perf_counter()

    #LOOP THROUGH MANUALLY
    h, w = comb.shape[:2]

    #left edge finder
    leftCounter = 0
    leftTotal = 0
    for y in range(h):
        #try is to find the other side of an edge
        TRY = False
        #found is for confirming a find
        FOUND = False
        for x in range(w):
            #goes downwards toward the right
            color = comb[y - 1, x - 1]
            #if it finds a black, it will be ready to search
            if color > 240 and TRY == False and FOUND == False:
                TRY = True
                # print(x-1)
            if TRY == True and FOUND == False:
                #will search for the white part of the line to see if it is indeed an edge
                for n in range(5):
                    if x + n < w:
                        color = comb[y - 1, x + n]
                        if color < 240:
                            FOUND = True
                            leftCounter += x - 1
                            leftTotal += 1.0
                            break
            #once the edge is found, this row's scan ends
            if FOUND == True:
                break

    leftAvg = leftCounter / leftTotal
    print(f"left bound = {leftAvg}")

    #right edge finder
    rightCounter = 0
    rightTotal = 0
    for y in range(h):
        #try is to find the other side of an edge
        TRY = False
        #found is for confirming a find
        FOUND = False
        for x in range(w):
            #goes downwards toward the left
            color = comb[y - 1, w - x - 1]
            #if it finds a black, it will be ready to search
            if color > 240 and TRY == False and FOUND == False:
                TRY = True
            if TRY == True and FOUND == False:
                #will search for the white part of the line to see if it is indeed an edge
                for n in range(5):
                    if w - x - n > 0:
                        color = comb[y - 1, w - x - n - 1]
                        if color < 240:
                            FOUND = True
                            rightCounter += w - x - 1
                            rightTotal += 1.0
                            break
            #once the edge is found, this row's scan ends
            if FOUND == True:
                break

    rightAvg = rightCounter / rightTotal
    print(f"right bound = {rightAvg}")

    #top edge finder
    topCounter = 0
    topTotal = 0
    for x in range(w):
        #try is to find the other side of an edge
        TRY = False
        #found is for confirming a find
        FOUND = False
        for y in range(h):
            #goes rightwards toward the bottom
            color = comb[y - 1, x - 1]
            #if it finds a black, it will be ready to search
            if color > 240 and TRY == False and FOUND == False:
                TRY = True
            if TRY == True and FOUND == False:
                #will search for the white part of the line to see if it is indeed an edge
                for n in range(5):
                    if y + n < h:
                        color = comb[y + n, x - 1]
                        if color < 240:
                            FOUND = True
                            topCounter += y - 1
                            topTotal += 1.0
                            break
            #once the edge is found, this column's scan ends
            if FOUND == True:
                break

    topAvg = topCounter / topTotal
    print(f"top bound = {topAvg}")

    #bottom edge finder
    bottomCounter = 0
    bottomTotal = 0
    for x in range(w):
        #try is to find the other side of an edge
        TRY = False
        #found is for confirming a find
        FOUND = False
        for y in range(h):
            #goes rightwards toward the bottom
            color = comb[h - y - 1, x - 1]
            #if it finds a black, it will be ready to search
            if color > 240 and TRY == False and FOUND == False:
                TRY = True
            if TRY == True and FOUND == False:
                #will search for the white part of the line to see if it is indeed an edge
                for n in range(5):
                    if h - y - n > 0:
                        color = comb[h - y - n - 1, x - 1]
                        if color < 240:
                            FOUND = True
                            bottomCounter += h - y - 1
                            bottomTotal += 1.0
                            break
            #once the edge is found, this column's scan ends
            if FOUND == True:
                break

    bottomAvg = bottomCounter / bottomTotal
    print(f"bottom bound = {bottomAvg}")

    count2 = perf_counter()

    time = count2 - count1
    print(f"total time without multiprocessing: {time}")

    #https://stackoverflow.com/questions/32404825/how-to-run-multiple-functions-at-same-time
    executors_list = []

    cs = [[comb, "top"], [comb, "bottom"], [comb, "left"], [comb, "right"]]
    with ThreadPoolExecutor(max_workers=4) as executor:
        executors_list.append(executor.submit(edgeFind, cs[0]))
        executors_list.append(executor.submit(edgeFind, cs[1]))
        executors_list.append(executor.submit(edgeFind, cs[2]))
        executors_list.append(executor.submit(edgeFind, cs[3]))

    # for x in executors_list:
    #     print(x.result())

    count3 = perf_counter()

    time = count3 - count2
    print(f"total time with multithreading: {time}")

    cs = [[comb, "top"], [comb, "bottom"], [comb, "left"], [comb, "right"]]
    with Pool(processes=4, maxtasksperchild=1) as pool:
        results = pool.map(edgeFind, cs)
        pool.close()
        pool.join()

    # print(results)

    count4 = perf_counter()

    time = count4 - count3
    print(f"total time with multitprocessing: {time}")

    # cv2.imshow("cropped homework", image)
    #crops the image using the edges found
    y2 = int(bottomAvg)
    y1 = int(topAvg)
    x1 = int(leftAvg)
    x2 = int(rightAvg)
    croppedImg = image[y1:y2, x1:x2]
    cv2.imshow("cropped homework", croppedImg)

    cv2.waitKey()
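# edgeFind is submitted to the thread/process pools above but not defined in
# this snippet; a sketch of a matching worker (assumed contract: a single
# [image, side] argument, returning the averaged bound for that side):
def edgeFind_sketch(args):
    img, side = args
    # Reorient so every case reduces to a left-to-right row scan
    if side in ("top", "bottom"):
        img = img.T
    if side in ("right", "bottom"):
        img = img[:, ::-1]
    h, w = img.shape[:2]
    counter, total = 0, 0.0
    for y in range(h):
        for x in range(w):
            if img[y, x] > 240:
                # confirm the edge: a darker pixel within the next few columns
                if any(img[y, x + n] < 240 for n in range(1, 5) if x + n < w):
                    counter += x
                    total += 1.0
                    break
    if total == 0.0:
        return side, None
    avg = counter / total
    if side in ("right", "bottom"):
        avg = (w - 1) - avg  # map back to original image coordinates
    return side, avg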
def gsr_preprocessing(signals):
    ''' Preprocessing for GSR signals '''
    der_signals = np.gradient(signals)
    con_signals = 1.0 / signals
    nor_con_signals = (con_signals -
                       np.mean(con_signals)) / np.std(con_signals)

    mean = np.mean(signals)
    der_mean = np.mean(der_signals)
    neg_der_mean = np.mean(der_signals[der_signals < 0])
    neg_der_pro = float(der_signals[der_signals < 0].size) / float(
        der_signals.size)

    local_min = 0
    for i in range(signals.shape[0] - 1):
        if i == 0:
            continue
        if signals[i - 1] > signals[i] and signals[i] < signals[i + 1]:
            local_min += 1

    # Using SC calculates rising time
    det_nor_signals, trend = detrend(nor_con_signals)
    lp_det_nor_signals = butter_lowpass_filter(det_nor_signals, 0.5, 128.)
    der_lp_det_nor_signals = np.gradient(lp_det_nor_signals)

    rising_time = 0
    rising_cnt = 0
    for i in range(der_lp_det_nor_signals.size - 1):
        if der_lp_det_nor_signals[i] > 0:
            rising_time += 1
            if der_lp_det_nor_signals[i + 1] < 0:
                rising_cnt += 1

    avg_rising_time = rising_time * (1. / 128.) / rising_cnt if rising_cnt != 0 else 0.

    freqs, power = getfreqs_power(signals,
                                  fs=128.,
                                  nperseg=signals.size,
                                  scaling='spectrum')
    power_0_24 = []
    for i in range(12):
        power_0_24.append(
            getBand_Power(freqs,
                          power,
                          lower=0 + (i * 0.2),
                          upper=0.2 + (i * 0.2)))

    SCSR, _ = detrend(butter_lowpass_filter(nor_con_signals, 0.2, 128.))
    SCVSR, _ = detrend(butter_lowpass_filter(nor_con_signals, 0.08, 128.))

    zero_cross_SCSR = 0
    zero_cross_SCVSR = 0
    peaks_cnt_SCSR = 0
    peaks_cnt_SCVSR = 0
    peaks_value_SCSR = 0.
    peaks_value_SCVSR = 0.

    zc_idx_SCSR = np.array([], int)  # must be int, otherwise it will be float
    zc_idx_SCVSR = np.array([], int)
    for i in range(nor_con_signals.size - 1):
        if SCSR[i] * next((j for j in SCSR[i + 1:] if j != 0), 0) < 0:
            zero_cross_SCSR += 1
            zc_idx_SCSR = np.append(zc_idx_SCSR, i + 1)
        if SCVSR[i] * next((j for j in SCVSR[i + 1:] if j != 0), 0) < 0:
            zero_cross_SCVSR += 1
            zc_idx_SCVSR = np.append(zc_idx_SCVSR, i)

    for i in range(zc_idx_SCSR.size - 1):
        peaks_value_SCSR += np.absolute(
            SCSR[zc_idx_SCSR[i]:zc_idx_SCSR[i + 1]]).max()
        peaks_cnt_SCSR += 1
    for i in range(zc_idx_SCVSR.size - 1):
        peaks_value_SCVSR += np.absolute(
            SCVSR[zc_idx_SCVSR[i]:zc_idx_SCVSR[i + 1]]).max()
        peaks_cnt_SCVSR += 1

    zcr_SCSR = zero_cross_SCSR / (nor_con_signals.size / 128.)
    zcr_SCVSR = zero_cross_SCVSR / (nor_con_signals.size / 128.)

    mean_peak_SCSR = peaks_value_SCSR / peaks_cnt_SCSR if peaks_cnt_SCSR != 0 else 0
    mean_peak_SCVSR = peaks_value_SCVSR / peaks_cnt_SCVSR if peaks_cnt_SCVSR != 0 else 0

    features = [
        mean, der_mean, neg_der_mean, neg_der_pro, local_min, avg_rising_time
    ] + power_0_24 + [zcr_SCSR, zcr_SCVSR, mean_peak_SCSR, mean_peak_SCVSR]
    return features
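# butter_lowpass_filter (like detrend, getfreqs_power and getBand_Power) is
# defined elsewhere; a sketch of the low-pass helper with scipy, assuming
# zero-phase Butterworth filtering with cutoff and fs in Hz:
from scipy import signal as sp_signal

def butter_lowpass_filter_sketch(data, cutoff, fs, order=5):
    nyq = 0.5 * fs
    b, a = sp_signal.butter(order, cutoff / nyq, btype='low')
    return sp_signal.filtfilt(b, a, data)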
def PPsubroutine(C, C2, C3, b, angdistancem, angdistancep, vmax, speedlim, predec, succ, N, M, index):
    # C, C2, C3 are constants in the penalization function
    # angdistancem = $\delta_{c^-c}$
    # angdistancep = $\delta_{cc^+}$
    # vmax = maximum leaf speed
    # speedlim = s
    # predec = predecesor index, either an index or an empty list
    # succ = succesor index, either an index or an empty list
    # lcm = vector of left limits in the previous aperture
    # lcp = vector of left limits in the next aperture
    # rcm = vector of right limits in the previous aperture
    # rcp = vector of right limits in the previous aperture
    # N = Number of beamlets per row
    # M = Number of rows in an aperture
    # index = index location in the set of apertures that I have saved.

    posBeginningOfRow = 0
    D = Dlist[index]

    # vmaxm and vmaxp describe the speeds that are possible for the leaves from the predecessor and to the successor
    vmaxm = vmax
    vmaxp = vmax
    # Arranging the predecessors and the succesors.
    #Predecessor left and right indices
    if type(predec) is list:
        lcm = [0] * M
        rcm = [N] * M
        # If there is no predecessor, it is as if the pred. speed was infinite
        vmaxm = float("inf")
    else:
        lcm = data.llist[predec]
        rcm = data.rlist[predec]

    #Succesors left and right indices
    if type(succ) is list:
        lcp = [0] * M
        rcp = [N] * M
        # If there is no successor, it is as if the succ. speed was infinite.
        vmaxp = float("inf")
    else:
        lcp = data.llist[succ]
        rcp = data.rlist[succ]

    # Find geographical location of the first row.
    geolocX = data.xinter[0]
    # Find all possible locations of beamlets in this row according to geographical location
    indys = np.where(geolocX == data.xdirection[index])
    ys = data.ydirection[index][indys]
    validbeamlets = np.in1d(data.yinter, ys)
    validbeamlets = np.array(range(0, len(data.yinter)))[validbeamlets]
    # Keep the location of the leftmost leaf
    leftmostleaf = len(ys)  # 0-based position of the leftmost leaf
    nodesinpreviouslevel = 0
    oldflag = nodesinpreviouslevel
    # First handle the calculations for the first row

    beamGrad = D * data.voxelgradient

    nodesinpreviouslevel = 0
    posBeginningOfRow = 0
    thisnode = 0
    # Max beamlets per row
    bpr = 50
    networkNodesNumber = bpr * bpr + M * bpr * bpr + bpr * bpr # An overestimate of the network nodes in this network
    # Initialization of network vectors. This used to be a list before
    lnetwork = np.zeros(networkNodesNumber, dtype = int) #left limit vector
    rnetwork = np.zeros(networkNodesNumber, dtype = int) #right limit vector
    mnetwork = np.ones(networkNodesNumber, dtype = int) #Only to save some time in the first loop
    wnetwork = np.zeros(networkNodesNumber, dtype = float) # Weight Vector
    dadnetwork = np.zeros(networkNodesNumber, dtype = int) # Dad Vector. Where Dad is the combination of (l,r) in previous row
    # Work on the first row perimeter and area values
    for l in range(math.ceil(max(min(validbeamlets) - 1,
                                 lcm[0] - vmaxm * angdistancem / speedlim,
                                 lcp[0] - vmaxp * angdistancep / speedlim)),
                   math.floor(min(max(validbeamlets),
                                  lcm[0] + vmaxm * angdistancem / speedlim,
                                  lcp[0] + vmaxp * angdistancep / speedlim))):
        for r in range(math.ceil(max(l + 1,
                                     rcm[0] - vmaxm * angdistancem / speedlim,
                                     rcp[0] - vmaxp * angdistancep / speedlim)),
                       math.floor(min(max(validbeamlets) + 1,
                                      rcm[0] + vmaxm * angdistancem / speedlim,
                                      rcp[0] + vmaxp * angdistancep / speedlim))):
            thisnode = thisnode + 1
            nodesinpreviouslevel = nodesinpreviouslevel + 1
            # First I have to make sure to add the beamlets that I am interested in
            if(l + 1 <= r -1): # prints r numbers starting from l + 1. So range(3,4) = 3
                # Dose = -sum( D[[i for i in range(l+1, r)],:] * data.voxelgradient)
                Dose = -beamGrad[l+1:r].sum()
                weight = C * ( C2 * (r - l) - C3 * b * (r - l)) - Dose
            else:
                weight = 0.0
            # Create node (1,l,r) in array of existing nodes and update the counter
            # Replace the following expression
            # networkNodes.append([1, l, r, weight, 0])
            lnetwork[thisnode] = l
            rnetwork[thisnode] = r
            wnetwork[thisnode] = weight
            # dadnetwork and mnetwork don't need to be changed here for obvious reasons

    posBeginningOfRow = posBeginningOfRow + nodesinpreviouslevel
    mystart = time.time()
    
    # Then handle the calculations for the m rows. Nodes that are neither source nor sink.
    for m in range(2,M):
        # Show time taken per row
        myend   =  time.time()
        mystart = myend
        # Find geographical location of this row.
        geolocX = data.xinter[m-1]
        # Find all possible locations of beamlets in this row according to geography
        indys = np.where(geolocX == data.xdirection[index])
        ys = data.ydirection[index][indys]
        validbeamlets = np.in1d(data.yinter, ys)
        validbeamlets = np.array(range(0, len(data.yinter)))[validbeamlets]
        oldflag = nodesinpreviouslevel
        nodesinpreviouslevel = 0

        # And now process normally checking against valid beamlets
        for l in range(math.ceil(max(min(validbeamlets) - 1,
                                     lcm[m] - vmaxm * angdistancem / speedlim,
                                     lcp[m] - vmaxp * angdistancep / speedlim)),
                       math.floor(min(max(validbeamlets),
                                      lcm[m] + vmaxm * angdistancem / speedlim,
                                      lcp[m] + vmaxp * angdistancep / speedlim))):
            for r in range(math.ceil(max(l + 1,
                                         rcm[m] - vmaxm * angdistancem / speedlim,
                                         rcp[m] - vmaxp * angdistancep / speedlim)),
                           math.floor(min(max(validbeamlets) + 1,
                                          rcm[m] + vmaxm * angdistancem / speedlim,
                                          rcp[m] + vmaxp * angdistancep / speedlim))):
                nodesinpreviouslevel = nodesinpreviouslevel + 1
                thisnode = thisnode + 1
                # Create node (m, l, r) and update the level counter
                # networkNodes.append([m, l, r, float("inf"), float("inf")])
                lnetwork[thisnode] = l
                rnetwork[thisnode] = r
                mnetwork[thisnode] = m
                wnetwork[thisnode] = np.inf

                lmlimit = leftmostleaf
                rmlimit = (r - l) + leftmostleaf
                if(lmlimit + 1 <= rmlimit - 1):
                    #Dose = - sum(D[[i for i in range(lmlimit + 1, rmlimit)],:] * data.voxelgradient)
                    Dose = -beamGrad[lmlimit+1:rmlimit].sum()
                    C3simplifier = C3 * b * (r - l)
                else:
                    Dose = 0.0
                    C3simplifier = 0
                prevl = lnetwork[(posBeginningOfRow - oldflag): posBeginningOfRow]
                prevr = rnetwork[(posBeginningOfRow - oldflag): posBeginningOfRow]
                lambdaletter = (np.absolute(prevl - l) + np.absolute(prevr - r)
                                - 2 * np.maximum(0, prevl - r)
                                - 2 * np.maximum(0, l - np.absolute(prevr)))
                weight = C * (C2 * lambdaletter - C3simplifier) - Dose
                # Add the weights that were just calculated
                newweights = wnetwork[(posBeginningOfRow - oldflag): posBeginningOfRow] + weight
                # Find the minimum and its position in the vector.
                minloc = np.argmin(newweights)
                wnetwork[thisnode] = newweights[minloc]
                dadnetwork[thisnode] = minloc + posBeginningOfRow - oldflag

        posBeginningOfRow = nodesinpreviouslevel + posBeginningOfRow # This is the total number of network nodes
        # Keep the location of the leftmost leaf
        leftmostleaf = len(ys) + leftmostleaf
    thisnode = thisnode + 1
    # Initialise the sink node at +inf so the first candidate always wins and
    # p is guaranteed to be defined before it is returned.
    wnetwork[thisnode] = np.inf
    p = np.inf
    for mynode in range(posBeginningOfRow - nodesinpreviouslevel, posBeginningOfRow):
        weight = C * (C2 * (rnetwork[mynode] - lnetwork[mynode]))
        if (wnetwork[mynode] + weight <= wnetwork[thisnode]):
            wnetwork[thisnode] = wnetwork[mynode] + weight
            dadnetwork[thisnode] = mynode
            p = wnetwork[thisnode]
    thenode = thisnode # WILMER take a look at this
    l = []
    r = []
    while(1):
        # Find the predecessor data
        l.append(lnetwork[thenode])
        r.append(rnetwork[thenode])
        thenode = dadnetwork[thenode]
        if(0 == thenode): # If at the origin then break
            break
    l.reverse()
    r.reverse()
    return(p, l, r)
Exemple #60
def rewardTF(t, direct, directHead, dOut, offset, offsetHead, angle_writer,
             angle_sub, body_angle_writer, body_angle_sub, radHead,
             lastTerm_writer, lastTerm_sub, model_states, PerformanceRecorder,
             terminator):
    arraySize = 625  # 4*500
    arrayBody = 625  # 4*500

    deltaAngleReward = 0.
    angleAbort = 35.

    deltaAngleRewardH = 0.
    angleAbortH = 40.
    import nest

    import Variables as VAR
    from vc import vec
    import tf
    import math
    from functools import reduce  # reduce() is used below; builtin only in Python 2
    if lastTerm_sub.value is None:
        lastTerm_writer.send_message(std_msgs.msg.Float64(0.0))

    if angle_sub.value is None or body_angle_sub.value is None:
        angle_writer.send_message(
            std_msgs.msg.Float32MultiArray(data=[0.
                                                 for i in range(arraySize)]))
        body_angle_writer.send_message(
            std_msgs.msg.Float32MultiArray(data=[0.
                                                 for i in range(arrayBody)]))
        # angle_writer.send_message(std_msgs.msg.Float32MultiArray([0.0 in range(arraySize)]))
        return

    if model_states.value is None or len(
            model_states.value.pose) < 2 or radHead.value is None:
        return

    radHead = radHead.value.data
    model_states = model_states.value

    robot = model_states.pose[0]
    rp = robot.position
    ro = robot.orientation

    ball = model_states.pose[1]
    bp = ball.position

    qRobot = list(
        tf.transformations.quaternion_inverse([ro.x, ro.y, ro.z, ro.w]))
    qRobotConj = tf.transformations.quaternion_conjugate(qRobot)

    T0R = [[1, 0, 0, -rp.x], [0, 1, 0, -rp.y], [0, 0, 1, -rp.z], [0, 0, 0, 1]]

    nppBallTrans = list(np.dot(T0R, [bp.x, bp.y, bp.z, 1]))
    npBall = tf.transformations.quaternion_multiply(
        tf.transformations.quaternion_multiply(qRobot, nppBallTrans),
        qRobotConj)

    xAxies = vec(1, 0)
    npBall2d = vec(npBall[0], npBall[1])

    angle = xAxies.findClockwiseAngle180(npBall2d)

    angleRaw = angle

    oH = 0.
    if directHead.value is not None:
        oH = directHead.value.data

    bodyAngleRaw = angleRaw + oH * 90.  # + (radHead * 180 / np.pi)

    temp = list(angle_sub.value.data)
    temp[int(math.floor((t * 50) % arraySize))] = angleRaw
    tempBody = list(body_angle_sub.value.data)
    # meanBodyAngleBefore = reduce(lambda a, b: a + b, tempBody) / len(tempBody)
    tempBody[int(math.floor((t * 50) % arrayBody))] = bodyAngleRaw

    angle_writer.send_message(std_msgs.msg.Float32MultiArray(data=temp))
    body_angle_writer.send_message(
        std_msgs.msg.Float32MultiArray(data=tempBody))

    meanAngle = reduce(lambda a, b: a + b, temp) / len(temp)
    meanBodyAngle = reduce(lambda a, b: a + b, tempBody) / len(tempBody)

    # angleTrust = 0.0
    # bodyAngleTrust = 0.0
    # angle = angleTrust * angleRaw + (1 - angleTrust) * meanAngle
    # bodyAngle = bodyAngleTrust * bodyAngleRaw + (1 - bodyAngleTrust) * meanBodyAngle

    l = nrp.config.brain_root.conn_l
    r = nrp.config.brain_root.conn_r

    lH = nrp.config.brain_root.conn_lH
    rH = nrp.config.brain_root.conn_rH

    if t % 1 < 0.02:
        distanceRB = np.sqrt(nppBallTrans[0]**2 + nppBallTrans[1]**2)
        direction = 0.
        directionHead = 0.
        if direct.value:
            direction = direct.value.data
        if directHead.value:
            directionHead = directHead.value.data
        PerformanceRecorder.record_entry(t, direction, directionHead, meanAngle,
                                         meanBodyAngle, angleRaw, bodyAngleRaw,
                                         distanceRB)

    # if VAR.ENBLE_LOGGING and t % 3 < 0.02:
    #     clientLogger.info("----------------Reward Function----------------")
    #     clientLogger.info('Current Angle:', angle)
    #     clientLogger.info('Current Distance: ', distanceRB)
    # angle_writer.send_message(std_msgs.msg.Float64(0.))
    # if(np.absolute(angle) > 50):
    #     clientLogger.info("angle to big! (>50)")
    # clientLogger.info(angleBody)

    if t < lastTerm_sub.value.data + 0.2:
        angle_writer.send_message(
            std_msgs.msg.Float32MultiArray(data=[0.
                                                 for i in range(arraySize)]))
        body_angle_writer.send_message(
            std_msgs.msg.Float32MultiArray(data=[0.
                                                 for i in range(arrayBody)]))
        angle = 0.
        bodyAngleRaw = 0.
        # bodyAngle = 0.

    # if t % 200 < 0.02:
    #     lastTerm_writer.send_message(std_msgs.msg.Float64(t-3))

    # -----------------------------------------------------------------------BBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBBB
    # ---------- Reward Setting for Body
    judgedAngle = bodyAngleRaw

    # if t < lastTerm_sub.value.data + 10.5:
    #     judgedAngle = meanAngle

    if (np.absolute(meanBodyAngle) > angleAbort):
        if (t > lastTerm_sub.value.data + 6.):
            angle_writer.send_message(
                std_msgs.msg.Float32MultiArray(
                    data=[0. for i in range(arraySize)]))
            body_angle_writer.send_message(
                std_msgs.msg.Float32MultiArray(
                    data=[0. for i in range(arrayBody)]))
            dOut.send_message(0.)

            offset.send_message(0.)
            offsetHead.send_message(0.)

            lastTerm_writer.send_message(std_msgs.msg.Float64(t))
            terminator.send_message(std_msgs.msg.Bool(True))
            return

    hiddenLayer = nrp.config.brain_root.hidden_layer
    # hiddenLayer_left = nrp.config.brain_root.hidden_layer_left
    # hiddenLayer_right = nrp.config.brain_root.hidden_layer_right
    # or (t < lastTerm_sub.value.data + 12.5):
    if (np.absolute(judgedAngle) < deltaAngleReward):
        nest.SetStatus(l, {"n": 0.})
        nest.SetStatus(r, {"n": 0.})
        # nest.SetStatus(l, {"c": 0.})
        # nest.SetStatus(r, {"c":  0.})

        for j in hiddenLayer:
            # outConnections = nest.GetConnections(source=[j])
            # weights = nest.GetStatus(outConnections, keys="weight")
            reward = 0.
            inCon = nest.GetConnections(target=[j])
            # nest.SetStatus(inCon, {"c": 0.})
            nest.SetStatus(inCon, {"n": reward})

    # ----------------------------------
    # ---------- Reward Setting 50r Head
    judgedAngleH = angleRaw
    if (np.absolute(judgedAngleH) > angleAbortH):
        if (t > lastTerm_sub.value.data + 2.5):
            angle_writer.send_message(
                std_msgs.msg.Float32MultiArray(
                    data=[0. for i in range(arraySize)]))
            body_angle_writer.send_message(
                std_msgs.msg.Float32MultiArray(
                    data=[0. for i in range(arrayBody)]))
            dOut.send_message(0.)

            offset.send_message(0.)
            offsetHead.send_message(0.)

            lastTerm_writer.send_message(std_msgs.msg.Float64(t))
            terminator.send_message(std_msgs.msg.Bool(True))
            return
    if (np.absolute(judgedAngleH) < deltaAngleRewardH
            or t < lastTerm_sub.value.data + 0.4):
        nest.SetStatus(lH, {"n": 0.})
        nest.SetStatus(rH, {"n": 0.})