Example #1
 def __init__(self):
     # Generate universe functions
     self.distance = np.arange(0.,181.,1.)
     self.acceleration = np.arange(0.,0.1,0.01)
     
     # Generate Distance membership functions
     self.near = fuzz.trapmf(self.distance, (-1.,-1.,20.,65.))
     self.medium = fuzz.trapmf(self.distance,(35.,80.,120.,135.))
     self.far = fuzz.trapmf(self.distance,(105.,170.,180.,200.))
     
     # Generate Acceleration membership functions
     self.slow = fuzz.trimf(self.acceleration, (-1.,0.,0.05))
     self.normal = fuzz.trapmf(self.acceleration,(0.02,0.035,0.04,0.07))
     self.fast = fuzz.trapmf(self.acceleration,(0.06,0.085,0.1,0.2))
 
     # Fuzzy relation
     self.R1 = fuzz.relation_product(self.near,self.slow)
     self.R2 = fuzz.relation_product(self.medium,self.normal)
     self.R3 = fuzz.relation_product(self.far,self.fast)
     
     
     # Combine the fuzzy relation
     self.R_combined = np.fmax(self.R1, np.fmax(self.R2, self.R3))
     
     self.thetaOne = 0.0
     self.thetaTwo = 0.0
     
     self.InputDistanceAngle = 0.0
     self.OutputAcceleration = 0.0
     
     
     self.visualize = True
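A minimal usage sketch (assuming the class above is exposed under a hypothetical name, FuzzyController): fuzzify a crisp distance as a singleton over the distance universe, compose it with R_combined via skfuzzy's max-min composition, then defuzzify to a crisp acceleration.

import numpy as np
import skfuzzy as fuzz

controller = FuzzyController()   # hypothetical class name
crisp_distance = 42.0

# Fuzzify the crisp input as a singleton on the distance universe.
singleton = np.zeros_like(controller.distance)
singleton[np.argmin(np.abs(controller.distance - crisp_distance))] = 1.0

# Max-min composition with the combined relation, then centroid defuzzification.
mu_accel = fuzz.maxmin_composition(singleton[np.newaxis, :],
                                   controller.R_combined).ravel()
acceleration = fuzz.defuzz(controller.acceleration, mu_accel, 'centroid')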
Example #2
    def run(self):

            """ inputs[0] =  ERROR AXIS          ., so stores all possible error values
                inputs[1] =  DEL_ERROR AXIS      .,     ,,
                inputs[2] =  CONTROL_OUTPUT AXIS .,     ,,
                    
                ERROR                  DEL_ERROR               CONTROL_OUTPUT         m_value for crisp e and delta_e values

                b[0][0] -ve Medium  || b[1][0] -ve Medium  ||  b[2][0] -ve Medium   ..        f[0] |  f_d[0] 
                b[0][1] -ve small   || b[1][1] -ve small   ||  b[2][1] -ve small    ..        f[1] |  f_d[1]
                b[0][2] zero        || b[1][2] zero        ||  b[2][2] zero         ..        f[2] |  f_d[2]
                b[0][3] +ve small   || b[1][3] +ve small   ||  b[2][3] +ve small    ..        f[3] |  f_d[3]
                b[0][4] +ve Medium  || b[1][4] +ve Medium  ||  b[2][4] +ve Medium   ..        f[4] |  f_d[4] 
                
                f_mat is the fuzzy rule matrix built by fuzzy_matrix(f, f_d)
            """
            inputs = [np.arange(var[0], var[1] + 1, 1) for var in self.var_ranges]  # step size = 1 (an assumption for now); sets the third dimension of the b matrix
            b  = []
            output = [0,0,0,0,0]
            out_final = []
            for i in range(3):
                    b.append( [membership_f(self.mu[i], inputs[i], a) for a in self.d_mu[i] ])
            # To visualize the membership func. call .. [ visualize_mf(b,inputs)  ]
            
            f ,f_d = error_fuzzify(inputs, b, self.error, self.delta_e)            
            f_mat = fuzzy_matrix(f,f_d)
            output = rule_base(b, f_mat, output)
            print('output : ', output)
            aggregated = np.fmax(output[0], np.fmax(output[1],np.fmax(output[2], np.fmax(output[3], output[4]))))
            out_final = fuzz.defuzz(inputs[2], aggregated, 'centroid')
            out_activation = fuzz.interp_membership(inputs[2], aggregated, out_final)  # for plot
            visualize.visualize_mf(b,inputs,output, out_final, out_activation, aggregated)
            visualize.visualize_output(b, inputs, out_final, out_activation, aggregated)
            plt.show()
Example #3
def proj_weights(W, correlation=False):

    # From Chen, Y. & Ye, X. (2011). Projection onto a simplex.

    if correlation:

        k, n = W.shape
        W_proj = empty((k, n))

        for col_idx in range(n):
            w = sort(W[:, col_idx])
            idx = k - 2

            while True:
                t_idx = (sum(w[idx + 1 :]) - 1) / (k - idx - 1)
                if t_idx >= w[idx]:
                    W_proj[:, col_idx] = fmax(W[:, col_idx] - t_idx, 0)
                    break
                else:
                    idx = idx - 1
                    if idx < 0:
                        t_idx = (sum(w) - 1) / k
                        W_proj[:, col_idx] = fmax(W[:, col_idx] - t_idx, 0)
                        break
        return W_proj
    else:
        return fmax(W, 0)
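A quick sanity check (a sketch; it must run in the same namespace as proj_weights, which uses numpy's empty/sort/fmax unqualified): with correlation=True every column of the result should land on the probability simplex.

import numpy as np

W = np.random.randn(4, 3)
W_proj = proj_weights(W, correlation=True)
assert np.allclose(W_proj.sum(axis=0), 1.0)  # columns sum to 1
assert (W_proj >= 0).all()                   # and are non-negative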
Example #4
def _cmeans_predict0(test_data, cntr, u_old, c, m):
    """
    Single step in fuzzy c-means prediction algorithm. Clustering algorithm
    modified from Ross, Fuzzy Logic w/Engineering Applications (2010)
    p.352-353, equations 10.28 - 10.35, but this method to generate fuzzy
    predictions was independently derived by Josh Warner.

    Parameters inherited from cmeans()

    Very similar to initial clustering, except `cntr` is not updated, thus
    the new test data are forced into known (trained) clusters.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m
    test_data = test_data.T

    # For prediction, we do not recalculate cluster centers. The test_data is
    # forced to conform to the prior clustering.

    d = _distance(test_data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return u, jm, d
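In equation form, the two quantities computed above are the standard fuzzy c-means membership update and objective value:

$$u_{ij} = \frac{d_{ij}^{-2/(m-1)}}{\sum_{k=1}^{c} d_{kj}^{-2/(m-1)}}, \qquad J_m = \sum_{i=1}^{c}\sum_{j=1}^{N} u_{ij}^{m}\, d_{ij}^{2}.$$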
Example #5
def _cmeans0_2distw(distance1, u_old, c, m, *para):
	# k: use the k-th largest membership in each cluster as the cutoff
	k = para[0]
	distance2 = para[1]
	w = para[2]

	# Normalizing, then eliminating any potential zero values.
	u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
	u_old = np.fmax(u_old, np.finfo(np.float64).eps)

	um = u_old ** m

	# keep only memberships that are >= the k-th largest value in each cluster row of um
	filter_k = lambda row:row >= sorted(row, reverse=True)[k-1]
	large_k_indices = np.apply_along_axis(filter_k, axis=1, arr=um)

	# Calculate the average distance from entity to cluster 
	d1 = large_k_indices.dot(distance1) / np.ones((distance1.shape[1],1)).dot(np.atleast_2d(large_k_indices.sum(axis=1))).T
	d1 = d1 / np.std(d1)
	#print("d1:", d1[0:3, 0:5], " max:", np.amax(d1), " min:", np.amin(d1))

	# Get the distance from data2
	d2 = large_k_indices.dot(distance2) / np.ones((distance2.shape[1],1)).dot(np.atleast_2d(large_k_indices.sum(axis=1))).T
	d2 = d2 / np.std(d2)
	#print("d2:", d2[0:3, 0:5], " max:", np.amax(d2), " min:", np.amin(d2))

	d = w * d1 + (1 - w) * d2
	#print("d:", d[0:3, 0:5], " max:", np.amax(d), " min:", np.amin(d))

	d = np.fmax(d, np.finfo(np.float64).eps)
	jm = (um * d ** 2).sum()

	u = d ** (- 2. / (m - 1))
	u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
	return u, jm, d
Example #6
    def run_simulation(self, dt, timesteps, c, h, init_cond=np.zeros( (2, 75, 75) ) ):
        r_E = np.zeros((timesteps, self.N_pairs, self.N_pairs))
        r_I = np.copy(r_E)

        # add initial conditions:
        r_E[0,:,:] = init_cond[0]
        r_I[0,:,:] = init_cond[1]

        I_E = np.zeros((timesteps, self.N_pairs, self.N_pairs))
        I_I = np.copy(I_E)
        # rSS_E = np.copy(I_E)
        # rSS_I = np.copy(I_I)

        for t in range(1,timesteps):
            # Input drive from external input and network
            I_E[t,:,:] = c*h + np.sum( np.sum( self.W_EE * r_E[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T - np.sum( np.sum( self.W_EI * r_I[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T 
            I_I[t,:,:] = c*h + np.sum( np.sum( self.W_IE * r_E[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T - np.sum( np.sum( self.W_II * r_I[t-1,:,:],1 ), 1 ).reshape(self.N_pairs, self.N_pairs).T 
            
            # steady state firing rates - power law I/O
            rSS_E = np.multiply(self.k, np.power(np.fmax(0,I_E[t,:,:]), self.n_E))
            rSS_I = np.multiply(self.k, np.power(np.fmax(0,I_I[t,:,:]), self.n_I))

            # set negative steady state rates to zero
            rSS_E[rSS_E < 0] = 0
            rSS_I[rSS_I < 0] = 0

            # instantaneous firing rates approaching steady state
            r_E[t,:,:] = r_E[t-1,:,:] + dt*(np.divide(-r_E[t-1,:,:]+rSS_E, self.tau_E))
            r_I[t,:,:] = r_I[t-1,:,:] + dt*(np.divide(-r_I[t-1,:,:]+rSS_I, self.tau_I))
            
        return [r_E, r_I, I_E, I_I]
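For reference (a summary, not extra library code): each timestep above is a forward-Euler step of rate dynamics with a rectified power-law transfer function, where [.]_+ is the np.fmax(0, .) in the code:

$$\tau_E \frac{dr_E}{dt} = -r_E + k\,[I_E]_+^{\,n_E}, \qquad \tau_I \frac{dr_I}{dt} = -r_I + k\,[I_I]_+^{\,n_I}.$$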
Example #7
def _cmeans0(data, u_old, c, m, metric):
    """
    Single step in generic fuzzy c-means clustering algorithm.

    Modified from Ross, Fuzzy Logic w/Engineering Applications (2010),
    pages 352-353, equations 10.28 - 10.35.

    Parameters inherited from cmeans()
    """
    # Normalizing, then eliminating any potential zero values.
    u_old = normalize_columns(u_old)
    u_old = np.fmax(u_old, np.finfo(np.float64).eps)

    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / np.atleast_2d(um.sum(axis=1)).T

    d = _distance(data, cntr, metric)
    d = np.fmax(d, np.finfo(np.float64).eps)

    jm = (um * d ** 2).sum()

    u = normalize_power_columns(d, - 2. / (m - 1))

    return cntr, u, jm, d
Example #8
def pitchComparison(oriF0Array,singerF0Array,bestCorrespondingOriIndexList):

    lenSinger=len(singerF0Array)

    # turn F0 array into log F0 array
    # fmax with 0 floors the -inf/NaN that log2 yields for non-positive values.
    oriLogF0Array=np.fmax(0,np.log2(oriF0Array))
    singerLogF0Array=np.fmax(0,np.log2(singerF0Array))

    subrating=[None for i in range(lenSinger)]
    rating=0.0
    numRated=0

    for i in range(lenSinger):
        if bestCorrespondingOriIndexList[i] is not None and singerLogF0Array[i]!=0.0:
            # check whether they're off by more than half an octave.
            # If so, move it up/down for them to stay within same octave
            currSingerLogF0=singerLogF0Array[i]
            currOriLogF0=oriLogF0Array[bestCorrespondingOriIndexList[i]]
            currSingerLogF0=currSingerLogF0+math.floor(currOriLogF0-currSingerLogF0+0.5)

            # triangle filter. Notes that are perfectly on pitch will have score 1.
            # Notes that are half-octave off will have score 0. Everything in between is scored linearly.
            subrating[i]=(1.0-abs(currOriLogF0-currSingerLogF0)/0.5)*100 # *100 to make everything on an 100 scale
            rating+=subrating[i]
            numRated+=1
        else:
            continue # The subrating will be None for notes that do not have correspondence.

    # divide by the number of scored notes to get the average; guard against
    # the case where nothing could be rated
    if numRated > 0:
        rating /= numRated


    return rating,subrating
Example #9
def _cmeans0(data, u_old, c, m):
    """
    Single step in generic fuzzy c-means clustering algorithm. Modified from
    Ross, Fuzzy Logic w/Engineering Applications (2010) p.352-353, equations
    10.28 - 10.35.

    Parameters inherited from cmeans()

    This algorithm is a ripe target for Cython.

    """
    # Normalizing, then eliminating any potential zero values.
    u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
    u_old = np.fmax(u_old, np.finfo(float).eps)

    um = u_old ** m

    # Calculate cluster centers
    data = data.T
    cntr = um.dot(data) / (np.ones((data.shape[1],
                                    1)).dot(np.atleast_2d(um.sum(axis=1))).T)

    d = _distance(data, cntr)
    d = np.fmax(d, np.finfo(float).eps)

    jm = (um * d ** 2).sum()

    u = d ** (- 2. / (m - 1))
    u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))

    return cntr, u, jm, d
Example #10
def _cmeans0_kth(data, u_old, c, m, *para):
	"""
	Single step in generic fuzzy c-means clustering algorithm.
	data2 is for intersect counting
	"""
	k = para[0]

	# Normalizing, then eliminating any potential zero values.
	u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
	u_old = np.fmax(u_old, np.finfo(np.float64).eps)

	um = u_old ** m

	# zero out memberships below the k-th largest value in each cluster row
	filter_k = lambda row:row < sorted(row, reverse=True)[k-1]
	fail_indices = np.apply_along_axis(filter_k, axis=1, arr=u_old)
	um[fail_indices] = 0

	# Calculate cluster centers
	# data1:2861,2; um:30,2861
	data = data.T
	cntr = um.dot(data) / (np.ones((data.shape[1],1)).dot(np.atleast_2d(um.sum(axis=1))).T)
	d = cdistance.get_center_distance(data, cntr)
	
	d = np.fmax(d, np.finfo(np.float64).eps)
	jm = (um * d ** 2).sum()

	u = d ** (- 2. / (m - 1))
	u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
	return cntr, u, jm, d
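The apply_along_axis/lambda pair used above can be expressed without a Python-level callback; a sketch of the same k-th-largest row cutoff on a single matrix:

import numpy as np

um = np.array([[0.9, 0.1, 0.5],
               [0.2, 0.8, 0.3]])
k = 2
kth_largest = np.sort(um, axis=1)[:, -k][:, None]   # per-row k-th largest value
um_filtered = np.where(um >= kth_largest, um, 0.0)  # zero everything below it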
Example #11
 def test_fmax(self):
     from numpy import fmax, array
     nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
     a = array((complex(ninf, 10), complex(10, ninf),
                complex( inf, 10), complex(10,  inf),
                5+5j, 5-5j, -5+5j, -5-5j,
                0+5j, 0-5j, 5, -5,
                complex(nan, 0), complex(0, nan)), dtype = complex)
     b = [ninf]*a.size
     res = [a[0 ], a[1 ], a[2 ], a[3 ],
            a[4 ], a[5 ], a[6 ], a[7 ],
            a[8 ], a[9 ], a[10], a[11],
            b[12], b[13]]
     assert (fmax(a, b) == res).all()
     b = [inf]*a.size
     res = [b[0 ], b[1 ], a[2 ], b[3 ],
            b[4 ], b[5 ], b[6 ], b[7 ],
            b[8 ], b[9 ], b[10], b[11],
            b[12], b[13]]
     assert (fmax(a, b) == res).all()
     b = [0]*a.size
     res = [b[0 ], a[1 ], a[2 ], a[3 ],
            a[4 ], a[5 ], b[6 ], b[7 ],
            a[8 ], b[9 ], a[10], b[11],
            b[12], b[13]]
     assert (fmax(a, b) == res).all()
Example #12
    def _get_ln_y_ref(self, rup, dists, C):
        """
        Get an intensity on a reference soil.

        Implements eq. 13a.
        """
        # reverse faulting flag
        Frv = 1. if 30 <= rup.rake <= 150 else 0.
        # normal faulting flag
        Fnm = 1. if -120 <= rup.rake <= -60 else 0.
        # hanging wall flag

        Fhw = np.zeros_like(dists.rx)
        idx = np.nonzero(dists.rx >= 0.)
        Fhw[idx] = 1.

        # a part in eq. 11
        mag_test1 = np.cosh(2. * max(rup.mag - 4.5, 0))

        # centered DPP
        centered_dpp = self._get_centered_cdpp(dists)
        # centered_ztor
        centered_ztor = self._get_centered_ztor(rup, Frv)
        #

        ln_y_ref = (
            # first part of eq. 11
            C['c1']
            + (C['c1a'] + C['c1c'] / mag_test1) * Frv
            + (C['c1b'] + C['c1d'] / mag_test1) * Fnm
            + (C['c7'] + C['c7b'] / mag_test1) * centered_ztor
            + (C['c11'] + C['c11b'] / mag_test1) *
            np.cos(math.radians(rup.dip)) ** 2
            # second part
            + C['c2'] * (rup.mag - 6)
            + ((C['c2'] - C['c3']) / C['cn'])
            * np.log(1 + np.exp(C['cn'] * (C['cm'] - rup.mag)))
            # third part
            + C['c4']
            * np.log(dists.rrup + C['c5']
                     * np.cosh(C['c6'] * max(rup.mag - C['chm'], 0)))
            + (C['c4a'] - C['c4'])
            * np.log(np.sqrt(dists.rrup ** 2 + C['crb'] ** 2))
            # fourth part
            + (C['cg1'] + C['cg2'] / (np.cosh(max(rup.mag - C['cg3'], 0))))
            * dists.rrup
            # fifth part
            + C['c8'] * np.fmax(1 - (np.fmax(dists.rrup - 40,
                                np.zeros_like(dists)) / 30.),
                                np.zeros_like(dists))[0]
            * min(max(rup.mag - 5.5, 0) / 0.8, 1.0)
            * np.exp(-1 * C['c8a'] * (rup.mag - C['c8b']) ** 2) * centered_dpp
            # sixth part
            + C['c9'] * Fhw * np.cos(math.radians(rup.dip)) *
            (C['c9a'] + (1 - C['c9a']) * np.tanh(dists.rx / C['c9b']))
            * (1 - np.sqrt(dists.rjb ** 2 + rup.ztor ** 2)
               / (dists.rrup + 1.0))
        )

        return ln_y_ref
Example #13
def clip_to_window(boxlist, window):
  """Clip bounding boxes to a window.

  This op clips input bounding boxes (represented by bounding box
  corners) to a window, optionally filtering out boxes that do not
  overlap at all with the window.

  Args:
    boxlist: BoxList holding M_in boxes
    window: a numpy array of shape [4] representing the
            [y_min, x_min, y_max, x_max] window to which the op
            should clip boxes.

  Returns:
    a BoxList holding M_out boxes where M_out <= M_in
  """
  y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
  win_y_min = window[0]
  win_x_min = window[1]
  win_y_max = window[2]
  win_x_max = window[3]
  y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
  y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
  x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
  x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
  clipped = np_box_list.BoxList(
      np.hstack([y_min_clipped, x_min_clipped, y_max_clipped, x_max_clipped]))
  clipped = _copy_extra_fields(clipped, boxlist)
  areas = area(clipped)
  nonzero_area_indices = np.reshape(np.nonzero(np.greater(areas, 0.0)),
                                    [-1]).astype(np.int32)
  return gather(clipped, nonzero_area_indices)
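Stripped of the BoxList bookkeeping, the clipping itself is just the fmin/fmax sandwich; a self-contained sketch:

import numpy as np

boxes = np.array([[-0.1, 0.2, 0.5, 1.3],   # rows of [y_min, x_min, y_max, x_max]
                  [ 0.6, 0.7, 0.9, 0.8]])
window = np.array([0.0, 0.0, 1.0, 1.0])    # [y_min, x_min, y_max, x_max]
mins, maxs = window[:2], window[2:]
clipped = np.concatenate([np.fmax(np.fmin(boxes[:, :2], maxs), mins),
                          np.fmax(np.fmin(boxes[:, 2:], maxs), mins)], axis=1)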
Example #14
    def get_output(self, current_temp, target_temp, rate_of_change_per_minute):
        temp_err_in = self.temp_error_category(current_temp, target_temp, rate_of_change_per_minute)
        print "Temp Error", temp_err_in

        #What is the temperature doing?
        mf_temp_too_cold = temp_err_in['too_cold']
        mf_temp_cold     = temp_err_in['cold']
        mf_temp_optimal  = temp_err_in['optimal']
        mf_temp_hot      = temp_err_in['hot']
        mf_temp_too_hot  = temp_err_in['too_hot']

        mf_cooling_quickly = temp_err_in['cooling_quickly']

        #Then:
        when_too_cold = np.fmin(mf_temp_too_cold, self.ho_high)
        when_cold     = np.fmin(mf_temp_cold, self.ho_low)
        when_optimal  = np.fmin(mf_temp_optimal, self.co_off)
        when_hot      = np.fmin(mf_temp_hot, self.co_low)
        when_too_hot  = np.fmin(mf_temp_too_hot, self.co_high)

        #If the temperature is temp_hot AND cooling_quickly SET chiller off
        when_hot_and_cooling_quickly = np.fmin(np.fmin(mf_temp_hot, mf_cooling_quickly), self.co_off)

        aggregate_membership = np.fmax(when_hot_and_cooling_quickly, np.fmax(when_too_cold, np.fmax(when_cold, np.fmax(when_optimal, np.fmax(when_hot, when_too_hot)))))
        result = fuzz.defuzz(self.chill_out, aggregate_membership, 'centroid')

        return result
Example #15
def TR(HIGH, LOW, CLOSE):
    CLOSELAG = LAG(CLOSE,1)
    range1 = HIGH - LOW
    range2 = np.abs(HIGH-CLOSELAG)
    range3 = np.abs(LOW -CLOSELAG)
    out = np.fmax(np.fmax(range1,range2),range3)
    return out
Example #16
def parker(rbar,C,vbar_guess):
    tol = 1.0e-10
    # handle bifurcation point at r = r_c
    if rbar > 1.0:
        vbar = np.fmax(vbar_guess,1.0+tol)
    else:
        # also can't have vbar = 0 (log (0) = bad)
        vbar = np.fmax(vbar_guess,tol)
    print(vbar)
    it = 0
    while parker_err(vbar,rbar,C) > tol:

        dvbar = - parker_f(vbar,rbar,C) / \
               parker_dfdvbar(vbar)

        # Limit changes of vbar to be no larger
        # than 20% per iteration step.
        # This turns out to be necessary to
        # keep the solution from wandering off into
        # bad territory (like vbar < 0).
        fac1 = np.fmin(0.2/np.abs(dvbar/vbar),1.0)
    
        vbar = vbar + fac1 * dvbar

# debug output
#        print it, fac1, parker_f(vbar,rbar,C), parker_err(vbar,rbar,C), vbar
        it = it+1

    return vbar
Example #17
def centroid(x, mfx):
    """
    Defuzzification using centroid (`center of gravity`) method.

    Parameters
    ----------
    x : 1d array, length M
        Independent variable
    mfx : 1d array, length M
        Fuzzy membership function

    Returns
    -------
    u : 1d array, length M
        Defuzzified result

    See also
    --------
    skfuzzy.defuzzify.defuzz, skfuzzy.defuzzify.dcentroid
    """

    '''
    As we suppose linearity between each pair of points of x, we can calculate
    the exact area of the figure (a triangle or a rectangle).
    '''

    sum_moment_area = 0.0
    sum_area = 0.0

    # If the membership function is a singleton fuzzy set:
    if len(x) == 1:
        return x[0]*mfx[0] / np.fmax(mfx[0], np.finfo(float).eps).astype(float)

    # else return the sum of moment*area/sum of area
    for i in range(1, len(x)):
        x1 = x[i - 1]
        x2 = x[i]
        y1 = mfx[i - 1]
        y2 = mfx[i]

        # if y1 == y2 == 0.0 or x1==x2: --> rectangle of zero height or width
        if not(y1 == y2 == 0.0 or x1 == x2):
            if y1 == y2:  # rectangle
                moment = 0.5 * (x1 + x2)
                area = (x2 - x1) * y1
            elif y1 == 0.0 and y2 != 0.0:  # triangle, height y2
                moment = 2.0 / 3.0 * (x2-x1) + x1
                area = 0.5 * (x2 - x1) * y2
            elif y2 == 0.0 and y1 != 0.0:  # triangle, height y1
                moment = 1.0 / 3.0 * (x2 - x1) + x1
                area = 0.5 * (x2 - x1) * y1
            else:
                moment = (2.0 / 3.0 * (x2-x1) * (y2 + 0.5*y1)) / (y1+y2) + x1
                area = 0.5 * (x2 - x1) * (y1 + y2)

            sum_moment_area += moment * area
            sum_area += area

    return sum_moment_area / np.fmax(sum_area,
                                     np.finfo(float).eps).astype(float)
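The loop is a piecewise-linear evaluation of the usual centroid (center-of-gravity) formula, with each [x1, x2] segment contributing its exact area and first moment:

$$u = \frac{\int x\,\mu(x)\,dx}{\int \mu(x)\,dx}.$$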
Example #18
def kramer_unsoldt_opacity(dens, Z, A, Zbar, Te, lmbda):
    """
    Computes the  Kramer-Unsoldt opacity [Zel’dovich & Raizer 1967 p 27]
    
    Parameters:
    -----------
     dens: [ndarray] density in (g.cm⁻³)
     Z: [ndarray] atomic number 
     A: [ndarray] atomic mass
     Zbar: [ndarray] ionization
     Te: [ndarray] electron temperature (eV)
     lmbda: [ndarray] wavelength (nm)

    Returns:
    --------
     out: [ndarray] of the same shape as input containing the opacity [cm⁻¹]
    """
                                          # check sign here
    Ibar = 10.4*Z**(4./3) * (Zbar/Z)**2 / (1 - Zbar/Z)**(2./3)
    Ibar = np.fmax(Ibar, 6.0)
    y = 1240./(lmbda * Te)
    y1 = Ibar / Te
    Ni = dens * cst.N_A / A
    #print Ibar, y, y1, Ni
    return np.fmax(7.13e-16* Ni * (Zbar + 1)**2 * np.exp(y - y1) / (Te**2*y**3), 1e-16)
Example #19
    def test_half_ufuncs(self):
        """Test the various ufuncs"""

        a = np.array([0, 1, 2, 4, 2], dtype=float16)
        b = np.array([-2, 5, 1, 4, 3], dtype=float16)
        c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

        assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
        assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
        assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
        assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

        assert_equal(np.equal(a, b), [False, False, False, True, False])
        assert_equal(np.not_equal(a, b), [True, True, True, False, True])
        assert_equal(np.less(a, b), [False, True, False, False, True])
        assert_equal(np.less_equal(a, b), [False, True, False, True, True])
        assert_equal(np.greater(a, b), [True, False, True, False, False])
        assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
        assert_equal(np.logical_and(a, b), [False, True, True, True, True])
        assert_equal(np.logical_or(a, b), [True, True, True, True, True])
        assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
        assert_equal(np.logical_not(a), [True, False, False, False, False])

        assert_equal(np.isnan(c), [False, False, False, True, False])
        assert_equal(np.isinf(c), [False, False, True, False, False])
        assert_equal(np.isfinite(c), [True, True, False, False, True])
        assert_equal(np.signbit(b), [True, False, False, False, False])

        assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

        assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
        x = np.maximum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [0, 5, 1, 0, 6])
        assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
        x = np.minimum(b, c)
        assert_(np.isnan(x[3]))
        x[3] = 0
        assert_equal(x, [-2, -1, -np.inf, 0, 3])
        assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
        assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
        assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
        assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

        assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
        assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
        assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
        assert_equal(np.square(b), [4, 25, 1, 16, 9])
        assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
        assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
        assert_equal(np.conjugate(b), b)
        assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
        assert_equal(np.negative(b), [2, -5, -1, -4, -3])
        assert_equal(np.positive(b), b)
        assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
        assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
        assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
        assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
Example #20
def compute_erk_change(raf_value, time_value, initial_values, mfs):
	"""Rules-
		If raf is high and time is high then positive_change_erk is high
		If raf is high1 and time is low then positive_change_erk is low
		If raf is low then positive_change_erk is low
		If raf is low and time is high then negative_change_erk is high
		If raf is low and time is low then negative_change_erk is low"""

	
	#Antecedent 1
	f = interp1d(initial_values[0][0], mfs[0][1])
	a1_1 = f(raf_value) #raf_high[raf == raf_value]
	f = interp1d(initial_values[2], mfs[2][1])
	a1_2 = f(time_value) #time_high[time == time_value]

	a1 = min(a1_1, a1_2)
	c1 = np.fmin( a1, mfs[1][3]) #mfs[1][3] is positive_change_erk_high

	#Antecedent 2
	f = interp1d(initial_values[0][0], mfs[0][6])
	a2_1 = f(raf_value)
	f = interp1d(initial_values[2], mfs[2][0]) #time_low[time == time_value]
	a2_2 = f(time_value)

	a2 = min(a2_1, a2_2)
	c2 = np.fmin( a2, mfs[1][2]) #mfs[1][2] is positive_change_erk_low

	c_com_positive = np.fmax(c1,c2)

	f = interp1d(initial_values[0][0], mfs[0][0])
	a3 = f(raf_value)
	c3 = np.fmin(a3, mfs[1][2])

	c_com_positive = np.fmax(c_com_positive, c3)
	pos_change = fuzz.defuzz( initial_values[1][1], c_com_positive, 'centroid') #initial_values[1][1] is positive_change_erk

	###Negative Change

	#Antecedent 3
	'''f = interp1d(initial_values[0][0], mfs[0][0])
	a3_1 = f(raf_value) #raf_low[raf == raf_value]
	a3_2 = a1_2 #time_high[time == time_value]

	a3 = min(a3_1,a3_2)
	c3 = np.fmin(a3, mfs[1][5]) #mfs[1][3] is negative_change_erk_high

	#Antecedent 4
	a4_1 = a3_1 #raf_low[raf == raf_value]
	a4_2 = a2_2 #time_low[time == time_value]

	a4 = min(a4_1, a4_2)
	c4 = np.fmin(a4, mfs[1][4]) #mfs[1][4] is negative_change_erk_low

	c_com_negative = np.fmax(c3, c4)
	neg_change = fuzz.defuzz(initial_values[1][2], c_com_negative, 'centroid') #initial_values[1][2] is negative_change_erk'''
	
	#print pos_change, neg_change
	#print pos_change
	return pos_change 
Example #21
def _cmeans0_kth(data1, similarity2, u_old, c, w, m, *para):
	"""
	Single step in generic fuzzy c-means clustering algorithm.
	data2 is for intersect counting
	"""
	k = para[0]

	# Normalizing, then eliminating any potential zero values.
	u_old /= np.ones((c, 1)).dot(np.atleast_2d(u_old.sum(axis=0)))
	u_old = np.fmax(u_old, np.finfo(np.float64).eps)

	um = u_old ** m

	# calculating u_c
	u_c = u_old / u_old.sum(axis=1)[:,None]

	# zero out memberships below the k-th largest value in each cluster row of u_c
	filter_k = lambda row:row < sorted(row, reverse=True)[k-1]
	fail_indices = np.apply_along_axis(filter_k, axis=1, arr=u_c)
	um[fail_indices] = 0

	# Calculate cluster centers
	# data1:2861,2; um:30,2861
	d1 = 0
	if data1 is not None:
		data1 = data1.T
		cntr1 = um.dot(data1) / (np.ones((data1.shape[1],
										1)).dot(np.atleast_2d(um.sum(axis=1))).T)
		d1 = _distance(data1, cntr1) # euclidean distance
		#print("b4-- d1:", d1[0:5,0],d1[0:5,1])
		#print("  min d1:", np.min(d1), ";max d1:", np.max(d1), "   std 1:", np.std(d1))
		d1 = d1 / np.std(d1)

	# data2
	d2 = 0
	if similarity2 is not None:
		#print("similarity2", similarity2[0:5,0])
		#d2 = um_c.dot(similarity2)
		d2 = um.dot(1 - similarity2) / np.ones((similarity2.shape[1],1)).dot(np.atleast_2d(um.sum(axis=1))).T
		#print("b4--d2:", d2[0:5,0],d2[0:5,1])
		#print("  d2.shape", d2.shape, ",std2:", np.std(d2))
		d2 = d2 / np.std(d2)
	
	# combined distance and similarity of two data
	d = w * d1 + (1-w) * d2
	#print("-- d1:", d1[0:6,0],d1[0:6,1], " \n  ,d2:", d2[0:6,0],d2[0:6,1],d2[0:6,2], " \n  ,d:", d[0:5,0],d[0:5,1])
	#print("  min d1:", np.min(d1), ";max d1:", np.max(d1), ",max d2:", np.max(d2))
	#print("   std 1:", np.std(d1), " ,2:", np.std(d2), " ,d:", np.std(d))

	d = np.fmax(d, np.finfo(np.float64).eps)

	jm = (um * d ** 2).sum()

	u = d ** (- 2. / (m - 1))
	#print("end u.sum:", u.sum(axis=0), u.sum(axis=1), "\nu[:,0]:", u[:,0])
	#print("/:", np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0))))
	u /= np.ones((c, 1)).dot(np.atleast_2d(u.sum(axis=0)))
	#print("end u.sum:", u.sum(axis=0), u.sum(axis=1), "\nu:", u[:,0])
	return cntr1, u, jm, d1, d2, d
Example #22
def visualize(oriF0Array,oriOffsetArray,singerF0Array,singerOffsetArray,rating,subrating):
    # First take the log
    oriLogF0Array=np.fmax(0,np.log2(oriF0Array))
    singerLogF0Array=np.fmax(0,np.log2(singerF0Array))

    lenOri=len(oriLogF0Array)
    lenSinger=len(singerLogF0Array)
    # Next we generate labels for each audio


    for oriIndex,oriLogF0 in enumerate(oriLogF0Array):
        if oriLogF0Array[oriIndex]!=0 and oriIndex+1<lenOri:
            # draw a horizontal line
            plt.plot([oriOffsetArray[oriIndex],oriOffsetArray[oriIndex+1]],[oriLogF0,oriLogF0],linewidth=5,c='r')


    for singerIndex,singerLogF0 in enumerate(singerLogF0Array):
        if singerLogF0Array[singerIndex]!=0 and singerIndex+1<lenSinger:
            # draw a horizontal line
            plt.plot([singerOffsetArray[singerIndex],singerOffsetArray[singerIndex+1]],[singerLogF0,singerLogF0],linewidth=5,c='b')

            if subrating[singerIndex] is not None:
                plt.annotate(
                    "{0:.1f}".format(subrating[singerIndex]),# Times 100 to make it look nicer.
                    xy = ((singerOffsetArray[singerIndex]+singerOffsetArray[singerIndex+1])/2.0, singerLogF0), xytext = (-20, 20),
                    textcoords = 'offset points', ha = 'right', va = 'bottom',
                    bbox = dict(boxstyle = 'round,pad=0.5', fc = 'blue', alpha = 0.5),
                    arrowprops = dict(arrowstyle = '->', connectionstyle = 'arc3,rad=0'))


    maxLogF0=max(np.hstack((oriLogF0Array,singerLogF0Array)))
    minLogF0Without0=maxLogF0 # initialize to a large number first.

    for i in np.hstack((oriLogF0Array,singerLogF0Array)):
        if i>0:
            minLogF0Without0=min(minLogF0Without0,i)

    xlim=[min(np.hstack((oriOffsetArray,singerOffsetArray))),max(np.hstack((oriOffsetArray,singerOffsetArray)))]
    ylim=[minLogF0Without0-0.5,maxLogF0+0.5]  # -0.5 and +0.5 to make drawing nicer.

    yticks=generateYAxis(ylim)
    xrange=np.arange(start=0,stop=xlim[1]+0.05,step=0.2) # step 0.2 to make it look nicer
    yrange=np.arange(start=ylim[0],stop=ylim[1],step=(ylim[1]-ylim[0])/(np.floor((ylim[1]-ylim[0])*12.0)))
    plt.xticks(xrange)
    plt.yticks(yrange,yticks)
    plt.xlabel('Time(s)')
    plt.ylabel('Notes(Western Notations)')

    # Play a little trick on the legend to make it show the right thing.
    oriLegend = mlines.Line2D([], [], linewidth=5,c='r',label='Original')
    singerLegend = mlines.Line2D([], [], linewidth=5,c='b',label='Singer')
    plt.legend(handles=[oriLegend,singerLegend])

    plt.text(xlim[1]-4, ylim[0]+0.5, 'Pitch rating: '+"{0:.1f}".format(rating), style='italic',
        bbox={'facecolor':'red', 'alpha':0.5, 'pad':10})

    plt.show()
Example #23
def _inbox( X, box, weight=1 ):
    """ -> [tub( Xj, loj, hij ) ... ]
        all 0  <=>  X in box, lo <= X <= hi
    """
    assert len(X) == len(box), \
        "len X %d != len box %d" % (len(X), len(box))
    return weight * np.array([
        np.fmax( lo - x, 0 ) + np.fmax( 0, x - hi )
            for x, (lo,hi) in zip( X, box )])
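A tiny check of the penalty (sketch): zero inside the box, growing linearly with the distance outside it.

import numpy as np

X = np.array([0.5, 2.0, -1.0])
box = [(0.0, 1.0)] * 3
print(_inbox(X, box))   # -> [0. 1. 1.]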
Example #24
    def coord_to_px(self, x, y, latlon=False, rounded=True, check_valid=True):
        """ Convert x,y coordinates into pixel coordinates of raster.

        x,y may be either in native coordinate system of raster or lat/lon.

        Parameters:
            x : float, x coordinate to convert.
            y : float, y coordinate to convert.
            latlon : boolean, default False. Set as True if bounds in lat/lon.
            rounded : if set to True, return the rounded pixel coordinates, otherwise return the float values
            check_valid : bool, if set to True, will check that all pixels are in the valid range. 

        Returns:
            (x_pixel,y_pixel)

        """

        # Convert coordinates to map system if provided in lat/lon and image
        # is projected (rather than geographic)
        if latlon and self.proj is not None:
            x, y = self.proj(x, y)

        # Shift to the centre of the pixel
        x = np.array(x - self.xres / 2)
        y = np.array(y - self.yres / 2)

        g0, g1, g2, g3, g4, g5 = self.trans
        if g2 == 0:
            xPixel = (x - g0) / float(g1)
            yPixel = (y - g3 - xPixel * g4) / float(g5)
        else:
            xPixel = (y * g2 - x * g5 + g0 * g5 - g2 * g3) / float(g2 * g4 - g1 * g5)
            yPixel = (x - g0 - xPixel * g1) / float(g2)

        # Round if required
        if rounded:
            xPixel = np.round(xPixel)
            yPixel = np.round(yPixel)

        if not check_valid:
            return xPixel, yPixel

        # Check that pixel location is not outside image dimensions
        nx = self.ds.RasterXSize
        ny = self.ds.RasterYSize

        xPixel_new = np.copy(xPixel)
        yPixel_new = np.copy(yPixel)
        xPixel_new = np.fmin(xPixel_new, nx)
        yPixel_new = np.fmin(yPixel_new, ny)
        xPixel_new = np.fmax(xPixel_new, 0)
        yPixel_new = np.fmax(yPixel_new, 0)

        if np.any(xPixel_new != xPixel) or np.any(yPixel_new != yPixel):
            print ("Warning : some points are out of domain for file")

        return xPixel_new, yPixel_new
Example #25
def macr_exact_bruteforce(bitmask, lower_range=None, upper_range=None, FRAGMENT_COST=FRAGMENT_COST):
    """Compute a Minimum Average Cost Rectangle among all rectangles with lower
    left corner in lower_range and upper right corner in upper_range, by
    computing the cost of all possible rectangles.

    Returns (cost, rectangle)

    Note: Returns a rectangle with cost strictly greater than FRAGMENT_COST + 1 if bitmask contains
    no set pixels.
    """
    if bitmask.shape[0] * bitmask.shape[1] > 4000:
        raise Exception('macr_exact_bruteforce called on a large bitmask')

    lower_range = lower_range or ((0, 0), bitmask.shape)
    upper_range = upper_range or ((0, 0), bitmask.shape)
    lower_ext = tuple(np.subtract(lower_range[1], lower_range[0]))
    upper_ext = tuple(np.subtract(upper_range[1], upper_range[0]))
    cum0 = bitmask.cumsum(0)
    cum1 = bitmask.cumsum(1)
    cum01 = cum0.cumsum(1)

    tr = cum01[upper_range[0][0]:upper_range[1][
        0], upper_range[0][1]:upper_range[1][1]]
    tr = np.tile(tr.reshape((1, 1) + upper_ext), lower_ext + (1, 1))
    tl = (cum01 - cum0)[upper_range[0][0]:upper_range[1]
                        [0], lower_range[0][1]:lower_range[1][1]]
    tl = np.tile(tl.transpose().reshape(
        (1, lower_ext[1], upper_ext[0], 1)), (lower_ext[0], 1, 1, upper_ext[1]))
    br = (cum01 - cum1)[lower_range[0][0]:lower_range[1]
                        [0], upper_range[0][1]:upper_range[1][1]]
    br = np.tile(br.reshape((lower_ext[0], 1, 1, upper_ext[
                 1])), (1, lower_ext[1], upper_ext[0], 1))
    bl = (cum01 - cum0 - cum1 + bitmask)[lower_range[0][0]:lower_range[1][0],
                                         lower_range[0][1]:lower_range[1][1]]
    bl = np.tile(bl.reshape(lower_ext + (1, 1)), (1, 1) + upper_ext)

    indices = np.indices(lower_ext + upper_ext)
    covered = (tr - tl - br + bl) * \
        ((indices[2] >= indices[0]) & (indices[3] >= indices[1]))

    cost = (
        ((upper_range[0][0] - lower_range[0][0]) + indices[2] - indices[0] + 1) *
        ((upper_range[0][1] - lower_range[0][1]) + indices[3] - indices[1] + 1)
    )
    cost = FRAGMENT_COST + np.fmax(cost, 1)
    # print cost

    avg_cost = cost / np.fmax(covered, 0.1)
    # print avg_cost
    argmin_local = np.unravel_index(np.argmin(avg_cost), avg_cost.shape)
    argmin = (
        argmin_local[0] + lower_range[0][0],
        argmin_local[1] + lower_range[0][1],
        argmin_local[2] + upper_range[0][0] + 1,
        argmin_local[3] + upper_range[0][1] + 1
    )
    return avg_cost[argmin_local], argmin
Example #26
    def weights(cls, ld, w_ld, N1, N2, M, h1, h2, rho_g, intercept_gencov=None,
                intercept_hsq1=None, intercept_hsq2=None, ii=None):
        '''
        Regression weights.

        Parameters
        ----------
        ld : np.matrix with shape (n_snp, 1)
            LD Scores (non-partitioned)
        w_ld : np.matrix with shape (n_snp, 1)
            LD Scores (non-partitioned) computed with sum r^2 taken over only those SNPs included
            in the regression.
        M : float > 0
            Number of SNPs used for estimating LD Score (need not equal number of SNPs included in
            the regression).
        N1, N2 :  np.matrix of ints > 0 with shape (n_snp, 1)
            Number of individuals sampled for each SNP for each study.
        h1, h2 : float in [0,1]
            Heritability estimates for each study.
        rho_g : float in [0,1]
            Genetic covariance estimate.
        intercept : float
            Genetic covariance intercept, on the z1*z2 scale (so should be Ns*rho/sqrt(N1*N2)).

        Returns
        -------
        w : np.matrix with shape (n_snp, 1)
            Regression weights. Approx equal to reciprocal of conditional variance function.

        '''
        M = float(M)
        if intercept_gencov is None:
            intercept_gencov = 0
        if intercept_hsq1 is None:
            intercept_hsq1 = 1
        if intercept_hsq2 is None:
            intercept_hsq2 = 1

        h1, h2 = max(h1, 0.0), max(h2, 0.0)
        h1, h2 = min(h1, 1.0), min(h2, 1.0)
        rho_g = min(rho_g, 1.0)
        rho_g = max(rho_g, -1.0)
        ld = np.fmax(ld, 1.0)
        w_ld = np.fmax(w_ld, 1.0)
        a = np.multiply(N1, h1 * ld) / M + intercept_hsq1
        b = np.multiply(N2, h2 * ld) / M + intercept_hsq2
        sqrt_n1n2 = np.sqrt(np.multiply(N1, N2))
        c = np.multiply(sqrt_n1n2, rho_g * ld) / M + intercept_gencov
        try:
            het_w = 1.0 / (np.multiply(a, b) + np.square(c))
        except FloatingPointError:  # bizarre error; should never happen
            raise FloatingPointError('Why did you set hsq intercept <= 0?')

        oc_w = 1.0 / w_ld
        w = np.multiply(het_w, oc_w)
        return w
Example #27
def compute_akt_change(pi3k_value, time_value, initial_values, mfs):
	"""Rules-
		If pi3k is high and time is high then positive_change_akt is high
		If pi3k is high1 and time is low then positive_change_akt is low
		If pi3k is low then positive_change_akt is low
		If pi3k is low and time is high then negative_change_akt is high
		If pi3k is low and time is low then negative_change_akt is low"""

	###Positive Change
	#Antecedent 1
	f = interp1d(initial_values[0][0], mfs[0][1])	
	a1_1 = f(pi3k_value) #pi3k_high[pi3k == pi3k_value]
	f = interp1d(initial_values[2], mfs[2][1])
	a1_2 = f(time_value) #time_high[time == time_value]

	a1 = min(a1_1, a1_2)
	c1 = np.fmin(a1, mfs[1][3]) #positive_change_akt is high

	#Antecedent 2
	f = interp1d(initial_values[0][0], mfs[0][6])
	a2_1 = f(pi3k_value) #pi3k_high[pi3k == pi3k_value]
	f = interp1d(initial_values[2], mfs[2][0])
	a2_2 = f(time_value) #time_low[time == time_value]

	a2 = min(a2_1, a2_2)
	c2 = np.fmin( a2, mfs[1][2]) #positive_change_akt is low

	c_com_positive = np.fmax(c1,c2)

	f = interp1d(initial_values[0][0], mfs[0][0])
	a3 = f(pi3k_value)
	c3 = np.fmin(a3, mfs[1][2])
	c_com_positive = np.fmax(c_com_positive, c3)
	pos_change = fuzz.defuzz( initial_values[1][1], c_com_positive, 'centroid') #initial_values[1][1] is positive_change_akt

	###Negative Change

	#Antecedent 3
	'''f = interp1d(initial_values[0][0], mfs[0][0])
	a3_1 = f(pi3k_value) #pi3k_low[pi3k == pi3k_value]
	a3_2 = a1_2 #time_high[time == time_value]

	a3 = min(a3_1, a3_2)
	c3 = np.fmin(a3, mfs[1][5]) #mfs[1][5] is negative_change_akt_high

	#Antecedent 4
	a4_1 = a3_1 #pi3k_low[pi3k == pi3k_value]
	a4_2 = a2_2 #time_low[time == time_value]

	a4 = min(a4_1, a4_2)
	c4 = np.fmin(a4, mfs[1][4]) #mfs[1][4] is negative_change_akt_low

	c_com_negative = np.fmax(c3, c4)
	neg_change = fuzz.defuzz(initial_values[1][2], c_com_negative, 'centroid') #initial_values[1][2] is negative_change_akt'''

	return pos_change 
Example #28
def Fmat(tab, spec, *XYf):
    r"""Material flux: c_s (\rho*e + \rho (C_s**2/2) + P)
    Only works with eospac unit backend
    """
    XYf = list(XYf)
    dens = XYf[0]*1e3   # g/cc -> kg/m^3
    Ut_DT = np.fmax(0, tab.get_table('U{s}_DT', spec)(*XYf)) * 1e6          # MJ/kg -> J/kg 
    Pt_DT = np.fmax(0, tab.get_table('P{s}_DT', spec)(*XYf)) * 1e9     # GPa -> Pa
    C_s = tab.q['Cs2', spec](*XYf)**0.5  * 1e3   # km/s -> m/s
    return  C_s*(dens*Ut_DT + Pt_DT + dens*C_s**2/2 )
Example #29
    def compute_expected_improvement(self, force_monte_carlo=False, force_1d_ei=False):
        r"""Compute the expected improvement at ``points_to_sample``, with ``points_being_sampled`` concurrent points being sampled.

        .. Note:: These comments were copied from
          :meth:`moe.optimal_learning.python.interfaces.expected_improvement_interface.ExpectedImprovementInterface.compute_expected_improvement`.

        ``points_to_sample`` is the "q" and ``points_being_sampled`` is the "p" in q,p-EI.

        Computes the expected improvement ``EI(Xs) = E_n[[f^*_n(X) - min(f(Xs_1),...,f(Xs_m))]^+]``, where ``Xs``
        are potential points to sample (union of ``points_to_sample`` and ``points_being_sampled``) and ``X`` are
        already sampled points.  The ``^+`` indicates that the expression in the expectation evaluates to 0 if it
        is negative.  ``f^*(X)`` is the MINIMUM over all known function evaluations (``points_sampled_value``),
        whereas ``f(Xs)`` are *GP-predicted* function evaluations.

        In words, we are computing the expected improvement (over the current ``best_so_far``, best known
        objective function value) that would result from sampling (aka running new experiments) at
        ``points_to_sample`` with ``points_being_sampled`` concurrent/ongoing experiments.

        In general, the EI expression is complex and difficult to evaluate; hence we use Monte-Carlo simulation to approximate it.
        When faster (e.g., analytic) techniques are available, we will prefer them.

        The idea of the MC approach is to repeatedly sample at the union of ``points_to_sample`` and
        ``points_being_sampled``. This is analogous to gaussian_process_interface.sample_point_from_gp,
        but we sample ``num_union`` points at once:
        ``y = \mu + Lw``
        where ``\mu`` is the GP-mean, ``L`` is the ``chol_factor(GP-variance)`` and ``w`` is a vector
        of ``num_union`` draws from N(0, 1). Then:
        ``improvement_per_step = max(max(best_so_far - y), 0.0)``
        Observe that the inner ``max`` means only the smallest component of ``y`` contributes in each iteration.
        We compute the improvement over many random draws and average.

        :param force_monte_carlo: whether to force monte carlo evaluation (vs using fast/accurate analytic eval when possible)
        :type force_monte_carlo: bool
        :param force_1d_ei: whether to force using the 1EI method. Used for testing purposes only. Takes precedence when force_monte_carlo is also True
        :type force_1d_ei: bool
        :return: the expected improvement from sampling ``points_to_sample`` with ``points_being_sampled`` concurrent experiments
        :rtype: float64

        """
        num_points = self.num_to_sample + self.num_being_sampled
        union_of_points = numpy.reshape(
            numpy.append(self._points_to_sample, self._points_being_sampled), (num_points, self.dim)
        )

        mu_star = self._gaussian_process.compute_mean_of_points(union_of_points)
        var_star = self._gaussian_process.compute_variance_of_points(union_of_points)

        if force_monte_carlo is False and force_1d_ei is False:
            var_star = numpy.fmax(MINIMUM_VARIANCE_EI, var_star)  # TODO(272): Check if this is needed.
            return self._compute_expected_improvement_qd_analytic(mu_star, var_star)
        elif force_1d_ei is True:
            var_star = numpy.fmax(MINIMUM_VARIANCE_EI, var_star)
            return self._compute_expected_improvement_1d_analytic(mu_star[0], var_star[0, 0])
        else:
            return self._compute_expected_improvement_monte_carlo(mu_star, var_star)
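The Monte-Carlo estimator the docstring describes can be sketched in plain numpy, independent of the MOE classes (a sketch under those assumptions, not the library's implementation):

import numpy as np

def mc_expected_improvement(mu, cov, best_so_far, num_mc=10000, seed=None):
    rng = np.random.default_rng(seed)
    L = np.linalg.cholesky(cov)                 # cov must be positive definite
    w = rng.standard_normal((num_mc, mu.shape[0]))
    y = mu + w @ L.T                            # draws from N(mu, cov)
    improvement = np.fmax(best_so_far - y.min(axis=1), 0.0)
    return improvement.mean()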
Example #30
    def compute(self, clusters, force_no_dropout=False):
        if not force_no_dropout:
            self.counter += 1
            self.counter %= 50
            if self.counter == 0:
                self.randomize_dropout(0.5)
        # clusters are a dictionary of arrays of brain data.

        # this is where we leave the products of multiplications
        output_multiply_vector = clusters[self.output_name][MULTIPLY_ROW, :]

        # this is where we sum up various products of multiplications
        output_add_vector = clusters[self.output_name][ADD_ROW, :]
        output_add_vector.fill(0.0)

        # for each input to our block
        for input_name, input_size in self.input_info:
            input_vector = clusters[input_name][FINAL_ROW, :]
            # multiply
            if force_no_dropout:
                dot(input_vector, self.matrices[input_name],
                    out=output_multiply_vector)
            else:
                dot(input_vector, self.dropout_matrices[input_name],
                    out=output_multiply_vector)
            # add
            output_add_vector += output_multiply_vector

        # apply sigmoid function
        output_final_vector = clusters[self.output_name][FINAL_ROW, :]

        np.clip(output_add_vector, -100, 100, out=output_add_vector)

        if (self.activation == EXPIT):
            expit(output_add_vector, out=output_final_vector)
        elif (self.activation == TANH):
            tanh(output_add_vector, out=output_final_vector)
        elif (self.activation == NORMALIZE):
            output_final_vector[:] = output_add_vector
            norm = np.linalg.norm(output_add_vector)
            if norm != 0.0:
                output_final_vector *= 1 / norm
                output_final_vector *= 1 / (1 + exp(-norm))
        elif (self.activation == RELU):
            fmax(output_add_vector, 0.0, out=output_final_vector)
        elif (self.activation is None):
            output_final_vector[:] = output_add_vector
        else:
            raise NotImplementedError("Invalid activation", self.activation)

        if np.isnan(output_final_vector).any():
            self.loginformation(clusters)
            raise Exception("Activation produced nans.")
Example #31
    def u_prime_inv(self, x):
        eps = 1e-8

        return np.fmax(x, eps)**(-1 / self.sigma)
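This is the inverse marginal utility of CRRA preferences, u'(c) = c^{-\sigma}; flooring the argument at eps keeps the negative power well defined:

$$(u')^{-1}(x) = \max(x, \varepsilon)^{-1/\sigma}.$$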
Example #32
def cmeans_predict(test_data, cntr_trained, m, error, maxiter,
                   metric='euclidean',
                   init=None,
                   seed=None):
    """
    Prediction of new data given a trained fuzzy c-means framework [1].

    Parameters
    ----------
    test_data : 2d array, size (S, N)
        New, independent data set to be predicted based on trained c-means
        from ``cmeans``. N is the number of data sets; S is the number of
        features within each sample vector.
    cntr_trained : 2d array, size (c, S)
        Location of trained centers from prior training c-means.
    m : float
        Array exponentiation applied to the membership function u_old at each
        iteration, where U_new = u_old ** m.
    error : float
        Stopping criterion; stop early if the norm of (u[p] - u[p-1]) < error.
    maxiter : int
        Maximum number of iterations allowed.
    metric: string
        By default is set to euclidean. Passes any option accepted by
        ``scipy.spatial.distance.cdist``.
    init : 2d array, size (c, N)
        Initial fuzzy c-partitioned matrix. If none provided, algorithm is
        randomly initialized.
    seed : int
        If provided, sets random seed of init. No effect if init is
        provided. Mainly for debug/testing purposes.

    Returns
    -------
    u : 2d array, (c, N)
        Final fuzzy c-partitioned matrix.
    u0 : 2d array, (c, N)
        Initial guess at fuzzy c-partitioned matrix (either provided init or
        random guess used if init was not provided).
    d : 2d array, (c, N)
        Final Euclidian distance matrix.
    jm : 1d array, length P
        Objective function history.
    p : int
        Number of iterations run.
    fpc : float
        Final fuzzy partition coefficient.

    Notes
    -----
    Ross et al. [1]_ did not include a prediction algorithm to go along with
    fuzzy c-means. This prediction algorithm works by repeating the clustering
    with fixed centers, then efficiently finds the fuzzy membership at all
    points.

    References
    ----------
    .. [1] Ross, Timothy J. Fuzzy Logic With Engineering Applications, 3rd ed.
           Wiley. 2010. ISBN 978-0-470-74376-8 pp 352-353, eq 10.28 - 10.35.
    """
    c = cntr_trained.shape[0]

    # Setup u0
    if init is None:
        if seed is not None:
            np.random.seed(seed=seed)
        n = test_data.shape[1]
        u0 = np.random.rand(c, n)
        u0 = normalize_columns(u0)
        init = u0.copy()
    u0 = init
    u = np.fmax(u0, np.finfo(np.float64).eps)

    # Initialize loop parameters
    jm = np.zeros(0)
    p = 0

    # Main cmeans loop
    while p < maxiter - 1:
        u2 = u.copy()
        [u, Jjm, d] = _cmeans_predict0(test_data, cntr_trained, u2, c, m,
                                       metric)
        jm = np.hstack((jm, Jjm))
        p += 1

        # Stopping rule
        if np.linalg.norm(u - u2) < error:
            break

    # Final calculations
    error = np.linalg.norm(u - u2)
    fpc = _fp_coeff(u)

    return u, u0, d, jm, p, fpc
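A hedged usage sketch (assumes training with skfuzzy's cmeans; data and test arrays are shaped (features, samples)):

import numpy as np
import skfuzzy as fuzz

train = np.random.rand(2, 200)
cntr, u, u0, d, jm, p, fpc = fuzz.cmeans(train, c=3, m=2.0,
                                         error=1e-5, maxiter=1000)
test = np.random.rand(2, 50)
u_pred = cmeans_predict(test, cntr, m=2.0, error=1e-5, maxiter=1000)[0]
labels = np.argmax(u_pred, axis=0)   # hard cluster per test sample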
Example #33
def numba_isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    return np.fabs(a - b) <= np.fmax(rel_tol * np.fmax(np.fabs(a), np.fabs(b)),
                                     abs_tol)
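A sketch of its behaviour, mirroring math.isclose elementwise:

import numpy as np

a = np.array([1.0, 1.0 + 1e-10, 2.0])
b = np.array([1.0, 1.0, 2.1])
print(numba_isclose(a, b))               # -> [ True  True False]
print(numba_isclose(a, b, abs_tol=0.2))  # -> [ True  True  True]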
Example #34
def motor_rmp(value):
    # Generate universe variables
    volt = np.arange(0, 6, 1)
    rpm = np.arange(0, 500, 100)

    # Generate fuzzy membership functions for input
    i_null = fuzz.trimf(volt, [0, 0, 1])
    i_zero = fuzz.trimf(volt, [0, 1, 2])
    i_small = fuzz.trimf(volt, [1, 2, 3])
    i_medium = fuzz.trimf(volt, [2, 3, 4])
    i_large = fuzz.trimf(volt, [3, 4, 5])
    i_very_large = fuzz.trimf(volt, [4, 5, 5])

    # Output membership function

    o_zero = fuzz.trimf(rpm, [0, 0, 100])
    o_small = fuzz.trimf(rpm, [0, 100, 200])
    o_medium = fuzz.trimf(rpm, [100, 200, 300])
    o_large = fuzz.trimf(rpm, [200, 300, 400])
    o_very_large = fuzz.trimf(rpm, [300, 400, 400])

    # membership value calculation using the value
    volt_level_null = fuzz.interp_membership(volt, i_null, value)
    volt_level_zero = fuzz.interp_membership(volt, i_zero, value)
    volt_level_small = fuzz.interp_membership(volt, i_small, value)
    volt_level_medium = fuzz.interp_membership(volt, i_medium, value)
    volt_level_large = fuzz.interp_membership(volt, i_large, value)
    volt_level_very_large = fuzz.interp_membership(volt, i_very_large, value)

    active_rule1 = np.fmax(volt_level_null, volt_level_zero)
    rpm_activation_zero = np.fmin(active_rule1, o_zero)
    rpm_activation_small = np.fmin(volt_level_small, o_small)
    rpm_activation_medium = np.fmin(volt_level_medium, o_medium)
    rpm_activation_large = np.fmin(volt_level_large, o_large)
    rpm_activation_very_large = np.fmin(volt_level_very_large, o_very_large)

    rmp_dummy = np.zeros_like(rpm)
    aggregated = np.fmax(
        rpm_activation_zero,
        np.fmax(
            rpm_activation_small,
            np.fmax(rpm_activation_medium,
                    np.fmax(rpm_activation_large, rpm_activation_very_large))))

    # Calculate defuzzified result
    rpm_out = fuzz.defuzz(rpm, aggregated, 'centroid')
    print(rpm_out)
    rpm_activation = fuzz.interp_membership(rpm, aggregated,
                                            rpm_out)  # for plot

    # Visualize
    fig, (ax0, ax1, ax2, ax3) = plt.subplots(nrows=4, figsize=(8, 9))

    ax0.plot(
        volt,
        i_null,
        'b',
        linewidth=1.5,
    )
    ax0.plot(
        volt,
        i_zero,
        'r',
        linewidth=1.5,
    )
    ax0.plot(
        volt,
        i_small,
        'b',
        linewidth=1.5,
    )
    ax0.plot(
        volt,
        i_medium,
        'r',
        linewidth=1.5,
    )
    ax0.plot(
        volt,
        i_large,
        'b',
        linewidth=1.5,
    )
    ax0.plot(
        volt,
        i_very_large,
        'r',
        linewidth=1.5,
    )
    ax0.set_title('INPUT membership function (Volt)')
    ax0.legend()

    ax1.plot(
        rpm,
        o_zero,
        'r',
        linewidth=1.5,
    )
    ax1.plot(
        rpm,
        o_small,
        'b',
        linewidth=1.5,
    )
    ax1.plot(
        rpm,
        o_medium,
        'r',
        linewidth=1.5,
    )
    ax1.plot(
        rpm,
        o_large,
        'b',
        linewidth=1.5,
    )
    ax1.plot(
        rpm,
        o_very_large,
        'r',
        linewidth=1.5,
    )
    ax1.set_title('OUTPUT membership function (RPM)')
    ax1.legend()

    ax2.fill_between(rpm,
                     rmp_dummy,
                     rpm_activation_zero,
                     facecolor='b',
                     alpha=0.7)
    ax2.plot(
        rpm,
        rpm_activation_zero,
        'b',
        linewidth=0.5,
        linestyle='--',
    )
    ax2.fill_between(rpm,
                     rmp_dummy,
                     rpm_activation_small,
                     facecolor='g',
                     alpha=0.7)
    ax2.plot(rpm, rpm_activation_small, 'g', linewidth=0.5, linestyle='--')
    ax2.fill_between(rpm,
                     rmp_dummy,
                     rpm_activation_medium,
                     facecolor='r',
                     alpha=0.7)
    ax2.plot(rpm, rpm_activation_medium, 'r', linewidth=0.5, linestyle='--')
    ax2.fill_between(rpm,
                     rmp_dummy,
                     rpm_activation_large,
                     facecolor='r',
                     alpha=0.7)
    ax2.plot(rpm, rpm_activation_large, 'r', linewidth=0.5, linestyle='--')
    ax2.fill_between(rpm,
                     rmp_dummy,
                     rpm_activation_very_large,
                     facecolor='r',
                     alpha=0.7)
    ax2.plot(rpm,
             rpm_activation_very_large,
             'r',
             linewidth=0.5,
             linestyle='--')
    ax2.set_title('Output membership activity')
    ax2.legend()

    ax3.plot(
        rpm,
        o_zero,
        'b',
        linewidth=0.5,
        linestyle='--',
    )
    ax3.plot(rpm, o_small, 'g', linewidth=0.5, linestyle='--')
    ax3.plot(rpm, o_medium, 'r', linewidth=0.5, linestyle='--')
    ax3.plot(
        rpm,
        o_large,
        'y',
        linewidth=0.5,
        linestyle='--',
    )
    ax3.plot(rpm, o_very_large, 'v', linewidth=0.5, linestyle='--')

    ax3.fill_between(rpm, rmp_dummy, aggregated, facecolor='Orange', alpha=0.7)
    ax3.plot([rpm_out, rpm_out], [0, rpm_activation],
             'k',
             linewidth=1.5,
             alpha=0.9)
    ax3.set_title('Aggregated membership and result (line)')
    ax3.legend()

    # Turn off top/right axes
    for ax in (ax0, ax1, ax2, ax3):
        ax.spines['top'].set_visible(False)
        ax.spines['right'].set_visible(False)
        ax.get_xaxis().tick_bottom()
        ax.get_yaxis().tick_left()

    plt.tight_layout()

    plt.savefig('output/out.png')
    # cv2.imwrite('output/output.png',(cv2.resize(cv2.imread('output/out.png')),(300,400)))

    return rpm_out
Ejemplo n.º 35
    def result_viz(self, interp='bicubic', person_specific=False):
        figs = []
        if person_specific:
            for idx, (head_box, gaze_prediction, final_map) in enumerate(
                    zip(self.head_boxes, self.predictions, self.final_maps),
                    1):
                if self.inputs.input_image.shape[1] > 1600:
                    fig = plt.figure(
                        idx,
                        figsize=(self.inputs.input_image.shape[1] / 1000.,
                                 self.inputs.input_image.shape[0] / 1000.),
                        dpi=100)
                else:
                    fig = plt.figure(idx, dpi=100)
                image_with_frame = np.copy(self.inputs.input_image)

                top_left_x = int(head_box[0])
                top_left_y = int(head_box[1])
                bottom_right_x = int(head_box[2])
                bottom_right_y = int(head_box[3])

                thickness, radius = 3, 10
                if self.inputs.input_image.shape[1] > 1600:
                    thickness, radius = 10, 30

                # draw head box
                cv2.rectangle(image_with_frame, (top_left_x, top_left_y),
                              (bottom_right_x, bottom_right_y),
                              color=(20, 200, 20),
                              thickness=thickness)
                # draw predicted gaze coordinate
                #cv2.circle(image_with_frame, center=(gaze_prediction[1], gaze_prediction[0]), color=(200, 20, 20), radius=10, thickness=-1)
                # draw line connecting center of the head box and the predicted gaze coordinate
                #cv2.line(image_with_frame, pt1=((top_left_x+bottom_right_x+1)>>1, (top_left_y+bottom_right_y)>>1), \
                #            pt2=(gaze_prediction[1], gaze_prediction[0]), color=(20, 200, 20), thickness=3)

                plt.imshow(image_with_frame)
                # plt.hold() was removed in Matplotlib 3; repeated imshow calls overlay by default
                plt.imshow(final_map, alpha=0.35)
                plt.axis('off')
                for axis in fig.axes:
                    axis.get_xaxis().set_visible(False)
                    axis.get_yaxis().set_visible(False)
                plt.tight_layout(pad=0.)
                figs.append(fig)

        else:
            image_with_frame = np.copy(self.inputs.input_image)
            if self.inputs.input_image.shape[1] > 1600:
                fig = plt.figure(
                    1,
                    figsize=(self.inputs.input_image.shape[1] / 500.,
                             self.inputs.input_image.shape[0] / 500.),
                    dpi=100)
            else:
                fig = plt.figure(1, dpi=100)

            # element-wise maximum computation for fusing the result from those 5 gaze maps
            final_map = self.final_maps[0]
            for gaze_map in self.final_maps[1:]:
                final_map = np.fmax(gaze_map, final_map)

            for head_box, gaze_prediction in zip(self.head_boxes,
                                                 self.predictions):
                top_left_x = int(head_box[0])
                top_left_y = int(head_box[1])
                bottom_right_x = int(head_box[2])
                bottom_right_y = int(head_box[3])

                thickness, radius = 3, 10
                if self.inputs.input_image.shape[1] > 1600:
                    thickness, radius = 10, 30

                # draw head box
                cv2.rectangle(image_with_frame, (top_left_x, top_left_y),
                              (bottom_right_x, bottom_right_y),
                              color=(20, 200, 20),
                              thickness=thickness)
                # draw predicted gaze coordinate
                #cv2.circle(image_with_frame, center=(gaze_prediction[1], gaze_prediction[0]), color=(220, 220, 255), radius=radius, thickness=-1)
                # draw line connecting center of the head box and the predicted gaze coordinate
                #cv2.line(image_with_frame, pt1=((top_left_x+bottom_right_x+1)>>1, (top_left_y+bottom_right_y)>>1), \
                #            pt2=(gaze_prediction[1], gaze_prediction[0]), color=(20, 200, 20), thickness=thickness)

            plt.imshow(image_with_frame)
            # plt.hold() was removed in Matplotlib 3; repeated imshow calls overlay by default
            plt.imshow(final_map, alpha=0.35)
            plt.axis('off')
            for axis in fig.axes:
                axis.get_xaxis().set_visible(False)
                axis.get_yaxis().set_visible(False)

            plt.tight_layout(pad=0.)
            figs.append(fig)

        return figs
Ejemplo n.º 36
 "arctan2":
 F.atan2,
 "bitwise_and":
 lambda c1, c2: c1.bitwiseAND(c2),
 "bitwise_or":
 lambda c1, c2: c1.bitwiseOR(c2),
 "bitwise_xor":
 lambda c1, c2: c1.bitwiseXOR(c2),
 "copysign":
 F.pandas_udf(lambda s1, s2: np.copysign(s1, s2), DoubleType()),
 "float_power":
 F.pandas_udf(lambda s1, s2: np.float_power(s1, s2), DoubleType()),
 "floor_divide":
 F.pandas_udf(lambda s1, s2: np.floor_divide(s1, s2), DoubleType()),
 "fmax":
 F.pandas_udf(lambda s1, s2: np.fmax(s1, s2), DoubleType()),
 "fmin":
 F.pandas_udf(lambda s1, s2: np.fmin(s1, s2), DoubleType()),
 "fmod":
 F.pandas_udf(lambda s1, s2: np.fmod(s1, s2), DoubleType()),
 "gcd":
 F.pandas_udf(lambda s1, s2: np.gcd(s1, s2), DoubleType()),
 "heaviside":
 F.pandas_udf(lambda s1, s2: np.heaviside(s1, s2), DoubleType()),
 "hypot":
 F.hypot,
 "lcm":
 F.pandas_udf(lambda s1, s2: np.lcm(s1, s2), DoubleType()),
 "ldexp":
 F.pandas_udf(lambda s1, s2: np.ldexp(s1, s2), DoubleType()),
 "left_shift":
Ejemplo n.º 37
    def u_prime(self, c):
        # CRRA marginal utility u'(c) = c**(-sigma), with consumption floored
        # at eps so non-positive values cannot blow up the power
        eps = 1e-8

        return np.fmax(c, eps)**(-self.sigma)
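
A quick standalone check of the floor (sigma here is a hypothetical risk-aversion value; the method above reads it from self.sigma):

import numpy as np

sigma = 2.0  # hypothetical value; the class stores this as self.sigma
c = np.array([0.0, 1e-12, 0.5, 2.0])
marginal = np.fmax(c, 1e-8) ** (-sigma)
# the first two entries are capped at (1e-8)**-2 = 1e16 instead of dividing by zero
print(marginal)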
Ejemplo n.º 38
ax0.plot(x_tip, tip_md, 'g', linewidth=0.5, linestyle='--')
ax0.fill_between(x_tip, tip0, tip_activation_hi, facecolor='r', alpha=0.7)
ax0.plot(x_tip, tip_hi, 'r', linewidth=0.5, linestyle='--')
ax0.set_title('Output membership activity')

# Turn off top/right axes
for ax in (ax0, ):
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    ax.get_xaxis().tick_bottom()
    ax.get_yaxis().tick_left()

plt.tight_layout()

# Aggregate all three output membership functions together
aggregated = np.fmax(tip_activation_lo,
                     np.fmax(tip_activation_md, tip_activation_hi))

# Calculate defuzzified result
tip = fuzz.defuzz(x_tip, aggregated, 'centroid')
tip_activation = fuzz.interp_membership(x_tip, aggregated, tip)  # for plot

# Visualize this
fig, ax0 = plt.subplots(figsize=(8, 3))

ax0.plot(
    x_tip,
    tip_lo,
    'b',
    linewidth=0.5,
    linestyle='--',
)
def numba_isclose(a, b, rel_tol=1e-09, abs_tol=0.0):
    # rel_tol: relative tolerance
    # abs_tol: absolute tolerance
    return np.fabs(a - b) <= np.fmax(rel_tol * np.fmax(np.fabs(a), np.fabs(b)),
                                     abs_tol)
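
A small usage example for the helper above, which mirrors math.isclose semantics elementwise:

import numpy as np

a = np.array([1.0, 1.0 + 1e-10, 100.0])
b = np.array([1.0, 1.0, 100.1])
print(numba_isclose(a, b))               # [ True  True False]
print(numba_isclose(a, b, abs_tol=0.2))  # [ True  True  True]: 0.1 <= abs_tol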
Ejemplo n.º 40
np.random.seed(1)

lowD_x = np.zeros((nClass * num, 2))
train_y = np.zeros((nClass * num, 1))
for i in range(nClass):
    lowD_x[i * num:(i + 1) *
           num] = np.tile(c[i, :], (num, 1)) + sigma * np.random.randn(num, 2)
    # Class labels: 0, 1, 2...
    train_y[i * num:(i + 1) * num] = i * np.ones((num, 1))

train_y0 = train_y

# sigmoid(max(0, Wx)): a random linear map followed by a ReLU and a sigmoid
# nonlinearity (the commented variants below try other compositions)

W = np.random.randn(100, 2)
train_x = sigmoid(np.fmax(0, np.dot(lowD_x, W.T)))

#W1 = np.random.randn(10, 2)
#W2 = np.random.randn(100, 10)
#t1 = sigmoid(np.dot(lowD_x, W1.T))
#t2 = sigmoid(np.dot(t1, W2.T))
#train_x = t2

#W = np.random.randn(100, 2)
#train_x = np.tanh(sigmoid(np.dot(lowD_x, W.T)))

#W = np.random.randn(100, 2)
#train_x = np.power(sigmoid(np.dot(lowD_x, W.T)), 2)

#l1 = np.maximum(np.dot(lowD_x, W.T), 0)
#train_x = np.tanh(np.dot(l1, np.random.randn(dim, dim)))
Ejemplo n.º 41
    def train(self, memory):
        minibatch = random.sample(memory, self.batch_size)
        state_stack = [mini[0] for mini in minibatch]
        next_state_stack = [mini[1] for mini in minibatch]
        action_stack = [mini[2] for mini in minibatch]
        reward_stack = [mini[3] for mini in minibatch]
        done_stack = [mini[4] for mini in minibatch]
        done_stack = [int(i) for i in done_stack]

        if self.model == 'IQN':
            t = np.random.rand(self.batch_size, self.num_support)
            Q_next_state = self.sess.run(self.target_network, feed_dict={self.state: next_state_stack, self.tau: t})
            next_action = np.argmax(np.mean(Q_next_state, axis=2), axis=1)
            Q_next_state_next_action = [Q_next_state[i, action, :] for i, action in enumerate(next_action)]
            T_theta = [reward + (1-done)*self.gamma*Q for reward, Q, done in zip(reward_stack, Q_next_state_next_action, done_stack)]
            return self.sess.run([self.train_op, self.loss], feed_dict={self.state: state_stack, self.action: action_stack, self.tau:t, self.Y: T_theta})


        elif self.model == 'DQN':
            Q_next_state = self.sess.run(self.target_network, feed_dict={self.state: next_state_stack})
            next_action = np.argmax(Q_next_state, axis=1)
            Q_next_state_next_action = [s[a] for s, a in zip(Q_next_state, next_action)]
            T_theta = [[reward + (1-done)*self.gamma * Q] for reward, Q, done in zip(reward_stack, Q_next_state_next_action, done_stack)]
            return self.sess.run([self.train_op, self.loss],
                                 feed_dict={self.state: state_stack, self.action: action_stack, self.dqn_Y: T_theta})

        elif self.model == 'C51':
            z_space = tf.tile(tf.reshape(self.z, [1, 1, self.num_support]), [self.batch_size, self.action_size, 1])
            prob_next_state = self.sess.run(self.target_network, feed_dict={self.state: next_state_stack})
            Q_next_state = self.sess.run(self.target_action_support * z_space, feed_dict={self.state: next_state_stack})
            next_action = np.argmax(np.sum(Q_next_state, axis=2), axis=1)
            prob_next_state_action = [prob_next_state[i, action, :] for i, action in enumerate(next_action)]

            m_prob = np.zeros([self.batch_size, self.num_support])

            for i in range(self.batch_size):
                for j in range(self.num_support):
                    # Bellman-update each support atom and clip it into [V_min, V_max]
                    Tz = np.fmin(self.V_max, np.fmax(self.V_min, reward_stack[i] + (1 - done_stack[i]) * self.gamma * (self.V_min + j * self.dz)))
                    bj = (Tz - self.V_min) / self.dz

                    lj = np.floor(bj).astype(int)
                    uj = np.ceil(bj).astype(int)

                    blj = bj - lj
                    buj = uj - bj

                    m_prob[i, lj] += (done_stack[i] + (1 - done_stack[i]) * (prob_next_state_action[i][j])) * buj
                    m_prob[i, uj] += (done_stack[i] + (1 - done_stack[i]) * (prob_next_state_action[i][j])) * blj

            m_prob = m_prob / m_prob.sum(axis=1, keepdims=True)

            return self.sess.run([self.train_op, self.loss],
                                 feed_dict={self.state: state_stack, self.action: action_stack, self.M: m_prob})

        elif self.model == 'QRDQN':
            Q_next_state = self.sess.run(self.target_network, feed_dict={self.state: next_state_stack})
            next_action = np.argmax(np.mean(Q_next_state, axis=2), axis=1)
            Q_next_state_next_action = [Q_next_state[i, action, :] for i, action in enumerate(next_action)]
            Q_next_state_next_action = np.sort(Q_next_state_next_action)
            T_theta = [np.ones(self.num_support) * reward if done else reward + self.gamma * Q for reward, Q, done in
                       zip(reward_stack, Q_next_state_next_action, done_stack)]
            return self.sess.run([self.train_op, self.loss],
                                 feed_dict={self.state: state_stack, self.action: action_stack, self.Y: T_theta})
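
The C51 branch above projects each Bellman-updated support atom back onto the fixed support one (i, j) pair at a time. A hedged, vectorized sketch of the same per-sample projection (project_c51 and its parameter names are illustrative, not part of the class):

import numpy as np

def project_c51(prob_next, reward, done, z, gamma, v_min, v_max):
    # prob_next: (num_support,) next-state distribution for the chosen action
    # z: (num_support,) uniformly spaced support atoms
    dz = z[1] - z[0]
    Tz = np.clip(reward + (1.0 - done) * gamma * z, v_min, v_max)
    bj = (Tz - v_min) / dz
    lj = np.floor(bj).astype(int)
    uj = np.ceil(bj).astype(int)
    m = np.zeros_like(prob_next)
    np.add.at(m, lj, prob_next * (uj - bj))  # share of mass to the lower atom
    np.add.at(m, uj, prob_next * (bj - lj))  # share of mass to the upper atom
    exact = lj == uj  # bj landed exactly on an atom: both shares above are zero
    np.add.at(m, lj[exact], prob_next[exact])
    return m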
def make_status_df(
    ens_paths: dict,
    status_file: str,
) -> pd.DataFrame:
    """Return DataFrame of information from status.json files.
    *Finds status.json filepaths.
    For jobs:
    *Loads data into pandas DataFrames.
    *Calculates runtimes and normalized runtimes.
    *Creates hoverinfo column to be used in visualization.
    For realizations:
    *Creates DataFrame of success/failure and total running time.
    """

    parameter_df = load_parameters(
        ensemble_paths=ens_paths,
        ensemble_set_name="EnsembleSet",
        filter_file=None,
    )

    # sub-method to process ensemble data when all realizations in ensemble have been processed
    def ensemble_post_processing() -> list:
        # add missing realizations to get whitespace in heatmap matrix
        if len(set(range(min(reals), max(reals) + 1))) > len(set(reals)):
            missing_df = ens_dfs[0].copy()
            missing_df["STATUS"] = "Realization not started"
            missing_df["RUNTIME"] = np.NaN
            missing_df["JOB_SCALED_RUNTIME"] = np.NaN
            missing_df["ENS_SCALED_RUNTIME"] = np.NaN
            for missing_real in set(range(min(reals),
                                          max(reals) + 1)).difference(
                                              set(reals)):
                ens_dfs.append(missing_df.copy())
                ens_dfs[-1]["REAL"] = missing_real
                ens_dfs[-1]["ENSEMBLE"] = ens
        # Concatenate realization DataFrames to an Ensemble DataFrame and store in list
        job_status_dfs.append(pd.concat(ens_dfs))
        # Find max running time of job in ensemble and create scaled columns
        job_status_dfs[-1]["JOB_MAX_RUNTIME"] = pd.concat(
            [ens_max_job_runtime] * (len(ens_dfs)))
        job_status_dfs[-1]["JOB_SCALED_RUNTIME"] = (
            job_status_dfs[-1]["RUNTIME"] /
            job_status_dfs[-1]["JOB_MAX_RUNTIME"])
        job_status_dfs[-1]["ENS_SCALED_RUNTIME"] = job_status_dfs[-1][
            "RUNTIME"] / np.amax(ens_max_job_runtime)
        # Return ensemble DataFrame list updated with the latest ensemble
        return job_status_dfs

    # find status filepaths
    ens_set = load_ensemble_set(ens_paths, filter_file=None)
    df = pd.concat([
        ens_set[ens].find_files(status_file).assign(ENSEMBLE=ens)
        for ens in ens_set.ensemblenames
    ])
    # Initial values for local variables
    job_status_dfs: list = []
    ens_dfs: list = []
    real_status: list = []
    ens_max_job_runtime = 1
    ens = ""
    reals: list = []

    # Loop through identified filepaths and get realization data
    for row in df.itertuples(index=False):
        # Load each json-file to a DataFrame for the realization
        with open(row.FULLPATH) as fjson:
            status_dict = json.load(fjson)
        real_df = pd.DataFrame(status_dict["jobs"])

        # If new ensemble, calculate ensemble scaled runtimes
        # for previous ensemble and reset temporary ensemble data
        if ens != row.ENSEMBLE:
            if ens == "":  # First ensemble
                ens = row.ENSEMBLE
            else:  # Store last ensemble and reset temporary ensemble data
                job_status_dfs = ensemble_post_processing()
                ens_max_job_runtime = 1
                ens_dfs = []
                ens = row.ENSEMBLE
                reals = []

        # Additional realization data into realization DataFrame
        real_df["RUNTIME"] = real_df["end_time"] - real_df["start_time"]
        real_df["REAL"] = row.REAL
        real_df["ENSEMBLE"] = row.ENSEMBLE
        real_df["REAL_SCALED_RUNTIME"] = real_df["RUNTIME"] / max(
            real_df["RUNTIME"].dropna())
        real_df = real_df[[
            "ENSEMBLE", "REAL", "RUNTIME", "REAL_SCALED_RUNTIME", "name",
            "status"
        ]].rename(columns={
            "name": "JOB",
            "status": "STATUS"
        })
        # Status DataFrame to be used with parallel coordinates
        if all(real_df["STATUS"] == "Success"):
            real_status.append({
                "ENSEMBLE": row.ENSEMBLE,
                "REAL": row.REAL,
                "STATUS": "Success",
                "STATUS_BOOL": 1,
                "RUNTIME": status_dict["end_time"] - status_dict["start_time"],
            })
        else:
            real_status.append({
                "ENSEMBLE": row.ENSEMBLE,
                "REAL": row.REAL,
                "STATUS": "Failure",
                "STATUS_BOOL": 0,
                "RUNTIME": None,
            })

        # Unique job IDs are needed to tell apart jobs that share a name within the same realization's json file
        real_df["JOB_ID"] = range(0, len(real_df["JOB"]))

        # Update max runtime for jobs in ensemble
        ens_max_job_runtime = np.fmax(real_df["RUNTIME"], ens_max_job_runtime)

        # Append realization to ensemble data
        reals.append(row.REAL)
        ens_dfs.append(real_df)

    # Add last ensemble
    job_status_dfs = ensemble_post_processing()
    job_status_df = pd.concat(job_status_dfs, sort=False)

    # Create hoverinfo
    job_status_df["HOVERINFO"] = (
        "Real: " + job_status_df["REAL"].astype(str) + "<br>" + "Job: #" +
        job_status_df["JOB_ID"].astype(str) + "<br>" +
        job_status_df["JOB"].astype(str) + "<br>" + "Running time: " +
        job_status_df["RUNTIME"].astype(str) + " s" + "<br>" + "Status: " +
        job_status_df["STATUS"])
    # Create dataframe of realization status and merge with realization parameters for parameter
    # parallel coordinates
    real_status_df = pd.DataFrame(real_status).merge(parameter_df,
                                                     on=["ENSEMBLE", "REAL"])
    # Has to be stored in one df due to webvizstore, see issue #206 in webviz-config
    return pd.concat([job_status_df, real_status_df],
                     keys=["job", "real"],
                     sort=False)
 def activate(x):
     # leaky ReLU with slope 0.1 on the negative side
     return np.fmax(0.1 * x, x)
Ejemplo n.º 44
# Prepare echoRD

#connect to echoRD
import run_echoRD as rE
#connect and load project
[dr, mc, mcp, pdyn, cinf, vG] = rE.loadconnect(pathdir='../',
                                               mcinif='mcini_weierbach_z3',
                                               experimental=True)
mc = mcp.mcpick_out(mc, 'weierbach_z3.pickle')

runname = 'weierbach_z3'

mc.advectref = 'Shipitalo'
mc.soilmatrix = pd.read_csv(mc.matrixbf, sep=' ')
mc.soilmatrix['m'] = np.fmax(1 - 1 / mc.soilmatrix.n, 0.1)  # van Genuchten m = 1 - 1/n, floored at 0.1
mc.md_macdepth = mc.md_depth[np.fmax(
    2,
    np.sum(np.ceil(mc.md_contact), axis=1).astype(int))]
mc.md_macdepth[mc.md_macdepth <= 0.] = 0.065

precTS = pd.read_csv(mc.precf, sep=',', skiprows=3)

precTS.tstart = 360
precTS.tend = 360 + 3600
precTS.total = 0.04
precTS.intense = precTS.total / (precTS.tend - precTS.tstart)

#use modified routines for binned retention definitions
mc.part_sizefac = 500
mc.gridcellA = mc.mgrid.vertfac * mc.mgrid.latfac
 def activate(x):
     # plain ReLU
     return np.fmax(0, x)
Ejemplo n.º 46
    def infer(self, p, v, view=False):
        # Compute the position error
        e = p - self.ref - 0.07 # Correct a 7.0 cm bias
        # Fuzzify the position error
        e_N = skfuzzy.interp_membership(self.position_error_range, self.e_N, e)
        e_Z = skfuzzy.interp_membership(self.position_error_range, self.e_Z, e)
        e_P = skfuzzy.interp_membership(self.position_error_range, self.e_P, e)
        # Fuzzify the velocity
        v_N = skfuzzy.interp_membership(self.velocity_range, self.v_N, v)
        v_Z = skfuzzy.interp_membership(self.velocity_range, self.v_Z, v)
        v_P = skfuzzy.interp_membership(self.velocity_range, self.v_P, v)
        # Apply the fuzzy operations encoded in the model's rules
        R1 = numpy.fmin(e_N, v_N) # If the system is below the reference and moving down
        R2 = numpy.fmin(e_N, v_Z) # If the system is below the reference and not moving
        R3 = numpy.fmin(e_N, v_P) # If the system is below the reference and moving up
        R4 = numpy.fmin(e_Z, v_N) # If the system is at the reference and moving down
        R5 = numpy.fmin(e_Z, v_Z) # If the system is at the reference and not moving
        R6 = numpy.fmin(e_Z, v_P) # If the system is at the reference and moving up
        R7 = numpy.fmin(e_P, v_N) # If the system is above the reference and moving down
        R8 = numpy.fmin(e_P, v_Z) # If the system is above the reference and not moving
        R9 = numpy.fmin(e_P, v_P) # If the system is above the reference and moving up
        # Combine the rules
        DT = R3 + R6 + R8 + R9
        NC = R5
        IT = R1 + R2 + R4 + R7
        # Clip the output variable's membership functions
        IT = numpy.fmin(IT, self.o_P)
        NC = numpy.fmin(NC, self.o_Z)
        DT = numpy.fmin(DT, self.o_N)
        # Aggregate the output fuzzy sets
        aggregated = numpy.fmax(IT, numpy.fmax(NC, DT))
        # Defuzzification
        output = skfuzzy.defuzz(self.output_range, aggregated, 'centroid')
        # Update the force applied by the control system
        if self.f == 0:
            self.f = output*self.f_max
        else:
            # Increment or decrement the currently applied force according to
            # the fuzzy system's output
            self.f += output*self.f
        # Keep the control system from applying negative forces
        self.f = max(self.f, 0)
        # Keep the control system from exceeding the maximum force
        self.f = min(self.f, self.f_max)

        if view:
            # Visualize the membership functions associated with the output fuzzy sets
            out_ = numpy.zeros_like(self.output_range)
            fig, ax0 = plt.subplots(figsize=(8, 3))

            ax0.fill_between(self.output_range, out_, DT, facecolor='b', alpha=0.7)
            ax0.plot(self.output_range, self.o_N, 'b', linewidth=0.5, linestyle='--', )
            ax0.fill_between(self.output_range, out_, NC, facecolor='g', alpha=0.7)
            ax0.plot(self.output_range, self.o_Z, 'g', linewidth=0.5, linestyle='--')
            ax0.fill_between(self.output_range, out_, IT, facecolor='r', alpha=0.7)
            ax0.plot(self.output_range, self.o_P, 'r', linewidth=0.5, linestyle='--')
            ax0.set_title('Rule consequences on the output fuzzy set')

            for ax in (ax0,):
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.get_xaxis().tick_bottom()
                ax.get_yaxis().tick_left()

            plt.tight_layout()

            # Visualize the aggregation of the output fuzzy sets and the defuzzification
            activation = skfuzzy.interp_membership(self.output_range, aggregated, output)
            fig, ax0 = plt.subplots(figsize=(8, 3))

            ax0.plot(self.output_range, self.o_N, 'b', linewidth=0.5, linestyle='--', )
            ax0.plot(self.output_range, self.o_Z, 'g', linewidth=0.5, linestyle='--')
            ax0.plot(self.output_range, self.o_P, 'r', linewidth=0.5, linestyle='--')
            ax0.fill_between(self.output_range, out_, aggregated, facecolor='Orange', alpha=0.7)
            ax0.plot([output, output], [0, activation], 'k', linewidth=1.5, alpha=0.9)
            ax0.set_title('Aggregated membership functions and defuzzification result')

            for ax in (ax0,):
                ax.spines['top'].set_visible(False)
                ax.spines['right'].set_visible(False)
                ax.get_xaxis().tick_bottom()
                ax.get_yaxis().tick_left()

            plt.tight_layout()

        return self.f
Ejemplo n.º 47
def ineichen(apparent_zenith,
             airmass_absolute,
             linke_turbidity,
             altitude=0,
             dni_extra=1364.,
             perez_enhancement=False):
    '''
    Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.

    Implements the Ineichen and Perez clear sky model for global
    horizontal irradiance (GHI) and direct normal irradiance (DNI), and
    calculates the clear-sky diffuse horizontal irradiance (DHI) as the
    difference between GHI and DNI*cos(zenith), as presented in [1, 2]. A
    report on clear sky models found the Ineichen/Perez model to have
    excellent performance with a minimal input data set [3].

    Default values for monthly Linke turbidity provided by SoDa [4, 5].

    Parameters
    -----------
    apparent_zenith : numeric
        Refraction corrected solar zenith angle in degrees.

    airmass_absolute : numeric
        Pressure corrected airmass.

    linke_turbidity : numeric
        Linke Turbidity.

    altitude : numeric, default 0
        Altitude above sea level in meters.

    dni_extra : numeric, default 1364
        Extraterrestrial irradiance. The units of ``dni_extra``
        determine the units of the output.

    perez_enhancement : bool, default False
        Controls if the Perez enhancement factor should be applied.
        Setting to True may produce spurious results for times when
        the Sun is near the horizon and the airmass is high.
        See https://github.com/pvlib/pvlib-python/issues/435

    Returns
    -------
    clearsky : DataFrame (if Series input) or OrderedDict of arrays
        DataFrame/OrderedDict contains the columns/keys
        ``'dhi', 'dni', 'ghi'``.

    See also
    --------
    lookup_linke_turbidity
    pvlib.location.Location.get_clearsky

    References
    ----------
    .. [1] P. Ineichen and R. Perez, "A New airmass independent formulation for
       the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,
       2002.

    .. [2] R. Perez et. al., "A New Operational Model for Satellite-Derived
       Irradiances: Description and Validation", Solar Energy, vol 73, pp.
       307-317, 2002.

    .. [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance
       Clear Sky Models: Implementation and Analysis", Sandia National
       Laboratories, SAND2012-2389, 2012.

    .. [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5 (obtained
       July 17, 2012).

    .. [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.
       ISES Solar World Congress, June 2003. Goteborg, Sweden.
    '''

    # ghi is calculated using either the equations in [1] by setting
    # perez_enhancement=False (default behavior) or using the model
    # in [2] by setting perez_enhancement=True.

    # The NaN handling is a little subtle. The AM input is likely to
    # have NaNs that we'll want to map to 0s in the output. However, we
    # want NaNs in other inputs to propagate through to the output. This
    # is accomplished by judicious use and placement of np.maximum,
    # np.minimum, and np.fmax
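    # Illustrative (not values from this model):
    #   np.maximum(np.nan, 0.0) -> nan   (propagates NaN)
    #   np.fmax(np.nan, 0.0)    -> 0.0   (ignores the NaN operand)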

    # use max so that nighttime values will result in 0s instead of
    # negatives. propagates nans.
    cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0)

    tl = linke_turbidity

    fh1 = np.exp(-altitude / 8000.)
    fh2 = np.exp(-altitude / 1250.)
    cg1 = 5.09e-05 * altitude + 0.868
    cg2 = 3.92e-05 * altitude + 0.0387

    ghi = np.exp(-cg2 * airmass_absolute * (fh1 + fh2 * (tl - 1)))

    # https://github.com/pvlib/pvlib-python/issues/435
    if perez_enhancement:
        ghi *= np.exp(0.01 * airmass_absolute**1.8)

    # use fmax to map airmass nans to 0s. multiply and divide by tl to
    # reinsert tl nans
    ghi = cg1 * dni_extra * cos_zenith * tl / tl * np.fmax(ghi, 0)

    # From [1] (Following [2] leads to 0.664 + 0.16268 / fh1)
    # See https://github.com/pvlib/pvlib-python/pull/808
    b = 0.664 + 0.163 / fh1
    # BncI = "normal beam clear sky radiation"
    bnci = b * np.exp(-0.09 * airmass_absolute * (tl - 1))
    bnci = dni_extra * np.fmax(bnci, 0)

    # "empirical correction" SE 73, 157 & SE 73, 312.
    bnci_2 = ((1 - (0.1 - 0.2 * np.exp(-tl)) / (0.1 + 0.882 / fh1)) /
              cos_zenith)
    bnci_2 = ghi * np.fmin(np.fmax(bnci_2, 0), 1e20)

    dni = np.minimum(bnci, bnci_2)

    dhi = ghi - dni * cos_zenith

    irrads = OrderedDict()
    irrads['ghi'] = ghi
    irrads['dni'] = dni
    irrads['dhi'] = dhi

    if isinstance(dni, pd.Series):
        irrads = pd.DataFrame.from_dict(irrads)

    return irrads
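
A minimal usage sketch for the function above, with illustrative scalar inputs (not values from the references; the module's tools helper must be importable):

irrads = ineichen(apparent_zenith=30.0, airmass_absolute=1.15,
                  linke_turbidity=3.0, altitude=200.)
print(irrads['ghi'], irrads['dni'], irrads['dhi'])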
Ejemplo n.º 48
def cmeans(data, c, m, error, maxiter,
           metric='euclidean',
           init=None, seed=None):
    """
    Fuzzy c-means clustering algorithm [1].

    Parameters
    ----------
    data : 2d array, size (S, N)
        Data to be clustered.  N is the number of samples; S is the number
        of features within each sample vector.
    c : int
        Desired number of clusters or classes.
    m : float
        Array exponentiation applied to the membership function u_old at each
        iteration, where U_new = u_old ** m.
    error : float
        Stopping criterion; stop early if the norm of (u[p] - u[p-1]) < error.
    maxiter : int
        Maximum number of iterations allowed.
    metric : string
        Defaults to 'euclidean'. Any metric accepted by
        ``scipy.spatial.distance.cdist`` may be passed.
    init : 2d array, size (c, N)
        Initial fuzzy c-partitioned matrix. If none provided, algorithm is
        randomly initialized.
    seed : int
        If provided, sets random seed of init. No effect if init is
        provided. Mainly for debug/testing purposes.

    Returns
    -------
    cntr : 2d array, size (c, S)
        Cluster centers.  Data for each center along each feature provided
        for every cluster (of the `c` requested clusters).
    u : 2d array, (c, N)
        Final fuzzy c-partitioned matrix.
    u0 : 2d array, (c, N)
        Initial guess at fuzzy c-partitioned matrix (either provided init or
        random guess used if init was not provided).
    d : 2d array, (c, N)
        Final Euclidean distance matrix.
    jm : 1d array, length P
        Objective function history.
    p : int
        Number of iterations run.
    fpc : float
        Final fuzzy partition coefficient.


    Notes
    -----
    The algorithm implemented is from Ross et al. [1]_.

    Fuzzy C-Means has a known problem with high dimensionality datasets, where
    the majority of cluster centers are pulled into the overall center of
    gravity. If you are clustering data with very high dimensionality and
    encounter this issue, another clustering method may be required. For more
    information and the theory behind this, see Winkler et al. [2]_.

    References
    ----------
    .. [1] Ross, Timothy J. Fuzzy Logic With Engineering Applications, 3rd ed.
           Wiley. 2010. ISBN 978-0-470-74376-8 pp 352-353, eq 10.28 - 10.35.

    .. [2] Winkler, R., Klawonn, F., & Kruse, R. Fuzzy c-means in high
           dimensional spaces. 2012. Contemporary Theory and Pragmatic
           Approaches in Fuzzy Computing Utilization, 1.
    """
    # Setup u0
    if init is None:
        if seed is not None:
            np.random.seed(seed=seed)
        n = data.shape[1]
        u0 = np.random.rand(c, n)
        u0 = normalize_columns(u0)
        init = u0.copy()
    u0 = init
    u = np.fmax(u0, np.finfo(np.float64).eps)

    # Initialize loop parameters
    jm = np.zeros(0)
    p = 0

    # Main cmeans loop
    while p < maxiter - 1:
        u2 = u.copy()
        [cntr, u, Jjm, d] = _cmeans0(data, u2, c, m, metric)
        jm = np.hstack((jm, Jjm))
        p += 1

        # Stopping rule
        if np.linalg.norm(u - u2) < error:
            break

    # Final calculations
    error = np.linalg.norm(u - u2)
    fpc = _fp_coeff(u)

    return cntr, u, u0, d, jm, p, fpc
def classification_entropy(data, centers):
    # Classification entropy of a fuzzy partition: memberships are built from
    # inverse squared distances, then -sum(u * log(u)) is averaged per sample.
    dist = cdist(centers, data, metric='sqeuclidean')
    u = 1 / np.fmax(dist, np.finfo(float).eps)
    u = np.fmax(u / u.sum(axis=0), np.finfo(float).eps)
    return -(u * np.log(u)).sum() / data.shape[0]
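
A short usage sketch for the cmeans function above on synthetic two-blob data (the (S, N) = (features, samples) layout is the one its docstring specifies; the internal helpers such as normalize_columns must be importable alongside it):

import numpy as np

rng = np.random.RandomState(0)
blob_a = rng.randn(2, 100)                # blob centered at the origin
blob_b = rng.randn(2, 100) + 5.0          # blob centered at (5, 5)
data = np.hstack([blob_a, blob_b])        # shape (S, N) = (2, 200)

cntr, u, u0, d, jm, p, fpc = cmeans(data, c=2, m=2.0, error=1e-5,
                                    maxiter=100, seed=42)
hard_labels = np.argmax(u, axis=0)        # defuzzify: most-likely cluster per sample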
Ejemplo n.º 50
def stat_plot(xg_model, feature, label, type='weight', max_num_features=30):

    fscore = xg_model.get_score(importance_type=type)
    fscore = sorted(fscore.items(), key=itemgetter(1),
                    reverse=True)  # sort scores
    fea_index = get_fea_index(fscore, max_num_features)

    fig, AX = plt.subplots(nrows=1, ncols=3)
    plt.suptitle('Feature Selection - Statistical Comparison Results\nBy ' +
                 type)

    dimension = len(fea_index)
    X = range(1, dimension + 1)
    feature = feature[:, fea_index]

    Yp = np.mean(feature[np.where(label == 1)[0]], axis=0)
    Yn = np.mean(feature[np.where(label != 1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        Yp[i] /= param
        Yn[i] /= param
    p1 = AX[0].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[0].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[0].legend((p1, p2), ('Malware', 'Normal'))
    AX[0].set_title('Comparison of selected features by their means')
    AX[0].set_xlabel('Feature Index')
    AX[0].set_ylabel('Mean Value')
    AX[0].set_ylim(-1.1, 1.1)
    plt.sca(AX[0])
    plt.xticks(X, fea_index, rotation=80)

    Yp = np.var(feature[np.where(label == 1)[0]], axis=0)
    Yn = np.var(feature[np.where(label != 1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        Yp[i] /= param
        Yn[i] /= param
    p1 = AX[1].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[1].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[1].legend((p1, p2), ('Malware', 'Normal'))
    AX[1].set_title('Comparison of selected features by their variances')
    AX[1].set_xlabel('Feature Index')
    AX[1].set_ylabel('Variance Value')
    AX[1].set_ylim(-1.1, 1.1)
    plt.sca(AX[1])
    plt.xticks(X, fea_index, rotation=80)

    t = chi2(feature, label)[0]
    _chi2 = np.zeros(len(t) + 1)
    _chi2[0:len(t)] = t

    all_indices = range(0, feature.shape[1])
    _left = list(set(all_indices).difference(set(fea_index)))
    _chi2_left = chi2(feature[:, _left], label)[0]
    _chi2_left_mean = np.mean(_chi2_left)
    _chi2[len(t)] = _chi2_left_mean
    X = range(1, dimension + 2)
    AX[2].bar(X, _chi2, facecolor='#ff9999', edgecolor='white')
    AX[2].set_title('Comparison of selected features by their chi2')
    AX[2].set_xlabel('Feature Index')
    AX[2].set_ylabel('chi2 Value')
    fea_index = fea_index.tolist()
    fea_index.append('Other')
    plt.sca(AX[2])
    plt.xticks(X, fea_index, rotation=80)
Ejemplo n.º 51
 def set_brake(self, input_brake):
     # Clamp the brake command to valid bounds
     brake           = np.fmax(np.fmin(input_brake, 1.0), 0.0)
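     # For finite inputs this equals np.clip(input_brake, 0.0, 1.0); the
     # fmin/fmax pair additionally maps a NaN command to 1.0 (full brake)
     # instead of propagating it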
     self._set_brake = brake
Ejemplo n.º 52
def fea_plot(xg_model,
             feature,
             label,
             type='weight',
             max_num_features=None,
             x_axis_label=None,
             ranks_dir='./'):
    fig, AX = plt.subplots(nrows=1, ncols=2)
    fscore = xg_model.get_score(importance_type=type)
    fscore = sorted(fscore.items(), key=itemgetter(1),
                    reverse=True)  # sort scores
    fea_index = get_fea_index(fscore, max_num_features)

    #save ranks to files
    path_to_save = '../average_rank/ranks/' + ranks_dir
    if not os.path.isdir(path_to_save):
        os.mkdir(path_to_save)
    path_to_save = path_to_save + '/index_' + type + '.txt'

    save_rank_file = open(path_to_save, 'w')
    all_feat_index = get_fea_index(fscore, None)
    all_feat_index = [i + 1 for i in all_feat_index]
    print('fscore len')
    print(len(all_feat_index))
    if x_axis_label is not None:
        all_x_axis_label = get_axis_label(all_feat_index, x_axis_label)
    else:
        all_x_axis_label = all_feat_index
    for item in all_x_axis_label:
        save_rank_file.write("%s\n" % item)
    save_rank_file.close()

    if x_axis_label is not None:
        mapper = {'f{0}'.format(i): v for i, v in enumerate(x_axis_label)}
        mapped = {
            mapper[k]: v
            for k, v in xg_model.get_score(importance_type=type).items()
        }
        xgb.plot_importance(mapped,
                            xlabel=type,
                            ax=AX[0],
                            max_num_features=max_num_features)
    else:
        xgb.plot_importance(xg_model,
                            xlabel=type,
                            importance_type=type,
                            ax=AX[0],
                            max_num_features=max_num_features)

    print(fea_index)
    print(max_num_features)

    feature = feature[:, fea_index]
    dimension = len(fea_index)
    X = range(1, dimension + 1)

    Yp = np.mean(feature[np.where(label == 1)[0]], axis=0)
    Yn = np.mean(feature[np.where(label != 1)[0]], axis=0)
    for i in range(0, dimension):
        param = np.fmax(Yp[i], Yn[i])
        if (param != 0):
            Yp[i] /= param
            Yn[i] /= param
        else:
            print('Warning: zero normalizer for feature %d; skipping' % i)
    p1 = AX[1].bar(X, +Yp, facecolor='#ff9999', edgecolor='white')
    p2 = AX[1].bar(X, -Yn, facecolor='#9999ff', edgecolor='white')
    AX[1].legend((p1, p2), ('Malware', 'Normal'))
    AX[1].set_title('Comparison of selected features by their means')
    AX[1].set_xlabel('Feature Index')
    AX[1].set_ylabel('Mean Value')
    AX[1].set_ylim(-1.1, 1.1)
    #update on 5/25/2017, this line should be added or removed according to the inputdata format
    fea_index = [i + 1 for i in fea_index]
    if x_axis_label is not None:
        tar_x_axis_label = get_axis_label(fea_index, x_axis_label)
    else:
        tar_x_axis_label = fea_index
    plt.xticks(X, tar_x_axis_label, rotation=80)
    plt.suptitle('Feature Selection results')

    # Note: these rc settings are applied after plotting, so they may have no effect here
    SMALL_SIZE = 8
    MEDIUM_SIZE = 10
    BIGGER_SIZE = 11

    plt.rc('font', size=SMALL_SIZE)  # controls default text sizes
    plt.rc('axes', titlesize=SMALL_SIZE)  # fontsize of the axes title
    plt.rc('axes', labelsize=BIGGER_SIZE)  # fontsize of the x and y labels
    plt.rc('xtick', labelsize=BIGGER_SIZE)  # fontsize of the tick labels
    plt.rc('ytick', labelsize=BIGGER_SIZE)  # fontsize of the tick labels
    plt.rc('legend', fontsize=SMALL_SIZE)  # legend fontsize
    plt.rc('figure', titlesize=BIGGER_SIZE)  # fontsize of the figure title
Ejemplo n.º 53
 def set_throttle(self, input_throttle):
     # Clamp the throttle command to valid bounds
     throttle           = np.fmax(np.fmin(input_throttle, 1.0), 0.0)
     self._set_throttle = throttle
Ejemplo n.º 54
def evaluate_new_fuzzy_system(ws, data, target):
    universe = np.linspace(0, 1, 10)
    x = []
    for w in ws:
        x.append({
            'x': fuzz.trimf(universe, [0.0, 0.0, w]),
            's': fuzz.trimf(universe, [0.0, w, 1.0]),
            'm': fuzz.trimf(universe, [w, 2.0, 2.0]),
            'l': fuzz.trimf(universe, [w, 1, 1])
        })
        # Earlier two-parameter variant, kept for reference:
        # for w0 in ws:
        #     w0 = 0
        #     w1 = 0
        #     for w1 in ws:
        #         x.append({'x': fuzz.trimf(universe, [0.0, 0.0, w0]),
        #                   's': fuzz.trimf(universe, [0.0, w0, w1]),
        #                   'm': fuzz.trimf(universe, [w0, w1, 1]),
        #                   'l': fuzz.trimf(universe, [w1, 1.0, 1.0])})

    # membership
    x_memb = []
    for i in range(len(ws)):
        x_memb.append({})
        for t in ['x', 's', 'm', 'l']:
            x_memb[i][t] = fuzz.interp_membership(universe, x[i][t], data[:, i])
    # MY RULES ###########
    # R2: x7 = x8 = long and x15 = x16 = middle and x23 = x24 = short then Efficient
    # What I understood:
    #
    # x7 is long AND x8 is long AND x15 is middle AND x16 is middle
    # AND x23 is middle AND x24 is short -> efficient
    #
    # Since a logical OR becomes a max and a logical AND becomes a min,
    # we should have something like:
    # is_ext_inefficient = np.fmin(x_memb[0]['x'], x_memb[1]['x'])
    is_ext_inefficient = np.fmin(
        x_memb[0]['x'],
        np.fmax(
            x_memb[1]['x'],
            np.fmin(
                x_memb[2]['x'],
                np.fmin(x_memb[3]['x'], np.fmax(x_memb[4]['x'],
                                                x_memb[5]['x'])))))

    is_efficient = np.fmin(
        x_memb[21]['s'],
        np.fmin(
            x_memb[20]['s'],
            np.fmin(
                x_memb[20]['x'],
                np.fmax(
                    x_memb[16]['l'],
                    np.fmax(x_memb[19]['l'],
                            np.fmax(x_memb[22]['s'], x_memb[23]['s']))))))

    # R1 = x1 = x2 = long and X5 = x6 = long and x9 = x10 = middle and x17 = x18 = short then Inefficient
    is_inefficient = np.fmin(x_memb[1]['x'],
                             np.fmax(
                                 x_memb[2]['x'],
                                 x_memb[3]['x'],
                             ))

    # R3 = x3 = x4 = long and x11 = x12 = middle and x13 = x14 = middle and x19 = x20 = short and x21 = x22 = short then Mixt
    is_mixed = np.fmax(
        x_memb[14]['m'],
        np.fmin(
            x_memb[15]['m'],
            np.fmax(
                x_memb[10]['m'],
                np.fmin(
                    x_memb[11]['m'],
                    np.fmin(
                        x_memb[12]['m'],
                        np.fmin(x_memb[13]['l'],
                                np.fmin(
                                    x_memb[9]['x'],
                                    x_memb[8]['x'],
                                )))))))

    # is_efficient =  np.fmin(
    #     x_memb[21]['l'],
    #     np.fmin(
    #         x_memb[19]['l'],
    #         np.fmin(
    #             x_memb[20]['l'],
    #             np.fmin(
    #                 x_memb[18]['l'],
    #                 np.fmin(
    #                     x_memb[22]['l'],
    #                     x_memb[23]['l']
    #                 )
    #             )
    #         )
    #     )
    # )

    # # R1 = x1 = x2 = long and X5 = x6 = long and x9 = x10 = middle and x17 = x18 = short then Inefficient
    # is_inefficient = np.fmin(
    #     x_memb[0]['s'],
    #     np.fmin(
    #         x_memb[1]['s'],
    #         np.fmin(
    #             x_memb[4]['s'],
    #             np.fmin(
    #                 x_memb[5]['s'],
    #                 np.fmin(
    #                     x_memb[2]['s'],
    #                     np.fmin(
    #                         x_memb[7]['s'],
    #                         np.fmin(
    #                             x_memb[3]['s'],
    #                             x_memb[6]['s'],
    #                         )
    #                     )
    #                 )
    #             )
    #         )
    #     )
    # )

    # # R3 = x3 = x4 = long and x11 = x12 = middle and x13 = x14 = middle and x19 = x20 = short and x21 = x22 = short then Mixt
    # is_mixed = np.fmin(
    #     x_memb[14]['m'],
    #     np.fmin(
    #         x_memb[15]['m'],
    #         np.fmin(
    #             x_memb[10]['m'],
    #             np.fmin(
    #                 x_memb[11]['m'],
    #                 np.fmin(
    #                     x_memb[12]['m'],
    #                     np.fmin(
    #                         x_memb[13]['m'],
    #                         np.fmin(
    #                             x_memb[18]['l'],
    #                             np.fmin(
    #                                 x_memb[19]['l'],
    #                                 np.fmin(
    #                                     x_memb[9]['s'],
    #                                     x_memb[8]['s'],
    #                                 )
    #                             )
    #                         )
    #                     )
    #                 )
    #             )
    #         )
    #     )
    # )

    # result = np.argmax([is_efficient, is_mixed, is_inefficient], axis=0)

    result = np.argmax(
        [is_efficient, is_mixed, is_inefficient, is_ext_inefficient], axis=0)
    return (result == target).mean()
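
The rule comments above rely on the standard encoding of fuzzy AND as an elementwise min and fuzzy OR as an elementwise max; a two-line illustration with made-up membership values:

import numpy as np

mu_a = np.array([0.2, 0.8, 0.5])
mu_b = np.array([0.6, 0.3, 0.5])
print(np.fmin(mu_a, mu_b))  # fuzzy AND -> [0.2 0.3 0.5]
print(np.fmax(mu_a, mu_b))  # fuzzy OR  -> [0.6 0.8 0.5]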
Ejemplo n.º 55
def PSO(func,
        LB,
        UB,
        nPop=40,
        epochs=500,
        K=0,
        phi=2.05,
        vel_fact=0.5,
        conf_type='RB',
        IntVar=None,
        normalize=False,
        rad=0.1,
        args=[],
        Xinit=None):
    """
    func            Function to minimize
    LB              Lower boundaries of the search space
    UB              Upper boundaries of the search space
    nPop            Number of agents (population)
    epochs          Number of iterations
    K               Average size of each agent's group of informants
    phi             Coefficient to calculate the two confidence coefficients
    vel_fact        Velocity factor to calculate the maximum and the minimum
                    allowed velocities
    conf_type       Confinement type (on the velocities)
    IntVar          List of indexes specifying which variables should be
                    treated as integers
    normalize       Specifies if the search space should be normalized (to
                    improve convergence)
    rad             Normalized radius of the hypersphere centered on the best
                    particle
    args            Tuple containing any parameter that needs to be passed to
                    the function
    Xinit           Initial position of each agent

    Dimensions:
    (nVar, )        LB, UB, LB_orig, UB_orig, vel_max, vel_min, swarm_best_pos
                    Xinit
    (nPop, nVar)    agent_pos, agent_vel, agent_best_pos, Gr, group_best_pos,
                    agent_pos_orig, agent_pos_tmp, vel_conf, out, x_sphere, u
    (nPop, nPop)    informants, informants_cost
    (nPop)          agent_best_cost, agent_cost, p_equal_g, better, r_max, r,
                    norm
    (0-nVar, )      IntVar
    """
    # Dimension of the search space and max. allowed velocities
    nVar = len(LB)
    vel_max = vel_fact * (UB - LB)
    vel_min = -vel_max

    # Confidence coefficients
    w = 1.0 / (phi - 1.0 + np.sqrt(phi**2 - 2.0 * phi))
    cmax = w * phi

    # Probability an agent is an informant
    p_informant = 1.0 - (1.0 - 1.0 / float(nPop))**K

    # Normalize search space
    if (normalize):
        LB_orig = LB.copy()
        UB_orig = UB.copy()
        LB = np.zeros(nVar)
        UB = np.ones(nVar)

    # Define (if any) which variables are treated as integers (indexes are in
    # the range 1 to nVar)
    if (IntVar is None):
        nIntVar = 0
    elif (IntVar == 'all'):
        IntVar = np.arange(nVar, dtype=int)
        nIntVar = nVar
    else:
        IntVar = np.asarray(IntVar, dtype=int) - 1
        nIntVar = len(IntVar)

    # Initial position of each agent
    if (Xinit is None):
        agent_pos = LB + np.random.rand(nPop, nVar) * (UB - LB)
    else:
        Xinit = np.tile(Xinit, (nPop, 1))
        if (normalize):
            agent_pos = (Xinit - LB_orig) / (UB_orig - LB_orig)
        else:
            agent_pos = Xinit

    # Initial velocity of each agent (with velocity limits)
    agent_vel = (LB - agent_pos) + np.random.rand(nPop, nVar) * (UB - LB)
    agent_vel = np.fmin(np.fmax(agent_vel, vel_min), vel_max)

    # Initial cost of each agent
    if (normalize):
        agent_pos_orig = LB_orig + agent_pos * (UB_orig - LB_orig)
        agent_cost = func(agent_pos_orig, args)
    else:
        agent_cost = func(agent_pos, args)

    # Initial best position/cost of each agent
    agent_best_pos = agent_pos.copy()
    agent_best_cost = agent_cost.copy()

    # Initial best position/cost of the swarm
    idx = np.argmin(agent_best_cost)
    swarm_best_pos = agent_best_pos[idx, :]
    swarm_best_cost = agent_best_cost[idx]
    swarm_best_idx = idx

    # Initial best position of each agent using the swarm
    if (K == 0):
        group_best_pos = np.tile(swarm_best_pos, (nPop, 1))
        p_equal_g = \
            (np.where(np.arange(nPop) == idx, 0.75, 1.0)).reshape(nPop, 1)

    # Initial best position of each agent using informants
    else:
        informants = np.where(np.random.rand(nPop, nPop) < p_informant, 1, 0)
        np.fill_diagonal(informants, 1)
        group_best_pos, p_equal_g = group_best(informants, agent_best_pos,
                                               agent_best_cost)

    # Main loop
    for epoch in range(epochs):

        # Determine the updated velocity for each agent
        Gr = agent_pos + (1.0 / 3.0) * cmax * \
             (agent_best_pos + group_best_pos - 2.0 * agent_pos) * p_equal_g
        x_sphere = hypersphere_point(Gr, agent_pos)
        agent_vel = w * agent_vel + Gr + x_sphere - agent_pos

        # Impose velocity limits
        agent_vel = np.fmin(np.fmax(agent_vel, vel_min), vel_max)

        # Temporarily update the position of each agent to check if it is
        # outside the search space
        agent_pos_tmp = agent_pos + agent_vel
        if (nIntVar > 0):
            agent_pos_tmp[:, IntVar] = np.round(agent_pos_tmp[:, IntVar])
        out = np.logical_not((agent_pos_tmp > LB) * (agent_pos_tmp < UB))

        # Apply velocity confinement rules
        if (conf_type == 'RB'):
            vel_conf = random_back_conf(agent_vel)

        elif (conf_type == 'HY'):
            vel_conf = hyperbolic_conf(agent_pos, agent_vel, UB, LB)

        elif (conf_type == 'MX'):
            vel_conf = mixed_conf(agent_pos, agent_vel, UB, LB)

        # Update velocity and position of each agent (all <vel_conf> velocities
        # are smaller than the max. allowed velocity)
        agent_vel = np.where(out, vel_conf, agent_vel)
        agent_pos += agent_vel
        if (nIntVar > 0):
            agent_pos[:, IntVar] = np.round(agent_pos[:, IntVar])

        # Apply position confinement rules to agents outside the search space
        agent_pos = np.fmin(np.fmax(agent_pos, LB), UB)
        if (nIntVar > 0):
            agent_pos[:, IntVar] = np.fmax(agent_pos[:, IntVar],
                                           np.ceil(LB[IntVar]))
            agent_pos[:, IntVar] = np.fmin(agent_pos[:, IntVar],
                                           np.floor(UB[IntVar]))

        # Calculate new cost of each agent
        if (normalize):
            agent_pos_orig = LB_orig + agent_pos * (UB_orig - LB_orig)
            agent_cost = func(agent_pos_orig, args)
        else:
            agent_cost = func(agent_pos, args)

        # Update best position/cost of each agent
        better = (agent_cost < agent_best_cost)
        agent_best_pos[better, :] = agent_pos[better, :]
        agent_best_cost[better] = agent_cost[better]

        # Update best position/cost of the swarm
        idx = np.argmin(agent_best_cost)
        if (agent_best_cost[idx] < swarm_best_cost):
            swarm_best_pos = agent_best_pos[idx, :]
            swarm_best_cost = agent_best_cost[idx]
            swarm_best_idx = idx

        # If the best cost of the swarm did not improve ....
        else:
            # .... when using swarm -> do nothing
            if (K == 0):
                pass

            # .... when using informants -> change informant groups
            else:
                informants = \
                    np.where(np.random.rand(nPop, nPop) < p_informant, 1, 0)
                np.fill_diagonal(informants, 1)

        # Update best position of each agent using the swarm
        if (K == 0):
            group_best_pos = np.tile(swarm_best_pos, (nPop, 1))

        # Update best position of each agent using informants
        else:
            group_best_pos, p_equal_g = group_best(informants, agent_best_pos,
                                                   agent_best_cost)

    # If necessary de-normalize and determine the (normalized) distance between
    # the best particle and all the others
    if (normalize):
        delta = agent_best_pos - swarm_best_pos  # (UB-LB = 1)
        swarm_best_pos = LB_orig + swarm_best_pos * (UB_orig - LB_orig)
    else:
        deltaB = np.fmax(UB - LB, 1.e-10)  # To avoid /0 when LB = UB
        delta = (agent_best_pos - swarm_best_pos) / deltaB

    # Number of particles in the hypersphere of radius <rad> around the best
    # particle
    dist = np.linalg.norm(delta / np.sqrt(nPop), axis=1)
    in_rad = (dist < rad).sum()

    # Return info about the solution
    info = (swarm_best_cost, swarm_best_idx, in_rad)

    return swarm_best_pos, info
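
A hedged usage sketch for PSO on the sphere function (the cost function receives the whole (nPop, nVar) position matrix plus args and must return one cost per agent; the confinement helpers such as random_back_conf are assumed to live in the same module):

import numpy as np

def sphere(pos, args):
    # one cost per agent (row of pos)
    return np.sum(pos ** 2, axis=1)

LB = np.array([-5.0, -5.0])
UB = np.array([5.0, 5.0])
best_pos, (best_cost, best_idx, n_in_rad) = PSO(sphere, LB, UB,
                                                nPop=40, epochs=200)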
Ejemplo n.º 56
def eta(x, threshold):
    # Soft-thresholding (shrinkage) operator: shrink |x| by `threshold` and
    # zero out anything smaller in magnitude.
    return np.sign(x) * np.fmax(np.abs(x) - threshold, 0)
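
eta is the soft-thresholding operator that appears in ISTA-style sparse solvers; a quick check with illustrative inputs:

import numpy as np

print(eta(np.array([-2.0, -0.5, 0.3, 1.5]), 1.0))
# -> [-1. -0.  0.  0.5]: entries within the threshold collapse to zero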
Ejemplo n.º 57
def doit(rootname='sphere', smooth=21, fig_no=2):
    '''
    Plot the spectra contained in the spec_tot file.

    141125  ksl  Updated for new formats which use astropy
    191210  ksl  Modified so that what is plotted is nuL_nu, and
                 did a better job of setting limits for the plot
    '''
    # Make sure we only have the rootname

    rootname = rootname.replace('.log_spec_tot', '')
    filename = rootname + '.log_spec_tot'

    try:
        data = ascii.read(filename)
    except IOError:
        print('Error: Could not find %s' % filename)
        return

    # print(data.colnames)

    pylab.figure(fig_no, (6, 6))
    pylab.clf()

    created = data['Freq.'] * xsmooth(data['Created'])
    emitted = data['Freq.'] * xsmooth(data['Emitted'])
    hit_surf = data['Freq.'] * xsmooth(data['HitSurf'])
    wind = data['Freq.'] * xsmooth(data['Wind'])

    pylab.loglog(data['Freq.'], created, label='Created')
    # pylab.loglog(data['Freq.'],data['Created'],label='Created')

    pylab.loglog(data['Freq.'], emitted, label='Observed')
    # pylab.loglog(data['Freq.'],data['Emitted'],label='Observed')

    pylab.loglog(data['Freq.'], hit_surf, label='Hit Surface')
    # pylab.loglog(data['Freq.'],data['HitSurf'],label='Hit Surface')

    pylab.loglog(data['Freq.'], wind, label='Wind Observed')
    # pylab.loglog(data['Freq.'],data['Wind'],label='Wind Observed')

    zz = pylab.axis()

    # Find the maximum values of all of these arrays

    q = numpy.fmax(created, emitted)
    qq = numpy.fmax(q, wind)
    ymax = 3 * numpy.max(qq)
    pylab.ylim(ymax / 1e8, ymax)

    # make a mask from the values of qq

    test = data['Freq.'][qq > ymax / 1e8]
    pylab.xlim(test[0], test[-1])

    # pylab.xlim(zz[0],zz[1])

    # pylab.axis((zz[0],zz[1],zz[3]/1e8,zz[3]))

    pylab.text(Lyman,
               0.9 * ymax,
               'H',
               horizontalalignment='center',
               verticalalignment='top',
               size=14)
    pylab.text(HeII,
               0.9 * ymax,
               'HeII',
               horizontalalignment='center',
               verticalalignment='top',
               size=14)
    pylab.plot([LymanA, LymanA], [0.5 * ymax, 0.9 * ymax], '-k')
    pylab.plot([HB, HB], [0.5 * ymax, 0.9 * ymax], '-k')
    pylab.plot([HA, HA], [0.5 * ymax, 0.9 * ymax], '-k')
    pylab.legend(loc='best')
    pylab.title(rootname, size=16)
    pylab.xlabel(r'$ {\nu} $', size=16)
    pylab.ylabel(r'$\nu$L$_{\nu}$', size=16)

    pylab.draw()
    pylab.savefig(rootname + '.spec_tot.png')

    # Since the total spectra are basically written out as L_nu we need to
    # integrate over frequency

    freq = numpy.array(data['Freq.'])

    dfreq = []
    i = 0
    while i < len(freq):
        if i == 0:
            dfreq.append(freq[1] - freq[0])
        elif i == len(freq) - 1:
            dfreq.append(freq[i] - freq[i - 1])
        else:
            dfreq.append(0.5 * (freq[i + 1] - freq[i - 1]))

        i += 1

    dfreq = numpy.array(dfreq)

    lum_created = numpy.sum(dfreq * data['Created'])
    lum = numpy.sum(dfreq * data['Emitted'])

    #dfreq=freq[1]-freq[0]  # For a linear scale the frequencies are all equally spaced

    # lum_created=dfreq*numpy.sum(numpy.array(data['Created']))
    # lum=dfreq*numpy.sum(data['Emitted'])
    # print(data['Created'])
    print('The Created luminosity was ', lum_created)
    print('The emitted luminosity was ', lum)

    return
Ejemplo n.º 58
def k_shell_hydrogenic_gos(atomic_number: int, edge_onset_eV: float, edge_delta_eV: float,
                            beam_energy_eV: float, collection_angle_rad: float) -> numpy.ndarray:
    """Return the K-shell generalized oscillator strength (GOS) calculated on the hydrogenic model as an ndarray.

    This algorithm is based on the hydrogenic model formulation given by Egerton in chapter 3 of his book
    entitled Electron Energy-Loss Spectroscopy in the Electron Microscope (in its 3rd edition as of 2011).
    Egerton's original expressions (formula numbers given below) have been slightly reformulated by Mike Kundmann
    to minimize the fundamental constants required, eliminate unnecessary approximations in the relativistic
    kinematics, and simplify the GOS expressions.  In order for the angular portion of any subsequent cross-section
    computation to be carried out via straightforward NumPy array integration with fixed limits of integration,
    the GOS table is computed versus scattering angle, theta, rather than (dimensionless) momentum transfer, qa0.

    The returned GOS array has the following properties:
      0-axis is the scattering angle dimension, from 0 through collection_angle_rad
      1-axis is the energy loss dimension, from edge_onset_eV through edge_onset_eV + edge_delta_eV
      intensity is in units of 1 / (eV * steradian)
    """
    electronRestEnergy_eV = 510999.0
    fineStructureConstant = 1 / 137.036
    rydbergEnergy_eV = 0.5 * electronRestEnergy_eV * fineStructureConstant ** 2

    beamGamma = 1 + beam_energy_eV / electronRestEnergy_eV
    beamBeta2 = 1 - 1 / beamGamma ** 2

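    # Adaptive sampling: roughly one energy sample per eV of edge_delta_eV and one
    # angular sample per 0.5 mrad of collection angle, clamped to [51, 1001] and [51, 401].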
    energySampleCount = int(numpy.fmin(numpy.fmax(50, 100 * numpy.round(edge_delta_eV / 100)), 1000) + 1)
    thetaSampleCount = int(numpy.fmin(numpy.fmax(50, 100 * numpy.round(collection_angle_rad / 0.050)), 400) + 1)

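    # Inner-shell screening: for Z > 1 the companion 1s electron screens the nuclear
    # charge, so use a screened charge Zs = Z - 0.5 and a shell occupancy of 2.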
    screenedZ2 = 1.
    shellOccupancy = 1
    if atomic_number > 1:
        screenedZ2 = (atomic_number - 0.5) ** 2
        shellOccupancy = 2

    # Generate epsilon array (scaled energy-loss) = E/m0c^2 = E/Me over requested energy loss range
    epsilon = numpy.linspace(edge_onset_eV, edge_onset_eV + edge_delta_eV, energySampleCount, dtype=numpy.float64) / electronRestEnergy_eV

    # Generate corresponding phiE array = 1-k1/k0 ~= thetaE
    phiE = 1 - numpy.sqrt(1 - 2 * epsilon * (beamGamma - epsilon / 2) / (beamGamma ** 2 * beamBeta2))

    # Generate thetaTerm array = 4sin(theta/2)^2 for requested collection angle
    thetaTerm = numpy.linspace(0, collection_angle_rad, thetaSampleCount, dtype=numpy.float64)
    thetaTerm = 4 * numpy.sin(thetaTerm / 2) ** 2

    # Generate Q^2 map = K0^2[phiE^2 + 4(1-phiE)sin(theta/2)^2], where
    # Q = qa0, K0 = k0a0 = gamma0*beta0/alpha, and alpha = fine structure constant.
    # This is an exact reformulation of Egerton's equation 3.141, making approximations 3.144 and 3.146 unnecessary.
    # A complete 2D GOS map is generated thanks to Python broadcasting and judicious shaping of the thetaTerm array.
    Q2 = (phiE ** 2 + (1 - phiE) * thetaTerm.reshape(thetaSampleCount, 1)) * beamBeta2 * (beamGamma / fineStructureConstant) ** 2

    # Generate epsilonR array (energy-loss in Rydbergs) = E/Ry = 2*epsilon/alpha^2
    epsilonR = 2 * epsilon / fineStructureConstant ** 2

    # Generate common GOS pre-factor map = 128*Ne(E/Ry)(Q^2+(E/Ry)/3)/[((Q^2-E/Ry)/Zs)^2+4*Q^2]^3/Ry
    # This is an exact reformulation of the common factor in Egerton's equations 3.125 and 3.126.
    # His equations assume 2 electrons (Ne = 2) occupy the 1s shell, which is not true for hydrogen.
    # We compensate with the shellOccupancy factor, following Egerton's use of RNK in his SIMGAK3 program.
    gos = 128 * shellOccupancy * epsilonR * (Q2 + epsilonR / 3) / ((Q2 - epsilonR) ** 2 / screenedZ2 + 4 * Q2) ** 3 / rydbergEnergy_eV

    # Generate kH array = |(E/Ry)/Zs^2 - 1|^(1/2)
    # To simplify Egerton equation 3.127, we take kH to be the absolute value of that defined in equation 3.124.
    # We avoid computational overflow in the GOS exponential factors by limiting the smallest value of kH to no less than 0.01.
    # This yields the correct kH -> 0 limiting value of the exponential factor in 3.125 and 3.126: exp(-4*Zs^2/(Q^2-E/Ry+2Zs^2))
    kH = numpy.fmax(numpy.sqrt(numpy.fabs(epsilonR / screenedZ2 - 1)), 0.01)

    # Determine energy array index at which "free" states begin
    boundStateMask = numpy.zeros_like(epsilonR)
    boundStateMask[epsilonR < screenedZ2] = 1
    freeStateStartIndex = int(boundStateMask.sum())

    # Generate GOS 'exponential' factor map, i.e. the exponential factors in Egerton's equations 3.125 and 3.126
    gosFactor = numpy.zeros_like(Q2)

    # Compute bound-state portion = exp(-y), as in 3.126.
    # Note that -y has been reformulated as log[(Q^2-E/Ry+2(1-kH)Zs^2)/(Q^2-E/Ry+2(1+kH)Zs^2)]/kH.
    epsilonR_bound = epsilonR[:freeStateStartIndex]
    kH_bound = kH[:freeStateStartIndex]
    Q2_bound = Q2[:, :freeStateStartIndex]
    gosFactor_bound = gosFactor[:, :freeStateStartIndex]
    gosFactor_bound[...] = Q2_bound - epsilonR_bound + 2 * screenedZ2 * (1 - kH_bound)
    gosFactor_bound[...] /= Q2_bound - epsilonR_bound + 2 * screenedZ2 * (1 + kH_bound)
    gosFactor_bound[...] = numpy.exp(numpy.log(gosFactor_bound) / kH_bound)

    if freeStateStartIndex < energySampleCount:
        # Compute free-state portion = exp(-2*betaPrime/kH)/[1-exp(-2*pi/kH)], as in 3.125.
        # Note that betaPrime has been reformulated as arctan(2*Zs^2*kH/(Q^2 - E/Ry + 2*Zs^2)).
        epsilonR_free = epsilonR[freeStateStartIndex:]
        kH_free = kH[freeStateStartIndex:]
        Q2_free = Q2[:, freeStateStartIndex:]
        gosFactor_free = gosFactor[:, freeStateStartIndex:]
        gosFactor_free[...] = numpy.arctan2(2 * screenedZ2 * kH_free, Q2_free - epsilonR_free + 2 * screenedZ2)
        gosFactor_free[...] = numpy.exp(-2 * gosFactor_free / kH_free) / (1 - numpy.exp(-2 * numpy.pi / kH_free))

    gos *= gosFactor

    return gos
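
A minimal usage sketch for the function above, under assumed parameters: the carbon K-edge onset, beam energy, and collection angle below are illustrative values, not from the original source, and numpy.trapz integrates the returned table over its angle and energy-loss axes.

gos = k_shell_hydrogenic_gos(6, 284.0, 100.0, 100000.0, 0.010)  # assumed: C K edge, 100 keV beam, 10 mrad
print('GOS table shape (theta x energy):', gos.shape)

# Rebuild the sample axes used inside the function and integrate the table.
theta = numpy.linspace(0.0, 0.010, gos.shape[0])
energy = numpy.linspace(284.0, 384.0, gos.shape[1])
gos_vs_theta = numpy.trapz(gos, energy, axis=1)              # integrate over energy loss
print('Table integral:', numpy.trapz(gos_vs_theta, theta))   # then over scattering angle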
Ejemplo n.º 59
0
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid

# NOTE: bin_ndarray and easy_fft are helpers assumed to be defined elsewhere in the
# original module; they are not part of this snippet.

def easy_plot(psfs,
              labels,
              oversample_factor=1,
              res=1,
              gam=0.3,
              vmin=1e-3,
              interpolation="bicubic"):
    ncols = len(psfs)

    assert ncols == len(labels), "Lengths mismatched"
    assert ncols < 10

    plot_size = 2.0

    fig = plt.figure(None, (plot_size * ncols, plot_size * 4), dpi=150)
    grid = ImageGrid(fig, 111, nrows_ncols=(4, ncols), axes_pad=0.1)

    fig2, axp = plt.subplots(dpi=150, figsize=(plot_size * ncols, 4))

    for (i, p), l, col in zip(enumerate(psfs), labels, grid.axes_column):
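        # Bin back down by the oversampling factor and normalize to peak intensity
        # (bin_ndarray is a helper from the original module).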
        p = bin_ndarray(p, bin_size=oversample_factor)
        p /= p.max()
        col[0].imshow(p.max(1),
                      norm=mpl.colors.PowerNorm(gam),
                      interpolation=interpolation)
        col[1].imshow(p.max(0),
                      norm=mpl.colors.PowerNorm(gam),
                      interpolation=interpolation)

        col[0].set_title(l)

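        # The magnitude of the Fourier transform of the PSF gives the OTF; floor it
        # at vmin so the log-scaled display is well defined (easy_fft is a helper
        # from the original module).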
        otf = abs(easy_fft(p))
        otf /= otf.max()
        otf = np.fmax(otf, vmin)
        c = (len(otf) + 1) // 2

        col[2].matshow(otf[:, c],
                       norm=mpl.colors.LogNorm(),
                       interpolation=interpolation)
        col[3].matshow(otf[c],
                       norm=mpl.colors.LogNorm(),
                       interpolation=interpolation)

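        # On-axis intensity profile through the lateral center of the PSF.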
        pp = p[:, c, c]
        axp.plot((np.arange(len(pp)) - (len(pp) + 1) // 2) * res,
                 pp / pp.max(),
                 label=l)

    for ax in grid:
        ax.xaxis.set_major_locator(plt.NullLocator())
        ax.yaxis.set_major_locator(plt.NullLocator())

    ylabels = "XZ", "XY"
    ylabels += tuple(map(lambda x: r"$k_{{{}}}$".format(x), ylabels))
    for ax, l in zip(grid.axes_column[0], ylabels):
        ax.set_ylabel(l)

    axp.yaxis.set_major_locator(plt.NullLocator())
    axp.set_xlabel("Axial Position (µm)")
    axp.set_title("On Axis Intensity")
    axp.legend()