def compute_pi3k(egfr_value, erk_value, time_value, initial_values, mfs):
    """Rules
    ---
    if egfr is high and erk is low and time is high then pi3k is high
    if egfr is low or erk is high or time is low then pi3k is low"""
    a1_1 = mfs[0][1][initial_values[0] == egfr_value]  # egfr_high[egfr == egfr_value]
    a1_2 = mfs[1][0][initial_values[1] == erk_value]   # erk_low[erk == erk_value]
    a1_3 = mfs[3][1][initial_values[3] == time_value]
    if a1_1.size == 0:
        a1_1 = mfs[0][1][find_closest(initial_values[0], egfr_value)]
    if a1_2.size == 0:
        a1_2 = mfs[1][0][find_closest(initial_values[1], erk_value)]
    if a1_3.size == 0:
        a1_3 = mfs[3][1][find_closest(initial_values[3], time_value)]
    a1 = min(a1_1, a1_2, a1_3)
    c1 = np.fmin(np.linspace(a1, a1, 100), mfs[2][1])

    a2_1 = mfs[0][0][initial_values[0] == egfr_value]  # egfr_low[egfr == egfr_value]
    a2_2 = mfs[1][1][initial_values[1] == erk_value]   # erk_high[erk == erk_value]
    a2_3 = mfs[3][0][initial_values[3] == time_value]
    if a2_1.size == 0:
        a2_1 = mfs[0][0][find_closest(initial_values[0], egfr_value)]
    if a2_2.size == 0:
        a2_2 = mfs[1][1][find_closest(initial_values[1], erk_value)]
    if a2_3.size == 0:
        a2_3 = mfs[3][0][find_closest(initial_values[3], time_value)]
    a2 = max(a2_1, a2_2, a2_3)
    c2 = np.fmin(np.linspace(a2, a2, 100), mfs[2][0])

    c_com = np.fmax(c1, c2)
    return fuzz.defuzz(initial_values[2], c_com, 'centroid')
def newton(B, b, g, x0, eps, kmax, s):
    k = 0
    x = np.copy(x0)
    err = eps + 1
    while k < kmax and err > eps:
        k = k + 1
        F = np.fmin(B.dot(x) - b, x - g)
        Fp = csr_matrix(B, copy=True)
        # Dense reference implementation kept for comparison:
        # B1 = np.copy(B.toarray()); x1 = np.copy(x0)
        # F1 = np.fmin(np.dot(B1, x1) - b, x1 - g)
        # Fp1 = np.eye(len(B1))
        # i1 = np.where((np.dot(B1, x1) - b) <= (x1 - g))
        # Fp1[i1, :] = B1[i1, :]
        # x1 = x1 - np.linalg.solve(Fp1, F1)
        # err1 = np.linalg.norm(np.fmin(np.dot(B1, x1) - b, x1 - g), np.inf)
        i = np.where((B.dot(x) - b) > (x - g))
        for i_row in i[0][:]:
            csr_row_set_nz_to_val(Fp, i_row)  # set row to identity
        x = x - sparse.linalg.spsolve(Fp, F)
        # x = x - np.linalg.solve(Fp, F)
        err = np.linalg.norm(np.fmin(B.dot(x) - b, x - g), np.inf)
        # txt = "Scheme (k=" + str(k) + ")"
        # plt.plot(s, x[992:1024], label=txt)
    # plt.show()
    return x
def fuzzy_min(x, A, y, B):
    """
    Finds minimum between fuzzy set A in universe x and fuzzy set B in
    universe y.

    Parameters
    ----------
    x : 1d array, length N
        Universe variable for fuzzy set A.
    A : 1d array, length N
        Fuzzy set for universe x.
    y : 1d array, length M
        Universe variable for fuzzy set B.
    B : 1d array, length M
        Fuzzy set for universe y.

    Returns
    -------
    z : 1d array
        Output variable.
    mfz : 1d array
        Fuzzy membership set for variable z.

    Note
    ----
    Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering
    Applications, (2010), pp. 414, Eq. 12.17.
    """
    # A and x, and B and y, are formed into (MxN) matrices.  The former has
    # identical rows; the latter identical columns.
    N = len(B)
    AA = np.dot(np.atleast_2d(A).T, np.ones((1, N)))
    X = np.dot(np.atleast_2d(x).T, np.ones((1, N)))
    M = len(A)
    BB = np.dot(np.ones((M, 1)), np.atleast_2d(B))
    Y = np.dot(np.ones((M, 1)), np.atleast_2d(y))

    # Take the element-wise minimum
    Z = np.fmin(X, Y).ravel()
    Z_index = np.argsort(Z)
    Z = np.sort(Z)

    # Array min() operation
    C = np.fmin(AA, BB).ravel()
    C = C[Z_index]

    # Initialize loop
    z, mfz = np.empty(0), np.empty(0)
    idx = 0
    for i in range(len(C)):
        index = np.nonzero(Z == Z[idx])[0]
        z = np.hstack((z, Z[idx]))
        mfz = np.hstack((mfz, C[index].max()))
        if Z[idx] == Z.max():
            break
        idx = index.max() + 1

    return z, mfz
def rule_base(b, f_mat):
    """
    Returns y values of output by clipping by an amount of output
    activations for output fuzzy subsets.

    Arguments:
        f_mat - rule_strength matrix
        b     - b[2], y values of output fuzzy subsets

    E / DEL_E |       NM        ||       NS        ||        Z        ||       PS        ||       PM
    -------------------------------------------------------------------------------------------------
    NM        | f_mat[0][0] NM  || f_mat[0][1] NM  || f_mat[0][2] NS  || f_mat[0][3] Z   || f_mat[0][4] PS
    NS        | f_mat[1][0] NM  || f_mat[1][1] NM  || f_mat[1][2] NS  || f_mat[1][3] PS  || f_mat[1][4] PM
    Z         | f_mat[2][0] NM  || f_mat[2][1] NS  || f_mat[2][2] Z   || f_mat[2][3] PS  || f_mat[2][4] PM
    PS        | f_mat[3][0] NM  || f_mat[3][1] NS  || f_mat[3][2] PS  || f_mat[3][3] PM  || f_mat[3][4] PM
    PM        | f_mat[4][0] NS  || f_mat[4][1] Z   || f_mat[4][2] PS  || f_mat[4][3] PM  || f_mat[4][4] PM
    """
    NM = max(f_mat[0][0], f_mat[0][1], f_mat[1][0], f_mat[1][1],
             f_mat[2][0], f_mat[3][0])
    b[2][0] = np.fmin(NM, b[2][0])

    NS = max(f_mat[0][2], f_mat[1][2], f_mat[2][1], f_mat[3][1], f_mat[4][0])
    b[2][1] = np.fmin(NS, b[2][1])

    Z = max(f_mat[0][3], f_mat[2][2], f_mat[4][1])
    b[2][2] = np.fmin(Z, b[2][2])

    PS = max(f_mat[0][4], f_mat[1][3], f_mat[2][3], f_mat[3][2], f_mat[4][2])
    b[2][3] = np.fmin(PS, b[2][3])

    PM = max(f_mat[1][4], f_mat[2][4], f_mat[3][4], f_mat[3][3],
             f_mat[4][3], f_mat[4][4])
    b[2][4] = np.fmin(PM, b[2][4])

    return b[2]
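# A minimal sketch of driving rule_base above, assuming `numpy as np` is
# imported; the output universe, output subsets, and rule strengths below
# are made-up values for illustration only.
import numpy as np

y_out = np.linspace(0, 1, 50)
b = [None, None, [y_out.copy() for _ in range(5)]]   # only b[2] is used
f_mat = np.random.default_rng(0).random((5, 5))      # hypothetical rule strengths
clipped_subsets = rule_base(b, f_mat)                # five clipped output subsets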
def compute_akt(pi3k_value, time_value, initial_values, mfs):
    """Rules-
    If pi3k is high and time is high then akt is high
    If pi3k is low or time is low then akt is low"""
    a1_1 = mfs[0][1][initial_values[0] == pi3k_value]
    a1_2 = mfs[2][1][initial_values[2] == time_value]
    if a1_1.size == 0:
        a1_1 = mfs[0][1][find_closest(initial_values[0], pi3k_value)]
    if a1_2.size == 0:
        a1_2 = mfs[2][1][find_closest(initial_values[2], time_value)]
    a1 = min(a1_1, a1_2)
    c1 = np.fmin(np.linspace(a1, a1, 100), mfs[1][1])

    a2_1 = mfs[0][0][initial_values[0] == pi3k_value]
    a2_2 = mfs[2][0][initial_values[2] == time_value]
    if a2_1.size == 0:
        a2_1 = mfs[0][0][find_closest(initial_values[0], pi3k_value)]
    if a2_2.size == 0:
        a2_2 = mfs[2][0][find_closest(initial_values[2], time_value)]
    a2 = max(a2_1, a2_2)
    c2 = np.fmin(np.linspace(a2, a2, 100), mfs[1][0])

    c_com = np.fmax(c1, c2)
    return fuzz.defuzz(initial_values[1], c_com, 'centroid')
def work(self, input_items, output_items):
    if self.mlimstage:
        for x in range(len(input_items)):
            try:
                if self.input_blocks[x] is self.null_src:
                    continue
            except Exception as e:
                print('x:%s len(input_items):%s len(input_blocks):%s' %
                      (x, len(input_items), len(self.input_blocks)))
                raise e
            ia = input_items[x]
            numpy.fmin(ia, self.limvals[x], ia)      # in-place limit
            numpy.multiply(ia, self.mulvals[x], ia)  # in-place gain
            input_items[x] = ia
    cur = None
    for x in range(len(input_items)):
        if self.input_blocks[x] is self.null_src:
            continue
        if cur is None:
            cur = input_items[x]
        else:
            numpy.add(cur, input_items[x], cur)
    bsize = len(output_items[0])
    if cur is None:
        output_items[0][:] = numpy.zeros(bsize, self.dtype)
    else:
        output_items[0][:] = cur
    return bsize
def test_fmin(self):
    from numpy import fmin, array
    nnan, nan, inf, ninf = float('-nan'), float('nan'), float('inf'), float('-inf')
    a = array((complex(ninf, 10), complex(10, ninf),
               complex(inf, 10), complex(10, inf),
               5+5j, 5-5j, -5+5j, -5-5j,
               0+5j, 0-5j, 5, -5,
               complex(nan, 0), complex(0, nan)), dtype=complex)
    b = [inf]*a.size
    res = [a[0], a[1], b[2], a[3], a[4], a[5], a[6], a[7],
           a[8], a[9], a[10], a[11], b[12], b[13]]
    assert (fmin(a, b) == res).all()
    b = [ninf]*a.size
    res = [b[0], b[1], b[2], b[3], b[4], b[5], b[6], b[7],
           b[8], b[9], b[10], b[11], b[12], b[13]]
    assert (fmin(a, b) == res).all()
    b = [0]*a.size
    res = [a[0], b[1], b[2], b[3], b[4], b[5], a[6], a[7],
           b[8], a[9], b[10], a[11], b[12], b[13]]
    assert (fmin(a, b) == res).all()
def periodic_3d_distance(x1, y1, z1, x2, y2, z2, Lbox):
    """
    Function computes the distance between two sets of coordinates with the
    same number of points, accounting for PBCs.

    Parameters
    ------------
    x1, y1, z1 : array_like
        Length-Npts arrays storing Cartesian coordinates

    x2, y2, z2 : array_like
        Length-Npts arrays storing Cartesian coordinates

    Lbox : float
        Box length defining the periodic boundary conditions

    Returns
    --------
    r : array_like
        Length-Npts array storing the 3d distance between the input points,
        accounting for box periodicity.
    """
    dx = np.fabs(x1 - x2)
    dx = np.fmin(dx, Lbox - dx)
    dy = np.fabs(y1 - y2)
    dy = np.fmin(dy, Lbox - dy)
    dz = np.fabs(z1 - z2)
    dz = np.fmin(dz, Lbox - dz)
    return np.sqrt(dx*dx + dy*dy + dz*dz)
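# A quick sanity check of the wraparound, assuming `numpy as np`: two points
# 0.9*Lbox apart along x are only 0.1*Lbox apart through the periodic boundary.
import numpy as np

Lbox = 1.0
x1, y1, z1 = np.array([0.05]), np.array([0.5]), np.array([0.5])
x2, y2, z2 = np.array([0.95]), np.array([0.5]), np.array([0.5])
print(periodic_3d_distance(x1, y1, z1, x2, y2, z2, Lbox))  # ~[0.1]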
def clip_to_window(boxlist, window):
    """Clip bounding boxes to a window.

    This op clips input bounding boxes (represented by bounding box corners)
    to a window, optionally filtering out boxes that do not overlap at all
    with the window.

    Args:
        boxlist: BoxList holding M_in boxes
        window: a numpy array of shape [4] representing the
            [y_min, x_min, y_max, x_max] window to which the op should clip
            boxes.

    Returns:
        a BoxList holding M_out boxes where M_out <= M_in
    """
    y_min, x_min, y_max, x_max = np.array_split(boxlist.get(), 4, axis=1)
    win_y_min = window[0]
    win_x_min = window[1]
    win_y_max = window[2]
    win_x_max = window[3]
    y_min_clipped = np.fmax(np.fmin(y_min, win_y_max), win_y_min)
    y_max_clipped = np.fmax(np.fmin(y_max, win_y_max), win_y_min)
    x_min_clipped = np.fmax(np.fmin(x_min, win_x_max), win_x_min)
    x_max_clipped = np.fmax(np.fmin(x_max, win_x_max), win_x_min)
    clipped = np_box_list.BoxList(
        np.hstack([y_min_clipped, x_min_clipped, y_max_clipped,
                   x_max_clipped]))
    clipped = _copy_extra_fields(clipped, boxlist)
    areas = area(clipped)
    nonzero_area_indices = np.reshape(
        np.nonzero(np.greater(areas, 0.0)), [-1]).astype(np.int32)
    return gather(clipped, nonzero_area_indices)
def fuzzy_min(x, a, y, b):
    """
    Finds minimum between fuzzy set a in universe x and fuzzy set b in
    universe y.

    Parameters
    ----------
    x : 1d array, length N
        Universe variable for fuzzy set a.
    a : 1d array, length N
        Fuzzy set for universe x.
    y : 1d array, length M
        Universe variable for fuzzy set b.
    b : 1d array, length M
        Fuzzy set for universe y.

    Returns
    -------
    z : 1d array
        Output variable.
    mfz : 1d array
        Fuzzy membership set for variable z.

    Note
    ----
    Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering
    Applications, (2010), pp. 414, Eq. 12.17.
    """
    # a and x, and b and y, are formed into (MxN) matrices.  The former has
    # identical rows; the latter identical columns.
    n = len(b)
    aa = np.dot(np.atleast_2d(a).T, np.ones((1, n)))
    x = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    m = len(a)
    bb = np.dot(np.ones((m, 1)), np.atleast_2d(b))
    y = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    # Take the element-wise minimum
    zz = np.fmin(x, y).ravel()
    zz_index = np.argsort(zz)
    zz = np.sort(zz)

    # Array min() operation
    c = np.fmin(aa, bb).ravel()
    c = c[zz_index]

    # Initialize loop
    z, mfz = np.empty(0), np.empty(0)
    idx = 0
    for i in range(len(c)):
        index = np.nonzero(zz == zz[idx])[0]
        z = np.hstack((z, zz[idx]))
        mfz = np.hstack((mfz, c[index].max()))
        if zz[idx] == zz.max():
            break
        idx = index.max() + 1

    return z, mfz
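# A minimal usage sketch of fuzzy_min, assuming `numpy as np`; the two
# triangular sets below are invented for illustration.
import numpy as np

x = np.arange(0, 5)
a = np.array([0.0, 0.5, 1.0, 0.5, 0.0])   # triangle peaking at x = 2
y = np.arange(2, 7)
b = np.array([0.0, 0.5, 1.0, 0.5, 0.0])   # triangle peaking at y = 4
z, mfz = fuzzy_min(x, a, y, b)            # membership of min(x, y)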
def adjust(p, method='bonferroni'):
    '''
    Usage: pvalues.adjust(p, method='bonferroni')
    returns p-values adjusted using one of several methods.

    p ...... numeric vector of p-values (possibly with NAs)
    method . correction method
        bonferroni: Use Bonferroni method to control the family-wise error
                    rate strictly.
        BH:         Use method of Benjamini and Hochberg to control the
                    false discovery rate.
        fdr:        same as 'BH'.
    '''
    try:
        p = np.array(p).astype(float)
        n = len(p)
    except ValueError:
        print('Error: input p-values contain invalid string elements.')
        quit()
    # remove "n.a." values from input vector
    p0 = p
    not_na = ~np.isnan(p)
    p = p[not_na]
    lp = len(p)
    if lp <= 1:
        return p0
    if method == 'bonferroni':
        p0[not_na] = np.fmin(1., n * p)
    elif method == 'BH' or method == 'fdr':
        i = np.arange(lp + 1)[:0:-1]
        o = np.argsort(p)[::-1]
        ro = np.argsort(o)
        p0[not_na] = np.fmin(1., np.minimum.accumulate(
            float(n) / i.astype(float) * p[o]))[ro]
    return p0
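# Example use, assuming `numpy as np`; NaNs pass through untouched, and BH
# leaves small p-values far lower than Bonferroni does.
import numpy as np

p = [0.001, 0.008, 0.039, 0.041, 0.27, np.nan]
print(adjust(list(p), method='bonferroni'))
print(adjust(list(p), method='BH'))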
def compute_erk_change(raf_value, time_value, initial_values, mfs):
    """Rules-
    If raf is high and time is high then positive_change_erk is high
    If raf is high1 and time is low then positive_change_erk is low
    If raf is low then positive_change_erk is low
    If raf is low and time is high then negative_change_erk is high
    If raf is low and time is low then negative_change_erk is low"""
    ### Positive change
    # Antecedent 1
    f = interp1d(initial_values[0][0], mfs[0][1])
    a1_1 = f(raf_value)                         # raf_high[raf == raf_value]
    f = interp1d(initial_values[2], mfs[2][1])
    a1_2 = f(time_value)                        # time_high[time == time_value]
    a1 = min(a1_1, a1_2)
    c1 = np.fmin(a1, mfs[1][3])                 # mfs[1][3] is positive_change_erk_high

    # Antecedent 2
    f = interp1d(initial_values[0][0], mfs[0][6])
    a2_1 = f(raf_value)
    f = interp1d(initial_values[2], mfs[2][0])  # time_low[time == time_value]
    a2_2 = f(time_value)
    a2 = min(a2_1, a2_2)
    c2 = np.fmin(a2, mfs[1][2])                 # mfs[1][2] is positive_change_erk_low

    c_com_positive = np.fmax(c1, c2)

    f = interp1d(initial_values[0][0], mfs[0][0])
    a3 = f(raf_value)
    c3 = np.fmin(a3, mfs[1][2])
    c_com_positive = np.fmax(c_com_positive, c3)

    pos_change = fuzz.defuzz(initial_values[1][1], c_com_positive, 'centroid')
    # initial_values[1][1] is positive_change_erk

    ### Negative change (currently disabled)
    # Antecedent 3
    # f = interp1d(initial_values[0][0], mfs[0][0])
    # a3_1 = f(raf_value)                       # raf_low[raf == raf_value]
    # a3_2 = a1_2                               # time_high[time == time_value]
    # a3 = min(a3_1, a3_2)
    # c3 = np.fmin(a3, mfs[1][5])               # mfs[1][5] is negative_change_erk_high
    # Antecedent 4
    # a4_1 = a3_1                               # raf_low[raf == raf_value]
    # a4_2 = a2_2                               # time_low[time == time_value]
    # a4 = min(a4_1, a4_2)
    # c4 = np.fmin(a4, mfs[1][4])               # mfs[1][4] is negative_change_erk_low
    # c_com_negative = np.fmax(c3, c4)
    # neg_change = fuzz.defuzz(initial_values[1][2], c_com_negative, 'centroid')
    # initial_values[1][2] is negative_change_erk

    return pos_change
def test_half_ufuncs(self):
    """Test the various ufuncs"""

    a = np.array([0, 1, 2, 4, 2], dtype=float16)
    b = np.array([-2, 5, 1, 4, 3], dtype=float16)
    c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

    assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
    assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
    assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
    assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

    assert_equal(np.equal(a, b), [False, False, False, True, False])
    assert_equal(np.not_equal(a, b), [True, True, True, False, True])
    assert_equal(np.less(a, b), [False, True, False, False, True])
    assert_equal(np.less_equal(a, b), [False, True, False, True, True])
    assert_equal(np.greater(a, b), [True, False, True, False, False])
    assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
    assert_equal(np.logical_and(a, b), [False, True, True, True, True])
    assert_equal(np.logical_or(a, b), [True, True, True, True, True])
    assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
    assert_equal(np.logical_not(a), [True, False, False, False, False])

    assert_equal(np.isnan(c), [False, False, False, True, False])
    assert_equal(np.isinf(c), [False, False, True, False, False])
    assert_equal(np.isfinite(c), [True, True, False, False, True])
    assert_equal(np.signbit(b), [True, False, False, False, False])

    assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

    assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
    x = np.maximum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [0, 5, 1, 0, 6])
    assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
    x = np.minimum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [-2, -1, -np.inf, 0, 3])
    assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
    assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
    assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
    assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

    assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
    assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
    assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
    assert_equal(np.square(b), [4, 25, 1, 16, 9])
    assert_equal(np.reciprocal(b), [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
    assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
    assert_equal(np.conjugate(b), b)
    assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
    assert_equal(np.negative(b), [2, -5, -1, -4, -3])
    assert_equal(np.positive(b), b)
    assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
    assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
    assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
    assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def coord_to_px(self, x, y, latlon=False, rounded=True, check_valid=True):
    """
    Convert x,y coordinates into pixel coordinates of raster.

    x,y may be either in native coordinate system of raster or lat/lon.

    Parameters:
        x : float, x coordinate to convert.
        y : float, y coordinate to convert.
        latlon : boolean, default False. Set as True if bounds in lat/lon.
        rounded : if set to True, return the rounded pixel coordinates,
            otherwise return the float values
        check_valid : bool, if set to True, will check that all pixels are
            in the valid range.

    Returns:
        (x_pixel, y_pixel)
    """
    # Convert coordinates to map system if provided in lat/lon and image
    # is projected (rather than geographic)
    if latlon == True and self.proj != None:
        x, y = self.proj(x, y)

    # Shift to the centre of the pixel
    x = np.array(x - self.xres / 2)
    y = np.array(y - self.yres / 2)

    g0, g1, g2, g3, g4, g5 = self.trans
    if g2 == 0:
        xPixel = (x - g0) / float(g1)
        yPixel = (y - g3 - xPixel * g4) / float(g5)
    else:
        xPixel = (y * g2 - x * g5 + g0 * g5 - g2 * g3) / float(g2 * g4 - g1 * g5)
        yPixel = (x - g0 - xPixel * g1) / float(g2)

    # Round if required
    if rounded == True:
        xPixel = np.round(xPixel)
        yPixel = np.round(yPixel)

    if check_valid == False:
        return xPixel, yPixel

    # Check that pixel location is not outside image dimensions
    nx = self.ds.RasterXSize
    ny = self.ds.RasterYSize
    xPixel_new = np.copy(xPixel)
    yPixel_new = np.copy(yPixel)
    xPixel_new = np.fmin(xPixel_new, nx)
    yPixel_new = np.fmin(yPixel_new, ny)
    xPixel_new = np.fmax(xPixel_new, 0)
    yPixel_new = np.fmax(yPixel_new, 0)

    if np.any(xPixel_new != xPixel) or np.any(yPixel_new != yPixel):
        print("Warning : some points are out of domain for file")

    return xPixel_new, yPixel_new
def compute_akt_change(pi3k_value, time_value, initial_values, mfs):
    """Rules-
    If pi3k is high and time is high then positive_change_akt is high
    If pi3k is high1 and time is low then positive_change_akt is low
    If pi3k is low then positive_change_akt is low
    If pi3k is low and time is high then negative_change_akt is high
    If pi3k is low and time is low then negative_change_akt is low"""
    ### Positive change
    # Antecedent 1
    f = interp1d(initial_values[0][0], mfs[0][1])
    a1_1 = f(pi3k_value)                        # pi3k_high[pi3k == pi3k_value]
    f = interp1d(initial_values[2], mfs[2][1])
    a1_2 = f(time_value)                        # time_high[time == time_value]
    a1 = min(a1_1, a1_2)
    c1 = np.fmin(a1, mfs[1][3])                 # positive_change_akt is high

    # Antecedent 2
    f = interp1d(initial_values[0][0], mfs[0][6])
    a2_1 = f(pi3k_value)                        # pi3k_high1[pi3k == pi3k_value]
    f = interp1d(initial_values[2], mfs[2][0])
    a2_2 = f(time_value)                        # time_low[time == time_value]
    a2 = min(a2_1, a2_2)
    c2 = np.fmin(a2, mfs[1][2])                 # positive_change_akt is low

    c_com_positive = np.fmax(c1, c2)

    f = interp1d(initial_values[0][0], mfs[0][0])
    a3 = f(pi3k_value)
    c3 = np.fmin(a3, mfs[1][2])
    c_com_positive = np.fmax(c_com_positive, c3)

    pos_change = fuzz.defuzz(initial_values[1][1], c_com_positive, 'centroid')
    # initial_values[1][1] is positive_change_akt

    ### Negative change (currently disabled)
    # Antecedent 3
    # f = interp1d(initial_values[0][0], mfs[0][0])
    # a3_1 = f(pi3k_value)                      # pi3k_low[pi3k == pi3k_value]
    # a3_2 = a1_2                               # time_high[time == time_value]
    # a3 = min(a3_1, a3_2)
    # c3 = np.fmin(a3, mfs[1][5])               # mfs[1][5] is negative_change_akt_high
    # Antecedent 4
    # a4_1 = a3_1                               # pi3k_low[pi3k == pi3k_value]
    # a4_2 = a2_2                               # time_low[time == time_value]
    # a4 = min(a4_1, a4_2)
    # c4 = np.fmin(a4, mfs[1][4])               # mfs[1][4] is negative_change_akt_low
    # c_com_negative = np.fmax(c3, c4)
    # neg_change = fuzz.defuzz(initial_values[1][2], c_com_negative, 'centroid')
    # initial_values[1][2] is negative_change_akt

    return pos_change
def calculate_akt(pi3k_mfs, akt_mfs, akt, pi3k_index):
    a1 = pi3k_mfs[0][pi3k_index]
    c1 = np.fmin(a1, akt_mfs[0])
    a2 = pi3k_mfs[1][pi3k_index]
    c2 = np.fmin(a2, akt_mfs[1])
    c_com = np.fmax(c1, c2)
    try:
        akt_val = fuzz.defuzz(akt, c_com, 'centroid')
    except AssertionError:
        akt_val = 0
    return akt_val
def calculate_egfr(time_mfs, egfr_mfs, egfr, time_index):
    a1 = time_mfs[0][time_index]
    c1 = np.fmin(a1, egfr_mfs[0])
    a2 = time_mfs[1][time_index]
    c2 = np.fmin(a2, egfr_mfs[1])
    c_com = np.fmax(c1, c2)
    try:
        egfr_val = fuzz.defuzz(egfr, c_com, 'centroid')
    except AssertionError:
        egfr_val = 0
    return egfr_val
def calculate_erk(raf_mfs, erk_mfs, erk, raf_index):
    a1 = raf_mfs[0][raf_index]
    c1 = np.fmin(a1, erk_mfs[0])
    a2 = raf_mfs[1][raf_index]
    c2 = np.fmin(a2, erk_mfs[1])
    c_com = np.fmax(c1, c2)
    try:
        erk_val = fuzz.defuzz(erk, c_com, 'centroid')
    except AssertionError:
        erk_val = 0
    return erk_val
def test_contrast():
    a = np.r_[0, 0, 0, 0.3, 0.7, 1, 0.9, 0]
    z = contrast(a, 1.8)

    # Legacy slower code which should produce identical result
    p = 1.8
    m = 0.5
    ymin = np.fmin(a, m)
    ymax = np.fmax(a, m)
    w = np.arange(len(a))
    wmax = w[ymax > m]
    wmin = w[ymax <= m]
    ymin = 2 ** (p - 1) * ymin ** p
    ymax = 1 - 2 ** (p - 1) * (1 - ymax) ** p
    ymin[wmax] = 0
    ymax[wmin] = 0
    assert_allclose(z, ymin + ymax)

    # Legacy slower code which should produce identical result
    p = 0.5
    m = 0.5
    z = contrast(a, 0.5)
    ymin = np.fmin(a, m)
    ymax = np.fmax(a, m)
    w = np.arange(len(a))
    wmax = w[ymax > m]
    wmin = w[ymax <= m]
    ymin = 2 ** (p - 1) * ymin ** p
    ymax = 1 - 2 ** (p - 1) * (1 - ymax) ** p
    ymin[wmax] = 0
    ymax[wmin] = 0
    assert_allclose(z, ymin + ymax)

    # Legacy slower code which should produce identical result
    p = 2.
    m = 0.5
    z = contrast(a, 2.)
    ymin = np.fmin(a, m)
    ymax = np.fmax(a, m)
    w = np.arange(len(a))
    wmax = w[ymax > m]
    wmin = w[ymax <= m]
    ymin = 2 ** (p - 1) * ymin ** p
    ymax = 1 - 2 ** (p - 1) * (1 - ymax) ** p
    ymin[wmax] = 0
    ymax[wmin] = 0
    assert_allclose(z, ymin + ymax)
def rule_actividad(value, graficar=False):
    # Receives the data to run the fuzzy process and find the membership.
    # Returns the class the value belongs to according to the hour.
    mf_actividad = generar_actividad(False)
    mf_calorico = generar_calorico(False)

    # Find the degrees of membership of the value (fuzzification)
    actividad_nivel_rest = fuzz.interp_membership(mf_actividad['intensity'], mf_actividad['rest'], value)
    actividad_nivel_std = fuzz.interp_membership(mf_actividad['intensity'], mf_actividad['active'], value)
    actividad_nivel_work = fuzz.interp_membership(mf_actividad['intensity'], mf_actividad['workout'], value)

    # Rule: if rest -> low
    rest_activation = np.fmin(actividad_nivel_rest, mf_calorico['low'])
    # Rule: if active -> standard
    active_activation = np.fmin(actividad_nivel_std, mf_calorico['standard'])
    # Rule: if workout -> high
    workout_activation = np.fmin(actividad_nivel_work, mf_calorico['high'])

    # Defuzzification
    agregado = np.fmax(rest_activation, np.fmax(active_activation, workout_activation))
    caloric = fuzz.defuzz(mf_calorico['caloric'], agregado, 'centroid')

    # # Plotting
    # if graficar:
    #     select_caloric = fuzz.interp_membership(mf_calorico['caloric'], agregado, caloric)
    #     caloric0 = np.zeros_like(mf_calorico['caloric'])
    #     fig, ax0 = plt.subplots(figsize=(8, 3))
    #     ax0.plot(mf_calorico['caloric'], mf_calorico['low'], 'b', linewidth=0.5, linestyle='--', label='Low')
    #     ax0.plot(mf_calorico['caloric'], mf_calorico['standard'], 'g', linewidth=0.5, linestyle='--', label='Standard')
    #     ax0.plot(mf_calorico['caloric'], mf_calorico['high'], 'r', linewidth=0.5, linestyle='--', label='High')
    #     ax0.fill_between(mf_calorico['caloric'], caloric0, agregado, facecolor='Orange', alpha=0.7)
    #     ax0.plot([caloric, caloric], [0, select_caloric], 'k', linewidth=1.5, alpha=0.9)
    #     ax0.set_title('Dish Classification and Result (line)')
    #     ax0.legend()
    #     # Turn off top/right axes
    #     for ax in (ax0,):
    #         ax.spines['top'].set_visible(False)
    #         ax.spines['right'].set_visible(False)
    #         ax.get_xaxis().tick_bottom()
    #         ax.get_yaxis().tick_left()
    #     plt.tight_layout()
    #     # fig.savefig('graphs/result_calorico.png', bbox_inches='tight')

    return caloric, "caloric"
def game_type(player, comp):
    """A fuzzy algorithm to define the offensiveness and/or defensiveness
    of the game.  Determines how aggressive the fuzzy player is."""
    score_diff = float(player - comp)

    ### Inputs ###
    # Input variable domain
    score = np.arange(-21, 21, 1)

    # Input membership functions
    score_ahead = fuzz.gaussmf(score, -21, 8.823)
    score_tied = fuzz.gaussmf(score, 0, 9.012)
    score_behind = fuzz.gaussmf(score, 21, 8.823)

    # Fuzzifying the current input
    def score_category(sc):
        score_cat_ahead = fuzz.interp_membership(score, score_ahead, sc)
        score_cat_tied = fuzz.interp_membership(score, score_tied, sc)
        score_cat_behind = fuzz.interp_membership(score, score_behind, sc)
        return dict(ahead=score_cat_ahead,
                    tied=score_cat_tied,
                    behind=score_cat_behind)

    ### Outputs ###
    # Output variable domain
    game = np.arange(0, 1, 1)

    # Output membership functions
    game_defensive = fuzz.gaussmf(game, 0, 0.162899)
    game_offensive = fuzz.gauss2mf(game, 0.30291, 0.090976, 1.31, 0.416)

    ### Rules ###
    current_score = score_category(score_diff)

    # Going to make this a hard opponent, so if the score is tied or
    # if the human is winning, it will play offensively
    rule1 = current_score['ahead']
    rule2 = np.fmax(current_score['tied'], current_score['behind'])

    # Apply implication operator (Mamdani)
    imp1 = np.fmin(rule1, game_defensive)
    imp2 = np.fmin(rule2, game_offensive)

    # Aggregate outputs using max
    aggregate_membership = np.fmax(imp1, imp2)

    # Defuzzify using centroid and return the result
    result_game = fuzz.defuzz(game, aggregate_membership, 'centroid')
    return result_game
def test_complex_nans(self):
    nan = np.nan
    for cnan in [complex(nan, 0), complex(0, nan), complex(nan, nan)]:
        arg1 = np.array([0, cnan, cnan], dtype=complex)
        arg2 = np.array([cnan, 0, cnan], dtype=complex)
        out = np.array([0, 0, nan], dtype=complex)
        assert_equal(np.fmin(arg1, arg2), out)
def get_ry0_distance(self, mesh):
    """
    :param mesh:
        :class:`~openquake.hazardlib.geo.mesh.Mesh` of points to calculate
        Ry0-distance to.
    :returns:
        Numpy array of distances in km.

    See also :meth:`superclass method <.base.BaseSurface.get_ry0_distance>`
    for spec of input and result values.

    This version is specific to the planar surface and doesn't make use of
    the mesh.
    """
    dst1 = geodetic.distance_to_arc(self.top_left.longitude,
                                    self.top_left.latitude,
                                    (self.strike + 90.) % 360,
                                    mesh.lons, mesh.lats)
    dst2 = geodetic.distance_to_arc(self.top_right.longitude,
                                    self.top_right.latitude,
                                    (self.strike + 90.) % 360,
                                    mesh.lons, mesh.lats)

    # Find the points on the rupture:
    # get the shortest distance from the two lines
    idx = numpy.sign(dst1) == numpy.sign(dst2)
    dst = numpy.zeros_like(dst1)
    dst[idx] = numpy.fmin(numpy.abs(dst1[idx]), numpy.abs(dst2[idx]))
    return dst
def cartprod(x, y):
    """
    Cartesian product of two fuzzy membership vectors. Uses ``min()``.

    Parameters
    ----------
    x : 1D array or iterable
        First fuzzy membership vector, of length M.
    y : 1D array or iterable
        Second fuzzy membership vector, of length N.

    Returns
    -------
    z : 2D array
        Cartesian product of ``x`` and ``y``, of shape (M, N).
    """
    # Ensure rank-1 input
    x, y = np.asarray(x).ravel(), np.asarray(y).ravel()

    m, n = len(x), len(y)

    a = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    b = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    return np.fmin(a, b)
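# Example, assuming `numpy as np`: entry (i, j) of the result is
# min(x[i], y[j]).
import numpy as np

print(cartprod([0.2, 0.9], [0.5, 0.7, 1.0]))
# [[0.2 0.2 0.2]
#  [0.5 0.7 0.9]]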
def searchsortednearest(a, v):
    higher_inds = np.fmin(np.searchsorted(a, v), len(a) - 1)
    lower_inds = np.fmax(higher_inds - 1, 0)
    closer_inds = higher_inds
    lower_is_better = np.abs(a[higher_inds] - v) > np.abs(a[lower_inds] - v)
    closer_inds[lower_is_better] = lower_inds[lower_is_better]
    return closer_inds
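# Example, assuming `numpy as np` and a sorted input array.
import numpy as np

a = np.array([0.0, 1.0, 4.0, 9.0])
v = np.array([-1.0, 3.1, 100.0])
print(searchsortednearest(a, v))  # [0 2 3]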
def maxmin_composition(s, r):
    """
    The max-min composition ``t`` of two fuzzy relation matrices.

    Parameters
    ----------
    s : 2d array, (M, N)
        Fuzzy relation matrix #1.
    r : 2d array, (N, P)
        Fuzzy relation matrix #2.

    Returns
    -------
    t : 2d array, (M, P)
        Max-min composition, defined by ``T = s o r``.
    """
    if s.ndim < 2:
        s = np.atleast_2d(s)
    if r.ndim < 2:
        r = np.atleast_2d(r).T
    m = s.shape[0]
    p = r.shape[1]
    t = np.zeros((m, p))

    for pp in range(p):
        for mm in range(m):
            t[mm, pp] = (np.fmin(s[mm, :], r[:, pp].T)).max()

    return t
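# Example, assuming `numpy as np`: composing a (2, 2) relation with a
# (2, 3) relation.
import numpy as np

s = np.array([[0.1, 0.8],
              [0.6, 0.4]])
r = np.array([[0.9, 0.2, 0.0],
              [0.3, 0.7, 0.5]])
print(maxmin_composition(s, r))
# [[0.3 0.7 0.5]
#  [0.6 0.4 0.4]]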
def rateOne(file, results="./OverlapResults.csv",
            methord=slicescore.OverlapCoeff, template=template):
    if os.path.isfile(template):
        data1, header1 = nrrd.read(template)
    else:
        print('Template file missing!')
        data1 = []
    if not data1 == []:
        ctemplate = xscore.xslice(data1)
        ztemplate = slicescore.zsampleslice(data1)
        ytemplate = slicescore.ysampleslice(data1)
        xtemplate = slicescore.xsampleslice(data1)
        del data1, header1
        r = np.float128(0.0)
        if os.path.isfile(file):
            print("testing:" + os.path.basename(file))
            data2, header2 = nrrd.read(file)
            calignment = xscore.xslice(data2)
            zalignment = slicescore.zsampleslice(data2)
            yalignment = slicescore.ysampleslice(data2)
            xalignment = slicescore.xsampleslice(data2)
            del data2, header2
            # FIXME: np.fmin is a binary ufunc, so this single-argument call
            # raises a TypeError as written.
            r = np.fmin(np.array([xscore.symTest(slicescore.OverlapCoeff, calignment),
                                  methord(ctemplate, calignment),
                                  methord(ztemplate, zalignment),
                                  methord(ytemplate, yalignment),
                                  methord(xtemplate, xalignment)]))
            if not results == None:
                with open(results, 'a') as csvfile:
                    spamwriter = csv.writer(csvfile)
                    spamwriter.writerow([path, r[-1][0],
                                         "[Symmetry,Diagonal,Zsample,Ysample,Xsample,Final]score",
                                         r[-1][1], r[-1][2], r[-1][3],
                                         r[-1][4], r[-1][5], min(r[-1][1:])])
            del xalignment
    return r
def deblend(image, peaks, interpolate=False, force_interpolate=False):
    """Quick and dirty deblender.

    Args
    ----
    @param image              A numpy array representing an image of a blend.
    @param peaks              A list of tuples representing the peak positions
                              of objects in the blend.
    @param interpolate        If at least one component of rot_center is not a
                              half-integer, use GalSim to rotate the image.
                              This currently doesn't work very well!!!
    @param force_interpolate  Use GalSim to rotate the image, even if
                              rot_center components are half-integer and
                              rotation via numpy array operations is possible.
                              This currently doesn't work very well!!!

    @returns templates, template_fractions, children
    """
    work_image = image + 1.e-20

    # Step 1: Make symmetric templates
    templates = [np.fmin(work_image,
                         rotate(work_image, peak,
                                interpolate=interpolate,
                                force_interpolate=force_interpolate))
                 for peak in peaks]

    # Step 2: Calculate relative contribution of each template
    template_sum = np.sum(templates, axis=0)
    template_fractions = [template / template_sum * (template_sum != 0)
                          for template in templates]

    # Step 3: Calculate deblended children
    children = [t * image for t in template_fractions]

    return templates, template_fractions, children
def create_visualization_for_pw_single_image(edges, edge_weights, imsz):
    """
    Create visualization for pairwise potentials of a single image, for grid
    graph only.

    The visualization is to transfer pairwise potentials into an image the
    same size as the original image.  The process is simple: for each pixel,
    look at the two edges coming from the north and west, then take the
    maximum of the negative edge weights.  Then for each pixel we get a
    number related to the likelihood of an edge at that point; this is used
    for visualization.

    edges: E*2 edge matrix, E is the number of edges
    edge_weights: E-dimensional vector for edge weights; (edges, edge_weights)
        are generated by one of the get_pw functions
    imsz: size of the image, (H,W)

    return: a visualization image of size H*W
    """
    H, W = imsz
    pw = np.ones(imsz, dtype=np.single) * edge_weights.max()
    pw[:, 1:W] = edge_weights[: H * (W - 1)].reshape(H, W - 1)
    pw[1:H, :] = np.fmin(edge_weights[H * (W - 1):].reshape(W, H - 1).T,
                         pw[1:H, :])
    return -pw
def get_ry0_distance(self, mesh):
    """
    :param mesh:
        :class:`~openquake.hazardlib.geo.mesh.Mesh` of points to calculate
        Ry0-distance to.
    :returns:
        Numpy array of distances in km.

    See also :meth:`superclass method <.base.BaseSurface.get_ry0_distance>`
    for spec of input and result values.

    This method uses an average strike direction to compute ry0.
    """
    # This computes ry0 by using an average strike direction
    top_edge = self.get_mesh()[0:1]
    mean_strike = self.get_strike()

    dst1 = geodetic.distance_to_arc(top_edge.lons[0, 0],
                                    top_edge.lats[0, 0],
                                    (mean_strike + 90.) % 360,
                                    mesh.lons, mesh.lats)
    dst2 = geodetic.distance_to_arc(top_edge.lons[0, -1],
                                    top_edge.lats[0, -1],
                                    (mean_strike + 90.) % 360,
                                    mesh.lons, mesh.lats)

    # Find the points on the rupture:
    # get the shortest distance from the two lines
    idx = numpy.sign(dst1) == numpy.sign(dst2)
    dst = numpy.zeros_like(dst1)
    dst[idx] = numpy.fmin(numpy.abs(dst1[idx]), numpy.abs(dst2[idx]))

    return dst
print "On a scale of 0 to 10 (10 being nearest), how close is the workplace?" dist = float(raw_input()) dist_level_lo = fuzz.interp_membership(x_dist, dist_lo, dist) dist_level_md = fuzz.interp_membership(x_dist, dist_md, dist) dist_level_hi = fuzz.interp_membership(x_dist, dist_hi, dist) part1 = min(sal_level_hi, min(int_level_hi, dist_level_hi)) part2 = min(sal_level_hi, min(int_level_hi, dist_level_md)) part3 = min(sal_level_hi, min(int_level_hi, dist_level_lo)) part4 = min(sal_level_md, min(int_level_hi, dist_level_hi)) part5 = min(sal_level_md, min(int_level_hi, dist_level_md)) part6 = min(sal_level_lo, min(int_level_hi, dist_level_hi)) active_rule1 = max(part1, part2, part3, part4, part5, part6) score_activation_hi = np.fmin(active_rule1, score_hi) part1 = min(sal_level_lo, min(int_level_hi, dist_level_md)) part2 = min(sal_level_md, min(int_level_md, dist_level_hi)) part3 = min(sal_level_hi, min(int_level_md, dist_level_hi)) part4 = min(sal_level_hi, min(int_level_md, dist_level_md)) part5 = min(sal_level_hi, min(int_level_lo, dist_level_hi)) part6 = min(sal_level_md, min(int_level_hi, dist_level_lo)) part7 = min(sal_level_md, min(int_level_md, dist_level_md)) part8 = min(sal_level_md, min(int_level_md, dist_level_lo)) part9 = min(sal_level_lo, min(int_level_hi, dist_level_lo)) active_rule2 = max(part1, part2, part3, part4, part5, part6, part7, part8, part9) score_activation_md = np.fmin(active_rule2, score_md)
    lambda c1, c2: c1.bitwiseOR(c2),
"bitwise_xor": lambda c1, c2: c1.bitwiseXOR(c2),
"copysign": pandas_udf(lambda s1, s2: np.copysign(s1, s2), DoubleType()),  # type: ignore
"float_power": pandas_udf(  # type: ignore
    lambda s1, s2: np.float_power(s1, s2), DoubleType()),
"floor_divide": pandas_udf(  # type: ignore
    lambda s1, s2: np.floor_divide(s1, s2), DoubleType()),
"fmax": pandas_udf(lambda s1, s2: np.fmax(s1, s2), DoubleType()),  # type: ignore
"fmin": pandas_udf(lambda s1, s2: np.fmin(s1, s2), DoubleType()),  # type: ignore
"fmod": pandas_udf(lambda s1, s2: np.fmod(s1, s2), DoubleType()),  # type: ignore
"gcd": pandas_udf(lambda s1, s2: np.gcd(s1, s2), DoubleType()),  # type: ignore
"heaviside": pandas_udf(lambda s1, s2: np.heaviside(s1, s2), DoubleType()),  # type: ignore
"hypot": F.hypot,
"lcm": pandas_udf(lambda s1, s2: np.lcm(s1, s2), DoubleType()),  # type: ignore
"ldexp": pandas_udf(lambda s1, s2: np.ldexp(s1, s2), DoubleType()),  # type: ignore
"left_shift": pandas_udf(lambda s1, s2: np.left_shift(s1, s2),
def ineichen(apparent_zenith, airmass_absolute, linke_turbidity,
             altitude=0, dni_extra=1364.):
    '''
    Determine clear sky GHI, DNI, and DHI from Ineichen/Perez model.

    Implements the Ineichen and Perez clear sky model for global
    horizontal irradiance (GHI), direct normal irradiance (DNI), and
    calculates the clear-sky diffuse horizontal (DHI) component as the
    difference between GHI and DNI*cos(zenith) as presented in [1, 2]. A
    report on clear sky models found the Ineichen/Perez model to have
    excellent performance with a minimal input data set [3].

    Default values for monthly Linke turbidity provided by SoDa [4, 5].

    Parameters
    -----------
    apparent_zenith : numeric
        Refraction corrected solar zenith angle in degrees.
    airmass_absolute : numeric
        Pressure corrected airmass.
    linke_turbidity : numeric
        Linke Turbidity.
    altitude : numeric, default 0
        Altitude above sea level in meters.
    dni_extra : numeric, default 1364
        Extraterrestrial irradiance. The units of ``dni_extra`` determine
        the units of the output.

    Returns
    -------
    clearsky : DataFrame (if Series input) or OrderedDict of arrays
        DataFrame/OrderedDict contains the columns/keys
        ``'dhi', 'dni', 'ghi'``.

    See also
    --------
    lookup_linke_turbidity
    pvlib.location.Location.get_clearsky

    References
    ----------
    [1] P. Ineichen and R. Perez, "A New airmass independent formulation for
        the Linke turbidity coefficient", Solar Energy, vol 73, pp. 151-157,
        2002.
    [2] R. Perez et. al., "A New Operational Model for Satellite-Derived
        Irradiances: Description and Validation", Solar Energy, vol 73,
        pp. 307-317, 2002.
    [3] M. Reno, C. Hansen, and J. Stein, "Global Horizontal Irradiance Clear
        Sky Models: Implementation and Analysis", Sandia National
        Laboratories, SAND2012-2389, 2012.
    [4] http://www.soda-is.com/eng/services/climat_free_eng.php#c5
        (obtained July 17, 2012).
    [5] J. Remund, et. al., "Worldwide Linke Turbidity Information", Proc.
        ISES Solar World Congress, June 2003. Goteborg, Sweden.
    '''
    # Dan's note on the TL correction: By my reading of the publication
    # on pages 151-157, Ineichen and Perez introduce (among other
    # things) three things. 1) Beam model in eqn. 8, 2) new turbidity
    # factor in eqn 9 and appendix A, and 3) Global horizontal model in
    # eqn. 11. They do NOT appear to use the new turbidity factor (item
    # 2 above) in either the beam or GHI models. The phrasing of
    # appendix A seems as if there are two separate corrections, the
    # first correction is used to correct the beam/GHI models, and the
    # second correction is used to correct the revised turbidity
    # factor. In my estimation, there is no need to correct the
    # turbidity factor used in the beam/GHI models.

    # Create the corrected TL for TL < 2
    # TLcorr = TL;
    # TLcorr(TL < 2) = TLcorr(TL < 2) - 0.25 .* (2-TLcorr(TL < 2)) .^ (0.5);

    # This equation is found in Solar Energy 73, pg 311. Full ref: Perez
    # et. al., Vol. 73, pp. 307-317 (2002). It is slightly different
    # than the equation given in Solar Energy 73, pg 156. We used the
    # equation from pg 311 because of the existence of known typos in
    # the pg 156 publication (notably the fh2-(TL-1) should be fh2 *
    # (TL-1)).

    # The NaN handling is a little subtle. The AM input is likely to
    # have NaNs that we'll want to map to 0s in the output. However, we
    # want NaNs in other inputs to propagate through to the output. This
    # is accomplished by judicious use and placement of np.maximum,
    # np.minimum, and np.fmax

    # use max so that nighttime values will result in 0s instead of
    # negatives. propagates nans.
    cos_zenith = np.maximum(tools.cosd(apparent_zenith), 0)

    tl = linke_turbidity

    fh1 = np.exp(-altitude / 8000.)
    fh2 = np.exp(-altitude / 1250.)
    cg1 = 5.09e-05 * altitude + 0.868
    cg2 = 3.92e-05 * altitude + 0.0387

    ghi = (np.exp(-cg2 * airmass_absolute * (fh1 + fh2 * (tl - 1))) *
           np.exp(0.01 * airmass_absolute**1.8))

    # use fmax to map airmass nans to 0s. multiply and divide by tl to
    # reinsert tl nans
    ghi = cg1 * dni_extra * cos_zenith * tl / tl * np.fmax(ghi, 0)

    # BncI = "normal beam clear sky radiation"
    b = 0.664 + 0.163 / fh1
    bnci = b * np.exp(-0.09 * airmass_absolute * (tl - 1))
    bnci = dni_extra * np.fmax(bnci, 0)

    # "empirical correction" SE 73, 157 & SE 73, 312.
    bnci_2 = ((1 - (0.1 - 0.2 * np.exp(-tl)) / (0.1 + 0.882 / fh1)) /
              cos_zenith)
    bnci_2 = ghi * np.fmin(np.fmax(bnci_2, 0), 1e20)

    dni = np.minimum(bnci, bnci_2)

    dhi = ghi - dni * cos_zenith

    irrads = OrderedDict()
    irrads['ghi'] = ghi
    irrads['dni'] = dni
    irrads['dhi'] = dhi

    if isinstance(dni, pd.Series):
        irrads = pd.DataFrame.from_dict(irrads)

    return irrads
def ci_conservative_generic(X, K, theta_grid, alpha_level, log_likelihood,
                            sample, t, verbose=False):
    L = len(theta_grid)

    # Generate samples from the mixture proposal distribution
    Y = []
    for k in range(K):
        l_k = np.random.randint(L)
        theta_k = theta_grid[l_k]
        Y.append(sample(theta_k))

    # Test statistic at observation
    t_X = t(X)

    # Statistics for the samples from the proposal distribution only
    # need to be calculated once...
    t_Y = np.zeros(K + 1)
    for k in range(K):
        t_Y[k] = t(Y[k])
    I_t_Y_plus = t_Y >= t_X
    I_t_Y_plus[K] = True
    I_t_Y_minus = -t_Y >= -t_X
    I_t_Y_minus[K] = True

    # Probabilities under each component of the proposal distribution
    # only need to be calculated once...
    log_Q_X = np.empty(L)
    log_Q_Y = np.empty((L, K))
    for l in range(L):
        theta_l = theta_grid[l]
        log_Q_X[l] = log_likelihood(X, theta_l)
        for k in range(K):
            log_Q_Y[l, k] = log_likelihood(Y[k], theta_l)
        if verbose:
            print('%.2f: %.2g, %.2g' %
                  (theta_l, np.exp(log_Q_X[l]), np.exp(log_Q_Y[l].max())))
    log_Q_sum_X = logsumexp(log_Q_X)
    log_Q_sum_Y = np.empty(K)
    for k in range(K):
        log_Q_sum_Y[k] = logsumexp(log_Q_Y[:, k])

    # Step over the grid, calculating approximate p-values
    log_p_plus = np.empty(L)
    log_p_minus = np.empty(L)
    for l in range(L):
        theta_l = theta_grid[l]
        log_w_l = np.empty(K + 1)
        # X contribution
        log_w_l[K] = (theta_l * t_X) - log_Q_sum_X
        # Y contribution
        for k in range(K):
            log_w_l[k] = (theta_l * t_Y[k]) - log_Q_sum_Y[k]
        log_p_num_plus = logsumexp(log_w_l[I_t_Y_plus])
        log_p_num_minus = logsumexp(log_w_l[I_t_Y_minus])
        log_p_denom = logsumexp(log_w_l)
        if verbose:
            print('%.2f: %.2g (%.2g, %.2g)' %
                  (theta_l, log_w_l[K], log_w_l[0:K].min(), log_w_l[0:K].max()))
        log_p_plus[l] = log_p_num_plus - log_p_denom
        log_p_minus[l] = log_p_num_minus - log_p_denom

    # p_pm = min(1, 2 * min(p_plus, p_minus))
    log_p_pm = np.fmin(0, np.log(2) + np.fmin(log_p_plus, log_p_minus))

    return invert_test(theta_grid, log_p_pm, np.log(alpha_level))
import numpy as np

x = 1.0  # define a float
y = 2.0  # define another float

# exponents and logarithms
print(np.exp(x))    # e^x
print(np.log(x))    # ln x
print(np.log10(x))  # log_10 x
print(np.log2(x))   # log_2 x

# min/max/misc
print(np.fabs(x))     # absolute value as a float
print(np.fmin(x, y))  # min of x and y
print(np.fmax(x, y))  # max of x and y

# populate arrays
n = 100                          # define an int
z = np.arange(n, dtype=float)    # get an array [0.0, n-1.]
z *= 2.0 * np.pi / float(n - 1)  # z = [0, 2*pi]
sin_z = np.sin(z)                # get an array sin(z)

# interpolation
print(np.interp(0.75, z, sin_z))  # interpolate sin(0.75)
print(np.sin(0.75))
zb_c_y10_0 = 0.2 + 0.1 * np.arange(10)
zb_c_y10_1 = 0.2 + 0.1 * (np.arange(10) + 1)
sz_c_y10 = 0.03 * (1 + 0.5 * (zb_c_y10_0 + zb_c_y10_1))
zbz_y10 = [zb_c_y10_0[0]]
bz_y10 = [0.95 / ccl.growth_factor(cosmo, 1. / (1 + zb_c_y10_0[0]))]
mask_y10 = [0]
lmax_c_y10 = []
for z in 0.5 * (zb_c_y10_0 + zb_c_y10_1):
    bz_y10.append(0.95 / ccl.growth_factor(cosmo, 1. / (1 + z)))
    zbz_y10.append(z)
    mask_y10.append(0)
    # mask_y10.append(1)
    lmax_c_y10.append(
        np.fmin(
            0.75 * 0.6727 * ccl.comoving_radial_distance(cosmo, 1. / (1 + z)),
            5400))
bz_y10.append(0.95 / ccl.growth_factor(cosmo, 1. / (1 + 3.4)))
zbz_y10.append(3.4)
mask_y10.append(0)
bz_y10 = np.array(bz_y10)
zbz_y10 = np.array(zbz_y10)
mask_y10 = np.array(mask_y10)
lmax_c_y10 = np.array(lmax_c_y10)
np.savetxt("bz_clustering_y10.txt",
           np.transpose([zbz_y10, bz_y10, mask_y10]),
           fmt='%lf %lf %d -1')
np.savetxt("bins_clustering_y10.txt",
           np.transpose([zb_c_y10_0, zb_c_y10_1, sz_c_y10, lmax_c_y10]),
           fmt='%.5lf %.5lf %.5lf 0 0 %d',
           header=' [0]z0 [1]zf [2]sigma_z [3]marg_sz [4]marg_bz [5]lmax')
def set_brake(self, input_brake):
    # Clamp the brake command to valid bounds
    brake = np.fmax(np.fmin(input_brake, 1.0), 0.0)
    self._set_brake = brake
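# For finite inputs the nested fmax/fmin above is the usual clamp; np.clip
# gives the same result (the two differ only in NaN handling).
import numpy as np

for v in (-0.3, 0.42, 1.7):
    assert np.fmax(np.fmin(v, 1.0), 0.0) == np.clip(v, 0.0, 1.0)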
def update_controls(self):
    ######################################################
    # RETRIEVE SIMULATOR FEEDBACK
    ######################################################
    x = self._current_x
    y = self._current_y
    yaw = self._current_yaw
    v = self._current_speed
    self.update_desired_speed()
    v_desired = self._desired_speed
    t = self._current_timestamp
    waypoints = self._waypoints
    throttle_output = 0
    steer_output = 0
    brake_output = 0

    self.vars.create_var('kp', 0.50)
    self.vars.create_var('ki', 0.30)
    self.vars.create_var('integrator_min', 0.0)
    self.vars.create_var('integrator_max', 10.0)
    self.vars.create_var('kd', 0.13)
    self.vars.create_var('kp_heading', 8.00)
    self.vars.create_var('k_speed_crosstrack', 0.00)
    self.vars.create_var('cross_track_deadband', 0.01)
    self.vars.create_var('x_prev', 0.0)
    self.vars.create_var('y_prev', 0.0)
    self.vars.create_var('yaw_prev', 0.0)
    self.vars.create_var('v_prev', 0.0)
    self.vars.create_var('t_prev', 0.0)
    self.vars.create_var('v_error', 0.0)
    self.vars.create_var('v_error_prev', 0.0)
    self.vars.create_var('v_error_integral', 0.0)

    # Skip the first frame to store previous values properly
    if self._start_control_loop:
        self.vars.v_error = v_desired - v
        self.vars.v_error_integral += self.vars.v_error * \
            (t - self.vars.t_prev)
        v_error_rate_of_change = (self.vars.v_error - self.vars.v_error_prev) / \
            (t - self.vars.t_prev)

        # Cap the integrator sum to a min/max
        self.vars.v_error_integral = \
            np.fmax(np.fmin(self.vars.v_error_integral,
                            self.vars.integrator_max),
                    self.vars.integrator_min)

        throttle_output = self.vars.kp * self.vars.v_error + \
            self.vars.ki * self.vars.v_error_integral + \
            self.vars.kd * v_error_rate_of_change

        # Find cross track error (assume point with closest distance)
        crosstrack_error = float("inf")
        crosstrack_vector = np.array([float("inf"), float("inf")])

        ce_idx = self.get_lookahead_index(self._lookahead_distance)
        crosstrack_vector = np.array(
            [waypoints[ce_idx][0] - x - self._lookahead_distance * np.cos(yaw),
             waypoints[ce_idx][1] - y - self._lookahead_distance * np.sin(yaw)])
        crosstrack_error = np.linalg.norm(crosstrack_vector)

        # Set deadband to reduce oscillations
        print(crosstrack_error)
        if crosstrack_error < self.vars.cross_track_deadband:
            crosstrack_error = 0.0

        # Compute the sign of the crosstrack error
        crosstrack_heading = np.arctan2(crosstrack_vector[1],
                                        crosstrack_vector[0])
        crosstrack_heading_error = crosstrack_heading - yaw
        crosstrack_heading_error = \
            (crosstrack_heading_error + self._pi) % \
            self._2pi - self._pi
        crosstrack_sign = np.sign(crosstrack_heading_error)

        # Compute heading relative to trajectory (heading error).
        # First ensure that we are not at the last index.  If we are,
        # flip back to the first index (loop the waypoints).
        if ce_idx < len(waypoints) - 1:
            vect_wp0_to_wp1 = np.array(
                [waypoints[ce_idx + 1][0] - waypoints[ce_idx][0],
                 waypoints[ce_idx + 1][1] - waypoints[ce_idx][1]])
            trajectory_heading = np.arctan2(vect_wp0_to_wp1[1],
                                            vect_wp0_to_wp1[0])
        else:
            vect_wp0_to_wp1 = np.array(
                [waypoints[0][0] - waypoints[-1][0],
                 waypoints[0][1] - waypoints[-1][1]])
            trajectory_heading = np.arctan2(vect_wp0_to_wp1[1],
                                            vect_wp0_to_wp1[0])

        heading_error = trajectory_heading - yaw
        heading_error = \
            (heading_error + self._pi) % self._2pi - self._pi

        # Compute steering command based on error
        steer_output = heading_error + \
            np.arctan(self.vars.kp_heading *
                      crosstrack_sign *
                      crosstrack_error /
                      (v + self.vars.k_speed_crosstrack))

        ######################################################
        # SET CONTROLS OUTPUT
        ######################################################
        self.set_throttle(throttle_output)  # in percent (0 to 1)
        self.set_steer(steer_output)        # in rad (-1.22 to 1.22)
        self.set_brake(brake_output)        # in percent (0 to 1)

    self.vars.x_prev = x
    self.vars.y_prev = y
    self.vars.yaw_prev = yaw
    self.vars.v_prev = v
    self.vars.v_error_prev = self.vars.v_error
    self.vars.t_prev = t
def set_throttle(self, input_throttle):
    # Clamp the throttle command to valid bounds
    throttle = np.fmax(np.fmin(input_throttle, 1.0), 0.0)
    self._set_throttle = throttle
def test_float_nans(self):
    nan = np.nan
    arg1 = np.array([0, nan, nan])
    arg2 = np.array([nan, 0, nan])
    out = np.array([0, 0, nan])
    assert_equal(np.fmin(arg1, arg2), out)
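# For contrast with the test above: np.fmin ignores a NaN when the other
# operand is valid, while np.minimum propagates it.
import numpy as np

nan = np.nan
print(np.fmin([0, nan, nan], [nan, 0, nan]))     # [ 0.  0. nan]
print(np.minimum([0, nan, nan], [nan, 0, nan]))  # [nan nan nan]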
def fuzzy_sub(x, a, y, b):
    """
    Subtracts fuzzy set ``b`` from fuzzy set ``a``.

    Parameters
    ----------
    x : 1d array, length N
        Universe variable for fuzzy set ``a``.
    a : 1d array, length N
        Fuzzy set for universe ``x``.
    y : 1d array, length M
        Universe variable for fuzzy set ``b``.
    b : 1d array, length M
        Fuzzy set for universe ``y``.

    Returns
    -------
    z : 1d array
        Output variable.
    mfz : 1d array
        Fuzzy membership set for variable z.

    Notes
    -----
    Uses Zadeh's Extension Principle from Ross, Fuzzy Logic w/Engineering
    Applications, (2010), pp. 414, Eq. 12.17.

    If these results are unexpected and your membership functions are convex,
    consider trying the ``skfuzzy.dsw_*`` functions for fuzzy mathematics
    using interval arithmetic via the restricted Dong, Shah, and Wong method.
    """
    # a and x, and b and y, are formed into (MxN) matrices.  The former has
    # identical rows; the latter identical columns.
    n = len(b)
    aa = np.dot(np.atleast_2d(a).T, np.ones((1, n)))
    x = np.dot(np.atleast_2d(x).T, np.ones((1, n)))
    m = len(a)
    bb = np.dot(np.ones((m, 1)), np.atleast_2d(b))
    y = np.dot(np.ones((m, 1)), np.atleast_2d(y))

    # Subtract universes
    zz = (x - y).ravel()
    zz_index = np.argsort(zz)
    zz = np.sort(zz)

    # Array min() operation
    c = np.fmin(aa, bb).ravel()
    c = c[zz_index]

    # Initialize loop
    z, mfz = np.empty(0), np.empty(0)
    idx = 0
    for i in range(len(c)):
        index = np.nonzero(zz == zz[idx])[0]
        z = np.hstack((z, zz[idx]))
        mfz = np.hstack((mfz, c[index].max()))
        if zz[idx] == zz.max():
            break
        idx = index.max() + 1

    return z, mfz
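# A minimal usage sketch of fuzzy_sub, assuming `numpy as np`; the sets are
# invented for illustration: a set around 4 minus a set around 1 peaks at 3.
import numpy as np

x = np.arange(3, 6)
a = np.array([0.5, 1.0, 0.5])
y = np.arange(0, 3)
b = np.array([0.5, 1.0, 0.5])
z, mfz = fuzzy_sub(x, a, y, b)   # z spans 1..5, peak membership at z = 3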
def generate_three_dim_multi_type2(self, num_data, max_vel, min_vel,
                                   num_step, motion_type, test=False,
                                   visualize=False):
    """Sample discretized motions and corresponding place pairs."""
    vel_idx = None
    if not test and motion_type == 'discrete':
        velocity = generate_vel_list_3d(max_vel)
        num_vel = len(velocity)
        if pow(num_vel, num_step) < num_data:
            vel_list = np.asarray(
                list(itertools.product(np.arange(num_vel), repeat=num_step)))
            num_vel_list = len(vel_list)
            div, rem = num_data // num_vel_list, num_data % num_vel_list
            vel_idx = np.vstack(
                (np.tile(vel_list, [div, 1]),
                 vel_list[np.random.choice(num_vel_list, size=rem)]))
            np.random.shuffle(vel_idx)
        else:
            vel_idx = np.random.choice(num_vel, size=[num_data, num_step])
        vel_grid = np.take(velocity, vel_idx, axis=0)
        vel = vel_grid * self.interval_length
        vel_grid_cumsum = np.cumsum(vel_grid, axis=1)
        mu_max = np.fmin(
            self.num_interval,
            np.min(self.num_interval - vel_grid_cumsum, axis=1))
        mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
        mu_start = np.expand_dims(np.random.random(size=(num_data, 2)) *
                                  (mu_max - 1 - mu_min) + mu_min, axis=1)
        # mu_start = np.random.sample(size=[num_data, 2])
        # mu_start = np.expand_dims(np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
        mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
    elif not test:
        if self.shape == "square":
            num_data_sample = num_data
        else:
            raise NotImplementedError
        # theta = np.random.random(size=(num_data_sample, num_step)) * 2 * np.pi - np.pi
        # length = np.sqrt(np.random.random(size=(num_data_sample, num_step))) * (max_vel - min_vel) + min_vel
        # x = length * np.cos(theta)
        # y = length * np.sin(theta)
        # vel_seq = np.concatenate((np.expand_dims(x, axis=-1), np.expand_dims(y, axis=-1)), axis=-1)
        x1 = np.random.standard_normal(size=(num_data_sample, num_step))
        y1 = np.random.standard_normal(size=(num_data_sample, num_step))
        z1 = np.random.standard_normal(size=(num_data_sample, num_step))
        v = np.sqrt(x1**2 + y1**2 + z1**2)
        length = np.cbrt(np.random.random(size=(num_data_sample, num_step))) * \
            (max_vel - min_vel) + min_vel
        x = length * x1 / v
        y = length * y1 / v
        z = length * z1 / v
        vel_seq = np.concatenate(
            (np.expand_dims(x, axis=-1),
             np.expand_dims(y, axis=-1),
             np.expand_dims(z, axis=-1)), axis=-1)
        # from matplotlib import pyplot
        # from mpl_toolkits.mplot3d import Axes3D
        # fig = pyplot.figure()
        # ax = Axes3D(fig)
        # ax.scatter(x[:30000], y[0:30000], z[0:30000])
        # pyplot.show()
        vel_seq_cumsum = np.cumsum(vel_seq, axis=1)
        mu_max = np.fmin(
            self.num_interval - 1,
            np.min(self.num_interval - 1 - vel_seq_cumsum, axis=1))
        mu_min = np.fmax(0, np.max(-vel_seq_cumsum, axis=1))
        start = np.random.random(size=(num_data_sample, 3)) * \
            (mu_max - mu_min) + mu_min
        start = np.expand_dims(start, axis=1)
        mu_seq = np.concatenate((start, start + vel_seq), axis=1)
        vel = vel_seq * self.interval_length

        if self.shape == "circle":
            mu_seq_len = mu_seq * self.interval_length
            select_idx = np.sqrt((mu_seq_len[:, :, 0] - 0.5)**2 +
                                 (mu_seq_len[:, :, 1] - 0.5)**2) > 0.5
            select_idx = np.where(np.sum(select_idx, axis=1) == 0)[0]
            mu_seq = mu_seq[select_idx[:num_data]]
            vel = vel[select_idx[:num_data]]
        elif self.shape == "triangle":
            mu_seq_len = mu_seq * self.interval_length
            x, y = mu_seq_len[:, :, 0], mu_seq_len[:, :, 1]
            select_idx = (x + 2 * y > 1) * (-x + 2 * y < 1)
            select_idx = np.where(np.sum(select_idx, axis=1) == num_step + 1)[0]
            mu_seq = mu_seq[select_idx[:num_data]]
            vel = vel[select_idx[:num_data]]
    else:
        velocity = generate_vel_list_3d(max_vel, min_vel)
        num_vel = len(velocity)
        if visualize:
            mu_start = np.reshape([20, 20, 20], newshape=(1, 1, 1, 3))
            vel_pool = np.where((velocity[:, 0] >= -1) &
                                (velocity[:, 1] >= -1) &
                                (velocity[:, 2] >= -1))
            vel_idx = np.random.choice(vel_pool[0], size=[num_data * 10, num_step])
            vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
            mu_seq = np.concatenate(
                (np.tile(mu_start, [num_data * 10, 1, 1, 1]),
                 vel_grid_cumsum + mu_start), axis=1)
            mu_seq_new, vel_idx_new = [], []
            for i in range(len(mu_seq)):
                mu_seq_sub = mu_seq[i]
                if len(np.unique(mu_seq_sub, axis=0)) == len(mu_seq_sub):
                    mu_seq_new.append(mu_seq[i])
                    vel_idx_new.append(vel_idx[i])
            mu_seq, vel_idx = np.stack(mu_seq_new, axis=0), np.stack(vel_idx_new, axis=0)
            mu_seq_rs = np.reshape(mu_seq, [-1, (num_step + 1) * 2])
            select_idx = np.where(
                np.sum(mu_seq_rs >= self.num_interval, axis=1) == 0)[0][:num_data]
            vel_idx = vel_idx[select_idx]
            mu_seq = mu_seq[select_idx]
            vel = np.take(velocity, vel_idx, axis=0) * self.interval_length
        else:
            vel_idx = np.random.choice(num_vel, size=[num_data * 20, num_step])
            vel_grid_cumsum = np.cumsum(np.take(velocity, vel_idx, axis=0), axis=1)
            mu_max = np.fmin(
                self.num_interval,
                np.min(self.num_interval - vel_grid_cumsum, axis=1))
            mu_min = np.fmax(0, np.max(-vel_grid_cumsum, axis=1))
            select_idx = np.where(
                np.sum(mu_max <= mu_min, axis=1) == 0)[0][:num_data]
            vel_idx, vel_grid_cumsum = vel_idx[select_idx], vel_grid_cumsum[select_idx]
            vel_grid = np.take(velocity, vel_idx, axis=0)
            mu_max, mu_min = mu_max[select_idx], mu_min[select_idx]
            mu_start = np.random.sample(size=[num_data, 3])
            mu_start = np.expand_dims(
                np.round(mu_start * (mu_max - mu_min) + mu_min - 0.5), axis=1)
            mu_seq = np.concatenate((mu_start, mu_start + vel_grid_cumsum), axis=1)
            vel = vel_grid * self.interval_length
            # sns.distplot(vel, rug=True, hist=False)
            # plt.show()

    assert len(mu_seq) == num_data
    place_seq = {'seq': mu_seq, 'vel': vel, 'vel_idx': vel_idx}
    return place_seq
def K(self, X, X2, target):
    if X2 is None:
        X2 = X
    target += self.variance * np.fmin(X, X2.T)
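# A standalone NumPy sketch of what K above computes (not the GPy API):
# cov(B_s, B_t) = variance * min(s, t), the Brownian-motion covariance,
# for column-vector inputs.
import numpy as np

variance = 1.0
X = np.linspace(0.1, 1.0, 4)[:, None]
target = np.zeros((4, 4))
target += variance * np.fmin(X, X.T)
print(target)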
ax1.plot(x_serv, serv_lo, 'b', linewidth=1.5, label='Poor')
ax1.plot(x_serv, serv_md, 'g', linewidth=1.5, label='Acceptable')
ax1.plot(x_serv, serv_hi, 'r', linewidth=1.5, label='Amazing')
ax1.set_title('serve quality')
ax1.legend()

ax2.plot(x_tip, tip_lo, 'b', linewidth=1.5, label='Low')
ax2.plot(x_tip, tip_md, 'g', linewidth=1.5, label='Medium')
ax2.plot(x_tip, tip_hi, 'r', linewidth=1.5, label='High')
ax2.set_title('tip')
ax2.legend()

# Apply rule 1: poor service or poor food quality -> low tip
active_rule1 = np.fmax(qual_level_lo, serv_level_lo)
tip_activation_lo = np.fmin(active_rule1, tip_lo)

# Apply rule 2: if service is acceptable, the tip will be medium
tip_activation_md = np.fmin(serv_level_md, tip_md)

# Apply rule 3: great food quality or satisfying service -> high tip
active_rule3 = np.fmax(qual_level_hi, serv_level_hi)
tip_activation_hi = np.fmin(active_rule3, tip_hi)

tip0 = np.zeros_like(x_tip)

# Visualize the applied rules
fig, ax0 = plt.subplots(figsize=(8, 3))
ax0.fill_between(x_tip, tip0, tip_activation_lo, facecolor='b', alpha=0.7)
ax0.plot(
    x_tip,
    tip_lo,
def _feed_forward_lateral(self):
    temp = self._l * self._waypoints[self._closest_idx, -1]
    if np.abs(temp) >= 1.:
        temp = np.sign(temp)
    return np.fmax(np.fmin(np.arcsin(temp), self._sat_lat_max),
                   self._sat_lat_min)  # from -pi/2 to pi/2
# Let's suppose highest=97 and average=68
qual_level_lo = fuzz.interp_membership(x_highest, highest_lo, 97)
qual_level_md = fuzz.interp_membership(x_highest, highest_md, 97)
qual_level_hi = fuzz.interp_membership(x_highest, highest_hi, 97)

serv_level_lo = fuzz.interp_membership(x_average, average_lo, 68)
serv_level_md = fuzz.interp_membership(x_average, average_md, 68)
serv_level_hi = fuzz.interp_membership(x_average, average_hi, 68)

# Defining rules: low highest marks and low average.
# The OR operator means we take the maximum of these two.
active_rule1 = np.fmax(qual_level_lo, serv_level_lo)

# Apply rule 1 by clipping the low-cutoff membership function with `np.fmin`
tip_activation_lo = np.fmin(active_rule1, cutoff_lo)

# For rule 2 we connect acceptable service to medium cutoff
tip_activation_md = np.fmin(serv_level_md, cutoff_md)

# For rule 3 we connect high highest marks OR high average with high cutoff
active_rule3 = np.fmax(qual_level_hi, serv_level_hi)
tip_activation_hi = np.fmin(active_rule3, cutoff_hi)

tip0 = np.zeros_like(x_cutoff)

# Visualize this
fig, ax0 = plt.subplots(figsize=(8, 3))
ax0.fill_between(x_cutoff, tip0, tip_activation_lo, facecolor='b', alpha=0.7)
ax0.plot(
    x_cutoff,
def real_loc_to_pixel_loc(x, y, h, w, xmin, xmax, ymin, ymax):
    assert len(x) == len(y)
    pixel_x = np.fmin((x - xmin) / (xmax - xmin) * w, w - 1)
    pixel_y = np.fmin((y - ymin) / (ymax - ymin) * h, h - 1)
    return pixel_x.astype(int), pixel_y.astype(int)
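# Example, assuming `numpy as np`: mapping unit-square points onto a 100x200
# pixel grid; the fmin keeps x == xmax on the last valid column.
import numpy as np

x = np.array([0.0, 0.5, 1.0])
y = np.array([0.0, 0.25, 1.0])
px, py = real_loc_to_pixel_loc(x, y, 100, 200, 0.0, 1.0, 0.0, 1.0)
print(px, py)  # [  0 100 199] [ 0 25 99]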
def graphcut(img, alpha, rect, foreground_gmm, background_gmm, flow_vector,
             gamma, num_iterations=1, num_components=5):
    """
    Given an image, return the partition of the image using the customized
    graphcut described in the in-hand object scanning paper.

    Parameters
    ----------
    img : (m, n, 3) uint8
        The input color image.
    alpha : int
        Integer specifying background or foreground (0 or 1).
    rect : (4,) float
        A list of 4 elements (x, y, w, h): the zoomed-in bounding box of the
        segment from the previous frame. This reduces the amount of
        computation, since the graphcut is only computed within this rect.
    foreground_gmm : a utils.GMM instance
        Gaussian mixture model for the foreground.
    background_gmm : a utils.GMM instance
        Gaussian mixture model for the background.
    flow_vector : (m, n) float
        A flow gradient describing the amount of shift per pixel between
        frames.
    gamma : int
        A balancing constant used according to the original graphcut paper.

    Returns
    -------
    result : (m, n) uint8
        A partition of the image where 255 indicates foreground and 0
        indicates background.
    """
    img2 = img.copy()

    # Trim the cut area down to the projected rect estimated from the last frame.
    img = img[int(rect[0]):int(rect[0] + rect[2]), int(rect[1]):int(rect[1] + rect[3])]
    alpha = alpha[int(rect[0]):int(rect[0] + rect[2]), int(rect[1]):int(rect[1] + rect[3])]
    flow_vector = flow_vector[int(rect[0]):int(rect[0] + rect[2]), int(rect[1]):int(rect[1] + rect[3])]

    # Compute the pairwise smoothness term for both the color and the optical
    # flow image, and choose the smaller of the two.
    user_definite_background = np.where(alpha == 0)
    pairwise_energies_1 = compute_smoothness_vectorized(img, neighborhood='eight')
    pairwise_energies_2 = compute_smoothness_vectorized(flow_vector, neighborhood='eight')
    pairwise_energies = np.fmin(pairwise_energies_1, pairwise_energies_2)

    # Get GMM components based on color from the given fgd/bgd models.
    pixels = img.reshape((img.shape[0] * img.shape[1], img.shape[2]))
    foreground_components = foreground_gmm.get_component(pixels).reshape((img.shape[0], img.shape[1]))
    background_components = background_gmm.get_component(pixels).reshape((img.shape[0], img.shape[1]))

    # Compute unary energies for each node (foreground and background energies).
    node_ids, graph = create_graph(img)
    theta = (background_gmm, foreground_gmm, None, None)
    foreground_energies = get_unary_energy_vectorized(
        1, foreground_components.reshape((img.shape[0] * img.shape[1], 1)), theta, pixels)
    background_energies = get_unary_energy_vectorized(
        0, background_components.reshape((img.shape[0] * img.shape[1], 1)), theta, pixels)

    energy_differences = np.subtract(foreground_energies, background_energies)
    energy_min = np.fmin(foreground_energies, background_energies)
    better_count = np.where(energy_differences < 10)
    foreground_energies[better_count] = energy_min[better_count]

    # Assign unary energy for the user-defined background:
    # large foreground energy (gamma * 9, as used in the OpenCV implementation)
    # and small background energy (0).
    foreground_energies = foreground_energies.reshape(alpha.shape)
    background_energies = background_energies.reshape(alpha.shape)
    foreground_energies[user_definite_background] = gamma * 9
    background_energies[user_definite_background] = 0

    # Update the graph with the unary energies.
    for h in range(img.shape[0]):
        for w in range(img.shape[1]):
            index = h * img.shape[1] + w
            graph.add_tedge(index, foreground_energies[h][w],
                            round_int(background_energies[h][w]))

    # Compute pairwise weights for the four retained neighbor directions.
    NEIGHBORHOOD = [(-1, 0), (+1, 0), (0, -1), (0, +1),
                    (-1, -1), (-1, +1), (+1, +1), (+1, -1)]
    src_h = np.tile(np.arange(img.shape[0]).reshape(img.shape[0], 1), (1, img.shape[1]))
    src_w = np.tile(np.arange(img.shape[1]).reshape(1, img.shape[1]), (img.shape[0], 1))
    src_h = src_h.astype(np.int32)
    src_w = src_w.astype(np.int32)

    for i, energy in enumerate(pairwise_energies):
        if i in [1, 3, 6, 7]:
            continue
        height_offset, width_offset = NEIGHBORHOOD[i]
        dst_h = src_h + height_offset
        dst_w = src_w + width_offset
        idx = np.logical_and(np.logical_and(dst_h >= 0, dst_h < img.shape[0]),
                             np.logical_and(dst_w >= 0, dst_w < img.shape[1]))
        src_idx = (src_h * img.shape[1] + src_w)[idx].flatten()
        dst_idx = (dst_h * img.shape[1] + dst_w)[idx].flatten()
        weights = gamma * energy.astype(np.float32)[idx].flatten()
        graph = add_edge_vectorized(graph, src_idx, dst_idx, weights, weights)

    # Perform mincut/maxflow with pymaxflow.
    graph.maxflow()
    partition = graph.get_grid_segments(node_ids)
    partition = partition.reshape(alpha.shape)

    # Write the partition back into a full-size frame.
    blank = np.zeros(img2.shape[:2])
    blank[int(rect[0]):int(rect[0] + rect[2]), int(rect[1]):int(rect[1] + rect[3])] = partition * 255
    result = blank.astype(dtype=np.uint8)
    return result
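# The core of the pairwise term above is an element-wise minimum of two
# energy maps (color-based and flow-based). A minimal, self-contained
# illustration with made-up toy arrays:
import numpy as np

color_energy = np.array([[0.9, 0.2], [0.4, 0.7]])
flow_energy = np.array([[0.5, 0.6], [0.1, 0.8]])
# An edge costs little if EITHER cue considers it a boundary, hence the min.
combined = np.fmin(color_energy, flow_energy)
# combined -> [[0.5, 0.2], [0.1, 0.7]]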
def HMC(f, num_samples, Lmax, epsilon, x0, verbose=False, option_hmc=0,
        mean_approx=None, cov_approx=None):
    D = x0.size
    samples = np.zeros((num_samples, D))
    samples[0] = x0
    logprob, grad = f(x0)

    # Precompute the rotation matrix if using split HMC or the guiding
    # Hamiltonian. In these cases the potential energy is a quadratic form
    # involving the inverse of the covariance of the approximating Gaussian.
    if option_hmc != 0:
        # Compute eigenvalues and eigenvectors of the covariance of the
        # Gaussian approximation. These are used to rotate the coordinates of
        # the harmonic oscillator in D dimensions, making the D dynamics
        # independent.
        eigen_cov_approx = np.linalg.eig(cov_approx)
        eigenvect_cov_approx = eigen_cov_approx[1]
        eigenval_cov_approx = eigen_cov_approx[0]
        # omega is the vector of the D angular frequencies sqrt(1/lambda_i),
        # where lambda_i is the i-th eigenvalue.
        omega = np.zeros(D)
        for i in range(0, D):
            omega[i] = np.sqrt(1 / eigenval_cov_approx[i])

    accept_count_batch = 0
    accept_count = 0

    for t in range(num_samples - 1):
        # Output the acceptance rate every 100 iterations.
        if ((t + 1) % 100) == 0:
            print("Iteration: ", t + 1, "\t Acc Rate: ", 1. * accept_count_batch, "%")
            accept_count_batch = 0

        logprob_t, grad_t = logprob, grad.copy()
        pt = np.random.randn(D)
        Lt = np.random.randint(1, Lmax + 1)

        # Standard HMC - begin leapfrogging
        premature_reject = False
        if option_hmc == 0:
            x = samples[t].copy()
            p = pt + 0.5 * epsilon * grad
            for l in range(Lt):
                x += epsilon * p
                logprob, grad = f(x)
                if np.any(np.isnan(grad)):
                    premature_reject = True
                    break
                p += epsilon * grad
            # Full momentum steps inside the loop; subtracting half a step
            # here leaves the correct trailing half-step.
            p -= 0.5 * epsilon * grad
        # leapfrogging done

        if premature_reject:
            print("warning: numerical instability. rejecting this proposal prematurely")
            samples[t + 1] = samples[t]
            logprob, grad = logprob_t, grad_t
            continue

        if option_hmc == 1:
            raise NotImplementedError
        if option_hmc == 2:
            raise NotImplementedError

        log_accept_ratio = logprob - 0.5 * p.dot(p) - logprob_t + 0.5 * pt.dot(pt)
        logu = np.log(np.random.rand())

        if verbose:
            print('sample number {:<4} steps: {:<3}, eps: {:.2f} logprob: {:.2f} '
                  'accept_prob: {:.2f}, {} (accepted {:.2%})'.format(
                      t, Lt, epsilon, logprob,
                      np.fmin(1, np.exp(log_accept_ratio)),
                      'rejecting' if logu > log_accept_ratio else 'accepting',
                      accept_count / (t + 1)))

        if logu < log_accept_ratio:
            samples[t + 1] = x
            accept_count_batch += 1
            accept_count += 1
        else:
            samples[t + 1] = samples[t]
            logprob, grad = logprob_t, grad_t

    return samples, (accept_count * 1.0) / num_samples
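# A minimal sketch of driving HMC with a toy target: a standard 2-D Gaussian,
# whose log-density and gradient are easy to write down. The step size and
# trajectory length are illustrative values, not tuned.
import numpy as np

def log_gaussian(x):
    # log p(x) up to a constant, and its gradient, for N(0, I).
    return -0.5 * x.dot(x), -x

samples, acc_rate = HMC(log_gaussian, num_samples=1000, Lmax=20,
                        epsilon=0.2, x0=np.zeros(2))
# samples approximates draws from N(0, I); acc_rate should be close to 1
# on this easy target, since the quadratic dynamics are nearly exact.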
# This is what fuzz.interp_membership exists for!
qual_level_lo = fuzz.interp_membership(x_qual, qual_lo, 6.5)
qual_level_md = fuzz.interp_membership(x_qual, qual_md, 6.5)
qual_level_hi = fuzz.interp_membership(x_qual, qual_hi, 6.5)

serv_level_lo = fuzz.interp_membership(x_serv, serv_lo, 9.8)
serv_level_md = fuzz.interp_membership(x_serv, serv_md, 9.8)
serv_level_hi = fuzz.interp_membership(x_serv, serv_hi, 9.8)

# Now we take our rules and apply them. Rule 1 concerns bad food OR service.
# The OR operator means we take the maximum of these two.
active_rule1 = np.fmax(qual_level_lo, serv_level_lo)

# Now we apply this by clipping the top off the corresponding output
# membership function with `np.fmin`
tip_activation_lo = np.fmin(active_rule1, tip_lo)  # removed entirely to 0

# For rule 2 we connect acceptable service to medium tipping
tip_activation_md = np.fmin(serv_level_md, tip_md)

# For rule 3 we connect high service OR high food with high tipping
active_rule3 = np.fmax(qual_level_hi, serv_level_hi)
tip_activation_hi = np.fmin(active_rule3, tip_hi)
tip0 = np.zeros_like(x_tip)

"""
.. image:: PLOT2RST.current_figure

Rule aggregation
----------------
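# The heading above announces rule aggregation; the excerpt is truncated
# there. A plausible continuation in the usual scikit-fuzzy style (a sketch,
# assuming the tip_activation_* arrays and x_tip universe defined above):
# take the element-wise maximum of all clipped consequents, then defuzzify.
aggregated = np.fmax(tip_activation_lo,
                     np.fmax(tip_activation_md, tip_activation_hi))
tip = fuzz.defuzz(x_tip, aggregated, 'centroid')  # crisp tip recommendation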
def deltamz(self, mz_value):
    "computes the theoretical maximum resolution in m/z at the given m/z location"
    mz = np.fmin(mz_value, HighestMass)
    return 0.5 * (self.itomz(self.mztoi(mz) - 1) - self.itomz(self.mztoi(mz) + 1))
def dK_dtheta(self, dL_dK, X, X2, target):
    if X2 is None:
        X2 = X
    # Accumulate the gradient into target in place: the sum of the pairwise
    # minima min(x_i, x_j), weighted by the incoming gradient dL/dK.
    target += np.sum(np.fmin(X, X2.T) * dL_dK)
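# The np.fmin(X, X2.T) pattern above builds the matrix of pairwise minima
# min(x_i, x_j) via broadcasting, e.g. the Gram matrix of a Brownian-motion
# style covariance k(x, x') = min(x, x'). A toy check:
import numpy as np

X = np.array([[1.0], [2.0], [3.0]])   # column vector of inputs, shape (3, 1)
K = np.fmin(X, X.T)                   # K[i, j] = min(X[i], X[j]), shape (3, 3)
# K -> [[1, 1, 1],
#       [1, 2, 2],
#       [1, 2, 3]]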
def And(cond1, cond2):
    # Fuzzy AND: the element-wise minimum of the two membership values.
    return fmin(cond1, cond2)
def fun(x, shape):
    from numpy import fmin, fmax
    # Scale to [0, 255], clamp to that range, then restore the original shape.
    x = fmax(0, fmin(255, x * 255))
    return x.reshape(shape)
def predict(price_input, distance_input, rating_input):
    # R1: If the price is cheap, distance is close, and rating is good, then consider going.
    r1_x1 = fuzz.interp_membership(price, cheap, price_input)
    r1_x2 = fuzz.interp_membership(distance, close, distance_input)
    r1_x3 = fuzz.interp_membership(rating, good, rating_input)
    fire_r1 = min(r1_x1, r1_x2, r1_x3)

    # R2: If the price is cheap, distance is medium, and rating is good, then consider going.
    r2_x1 = fuzz.interp_membership(price, cheap, price_input)
    r2_x2 = fuzz.interp_membership(distance, medium_distance, distance_input)
    r2_x3 = fuzz.interp_membership(rating, good, rating_input)
    fire_r2 = min(r2_x1, r2_x2, r2_x3)

    # R3: If the price is expensive, distance is far, and rating is bad, then consider not going.
    r3_x1 = fuzz.interp_membership(price, expensive, price_input)
    r3_x2 = fuzz.interp_membership(distance, far, distance_input)
    r3_x3 = fuzz.interp_membership(rating, bad, rating_input)
    fire_r3 = min(r3_x1, r3_x2, r3_x3)

    # R4: If the price is expensive, distance is far, and rating is moderate, then consider not going.
    r4_x1 = fuzz.interp_membership(price, expensive, price_input)
    r4_x2 = fuzz.interp_membership(distance, far, distance_input)
    r4_x3 = fuzz.interp_membership(rating, moderate, rating_input)
    fire_r4 = min(r4_x1, r4_x2, r4_x3)

    # R5: If the price is expensive, distance is medium, and rating is bad, then consider not going.
    r5_x1 = fuzz.interp_membership(price, expensive, price_input)
    r5_x2 = fuzz.interp_membership(distance, medium_distance, distance_input)
    r5_x3 = fuzz.interp_membership(rating, bad, rating_input)
    fire_r5 = min(r5_x1, r5_x2, r5_x3)

    # R6: If the price is medium, distance is close, and rating is good, then consider going.
    r6_x1 = fuzz.interp_membership(price, medium_price, price_input)
    r6_x2 = fuzz.interp_membership(distance, close, distance_input)
    r6_x3 = fuzz.interp_membership(rating, good, rating_input)
    fire_r6 = min(r6_x1, r6_x2, r6_x3)

    # R7: If the price is medium, distance is far, and rating is bad, then consider not going.
    r7_x1 = fuzz.interp_membership(price, medium_price, price_input)
    r7_x2 = fuzz.interp_membership(distance, far, distance_input)
    r7_x3 = fuzz.interp_membership(rating, bad, rating_input)
    fire_r7 = min(r7_x1, r7_x2, r7_x3)

    # R8: If the price is medium, distance is far, and rating is moderate, then consider not going.
    r8_x1 = fuzz.interp_membership(price, medium_price, price_input)
    r8_x2 = fuzz.interp_membership(distance, far, distance_input)
    r8_x3 = fuzz.interp_membership(rating, moderate, rating_input)
    fire_r8 = min(r8_x1, r8_x2, r8_x3)

    # R9: If the price is medium, distance is medium, and rating is bad, then consider not going.
    r9_x1 = fuzz.interp_membership(price, medium_price, price_input)
    r9_x2 = fuzz.interp_membership(distance, medium_distance, distance_input)
    r9_x3 = fuzz.interp_membership(rating, bad, rating_input)
    fire_r9 = min(r9_x1, r9_x2, r9_x3)

    # R10: If the price is medium, distance is medium, and rating is good, then consider going.
    r10_x1 = fuzz.interp_membership(price, medium_price, price_input)
    r10_x2 = fuzz.interp_membership(distance, medium_distance, distance_input)
    r10_x3 = fuzz.interp_membership(rating, good, rating_input)
    fire_r10 = min(r10_x1, r10_x2, r10_x3)

    # Clip each output fuzzy set by the firing strength of its rule.
    rule_1_clip = np.fmin(fire_r1, will_go)
    rule_2_clip = np.fmin(fire_r2, will_go)
    rule_3_clip = np.fmin(fire_r3, not_go)
    rule_4_clip = np.fmin(fire_r4, not_go)
    rule_5_clip = np.fmin(fire_r5, not_go)
    rule_6_clip = np.fmin(fire_r6, will_go)
    rule_7_clip = np.fmin(fire_r7, not_go)
    rule_8_clip = np.fmin(fire_r8, not_go)
    rule_9_clip = np.fmin(fire_r9, not_go)
    rule_10_clip = np.fmin(fire_r10, will_go)

    # Aggregate all rules
    temp1 = np.fmax(rule_1_clip, rule_2_clip)
    temp2 = np.fmax(temp1, rule_3_clip)
    temp3 = np.fmax(temp2, rule_4_clip)
    temp4 = np.fmax(temp3, rule_5_clip)
    temp5 = np.fmax(temp4, rule_6_clip)
    temp6 = np.fmax(temp5, rule_7_clip)
    temp7 = np.fmax(temp6, rule_8_clip)
    temp8 = np.fmax(temp7, rule_9_clip)
    output = np.fmax(temp8, rule_10_clip)

    # Defuzzification
    try:
        going_predict = fuzz.defuzz(label, output, 'centroid')
        if args.graph:  # `args` is a module-level argparse namespace
            fire_going = fuzz.interp_membership(label, output, going_predict)
            label_0 = np.zeros_like(label)
            fig, ax0 = plt.subplots(figsize=(8, 3))
            ax0.plot(label, will_go, 'g', linestyle='--')
            ax0.plot(label, not_go, 'r', linestyle='--')
            ax0.fill_between(label, label_0, output, facecolor='Orange', alpha=0.5)
            ax0.plot([going_predict, going_predict], [0, fire_going], 'k',
                     linewidth=2.5, alpha=0.9)
            ax0.get_xaxis().tick_bottom()
            ax0.get_yaxis().tick_left()
            ax0.set_xlim([min(label), max(label)])
            ax0.set_ylim([0, 1])
            plt.xlabel('Possibility of Going to the Restaurant')
            plt.ylabel('membership degree')
            plt.title('Restaurant')
            plt.show()
        return going_predict
    except AssertionError:
        # fuzz.defuzz raises AssertionError when the aggregated area is zero.
        return -1
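# A minimal sketch of the module-level fuzzy sets predict() relies on. The
# universes and triangular membership parameters below are illustrative
# assumptions, not the original definitions, and `args` is stubbed so the
# plotting branch stays off.
import argparse
import numpy as np
import skfuzzy as fuzz

args = argparse.Namespace(graph=False)   # predict() checks args.graph

price = np.arange(0, 101, 1)             # e.g. dollars
cheap = fuzz.trimf(price, [0, 0, 40])
medium_price = fuzz.trimf(price, [20, 50, 80])
expensive = fuzz.trimf(price, [60, 100, 100])

distance = np.arange(0, 21, 1)           # e.g. kilometres
close = fuzz.trimf(distance, [0, 0, 8])
medium_distance = fuzz.trimf(distance, [4, 10, 16])
far = fuzz.trimf(distance, [12, 20, 20])

rating = np.arange(0, 11, 1)             # 0-10 stars
bad = fuzz.trimf(rating, [0, 0, 4])
moderate = fuzz.trimf(rating, [2, 5, 8])
good = fuzz.trimf(rating, [6, 10, 10])

label = np.arange(0, 11, 1)              # "possibility of going" universe
not_go = fuzz.trimf(label, [0, 0, 5])
will_go = fuzz.trimf(label, [5, 10, 10])

# Cheap, nearby, well-rated restaurant: expect a high "going" score.
print(predict(price_input=15, distance_input=2, rating_input=9))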
def evaluate(self, cart_coords, hmat=None, ivals=None):
    """
    Compute the value of the dihedral angle.

    Parameters
    ----------
    cart_coords : (N, 3) array
        Atomic positions in Cartesian coordinates, where N is the number of
        atoms in the system.
    ivals : (2,) array, optional
        The indices of the left and right points used to calculate the
        dihedral angle. The indices have to be from 1 to the number of the
        corresponding points.

    Returns
    -------
    angle : float
        Dihedral angle in radians.

    Example
    -------
      1
       \\
        2---3
             \\
              4

    >>> d = Dihedral([1], [2,3], [4])
    >>> v = d.evaluate(coords)               # Compute 1--2--3--4 angle

      1         6
       \\       /
        2--4---5
       /        \\
      3          7

    >>> d = Dihedral([1,2,3], [4,5], [6,7])
    >>> v = d.evaluate(coords)               # Compute 1--4--5--6 angle
    >>> v = d.evaluate(coords, ivals=[3,1])  # Compute 3--4--5--6 angle
    >>> d = Dihedral([1,2,3], [4,5], [6,7], ivals=[1,2])
    >>> v = d.evaluate(coords)               # Compute 1--4--5--7 angle
    >>> v = d.evaluate(coords, ivals=[3,1])  # Compute 3--4--5--6 angle
    """
    if ivals is not None:
        self._ivals = list(ivals)
    if len(self._ivals) != 2:
        raise Exception('Two "ivals" are needed')

    # Get dihedral axis and vectors
    ivecs = [self._iaxis] + [(self._iaxis[0], self._ivals[0] - 1),
                             (self._iaxis[1], self._ivals[1] + len(self._left) + 1)]
    axis, v1, v2 = self.get_vectors(ivecs, cart_coords, hmat)

    # Compute normal vectors
    n1, n2 = np.cross(axis, v1), np.cross(axis, v2)
    n1, n2 = n1 / npl.norm(n1), n2 / npl.norm(n2)

    # Compute the angle, clamping the cosine into [-1, 1] before arccos
    angl = np.sum(n1 * n2)
    angl = np.fmin(np.fmax(-1, angl), 1.)
    angl = np.arccos(angl)
    if np.sum(np.cross(n1, n2) * axis) > 0:
        angl *= -1
    return angl
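# The fmin/fmax pair above is a guard for np.arccos: floating-point round-off
# can push a normalized dot product slightly outside [-1, 1], where arccos
# returns nan. A tiny demonstration with a synthetic overshoot:
import numpy as np

c = 1.0 + 1e-15                  # cosine value nudged past 1 by round-off
# np.arccos(c) would be nan without the clamp
c = np.fmin(np.fmax(-1, c), 1.)
angle = np.arccos(c)             # 0.0, as intended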
# calculate membership function values
participants_value_low = fuzz.interp_membership(participants, participants_low, input_participants)
participants_value_medium = fuzz.interp_membership(participants, participants_medium, input_participants)
participants_value_high = fuzz.interp_membership(participants, participants_high, input_participants)

available_slots_value_low = fuzz.interp_membership(available_slots, available_slots_low, input_available_slots)
available_slots_value_medium = fuzz.interp_membership(available_slots, available_slots_medium, input_available_slots)
available_slots_value_high = fuzz.interp_membership(available_slots, available_slots_high, input_available_slots)

test_difficulty_value_low = fuzz.interp_membership(test_difficulty, test_difficulty_low, input_test_difficulty)
test_difficulty_value_medium = fuzz.interp_membership(test_difficulty, test_difficulty_medium, input_test_difficulty)
test_difficulty_value_high = fuzz.interp_membership(test_difficulty, test_difficulty_high, input_test_difficulty)

# apply rules (OR via fmax, AND via fmin, NOT via 1 - membership)
value_rule_1 = np.fmax(np.fmax(participants_value_high, available_slots_value_low), test_difficulty_value_high)
value_rule_2 = np.fmin(np.fmin(1 - participants_value_high, 1 - test_difficulty_value_high), 1 - available_slots_value_low)
value_rule_3 = np.fmax(test_difficulty_value_low, np.fmax(1 - participants_value_high, 1 - available_slots_value_low))

acceptance_probability_value_low = np.fmin(value_rule_1, acceptance_probability_low)
acceptance_probability_value_medium = np.fmin(value_rule_2, acceptance_probability_medium)
acceptance_probability_value_high = np.fmin(value_rule_3, acceptance_probability_high)

# plot rule values
acceptance_probability_zero = np.zeros_like(acceptance_probability)
fig, ax0 = plt.subplots(figsize=(8, 3))
ax0.fill_between(acceptance_probability, acceptance_probability_zero,
                 acceptance_probability_value_low, facecolor='b', alpha=0.5)
ax0.plot(acceptance_probability, acceptance_probability_low, 'b',
         linewidth=0.5, linestyle='--')
ax0.fill_between(acceptance_probability, acceptance_probability_zero,
                 acceptance_probability_value_medium,
                 facecolor='g', alpha=0.5)  # styling assumed; mirrors the 'low' band above
def approximate_time_remaining_until_walk(hours_passed_after_pee, crying_intensity):
    last_time_peed_raw = int(hours_passed_after_pee)
    crying_intensity_raw = int(crying_intensity)

    if last_time_peed_raw < 0:
        raise ValueError('Last time peed must be greater than or equal to 0')
    if crying_intensity_raw < 0 or crying_intensity_raw > 10:
        raise ValueError('Crying intensity must be between 0 and 10 inclusive')
    if last_time_peed_raw > 10:
        exit('now')  # more than 10 hours: walk immediately, no inference needed

    last_time_peed_range = np.arange(0, 11, 0.1)
    crying_intensity_range = np.arange(0, 11, 0.1)

    # Input membership functions
    last_time_peed_now_function = fuzz.zmf(last_time_peed_range, 0, 1.5)
    last_time_peed_couple_of_hours_function = fuzz.gaussmf(last_time_peed_range, 0.75, 2.9)
    last_time_peed_many_hours_function = fuzz.smf(last_time_peed_range, 2.8, 4.5)

    crying_intensity_none_function = fuzz.zmf(crying_intensity_range, 0, 1.5)
    crying_intensity_slight_function = fuzz.gauss2mf(crying_intensity_range, 0.5, 0.6, 0.8, 1.7)
    crying_intensity_medium_function = fuzz.gaussmf(crying_intensity_range, 1.6, 4.5)
    crying_intensity_high_function = fuzz.smf(crying_intensity_range, 5, 7.9)

    # Output membership functions
    walk_after_how_much_time_range = np.arange(0, 5, 0.1)
    walk_now_function = fuzz.zmf(walk_after_how_much_time_range, 0, 0.5)
    walk_after_a_while_function = fuzz.gaussmf(walk_after_how_much_time_range, 0.4, 0.8)
    walk_later_function = fuzz.gaussmf(walk_after_how_much_time_range, 0.6, 1.9)
    walk_after_couple_of_hours_function = fuzz.smf(walk_after_how_much_time_range, 2.3, 3.8)

    def convert_last_time_peed(last_time_peed_in_hours):
        now = fuzz.interp_membership(last_time_peed_range,
                                     last_time_peed_now_function,
                                     last_time_peed_in_hours)
        couple_of_hours = fuzz.interp_membership(last_time_peed_range,
                                                 last_time_peed_couple_of_hours_function,
                                                 last_time_peed_in_hours)
        many_hours = fuzz.interp_membership(last_time_peed_range,
                                            last_time_peed_many_hours_function,
                                            last_time_peed_in_hours)
        return [now, couple_of_hours, many_hours]

    def convert_crying_intensity(crying_intensity):
        none = fuzz.interp_membership(crying_intensity_range,
                                      crying_intensity_none_function, crying_intensity)
        slight = fuzz.interp_membership(crying_intensity_range,
                                        crying_intensity_slight_function, crying_intensity)
        medium = fuzz.interp_membership(crying_intensity_range,
                                        crying_intensity_medium_function, crying_intensity)
        high = fuzz.interp_membership(crying_intensity_range,
                                      crying_intensity_high_function, crying_intensity)
        return [none, slight, medium, high]

    last_time_peed_converted = convert_last_time_peed(last_time_peed_raw)
    crying_intensity_converted = convert_crying_intensity(crying_intensity_raw)

    # Rule strengths: OR via fmax, AND via fmin
    rules = [
        np.fmax(last_time_peed_converted[-1], crying_intensity_converted[-1]),
        np.fmin(last_time_peed_converted[0],
                np.fmax(crying_intensity_converted[1], crying_intensity_converted[0])),
        np.fmin(last_time_peed_converted[1], crying_intensity_converted[0]),
        np.fmin(last_time_peed_converted[1], crying_intensity_converted[2]),
        np.fmin(last_time_peed_converted[1], crying_intensity_converted[1]),
    ]

    # Clip each output set by its rule strength, then aggregate with fmax
    walk_now = np.fmin(rules[0], walk_now_function)
    walk_after_a_while = np.fmin(rules[-2], walk_after_a_while_function)
    walk_later = np.fmin(rules[-1], walk_later_function)
    walk_after_couple_of_hours = np.fmin(rules[1], walk_after_couple_of_hours_function)

    aggregated = np.fmax(walk_now,
                         np.fmax(walk_after_a_while,
                                 np.fmax(walk_later, walk_after_couple_of_hours)))

    result = fuzz.defuzz(walk_after_how_much_time_range, aggregated, 'centroid')
    return result
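# A quick usage sketch with illustrative inputs: three hours since the last
# pee and mild crying should land somewhere between "a while" and "later".
hours = approximate_time_remaining_until_walk(hours_passed_after_pee=3,
                                              crying_intensity=2)
print('walk in about {:.1f} hours'.format(hours))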
def subset_minibatch(dat, offset, batch_size):
    # Take rows [offset, offset + batch_size); clamp indices at the last row
    # so a final short batch repeats the last example instead of indexing
    # out of bounds.
    rg = np.arange(offset, offset + batch_size)
    rg = np.fmin(rg, dat['X'].shape[0] - 1)
    return {x: y[rg, :] for x, y in dat.items()}
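# A minimal sketch of iterating minibatches with subset_minibatch; the data
# dict below is made up, with every array sharing the same first dimension.
import numpy as np

dat = {'X': np.random.randn(10, 3), 'Y': np.random.randn(10, 1)}
batch_size = 4
for offset in range(0, dat['X'].shape[0], batch_size):
    batch = subset_minibatch(dat, offset, batch_size)
    # The last batch clamps to row 9, so every batch has exactly 4 rows.
    assert batch['X'].shape[0] == batch_size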