def set_central_pixel_to_zero(popt, ras, decs, ra_range, dec_range, args,
                              edge_pad, dims, wcs):
    '''Using the central position found when fitting a gaussian (popt),
    shift the ra,dec coord system so that x0,y0 = 0,0'''
    x0 = popt[1]
    y0 = popt[2]

    ra_offs = np_abs(ra_range - x0)
    dec_offs = np_abs(dec_range - y0)

    ra_ind = where(ra_offs < abs(ra_range[1] - ra_range[0]) / 2.0)[0][0]
    dec_ind = where(dec_offs < abs(dec_range[1] - dec_range[0]) / 2.0)[0][0]

    ra_cent_off = ra_range[ra_ind]
    dec_cent_off = dec_range[dec_ind]

    if dims == 2:
        ra_cent, dec_cent = wcs.wcs_pix2world(ra_ind - edge_pad,
                                              dec_ind - edge_pad, 0)
    elif dims == 3:
        ra_cent, dec_cent = wcs.wcs_pix2world(ra_ind - edge_pad,
                                              dec_ind - edge_pad, 0, 0)
    elif dims == 4:
        ra_cent, dec_cent, _, _ = wcs.wcs_pix2world(ra_ind - edge_pad,
                                                    dec_ind - edge_pad, 0, 0, 0)

    ras -= ra_cent_off
    decs -= dec_cent_off

    return ra_cent, dec_cent, ras, decs
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    update should return a number that when it is smaller than 1
    the main loop stops. Here I choose this number to be:
    sqrt(1/dim*sum_{i=0}^{dim}(grad/(abs_tol+x*rel_tol))_i^2)
    '''
    # accumulate the decay rate (beta_m**t), in order to correct the average
    self.beta_m_ac *= self.beta_m

    _w2 = 0
    _check = 0

    self.Q.averageGrad()

    for i in range(self.dim):
        self.mE[i] = self.beta_m * self.mE[i] + (1 - self.beta_m) * self.Q.grad[i]
        self.v_max[i] = np_max([self.beta_v * self.v_max[i], np_abs(self.Q.grad[i])])

        dw = self.alpha / (self.v_max[i] + self.epsilon) * self.mE[i] / (1 - self.beta_m_ac)
        self.Q.model.w[i] = self.Q.model.w[i] - dw

        _w2 = abs_tol + np_abs(self.Q.model.w[i]) * rel_tol
        _check += (dw / _w2) * (dw / _w2)

        self.Q.grad[i] = 0

    _check = np_sqrt(1. / self.dim * _check)

    return _check
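# A self-contained sketch of the stopping rule shared by these update()
# methods: _check = sqrt(1/dim * sum_i (dw_i / (abs_tol + |w_i|*rel_tol))^2),
# i.e. the loop stops once every parameter moves by less than roughly its
# own tolerance. Demonstrated with plain gradient descent on a quadratic,
# a hypothetical stand-in for the Q/model machinery above.
if __name__ == '__main__':
    import numpy as np

    w = np.array([5.0, -3.0])
    alpha, abs_tol, rel_tol = 0.1, 1e-5, 1e-3
    check = np.inf
    while check > 1:                       # same convention: stop when < 1
        grad = 2 * w                       # gradient of sum(w**2)
        dw = alpha * grad
        w -= dw
        w2 = abs_tol + np.abs(w) * rel_tol
        check = np.sqrt(np.mean((dw / w2) ** 2))
    print(w)  # close to the minimum at [0, 0]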
def BsetPos(self, pos):
    """set the position of the newport to tuple of floats (x,y)
    NOTE this command is BLOCKING"""
    self.getPos()  # need to do this first
    if np_abs(self._cX - pos[0]) > self.feLim or np_abs(self._cY - pos[1]) > self.feLim:
        print('You are farther away than your feLim! I will fix this later with KP or KD or KI!')
        return 0
    else:
        # set them to where we are going!
        if self._cX == pos[0] and self._cY == pos[1]:
            print('Yer already thar mate!')
            return 0
        self.target = pos
        self._cX = pos[0]
        self._cY = pos[1]
        # group axes 1&2, velocity to two (mm/s?), acc/dec to 1, motors on, move, wait, degroup
        self.write('1HN1,2;1HV2;1HA1;1HD1;1HO;1HL' + str(self._cY) + ',' + str(self._cX))
        # HW also blocks
        bound = .0005  # have a um of error

        def format_str(string, width=9):
            missing = width - len(string)
            if missing:
                minus = string.count('-')
                string = ' ' * minus + string + ' ' * (missing - minus)
            return string

        while self.target:
            xy = self.getPos()
            xstr = str(xy[0])
            ystr = str(xy[1])
            sys.stdout.write('\r(%s, %s)' % (format_str(xstr), format_str(ystr)))
            sys.stdout.flush()
            if self.target[0] - bound <= self._cX <= self.target[0] + bound:
                # good old floating point errors
                if self.target[1] - bound <= self._cY <= self.target[1] + bound:
                    self.write('1HX')  # degroup; oh man this is ugly and not transparent
                    if not self._motors_on:  # if the motors were off turn them back off
                        self.write('1MF;2MF')
                    self.target = None
        return self._cX, self._cY
def _set_central_pixel_to_zero(self, x0, y0):
    """Using the central position found when fitting a gaussian (x0, y0),
    shift the ra,dec coord system so that x0,y0 = 0,0"""
    ra_offs = np_abs(self.ra_range - x0)
    dec_offs = np_abs(self.dec_range - y0)

    ra_ind = where(
        ra_offs < abs(self.ra_range[1] - self.ra_range[0]) / 2.0)[0][0]
    dec_ind = where(
        dec_offs < abs(self.dec_range[1] - self.dec_range[0]) / 2.0)[0][0]

    ra_cent_off = self.ra_range[ra_ind]
    dec_cent_off = self.dec_range[dec_ind]

    if self.fits_data.data_dims == 2:
        self.ra_cent, self.dec_cent = self.fits_data.wcs.wcs_pix2world(
            ra_ind - self.edge_pad, dec_ind - self.edge_pad, 0)
    elif self.fits_data.data_dims == 3:
        self.ra_cent, self.dec_cent = self.fits_data.wcs.wcs_pix2world(
            ra_ind - self.edge_pad, dec_ind - self.edge_pad, 0, 0)
    elif self.fits_data.data_dims == 4:
        self.ra_cent, self.dec_cent, _, _ = self.fits_data.wcs.wcs_pix2world(
            ra_ind - self.edge_pad, dec_ind - self.edge_pad, 0, 0, 0)

    self.ras = self.fits_data.ras - ra_cent_off
    self.decs = self.fits_data.decs - dec_cent_off
def solve_quadratic(A, B, C):
    """
    Solves the quadratic A x^2 + B x + C.
    Python implementation of QDRTC
    """
    if C == 0:
        return array([0, -B / A])
    if A != 0 and C != 0:
        σ = get_nearest_radix(sqrt(np_abs(A)) * sqrt(np_abs(C)))
        if np_abs(B) == np_abs(B) + σ:
            return array([-C / B, -B / A])
        A = A / σ
        B = B / σ
        C = C / σ
    b = -B / 2
    q = _disc(A, b, C)
    if q < 0:
        X = b / A
        Y = sqrt(-q) / A
        return array([X + 1j * Y, X - 1j * Y])
    r = b + sign(b) * sqrt(q)
    if r == 0:
        X = C / A
        return array([X, -X])
    return array([C / r, r / A])
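# A minimal usage sketch for solve_quadratic, assuming stand-ins for the
# module-level helpers it relies on: get_nearest_radix and _disc below are
# hypothetical implementations in the spirit of Kahan's QDRTC (rescale by a
# power of 2, discriminant of A x^2 - 2 b x + C); the originals may differ.
if __name__ == '__main__':
    from numpy import array, sqrt, sign, frexp
    from numpy import abs as np_abs

    def get_nearest_radix(x):
        # a power of 2 near x, so rescaling introduces no rounding error
        return 2.0 ** frexp(x)[1]

    def _disc(A, b, C):
        # discriminant of A x^2 - 2 b x + C
        return b * b - A * C

    print(solve_quadratic(1.0, -3.0, 2.0))  # real roots 1 and 2
    print(solve_quadratic(1.0, 2.0, 5.0))   # complex pair -1 +/- 2j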
def comp_height_active(self):
    """Compute the height of the active area

    Parameters
    ----------
    self : SlotM12
        A SlotM12 object

    Returns
    -------
    Hwind: float
        Height of the active area [m]
    """
    point_dict = self._comp_point_coordinate()
    ZM0 = point_dict["ZM0"]
    ZM1 = point_dict["ZM1"]
    ZM2 = point_dict["ZM2"]
    ZM3 = point_dict["ZM3"]
    ZM4 = point_dict["ZM4"]

    if self.is_outwards():
        R1 = np_abs(ZM0)
        R2 = np_abs(ZM1)
    else:
        R1 = np_abs((ZM1 + ZM4) / 2)
        R2 = np_abs(ZM0)

    return R2 - R1
def intersect_line(self, Z1, Z2):
    """Return a list (0, 1 or 2 complex) of coordinates of the intersection
    of the segment with a line defined by two complex

    Parameters
    ----------
    self : Segment
        A Segment object

    Returns
    -------
    Z_list: list
        Complex coordinates of the intersection (if any,
        return [begin, end] if the segment is part of the line)
    """
    Z3 = self.begin
    Z4 = self.end
    Z_list = inter_line_line(Z1, Z2, Z3, Z4)
    if len(Z_list) == 0:  # No intersection
        return []
    elif len(Z_list) == 1:
        # One intersect. Is it between begin and end or not ?
        Z_int = Z_list[0]
        Seg_len = self.comp_length()
        if np_abs(Z_int - Z3) <= Seg_len and np_abs(Z_int - Z4) <= Seg_len:
            return [Z_int]
        else:
            return []
    elif len(Z_list) == 2:  # The segment is on the line
        return [Z3, Z4]
def pairwise_complex_cmp(a, b):
    if a == b:
        return 0
    if np_abs(a) == np_abs(b):
        if a.real == b.real:
            return bool_to_cmp(a.imag > b.imag)
        return bool_to_cmp(a.real > b.real)
    return bool_to_cmp(np_abs(a) > np_abs(b))
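# A small demo of pairwise_complex_cmp, assuming bool_to_cmp is the usual
# True -> 1 / False -> -1 helper (a hypothetical stand-in; the original may
# live elsewhere in the module). The comparator orders complex numbers by
# magnitude, then real part, then imaginary part.
if __name__ == '__main__':
    from functools import cmp_to_key
    from numpy import abs as np_abs

    def bool_to_cmp(flag):
        return 1 if flag else -1

    zs = [1 + 1j, 2 + 0j, 1 - 1j, 0.5 + 0j]
    print(sorted(zs, key=cmp_to_key(pairwise_complex_cmp)))
    # -> [(0.5+0j), (1-1j), (1+1j), (2+0j)]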
def _find_image_centre_celestial(self):
    '''Find the flux-weighted central position of an image'''
    power = 4
    ra_cent = sum(self.fits_data.flat_data[self.pixel_inds_to_use]**power *
                  self.fits_data.ras[self.pixel_inds_to_use])
    ra_cent /= sum(self.fits_data.flat_data[self.pixel_inds_to_use]**power)

    dec_cent = sum(self.fits_data.flat_data[self.pixel_inds_to_use]**power *
                   self.fits_data.decs[self.pixel_inds_to_use])
    dec_cent /= sum(self.fits_data.flat_data[self.pixel_inds_to_use]**power)

    resolution = abs(self.fits_data.ras[1] - self.fits_data.ras[0])

    ## Find the difference between the gridded ra coords and the desired ra_cent
    ra_offs = np_abs(self.fits_data.ras - ra_cent)

    ## Find out where in the gridded ra coords the current ra_cent lives;
    ## this is a boolean array of length len(ra_offs)
    ra_true = ra_offs < resolution / 2.0

    ## Find the index so we can access the correct entry in the container
    ra_ind = where(ra_true == True)[0]

    ## Use the numpy abs because it's faster (np_abs)
    dec_offs = np_abs(self.fits_data.decs - dec_cent)
    dec_true = dec_offs < resolution / 2
    dec_ind = where(dec_true == True)[0]

    ## If the ra_ind,dec_ind coord sits directly between two grid points,
    ## just choose the first one
    if len(ra_ind) == 0:
        ra_true = ra_offs <= resolution / 2
        ra_ind = where(ra_true == True)[0]
    if len(dec_ind) == 0:
        dec_true = dec_offs <= resolution / 2
        dec_ind = where(dec_true == True)[0]
    ra_ind, dec_ind = ra_ind[0], dec_ind[0]

    ## Central dec index has multiple rows as it is from flattened coords,
    ## remove that here
    dec_ind = floor(dec_ind / self.fits_data.len1)

    print('Centre of flux pixel in image found as x,y', ra_ind, dec_ind)

    ra_mesh = deepcopy(self.fits_data.ras)
    ra_mesh.shape = self.fits_data.data.shape

    dec_mesh = deepcopy(self.fits_data.decs)
    dec_mesh.shape = self.fits_data.data.shape

    ra_range = ra_mesh[0, :]
    dec_range = dec_mesh[:, 0]

    self.ra_cent_ind = ra_ind
    self.dec_cent_ind = dec_ind
    self.ra_mesh = ra_mesh
    self.dec_mesh = dec_mesh
    self.ra_range = ra_range
    self.dec_range = dec_range
def comp_angle_d_axis(self, is_plot=False):
    """Compute the angle between the X axis and the first d+ axis
    By convention a "Tooth" is centered on the X axis

    Parameters
    ----------
    self : LamSlotWind
        A LamSlotWind object
    is_plot : bool
        True to plot d axis position regarding unit mmf

    Returns
    -------
    d_angle : float
        angle between the X axis and the first d+ axis
    """
    if self.winding is None or self.winding.qs == 0 or self.winding.conductor is None:
        return 0

    p = self.get_pole_pair_number()
    MMF, _ = self.comp_mmf_unit(Nt=1, Na=400 * p)

    # Get angle values
    results1 = MMF.get_along("angle[oneperiod]")
    angle_stator = results1["angle"]

    # Get the unit mmf FFT and wavenumbers
    results = MMF.get_along("wavenumber")
    wavenumber = results["wavenumber"]
    mmf_ft = results[MMF.symbol]

    # Find the fundamental harmonic of MMF
    indr_fund = np_abs(wavenumber - p).argmin()
    phimax = np_angle(mmf_ft[indr_fund])
    magmax = np_abs(mmf_ft[indr_fund])

    # Reconstruct fundamental MMF wave
    mmf_waveform = magmax * cos(p * angle_stator + phimax)

    # Get the first angle where mmf is max
    d_angle = angle_stator[argmax(mmf_waveform)]

    if is_plot:
        import matplotlib.pyplot as plt
        from numpy import squeeze

        fig, ax = plt.subplots()
        ax.plot(angle_stator,
                squeeze(MMF.get_along("angle[oneperiod]")[MMF.symbol]), "k")
        ax.plot(angle_stator, mmf_waveform, "r")
        ax.plot([d_angle, d_angle], [-magmax, magmax], "--k")
        plt.show()

    return d_angle
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    during the update step, you calculate the gradient of Q
    and update w and b.
    '''
    # accumulate the decay rate (beta_m**t), in order to correct the average
    self.beta_m_ac *= self.beta_m

    # The update should run after
    # FFANN.feedForward() and FFANN.backPropagation().

    # these will be used to determine if the stopping conditions are satisfied
    _w2 = 0
    _check = 0

    self.Q.randomDataPoint()

    for l in range(self.Q.model.total_layers - 1):
        for j in range(self.Q.model.nodes[l + 1]):
            for i in range(self.Q.model.nodes[l]):
                # get the grad of the loss. The results should be stored in
                # loss.dQdw and loss.dQdb. This way it should be easy to
                # update the weights and biases of FFANN
                self.Q.grad(l, j, i)

                self.mw[l][j][i] = self.beta_m * self.mw[l][j][i] + (
                    1 - self.beta_m) * self.Q.dQdw
                self.vw_max[l][j][i] = np_max([
                    self.beta_v * self.vw_max[l][j][i],
                    np_abs(self.Q.dQdw)
                ])

                dw = (self.alpha / (self.vw_max[l][j][i] + self.epsilon)
                      * self.mw[l][j][i] / (1 - self.beta_m_ac))

                # update the weight using loss.dQdw
                self.Q.model.addToWeight(l, j, i, -dw)

                _w2 = abs_tol + np_abs(self.Q.model.weights[l][j][i]) * rel_tol
                _check += (dw / _w2) * (dw / _w2)

            # update the bias using loss.dQdb
            # (it is the same for all i, so don't run loss.grad again)
            self.mb[l][j] = self.beta_m * self.mb[l][j] + (
                1 - self.beta_m) * self.Q.dQdb
            self.vb_max[l][j] = np_max(
                [self.beta_v * self.vb_max[l][j], np_abs(self.Q.dQdb)])

            dw = (self.alpha / (self.vb_max[l][j] + self.epsilon)
                  * self.mb[l][j] / (1 - self.beta_m_ac))
            self.Q.model.addToBias(l, j, -dw)

            _w2 = abs_tol + np_abs(self.Q.model.biases[l][j]) * rel_tol
            _check += (dw / _w2) * (dw / _w2)

    _check = np_sqrt(1. / self.Q.N * _check)

    return _check
def my_dist(a, b):
    if a[2] == b[2]:
        a_t = (a[0] + np_pi) / 2
        b_t = (b[0] + np_pi) / 2
        a_r = a[1]
        b_r = b[1]
        return 10 * ((1. - np_cos(np_abs(a_t - b_t))) +
                     np_log(np_abs(a_r - b_r) + 1.) / 5.)
    else:
        return 20.0
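# A quick sanity check of my_dist on (theta, r, label) triples; the third
# component acts as a hard class gate (distance 20 across classes), while
# within a class the angular term saturates via cosine and the radial term
# grows logarithmically. The triples here are hypothetical sample points.
if __name__ == '__main__':
    from numpy import pi as np_pi
    from numpy import cos as np_cos, log as np_log
    from numpy import abs as np_abs

    p1 = (0.0, 1.0, 'A')
    p2 = (np_pi / 2, 2.0, 'A')
    p3 = (0.0, 1.0, 'B')
    print(my_dist(p1, p2))  # small within-class distance (~4.3)
    print(my_dist(p1, p3))  # 20.0 across classes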
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    during the update step, you calculate the gradient of Q
    and update w and b.
    '''
    # The update should run after
    # FFANN.feedForward() and FFANN.backPropagation().

    # these will be used to determine if the stopping conditions are satisfied
    _w2 = 0
    _check = 0

    self.Q.randomDataPoint()

    for l in range(self.Q.model.total_layers - 1):
        for j in range(self.Q.model.nodes[l + 1]):
            for i in range(self.Q.model.nodes[l]):
                # get the grad of the loss. The results should be stored in
                # loss.dQdw and loss.dQdb. This way it should be easy to
                # update the weights and biases of FFANN
                self.Q.grad(l, j, i)

                self.mean_dQdw[l][j][i] = (self.gamma * self.mean_dQdw[l][j][i]
                                           + (1 - self.gamma) * self.Q.dQdw**2)

                dw = (np_sqrt((self.mean_dw[l][j][i] + self.epsilon) /
                              (self.mean_dQdw[l][j][i] + self.epsilon))
                      * self.Q.dQdw * self.alpha)

                self.mean_dw[l][j][i] = (self.gamma * self.mean_dw[l][j][i]
                                         + (1 - self.gamma) * dw**2)

                # update the weight using loss.dQdw
                self.Q.model.addToWeight(l, j, i, -dw)

                _w2 = abs_tol + np_abs(self.Q.model.weights[l][j][i]) * rel_tol
                _check += (dw / _w2) * (dw / _w2)

            # update the bias using loss.dQdb
            # (it is the same for all i, so don't run loss.grad again)
            self.mean_dQdb[l][j] = (self.gamma * self.mean_dQdb[l][j]
                                    + (1 - self.gamma) * self.Q.dQdb**2)

            dw = (np_sqrt((self.mean_db[l][j] + self.epsilon) /
                          (self.mean_dQdb[l][j] + self.epsilon))
                  * self.Q.dQdb * self.alpha)

            self.mean_db[l][j] = (self.gamma * self.mean_db[l][j]
                                  + (1 - self.gamma) * dw**2)

            self.Q.model.addToBias(l, j, -dw)

            _w2 = abs_tol + np_abs(self.Q.model.biases[l][j]) * rel_tol
            _check += (dw / _w2) * (dw / _w2)

    _check = np_sqrt(1. / self.Q.N * _check)

    return _check
def expandSelection(self, startIndex, vals, stdevCutoff=0.05, maxSpread=0.1):
    """Expand a selection left and right from a starting index in a list of values

    Keep expanding unless the stdev of the values goes above the cutoff.
    Return a list of indices into the original list
    """
    ret_list = [startIndex]  # this is what we will give back
    start_val = vals[startIndex]
    value_store = [start_val]

    sorted_indices = np_argsort(vals)
    max_index = len(vals)

    # set the upper and lower to point to the position
    # where the start resides
    lower_index = 0
    upper_index = 0
    for i in range(max_index):
        if sorted_indices[i] == startIndex:
            break
        lower_index += 1
        upper_index += 1

    do_lower = True
    do_upper = True
    max_index -= 1

    while do_lower or do_upper:
        if do_lower:
            do_lower = False
            if lower_index > 0:
                try_val = vals[sorted_indices[lower_index - 1]]
                if np_abs(try_val - start_val) < maxSpread:
                    try_array = value_store + [try_val]
                    if np_std(try_array) < stdevCutoff:
                        value_store = try_array
                        lower_index -= 1
                        ret_list.append(sorted_indices[lower_index])
                        do_lower = True
        if do_upper:
            do_upper = False
            if upper_index < max_index:
                try_val = vals[sorted_indices[upper_index + 1]]
                if np_abs(try_val - start_val) < maxSpread:
                    try_array = value_store + [try_val]
                    if np_std(try_array) < stdevCutoff:
                        value_store = try_array
                        upper_index += 1
                        ret_list.append(sorted_indices[upper_index])
                        do_upper = True
    return sorted(ret_list)
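# A standalone sketch of how expandSelection behaves; since self is unused,
# a bare call with None works for illustration (hypothetical data).
if __name__ == '__main__':
    from numpy import argsort as np_argsort, std as np_std
    from numpy import abs as np_abs

    vals = [0.10, 0.11, 0.12, 0.50, 0.13, 0.90]
    # expand around index 1 (value 0.11); the 0.50 and 0.90 outliers are
    # excluded by the maxSpread / stdevCutoff guards
    print(expandSelection(None, 1, vals, stdevCutoff=0.05, maxSpread=0.1))
    # -> [0, 1, 2, 4]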
def check_if_same_space(fname_1, fname_2):
    from msct_image import Image
    from numpy import min, nonzero, all, around
    from numpy import abs as np_abs
    from numpy import log10 as np_log10

    im_1 = Image(fname_1)
    im_2 = Image(fname_2)
    q1 = im_1.hdr.get_qform()
    q2 = im_2.hdr.get_qform()

    dec = int(np_abs(round(np_log10(min(np_abs(q1[nonzero(q1)]))))) + 1)
    dec = 4 if dec > 4 else dec

    return all(around(q1, dec) == around(q2, dec))
def get_common_base(values1, values2, is_extrap=False, is_downsample=False):
    """Returns a common base for vectors values1 and values2

    Parameters
    ----------
    values1: list
        values of the first axis
    values2: list
        values of the second axis
    is_extrap: bool
        Boolean indicating if we want to keep the widest vector and
        extrapolate the other one
    is_downsample: bool
        Boolean indicating if we want to keep the smallest number of points
        and downsample the other one

    Returns
    -------
    list of the common axis values
    """
    # if len(values1) == 2:
    #     return array([x for x in values2 if x >= values1[0] and x <= values1[-1]])
    # else:
    if is_extrap:
        initial = min(values1[0], values2[0])
        final = max(values1[-1], values2[-1])
    else:
        initial = max(values1[0], values2[0])
        final = min(values1[-1], values2[-1])
    if is_downsample:
        number = min(
            len([i for i in values1 if i >= initial and i <= final]),
            len([i for i in values2 if i >= initial and i <= final]),
        )
    else:
        length1 = len([i for i in values1 if i >= initial and i <= final])
        length2 = len([i for i in values2 if i >= initial and i <= final])
        if length1 > length2:
            number = length1
            if initial not in values1:
                initial = values1[argmin(np_abs([i - initial for i in values1])) + 1]
            if final not in values1:
                final = values1[argmin(np_abs([i - final for i in values1])) - 1]
        else:
            number = length2
            if initial not in values2:
                initial = values2[argmin(np_abs([i - initial for i in values2])) + 1]
            if final not in values2:
                final = values2[argmin(np_abs([i - final for i in values2])) - 1]
    return linspace(initial, final, int(number), endpoint=True)
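# A short usage sketch for get_common_base on two overlapping axes
# (hypothetical data): the result is a linspace over the intersection,
# sized by the denser axis, or by the sparser one with is_downsample=True.
if __name__ == '__main__':
    from numpy import linspace, argmin
    from numpy import abs as np_abs

    ax1 = [0.0, 1.0, 2.0, 3.0, 4.0]
    ax2 = [1.0, 1.5, 2.0, 2.5, 3.0, 3.5, 4.0, 4.5, 5.0, 5.5, 6.0]
    print(get_common_base(ax1, ax2))                      # 7 points on [1, 4]
    print(get_common_base(ax1, ax2, is_downsample=True))  # 4 points on [1, 4]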
def get_arrows_plt(mesh_pv, field, meshsol, factor, is_point_arrow, phase=1):
    """Create a pyvista arrow plot

    Parameters
    ----------
    mesh_pv : UnstructuredGrid
        a pyvista mesh object
    field : ndarray
        a vector field to plot as glyph
    meshsol : MeshSolution
        a MeshSolution object
    factor : float
        an amplitude factor for the glyph plot
    is_point_arrow : bool
        True to plot arrows on the nodes
    phase : complex
        a phase shift to apply on the plot

    Returns
    -------
    arrows_plt : PolyData
        a pyvista object to plot glyph
    factor : float
        an amplitude factor for the plot glyph
    """
    vect_field = real(field * phase)

    # Compute factor
    if factor is None:
        factor = 0.2 * np_max(np_abs(mesh_pv.bounds)) / np_max(np_abs(vect_field))

    # Add third dimension if needed
    solution = meshsol.get_solution()
    if solution.dimension == 2:
        vect_field = hstack((vect_field, zeros((vect_field.shape[0], 1))))

    # Add field to mesh
    if is_point_arrow:
        mesh_pv.vectors = vect_field * factor
        arrows_plt = mesh_pv.arrows
    else:
        mesh_pv["field"] = vect_field
        mesh_cell = mesh_pv.point_data_to_cell_data()
        surf = mesh_cell.extract_geometry()
        centers2 = surf.cell_centers()
        centers2.vectors = surf["field"] * factor
        arrows_plt = centers2.arrows

    return arrows_plt, factor
def comp_angle_d_axis(self):
    """Compute the angle between the X axis and the first d+ axis
    By convention a "Tooth" is centered on the X axis

    Parameters
    ----------
    self : LamSlotWind
        A LamSlotWind object

    Returns
    -------
    d_angle : float
        angle between the X axis and the first d+ axis
    """
    if self.winding is None:
        return 0

    p = self.get_pole_pair_number()
    MMF = self.comp_mmf_unit(Nt=1, Na=400 * p)

    # Get angle values
    results1 = MMF.get_along("angle[oneperiod]")
    angle_stator = results1["angle"]

    # Get the unit mmf FFT and wavenumbers
    results = MMF.get_along("wavenumber")
    wavenumber = results["wavenumber"]
    mmf_ft = results[MMF.symbol]

    # Find the fundamental harmonic of the MMF (wavenumber closest to p)
    indr_fund = np_abs(wavenumber - p).argmin()
    phimax = np_angle(mmf_ft[indr_fund])
    magmax = np_abs(mmf_ft[indr_fund])

    # Reconstruct fundamental MMF wave
    mmf_waveform = magmax * cos(p * angle_stator + phimax)

    # Get the first angle where mmf is max
    d_angle = angle_stator[argmax(mmf_waveform)]

    # fig, ax = plt.subplots()
    # ax.plot(angle_stator, squeeze(MMF.get_along("angle[oneperiod]")[MMF.symbol]), "k")
    # ax.plot(angle_stator, mmf_waveform, "r")
    # ax.plot([d_angle, d_angle], [-magmax, magmax], "--k")
    # plt.show()

    return d_angle
def pHfromTATC(TA, TC, K1, K2, KW, KB, KF, KS, KP1, KP2, KP3, KSi, KNH3,
               KH2S, TB, TF, TS, TP, TSi, TNH3, TH2S):
    """Calculate pH from total alkalinity and dissolved inorganic carbon.

    This calculates pH from TA and TC using K1 and K2 by Newton's method.
    It tries to solve for the pH at which Residual = 0.
    The starting guess is pH = 8.

    Though it is coded for H on the total pH scale, for the pH values
    occurring in seawater (pH > 6) it will be equally valid on any pH scale
    (H terms negligible) as long as the K constants are on that scale.

    Based on CalculatepHfromTATC, version 04.01, 10-13-96, by Ernie Lewis.
    SVH2007: Made this to accept vectors. It will continue iterating until
    all values in the vector are "abs(deltapH) < pHTol".
    """
    pHGuess = 8.0  # this is the first guess
    pH = full_like(TA, pHGuess)  # first guess for all samples
    deltapH = 1 + pHTol
    ln10 = log(10)
    while np_any(np_abs(deltapH) > pHTol):
        HCO3, CO3, BAlk, OH, PAlk, SiAlk, NH3Alk, H2SAlk, Hfree, HSO4, HF = \
            AlkParts(pH, TC, K1, K2, KW, KB, KF, KS, KP1, KP2, KP3, KSi,
                     KNH3, KH2S, TB, TF, TS, TP, TSi, TNH3, TH2S)
        CAlk = HCO3 + 2 * CO3
        H = 10.0**-pH
        Denom = H**2 + K1 * H + K1 * K2
        Residual = (TA - CAlk - BAlk - OH - PAlk - SiAlk - NH3Alk - H2SAlk +
                    Hfree + HSO4 + HF)
        # Find slope dTA/dpH (this is not exact, but keeps important terms):
        Slope = ln10 * (TC * K1 * H * (H**2 + K1 * K2 + 4 * H * K2) / Denom**2 +
                        BAlk * H / (KB + H) + OH + H)
        deltapH = Residual / Slope  # this is Newton's method
        # To keep the jump from being too big:
        while any(np_abs(deltapH) > 1):
            FF = np_abs(deltapH) > 1
            deltapH[FF] /= 2.0
        # The following logical means that each row stops updating once its
        # deltapH value is beneath the pHTol threshold, instead of continuing
        # to update ALL rows until they all meet the threshold.
        # This approach avoids the problem of reaching a different
        # answer for a given set of input conditions depending on how many
        # iterations the other input rows take to solve. // MPH
        F = np_abs(deltapH) > pHTol
        pH[F] += deltapH[F]
    # ^pH is on the same scale as K1 and K2 were calculated.
    return pH
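# A minimal, self-contained sketch of the same vectorised Newton pattern used
# above (damped steps, per-row convergence mask), applied to a toy function so
# it runs without the carbonate-system helpers; newton_rows is a hypothetical
# stand-in, not part of the original module.
if __name__ == '__main__':
    import numpy as np

    def newton_rows(f, dfdx, x0, tol=1e-8):
        x = np.array(x0, dtype=float)
        delta = np.full_like(x, 1 + tol)
        while np.any(np.abs(delta) > tol):
            delta = f(x) / dfdx(x)
            while np.any(np.abs(delta) > 1):  # damp oversized jumps
                big = np.abs(delta) > 1
                delta[big] /= 2.0
            mask = np.abs(delta) > tol        # freeze converged rows
            x[mask] -= delta[mask]
        return x

    # solve x**2 = c for several c at once
    c = np.array([2.0, 9.0, 10.0])
    print(newton_rows(lambda x: x**2 - c, lambda x: 2 * x, np.full(3, 3.0)))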
def comp_radius(self):
    """Compute the radius of the min and max circle that contains the hole

    Parameters
    ----------
    self : Hole
        A Hole object

    Returns
    -------
    (Rmin, Rmax): tuple
        Radius of the circle that contains the hole
    """
    surf_list = self.build_geometry()
    point_list = list()
    for surf in surf_list:
        for curve in surf.get_lines():
            point_list.extend(curve.discretize())
    abs_list = [np_abs(point) for point in point_list]
    Rmax = max(abs_list)
    Rmin = min(abs_list)
    return (Rmin, Rmax)
def get_middle(self):
    """Return the point at the middle of the arc

    Parameters
    ----------
    self : Arc1
        An Arc1 object

    Returns
    -------
    Zmid: complex
        Complex coordinates of the middle of the Arc1
    """
    # We use the complex representation of the point
    z1 = self.begin
    z2 = self.end
    zc = self.get_center()

    # Geometric transformation : center is the origin, angle(begin) = 0
    Zstart = (z1 - zc) * exp(-1j * np_angle(z1 - zc))

    # Generation of the point by rotation
    alpha = self.get_angle()
    Zmid = Zstart * exp(1j * alpha / 2)

    # Geometric transformation : return to the main axis
    Zmid = Zmid * exp(1j * np_angle(z1 - zc)) + zc

    # Return (0,0) if the point is too close to 0
    if np_abs(Zmid) < 1e-6:
        Zmid = 0

    return Zmid
def comp_radius(self):
    """Compute the radius of the min and max circle that contains the hole

    Parameters
    ----------
    self : HoleM54
        A HoleM54 object

    Returns
    -------
    (Rmin, Rmax): tuple
        Radius of the circle that contains the hole [m]
    """
    Rbo = self.get_Rbo()

    surf_list = self.build_geometry()
    point_list = list()
    for curve in surf_list[0].line_list:
        point_list.extend(curve.discretize())
    Rmax = max([np_abs(point) for point in point_list])
    Rmin = Rbo - self.H0 - self.H1

    return (Rmin, Rmax)
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    update should return a number that when it is smaller than 1
    the main loop stops. Here I choose this number to be:
    sqrt(1/dim*sum_{i=0}^{dim}(grad/(abs_tol+x*rel_tol))_i^2)
    '''
    _w2 = 0
    _check = 0

    self.Q.averageGrad()

    for i in range(self.dim):
        self.gE[i] = self.gamma * self.gE[i] + (1 - self.gamma) * self.Q.grad[i]**2
        dw = self.alpha / np_sqrt(self.gE[i] + self.epsilon) * self.Q.grad[i]

        self.Q.model.w[i] = self.Q.model.w[i] - dw

        _w2 = abs_tol + np_abs(self.Q.model.w[i]) * rel_tol
        _check += (dw / _w2) * (dw / _w2)

        self.Q.grad[i] = 0

    _check = np_sqrt(1. / self.dim * _check)

    self.steps.append(self.Q.model.w[:])

    return _check
def get_interpolation(values, axis_values, new_axis_values):
    """Returns the interpolated field along one axis, given the new axis

    Parameters
    ----------
    values: ndarray
        1Darray of a field along one axis
    axis_values: list
        values of the original axis
    new_axis_values: list
        values of the new axis

    Returns
    -------
    ndarray of the interpolated field
    """
    if str(axis_values) == "whole":
        # Whole axis -> no interpolation
        return values
    elif len(new_axis_values) == 1:
        # Single point -> use argmin
        idx = argmin(np_abs(axis_values - new_axis_values[0]))
        return take(values, [idx])
    elif len(axis_values) == len(new_axis_values) and all(
            isclose(axis_values, new_axis_values, rtol=1e-03)):
        # Same axes -> no interpolation
        return values
    elif isin(around(new_axis_values, 5), around(axis_values, 5),
              assume_unique=True).all():
        # New axis is a subset -> no interpolation
        return values[isin(around(axis_values, 5), around(new_axis_values, 5),
                           assume_unique=True)]
    else:
        f = interpolate.interp1d(axis_values, values)
        return f(new_axis_values)
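# A quick usage sketch for get_interpolation, assuming the numpy/scipy
# imports the module relies on; it exercises the subset shortcut and the
# genuine interp1d fallback on hypothetical data.
if __name__ == '__main__':
    from numpy import array, argmin, take, isclose, isin, around
    from numpy import abs as np_abs
    from scipy import interpolate

    axis = array([0.0, 1.0, 2.0, 3.0])
    field = array([0.0, 10.0, 20.0, 30.0])
    print(get_interpolation(field, axis, array([1.0, 3.0])))  # subset -> [10. 30.]
    print(get_interpolation(field, axis, array([0.5, 1.5])))  # interp -> [ 5. 15.]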
def get_center(self):
    """Return the center of the arc

    Parameters
    ----------
    self : Arc3
        An Arc3 object

    Returns
    -------
    Zc: complex
        Complex coordinates of the center of the Arc3
    """
    self.check()

    # the center is the middle of [begin, end]
    z1 = self.begin
    z2 = self.end
    Zc = (z1 + z2) / 2.0

    # Return (0,0) if the point is too close to 0
    if np_abs(Zc) < 1e-6:
        Zc = 0

    return Zc
def get_middle(self):
    """Return the point at the middle of the arc

    Parameters
    ----------
    self : Arc3
        An Arc3 object

    Returns
    -------
    Zmid: complex
        Complex coordinates of the middle of the Arc3
    """
    # We use the complex representation of the point
    z1 = self.begin
    zc = self.get_center()
    R = self.comp_radius()

    # Generation of the point by rotation
    if self.is_trigo_direction:  # Top
        Zmid = R * exp(1j * pi / 2.0)
    else:  # Bottom
        Zmid = R * exp(-1j * pi / 2.0)

    # Geometric transformation : return to the main axis
    Zmid = Zmid * exp(1j * np_angle(z1 - zc)) + zc

    # Return (0,0) if the point is too close to 0
    if np_abs(Zmid) < 1e-6:
        Zmid = 0

    return Zmid
def get_end(self):
    """Return the end of the arc

    Parameters
    ----------
    self : Arc2
        An Arc2 object

    Returns
    -------
    end: complex
        Complex coordinates of the end of the Arc2
    """
    self.check()

    z1 = self.begin
    zc = self.center
    angle = self.angle

    # Geometric transformation : center is the origin (-zc),
    # then rotation of begin by the correct angle (*exp(1j*angle)),
    # then return to the main axis (+zc)
    z2 = (z1 - zc) * exp(1j * angle) + zc

    # Return (0,0) if the point is too close to 0
    if np_abs(z2) < 1e-6:
        z2 = 0

    return z2
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    update should return a number that when it is smaller than 1
    the main loop stops. Here I choose this number to be:
    sqrt(1/dim*sum_{i=0}^{dim}(grad/(abs_tol+x*rel_tol))_i^2)
    '''
    # beta_m_ac and beta_v_ac (the accumulated decay rates used for bias
    # correction) are assumed to be updated elsewhere, as in the stochastic
    # variant of this optimizer
    _w2 = 0
    _check = 0

    self.Q.averageGrad()

    for i in range(self.dim):
        self.mE[i] = self.beta_m * self.mE[i] + (1 - self.beta_m) * self.Q.grad[i]
        self.vE[i] = self.beta_v * self.vE[i] + (1 - self.beta_v) * self.Q.grad[i]**2

        dw = self.alpha / (np_sqrt(self.vE[i] / (1 - self.beta_v_ac)) + self.epsilon)
        dw *= self.mE[i] / (1 - self.beta_m_ac)

        self.Q.model.w[i] = self.Q.model.w[i] - dw

        _w2 = abs_tol + np_abs(self.Q.model.w[i]) * rel_tol
        _check += (dw / _w2) * (dw / _w2)

        self.Q.grad[i] = 0

    _check = np_sqrt(1. / self.dim * _check)

    return _check
def alkComponents(titrationPotentiometric, ax=None):
    t = titrationPotentiometric
    assert 'alkSteps' in vars(t), \
        'You must first run `titrationPotentiometric.get_alkSteps()`.'
    # Get the 'used' values
    solver = 'complete'  # only valid option for now
    usedMin = np_min(t.volAcid[t.solvedWith[solver]])
    usedMax = np_max(t.volAcid[t.solvedWith[solver]])
    # Draw the plot
    ax = _checksetax(ax)
    ax.plot(t.volAcid, -log10(t.alkSteps), label='Total alk.',
            marker='o', markersize=_markersize, c='k', alpha=_alpha)
    for component, conc in t.alkComponents.items():
        if np_any(conc != 0):
            ax.plot(t.volAcid, -log10(np_abs(conc)), **rgbs[component])
    ax.add_patch(patches.Rectangle(
        (usedMin, ax.get_ylim()[1]), usedMax - usedMin,
        ax.get_ylim()[0] - ax.get_ylim()[1], facecolor=0.9 * ones(3)))
    ax.invert_yaxis()
    ax.legend(bbox_to_anchor=(1.05, 1), edgecolor='k')
    ax.set_xlabel('Acid volume / ml')
    ax.set_ylabel('$-$log$_{10}$(concentration from pH / mol$\\cdot$kg$^{-1}$)')
    return ax
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    update should return a number that when it is smaller than 1
    the main loop stops. Here I choose this number to be:
    sqrt(1/dim*sum_{i=0}^{dim}(grad/(abs_tol+x*rel_tol))_i^2)
    '''
    self.Q.randomDataPoint()

    # accumulate the decay rates (beta**t), in order to correct the averages
    self.beta_m_ac *= self.beta_m
    self.beta_v_ac *= self.beta_v

    _w2 = 0
    _check = 0

    for i in range(self.dim):
        self.Q.grad(i)

        self.mE[i] = self.beta_m * self.mE[i] + (1 - self.beta_m) * self.Q.dQdw
        self.vE[i] = self.beta_v * self.vE[i] + (1 - self.beta_v) * self.Q.dQdw**2

        dw = self.alpha / (np_sqrt(self.vE[i] / (1 - self.beta_v_ac)) + self.epsilon)
        dw *= self.mE[i] / (1 - self.beta_m_ac)

        self.Q.model.w[i] = self.Q.model.w[i] - dw

        _w2 = abs_tol + np_abs(self.Q.model.w[i]) * rel_tol
        _check += (dw / _w2) * (dw / _w2)

    _check = np_sqrt(1. / self.dim * _check)

    return _check
def update(self, abs_tol=1e-5, rel_tol=1e-3):
    '''
    update should return a number that when it is smaller than 1
    the main loop stops. Here I choose this number to be:
    sqrt(1/dim*sum_{i=0}^{dim}(grad/(abs_tol+x*rel_tol))_i^2)
    '''
    self.Q.randomDataPoint()

    _w2 = 0
    _check = 0

    for i in range(self.dim):
        self.Q.grad(i)

        self.gE[i] = self.gamma * self.gE[i] + (1 - self.gamma) * self.Q.dQdw**2
        dw = self.alpha / np_sqrt(self.gE[i] + self.epsilon) * self.Q.dQdw

        self.Q.model.w[i] = self.Q.model.w[i] - dw

        _w2 = abs_tol + np_abs(self.Q.model.w[i]) * rel_tol
        _check += (dw / _w2) * (dw / _w2)

    _check = np_sqrt(1. / self.dim * _check)

    return _check
def approach_goal(y, dy, goal):
    # based on Hoffmann (2009) but instead of
    # avoiding obstacles, we approach a goal
    gamma = 10  # 1/5
    beta = 1 / np.pi

    p = np.zeros(2)
    if np_norm(dy) > 1e-5:
        # calculate current heading
        phi_dy = np.arctan2(dy[1], dy[0])
        # calc vector to goal
        goal_vec = goal - y
        phi_goal = np.arctan2(goal_vec[1], goal_vec[0])

        # angle diff
        phi = phi_goal - phi_dy

        # tuned inverse sigmoid to create force towards goal
        dphi = gamma * phi * np_exp(-beta * np_abs(phi))
        pval = goal_vec * dphi
        # print("force vector:", pval, dy)

        p += pval
    return p
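# A tiny demo of approach_goal, assuming numpy plus the np_norm/np_exp/np_abs
# aliases the module uses; the steering force vanishes when the velocity
# already points at the goal and grows with the heading error.
if __name__ == '__main__':
    import numpy as np
    from numpy.linalg import norm as np_norm
    from numpy import exp as np_exp
    from numpy import abs as np_abs

    y = np.array([0.0, 0.0])  # current position
    goal = np.array([1.0, 1.0])
    print(approach_goal(y, np.array([1.0, 1.0]), goal))  # aligned -> [0, 0]
    print(approach_goal(y, np.array([1.0, 0.0]), goal))  # misaligned -> nonzero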
def manhattan(self, loc1, loc2):
    '''
    Return the distance between two locations in taxicab geometry.
    Uses numpy arrays and wraps around the map edges correctly.
    '''
    absolute = np_abs(loc1 - loc2)  # slightly faster than abs()
    modular = self.world_size - absolute
    return sum(minimum(absolute, modular))  # slightly faster than a.sum()
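# A standalone sketch of the same wrap-around Manhattan distance on a
# toroidal grid (hypothetical world_size, no class needed):
if __name__ == '__main__':
    from numpy import array, minimum
    from numpy import abs as np_abs

    world_size = array([10, 10])
    loc1 = array([1, 1])
    loc2 = array([9, 2])
    absolute = np_abs(loc1 - loc2)
    modular = world_size - absolute
    # going left and wrapping costs 2 steps, not 8, so total is 2 + 1 = 3
    print(sum(minimum(absolute, modular)))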
def dropOutliers(df_test, tight):
    return df_test[np_abs(df_test.probability - df_test.probability.mean())
                   <= (tight * df_test.probability.std())]
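# A minimal pandas demo of dropOutliers on hypothetical data: rows whose
# `probability` deviates from the mean by more than `tight` standard
# deviations are dropped.
if __name__ == '__main__':
    import pandas as pd
    from numpy import abs as np_abs

    df = pd.DataFrame({'probability': [0.49, 0.50, 0.51, 0.52, 5.00]})
    print(dropOutliers(df, tight=1))  # the 5.00 row is removed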
def train_epoch(self, input_data, target_train):
    weights = self.weights

    # project the input onto the weight space and reconstruct it
    self.minimized = minimized = dot(input_data, weights)
    reconstruct = dot(minimized, weights.T)
    error = input_data - reconstruct

    # Hebbian-style update from the reconstruction error
    weights += self.step * dot(error.T, minimized)

    return np_abs(error) / (input_data.shape[0] * input_data.shape[1])
def normilize_error_output(output):
    """ Normalize error output when result is non-scalar.

    Parameters
    ----------
    output : array-like
        Input can be any numpy array or matrix.

    Returns
    -------
    int, float
        Return sum of all absolute values.
    """
    return np_sum(np_abs(output))
def train_epoch(self, input_train, target_train):
    centers = self.centers
    old_centers = centers.copy()
    output_train = self.predict(input_train)

    for i, center in enumerate(centers):
        positions = argwhere(output_train[:, 0] == i)

        # check for emptiness, not truthiness: ``np_any(positions)`` would
        # be False when the only matching row index is 0
        if len(positions) == 0:
            continue

        class_data = take(input_train, positions, axis=0)
        centers[i, :] = (1 / len(class_data)) * np_sum(class_data, axis=0)

    return np_abs(old_centers - centers)
def train_epoch(self, input_data, target_train):
    weights = self.weights

    # project the input onto the weight space and reconstruct it
    minimized = dot(input_data, weights)
    reconstruct = dot(minimized, weights.T)
    error = input_data - reconstruct

    # Hebbian-style update from the reconstruction error
    weights += self.step * dot(error.T, minimized)

    mae = np_sum(np_abs(error)) / input_data.size

    del minimized
    del reconstruct
    del error

    return mae
def layer_weight_update(self, delta, layer_number):
    if not hasattr(self, "prev_gradients"):
        weight_delta = delta
    else:
        gradient = self.gradients[layer_number]
        prev_gradient = self.prev_gradients[layer_number]
        prev_weight_delta = self.prev_weight_deltas[layer_number]

        if norm(prev_gradient - gradient) < self.gradient_tol:
            raise StopIteration("Gradient norm after update is "
                                "less than {}".format(self.gradient_tol))

        weight_delta = prev_weight_delta * (gradient /
                                            (prev_gradient - gradient))

        upper_bound = self.upper_bound
        weight_delta = where(np_abs(weight_delta) < upper_bound,
                             weight_delta,
                             sign(weight_delta) * upper_bound)

    self.weight_deltas.append(weight_delta)
    return weight_delta
def inf_norm(a, b):
    return np_abs(a - b).max()
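# A tiny demo of inf_norm (the Chebyshev distance between two arrays):
if __name__ == '__main__':
    from numpy import array
    from numpy import abs as np_abs

    a = array([1.0, 2.0, 3.0])
    b = array([1.5, 2.0, 1.0])
    print(inf_norm(a, b))  # 2.0, the largest absolute difference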
jupiter_hist_a = [calc_a(SUN_GM, JUPITER_GM,
                         sv[7*0+1:7*0+4], sv[7*0+4:7*0+7],
                         sv[7*1+1:7*1+4], sv[7*1+4:7*1+7])
                  for t, sv in hist_state_vec]
saturn_hist_a = [calc_a(SUN_GM, SATURN_GM,
                        sv[7*0+1:7*0+4], sv[7*0+4:7*0+7],
                        sv[7*2+1:7*2+4], sv[7*2+4:7*2+7])
                 for t, sv in hist_state_vec]
jupiter_hist_n = [calc_n(SUN_GM, JUPITER_GM,
                         sv[7*0+1:7*0+4], sv[7*0+4:7*0+7],
                         sv[7*1+1:7*1+4], sv[7*1+4:7*1+7])
                  for t, sv in hist_state_vec]
saturn_hist_n = [calc_n(SUN_GM, SATURN_GM,
                        sv[7*0+1:7*0+4], sv[7*0+4:7*0+7],
                        sv[7*2+1:7*2+4], sv[7*2+4:7*2+7])
                 for t, sv in hist_state_vec]

do_plot('hist_energy.png', times, [hist_energy],
        'Energy conservation plot', 'Time (s)', 'G * energy (km^5 / s^4)')
do_plot('jupiter_hist_a.png', times, [jupiter_hist_a],
        'Jupiter osculating semimajor axis', 'Time (s)', 'Semimajor axis (km)')
do_plot('saturn_hist_a.png', times, [saturn_hist_a],
        'Saturn osculating semimajor axis', 'Time (s)', 'Semimajor axis (km)')
do_plot('jupiter_hist_n.png', times, [jupiter_hist_n],
        'Jupiter osculating mean motion', 'Time (s)', 'Mean motion (rad/s)')
do_plot('saturn_hist_n.png', times, [saturn_hist_n],
        'Saturn osculating mean motion', 'Time (s)', 'Mean motion (rad/s)')

import numpy as np

# integrate the mean-motion anomaly and convert from radians to arcminutes
saturn_hist_dl = np.cumsum(np.subtract(saturn_hist_n, np.average(saturn_hist_n))
                           * (times[1] - times[0])) * 180.0 / np.pi * 60.0
do_plot('saturn_hist_dl.png', times, [saturn_hist_dl],
        'Saturn longitude anomaly', 'Time (s)', 'Anomaly (arcmin)')

from numpy.fft import rfft
from numpy import abs as np_abs

jupiter_n_spectrum = np_abs(rfft(jupiter_hist_n)) / (len(jupiter_hist_n) / 2)
saturn_n_spectrum = np_abs(rfft(saturn_hist_n)) / (len(saturn_hist_n) / 2)
do_plot('jupiter_n_spectrum.png', range(1, 20), [jupiter_n_spectrum[1:20]],
        'Jupiter mean motion spectrum', 'Component number', 'Amplitude (rad/s)')
do_plot('saturn_n_spectrum.png', range(1, 20), [saturn_n_spectrum[1:20]],
        'Saturn mean motion spectrum', 'Component number', 'Amplitude (rad/s)')
def mae(actual, predicted):
    """ Mean absolute error. """
    actual, predicted = _preformat_inputs(actual, predicted)

    data_size = actual.shape[0]
    return np_abs(predicted - actual).sum() / data_size
def fixPosWLAN(len_wlan=None, wlan=None, wppdb=None, verb=False):
    """ Returns the online fixed user location in lat/lon format.

    Parameters
    ----------
    len_wlan: int, mandatory
        Number of online visible WLAN APs.
    wlan: np.array, string list, mandatory
        Array of MAC/RSS for online visible APs.
        e.g. [['00:15:70:9E:91:60' '00:15:70:9E:91:61' '00:15:70:9E:91:62' '00:15:70:9E:6C:6C']
              ['-55' '-56' '-57' '-68']].
    verb: verbose mode option, default: False
        More debugging info if enabled(True).

    Returns
    -------
    posfix: np.array, float
        Final fixed location(lat, lon).
        e.g. [ 39.922942 116.472673 ]
    """
    interpart_offline = False
    interpart_online = False

    # db query result: [ maxNI, keys:[ [keyaps:[], keycfps:(())], ... ] ].
    # maxNI=0 if no cluster found.
    maxNI, keys = wppdb.getBestClusters(macs=wlan[0])
    #maxNI,keys = [2, [
    #    [['00:21:91:1D:C0:D4', '00:19:E0:E1:76:A4', '00:25:86:4D:B4:C4'],
    #        [[5634, 5634, 39.898019, 116.367113, '-83|-85|-89']] ],
    #    [['00:21:91:1D:C0:D4', '00:25:86:4D:B4:C4'],
    #        [[6161, 6161, 39.898307, 116.367233, '-90|-90']] ] ]]
    if maxNI == 0:  # no intersection found
        wpplog.error('NO cluster found! Fingerprinting TERMINATED!')
        return []
    elif maxNI < CLUSTERKEYSIZE:
        # size of intersection set < offline key AP set size:4,
        # offline keymacs/keyrsss (not online maxmacs/maxrsss) need to be cut down.
        interpart_offline = True
        if maxNI < len_wlan:  # TODO: TBE.
            # size of intersection set < online AP set size(len_wlan) < CLUSTERKEYSIZE,
            # not only keymacs/keyrsss, but also maxmacs/maxrsss need to be cut down.
            interpart_online = True
        if verb:
            wpplog.debug('Partly[%d] matched cluster(s) found:' % maxNI)
    else:
        if verb:
            wpplog.debug('Full matched cluster(s) found:')
    if verb:
        wpplog.debug('keys:\n%s' % keys)

    # Evaluation|sort of similarity between online FP & radio map FP.
    # fps_cand: [ min_spid1:[cid,spid,lat,lon,rsss], min_spid2, ... ]
    # keys: ID and key APs of matched cluster(s) with max intersect APs.
    all_pos_lenrss = []
    fps_cand = []
    sums_cand = []
    for keyaps, keycfps in keys:
        if verb:
            wpplog.debug('keyaps:\n%s\nkeycfps:\n%s' % (keyaps, keycfps))
        # Fast fix when the ONLY 1 selected cid has ONLY 1 fp in 'cfps'.
        if len(keys) == 1 and len(keycfps) == 1:
            fps_cand = [list(keycfps[0])]
            break
        pos_lenrss = (array(keycfps)[:, 1:3].astype(float)).tolist()
        keyrsss = np_char_array(keycfps)[:, 4].split('|')  # 4: column order in cfps.tbl
        keyrsss = array([[float(rss) for rss in spid] for spid in keyrsss])
        for idx, pos in enumerate(pos_lenrss):
            pos_lenrss[idx].append(len(keyrsss[idx]))
        all_pos_lenrss.extend(pos_lenrss)
        # Rearrange key MACs/RSSs in 'keyrsss' according to intersection set 'keyaps'.
        if interpart_offline:
            if interpart_online:
                wl = deepcopy(wlan)  # mmacs->wl[0]; mrsss->wl[1]
                idxs_inters = [idx for idx, mac in enumerate(wlan[0]) if mac in keyaps]
                wl = wl[:, idxs_inters]
            else:
                wl = wlan
        else:
            wl = wlan
        idxs_taken = [keyaps.index(x) for x in wl[0]]
        keyrsss = keyrsss.take(idxs_taken, axis=1)
        mrsss = wl[1].astype(int)
        # Euclidean dist solving and sorting.
        sum_rss = np_sum((mrsss - keyrsss)**2, axis=1)
        fps_cand.extend(keycfps)
        sums_cand.extend(sum_rss)
        if verb:
            wpplog.debug('sum_rss:\n%s' % sum_rss)

    # Location estimation.
    if len(fps_cand) > 1:
        # KNN
        # lst_set_sums_cand: list format for set of sums_cand.
        # bound_dist: distance boundary for K-min distances.
        lst_set_sums_cand = array(list(set(sums_cand)))
        idx_bound_dist = argsort(lst_set_sums_cand)[:KNN][-1]
        bound_dist = lst_set_sums_cand[idx_bound_dist]
        idx_sums_sort = argsort(sums_cand)

        sums_cand = array(sums_cand)
        fps_cand = array(fps_cand)

        sums_cand_sort = sums_cand[idx_sums_sort]
        idx_bound_fp = searchsorted(sums_cand_sort, bound_dist, 'right')
        idx_sums_sort_bound = idx_sums_sort[:idx_bound_fp]
        #idxs_kmin = argsort(min_sums)[:KNN]
        sorted_sums = sums_cand[idx_sums_sort_bound]
        sorted_fps = fps_cand[idx_sums_sort_bound]
        if verb:
            wpplog.debug('k-dists:\n%s\nk-locations:\n%s' % (sorted_sums, sorted_fps))
        # DKNN
        if sorted_sums[0]:
            boundry = sorted_sums[0] * KWIN
        else:
            if sorted_sums[1]:
                boundry = KWIN
                # What the hell are the following two lines doing here!
                #idx_zero_bound = searchsorted(sorted_sums, 0, side='right')
                #sorted_sums[:idx_zero_bound] = boundry / (idx_zero_bound + .5)
            else:
                boundry = 0
        idx_dkmin = searchsorted(sorted_sums, boundry, side='right')
        dknn_sums = sorted_sums[:idx_dkmin].tolist()
        dknn_fps = sorted_fps[:idx_dkmin]
        if verb:
            wpplog.debug('dk-dists: \n%s\ndk-locations: \n%s' % (dknn_sums, dknn_fps))
        # Weighted_AVG_DKNN.
        num_dknn_fps = len(dknn_fps)
        if num_dknn_fps > 1:
            coors = dknn_fps[:, 1:3].astype(float)
            num_keyaps = array([rsss.count('|') + 1 for rsss in dknn_fps[:, -2]])
            # ww: weights of dknn weights.
            ww = np_abs(num_keyaps - len_wlan).tolist()
            #wpplog.debug(ww)
            if not np_all(ww):
                if np_any(ww):
                    ww_sort = np_sort(ww)
                    #wpplog.debug('ww_sort: %s' % ww_sort)
                    idx_dknn_sums_sort = searchsorted(ww_sort, 0, 'right')
                    #wpplog.debug('idx_dknn_sums_sort: %s' % idx_dknn_sums_sort)
                    ww_2ndbig = ww_sort[idx_dknn_sums_sort]
                    w_zero = ww_2ndbig / (len(ww) * ww_2ndbig)
                else:
                    w_zero = 1
                #for idx,sum in enumerate(ww):
                #    if not sum: ww[idx] = w_zero
                ww = [w if w else w_zero for w in ww]
            ws = array(ww) + dknn_sums
            weights = reciprocal(ws)
            if verb:
                wpplog.debug('coors:%s, weights:%s' % (coors, weights))
            posfix = average(coors, axis=0, weights=weights)
        else:
            posfix = array(dknn_fps[0][1:3]).astype(float)
        # ErrRange Estimation (more than 1 relevant cluster).
        idxs_clusters = idx_sums_sort_bound[:idx_dkmin]
        if len(idxs_clusters) == 1:
            if maxNI == 1:
                poserr = 200
            else:
                poserr = 100
        else:
            if verb:
                wpplog.debug('idxs_clusters: %s\nall_pos_lenrss: %s' %
                             (idxs_clusters, all_pos_lenrss))
            #allposs_dknn = vstack(array(all_pos_lenrss, object)[idxs_clusters])
            allposs_dknn = array(all_pos_lenrss, object)[idxs_clusters]
            if verb:
                wpplog.debug('allposs_dknn: %s' % allposs_dknn)
            poserr = max(average([dist_km(posfix[1], posfix[0], p[1], p[0]) * 1000
                                  for p in allposs_dknn]), 100)
    else:
        fps_cand = fps_cand[0][:-2]
        if verb:
            wpplog.debug('location:\n%s' % fps_cand)
        posfix = array(fps_cand[1:3]).astype(float)
        # ErrRange Estimation (only 1 relevant cluster).
        N_fp = len(keycfps)
        if N_fp == 1:
            if maxNI == 1:
                poserr = 200
            else:
                poserr = 150
        else:
            if verb:
                wpplog.debug('all_pos_lenrss: %s' % all_pos_lenrss)
                wpplog.debug('posfix: %s' % posfix)
            poserr = max(np_sum([dist_km(posfix[1], posfix[0], p[1], p[0]) * 1000
                                 for p in all_pos_lenrss]) / (N_fp - 1), 100)
    ret = posfix.tolist()
    ret.append(poserr)
    return ret
def _argcheck(self, c):
    c = np.asarray(c)
    # the support is bounded above by 1/|c| when c > 0, unbounded otherwise;
    # c == 0 is not a valid shape parameter here
    self.b = where(c > 0, 1.0 / np_abs(c), inf)
    return where(c == 0, 0, 1)