import numpy as np
import xraylib


def f2(element, energy):
    """
    The imaginary part of the anomalous X-ray scattering factor for the
    selected input energy (or energies) in keV, using the xraylib backend.

    Parameters:
        element : name, symbol or atomic number (Z)
        energy  : energy in keV - scalar, list, tuple or numpy array

    Returns:
        f2 : float or numpy array - same size as the input energy
             imaginary part of the anomalous X-ray scattering factor
             at the input energy or energies
    """
    z = elementDB[element]["Z"]
    if isinstance(energy, (list, tuple, np.ndarray)):
        # force float dtype so integer energy inputs do not truncate the result
        f2 = np.zeros_like(energy, dtype=float)
        for i, enrg in enumerate(energy):
            f2[i] = xraylib.Fii(z, enrg)
    else:
        # scalar path: use `energy`, not the loop variable `enrg`
        f2 = xraylib.Fii(z, energy)
    return f2
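# A minimal usage sketch of f2 above, assuming xraylib is installed and the
# module-level elementDB resolves "Fe"; the inputs are illustrative only.
energies = np.array([7.0, 8.0, 9.0])   # keV
print(f2("Fe", energies))              # array of f'' values, one per energy
print(f2("Fe", 8.0))                   # scalar input returns a scalar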
def cs_total_kissel(element, energy):
    """
    Get the total cross section (photoionization with Kissel partial cross
    sections + Rayleigh + Compton) for a given element at a given energy
    (keV) using the xraylib backend.

    Parameters:
        element : name, symbol or atomic number (Z)
        energy  : energy in keV - scalar, list, tuple or numpy array

    Returns:
        xsec : float or numpy array - same size as the input energy
               total cross section in cm2/g
    """
    z = elementDB[element]["Z"]
    if isinstance(energy, (list, tuple, np.ndarray)):
        xsec = np.zeros_like(energy, dtype=float)
        for i, enrg in enumerate(energy):
            xsec[i] = xraylib.CS_Total_Kissel(z, enrg)
    else:
        xsec = xraylib.CS_Total_Kissel(z, energy)
    return xsec
def dayLengthArray(self, first_date, last_date, rad_lats):
    """
    Determine the number of hours of daylight for each date in a range
    of dates at each node in a 2D grid of latitudes.

    Arguments
    =========
    first_date : datetime.date or datetime.datetime
                 first calendar date in range
    last_date  : datetime.date or datetime.datetime
                 last calendar date in range
    rad_lats   : 2D numpy array, dtype=float
                 grid of latitudes in radians

    Returns
    =======
    3D numpy array, dtype=int, shape=(num days, rad_lats.shape)
    length of day for each date at each grid node.
    """
    # convert dates to factored climatological days
    clim_days = \
        self.climatologicalDayGrid(first_date, last_date, rad_lats.shape)
    # daylight hours
    daylens = N.zeros_like(clim_days)
    # adjust grid nodes > 40 degrees latitude
    # NOTE: the original passed an undefined `clim_day_factor` here;
    # `clim_days[nodes]` is the assumed intent
    nodes = N.where(rad_lats > LAT40_RADS)
    if len(nodes[0]) > 0:
        daylens[nodes] = \
            self._daylightAtLatsGT40(clim_days[nodes], rad_lats[nodes])
    # adjust grid nodes <= 40 degrees latitude
    # NOTE: the original called _daylightAtLatsGT40 for this branch too,
    # almost certainly a copy-paste bug; _daylightAtLatsLE40 is the assumed helper
    nodes = N.where(rad_lats <= LAT40_RADS)
    if len(nodes[0]) > 0:
        daylens[nodes] = \
            self._daylightAtLatsLE40(clim_days[nodes], rad_lats[nodes])
    # drop the decimal hours before returning
    return daylens.astype(int)
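# The latitude-band masking pattern above generalizes. A self-contained
# sketch with stand-in daylength rules (the gt40/le40 lambdas are
# hypothetical, not the class's actual helpers):
import numpy as np

LAT40_RADS = np.radians(40.0)

# stand-in daylength functions; the real class methods are not shown here
gt40 = lambda day, lat: 12.0 + 6.0 * np.sin(lat) * np.cos(2 * np.pi * day / 365.)
le40 = lambda day, lat: 12.0 + 2.0 * np.sin(lat) * np.cos(2 * np.pi * day / 365.)

rad_lats = np.radians(np.array([[25.0, 45.0], [38.0, 60.0]]))
day = 172  # an arbitrary climatological day

daylens = np.zeros_like(rad_lats)
hi = np.where(rad_lats > LAT40_RADS)   # high-latitude nodes
lo = np.where(rad_lats <= LAT40_RADS)  # low-latitude nodes
daylens[hi] = gt40(day, rad_lats[hi])
daylens[lo] = le40(day, rad_lats[lo])
print(daylens.astype(int))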
def cs_photo(element, energy):
    """
    Get the photoionization cross section for a given element when excited
    with X-rays of a given energy (keV) using the xraylib backend.

    Parameters:
        element : name, symbol or atomic number (Z)
        energy  : energy in keV - scalar, list, tuple or numpy array

    Returns:
        cs_photo : float or numpy array - same size as the input energy
                   photoionization cross section for this element at this
                   incident energy, in cm2/g
    """
    z = elementDB[element]["Z"]
    if isinstance(energy, (list, tuple, np.ndarray)):
        xsec = np.zeros_like(energy, dtype=float)
        for i, enrg in enumerate(energy):
            xsec[i] = xraylib.CS_Photo(z, enrg)
    else:
        xsec = xraylib.CS_Photo(z, energy)
    return xsec
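# The three xraylib wrappers above (f2, cs_total_kissel, cs_photo) share the
# same scalar-or-array dispatch. A hedged sketch of a generic helper that
# would factor it out; the _vectorize_xraylib name is hypothetical:
import numpy as np
import xraylib

def _vectorize_xraylib(func, z, energy):
    # apply a scalar xraylib function over a scalar, list, tuple,
    # or numpy array of energies (keV)
    if isinstance(energy, (list, tuple, np.ndarray)):
        return np.array([func(z, e) for e in energy], dtype=float)
    return func(z, energy)

# e.g. cs_photo(element, energy) would then reduce to:
#     return _vectorize_xraylib(xraylib.CS_Photo, elementDB[element]["Z"], energy)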
                 impact=0.1,
                 rprs=0.05,
                 ecosw=0.0,
                 esinw=0.0,
                 occ=0.0,
                 rvamp=100.)  # radial velocity semi-amplitude in m/s
    M.add_data(time=time, itime=np.zeros_like(time) + cadence)
    M.add_rv(time=rvtime,  # radial velocity observation timestamps
             itime=np.zeros_like(rvtime) + cadence)  # integration time of each timestamp
    tmod = M.transitmodel
    rvmod = M.rvmodel  # assumed attribute name; the original `rvmodel = M.rvmodelv` looks like a typo
    return time, tmod, rvmod


if __name__ == '__main__':
    # simulate some data
    time, tmod, rvmod = makeFakeData()
    ferr = np.zeros_like(tmod) + 1.E-4
    rverr = np.zeros_like(rvmod) + 5.
    # add some white noise (assumed completion; this line was truncated in the original)
    tmod = tmod + np.random.randn(len(tmod)) * ferr
import numpy as np


def VFIsolve(funcname, Xbar, Ybar, Sigma, nx, ny, nz, npts):
    # NOTE: this fragment references names assumed to be defined elsewhere in
    # the module (knpts, kgrid, ellgrid, sigma_z, rho_z, beta, params,
    # Modeldefs, rouwen)
    # set VF iteration parameters
    ccrit = 1.0E-10
    maxwhile = 1000
    # find sizes and shapes of functions
    XZdims = []
    for i in range(0, nx):
        XZdims.append(npts)
    for i in range(0, nz):
        XZdims.append(npts)
    znpts = npts  # moved before first use; the original assigned it further down
    # initialize value, policy and jump functions
    Vf1 = np.ones(XZdims) * (-100)
    Vf1new = np.zeros_like(Vf1)
    # need a vector stored at each node
    Pf1 = np.zeros((knpts, znpts))
    Jf1 = np.zeros((knpts, znpts))
    # set up Markov approximation of AR(1) process using Rouwenhorst method
    spread = 5.  # number of standard deviations above and below 0
    zstep = 4. * spread * sigma_z / (npts - 1)
    # Markov transition probabilities, current z in cols, next z in rows
    Pimat, zgrid = rouwen(rho_z, 0., zstep, znpts)
    # discretize X variables
    Xlow = .6 * Xbar
    Xhigh = 1.4 * Xbar
    for i in range(0, nx):
        Xgrid = np.linspace(Xlow[i], Xhigh[i], num=npts)
    # discretize Y variables

    # run the program to get the value function (Vf1)
    count = 0
    dist = 100.
    nconv = True
    while nconv:
        count = count + 1
        if count > maxwhile:
            break
        for i1 in range(0, knpts):  # over kt
            for i2 in range(0, znpts):  # over zt, searching the value for the stochastic shock
                maxval = -100000000000
                for i3 in range(0, knpts):  # over k_t+1
                    for i4 in range(0, knpts):  # over ell_t
                        Y, w, r, T, c, i, u = Modeldefs(kgrid[i3], kgrid[i1],
                                                        ellgrid[i4], zgrid[i2], params)
                        temp = u
                        for i5 in range(0, znpts):  # over z_t+1
                            temp = temp + beta * Vf1[i3, i5] * Pimat[i2, i5]
                        # guard against complex or NaN utilities
                        if np.iscomplex(temp):
                            temp = -1000000000
                        if np.isnan(temp):
                            temp = -1000000000
                        if temp > maxval:
                            maxval = temp
                            Vf1new[i1, i2] = temp
                            Pf1[i1, i2] = kgrid[i3]
                            Jf1[i1, i2] = ellgrid[i4]
        # calculate the new distance measure, we use maximum absolute difference
        dist = np.amax(np.abs(Vf1 - Vf1new))
        if dist < ccrit:
            nconv = False
        # report the results of the current iteration
        print('iteration: ', count, 'distance: ', dist)
        # replace the value function with the new one
        Vf1 = 1.0 * Vf1new
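# For reference, a minimal self-contained value-function iteration on a toy
# deterministic growth model (log utility, full depreciation), independent of
# the module-level names the fragment above assumes; all parameters here are
# illustrative:
import numpy as np

alpha, beta, A = 0.36, 0.96, 1.0
kgrid = np.linspace(0.05, 0.5, 100)
V = np.zeros(len(kgrid))

for it in range(1000):
    # consumption for every (k, k') pair; infeasible (c <= 0) choices get -inf
    c = A * kgrid[:, None] ** alpha - kgrid[None, :]
    util = np.where(c > 0, np.log(np.maximum(c, 1e-12)), -np.inf)
    # Bellman update: V(k) = max_k' u(c) + beta V(k')
    Vnew = np.max(util + beta * V[None, :], axis=1)
    dist = np.max(np.abs(Vnew - V))
    V = Vnew
    if dist < 1e-8:
        break

policy = kgrid[np.argmax(util + beta * V[None, :], axis=1)]
print('converged after', it + 1, 'iterations')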
            slice_x = batch_test_x[:, :, sl[i]:sl[i+1]]
            test_model_m = TEST_M[i]
            loss_test_m, out_m = test_model_m(slice_x, slice_m, slice_f)
            if tpe != 1:  # `is not 1` compared identity, not value
                test_model_f = TEST_F[i]
                loss_test_f, out_f = test_model_f(slice_x, slice_f, slice_m)
            stop_time = time.time() - start_time
            print(('-'*5 + ' epoch = %i ' + '-'*5 + ' model = %i ' + '-'*5 +
                   ' time = %.4f ' + '-'*5) % (e, i, stop_time))
            print('M loss TEST = %.10f ' % loss_test_m, end='')
            if tpe != 1:
                print('F loss TEST = %.10f ' % loss_test_f)
            print('-'*30)

# final test
test_x, test_m, test_f = create_batches(100, False)
out_out_m = np.zeros_like(test_x)
out_out_f = np.zeros_like(test_x)
for i in range(6):
    slice_m = test_m[:, :, sl[i]:sl[i+1]]
    slice_f = test_f[:, :, sl[i]:sl[i+1]]
    slice_x = test_x[:, :, sl[i]:sl[i+1]]
    test_model_m = TEST_M[i]
    l_m, out_m = test_model_m(slice_x, slice_m, slice_f)
    out_out_m[:, :, sl[i]:sl[i+1]] = out_m
    print('M TEST = %.10f' % l_m)
    if tpe != 1:
        test_model_f = TEST_F[i]
        l_f, out_f = test_model_f(slice_x, slice_f, slice_m)
        out_out_f[:, :, sl[i]:sl[i+1]] = out_f
        print('F TEST = %.10f' % l_f)
def __init__(self, Wx, Wh, b):
    self.params = [Wx, Wh, b]
    self.grads = [np.zeros_like(Wx), np.zeros_like(Wh), np.zeros_like(b)]
    # initialize cache to None; it stores the intermediate data used
    # when computing the backward pass
    self.cache = None
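# A minimal instantiation sketch, assuming the __init__ above belongs to a
# simple RNN-style layer; the class name RNN and the sizes D, H are
# illustrative assumptions, not taken from the original:
import numpy as np

D, H = 3, 4  # illustrative input and hidden sizes
Wx = np.random.randn(D, H).astype('f')
Wh = np.random.randn(H, H).astype('f')
b = np.zeros(H).astype('f')

layer = RNN(Wx, Wh, b)  # hypothetical class name
# gradients start zeroed, with the same shapes as their parameters
assert all((g == 0).all() for g in layer.grads)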