def test_function_overloading():
    a = pe.pseudo_Obs(17, 2.9, 'e1')
    b = pe.pseudo_Obs(4, 0.8, 'e1')

    fs = [lambda x: x[0] + x[1], lambda x: x[1] + x[0],
          lambda x: x[0] - x[1], lambda x: x[1] - x[0],
          lambda x: x[0] * x[1], lambda x: x[1] * x[0],
          lambda x: x[0] / x[1], lambda x: x[1] / x[0],
          lambda x: np.exp(x[0]), lambda x: np.sin(x[0]),
          lambda x: np.cos(x[0]), lambda x: np.tan(x[0]),
          lambda x: np.log(x[0]), lambda x: np.sqrt(np.abs(x[0])),
          lambda x: np.sinh(x[0]), lambda x: np.cosh(x[0]),
          lambda x: np.tanh(x[0])]

    for i, f in enumerate(fs):
        t1 = f([a, b])
        t2 = pe.derived_observable(f, [a, b])
        c = t2 - t1
        assert c.is_zero()

    assert np.log(np.exp(b)) == b
    assert np.exp(np.log(b)) == b
    assert np.sqrt(b**2) == b
    assert np.sqrt(b)**2 == b

    np.arcsin(1 / b)
    np.arccos(1 / b)
    np.arctan(1 / b)
    np.arctanh(1 / b)
    np.sinc(1 / b)
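
# Hedged usage sketch (not part of the test above): the same operator
# overloading lets numpy's inverse hyperbolic functions act directly on
# pyerrors observables; gamma_method() then estimates the propagated error.
import numpy as np
import pyerrors as pe

b = pe.pseudo_Obs(4, 0.8, 'e1')
c = np.arctanh(1 / b)   # 1/b has central value 0.25, inside arctanh's domain (-1, 1)
c.gamma_method()
print(c)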

def add_to_funcs(low, upp, i):
    if (low is None) and (upp is None):
        # unconstrained parameter: identity transform in both directions
        funcs.append(lambda x: pass_through(x))
        inv_f.append(lambda x: pass_through(x))
    elif (low == 0) and (upp == 1):
        # parameter bounded to (0, 1): a scaled arctanh maps it to the real
        # line, the matching tanh expression maps it back
        D = 10
        funcs.append(lambda x: D * np.arctanh((2 * x) - 1))
        inv_f.append(lambda x: (np.tanh(x / D) + 1) / 2)
    elif (upp is None):
        # only a lower bound: shift by low and apply the inverse adjusted ReLU
        funcs.append(lambda x: (inv_adj_relu(x - low)))
        inv_f.append(lambda x: (adj_relu(x) + low))
    elif (low is None):
        # only an upper bound: reversed variant of the adjusted-ReLU pair
        funcs.append(lambda x: inv_rev_adj_relu(x - np.copy(upp)))
        inv_f.append(lambda x: np.copy(upp) + rev_adj_relu(x))
    else:
        # any other finite bounds fall back to the identity transform
        funcs.append(lambda x: pass_through(x))
        inv_f.append(lambda x: pass_through(x))
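
# Hedged usage sketch (not from the original source): a standalone round-trip
# check for the (low, upp) == (0, 1) branch above, assuming the same scale
# factor D = 10; forward() maps a (0, 1)-bounded value to the real line and
# inverse() maps it back.
import numpy as np

D = 10
forward = lambda x: D * np.arctanh(2 * x - 1)   # (0, 1) -> (-inf, inf)
inverse = lambda y: (np.tanh(y / D) + 1) / 2    # (-inf, inf) -> (0, 1)

p = np.array([0.05, 0.5, 0.95])
assert np.allclose(inverse(forward(p)), p)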

def test_arctanh():
    fun = lambda x: 3.0 * np.arctanh(x)
    d_fun = grad(fun)
    check_grads(fun, 0.2)
    check_grads(d_fun, 0.3)

def test_arctanh():
    fun = lambda x: 3.0 * np.arctanh(x)
    check_grads(fun)(0.2)
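
# Hedged, self-contained variant of the test above, assuming the curried
# check_grads from autograd.test_util (the two-argument form in the earlier
# snippet belongs to an older test helper); grad is shown only to make the
# checked quantity explicit.
import autograd.numpy as np
from autograd import grad
from autograd.test_util import check_grads

fun = lambda x: 3.0 * np.arctanh(x)

check_grads(fun, modes=['fwd', 'rev'])(0.2)   # verify forward- and reverse-mode derivatives
print(grad(fun)(0.2))                         # analytically 3 / (1 - 0.2**2) = 3.125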

def arctanh(a: Numeric):
    return anp.arctanh(a)

def inverse(self, p):
    return np.arctanh(np.clip(p, 0.4, 0.6))
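
# Hedged note (the surrounding class is not shown above): clipping p to
# [0.4, 0.6] keeps arctanh well away from its singularities at +/-1, so the
# returned values stay bounded, roughly within [0.4236, 0.6931].
import numpy as np

p = np.array([0.0, 0.5, 1.0])
out = np.arctanh(np.clip(p, 0.4, 0.6))
assert np.all(np.isfinite(out))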

if model_type == "scalar":
    filename_prex = "figure/initial_scalar_" + state_name + "_"
    savefilename = "data/initial_scalar_solution_" + state_name

    opt_data = np.load(savefilename + "_full.npz", allow_pickle=True)
    configurations = opt_data["configurations"]
    controls = opt_data["controls_opt"]
    simulation_first_confirmed = opt_data["simulation_first_confirmed"]
    parameters = opt_data["parameters_opt"]
    # y0, t_total, N_total, number_group, population_proportion, \
    # t_control, number_days_per_control_change, number_control_change_times, number_time_dependent_controls = configurations

    # map the (0, 1)-bounded optimal controls to the unconstrained variable alpha
    alpha = np.arctanh(2 * controls[0] - 1)
    controls = (alpha, )

    misfit = Misfit(configurations, parameters, controls, simulation_first_confirmed)

    # sigma = 10 * np.ones_like(x)
    prior = Laplacian(misfit.dimension, gamma=10, mean=alpha)
    # prior = Laplacian(misfit.dimension, gamma=10, regularization=False)

    model = Model(prior, misfit)


if __name__ == "__main__":
    print(misfit.t_total)

sum(temporalKL))


if __name__ == '__main__':
    # with open('powerData.pkl') as f:
    #     X = pickle.load(f)
    with open('pendulous.pkl') as f:
        X = pickle.load(f)

    inputDim = 121
    seqLen = 200
    numSeq = 1
    step_size = 0.0001

    fakeData = np.random.randn(seqLen, numSeq, inputDim)
    print(fakeData.shape)

    # Squash 11x11 frames with pixel values strictly inside (0, 1) into
    # unconstrained 121-vectors via arctanh (which diverges at 0 and 1),
    # and map them back via tanh.
    frame_to_vect = lambda frame: np.reshape(np.arctanh(2.0 * frame - 1.0), 121)
    vect_to_frame = lambda vect: np.reshape(0.5 * np.tanh(vect) + 0.5, (11, 11))

    dataDims = {'x': 80, 'u': 1, 'a': 20}
    dataDims = {'x': 121, 'u': 0, 'a': 0}

    X = map(frame_to_vect, X)
    X = np.concatenate(map(lambda x: np.expand_dims(x, axis=0), X), axis=0)

    inputs = {'x': np.expand_dims(X, axis=0)}
    outputs = {'x': inputs['x'][:, 1:, :]}
    inputs['x'] = inputs['x'][:, :-1, :]
    # inputs = {'x': X}
    # inputs = {'x': fakeData[:, :, :dataDims['x']],
    #           'u': fakeData[:, :, dataDims['x']:dataDims['x'] + dataDims['u']],
    #           'a': fakeData[:, :, :dataDims['a']]}

    hiddenDims = {

def inv_sigmoid(x):
    # arctanh(2x - 1) = 0.5 * log(x / (1 - x)), i.e. half the standard logit;
    # it inverts y -> (tanh(y) + 1) / 2, which equals sigmoid(2y).
    return np.arctanh(2.0 * x - 1.0)
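
# Hedged check (not part of the original source): numerically confirm that
# inv_sigmoid above inverts y -> (tanh(y) + 1) / 2, i.e. sigmoid(2y), and
# equals half of the standard logit, rather than inverting the plain sigmoid.
import numpy as np

sigmoid = lambda y: 1.0 / (1.0 + np.exp(-y))   # standard logistic function
x = np.array([0.1, 0.5, 0.9])

assert np.allclose((np.tanh(inv_sigmoid(x)) + 1.0) / 2.0, x)      # exact inverse pair
assert np.allclose(sigmoid(2.0 * inv_sigmoid(x)), x)              # equivalently sigmoid(2y)
assert np.allclose(2.0 * inv_sigmoid(x), np.log(x / (1.0 - x)))   # = logit(x)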