def create_pf():
    u = np.linspace(-1.2 / np.sqrt(2), 1.2 / np.sqrt(2), endpoint=True, num=60)
    v = np.linspace(-1.2 / np.sqrt(2), 1.2 / np.sqrt(2), endpoint=True, num=60)
    U, V = np.meshgrid(u, v)
    u, v = U.flatten(), V.flatten()
    uv = np.stack([u, v]).T
    print(f"uv.shape={uv.shape}")

    ls = []
    for x in uv:
        # generate solutions on the Pareto front:
        # x = np.array([x1, x1])
        f, f_dx = concave_fun_eval(x)
        ls.append(f)
    ls = np.stack(ls)

    po, pf = [], []
    for i, x in enumerate(uv):
        l_i = ls[i]
        if np.any(np.all(l_i > ls, axis=1)):
            continue
        else:
            po.append(x)
            pf.append(l_i)
    po = np.stack(po)
    pf = np.stack(pf)

    pf_tri = mtri.Triangulation(po[:, 0], po[:, 1])
    tri = mtri.Triangulation(u, v)

    return pf, pf_tri, ls, tri
def get_neighs_sq(inputs):
    verts = list()
    if inputs.shape[2] == 576:
        # 24x24 grid flattened row-major: 2x2 max pooling to a 12x12 output
        for i in range(12):
            for j in range(12):
                x = 2 * i + 48 * j
                a = inputs[:, :, x]
                b = inputs[:, :, x + 1]
                c = inputs[:, :, x + 24]
                d = inputs[:, :, x + 25]
                temp = np.stack((a, b, c, d))
                temp = np.amax(temp, axis=0)
                verts.append(temp)
    elif inputs.shape[2] == 64:
        # 8x8 grid: 2x2 max pooling to a 4x4 output
        for i in range(4):
            for j in range(4):
                x = 2 * i + 16 * j
                a = inputs[:, :, x]
                b = inputs[:, :, x + 1]
                c = inputs[:, :, x + 8]
                d = inputs[:, :, x + 9]
                temp = np.stack((a, b, c, d))
                temp = np.amax(temp, axis=0)
                verts.append(temp)
    return verts
def forward_pass(self, inputs, param_vector):
    #n = mesh_traversal.get_neighs_sq(inputs)
    #result = np.array(n)
    #result = np.moveaxis(result, 0, 2)
    # if inputs.shape[2] == 576:
    new_shape = inputs.shape[:2]
    for i in [0]:
        pool_width = self.pool_shape[i]
        img_width = inputs.shape[i + 2]
        new_dim = int((np.sqrt(img_width) / np.sqrt(pool_width)))
        new_shape += (new_dim * new_dim, )
    result = []
    for i in range(new_dim):
        for j in range(new_dim):
            x = (3 * j + 25) + 24 * 3 * i
            n = mesh_traversal.get_neighs_sq2(adj_mtx, x)
            a = inputs[:, :, n[0]]
            b = inputs[:, :, n[1]]
            c = inputs[:, :, n[2]]
            d = inputs[:, :, n[3]]
            e = inputs[:, :, n[4]]
            f = inputs[:, :, n[5]]
            g = inputs[:, :, n[6]]
            h = inputs[:, :, n[7]]
            temp = np.stack((a, b, c, d, e, f, g, h))
            temp = np.amax(temp, axis=0)
            result.append(temp)
    result = np.stack(result)
    result = np.moveaxis(result, 0, 2)
    return result
def simulate_wind_tunnel(args, vx, vy, occlusion):
    '''Code modified from bit.ly/2Yy8LXs. Physics is based on bit.ly/386n3BR'''
    rows, cols = args.tunnel_shape
    red_smoke, blue_smoke = np.zeros_like(vx), np.zeros_like(vx)  # add smoke
    red_smoke[rows // 4:rows // 2] = 0.9  # initialize red smoke band
    blue_smoke[rows // 2:3 * rows // 4] = 0.9  # ...and blue smoke band

    rgb = np.stack([red_smoke, occlusion, blue_smoke], axis=-1)
    frames = [rgb]  # visualize occlusion and flow of smoke

    # Step through the simulation
    vx, vy = project(vx, vy, occlusion, args.filter_width)
    for t in range(args.simulator_steps):
        vx_updated = advect(vx, vx, vy)  # self-advection of vx
        vy_updated = advect(vy, vx, vy)  # self-advection of vy
        vx, vy = project(vx_updated, vy_updated, occlusion, args.filter_width)  # vol. constraint
        red_smoke = advect(red_smoke, vx, vy)  # advect / occlude the smoke
        red_smoke = occlude(red_smoke, occlusion)
        blue_smoke = advect(blue_smoke, vx, vy)
        blue_smoke = occlude(blue_smoke, occlusion)
        fields = ([red_smoke, blue_smoke, vx, vy],
                  ['red_smoke', 'blue_smoke', 'vx', 'vy'])
        [red_smoke, blue_smoke, vx, vy] = enforce_boundary_conditions(*fields, args)
        rgb = np.stack([red_smoke, occlusion, blue_smoke], axis=-1)
        frames.append(rgb)
    return vx, vy, frames
def apply_rotation(obj, coord_old, src_folder):
    coord_vec_ls = []
    for i in range(3):
        f = os.path.join(src_folder, 'coord{}_vec.npy'.format(i))
        coord_vec_ls.append(np.load(f))
    s = obj.shape
    coord0_vec, coord1_vec, coord2_vec = coord_vec_ls

    coord_old = np.tile(coord_old, [s[0], 1])
    coord1_old = coord_old[:, 0]
    coord2_old = coord_old[:, 1]
    coord_old = np.stack([coord0_vec, coord1_old, coord2_old], axis=1).transpose()
    # print(sess.run(coord_old))

    obj_channel_ls = np.split(obj, s[3], 3)
    obj_rot_channel_ls = []
    for channel in obj_channel_ls:
        channel_flat = channel.flatten()
        ind = coord_old[0] * (s[1] * s[2]) + coord_old[1] * s[2] + coord_old[2]
        ind = ind.astype('int')
        obj_chan_new_val = channel_flat[ind]
        obj_rot_channel_ls.append(np.reshape(obj_chan_new_val, s[:-1]))
    obj_rot = np.stack(obj_rot_channel_ls, axis=3)
    return obj_rot
def test_batch_softmax():
    X = np.random.randint(1, 10, size=(10, 5))
    # use list comprehensions: np.stack requires a sequence, not a
    # generator, in recent NumPy
    p0 = np.stack([softmax(x_) for x_ in X])
    p1 = batch_softmax(X, axis=1)
    p0_0 = np.stack([softmax(x_) for x_ in X.T])
    p1_0 = batch_softmax(X, axis=0)
    assert np.all(np.equal(p0, p1))
    assert np.all(np.equal(p0_0.round(4), p1_0.round(4).T))
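# Hypothetical reference implementations of the helpers the test above
# assumes; the real `softmax` and `batch_softmax` live elsewhere in the
# codebase, so this is only a sketch of the expected semantics.
def softmax(x):
    # subtract the max for numerical stability
    e = np.exp(x - np.max(x))
    return e / e.sum()

def batch_softmax(X, axis=1):
    # normalize along `axis` so every slice sums to 1
    e = np.exp(X - np.max(X, axis=axis, keepdims=True))
    return e / e.sum(axis=axis, keepdims=True)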
def gp0(self, m, s):
    """
    Compute joint predictions for MGP with uncertain inputs.
    """
    assert hasattr(self, "hyp")
    if not hasattr(self, "K"):
        self.cache()
    x = np.atleast_2d(self.inputs)
    y = np.atleast_2d(self.targets)
    n, D = x.shape
    n, E = y.shape
    X = self.hyp
    iK = self.iK
    beta = self.alpha

    m = np.atleast_2d(m)
    inp = x - m

    # Compute the predicted mean and IO covariance.
    iL = np.stack([np.diag(exp(-X[i, :D])) for i in range(E)])
    iN = np.matmul(inp, iL)
    B = iL @ s @ iL + np.eye(D)
    t = np.stack([solve(B[i].T, iN[i].T).T for i in range(E)])
    q = exp(-np.sum(iN * t, 2) / 2)
    qb = q * beta.T
    tiL = np.matmul(t, iL)
    c = exp(2 * X[:, D]) / sqrt(det(B))

    M = np.sum(qb, 1) * c
    V = (np.transpose(tiL, [0, 2, 1]) @ np.expand_dims(qb, 2)).reshape(E, D).T * c
    k = 2 * X[:, D].reshape(E, 1) - np.sum(iN**2, 2) / 2

    # Compute the predicted covariance.
    inp = np.expand_dims(inp, 0) / np.expand_dims(exp(2 * X[:, :D]), 1)
    ii = np.repeat(inp[:, newaxis, :, :], E, 1)
    ij = np.repeat(inp[newaxis, :, :, :], E, 0)

    iL = np.stack([np.diag(exp(-2 * X[i, :D])) for i in range(E)])
    siL = np.expand_dims(iL, 0) + np.expand_dims(iL, 1)
    R = np.matmul(s, siL) + np.eye(D)
    t = 1 / sqrt(det(R))
    iRs = np.stack([solve(R.reshape(-1, D, D)[i], s) for i in range(E * E)])
    iRs = iRs.reshape(E, E, D, D)
    Q = exp(k[:, newaxis, :, newaxis] + k[newaxis, :, newaxis, :]
            + maha(ii, -ij, iRs / 2))

    S = np.einsum('ji,iljk,kl->il', beta, Q, beta)
    tr = np.hstack([np.sum(Q[i, i] * iK[i]) for i in range(E)])
    S = (S - np.diag(tr)) * t + np.diag(exp(2 * X[:, D]))
    S = S - np.matmul(M[:, newaxis], M[newaxis, :])

    return M, S, V
def save_rotation_lookup(array_size, n_theta, dest_folder=None):
    image_center = [np.floor(x / 2) for x in array_size]
    coord0 = np.arange(array_size[0])
    coord1 = np.arange(array_size[1])
    coord2 = np.arange(array_size[2])

    coord2_vec = np.tile(coord2, array_size[1])
    coord1_vec = np.tile(coord1, array_size[2])
    coord1_vec = np.reshape(coord1_vec, [array_size[1], array_size[2]])
    coord1_vec = np.reshape(np.transpose(coord1_vec), [-1])
    coord0_vec = np.tile(coord0, [array_size[1] * array_size[2]])
    coord0_vec = np.reshape(coord0_vec, [array_size[1] * array_size[2], array_size[0]])
    coord0_vec = np.reshape(np.transpose(coord0_vec), [-1])

    # move origin to image center
    coord1_vec = coord1_vec - image_center[1]
    coord2_vec = coord2_vec - image_center[2]

    # create matrix of coordinates
    coord_new = np.stack([coord1_vec, coord2_vec]).astype(np.float32)

    # create rotation matrix
    theta_ls = np.linspace(0, 2 * np.pi, n_theta)
    coord_old_ls = []
    for theta in theta_ls:
        m_rot = np.array([[np.cos(theta), -np.sin(theta)],
                          [np.sin(theta), np.cos(theta)]])
        coord_old = np.matmul(m_rot, coord_new)
        # np.int was removed in NumPy 1.24; use the builtin int instead
        coord1_old = np.round(coord_old[0, :] + image_center[1]).astype(int)
        coord2_old = np.round(coord_old[1, :] + image_center[2]).astype(int)
        # clip coordinates
        coord1_old = np.clip(coord1_old, 0, array_size[1] - 1)
        coord2_old = np.clip(coord2_old, 0, array_size[2] - 1)
        coord_old = np.stack([coord1_old, coord2_old], axis=1)
        coord_old_ls.append(coord_old)

    if dest_folder is None:
        dest_folder = 'arrsize_{}_{}_{}_ntheta_{}'.format(
            array_size[0], array_size[1], array_size[2], n_theta)
    if not os.path.exists(dest_folder):
        os.mkdir(dest_folder)
    for i, arr in enumerate(coord_old_ls):
        np.save(os.path.join(dest_folder, '{:04}'.format(i)), arr)

    coord1_vec = coord1_vec + image_center[1]
    coord1_vec = np.tile(coord1_vec, array_size[0])
    coord2_vec = coord2_vec + image_center[2]
    coord2_vec = np.tile(coord2_vec, array_size[0])
    for i, coord in enumerate([coord0_vec, coord1_vec, coord2_vec]):
        np.save(os.path.join(dest_folder, 'coord{}_vec'.format(i)), coord)

    return coord_old_ls
def expected_log_beta(a, b, alpha, beta):
    """
    E[log p(x)]  where p(x) = Beta(a, b)
                       q(x) = Beta(alpha, beta)
    """
    # stack parameters along last axis (our Dirichlet convention)
    # and treat as dirichlet
    new_a = np.stack([a, b], axis=-1)
    new_alpha = np.stack([alpha, beta], axis=-1)
    return expected_log_dirichlet(new_a, new_alpha)
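# Monte-Carlo sanity check (a sketch, not part of the library): with
# q(x) = Beta(alpha, beta), the sampled average of log Beta(x; a, b)
# should match expected_log_beta(a, b, alpha, beta) as documented above.
from scipy import stats
a, b, alpha, beta = 2.0, 3.0, 4.0, 5.0
xs = stats.beta(alpha, beta).rvs(size=200000, random_state=0)
mc_estimate = np.mean(stats.beta(a, b).logpdf(xs))
print(mc_estimate, expected_log_beta(a, b, alpha, beta))  # should agree to ~2 decimals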
def any_perpendicular(vecs):
    # For 'vecs' of shape (..., 3), this returns an array of shape
    # (..., 3) in which every corresponding vector is perpendicular
    # (but nonzero). 'vecs' does not need to be normalized, and the
    # returned vectors are not normalized.
    x, y, z = [vecs[..., i] for i in range(3)]
    a0 = np.zeros_like(x)
    # The condition has the extra dimension added to make it (..., 1)
    # so it broadcasts properly with the branches, which are (..., 3):
    p = np.where((np.abs(z) < np.abs(x))[..., None],
                 np.stack((y, -x, a0), axis=-1),
                 np.stack((a0, -z, y), axis=-1))
    return p
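# Quick check (illustration only): every returned vector should be
# orthogonal to its input and have nonzero length.
vecs = np.random.randn(1000, 3)
perps = any_perpendicular(vecs)
assert np.allclose(np.sum(vecs * perps, axis=-1), 0)
assert np.all(np.linalg.norm(perps, axis=-1) > 0)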
def get_field(xmin=-1.2, xmax=1.2, ymin=-1.2, ymax=1.2, gridsize=20):
    field = {'meta': locals()}

    # meshgrid to get vector field
    b, a = np.meshgrid(np.linspace(xmin, xmax, gridsize),
                       np.linspace(ymin, ymax, gridsize))
    ys = np.stack([b.flatten(), a.flatten()])

    # get vector directions
    dydt = [dynamics_fn(None, y) for y in ys.T]
    dydt = np.stack(dydt).T

    field['x'] = ys.T
    field['dx'] = dydt.T
    return field
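# `get_field` (and the trajectory helpers below) assume a task-specific
# dynamics_fn(t, y) defined elsewhere. A minimal sketch, assuming an
# ideal spring / harmonic oscillator with Hamiltonian H = (q^2 + p^2) / 2:
def dynamics_fn(t, y):
    q, p = np.split(y, 2)
    return np.concatenate([p, -q])  # dq/dt = p, dp/dt = -q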
def test_multiple_expectation_jacobian_positional(self):
    "Tests that qnodes using positional arguments return correct gradients for multiple expectation values."
    self.logTestName()

    a, b, c = 0.5, 0.54, 0.3

    def circuit(x, y, z):
        qml.QubitStateVector(np.array([1, 0, 1, 1]) / np.sqrt(3), [0, 1])
        qml.Rot(x, y, z, 0)
        qml.CNOT([0, 1])
        return qml.expval.PauliZ(0), qml.expval.PauliY(1)

    circuit = qml.QNode(circuit, self.dev2)

    # compare our manual Jacobian computation to theoretical result
    # Note: circuit.jacobian actually returns a full jacobian in this case
    res = circuit.jacobian(np.array([a, b, c]))
    self.assertAllAlmostEqual(self.expected_jacobian(a, b, c), res, delta=self.tol)

    # compare our manual Jacobian computation to autograd
    # not sure if this is the intended usage of jacobian
    jac0 = qml.jacobian(circuit, 0)
    jac1 = qml.jacobian(circuit, 1)
    jac2 = qml.jacobian(circuit, 2)
    res = np.stack([jac0(a, b, c), jac1(a, b, c), jac2(a, b, c)]).T
    self.assertAllAlmostEqual(self.expected_jacobian(a, b, c), res, delta=self.tol)
def pos_traj(sts1, sts2, m0, m1, m2, r0, noises, nt, dt, tau, lag, rdotf=rdot_2d3w_S):
    # sts = on/off-ness of bmp at each of the T stages -- should be T x M -- currently T = 6
    # sigParams = parameters for getSigSeries function
    # r0 = initial position on fate landscape 1x2
    # noises = noise at each timestep for each data point --> nt x M x 2
    # nt = number of timesteps (integer)
    # dt = length of timesteps (float)
    # tau = timescale (float)
    l0s = np.zeros((nt, sts1.shape[1])) + m0[0]
    l1s = getSigSeriesG(sts1, nt, *m1[0:3])  # nt x M
    l2s = getSigSeriesG(sts2, nt, *m2[0:3])  # nt x M
    v0 = np.array([[np.cos(m0[1]), np.sin(m0[1])]])
    v1 = np.array([[np.cos(m1[3]), np.sin(m1[3])]])
    v2 = np.array([[np.cos(m2[3]), np.sin(m2[3])]])
    tilt = getTilt(l0s, v0) + getTilt(l1s, v1) + getTilt(l2s, v2)
    # should be able to evaluate m(t) = l0v0+l1v1+l2v2 and feed that into rdot
    # to save some computation time...

    #rs = np.zeros((sts1.shape[1], nt, r0.shape[0]))  # M x nt x 2
    #rs[:,0] = r0
    #rs = np.hstack([r0*np.ones((sts1.shape[1],1,r0.shape[0])), np.zeros((sts1.shape[1], nt-1, r0.shape[0]))])
    rs = [r0 * np.ones((sts1.shape[1], r0.shape[0]))]
    for t in range(0, lag):
        #rs[:,t+1] = rs[:,t] + dt*noises[t]
        rs.append(rs[t] + dt * noises[t])
    for t in range(lag, nt - 1):
        #rs[:,t+1] = rs[:,t] + dt*(rdotf(rs[:,t], tau, tilt[t]) + noises[t])
        rs.append(rs[t] + dt * (rdotf(rs[t], tau, tilt[t]) + noises[t]))
    return np.stack(rs, axis=1)
def __init__(self, m=4, n=3, initial_path='initial_airfoil/naca0012.dat',
             config_fname='op_conditions.ini'):
    # Airfoil parameters
    self.m = m
    self.n = n
    # NACA 0012 as the initial airfoil; the .dat file may be
    # whitespace- or comma-delimited
    try:
        self.airfoil0 = np.loadtxt(initial_path, skiprows=1)
    except Exception:
        self.airfoil0 = np.loadtxt(initial_path, delimiter=',')
    x_min = np.min(self.airfoil0[:, 0])
    x_max = np.max(self.airfoil0[:, 0])
    z_min = np.min(self.airfoil0[:, 1])
    z_max = np.max(self.airfoil0[:, 1])
    Px = np.linspace(x_min, x_max, self.m, endpoint=True)
    Py = np.linspace(z_min, z_max, self.n, endpoint=True)
    x, y = np.meshgrid(Px, Py)
    P0 = np.stack((x, y), axis=-1)
    self.Px = P0[:, :, 0]
    self.alpha0 = P0[:, :, 1].flatten()
    self.dim = len(self.alpha0)
    self.bounds = np.zeros((self.dim, 2))
    perturb = 0.2
    self.bounds[:, 0] = self.alpha0 - perturb
    self.bounds[:, 1] = self.alpha0 + perturb
    self.y = None
    self.config_fname = config_fname
def autograd(f, ds, points):
    """Evaluate derivatives of f on the given points."""
    df_ds = lambda *args: f(np.stack(args, axis=-1))
    for i in ds:
        df_ds = egrad(df_ds, i)
    ndim = points.shape[-1]
    return df_ds(*[points[..., i] for i in range(ndim)])
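# Usage sketch for the helper above: the second derivative d^2f/dx0^2 of
# f(x) = x0^2 * x1 on a grid of points (the last axis holds the
# coordinates) should equal 2 * x1 everywhere. Assumes autograd is
# installed and `egrad` is autograd's elementwise_grad, as used above.
from autograd import elementwise_grad as egrad
import autograd.numpy as np

f = lambda pts: pts[..., 0] ** 2 * pts[..., 1]
points = np.stack(np.meshgrid(np.linspace(0, 1, 5), np.linspace(0, 1, 5)), axis=-1)
print(autograd(f, [0, 0], points))  # ~= 2 * points[..., 1]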
def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
    from autograd.scipy.special import i0, i1
    x = np.concatenate(datas)
    weights = np.concatenate([Ez for Ez, _, _ in expectations])

    # convert angles to 2D representation and employ closed form solutions
    x_k = np.stack((np.sin(x), np.cos(x)), axis=1)
    r_k = np.tensordot(weights.T, x_k, (-1, 0))
    r_norm = np.sqrt(np.sum(r_k**2, 1))
    mus_k = r_k / r_norm[:, None]
    r_bar = r_norm / weights.sum(0)[:, None]

    # truncated newton approximation with 2 iterations
    kappa_0 = r_bar * (2 - r_bar**2) / (1 - r_bar**2)
    kappa_1 = kappa_0 - ((i1(kappa_0)/i0(kappa_0)) - r_bar) / \
        (1 - (i1(kappa_0)/i0(kappa_0)) ** 2 - (i1(kappa_0)/i0(kappa_0)) / kappa_0)
    kappa_2 = kappa_1 - ((i1(kappa_1)/i0(kappa_1)) - r_bar) / \
        (1 - (i1(kappa_1)/i0(kappa_1)) ** 2 - (i1(kappa_1)/i0(kappa_1)) / kappa_1)

    for k in range(self.K):
        self.mus[k] = np.arctan2(*mus_k[k])
        self.log_kappas[k] = np.log(kappa_2[k])
def get_trajectory(radius=None, y0=None, noise_std=0.1, **kwargs):
    # pop "time_stamps" so the remaining kwargs can be forwarded to
    # solve_ivp untouched
    time_stamps = kwargs.pop("time_stamps", 45)
    t_span = [0, 3 / 44 * (time_stamps - 1)]
    t_eval = np.linspace(t_span[0], t_span[1], time_stamps)

    # get initial state
    if y0 is None:
        y0 = np.random.rand(2) * 2. - 1
    if radius is None:
        radius = np.random.rand() + 1.3  # sample a range of radii
    y0 = y0 / np.sqrt((y0**2).sum()) * radius  ## set the appropriate radius

    spring_ivp = solve_ivp(fun=dynamics_fn, t_span=t_span, y0=y0,
                           t_eval=t_eval, rtol=1e-10, **kwargs)
    q, p = spring_ivp['y'][0], spring_ivp['y'][1]
    dydt = [dynamics_fn(None, y) for y in spring_ivp['y'].T]
    dydt = np.stack(dydt).T
    dqdt, dpdt = np.split(dydt, 2)

    # add noise
    q += np.random.randn(*q.shape) * noise_std
    p += np.random.randn(*p.shape) * noise_std
    return q, p, dqdt, dpdt, t_eval
def test_multiple_expectation_jacobian_positional(self, tol):
    """Tests that qnodes using positional arguments return
    correct gradients for multiple expectation values."""
    a, b, c = 0.5, 0.54, 0.3

    def circuit(x, y, z):
        qml.QubitStateVector(np.array([1, 0, 1, 1]) / np.sqrt(3), wires=[0, 1])
        qml.Rot(x, y, z, wires=0)
        qml.CNOT(wires=[0, 1])
        return qml.expval(qml.PauliZ(0)), qml.expval(qml.PauliY(1))

    dev = qml.device('default.qubit', wires=2)
    circuit = qml.QNode(circuit, dev)

    # compare our manual Jacobian computation to theoretical result
    # Note: circuit.jacobian actually returns a full jacobian in this case
    res = circuit.jacobian(np.array([a, b, c]))
    assert np.allclose(self.expected_jacobian(a, b, c), res, atol=tol, rtol=0)

    # compare our manual Jacobian computation to autograd
    # not sure if this is the intended usage of jacobian
    jac0 = qml.jacobian(circuit, 0)
    jac1 = qml.jacobian(circuit, 1)
    jac2 = qml.jacobian(circuit, 2)
    res = np.stack([jac0(a, b, c), jac1(a, b, c), jac2(a, b, c)]).T
    assert np.allclose(self.expected_jacobian(a, b, c), res, atol=tol, rtol=0)
def initialization(self, sizes, seed=None):
    """
    Initialize the weights and biases parameters with random seed.

    Parameters:
    -----------
    sizes: list or np.ndarray
        The shape will be the layer size of the neural network and the
        value will be the hidden units of the corresponding layer.
        >>> layer_size = array([3, 2, 3, 1])
        which means that there are 2 hidden layers and the hidden units
        will be 2 and 3, respectively.
        The inputs size should be different when the number of
        fingerprints and the type of fingerprints are different.
        In the BPNN, the outputs should always be 1, which means the
        atomic energy of an element. In the SPNN, the outputs would be
        different when the number of elements in the configuration is
        different.

    Returns:
    ------------
    A weights matrix, which contains biases.
    """
    rs = np.random.RandomState(seed=seed)
    weights = []
    for i in range(len(sizes) - 1):
        weights += list(rs.randn((sizes[i] + 1) * sizes[i + 1]))
    return np.stack(weights)
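# Shape sanity check (sketch; `model` stands in for whatever object the
# method above is bound to): a [3, 2, 3, 1] network packs
# (3+1)*2 + (2+1)*3 + (3+1)*1 = 21 weight-and-bias parameters.
sizes = [3, 2, 3, 1]
w = model.initialization(sizes, seed=0)
assert w.shape == (21,)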
def sphere_points(K, min_angle=None, max_angle=None):
    # generate evenly distributed preference vector
    # ang0 = np.pi / 20. if min_angle is None else min_angle
    # ang1 = np.pi * 9 / 20. if max_angle is None else max_angle
    # azim = np.linspace(ang0, ang1, endpoint=True, num=K)
    # elev = np.linspace(ang0, ang1, endpoint=True, num=K)
    azim = [
        np.linspace(np.pi / 8, np.pi * 3 / 8, endpoint=True, num=3),
        np.linspace(np.pi / 5, np.pi * 3. / 10, endpoint=True, num=2),
        [np.pi / 4]
    ]
    elev = [np.pi / 3, np.pi / 6, np.pi / 12]
    rs = []
    for i, el in enumerate(elev):
        azs = azim[i] if i % 2 != 0 else azim[i][::-1]
        for az in azs:
            rs.append(
                np.array([
                    np.sin(el) * np.cos(az),
                    np.sin(el) * np.sin(az),
                    np.cos(el)
                ]))
    # for az in azim:
    #     for el in elev:
    #         rs.append(np.array([np.cos(el) * np.cos(az),
    #                             np.cos(el) * np.sin(az),
    #                             np.sin(el)]))
    # rs.append(np.array([1., 1., 1.]))
    return np.stack(rs)
def convert_results(results, interface):
    """Convert a list of results coming from multiple QNodes
    to the object required by each interface for auto-differentiation.

    Internally, this method makes use of ``tf.stack``, ``torch.stack``,
    and ``np.stack``.

    Args:
        results (list): list containing the results from multiple QNodes
        interface (str): the interfaces of the underlying QNodes

    Returns:
        list or array or torch.Tensor or tf.Tensor: the converted
        and stacked results.
    """
    if interface == "tf":
        import tensorflow as tf
        return tf.stack(results)

    if interface == "torch":
        import torch
        return torch.stack(results, dim=0)

    if interface in ("autograd", "numpy"):
        from autograd import numpy as np
        return np.stack(results)

    return results
def generate_batch_of_beats_numpy(params):
    ode_params = ODEParamsNumpy()

    x = np.array([-0.417750770388669 for _ in range(params.shape[0])]).reshape((-1, 1))
    y = np.array([-0.9085616622823985 for _ in range(params.shape[0])]).reshape((-1, 1))
    z = np.array([-0.004551233843726818 for _ in range(params.shape[0])]).reshape((-1, 1))
    t = 0.0

    x_signal = [x]
    y_signal = [y]
    z_signal = [z]
    start = time.time()
    for i in range(215):
        f_x = d_x_d_t_numpy(y, x, t, ode_params.rrpc, ode_params.h)
        f_y = d_y_d_t_numpy(y, x, t, ode_params.rrpc, ode_params.h)
        f_z = d_z_d_t_numpy(x, y, z, t, params, ode_params)
        t += 1 / 512

        x_signal.append(x + ode_params.h * f_x)
        y_signal.append(y + ode_params.h * f_y)
        z_signal.append(z + ode_params.h * f_z)

        x = x + ode_params.h * f_x
        y = y + ode_params.h * f_y
        z = z + ode_params.h * f_z
    end = time.time()
    logging.info("Time to generate batch: {}".format(end - start))
    z_signal = np.stack(z_signal).reshape((216, -1)).transpose()
    return z_signal
def get_trajectory(t_span=[0, 3], timescale=10, radius=None, y0=None,
                   noise_std=0.1, **kwargs):
    t_eval = np.linspace(t_span[0], t_span[1],
                         int(timescale * (t_span[1] - t_span[0])))

    # get initial state
    if y0 is None:
        y0 = np.random.rand(2) * 2 - 1
    if radius is None:
        radius = np.random.rand() * 0.9 + 0.1  # sample a range of radii
    y0 = y0 / np.sqrt((y0**2).sum()) * radius  ## set the appropriate radius

    spring_ivp = solve_ivp(fun=dynamics_fn, t_span=t_span, y0=y0,
                           t_eval=t_eval, rtol=1e-10, **kwargs)
    q, p = spring_ivp['y'][0], spring_ivp['y'][1]
    dydt = [dynamics_fn(None, y) for y in spring_ivp['y'].T]
    dydt = np.stack(dydt).T
    dqdt, dpdt = np.split(dydt, 2)

    # add noise
    q += np.random.randn(*q.shape) * noise_std
    p += np.random.randn(*p.shape) * noise_std
    return q, p, dqdt, dpdt, t_eval
def get_d_paretomtl_init(grads, value, weights, i):
    # calculate the gradient direction for Pareto MTL initialization
    nobj, dim = grads.shape

    # check active constraints
    normalized_current_weight = weights[i] / np.linalg.norm(weights[i])
    normalized_rest_weights = np.delete(weights, (i), axis=0) / np.linalg.norm(
        np.delete(weights, (i), axis=0), axis=1, keepdims=True)
    w = normalized_rest_weights - normalized_current_weight

    gx = np.dot(w, value / np.linalg.norm(value))
    idx = gx > 0

    if np.sum(idx) <= 0:
        return np.zeros(nobj)
    if np.sum(idx) == 1:
        sol = np.ones(1)
    else:
        vec = np.dot(w[idx], grads)
        sol, nd = MinNormSolver.find_min_norm_element(vec)

    # calculate the weights
    weight0 = np.sum(
        np.array([sol[j] * w[idx][j, 0] for j in np.arange(0, np.sum(idx))]))
    weight1 = np.sum(
        np.array([sol[j] * w[idx][j, 1] for j in np.arange(0, np.sum(idx))]))
    weight = np.stack([weight0, weight1])

    return weight
def mixture_elbo(var_mixture_params, t):
    # We need to only sample the continuous component parameters,
    # and integrate over the discrete component choice

    def mixture_lower_bound(params):
        """Provides a stochastic estimate of the variational lower bound."""
        samples = component_sample(params, num_samples, rs)
        log_qs = mixture_log_density(var_mixture_params, samples)
        log_ps = logprob(samples, t)
        log_ps = np.reshape(log_ps, (num_samples, -1))
        log_qs = np.reshape(log_qs, (num_samples, -1))
        return np.mean(log_ps - log_qs)  # over samples
        # log_w = log_ps - log_qs
        # elbo = logmeanexp(log_w)
        # return elbo

    log_weights, var_params = unpack_mixture_params(var_mixture_params)
    component_elbos = np.stack(
        [mixture_lower_bound(params_k) for params_k in var_params])
    # print(component_elbos.shape)
    # print(log_weights.shape)
    # return np.sum(component_elbos + log_weights)  # over clusters
    return np.sum(component_elbos * np.exp(log_weights))
def pos_traj_1d(sts, m0, m1, x0, noises, nt, dt, tau, lag, rdotf=rdot_1d2w):
    # sts = on/off-ness of bmp at each of the T stages -- should be T x M -- currently T = 6
    # m0 = tilt toward development
    # m1 = array of gaussian params for tilt toward BMP
    # x0 = initial position on fate landscape 1x1
    # noises = noise at each timestep for each data point --> nt x M
    # nt = number of timesteps (integer)
    # dt = length of timesteps (float)
    # tau = timescale (float)

    #l0s = np.zeros((nt, sts.shape[1])) + m0  # if positive, pushes x negative toward neural
    #l0s = np.zeros((nt, sts.shape[1]))  # if positive, pushes x negative toward neural
    #l0s[:int(m0[1])] = m0[0]
    l1s = getSigSeriesG(sts, nt, *m1[0:3])  # nt x M; if positive, pushes x positive toward epidermal
    #tilt = l1s - l0s

    xs = [x0 * np.ones((sts.shape[1]))]
    for t in range(0, lag):
        xs.append(xs[t] + dt * noises[t])
    for t in range(lag, int(m0[1])):
        xs.append(xs[t] + dt * (rdotf(xs[t], tau, l1s[t] - m0[0]) + noises[t]))
    for t in range(int(m0[1]), nt - 1):
        xs.append(xs[t] + dt * (rdotf(xs[t], tau, l1s[t]) + noises[t]))
    # for t in range(lag, nt-1):
    #     xs.append(xs[t] + dt*(rdotf(xs[t], tau, tilt[t]) + noises[t]))
    return np.stack(xs, axis=1)
def theta_2_stiefel_jacobian(theta, shape):
    n, p = shape
    dof = int(n * p - 0.5 * p * (p + 1))
    dZ = np.stack([np.eye(n)[:, :p]] * dof)
    idx = -1
    for i in reversed(range(p)):
        for j in reversed(range(i + 1, n)):
            # compute cos and sin of rotation angle
            cos = np.cos(theta[idx])
            sin = np.sin(theta[idx])
            for k in reversed(range(dof)):
                if k == idx + dof:
                    dZ_new = np.zeros((n, p))
                    for l in range(p):
                        a = dZ[k, i, l]
                        b = dZ[k, j, l]
                        dZ_new[i, l] = -a * sin + b * cos
                        dZ_new[j, l] = -a * cos - b * sin
                    dZ[k] = dZ_new
                else:
                    # perform rotation on dZ matrix
                    for l in range(p):
                        a = dZ[k, i, l]
                        b = dZ[k, j, l]
                        dZ[k, i, l] = a * cos + b * sin
                        dZ[k, j, l] = -a * sin + b * cos
            idx -= 1  # update index
    return dZ
def _get_dof_indices(freedofs, fixdofs, k_xlist, k_ylist):
    # position of each dof in the concatenated [freedofs, fixdofs] ordering
    index_map = autograd_lib.inverse_permutation(
        np.concatenate([freedofs, fixdofs]))
    # keep only stiffness entries whose row and column are both free
    keep = np.isin(k_xlist, freedofs) & np.isin(k_ylist, freedofs)
    i = index_map[k_ylist][keep]
    j = index_map[k_xlist][keep]
    return index_map, keep, np.stack([i, j])
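# `autograd_lib.inverse_permutation` is defined elsewhere; a minimal
# sketch of the assumed behavior (maps each dof id to its position in
# the permuted ordering):
def inverse_permutation(indices):
    inverse = np.zeros(len(indices), dtype=np.int64)
    inverse[indices] = np.arange(len(indices))
    return inverse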
def get_one_trajectory(t_span=[0, 10], y0=np.array([1, 0]), n_points=50, **kwargs):
    """
    Evaluate one GT trajectory
    """
    t_eval = np.linspace(t_span[0], t_span[1], n_points)

    # ODE solver
    pen_sol = solve_ivp(fun=dynamics_fn, t_span=t_span, y0=y0,
                        t_eval=t_eval, rtol=1e-10, **kwargs)

    q, p = pen_sol['y'][0], pen_sol['y'][1]
    dydt = [dynamics_fn(None, y) for y in pen_sol['y'].T]
    dydt = np.stack(dydt).T
    dqdt, dpdt = np.split(dydt, 2)

    return q, p, dqdt, dpdt, t_eval
def distint_locs(xy):
    Dist = dist_mat(xy, xy)
    d_list = [Dist[i] for i in range(len(Dist))]
    D_ = sorted(d_list, key=row_comp)

    Duniq = []
    ids_ = []
    for i in range(len(D_) - 1):
        di = 1.0 * (D_[i] < TAU)
        di1 = 1.0 * (D_[i + 1] < TAU)
        if not np.isclose(np.linalg.norm(di - di1), 0):
            Duniq.append(D_[i])
            ids_.append(i)
    Duniq.append(D_[-1])
    ids_.append(len(D_) - 1)

    if plot > 1:
        D = np.stack(Duniq)
        fig, ax = plt.subplots(nrows=1, ncols=1 + len(threshs))
        ax[0].imshow(D)
        for i in range(len(threshs)):
            ax[i + 1].imshow(D < threshs[i])
        plt.show()

        print(xy[:, ids_].shape)
        Dist = dist_mat(xy[:, ids_], xy[:, ids_])
        fig, ax = plt.subplots(nrows=1, ncols=1 + len(threshs))
        ax[0].imshow(Dist)
        for i in range(len(threshs)):
            ax[i + 1].imshow(Dist < threshs[i])
        plt.show()

        plt.scatter(xy[0, ids_], xy[1, ids_], c='r', marker='x')
        plt.show()
    return ids_
def test_make_ggnvp_broadcasting():
    A = npr.randn(4, 5)
    x = npr.randn(10, 4)
    v = npr.randn(10, 4)

    fun = lambda x: np.tanh(np.dot(x, A))
    res1 = np.stack([_make_explicit_ggnvp(fun)(xi)(vi) for xi, vi in zip(x, v)])
    res2 = make_ggnvp(fun)(x)(v)
    check_equivalent(res1, res2)
def init_pgm_param(K, N, alpha, niw_conc=10., random_scale=0.):
    def init_niw_natparam(N):
        nu, S, m, kappa = N + niw_conc, (N + niw_conc) * np.eye(N), np.zeros(N), niw_conc
        m = m + random_scale * npr.randn(*m.shape)
        return niw.standard_to_natural(S, m, kappa, nu)

    dirichlet_natparam = alpha * (npr.rand(K) if random_scale else np.ones(K))
    niw_natparam = np.stack([init_niw_natparam(N) for _ in range(K)])

    return dirichlet_natparam, niw_natparam
def numerical_jacobian(fun, argnum, args, kwargs):
    def vector_fun(x):
        args_tmp = list(args)
        args_tmp[argnum] = vs_in.unflatten(vs_in.flatten(args[argnum]) + x)
        return vs_out.flatten(fun(*args_tmp, **kwargs))

    vs_in = vspace(args[argnum])
    vs_out = vspace(fun(*args, **kwargs))
    # central differences: perturb each input coordinate by +/- EPS/2
    return np.stack([(vector_fun(dx) - vector_fun(-dx)) / EPS
                     for dx in np.eye(vs_in.size) * EPS / 2]).T
def callback(params, iter, g):
    pred = ode_pred(params, true_y0, t)
    print("Iteration {:d} train loss {:.6f}".format(iter, L1_loss(pred, true_y)))

    ax_traj.cla()
    ax_traj.set_title('Trajectories')
    ax_traj.set_xlabel('t')
    ax_traj.set_ylabel('x,y')
    ax_traj.plot(t, true_y[:, 0], '-', t, true_y[:, 1], 'g-')
    ax_traj.plot(t, pred[:, 0], '--', t, pred[:, 1], 'b--')
    ax_traj.set_xlim(t.min(), t.max())
    ax_traj.set_ylim(-2, 2)
    ax_traj.xaxis.set_ticklabels([])
    ax_traj.yaxis.set_ticklabels([])
    ax_traj.legend()

    ax_phase.cla()
    ax_phase.set_title('Phase Portrait')
    ax_phase.set_xlabel('x')
    ax_phase.set_ylabel('y')
    ax_phase.plot(true_y[:, 0], true_y[:, 1], 'g-')
    ax_phase.plot(pred[:, 0], pred[:, 1], 'b--')
    ax_phase.set_xlim(-2, 2)
    ax_phase.set_ylim(-2, 2)
    ax_phase.xaxis.set_ticklabels([])
    ax_phase.yaxis.set_ticklabels([])

    ax_vecfield.cla()
    ax_vecfield.set_title('Learned Vector Field')
    ax_vecfield.set_xlabel('x')
    ax_vecfield.set_ylabel('y')
    ax_vecfield.xaxis.set_ticklabels([])
    ax_vecfield.yaxis.set_ticklabels([])

    # vector field plot
    y, x = npo.mgrid[-2:2:21j, -2:2:21j]
    dydt = nn_predict(np.stack([x, y], -1).reshape(21 * 21, 2), 0,
                      params).reshape(-1, 2)
    mag = np.sqrt(dydt[:, 0]**2 + dydt[:, 1]**2).reshape(-1, 1)
    dydt = (dydt / mag)
    dydt = dydt.reshape(21, 21, 2)

    ax_vecfield.streamplot(x, y, dydt[:, :, 0], dydt[:, :, 1], color="black")
    ax_vecfield.set_xlim(-2, 2)
    ax_vecfield.set_ylim(-2, 2)

    fig.tight_layout()
    plt.draw()
    plt.pause(0.001)
def make_pinwheel_data(radial_std, tangential_std, num_classes, num_per_class, rate):
    rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

    features = npr.randn(num_classes * num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1.
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:, 0])
    rotations = np.stack([np.cos(angles), -np.sin(angles),
                          np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return 10 * npr.permutation(np.einsum('ti,tij->tj', features, rotations))
def jacobian(fun, x):
    """
    Returns a function which computes the Jacobian of `fun` with respect to
    positional argument number `argnum`, which must be a scalar or array. Unlike
    `grad` it is not restricted to scalar-output functions, but also it cannot
    take derivatives with respect to some argument types (like lists or dicts).
    If the input to `fun` has shape (in1, in2, ...) and the output has shape
    (out1, out2, ...) then the Jacobian has shape (out1, out2, ..., in1, in2, ...).
    """
    vjp, ans = _make_vjp(fun, x)
    ans_vspace = vspace(ans)
    jacobian_shape = ans_vspace.shape + vspace(x).shape
    # materialize the map: np.stack needs a sequence in recent NumPy
    grads = list(map(vjp, ans_vspace.standard_basis()))
    return np.reshape(np.stack(grads), jacobian_shape)
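# Usage sketch, assuming this is the helper autograd exposes as
# `autograd.jacobian`: for elementwise sin, the Jacobian of a length-4
# input is the 4x4 diagonal matrix diag(cos(x)).
import autograd.numpy as anp
from autograd import jacobian as jac

x = anp.linspace(0.0, 1.0, 4)
J = jac(anp.sin)(x)  # shape (4, 4): output shape + input shape
assert anp.allclose(J, anp.diag(anp.cos(x)))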
def make_pinwheel(radial_std, tangential_std, num_classes, num_per_class, rate,
                  rs=npr.RandomState(0)):
    """Based on code by Ryan P. Adams."""
    rads = np.linspace(0, 2 * np.pi, num_classes, endpoint=False)

    features = rs.randn(num_classes * num_per_class, 2) \
        * np.array([radial_std, tangential_std])
    features[:, 0] += 1
    labels = np.repeat(np.arange(num_classes), num_per_class)

    angles = rads[labels] + rate * np.exp(features[:, 0])
    rotations = np.stack([np.cos(angles), -np.sin(angles),
                          np.sin(angles), np.cos(angles)])
    rotations = np.reshape(rotations.T, (-1, 2, 2))

    return np.einsum('ti,tij->tj', features, rotations)
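# Example call (illustrative): five spiral arms, 100 points each.
data = make_pinwheel(radial_std=0.3, tangential_std=0.05,
                     num_classes=5, num_per_class=100, rate=0.25)
print(data.shape)  # (500, 2)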
def mixture_elbo(var_mixture_params, t):
    # We need to only sample the continuous component parameters,
    # and integrate over the discrete component choice

    def mixture_lower_bound(params):
        """Provides a stochastic estimate of the variational lower bound."""
        samples = component_sample(params, num_samples, rs)
        log_qs = mixture_log_density(var_mixture_params, samples)
        log_ps = logprob(samples, t)
        log_ps = np.reshape(log_ps, (num_samples, -1))
        log_qs = np.reshape(log_qs, (num_samples, -1))
        return np.mean(log_ps - log_qs)

    log_weights, var_params = unpack_mixture_params(var_mixture_params)
    component_elbos = np.stack(
        [mixture_lower_bound(params_k) for params_k in var_params])
    return np.sum(component_elbos * np.exp(log_weights))
def rand_natparam(n, k):
    return np.squeeze(np.stack([rand_gaussian(n) for _ in range(k)]))
def linear_fun_to_matrix(flat_fun, vs):
    # list() so np.stack gets a sequence rather than a lazy map object
    return np.stack(list(map(flat_fun, np.eye(vs.size))))
def jacfun(*args, **kwargs):
    vjp, ans = make_vjp(fun, argnum)(*args, **kwargs)
    ans_vspace = vspace(ans)
    jacobian_shape = ans_vspace.shape + vspace(args[argnum]).shape
    # materialize the map: np.stack needs a sequence in recent NumPy
    grads = list(map(vjp, ans_vspace.standard_basis()))
    return np.reshape(np.stack(grads), jacobian_shape)
def rand_natparam(n, k):
    return np.squeeze(np.stack([rand_dirichlet(n) for _ in range(k)]))