def trace(self, indices: cp.ndarray, recursion_depth: int, efficiency: float) -> cp.ndarray:
    """
    Searches contacts of the given indices (start vertices) up to the given
    recursion depth. Each discovered contact is kept with probability
    `efficiency`, so roughly a fraction (1 - efficiency) of contacts is dropped.

    :param indices: desired indices
    :param recursion_depth: desired recursion depth
    :param efficiency: efficiency of contact tracing
    :return: indices of corresponding contacts (end vertices)
    """
    res = []
    searched = indices
    for i in range(recursion_depth):
        contacts = self._search_contacts(searched)
        # keep each contact with probability `efficiency`
        contacts = contacts[cp.random.random(len(contacts)) <= efficiency]
        res.append(contacts)
        searched = cp.hstack(res)
    return searched
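# --- Usage sketch (added; not from the original source) ---
# A toy contact graph where vertex i knows i+1 and i+2 (mod n); the stub class
# and its _search_contacts are hypothetical stand-ins for the real tracer.
import cupy as cp

class _ToyTracer:
    def __init__(self, n):
        self.n = n

    def _search_contacts(self, indices):
        return cp.hstack([(indices + 1) % self.n, (indices + 2) % self.n])

_ToyTracer.trace = trace  # reuse the method defined above

tracer = _ToyTracer(10)
# with efficiency=1.0 every contact is kept; note that duplicates are not removed
print(tracer.trace(cp.asarray([0]), recursion_depth=2, efficiency=1.0))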
def match_fuzzy(q, tms, tmt, opt):
    tms_score = cdist(q, tms)
    if not opt.include_perfect_match:
        tms_score[tms_score > 0.99] = -float('inf')
    tmt_score = cdist(q, tmt)
    if not opt.include_perfect_match:
        tmt_score[tms_score == -float('inf')] = -float('inf')
        tmt_score[tmt_score > 0.99] = -float('inf')
    tms_top_v, tms_top_i = topk(tms_score, opt.topk)
    tmt_top_v, tmt_top_i = topk(tmt_score, opt.topk)
    # shift local indices to global positions within the sharded corpus
    tms_top_i += opt.shard_i * opt.shard_max_len
    tmt_top_i += opt.shard_i * opt.shard_max_len
    top_v = cp.hstack([tms_top_v, tmt_top_v])
    top_i = cp.hstack([tms_top_i, tmt_top_i])
    # merge both candidate lists and keep the overall top-k, highest score first
    arg_i = cp.flip(top_v.astype('float32').argsort(axis=1), axis=1)
    top_v = top_v[cp.arange(top_v.shape[0]).reshape(-1, 1), arg_i][:, :opt.topk]
    top_i = top_i[cp.arange(top_i.shape[0]).reshape(-1, 1), arg_i][:, :opt.topk]
    return top_v, top_i
def getNabla_psi2(self):
    f = cp.zeros((H, W))
    psi_with_block = copy.deepcopy(self.psi)
    psi_with_block[self.block_mask] = psi_wall
    temp = cp.hstack((self.left_wall, psi_with_block, self.right_wall))
    # temp = np.vstack((self.top_bottom_wall, np.vstack((temp, self.top_bottom_wall))))
    # 9-point isotropic stencil: -20 at the centre, 4 on edges, 1 on diagonals
    for i in range(9):
        if i == 0:
            f += -20 * temp[:, 1:-1]
        elif i == 1:
            f += 4 * cp.roll(temp, -1, axis=0)[:, 1:-1]
        elif i == 2:
            f += 4 * cp.roll(temp, -1, axis=1)[:, 1:-1]
        elif i == 3:
            f += 4 * cp.roll(temp, 1, axis=1)[:, 1:-1]
        elif i == 4:
            f += 4 * cp.roll(temp, 1, axis=0)[:, 1:-1]
        elif i == 5:
            f += cp.roll(cp.roll(temp, -1, axis=1), -1, axis=0)[:, 1:-1]
        elif i == 6:
            f += cp.roll(cp.roll(temp, 1, axis=1), -1, axis=0)[:, 1:-1]
        elif i == 7:
            f += cp.roll(cp.roll(temp, 1, axis=1), 1, axis=0)[:, 1:-1]
        elif i == 8:
            f += cp.roll(cp.roll(temp, -1, axis=1), 1, axis=0)[:, 1:-1]
    return f / 6
def bernstein_5_coeffs(t, tmin, tmax):
    l = tmax - tmin
    t = (t - tmin) / l
    n = 5

    P0 = binom(n, 0) * ((1 - t)**(n - 0)) * t**0
    P1 = binom(n, 1) * ((1 - t)**(n - 1)) * t**1
    P2 = binom(n, 2) * ((1 - t)**(n - 2)) * t**2
    P3 = binom(n, 3) * ((1 - t)**(n - 3)) * t**3
    P4 = binom(n, 4) * ((1 - t)**(n - 4)) * t**4
    P5 = binom(n, 5) * ((1 - t)**(n - 5)) * t**5

    P0dot = binom(n, 0) * (-5 * (1 - t)**4)
    P1dot = binom(n, 1) * (-4 * t * (1 - t)**3 + (1 - t)**4)
    P2dot = binom(n, 2) * (-3 * t**2 * (1 - t)**2 + 2 * t * (1 - t)**3)
    P3dot = binom(n, 3) * (t**3 * (2 * t - 2) + 3 * t**2 * (1 - t)**2)
    P4dot = binom(n, 4) * (-t**4 + 4 * t**3 * (1 - t))
    P5dot = binom(n, 5) * (5 * t**4)

    P0ddot = binom(n, 0) * (-20 * (t - 1)**3)
    P1ddot = binom(n, 1) * (4 * (t - 1)**2 * (5 * t - 2))
    P2ddot = binom(n, 2) * (-2 * (t - 1) * (3 * t**2 + 6 * t * (t - 1) + (t - 1)**2))
    P3ddot = binom(n, 3) * (2 * t * (t**2 + 6 * t * (t - 1) + 3 * (t - 1)**2))
    P4ddot = binom(n, 4) * (-4 * t**2 * (5 * t - 3))
    P5ddot = binom(n, 5) * (20 * t**3)

    # chain rule: divide by l per derivative order to undo the rescaling of t
    P = cp.hstack((P0, P1, P2, P3, P4, P5))
    Pdot = cp.hstack((P0dot, P1dot, P2dot, P3dot, P4dot, P5dot)) / l
    Pddot = cp.hstack((P0ddot, P1ddot, P2ddot, P3ddot, P4ddot, P5ddot)) / (l**2)
    return P, Pdot, Pddot
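# --- Sanity check (added; not from the original source) ---
# Bernstein bases form a partition of unity, so each row of P sums to 1 and
# each row of Pdot sums to 0; the same check applies to bernstein_10_coeffs
# below. `binom` is assumed to be scipy.special.binom, matching the calls above.
import cupy as cp
from scipy.special import binom

t = cp.linspace(0.0, 1.0, 11).reshape(-1, 1)  # column vector of sample times
P, Pdot, Pddot = bernstein_5_coeffs(t, 0.0, 1.0)
print(cp.allclose(P.sum(axis=1), 1.0))                # True
print(cp.allclose(Pdot.sum(axis=1), 0.0, atol=1e-5))  # True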
def kernel_fisherface():
    print('Kernel Fisherface')
    X = np.hstack((train_X, test_X))
    Y = np.hstack((train_Y, test_Y))
    proj_data = kernel_lda(X, Y, kernel_type)
    proj_train_data = proj_data[:, :len(train_Y)]
    proj_test_data = proj_data[:, len(train_Y):]
    face_recognition(proj_train_data, proj_test_data, train_Y, test_Y)
def bernstein_10_coeffs(t, tmin, tmax):
    l = tmax - tmin
    t = (t - tmin) / l
    n = 10

    P0 = binom(n, 0) * ((1 - t)**(n - 0)) * t**0
    P1 = binom(n, 1) * ((1 - t)**(n - 1)) * t**1
    P2 = binom(n, 2) * ((1 - t)**(n - 2)) * t**2
    P3 = binom(n, 3) * ((1 - t)**(n - 3)) * t**3
    P4 = binom(n, 4) * ((1 - t)**(n - 4)) * t**4
    P5 = binom(n, 5) * ((1 - t)**(n - 5)) * t**5
    P6 = binom(n, 6) * ((1 - t)**(n - 6)) * t**6
    P7 = binom(n, 7) * ((1 - t)**(n - 7)) * t**7
    P8 = binom(n, 8) * ((1 - t)**(n - 8)) * t**8
    P9 = binom(n, 9) * ((1 - t)**(n - 9)) * t**9
    P10 = binom(n, 10) * ((1 - t)**(n - 10)) * t**10

    P0dot = -10.0 * (-t + 1)**9
    P1dot = -90.0 * t * (-t + 1)**8 + 10.0 * (-t + 1)**9
    P2dot = -360.0 * t**2 * (-t + 1)**7 + 90.0 * t * (-t + 1)**8
    P3dot = -840.0 * t**3 * (-t + 1)**6 + 360.0 * t**2 * (-t + 1)**7
    P4dot = -1260.0 * t**4 * (-t + 1)**5 + 840.0 * t**3 * (-t + 1)**6
    P5dot = -1260.0 * t**5 * (-t + 1)**4 + 1260.0 * t**4 * (-t + 1)**5
    P6dot = -840.0 * t**6 * (-t + 1)**3 + 1260.0 * t**5 * (-t + 1)**4
    P7dot = -360.0 * t**7 * (-t + 1)**2 + 840.0 * t**6 * (-t + 1)**3
    P8dot = 45.0 * t**8 * (2 * t - 2) + 360.0 * t**7 * (-t + 1)**2
    P9dot = -10.0 * t**9 + 9 * t**8 * (-10.0 * t + 10.0)
    P10dot = 10.0 * t**9

    P0ddot = 90.0 * (-t + 1)**8
    P1ddot = 720.0 * t * (-t + 1)**7 - 180.0 * (-t + 1)**8
    P2ddot = 2520.0 * t**2 * (-t + 1)**6 - 1440.0 * t * (-t + 1)**7 + 90.0 * (-t + 1)**8
    P3ddot = 5040.0 * t**3 * (-t + 1)**5 - 5040.0 * t**2 * (-t + 1)**6 + 720.0 * t * (-t + 1)**7
    P4ddot = 6300.0 * t**4 * (-t + 1)**4 - 10080.0 * t**3 * (-t + 1)**5 + 2520.0 * t**2 * (-t + 1)**6
    P5ddot = 5040.0 * t**5 * (-t + 1)**3 - 12600.0 * t**4 * (-t + 1)**4 + 5040.0 * t**3 * (-t + 1)**5
    P6ddot = 2520.0 * t**6 * (-t + 1)**2 - 10080.0 * t**5 * (-t + 1)**3 + 6300.0 * t**4 * (-t + 1)**4
    P7ddot = -360.0 * t**7 * (2 * t - 2) - 5040.0 * t**6 * (-t + 1)**2 + 5040.0 * t**5 * (-t + 1)**3
    P8ddot = 90.0 * t**8 + 720.0 * t**7 * (2 * t - 2) + 2520.0 * t**6 * (-t + 1)**2
    P9ddot = -180.0 * t**8 + 72 * t**7 * (-10.0 * t + 10.0)
    P10ddot = 90.0 * t**8

    # chain rule: divide by l per derivative order to undo the rescaling of t
    P = cp.hstack((P0, P1, P2, P3, P4, P5, P6, P7, P8, P9, P10))
    Pdot = cp.hstack((P0dot, P1dot, P2dot, P3dot, P4dot, P5dot, P6dot, P7dot,
                      P8dot, P9dot, P10dot)) / l
    Pddot = cp.hstack((P0ddot, P1ddot, P2ddot, P3ddot, P4ddot, P5ddot, P6ddot,
                       P7ddot, P8ddot, P9ddot, P10ddot)) / (l**2)
    return P, Pdot, Pddot
def _spread_in_social_networks(self):
    if self.social_network is None:
        return
    for start_vertices, end_vertices in self.social_network:
        # under lockdown, an edge stays active only with probability `_lockdown_ratio`
        lockdown = self.city_lockdown[start_vertices]
        lockdown_mask = lockdown * (cp.random.random(len(lockdown)) <= self._lockdown_ratio)
        start_vertices = cp.hstack([
            start_vertices[~lockdown],
            start_vertices[lockdown_mask],
        ])
        end_vertices = cp.hstack([
            end_vertices[~lockdown],
            end_vertices[lockdown_mask],
        ])

        start_susceptible = self._is_susceptible[start_vertices]
        end_susceptible = self._is_susceptible[end_vertices]

        # quarantine suppresses infectiousness with probability `_quarantine_effifiency`
        start_infectious = self._is_infectious[start_vertices]
        start_in_quarantine = self._is_in_quarantine[start_vertices]
        start_infectious = start_infectious * ~(
            start_in_quarantine *
            (cp.random.random(len(start_infectious)) <= self._quarantine_effifiency))
        end_infectious = self._is_infectious[end_vertices]
        end_in_quarantine = self._is_in_quarantine[end_vertices]
        end_infectious = end_infectious * ~(
            end_in_quarantine *
            (cp.random.random(len(end_infectious)) <= self._quarantine_effifiency))

        # transmission can happen in either direction along an edge
        forward_transmissions = start_infectious * end_susceptible * (
            cp.random.random(len(start_infectious)) <=
            self._virus.household_transmission_probability / 2)
        backward_transmissions = end_infectious * start_susceptible * (
            cp.random.random(len(start_infectious)) <=
            self._virus.household_transmission_probability / 2)
        infected = cp.hstack([
            end_vertices[forward_transmissions],
            start_vertices[backward_transmissions]
        ])
        self._is_infected[infected] = True
        self._day_contracted[infected] = self.day_i
def gen_train(q_func, s0, a0, r1, s1, done, n_actions):
    gamma = GAMMA

    # Network outputs: q-values, per-action predicted next states, and value
    y0 = q_func(s0).data
    y1 = q_func(s1).data
    q0 = y0[:, :n_actions]
    q1 = y1[:, :n_actions]
    h0 = cp.stack(cp.split(y0[:, n_actions:-1], n_actions, axis=1), axis=0)
    h1 = cp.stack(cp.split(y1[:, n_actions:-1], n_actions, axis=1), axis=0)
    v0 = y0[:, -1:]
    v1 = y1[:, -1:]

    # Correct actions: the taken action's predicted next state becomes s1
    action_error = h0[a0, cp.arange(h0.shape[1])] - s1
    print('action_error', (action_error**2).mean())
    h0[a0, cp.arange(h0.shape[1])] = s1

    # Correct q-values with the one-step Bellman target
    q1[done, :] = 0
    tt = r1 + (gamma * cp.max(q1, axis=1))
    q0[a0[:, None] == cp.arange(q0.shape[1])] = tt

    # TODO: Correct v-values

    # Put them back together
    t = cp.hstack([
        q0,
        cp.hstack(h0),
        v0,
    ])
    train = [(s0[i], t[i]) for i in range(t.shape[0])]
    return train
def kernel_eigenface():
    print('Kernel Eigenface')
    X = np.hstack((train_X, test_X))
    proj_data = kerenl_pca(X, kernel_type)
    proj_train_data = proj_data[:, :len(train_Y)]
    proj_test_data = proj_data[:, len(train_Y):]
    face_recognition(proj_train_data, proj_test_data, train_Y, test_Y)
def _hstack(self, Xs):
    """Stacks Xs horizontally.

    This allows subclasses to control the stacking behavior, while reusing
    everything else from ColumnTransformer.

    Parameters
    ----------
    Xs : list of {array-like, sparse matrix, dataframe}
    """
    if self.sparse_output_:
        try:
            # since all columns should be numeric before stacking them
            # in a sparse matrix, `check_array` is used for the
            # dtype conversion if necessary.
            converted_Xs = [
                check_array(X, accept_sparse=True, force_all_finite=False)
                for X in Xs
            ]
        except ValueError as e:
            raise ValueError(
                "For a sparse output, all columns should "
                "be a numeric or convertible to a numeric.") from e
        return cu_sparse.hstack(converted_Xs).tocsr()
    else:
        Xs = [f.toarray() if issparse(f) else f for f in Xs]
        return np.hstack(Xs)
def backward(self, dh_next, dc_next):
    Wx, Wh, b = self.params
    x, h_prev, c_prev, i, f, g, o, c_next = self.cache

    dt = dh_next * o
    dch = dt * (1 - cp.tanh(c_next)**2)
    dc = dch + dc_next
    dc_prev = dc * f

    df = dc * c_prev
    dg = dc * i
    di = dc * g
    do = dh_next * cp.tanh(c_next)

    # gate derivatives: sigmoid for i/f/o, tanh for g
    di *= i * (1 - i)
    df *= f * (1 - f)
    do *= o * (1 - o)
    dg *= (1 - g**2)

    dA = cp.hstack((df, dg, di, do))
    dx = cp.dot(dA, Wx.T)
    dWx = cp.dot(x.T, dA)
    dh_prev = cp.dot(dA, Wh.T)
    dWh = cp.dot(h_prev.T, dA)
    db = dA.sum(axis=0)

    self.grads[0][...] = dWx
    self.grads[1][...] = dWh
    self.grads[2][...] = db
    return dx, dh_prev, dc_prev
def call_cupy_tf2(frequency_array, mass_1, mass_2, chi_1, chi_2,
                  luminosity_distance, theta_jn, phase, **kwargs):
    waveform_kwargs = dict(reference_frequency=50.0, minimum_frequency=20.0)
    waveform_kwargs.update(kwargs)
    minimum_frequency = waveform_kwargs["minimum_frequency"]
    in_band = frequency_array >= minimum_frequency
    frequency_array = xp.asarray(frequency_array)
    h_out_of_band = xp.zeros(int(xp.sum(~in_band)))
    wf = TF2(mass_1, mass_2, chi_1, chi_2,
             luminosity_distance=luminosity_distance)
    hplus = wf(frequency_array[in_band], phi_c=phase)
    hplus = xp.hstack([h_out_of_band, hplus])
    # the cross polarisation is the plus polarisation shifted by pi/2
    hcross = hplus * xp.exp(-1j * np.pi / 2)
    hplus *= (1 + np.cos(theta_jn)**2) / 2
    hcross *= np.cos(theta_jn)
    return dict(plus=hplus, cross=hcross)
def _list2array(lst):
    """Convert a list to a CuPy array."""
    if lst and isinstance(lst[0], cp.ndarray):
        return cp.hstack(lst)
    else:
        return cp.asarray(lst)
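# --- Usage sketch (added; not from the original source) ---
# A list of arrays is concatenated; a list of scalars is converted directly.
import cupy as cp

print(_list2array([cp.arange(3), cp.arange(2)]))  # [0 1 2 0 1]
print(_list2array([1.0, 2.0, 3.0]))               # plain scalars -> cp.asarray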
def get_acc(pos, mass, G, softening):
    """
    Calculate the acceleration on each particle due to Newton's law.

    pos       is an N x 3 matrix of positions
    mass      is an N x 1 vector of masses
    G         is Newton's gravitational constant
    softening is the softening length

    a is an N x 3 matrix of accelerations
    """
    # positions r = [x, y, z] for all particles
    x = pos[:, 0:1]
    y = pos[:, 1:2]
    z = pos[:, 2:3]

    # matrices that store all pairwise particle separations: r_j - r_i
    dx = x.T - x
    dy = y.T - y
    dz = z.T - z

    # matrix that stores 1/r^3 for all pairwise particle separations
    inv_r3 = (dx**2 + dy**2 + dz**2 + softening**2)
    inv_r3[inv_r3 > 0] = inv_r3[inv_r3 > 0]**(-1.5)

    ax = G * (dx * inv_r3) @ mass
    ay = G * (dy * inv_r3) @ mass
    az = G * (dz * inv_r3) @ mass

    # pack together the acceleration components
    a = cp.hstack((ax, ay, az))
    return a
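# --- Usage sketch (added; not from the original source) ---
# Four random particles with unit masses; since internal forces cancel in
# pairs, the accelerations sum to (approximately) zero for equal masses.
import cupy as cp

pos = cp.random.uniform(-1.0, 1.0, (4, 3))
mass = cp.ones((4, 1))
acc = get_acc(pos, mass, G=1.0, softening=0.1)
print(acc.shape)                                     # (4, 3)
print(cp.allclose(acc.sum(axis=0), 0.0, atol=1e-5))  # True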
def __init__(self, dtype):
    self.c = np.hstack([
        np.random.rand(N, int(2 * M / 4), dtype=np.float32),
        np.zeros((N, int(2 * M / 4)))
    ])
    self.x = np.array([(m / M) * A * Pe for m in range(M)], dtype=np.float32)
    self.y = np.array([(n / N) * Pe for n in range(N)], dtype=np.float32)

    # spectral wavenumbers in FFT order: [0 .. M/2-1, 0, -M/2+1 .. -1]
    a = np.asarray(range(0, int(M / 2)), dtype=np.float32) * 2 * np.pi / (A * Pe)
    b = np.asarray(range(int(-M / 2 + 1), 0), dtype=np.float32) * 2 * np.pi / (A * Pe)
    self.km = np.concatenate((a, np.concatenate((np.array([0]), b)))).astype(np.float32)
    c = np.asarray(range(0, int(N / 2)), dtype=np.float32) * 2 * np.pi / Pe
    d = np.asarray(range(int(-N / 2 + 1), 0), dtype=np.float32) * 2 * np.pi / Pe
    self.kn = np.concatenate((c, np.concatenate((np.array([0]), d)))).astype(np.float32)

    self.km_km = np.tile(self.km, (N, 1)).astype(np.float32)
    self.kn_kn = np.tile(self.kn, (M, 1)).astype(np.float32).T

    self.c_hat = np.fft.fft2(self.c).astype(np.complex64)
    # `np.complex` was a removed alias of the builtin `complex`
    self.phi_x = np.zeros((N, M), dtype=complex)
    self.phi_y = np.zeros((N, M), dtype=complex)
    self.c_x = np.fft.ifft2(self.c_hat * self.km_km * 1.0j).astype(np.complex64)
    self.c_y = np.fft.ifft2(self.c_hat * self.kn_kn * 1.0j).astype(np.complex64)
    self.km2kn2 = self.km_km**2 + self.kn_kn**2
def forward(self, inputs, target_array):
    # Hidden units with sigmoid activation
    self.n_h = np.dot(inputs, self.w_h)
    self.n_h = self.activate(self.n_h)
    # Add bias
    self.n_h = np.hstack((self.n_h, 1))

    # Outputs
    n_o = np.dot(self.n_h, self.w_o)

    # 1. Store activations of n_o in the predictions array
    #    (used later to compute the confusion matrix).
    predictions = self.activate(n_o)
    # 2. Activate the output neurons.
    n_o = self.activate(n_o)
    # 3. Give each sample a 1 at its maximum in predictions, 0 otherwise.
    predictions = np.where(predictions >= np.amax(predictions), 1, 0)
    target_k = predictions * target_array
    # 4. Turn the target array into 0.9s and 0.1s for calculating deltas.
    target_k = np.where(target_k == 1, 0.9, 0.1)
    return n_o, target_k, predictions
def _rmatmat(self, X):
    """Default implementation of _rmatmat defers to rmatvec or adjoint."""
    if type(self)._adjoint == LinearOperator._adjoint:
        return cupy.hstack(
            [self.rmatvec(col.reshape(-1, 1)) for col in X.T])
    else:
        return self.H.matmat(X)
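# --- Usage sketch (added; not from the original source) ---
# Assumes cupyx.scipy.sparse.linalg.LinearOperator mirrors scipy's interface:
# an operator built only from matvec/rmatvec callables takes the
# column-by-column rmatvec fallback above when rmatmat is requested.
import cupy
from cupyx.scipy.sparse.linalg import LinearOperator

A = cupy.random.random((5, 3))
op = LinearOperator(A.shape, matvec=lambda x: A @ x, rmatvec=lambda y: A.T @ y)
Y = cupy.random.random((5, 2))
print(cupy.allclose(op.rmatmat(Y), A.T @ Y))  # True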
def get_uniform_init(n: int, shape: Tuple[int]):
    """Returns tuple of x,y-coordinates uniformly distributed over an area of
    the given shape."""
    x = np.random.randint(shape[0], size=n).reshape(-1, 1)  # starting position x
    y = np.random.randint(shape[1], size=n).reshape(-1, 1)  # starting position y
    return np.hstack([x, y])
def get_perlin_init(shape=(1000, 1000),
                    n=100000,
                    cutoff=None,
                    repetition=(1000, 1000),
                    scale=100,
                    octaves=20.0,
                    persistence=0.1,
                    lacunarity=2.0):
    """Returns a tuple of x,y-coordinates sampled from Perlin noise.

    This can be used to initialize the starting positions of a physarum
    population, as well as to generate a cloudy feeding pattern with a natural
    feel to it. This function wraps the noise library by Casey Duncan and is in
    parts borrowed from the following article, which also gives a good
    explanation of the noise parameters:
    https://medium.com/@yvanscher/playing-with-perlin-noise-generating-realistic-archipelagos-b59f004d8401

    The most relevant parameters for our purposes are:

    :param shape: The shape of the area in which the noise is to be generated.
        Defaults to (1000, 1000).
    :type shape: Tuple of integers with the form (width, height).
    :param n: Number of particles to sample. When used as a feeding trace,
        this translates to the relative strength of the pattern. Defaults to
        100000.
    :param cutoff: Value below which noise is set to zero. Defaults to None.
        Too high a value leads to a 'probabilities contain NaN' error.
    :param scale: (python-noise parameter) The scale of the noise -- larger or
        smaller patterns. Defaults to 100.
    :param repetition: (python-noise parameter) Tuple that denotes the size of
        the area in which the noise should repeat itself. Defaults to
        (1000, 1000).
    """
    import numpy as np
    import cupy as cp  # np.vectorize is not present in cupy, so convert at the end

    shape = [i - 1 for i in shape]
    # make coordinate grid
    x_idx = np.linspace(0, shape[0], shape[0])
    y_idx = np.linspace(0, shape[1], shape[1])
    world_x, world_y = np.meshgrid(x_idx, y_idx)

    # apply perlin noise; instead of np.vectorize, consider itertools.starmap()
    world = np.vectorize(noise.pnoise2)(
        world_x / scale,
        world_y / scale,
        octaves=int(octaves),
        persistence=persistence,
        lacunarity=lacunarity,
        repeatx=repetition[0],
        repeaty=repetition[1],
        base=np.random.randint(0, 100),
    )

    # sample particle init from the map
    world[world <= 0.0] = 0.0  # filter negative values
    if cutoff is not None:
        world[world <= cutoff] = 0.0
    linear_idx = np.random.choice(world.size,
                                  size=n,
                                  p=world.ravel() / float(world.sum()))
    x, y = np.unravel_index(linear_idx, shape)
    x = x.reshape(-1, 1)
    y = y.reshape(-1, 1)
    return cp.asarray(np.hstack([x, y]))
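# --- Usage sketch (added; not from the original source) ---
# Requires the `noise` package (pip install noise) to be importable as above.
starting_positions = get_perlin_init(shape=(500, 500), n=10000, scale=80)
print(starting_positions.shape)  # (10000, 2) cupy array of x,y coordinates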
def build_translator(self, metric, method, **kwargs):
    self.dist_function = lambda X, Y: distance_function(X, Y, metric)
    self.method = method
    batch = kwargs.get('batch', None)
    self.batch = batch
    epsilon = kwargs.get('epsilon', None)
    k = kwargs.get('k', None)
    lr = kwargs.get('lr', None)
    iters = kwargs.get('iters', None)

    if self.method == 'nn':
        self.normalizer = None
        self.score_function = lambda q: -self.dist_function(q, self.tgt_space)
    elif self.method == 'isf':
        # inverted softmax: precompute the log-partition per target in batches
        self.normalizer = []
        for j in range(0, self.n, batch):
            self.normalizer.append(
                logsumexp(-self.dist_function(
                    self.src_space,
                    self.tgt_space[j:min(j + batch, self.n)]) / epsilon,
                          axis=0))
        self.normalizer = xp.hstack(self.normalizer)
        self.score_function = lambda q: -self.dist_function(q, self.tgt_space) / epsilon \
            - self.normalizer
    elif self.method == 'csls':
        # CSLS: penalize targets by their mean distance to the k nearest sources
        self.normalizer = []
        for j in range(0, self.n, batch):
            dist = self.dist_function(
                self.src_space, self.tgt_space[j:min(j + batch, self.n)])
            neighbors, neighborhood = top_k(dist, k, axis=0, biggest=False)
            self.normalizer.append(xp.mean(neighborhood, axis=0))
        self.normalizer = xp.hstack(self.normalizer)
        self.score_function = lambda q: 0.5 * self.normalizer \
            - self.dist_function(q, self.tgt_space)
    elif self.method == 'hnn':
        hnn = HNN(self.src_space, self.tgt_space, self.dist_function, epsilon)
        hnn.gallery_weight(iters=iters, batch=batch, lr=lr)
        self.normalizer = hnn.beta
        self.score_function = lambda q: self.normalizer - \
            self.dist_function(q, self.tgt_space)
def inverse_pass(self, data, forward):
    output = [data]
    # For the inverse direction, we invert the layer order
    for indx, layer in enumerate(self.layers[::-1]):
        out = layer.inverse(output[-1])
        # re-attach the extra columns from the matching forward activation
        output.append(
            xp.hstack([out, forward[-2 - indx][:, out.shape[1]:]]))
    return output[::-1]
def _Preprocess(T, H):
    T = [t.data for t in T]
    T = [cupy.vstack([cupy.zeros((self.fbsize, t.shape[1]), cupy.float32), t])
         for t in T]
    # stack fbsize time-shifted copies side by side (feedback context window)
    T = [cupy.hstack([t[i:len(t) - (self.fbsize - i)] for i in range(self.fbsize)])
         for t in T]
    # keep only the phase of the spectrum, then transform back to the time domain
    T = [cupy.fft.irfft(cupy.exp(inum * cupy.angle(cupy.fft.rfft(t)))) * dftnorm
         for t in T]
    T = [Variable(t) for t in T]
    H = [F.reshape(
            F.concat(F.broadcast_to(h, (self.fs, h.shape[0], h.shape[1])), axis=1),
            (h.shape[0] * self.fs, -1)) for h in H]
    H = [F.concat([t, h]) for t, h in zip(T, H)]
    return H
def optimized_update_positions(positions, angle, theta_sense, horizon_sense,
                               theta_walk, horizon_walk, trace_array):
    """Returns the adapted physarum positions, given initial coordinates and
    constants. This function is optimized by using CuPy (an implementation of
    NumPy-compatible multi-dimensional arrays on CUDA)."""
    ### Get all possible positions to test
    # get the 3 new angles to test for each organism
    angles_to_test = np.hstack((
        (angle - theta_sense) % (2 * np.pi),
        angle,
        (angle + theta_sense) % (2 * np.pi),
    )).reshape(-1, 3)
    # get positions to test based on current positions and angles
    pos_to_test = positions.reshape(-1, 1, 2) + np.stack(
        (horizon_sense * np.cos(angles_to_test),
         horizon_sense * np.sin(angles_to_test)),
        axis=-1)
    pos_to_test = np.remainder(pos_to_test, np.array(trace_array.shape))

    ### Get all possible positions to walk to
    # get the 3 new angles to walk to for each organism
    angles_to_walk = np.hstack((
        (angle - theta_walk) % (2 * np.pi),
        angle,
        (angle + theta_walk) % (2 * np.pi),
    )).reshape(-1, 3)
    # get positions to walk to based on current positions and angles
    pos_to_walk = positions.reshape(-1, 1, 2) + np.stack(
        (horizon_walk * np.cos(angles_to_walk),
         horizon_walk * np.sin(angles_to_walk)),
        axis=-1)
    pos_to_walk = np.remainder(pos_to_walk, np.array(trace_array.shape))

    ### Pick the walk target whose sensed position carries the strongest trace
    pos_to_test = np.floor(pos_to_test).astype(np.int64) - 1
    # TODO notice argmax will always return the first entry when several are equal
    best_indexes = trace_array[pos_to_test[:, :, 0],
                               pos_to_test[:, :, 1]].argmax(axis=-1)
    new_positions = pos_to_walk[np.arange(len(pos_to_test)), best_indexes]
    new_angles = angles_to_walk[np.arange(len(pos_to_test)),
                                best_indexes].reshape(-1, 1)
    return new_positions, new_angles
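# --- Usage sketch (added; not from the original source) ---
# One update step for 100 agents on an empty 200x200 trace field; the sensing
# and walking constants are illustrative assumptions, and `np` is assumed to be
# the cupy alias the function above relies on.
import cupy as np

positions = np.random.uniform(0, 200, (100, 2))
angle = np.random.uniform(0, 2 * np.pi, (100, 1))
trace_array = np.zeros((200, 200), dtype=np.float32)
positions, angle = optimized_update_positions(
    positions, angle,
    theta_sense=np.pi / 4, horizon_sense=9.0,
    theta_walk=np.pi / 4, horizon_walk=3.0,
    trace_array=trace_array)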
def get_gait_updates(self, forward_pass, targets, ortho_weighting=0.0, gamma=0.001):
    # Updates will be stored and returned
    weight_updates = []
    bias_updates = []

    nb_layers = len(self.layers)
    inverse = targets
    mult_factor = 1.0
    for layer_index in range(nb_layers)[::-1]:
        error = mult_factor * (forward_pass[layer_index + 1] - inverse)
        if self.layers[layer_index].linear:
            layer_derivatives = xp.ones((error.shape))
        else:
            layer_derivatives = self.layers[layer_index].transfer_derivative_func(
                self.layers[layer_index].transfer_inverse_func(
                    forward_pass[layer_index + 1]))

        # Calculate updates for this layer
        weight_update = xp.mean(xp.einsum(
            'nj, ni -> nij',
            layer_derivatives * error,
            forward_pass[layer_index][:, :self.net_structure[layer_index + 1]]),
                                axis=0)
        bias_update = xp.mean(layer_derivatives * error, axis=0)

        # Calculating a weight update based upon a soft orthogonal regularizer
        if ortho_weighting != 0.0:
            weight_update += self.ortho_gradients(ortho_weighting, layer_index)

        # Collect updates
        weight_updates.append(-weight_update)
        bias_updates.append(-bias_update)

        grad_adjusted_inc_factor = gamma * layer_derivatives * layer_derivatives
        inverse = self.layers[layer_index].inverse(
            (1.0 - grad_adjusted_inc_factor) * forward_pass[layer_index + 1] +
            grad_adjusted_inc_factor * inverse)
        mult_factor = mult_factor / gamma

        # Adding the auxiliary neurons on
        inverse = xp.hstack([
            inverse,
            forward_pass[layer_index][:, self.net_structure[layer_index + 1]:]
        ])
    return weight_updates[::-1], bias_updates[::-1]
def _upsampled_dft(self, array, region_sz, offsets=None):
    """
    Upsampled DFT by matrix multiplication.

    This code is intended to provide the same result as if the following
    operations were performed:
        - Embed the array in a larger one of size `upsample_factor` times
          larger in each dimension.
        - ifftshift to bring the center of the image to (1, 1).
        - Take the FFT of the larger array.
        - Extract a region of size [region_sz] from the result, starting
          with offsets.

    It achieves this result by computing the DFT in the output array without
    the need to zero-pad. It is much faster and more memory efficient than the
    zero-padded FFT approach if region_sz is much smaller than
    array.size * upsample_factor.

    Args:
        array (cp.ndarray): DFT of the data to be upsampled
        region_sz (int or tuple of int): size of the region to be sampled
        offsets (int or tuple of int): offsets to the sampling region

    Returns:
        (cp.ndarray): upsampled DFT of the specified region
    """
    try:
        if len(region_sz) != array.ndim:
            raise ValueError("upsampled region size must match array dimension")
    except TypeError:
        # expand integer to list
        region_sz = (region_sz,) * array.ndim

    if offsets is None:
        offsets = (0,) * array.ndim
    else:
        if len(offsets) != array.ndim:
            raise ValueError("axis offsets must match array dimension")

    dim_props = zip(reversed(array.shape), reversed(region_sz), reversed(offsets))
    for ax_sz, up_ax_sz, ax_offset in dim_props:
        # float32 sample frequencies
        fftfreq = (
            cp.hstack(
                (
                    cp.arange(0, (ax_sz - 1) // 2 + 1, dtype=cp.float32),
                    cp.arange(-(ax_sz // 2), 0, dtype=cp.float32),
                )
            )
            / ax_sz
            / self.upsample_factor
        )
        # upsampling kernel
        kernel = cp.exp(
            (1j * 2 * np.pi)
            * (cp.arange(up_ax_sz, dtype=np.float32) - ax_offset)[:, None]
            * fftfreq
        )
        # convolve
        array = cp.tensordot(kernel, array, axes=(1, -1))
    return array
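# --- Usage sketch (added; not from the original source) ---
# A hypothetical stub supplies the `upsample_factor` attribute the method
# reads; here we extract a 4x4 region of the 10x-upsampled spectrum of an
# 8x8 image.
import cupy as cp

class _StubRegistration:
    upsample_factor = 10

_StubRegistration._upsampled_dft = _upsampled_dft  # reuse the method above

reg = _StubRegistration()
data = cp.random.random((8, 8))
out = reg._upsampled_dft(cp.fft.fft2(data), region_sz=4)
print(out.shape)  # (4, 4)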
def get_circle_init(n: int, center: Tuple[int], radius: int, width: int):
    """Returns tuple of x,y-coordinates sampled from a ring with the given
    center, radius and width."""
    x = (center[0] + radius * np.cos(np.linspace(0, 2 * np.pi, n))).reshape(-1, 1)
    y = (center[1] + radius * np.sin(np.linspace(0, 2 * np.pi, n))).reshape(-1, 1)
    # perturb coordinates to smear the ring out to the given width
    x = x + np.random.normal(0.0, 0.333, size=(n, 1)) * width
    y = y + np.random.normal(0.0, 0.333, size=(n, 1)) * width
    return np.hstack([x, y])
def _overlapadd(self, F):
    N = len(F)
    F = F[:, :, :self.fl]
    # shift sample i of each frame down by i positions, zero-pad, then sum
    X = cupy.sum(cupy.stack([
        cupy.hstack([
            cupy.zeros((N, i, 1), cupy.float32),
            F[:, :, i:i + 1],
            cupy.zeros((N, self.fl - 1 - i, 1), cupy.float32)
        ]) for i in range(self.fl)
    ]), axis=0)
    return X
def fix_binary_predict_proba_result(proba):
    if proba.ndim == 1:
        if CumlToolBox.is_cupy_array(proba):
            proba = cupy.vstack([1 - proba, proba]).T
        else:
            proba = cudf.Series(proba)
            proba = cudf.concat([1 - proba, proba], axis=1)
    elif proba.shape[1] == 1:
        proba = cupy.hstack([1 - proba, proba])
    return proba
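# --- Usage sketch (added; not from the original source) ---
# The two cupy branches above turn a positive-class probability vector into an
# (n, 2) matrix of [P(class 0), P(class 1)]; shown here directly, without the
# CumlToolBox/cudf dependencies.
import cupy

proba = cupy.asarray([0.2, 0.7, 0.9])
print(cupy.vstack([1 - proba, proba]).T)   # 1-D branch
print(cupy.hstack([1 - proba.reshape(-1, 1),
                   proba.reshape(-1, 1)])) # (n, 1) branch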
def get_filled_circle_init(n: int, center: Tuple[int], radius: int):
    """Returns tuple of x,y-coordinates sampled uniformly from a disk with the
    given center and radius."""
    t = 2 * np.pi * np.random.rand(n)
    # folding the sum of two uniforms gives the density p(r) ~ r of a uniform disk
    r = np.random.rand(n) + np.random.rand(n)
    r[r > 1] = 2 - r[r > 1]
    x = (center[0] + r * radius * np.cos(t)).reshape(-1, 1)
    y = (center[1] + r * radius * np.sin(t)).reshape(-1, 1)
    return np.hstack([x, y])
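# --- Usage sketch (added; not from the original source) ---
# The init helpers compose naturally: stack a uniform background with a dense
# filled circle to get one (n, 2) array of starting positions.
import numpy as np

background = get_uniform_init(n=500, shape=(1000, 1000))
blob = get_filled_circle_init(n=500, center=(500, 500), radius=100)
positions = np.vstack([background, blob])
print(positions.shape)  # (1000, 2)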
def block_hankel(data, f):
    """
    Create a block Hankel matrix.

    Args:
        data (float): signal array of shape (signals, samples).
        f (int): number of block rows.

    Returns:
        Hankel matrix with f block rows.
    """
    n = data.shape[1] - f
    return np.vstack(
        [np.hstack([data[:, i + j] for i in range(f)]) for j in range(n)]).T
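# --- Usage sketch (added; not from the original source) ---
# For a single 6-sample signal and f=3, each column slides the window one step,
# giving the familiar Hankel structure. `np` is assumed to be numpy here (the
# function also runs unchanged under a cupy alias).
import numpy as np

data = np.arange(6.0).reshape(1, -1)
print(block_hankel(data, 3))
# [[0. 1. 2.]
#  [1. 2. 3.]
#  [2. 3. 4.]]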