def test_floor_divide_remainder_and_divmod(self):
    inch = u.Unit(0.0254 * u.m)
    dividend = np.array([1., 2., 3.]) * u.m
    divisor = np.array([3., 4., 5.]) * inch
    quotient = dividend // divisor
    remainder = dividend % divisor
    assert_allclose(quotient.value, [13., 19., 23.])
    assert quotient.unit == u.dimensionless_unscaled
    assert_allclose(remainder.value, [0.0094, 0.0696, 0.079])
    assert remainder.unit == dividend.unit

    quotient2 = np.floor_divide(dividend, divisor)
    remainder2 = np.remainder(dividend, divisor)
    assert np.all(quotient2 == quotient)
    assert np.all(remainder2 == remainder)

    quotient3, remainder3 = divmod(dividend, divisor)
    assert np.all(quotient3 == quotient)
    assert np.all(remainder3 == remainder)

    with pytest.raises(TypeError):
        divmod(dividend, u.km)

    with pytest.raises(TypeError):
        dividend // u.km

    with pytest.raises(TypeError):
        dividend % u.km

    if hasattr(np, 'divmod'):  # not NUMPY_LT_1_13
        quotient4, remainder4 = np.divmod(dividend, divisor)
        assert np.all(quotient4 == quotient)
        assert np.all(remainder4 == remainder)
        with pytest.raises(TypeError):
            np.divmod(dividend, u.km)
def _reduce(self, n):
    idxs = np.int64(np.floor(np.array(range(3**3 * n)) / n))
    idxs, i = np.divmod(idxs, 3)
    idxs, j = np.divmod(idxs, 3)
    k = idxs % 3
    ijk = np.vstack((i, j, k)).T - 1
    self._coords = [
        coord for coord in self._coords
        if np.all(np.isin(coord.center(ijk), [0, -1]))
    ]
    idxs = {i for coord in self._coords for i in coord.idx}
    self.fragments = [frag for frag in self.fragments if set(frag) & idxs]
def test_ufunc():
    arr = PandasArray(np.array([-1.0, 0.0, 1.0]))
    result = np.abs(arr)
    expected = PandasArray(np.abs(arr._ndarray))
    tm.assert_extension_array_equal(result, expected)

    r1, r2 = np.divmod(arr, np.add(arr, 2))
    e1, e2 = np.divmod(arr._ndarray, np.add(arr._ndarray, 2))
    e1 = PandasArray(e1)
    e2 = PandasArray(e2)
    tm.assert_extension_array_equal(r1, e1)
    tm.assert_extension_array_equal(r2, e2)
def test_half_ufuncs(self):
    """Test the various ufuncs"""

    a = np.array([0, 1, 2, 4, 2], dtype=float16)
    b = np.array([-2, 5, 1, 4, 3], dtype=float16)
    c = np.array([0, -1, -np.inf, np.nan, 6], dtype=float16)

    assert_equal(np.add(a, b), [-2, 6, 3, 8, 5])
    assert_equal(np.subtract(a, b), [2, -4, 1, 0, -1])
    assert_equal(np.multiply(a, b), [0, 5, 2, 16, 6])
    assert_equal(np.divide(a, b), [0, 0.199951171875, 2, 1, 0.66650390625])

    assert_equal(np.equal(a, b), [False, False, False, True, False])
    assert_equal(np.not_equal(a, b), [True, True, True, False, True])
    assert_equal(np.less(a, b), [False, True, False, False, True])
    assert_equal(np.less_equal(a, b), [False, True, False, True, True])
    assert_equal(np.greater(a, b), [True, False, True, False, False])
    assert_equal(np.greater_equal(a, b), [True, False, True, True, False])
    assert_equal(np.logical_and(a, b), [False, True, True, True, True])
    assert_equal(np.logical_or(a, b), [True, True, True, True, True])
    assert_equal(np.logical_xor(a, b), [True, False, False, False, False])
    assert_equal(np.logical_not(a), [True, False, False, False, False])

    assert_equal(np.isnan(c), [False, False, False, True, False])
    assert_equal(np.isinf(c), [False, False, True, False, False])
    assert_equal(np.isfinite(c), [True, True, False, False, True])
    assert_equal(np.signbit(b), [True, False, False, False, False])

    assert_equal(np.copysign(b, a), [2, 5, 1, 4, 3])

    assert_equal(np.maximum(a, b), [0, 5, 2, 4, 3])
    x = np.maximum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [0, 5, 1, 0, 6])

    assert_equal(np.minimum(a, b), [-2, 1, 1, 4, 2])
    x = np.minimum(b, c)
    assert_(np.isnan(x[3]))
    x[3] = 0
    assert_equal(x, [-2, -1, -np.inf, 0, 3])

    assert_equal(np.fmax(a, b), [0, 5, 2, 4, 3])
    assert_equal(np.fmax(b, c), [0, 5, 1, 4, 6])
    assert_equal(np.fmin(a, b), [-2, 1, 1, 4, 2])
    assert_equal(np.fmin(b, c), [-2, -1, -np.inf, 4, 3])

    assert_equal(np.floor_divide(a, b), [0, 0, 2, 1, 0])
    assert_equal(np.remainder(a, b), [0, 1, 0, 0, 2])
    assert_equal(np.divmod(a, b), ([0, 0, 2, 1, 0], [0, 1, 0, 0, 2]))
    assert_equal(np.square(b), [4, 25, 1, 16, 9])
    assert_equal(np.reciprocal(b),
                 [-0.5, 0.199951171875, 1, 0.25, 0.333251953125])
    assert_equal(np.ones_like(b), [1, 1, 1, 1, 1])
    assert_equal(np.conjugate(b), b)
    assert_equal(np.absolute(b), [2, 5, 1, 4, 3])
    assert_equal(np.negative(b), [2, -5, -1, -4, -3])
    assert_equal(np.positive(b), b)
    assert_equal(np.sign(b), [-1, 1, 1, 1, 1])
    assert_equal(np.modf(b), ([0, 0, 0, 0, 0], b))
    assert_equal(np.frexp(b), ([-0.5, 0.625, 0.5, 0.5, 0.75], [2, 3, 1, 3, 2]))
    assert_equal(np.ldexp(b, [0, 1, 2, 4, 2]), [-2, 10, 4, 64, 12])
def asymm_ellipse(left, right, upper, lower, phi):
    phi = np.divmod(phi + 2*np.pi, 2*np.pi)[1]  # ensure phi is in [0, 2*pi)
    b = np.where(phi <= np.pi, upper, lower)
    a = np.where(np.logical_or(phi <= np.pi/2, phi >= 3*np.pi/2), right, left)
    m = np.logical_and(a != 0, b != 0)
    r = np.zeros(phi.shape)  # where a or b is zero, the radius stays zero
    r[m] = a[m]*b[m] / np.sqrt((b[m]*np.cos(phi[m]))**2 + (a[m]*np.sin(phi[m]))**2)
    return r
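# A quick usage check (not from the original source): with all four
# semi-axes equal, asymm_ellipse should reduce to a circle of that radius.
import numpy as np
phi = np.linspace(0, 2 * np.pi, 5)
ones = np.ones_like(phi)
print(asymm_ellipse(2 * ones, 2 * ones, 2 * ones, 2 * ones, phi))
# expected: [2. 2. 2. 2. 2.]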
def convert_simcell_index_to_latlong(df, column, rows, cols, left, bottom, cell_size):
    i = df[[column]].values
    x, y = np.divmod(i, cols)
    x_m = x * cell_size
    y_m = y * cell_size
    x_meters = left + x_m
    y_meters = bottom - y_m
    longs, lats = pyproj.transform(vicgrid94, wgs84, x_meters, y_meters)
    return longs, lats
def _partition_or_replicate_on_host(self, tensor, dims):
  """Partitions or replicates the input tensor.

  The ops inside this function are placed on the host side.

  Args:
    tensor: The input tensor which will be partitioned or replicated.
    dims: A list of integers describing how to partition the input tensor.

  Returns:
    An iterator of `Tensor`s or a list of partitioned tensors.
  """
  if dims is None:
    return itertools.repeat(tensor)
  dims = np.array(dims)
  self._check_input_partition_dims(tensor, dims)
  output = [tensor]
  shape_list = np.array(tensor.shape.as_list())
  quotients, remainders = np.divmod(shape_list, dims)
  for axis, (quotient, remainder, dim, original_size) in enumerate(
      zip(quotients, remainders, dims, shape_list)):
    if dim <= 1:
      continue
    if remainder > 0:
      # For each dimension, when it cannot be evenly partitioned, XLA assumes
      # tensors are partitioned in a greedy manner by using
      # ceil_ratio(size/dim) first. E.g. 2D tensor with shape (5, 14) and dims
      # are (2, 4). Since 5 % 2 = 1 and 14 % 4 = 2, [5, 14] =>
      # [[(3, 4), (3, 4), (2, 4), (2, 2)],
      #  [(2, 4), (2, 4), (2, 4), (2, 2)]]
      ceil_ratio = quotient + 1
      num_full_slots, left_over = np.divmod(original_size, ceil_ratio)
      num_or_size_splits = [ceil_ratio] * num_full_slots + [left_over]
      if len(num_or_size_splits) < dim:
        num_or_size_splits += [0] * (dim - len(num_or_size_splits))
      new_output = []
      for x in output:
        new_output.append(
            array_ops.split(
                x, num_or_size_splits=num_or_size_splits, axis=axis))
      output = new_output
    else:
      output = [array_ops.split(x, dim, axis=axis) for x in output]
  output = nest.flatten(output)
  return output
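# A minimal standalone sketch (not from the TensorFlow source) of the greedy
# ceil-ratio split-size computation used above, in plain NumPy. The helper
# name greedy_split_sizes is hypothetical.
import numpy as np

def greedy_split_sizes(size, dim):
    # Two divmods: one to detect uneven partitioning, one to greedily fill
    # slots of size ceil(size / dim), padding with zero-sized parts.
    quotient, remainder = np.divmod(size, dim)
    if remainder == 0:
        return [quotient] * dim
    ceil_ratio = quotient + 1
    num_full_slots, left_over = np.divmod(size, ceil_ratio)
    sizes = [ceil_ratio] * num_full_slots + [left_over]
    return sizes + [0] * (dim - len(sizes))

assert greedy_split_sizes(14, 4) == [4, 4, 4, 2]
assert greedy_split_sizes(5, 2) == [3, 2]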
def get_neighbours_square(k, Lx, Ly):
    """Return the neighbours of site k in the square lattice."""
    ky, kx = np.divmod(k, Lx)
    return [(kx + 1 + Lx) % Lx + ky * Lx,
            (kx - 1 + Lx) % Lx + ky * Lx,
            kx + ((ky + 1 + Ly) % Ly) * Lx,
            kx + ((ky - 1 + Ly) % Ly) * Lx]
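# A quick usage check (not from the original source): on a 3x3 periodic
# lattice, site 0 sits at (x=0, y=0), so left and down neighbours wrap around.
print(get_neighbours_square(0, 3, 3))  # expected: [1, 2, 3, 6]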
def test_two_argument_two_output_ufunc_inplace(self, value):
    v = value * u.m
    divisor = 70. * u.cm
    v_copy = v.copy()
    tmp = v.copy()
    # cannot use out1, out2 keywords with numpy 1.7
    check = np.divmod(v, divisor, tmp, v)
    assert check[0] is tmp and check[1] is v
    assert tmp.unit == u.dimensionless_unscaled
    assert v.unit == v_copy.unit
    # can also replace in last position if no scaling is needed
    v2 = v_copy.to(divisor.unit)
    check2 = np.divmod(v2, divisor, v2, tmp)
    assert check2[0] is v2 and check2[1] is tmp
    assert v2.unit == u.dimensionless_unscaled
    assert tmp.unit == divisor.unit
    # but cannot replace input with first output if scaling is needed
    with pytest.raises(TypeError):
        np.divmod(v_copy, divisor, v_copy, tmp)
def iter_neigh_box(self):
    from copy import deepcopy
    for ii in self.search_idx[0]:
        for jj in self.search_idx[1]:
            for kk in self.search_idx[2]:
                box_shift, box_pos = np.divmod([ii, jj, kk], self.nbins_c)
                neigh_box_idx = self.cell2lin(box_pos)
                jcenters = deepcopy(self.boxlist[neigh_box_idx].icenters)
                for jneigh in jcenters:
                    yield jneigh, deepcopy(box_shift)
def vdc(nSeq, base=2):
    seq = np.zeros((nSeq,))
    for i in np.arange(nSeq):
        q = i + 1
        denom = 1
        while q > 0:
            denom *= base
            q, mod = np.divmod(q, base)
            seq[i] += mod / float(denom)
    return seq
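# A short usage check (not from the original source): the first terms of the
# base-2 van der Corput sequence, obtained by bit-reversing 1..4 via divmod.
print(vdc(4))  # expected: [0.5, 0.25, 0.75, 0.125]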
def test_scgeom():
    g = si.geom.graphene()
    gsc = geom_sc_geom(g)
    sc_off = g.sc.sc_off
    acells, fxyz1 = np.divmod(gsc.xyz.dot(g.sc.icell.T), 1)
    acells[np.isclose(fxyz1, 1)] += 1
    for i, acell in enumerate(acells):
        isc_off = i // 2
        assert np.allclose(acell, sc_off[isc_off])
def _fp_prime_element_row_index_to_image_row_index(self, original_rows,
                                                   index_arr, num_desc,
                                                   num_desc_max):
    atom_indices_for_specific_element, desc_indices = np.divmod(
        original_rows, num_desc)
    atom_indices_in_image = index_arr[atom_indices_for_specific_element]
    new_row = atom_indices_in_image * num_desc_max + desc_indices
    return new_row
def _round_to_nearest_half_even(values, unit):
    """Copied from pandas."""
    if unit % 2:
        return _ceil_int(values - unit // 2, unit)
    quotient, remainder = np.divmod(values, unit)
    mask = np.logical_or(
        remainder > (unit // 2),
        np.logical_and(remainder == (unit // 2), quotient % 2)
    )
    quotient[mask] += 1
    return quotient * unit
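# A worked example (not from the original source) of the half-to-even rule
# implemented above with unit=10: exact halves round toward an even multiple.
import numpy as np
vals = np.array([5, 15, 25, 26])
print(_round_to_nearest_half_even(vals, 10))
# expected: [ 0 20 20 30] -> 5 and 25 round down to even multiples,
# 15 rounds up to 20, and 26 rounds normally to 30.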
def weighted_vote_into_cells(magnitude, angle):
    idx, weight = np.divmod(angle, 20)
    mask_lower = np.equal(idx[..., np.newaxis], np.arange(9))
    mask_upper = np.equal(((idx + 1) % 9)[..., np.newaxis], np.arange(9))
    histogram = mask_lower.astype(np.float64) * (magnitude * (weight / 20))[..., np.newaxis] \
        + mask_upper.astype(np.float64) * (magnitude * (1 - (weight / 20)))[..., np.newaxis]
    return histogram.sum(axis=(2, 3))
def prime_finder(array):
    prime = []
    for i in array:
        division_array = np.array(range(2, i))
        remainder = np.divmod(i, division_array)[1]
        if 0 in remainder:
            continue
        else:
            prime.append(i)
    return np.array(prime)
def log_and_save_initial(self):
    """
    Perform the initial log and save.
    """
    if self.should_save:
        # Notify the user where the file is being saved.
        print("QOC is saving this optimization run to {}."
              "".format(self.save_file_path))

        save_count, save_count_remainder = np.divmod(
            self.iteration_count, self.save_iteration_step)
        density_count = len(self.initial_densities)
        # If the final iteration doesn't fall on a save step, add a save step.
        if save_count_remainder != 0:
            save_count += 1

        with h5py.File(self.save_file_path, "w") as save_file:
            save_file["complex_controls"] = self.complex_controls
            save_file["control_count"] = self.control_count
            save_file["control_step_count"] = self.control_step_count
            save_file["controls"] = np.zeros(
                (save_count, self.control_step_count, self.control_count,),
                dtype=self.initial_controls.dtype)
            save_file["cost_names"] = np.array(
                [np.string_("{}".format(cost)) for cost in self.costs])
            save_file["densities"] = np.zeros(
                (save_count, density_count,
                 self.hilbert_size, self.hilbert_size),
                dtype=np.complex128)
            save_file["error"] = np.zeros((save_count), dtype=np.float64)
            save_file["evolution_time"] = self.evolution_time
            save_file["grads"] = np.zeros(
                (save_count, self.control_step_count, self.control_count),
                dtype=self.initial_controls.dtype)
            save_file["initial_controls"] = self.initial_controls
            save_file["initial_densities"] = self.initial_densities
            save_file["interpolation_policy"] = "{}".format(
                self.interpolation_policy)
            save_file["iteration_count"] = self.iteration_count
            save_file["max_control_norms"] = self.max_control_norms
            save_file["operation_policy"] = "{}".format(
                self.operation_policy)
            save_file["optimizer"] = "{}".format(self.optimizer)
            save_file["system_step_multiplier"] = self.system_step_multiplier
        #ENDWITH
    #ENDIF

    if self.should_log:
        print("iter | total error | grads_l2 \n"
              "=========================================")
def triangulate_pts_opt(X1, X2, P1, P2, stand_lens, sub_size):
    X1_Origin = X1[:2]
    X2_Origin = X2[:2]
    pts_num = X1.shape[-1]
    error1_array = np.zeros((1, pts_num))[0]
    error2_array = np.zeros((1, pts_num // 3))[0]
    divider, remainder = np.divmod(pts_num, sub_size)
    for i in np.arange(divider + 1):
        start_col = sub_size * i
        if i == divider:
            step_size = remainder
        else:
            step_size = sub_size
        X1 = X1_Origin[:, start_col:start_col + step_size]
        X2 = X2_Origin[:, start_col:start_col + step_size]
        X = np.row_stack((X1, X2))
        pts_num = X1.shape[-1]
        X_one_col = X.T.reshape((4 * pts_num, 1))
        p1_31, p1_32, p1_33 = P1[2, 0:3]
        p2_31, p2_32, p2_33 = P2[2, 0:3]
        A2 = np.array([[p1_31, p1_32, p1_33],
                       [p1_31, p1_32, p1_33],
                       [p2_31, p2_32, p2_33],
                       [p2_31, p2_32, p2_33]])
        A1 = np.repeat(X_one_col, 3, axis=1)
        A2 = np.tile(A2, (pts_num, 1))
        A3 = np.row_stack((P1[:2, :3], P2[:2, :3]))
        A3 = np.tile(A3, (pts_num, 1))
        A = A1 * A2 - A3
        A = diag_block_mat_slicing(A, pts_num, (4, 3))
        B1 = np.row_stack((P1[0:2, 3], P2[0:2, 3])).reshape((4, 1))
        B1 = np.tile(B1, (pts_num, 1))
        B2 = np.row_stack((P1[2, 3] * X1, P2[2, 3] * X2))
        B2 = B2.T.reshape((4 * pts_num, 1))
        B = B1 - B2
        A_T = A.T
        pts_ref_csys = (inv(A_T.dot(A)).dot(A_T).dot(B)).reshape(
            (pts_num // 3, 9))
        pts_move_ref_csys = np.column_stack((pts_ref_csys[:, 6:9],
                                             pts_ref_csys[:, 0:6]))
        delta_p2p = pts_ref_csys - pts_move_ref_csys
        dist = np.linalg.norm(delta_p2p.reshape(pts_num, 3), axis=1)
        dist_error = dist - np.tile(stand_lens, pts_num // 3)
        error1_array[start_col:start_col + step_size] = dist_error
        dist_3cols = dist.reshape((pts_num // 3, 3))
        error2_array[start_col//3:(start_col+step_size)//3] = \
            dist_3cols[:, 2] - dist_3cols[:, 0] - dist_3cols[:, 1]
    return error1_array, error2_array
def rt2enc_v1(rt, grid):
    """
    :param rt: n, k, 2 | log[d, tau] for each ped (n,) to each vic (k,)
        modifies rt during clipping to grid
    :param grid: (lx, ly, dx, dy, nx, ny)
        lx, ly | lower bounds for x and y coordinates of the n*k (2,) in rt
        dx, dy | step sizes of the regular grid
        nx, ny | number of grid points in each coordinate (so nx*ny total)
    :return: n, k, m | m = nx*ny, encoding for each ped to each vic
        uses row-major indexing for the flattened (2d) indices for
        nx 'rows' and ny 'columns'
    """
    n, k = rt.shape[:2]
    nx, ny = np.array(grid[-2:]).astype(int)  # np.int was removed in numpy 1.24
    m = nx * ny
    Z = np.zeros((n, k, m), dtype=np.float32)
    clip2grid(rt, grid)

    # n, k
    a_x = np.empty((n, k))
    r_x = np.empty((n, k), dtype=np.float32)
    np.divmod(rt[..., 0] - grid[0], grid[2], a_x, r_x)
    th_x = 1 - r_x / grid[2]
    a_y = np.empty((n, k))
    r_y = np.empty((n, k), dtype=np.float32)
    np.divmod(rt[..., 1] - grid[1], grid[3], a_y, r_y)
    th_y = 1 - r_y / grid[3]

    # 1d inds for m, | n, k
    m_inds = (ny * a_x + a_y).astype(int)
    assert np.all(m_inds >= 0), 'ny {} ax {} ay {}'.format(
        ny, a_x.min(), a_y.min())
    offsets = np.array([0, ny, 1, ny + 1], dtype=int)
    nk_flat_inds = np.arange(0, n * k * m, m, dtype=int)
    nk_flat_inds += m_inds.reshape(-1)
    Z.flat[nk_flat_inds + offsets[0]] = (th_x * th_y).reshape(-1)
    Z.flat[nk_flat_inds + offsets[1]] = ((1 - th_x) * th_y).reshape(-1)
    Z.flat[nk_flat_inds + offsets[2]] = (th_x * (1 - th_y)).reshape(-1)
    Z.flat[nk_flat_inds + offsets[3]] = ((1 - th_x) * (1 - th_y)).reshape(-1)
    return Z
def align_equispaced(schedule: Schedule, duration: int) -> Schedule:
    """Schedule a list of pulse instructions with equivalent interval.

    Args:
        schedule: Input schedule of which top-level ``child`` nodes
            will be rescheduled.
        duration: Duration of context. This should be larger than the
            schedule duration.

    Returns:
        New schedule with input ``schedule`` child schedules and instructions
        aligned with equivalent interval.

    Notes:
        This context is convenient for writing a PDD or Hahn echo sequence,
        for example.
    """
    total_duration = sum([child.duration for _, child in schedule._children])
    if duration and duration < total_duration:
        return schedule

    total_delay = duration - total_duration

    if len(schedule._children) > 1:
        # Calculate the interval in between sub-schedules.
        # If the duration cannot be divided by the number of sub-schedules,
        # the modulo is appended and prepended to the input schedule.
        interval, mod = np.divmod(total_delay, len(schedule._children) - 1)
    else:
        interval = 0
        mod = total_delay

    # Calculate pre schedule delay
    delay, mod = np.divmod(mod, 2)

    aligned = Schedule()

    # Insert sub-schedules with interval
    _t0 = int(aligned.stop_time + delay + mod)
    for _, child in schedule._children:
        aligned.insert(_t0, child, inplace=True)
        _t0 = int(aligned.stop_time + interval)

    return pad(aligned, aligned.channels, until=duration, inplace=True)
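# A worked example (not from the Qiskit source) of the interval arithmetic
# above: distributing a total delay of 10 dt among 4 sub-schedules.
import numpy as np
interval, mod = np.divmod(10, 4 - 1)  # -> interval=3, mod=1
delay, mod = np.divmod(mod, 2)        # -> delay=0, mod=1
# The first child starts at delay + mod = 1; each later child starts
# `interval` after the previous one ends, consuming 3 * 3 + 1 = 10 extra dt.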
def topk_cov_matrix(self, k, reverse=False):
    N, N = self.covmat.shape
    if reverse:
        _indices = np.argsort(self.covmat, axis=None)[:2 * k:2]
    else:
        _indices = np.argsort(self.covmat, axis=None)[-1:-2 * k:-2]
    indices = np.stack(np.divmod(_indices, N)).transpose()
    resid2name = {i: aa for i, aa in enumerate(self.nettop.nodes())}
    print(np.vectorize(resid2name.get)(indices))
    for elt in self.covmat.reshape(-1)[_indices]:
        print(elt)
def build_frame_index(self):
    parsed_keys = [k.decode('ascii').split('/') for k in self.keys]
    steps = np.array([int(l[-1]) for l in parsed_keys])
    seeds = np.array([int(l[-3]) for l in parsed_keys])
    cam_ids = [int(l[-2].split('_')[-1]) for l in parsed_keys]
    scene_ids, config_ids = np.divmod(steps + seeds * len(np.unique(steps)),
                                      self.n_fixed_steps)
    frame_index = pd.DataFrame({'scene_id': scene_ids,
                                'config_id': config_ids,
                                'cam_id': cam_ids,
                                'view_id': 0})
    if self.n_frames is not None:
        frame_index = frame_index.iloc[:self.n_frames]
    return frame_index
def step(self, action):
    reward = 0
    # action = np.random.choice(range(self.R * self.R), 1, policy)[0]
    o, d = np.divmod(int(action), int(self.R))
    # ensure there exists available cars
    # assert np.sum(self.c_state[o, :self.patience_time]) > 0
    if np.sum(self.c_state[o, 0:self.patience_time]
              - self.l_state[o, 0:self.patience_time]) <= 0:
        return self.generate_state(), action, reward, [], False
    for tt1 in range(self.patience_time):
        if self.c_state[o, tt1] - self.l_state[o, tt1] > 0:
            # print(tt1, self.c_state[o, 0:self.patience_time],
            #       self.l_state[o, 0:self.patience_time])
            break
    tt2 = self.travel_time[self.city_time + tt1, o, d]
    if self.p_state[o, d] > 0:
        reward = 1
        self.p_state[o, d] -= 1
    else:
        reward = 0
    if o == d or tt1 > 0:
        tt2 = 0
    self.step_change_dest(o, d, tt1, tt2)
    # print(reward, self.p_state[o, d], self.c_state[o, 0:self.patience_time],
    #       self.l_state[o, 0:self.patience_time])
    # print(self.city_time, reward, o, d)
    self.total_reward += reward
    self.i += 1
    # next_state = self.generate_state()
    time_update = np.sum(self.c_state[:, 0:self.patience_time]
                         - self.l_state[:, 0:self.patience_time])
    while time_update <= 0 and not self.terminate:
        self.step_time_update()
        time_update = np.sum(self.c_state[:, 0:self.patience_time]
                             - self.l_state[:, 0:self.patience_time])
    # print(self.p_state)
    if self.city_time == self.time_horizon:
        # print(self.starting_c_state)
        self.terminate = True
    next_state = self.generate_state()
    self.update_action_mask()
    return next_state, action, reward, self.action_mask, True
def test_two_argument_two_output_ufunc_inplace(self, value):
    v = value * u.m
    divisor = 70. * u.cm
    v1 = v.copy()
    tmp = v.copy()
    check = np.divmod(v1, divisor, out=(tmp, v1))
    assert check[0] is tmp and check[1] is v1
    assert tmp.unit == u.dimensionless_unscaled
    assert v1.unit == v.unit

    v2 = v.copy()
    check2 = np.divmod(v2, divisor, out=(v2, tmp))
    assert check2[0] is v2 and check2[1] is tmp
    assert v2.unit == u.dimensionless_unscaled
    assert tmp.unit == v.unit

    v3a = v.copy()
    v3b = v.copy()
    check3 = np.divmod(v3a, divisor, out=(v3a, v3b))
    assert check3[0] is v3a and check3[1] is v3b
    assert v3a.unit == u.dimensionless_unscaled
    assert v3b.unit == v.unit
def calculate_next_state_value(s, current_state_values, lmbda=1):
    '''
    Again, we work around using the 4-value probability function p, as we
    have complete knowledge of the environment's dynamics.
    '''
    res = 0
    for a in range(4):
        res += policy(a, s) * (
            expected_reward(a, s)
            + lmbda * current_state_values[np.divmod(next_state(a, s), 4)])
    return res
def iterate(self):
    qA = np.zeros([self.num_states, self.nu])
    qB = np.zeros([self.num_states, self.nu])
    qA_Last = np.zeros([self.num_states, self.nu])
    qB_Last = np.zeros([self.num_states, self.nu])
    for i in tqdm.tqdm(range(self.max_iters)):
        # choose initial state x0
        x_0 = np.array([0, 0])
        x_index = self.get_index(x_0)
        for j in range(self.step_in_iter):
            if np.random.uniform(0, 1) > self.epslion:
                score1 = qA[x_index, :]
                score2 = qB[x_index, :]
                u_index = np.argmin(score1 + score2)
            else:
                # note: randint's upper bound is exclusive, so action
                # nu - 1 is never explored here
                u_index = np.random.randint(0, self.nu - 1)
            # observe x_t+1
            next_index = self.state_transfer_table[x_index, u_index]
            # compute g(x_t, u(x_t))
            x = self.get_states(x_index)
            u = self.action_list[u_index]
            # compute TD error
            # TDerror = self.costfn(x, u) + self.alpha * min(q[next_index, :]) - q[x_index, u_index]
            # q[x_index, u_index] = q[x_index, u_index] + self.lr * TDerror
            if np.random.uniform(0, 1) > 0.5:
                # update A
                action_from_A = np.argmin(qA[next_index, :])
                TDerrorA = (self.costfn(x, u)
                            + self.gamma * qB[next_index, action_from_A]
                            - qA[x_index, u_index])
                qA[x_index, u_index] = qA[x_index, u_index] + self.lr * TDerrorA
            else:
                # update B
                action_from_B = np.argmin(qB[next_index, :])
                TDerrorB = (self.costfn(x, u)
                            + self.gamma * qA[next_index, action_from_B]
                            - qB[x_index, u_index])
                qB[x_index, u_index] = qB[x_index, u_index] + self.lr * TDerrorB
            x_index = next_index
        # we update the current Q function if there is any change,
        # otherwise we are done
        if ((qA_Last - qA) ** 2 < 1e-2).all() and ((qB_Last - qB) ** 2 < 1e-2).all():
            break
        else:
            qA_Last = qA.copy()
            qB_Last = qB.copy()
    policy = np.zeros(self.space_shape)
    value_function = np.zeros(self.space_shape)
    for k in range(self.num_states):
        iv, ix = np.divmod(k, self.nq)
        policy[ix, iv] = self.action_list[np.argmin(qA[k, :])]
        value_function[ix, iv] = min(qA[k, :])
    return value_function, policy
def base_splice(X, base):
    D = X
    X_out = np.empty([X.shape[0], 0], dtype=np.float64)
    while True:
        D, M = np.divmod(D, base)
        X_out = np.append(X_out, M, axis=1)
        if not np.any(D):
            break
    return X_out
def pt2rs(point, gap_ring, gap_sector, num_ring, num_sector):
    # point to ring and sector
    x = point[0]
    y = point[1]

    if x == 0.0:
        x = 0.001
    if y == 0.0:
        y = 0.001

    theta = xy2theta(x, y)
    faraway = np.sqrt(x * x + y * y)

    idx_ring = np.divmod(faraway, gap_ring)[0]
    idx_sector = np.divmod(theta, gap_sector)[0]

    if idx_ring >= num_ring:
        idx_ring = num_ring - 1  # python starts with 0 and ends with N-1

    return int(idx_ring), int(idx_sector)
def __getitem__(self, x):
    """Query for x belonging to any of the open intervals."""
    i = np.searchsorted(self.LR, x, side='left')
    cluster_no, in_clust = np.divmod(i, 2)
    # 'clip' fixes the problem with points right to max(R)
    # by changing the index from 2*len(L) to 2*len(L)-1.
    # This is ok, because these points have in_clust == 0 anyway.
    not_right_end = x < np.take(self.R, cluster_no, mode='clip')
    cluster_no = np.where(np.logical_and(in_clust, not_right_end),
                          cluster_no, -1)
    return cluster_no
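# A minimal standalone sketch (not the original class) of the
# searchsorted + divmod interval-membership trick used above: interleave
# left and right endpoints, then an odd insertion index means "inside".
import numpy as np

L = np.array([0.0, 5.0])              # left endpoints of two intervals
R = np.array([2.0, 7.0])              # right endpoints
LR = np.column_stack((L, R)).ravel()  # interleaved [L0, R0, L1, R1]

x = np.array([1.0, 3.0, 6.0, 8.0])
i = np.searchsorted(LR, x, side='left')
cluster_no, in_clust = np.divmod(i, 2)  # odd i -> x fell inside an interval
not_right_end = x < np.take(R, cluster_no, mode='clip')
print(np.where(np.logical_and(in_clust, not_right_end), cluster_no, -1))
# expected: [ 0 -1  1 -1]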
def __call__(self, val):
    """Perform linear interpolation on `val`, sample(s) in [0, 1]"""
    prob_bin_idx, dx = np.divmod(val**self.inv_power, self.prob_binwidth)
    exceed_max_mask = prob_bin_idx > self.n_samp - 2
    prob_bin_idx[exceed_max_mask] = self.n_samp - 2
    dx[exceed_max_mask] = self.prob_binwidth
    prob_bin_idx = prob_bin_idx.astype(int)
    y0 = self.domain_samples[prob_bin_idx]
    m = self.inv_cdf_slopes[prob_bin_idx]
    y = m * dx + y0
    return y
def push(x, starts, freqs, precisions):
    starts, freqs, precisions = map(atleast_1d, (starts, freqs, precisions))
    head, tail = x
    # assert head.shape == starts.shape == freqs.shape
    idxs = head >= ((rans_l >> precisions) << 32) * freqs
    if np.any(idxs):
        tail = stack_extend(tail, np.uint32(head[idxs]))
        head = np.copy(head)  # Ensure no side-effects
        head[idxs] >>= 32
    head_div_freqs, head_mod_freqs = np.divmod(head, freqs)
    return (head_div_freqs << precisions) + head_mod_freqs + starts, tail
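# A minimal sketch (toy values, not from the original source) of the core
# rANS encode step above: divmod folds a symbol with cumulative range
# [start, start + freq) into the low `precision` bits of the state, and a
# second divmod inverts it. All concrete values here are assumed.
import numpy as np

precision = np.uint64(16)
head = np.uint64(1 << 32)               # some renormalized state
start, freq = np.uint64(1000), np.uint64(50)

q, r = np.divmod(head, freq)
new_head = (q << precision) + r + start

# Decoding recovers the symbol slot and the old state from the low bits:
s_q, s_r = np.divmod(new_head, np.uint64(1) << precision)
assert s_r - start < freq and s_q * freq + (s_r - start) == head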
def pt2rs(point, gap_ring, gap_sector, num_ring, num_sector):
    x = point[0]
    y = point[1]
    # z = point[2]

    if x == 0.0:
        x = 0.001
    if y == 0.0:
        y = 0.001

    theta = xy2theta(x, y)
    faraway = np.sqrt(x * x + y * y)

    idx_ring = np.divmod(faraway, gap_ring)[0]
    idx_sector = np.divmod(theta, gap_sector)[0]

    if idx_ring >= num_ring:
        idx_ring = num_ring - 1  # python starts with 0 and ends with N-1

    return int(idx_ring), int(idx_sector)
def view_composite_source_model(token, dstore):
    """
    Show the structure of the CompositeSourceModel in terms of grp_id
    """
    lst = []
    n = len(dstore['full_lt'].sm_rlzs)
    trt_smrs = dstore['trt_smrs'][:]
    for grp_id, df in dstore.read_df('source_info').groupby('grp_id'):
        trts, sm_rlzs = numpy.divmod(trt_smrs[grp_id], n)
        lst.append((str(grp_id), to_str(trts), to_str(sm_rlzs), len(df)))
    return numpy.array(lst, dt('grp_id trt smrs num_sources'))
def rt2add_enc_v1(rt, grid):
    """
    :param rt: n, k, 2 | log[d, tau] for each ped (n,) to each vic (k,)
        modifies rt during clipping to grid
    :param grid: (lx, ly, dx, dy, nx, ny)
        lx, ly | lower bounds for x and y coordinates of the n*k (2,) in rt
        dx, dy | step sizes of the regular grid
        nx, ny | number of grid points in each coordinate (so nx*ny total)
    :return: n, m | m = nx*ny, encoding for each ped
        uses row-major indexing for the flattened (2d) indices for
        nx 'rows' and ny 'columns'
    """
    n, k = rt.shape[:2]
    nx, ny = np.array(grid[-2:]).astype(np.int32)
    m = nx * ny
    Z = np.zeros((n, m), dtype=np.float32)
    clip2grid(rt, grid)

    # n, k
    a_x = np.empty((n, k), dtype=np.int32)
    r_x = np.empty((n, k), dtype=np.float32)
    np.divmod(rt[..., 0] - grid[0], grid[2], a_x, r_x, casting='unsafe')
    th_x = 1 - r_x / grid[2]
    a_y = np.empty((n, k), dtype=np.int32)
    r_y = np.empty((n, k), dtype=np.float32)
    np.divmod(rt[..., 1] - grid[1], grid[3], a_y, r_y, casting='unsafe')
    th_y = 1 - r_y / grid[3]

    # 1d inds for m, | n, k
    c_x = ny * a_x + a_y
    offsets = np.array([0, ny, 1, ny + 1], dtype=np.int32)
    # n, k, 4
    inds = c_x[..., np.newaxis] + offsets[np.newaxis, :]
    vals = np.dstack((th_x * th_y, (1 - th_x) * th_y,
                      th_x * (1 - th_y), (1 - th_x) * (1 - th_y)))
    row_inds = np.repeat(np.arange(n, dtype=np.int32), 4 * k)
    np.add.at(Z, (row_inds, inds.ravel()), vals.ravel())
    return Z
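# A minimal sketch (toy values, not from the original source) of the
# divmod-based bilinear weighting used in rt2enc_v1 / rt2add_enc_v1 above:
# on a 1D grid with lower bound lx=0 and step dx=0.5, one point splits its
# weight between the two surrounding grid points.
import numpy as np

lx, dx = 0.0, 0.5
pt = np.array([0.8])
a, r = np.divmod(pt - lx, dx)  # a: lower bin index, r: offset into the bin
th = 1 - r / dx                # weight on the lower grid point
print(a, th, 1 - th)           # expected (up to float rounding): [1.] [0.4] [0.6]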
def align(self, schedule: Schedule) -> Schedule:
    """Reallocate instructions according to the policy.

    Only top-level sub-schedules are aligned. If sub-schedules are nested,
    nested schedules are not recursively aligned.

    Args:
        schedule: Schedule to align.

    Returns:
        Schedule with reallocated instructions.
    """
    duration = self._context_params[0]
    instruction_duration_validation(duration)

    total_duration = sum([child.duration for _, child in schedule._children])
    if duration < total_duration:
        return schedule

    total_delay = duration - total_duration

    if len(schedule._children) > 1:
        # Calculate the interval in between sub-schedules.
        # If the duration cannot be divided by the number of sub-schedules,
        # the modulo is appended and prepended to the input schedule.
        interval, mod = np.divmod(total_delay, len(schedule._children) - 1)
    else:
        interval = 0
        mod = total_delay

    # Calculate pre schedule delay
    delay, mod = np.divmod(mod, 2)

    aligned = Schedule()

    # Insert sub-schedules with interval
    _t0 = int(aligned.stop_time + delay + mod)
    for _, child in schedule._children:
        aligned.insert(_t0, child, inplace=True)
        _t0 = int(aligned.stop_time + interval)

    return pad(aligned, aligned.channels, until=duration, inplace=True)
def decimate(self, nx, ny):
    if len(self) != 0:
        # get some values for quicker access
        npix = np.uint64(nx) * np.uint64(ny)

        # get the values to decimate
        xyl, val = [], []
        for pdt in self:
            xy = pdt.x.astype(np.uint64) + nx * pdt.y.astype(np.uint64)
            xyl.extend(xy + npix * pdt.lam.astype(np.uint64))
            val.extend(pdt.val.astype(np.float64))

        if len(xyl) > 0:
            xyl = np.array(xyl)
            val = np.array(val)

            # find unique indices & compress
            xylu = np.unique(xyl)
            xylc = np.digitize(xyl, xylu) - 1

            # sum over repeated indices
            vu = np.bincount(xylc, weights=val)

            # go back to 1d indices
            lamu, xygu = np.divmod(xylu, npix)
            yu, xu = np.divmod(xygu, nx)

            # get the wavelengths
            wu = self.wav[lamu]

            # return the DDT
            ddt = DDT(self.segid, xu, yu, wu, vu)
        else:
            # a null table
            ddt = DDT(self.segid)
    else:
        # a null table
        ddt = DDT(self.segid)

    return ddt
def apply_rules_for_generation_conv(grid):
    new_gen_grid = grid.copy()
    # new_gen_grid = np.zeros_like(grid)
    # new_gen_grid[grid == LUMBER] = LUMBER  # lumber is set now and replaced later

    # 1) open with 3 or more trees around -> tree
    # 2) tree with 3 or more lumbers -> lumber
    # 3) lumber with no tree and no lumber around -> open
    rule = np.array([
        [1, 1, 1],
        [1, MIDDLE, 1],
        [1, 1, 1],
    ])
    # todo: add options to all others (any number of trees for lumbers check,
    #       any number of lumbers for tree check...)
    # rules are symmetric and thus flip invariant, no need for flipping
    # conv_res = signal.convolve2d(grid, rule, mode='same')
    conv_res = ndimage.convolve(grid, rule, mode='constant')
    # conv_res = signal.fftconvolve(grid, rule, mode='same')

    middle_pos, around = np.divmod(conv_res, MIDDLE)
    around_lumbers, around_trees = np.divmod(around, LUMBER)
    around_trees = np.round(around_trees / TREE).astype(np.int32)

    # 1)
    # big magic with coprimes and modulo, muhaha
    should_appear_tree = (middle_pos == OPEN) & (around_trees >= 3)
    indices = np.where(should_appear_tree)
    new_gen_grid[indices] = TREE

    # 2)
    should_appear_lumber = (middle_pos == TREE) & (around_lumbers >= 3)
    indices = np.where(should_appear_lumber)
    new_gen_grid[indices] = LUMBER

    # 3)
    should_appear_open = (middle_pos == LUMBER) & ((around_lumbers < 1) | (around_trees < 1))
    indices = np.where(should_appear_open)
    new_gen_grid[indices] = OPEN

    return new_gen_grid
def initialize(self, batch_size):
    n_batches, remainder = np.divmod(self.length, batch_size)
    n_batches = int(n_batches)
    remainder = int(remainder)
    self.batch_size = batch_size
    # Drop the remainder explicitly: slicing with [:-remainder] would return
    # an empty array when remainder == 0.
    n_keep = n_batches * batch_size
    self.graphs = torch.from_numpy(self.graphs_np)[:n_keep].contiguous().view(
        n_batches, batch_size, self.order, self.vertex_dim)
    self.targets = torch.from_numpy(self.targets_np)[:n_keep].contiguous().view(
        n_batches, batch_size, self.target_dim)
    self.dads = torch.from_numpy(self.dads_np)[:n_keep].contiguous().view(
        n_batches, batch_size, self.order, self.order)
    self.graphs = Variable(self.graphs).float()
    self.targets = Variable(self.targets).float()
    self.dads = Variable(self.dads).float()
def convert_simcell_index_to_cell_polygon(df, column, rows, cols, left, bottom, cell_size):
    i = df[[column]].values
    return [
        [pyproj.transform(vicgrid94, wgs84, left + (x[0] * cell_size), bottom - (y[0] * cell_size)),
         pyproj.transform(vicgrid94, wgs84, left + ((x[0] - 1) * cell_size), bottom - (y[0] * cell_size)),
         pyproj.transform(vicgrid94, wgs84, left + ((x[0] - 1) * cell_size), bottom - ((y[0] + 1) * cell_size)),
         pyproj.transform(vicgrid94, wgs84, left + (x[0] * cell_size), bottom - ((y[0] + 1) * cell_size)),
         pyproj.transform(vicgrid94, wgs84, left + (x[0] * cell_size), bottom - (y[0] * cell_size))]
        for (x, y) in [np.divmod(a, cols) for a in i]
    ]
def subsample_array(x, step, pad=False, mode='reflect'):
    """
    Use :func:`numpy.lib.stride_tricks.as_strided` to construct a view
    of the input array that represents a subsampling of the array by the
    specified step, with different offsets of the subsampling as
    additional axes of the array. If the input array shape is not evenly
    divisible by the subsampling step, it is padded before the view is
    constructed. For example, if ``x`` is a 6 x 6 array, the output of
    ``y = subsample_array(x, (2, 2))`` is a 2 x 2 x 3 x 3 array, with the
    first subsampling offset indexed as ``y[0, 0]``.

    Parameters
    ----------
    x : ndarray
        Input array
    step : tuple
        Subsampling step size
    pad : bool, optional (default False)
        Flag indicating whether the input array should be padded when its
        size is not integer divisible by the step size
    mode : string, optional (default 'reflect')
        A pad mode specification for :func:`numpy.pad`

    Returns
    -------
    xs : ndarray
        An array representing different subsampling offsets in the input
        array
    """
    if np.any(np.greater_equal(step, x.shape)):
        raise ValueError('Step size must be less than array size on each axis')
    sbsz, dvmd = np.divmod(x.shape, step)
    if pad and np.any(dvmd):
        sbsz += np.clip(dvmd, 0, 1)
        psz = np.subtract(np.multiply(sbsz, step), x.shape)
        pdt = [(0, p) for p in psz]
        x = np.pad(x, pdt, mode=mode)
    outsz = step + tuple(sbsz)
    outstrd = x.strides + tuple(np.multiply(step, x.strides))
    return np.lib.stride_tricks.as_strided(x, outsz, outstrd)
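# A quick usage check (not from the original source) reproducing the
# docstring example: a 6x6 array subsampled with step (2, 2) yields a
# 2 x 2 x 3 x 3 view of the four offset sub-grids.
import numpy as np
x = np.arange(36).reshape(6, 6)
y = subsample_array(x, (2, 2))
print(y.shape)  # expected: (2, 2, 3, 3)
print(y[0, 0])  # rows 0, 2, 4 and columns 0, 2, 4 of x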
def _partition_or_replicate_on_host(self, tensor, dims):
  """Partitions or replicates the input tensor.

  The ops inside this function are placed on the host side.

  Args:
    tensor: The input tensor which will be partitioned or replicated.
    dims: A list of integers describing how to partition the input tensor.

  Returns:
    An iterator of `Tensor`s or a list of partitioned tensors.
  """
  if dims is None:
    return itertools.repeat(tensor)
  dims = np.array(dims)
  self._check_input_partition_dims(tensor, dims)
  output = [tensor]
  divds, remainders = np.divmod(np.array(tensor.shape.as_list()), dims)
  for axis, (divd, remainder, dim) in enumerate(
      np.dstack((divds, remainders, dims))[0]):
    if dim <= 1:
      continue
    if remainder > 0:
      # For each dimension, when it cannot be evenly partitioned, XLA assumes
      # the sizes of the last parts are smaller by 1. E.g. 2D tensor with
      # shape (5, 14) and dims are (2, 4). Since 5 % 2 = 1 and 14 % 4 = 2,
      # [5, 14] => [[(3, 3), (3, 3), (2, 3), (2, 3)],
      #             [(2, 3), (2, 3), (2, 2), (2, 2)]]
      output = [
          array_ops.split(
              x,
              num_or_size_splits=[divd + 1] * remainder
              + [divd] * (dim - remainder),
              axis=axis) for x in output
      ]
    else:
      output = [array_ops.split(x, dim, axis=axis) for x in output]
  output = nest.flatten(output)
  return output