def compute_chisq(patchpixels, patchivar, patchmodel, psferr):
    # Total per-pixel inverse variance: PSF model-error variance plus the pixel
    # variance (1 / ivar), restricted to pixels with positive model sigma and ivar.
    modelsigma = psferr * patchmodel
    ii = (modelsigma > 0) & (patchivar > 0)
    totpix_ivar = ii * cp.reciprocal(
        ~ii + ii * modelsigma * modelsigma + ii * cp.reciprocal(ii * patchivar + ~ii))
    chi = (patchpixels - patchmodel) * cp.sqrt(totpix_ivar)
    return chi * chi
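# Hypothetical usage sketch for compute_chisq (not from the original source):
# the array values and psferr are illustrative assumptions; a zero ivar entry
# stands in for a masked pixel, which contributes zero to the result.
import cupy as cp

patchpixels = cp.array([[1.2, 0.9], [1.1, 1.0]])
patchmodel = cp.array([[1.0, 1.0], [1.0, 1.0]])
patchivar = cp.array([[4.0, 0.0], [4.0, 4.0]])
chisq = compute_chisq(patchpixels, patchivar, patchmodel, psferr=0.05)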
def hilbert(n):
    """Create a Hilbert matrix of order ``n``.

    Returns the ``n`` by ``n`` array with entries
    ``h[i,j] = 1 / (i + j + 1)``.

    Args:
        n (int): The size of the array to create.

    Returns:
        cupy.ndarray: The Hilbert matrix.

    .. seealso:: :func:`scipy.linalg.hilbert`
    """
    values = cupy.arange(1, 2 * n, dtype=cupy.float64)
    cupy.reciprocal(values, values)
    return hankel(values[:n], r=values[n - 1:])
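# Usage sketch for hilbert(); a hedged example assuming cupy and the hankel
# helper are imported as in the function above. The expected corner values
# follow directly from h[i, j] = 1 / (i + j + 1).
import cupy

h = hilbert(3)
# h is approximately
# [[1.     0.5    0.3333]
#  [0.5    0.3333 0.25  ]
#  [0.3333 0.25   0.2   ]]
assert cupy.allclose(h[0, 0], 1.0) and cupy.allclose(h[2, 2], 0.2)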
def __truediv__(self, other):
    """Point-wise division by another matrix, vector or scalar"""
    if _util.isscalarlike(other):
        dtype = self.dtype
        if dtype == numpy.float32:
            # Note: This is a work-around to make the output dtype the same
            # as SciPy. It might be SciPy version dependent.
            dtype = numpy.float64
        dtype = cupy.result_type(dtype, other)
        d = cupy.reciprocal(other, dtype=dtype)
        return multiply_by_scalar(self, d)
    elif _util.isdense(other):
        other = cupy.atleast_2d(other)
        check_shape_for_pointwise_op(self.shape, other.shape)
        return self.todense() / other
    elif base.isspmatrix(other):
        # Note: If broadcasting is needed, an exception is raised here for
        # compatibility with SciPy, as SciPy does not support broadcasting
        # in the "sparse / sparse" case.
        check_shape_for_pointwise_op(self.shape, other.shape,
                                     allow_broadcasting=False)
        dtype = numpy.promote_types(self.dtype, other.dtype)
        if dtype.char not in 'FD':
            dtype = numpy.promote_types(numpy.float64, dtype)
        # Note: The following implementation converts two sparse matrices
        # into dense matrices and then performs a point-wise division,
        # which can use lots of memory.
        self_dense = self.todense().astype(dtype, copy=False)
        return self_dense / other.todense()
    raise NotImplementedError
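# Usage sketch for the scalar branch above, assuming this __truediv__ belongs to
# a cupyx.scipy.sparse matrix class such as csr_matrix: dividing by a scalar is
# implemented as multiplication by cupy.reciprocal(scalar).
import cupy
from cupyx.scipy import sparse

a = sparse.csr_matrix(cupy.array([[1.0, 0.0], [0.0, 4.0]]))
b = a / 2.0
print(b.toarray())   # [[0.5 0. ]
                     #  [0.  2. ]]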
def pinv(a, rcond=1e-15):
    """Compute the Moore-Penrose pseudoinverse of a matrix.

    It computes a pseudoinverse of a matrix ``a``, which is a generalization
    of the inverse matrix with Singular Value Decomposition (SVD).
    Note that it automatically removes small singular values for stability.

    Args:
        a (cupy.ndarray): The matrix with dimension ``(..., M, N)``
        rcond (float or cupy.ndarray): Cutoff parameter for small singular
            values. For stability it computes the largest singular value
            denoted by ``s``, and sets all singular values smaller than
            ``rcond * s`` to zero. Broadcasts against the stack of matrices.

    Returns:
        cupy.ndarray: The pseudoinverse of ``a`` with dimension
        ``(..., N, M)``.

    .. warning::
        This function calls one or more cuSOLVER routine(s) which may yield
        invalid results if input conditions are not met.
        To detect these invalid results, you can set the `linalg`
        configuration to a value that is not `ignore` in
        :func:`cupyx.errstate` or :func:`cupyx.seterr`.

    .. seealso:: :func:`numpy.linalg.pinv`
    """
    _util._assert_cupy_array(a)
    if a.size == 0:
        _, out_dtype = _util.linalg_common_type(a)
        m, n = a.shape[-2:]
        if m == 0 or n == 0:
            out_dtype = a.dtype  # NumPy bug?
        return cupy.empty(a.shape[:-2] + (n, m), dtype=out_dtype)

    u, s, vt = _decomposition.svd(a.conj(), full_matrices=False)

    # discard small singular values
    cutoff = rcond * cupy.amax(s, axis=-1)
    leq = s <= cutoff[..., None]
    cupy.reciprocal(s, out=s)
    s[leq] = 0

    return cupy.matmul(vt.swapaxes(-2, -1), s[..., None] * u.swapaxes(-2, -1))
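# Usage sketch for pinv(), assuming it is exposed as cupy.linalg.pinv; the check
# relies on the defining Moore-Penrose property a @ pinv(a) @ a == a.
import cupy

a = cupy.array([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])   # shape (3, 2), full column rank
a_pinv = cupy.linalg.pinv(a)                            # shape (2, 3)
assert cupy.allclose(a @ a_pinv @ a, a)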
def TTTG_Network(inputs, weight, baise, weight2, baise2, weight3, baise3,
                 weight4, baise4, L, answers, train, print_result=False):
    # Forward pass: three sigmoid hidden layers plus a sigmoid output layer.
    hidden = inputs.dot(weight) + baise
    hidden = cp.reciprocal(1 + cp.exp(-hidden))  # sigmoid
    hidden2 = hidden.dot(weight2) + baise2
    hidden2 = cp.reciprocal(1 + cp.exp(-hidden2))  # sigmoid
    hidden3 = hidden2.dot(weight3) + baise3
    hidden3 = cp.reciprocal(1 + cp.exp(-hidden3))  # sigmoid
    # hidden = hidden / (1 + cp.absolute(hidden))  # TanH
    output = hidden3.dot(weight4) + baise4
    output = cp.reciprocal(1 + cp.exp(-output))  # sigmoid
    # output = output / (1 + cp.absolute(output))  # TanH

    # ReLU alternative:
    # hidden = inputs.dot(weight.T) + baise
    # hidden = cp.maximum(hidden, 0)
    # output = hidden.dot(weight2.T)
    # output = cp.maximum(output, 0)

    # Sum-of-squares error over the batch.
    Error = cp.square(answers - output).sum()

    if train:
        weight4, baise4, weight3, baise3, weight2, baise2, weight, baise = training(
            answers, weight, output, weight2, weight3, weight4, inputs,
            hidden, hidden2, hidden3, baise, baise2, baise3, baise4, L)

    # output[output >= 0.5] = 1  # AND-gate optimization
    # output[output < 0.5] = 0   # AND-gate optimization
    # return Error.tolist(), output.tolist(), output.tolist()  # AND-gate result
    return Error, cp.argmax(output, 1), output
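# Minimal, hypothetical sketch of the sigmoid-layer pattern used above; the
# shapes and weights are made up, and the training() step is not reproduced.
import cupy as cp

x = cp.random.rand(4, 3)    # batch of 4 inputs with 3 features (assumed shapes)
w = cp.random.rand(3, 2)
b = cp.zeros(2)
h = cp.reciprocal(1 + cp.exp(-(x.dot(w) + b)))   # same as 1 / (1 + exp(-z))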
def norm_penalty(self, embeddings, is_test, learn_rate):
    if is_test:
        tmp_embeddings = embeddings[self.model.num_of_train_nodes:]
    else:
        tmp_embeddings = embeddings
    # Row-wise L2 norms, kept as a column vector for broadcasting.
    norm = cp.linalg.norm(tmp_embeddings, axis=1).reshape(
        (tmp_embeddings.shape[0], 1))
    squared_norm = cp.square(norm)
    # Gradient of a penalty on the row norm (minimized at norm 1), applied in place.
    norm_gradient = (-cp.reciprocal(squared_norm) + 1) * tmp_embeddings / norm
    tmp_embeddings -= learn_rate * norm_gradient
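# Standalone sketch of the norm-penalty step above; the embeddings are
# hypothetical and the surrounding model object is not reproduced.
import cupy as cp

emb = cp.random.rand(5, 8)
norm = cp.linalg.norm(emb, axis=1).reshape((emb.shape[0], 1))
grad = (-cp.reciprocal(cp.square(norm)) + 1) * emb / norm
emb -= 0.01 * grad    # one gradient step with learn_rate = 0.01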
def norm(x, ord=None, axis=None, keepdims=False):
    """Returns one of matrix norms specified by ``ord`` parameter.

    See numpy.linalg.norm for more detail.

    Args:
        x (cupy.ndarray): Array to take norm. If ``axis`` is None,
            ``x`` must be 1-D or 2-D.
        ord (non-zero int, inf, -inf, 'fro'): Norm type.
        axis (int, 2-tuple of ints, None): 1-D or 2-D norm is computed over
            ``axis``.
        keepdims (bool): If this is set ``True``, the axes which are normed
            over are left.

    Returns:
        cupy.ndarray
    """
    if not issubclass(x.dtype.type, numpy.inexact):
        x = x.astype(float)

    # Immediately handle some default, simple, fast, and common cases.
    if axis is None:
        ndim = x.ndim
        if (ord is None or (ndim == 1 and ord == 2) or
                (ndim == 2 and ord in ('f', 'fro'))):
            if x.dtype.kind == 'c':
                s = abs(x.ravel())
                s *= s
                ret = cupy.sqrt(s.sum())
            else:
                ret = cupy.sqrt((x * x).sum())
            if keepdims:
                ret = ret.reshape((1,) * ndim)
            return ret

    # Normalize the `axis` argument to a tuple.
    nd = x.ndim
    if axis is None:
        axis = tuple(range(nd))
    elif not isinstance(axis, tuple):
        try:
            axis = int(axis)
        except Exception:
            raise TypeError(
                '\'axis\' must be None, an integer or a tuple of integers')
        axis = (axis,)

    if len(axis) == 1:
        if ord == numpy.Inf:
            return abs(x).max(axis=axis, keepdims=keepdims)
        elif ord == -numpy.Inf:
            return abs(x).min(axis=axis, keepdims=keepdims)
        elif ord == 0:
            # Zero norm
            # Convert to Python float in accordance with NumPy
            return (x != 0).astype(x.real.dtype).sum(
                axis=axis, keepdims=keepdims)
        elif ord == 1:
            # special case for speedup
            return abs(x).sum(axis=axis, keepdims=keepdims)
        elif ord is None or ord == 2:
            # special case for speedup
            if x.dtype.kind == 'c':
                s = abs(x)
                s *= s
            else:
                s = x * x
            return cupy.sqrt(s.sum(axis=axis, keepdims=keepdims))
        else:
            try:
                float(ord)
            except TypeError:
                raise ValueError('Invalid norm order for vectors.')

            absx = abs(x)
            absx **= ord
            ret = absx.sum(axis=axis, keepdims=keepdims)
            ret **= cupy.reciprocal(ord, dtype=ret.dtype)
            return ret
    elif len(axis) == 2:
        row_axis, col_axis = axis
        if row_axis < 0:
            row_axis += nd
        if col_axis < 0:
            col_axis += nd
        if not (0 <= row_axis < nd and 0 <= col_axis < nd):
            raise ValueError('Invalid axis %r for an array with shape %r' %
                             (axis, x.shape))
        if row_axis == col_axis:
            raise ValueError('Duplicate axes given.')
        if ord == 1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = abs(x).sum(axis=row_axis).max(axis=col_axis)
        elif ord == numpy.Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = abs(x).sum(axis=col_axis).max(axis=row_axis)
        elif ord == -1:
            if col_axis > row_axis:
                col_axis -= 1
            ret = abs(x).sum(axis=row_axis).min(axis=col_axis)
        elif ord == -numpy.Inf:
            if row_axis > col_axis:
                row_axis -= 1
            ret = abs(x).sum(axis=col_axis).min(axis=row_axis)
        elif ord in [None, 'fro', 'f']:
            if x.dtype.kind == 'c':
                s = abs(x)
                s *= s
                ret = cupy.sqrt(s.sum(axis=axis))
            else:
                ret = cupy.sqrt((x * x).sum(axis=axis))
        else:
            raise ValueError('Invalid norm order for matrices.')
        if keepdims:
            ret_shape = list(x.shape)
            ret_shape[axis[0]] = 1
            ret_shape[axis[1]] = 1
            ret = ret.reshape(ret_shape)
        return ret
    else:
        raise ValueError('Improper number of dimensions to norm.')
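# Usage sketch for norm(), assuming it is exposed as cupy.linalg.norm; ord=3
# exercises the generic vector branch that uses cupy.reciprocal(ord) as the
# final exponent.
import cupy

v = cupy.array([3.0, 4.0])
print(cupy.linalg.norm(v))           # 5.0  (default 2-norm)
print(cupy.linalg.norm(v, ord=3))    # (3**3 + 4**3) ** (1/3), roughly 4.498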
def sigmoid(z):
    return cupy.reciprocal(1 + cupy.exp(-z))
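# Quick check of sigmoid(): sigmoid(0) is 0.5 and large inputs saturate.
import cupy

z = cupy.array([-10.0, 0.0, 10.0])
print(sigmoid(z))    # approximately [4.5e-05, 0.5, 0.99995]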
def General_n_Balance_n_Collision_Eff(self, _new_path, length_only=True,
                                      GPU_accelerating=False,
                                      GPU_accelerating_data=None,
                                      matrix_data=None):
    ITC = {}
    max_ITC = 1
    min_ITC = sys.maxsize
    total_cost = 0
    max_order = 0
    total_order = 0

    standard_index = self.tools.GetWidth()**2 + self.tools.GetHeight()**2

    # Parallelization
    if GPU_accelerating and length_only:
        n_AGV, population_size = GPU_accelerating_data
        T_matrix, S_matrix = matrix_data

        T_matrix = cp.array(T_matrix)
        S_matrix = cp.array(np.array(S_matrix).astype(float))

        ITC_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[1], [1]])),
                                (population_size, n_AGV))
        O_matrix = cp.reshape(cp.dot(T_matrix, cp.array([[0], [1]])),
                              (population_size, n_AGV))
        TC_matrix = cp.reshape(cp.dot(ITC_matrix, cp.ones((n_AGV, 1))),
                               (population_size))
        TO_matrix = cp.reshape(cp.dot(O_matrix, cp.ones((n_AGV, 1))),
                               (population_size))

        max_ITC_matrix = cp.amax(ITC_matrix, axis=1)
        min_ITC_matrix = cp.amin(ITC_matrix, axis=1)
        max_order_matrix = cp.amax(O_matrix, axis=1)

        _, n_order_points, _ = S_matrix.shape

        t_m = cp.reshape(cp.dot(S_matrix,
                                cp.array([[[1], [0], [0], [0], [0]]] * n_order_points)),
                         (population_size, n_order_points, n_order_points))
        x_m = cp.reshape(cp.dot(S_matrix,
                                cp.array([[[0], [1], [0], [0], [0]]] * n_order_points)),
                         (population_size, n_order_points, n_order_points))
        y_m = cp.reshape(cp.dot(S_matrix,
                                cp.array([[[0], [0], [1], [0], [0]]] * n_order_points)),
                         (population_size, n_order_points, n_order_points))
        l_m = cp.reshape(cp.dot(S_matrix,
                                cp.array([[[0], [0], [0], [1], [0]]] * n_order_points)),
                         (population_size, n_order_points, n_order_points))
        o_m = cp.reshape(cp.dot(S_matrix,
                                cp.array([[[0], [0], [0], [0], [1]]] * n_order_points)),
                         (population_size, n_order_points, n_order_points))
        t_m_l = cp.reshape(cp.dot(S_matrix, cp.array([[[1], [0], [0], [0], [0]]])),
                           (population_size, n_order_points))

        t_m_diff = t_m - cp.transpose(t_m, (0, 2, 1))
        x_m_diff = x_m - cp.transpose(x_m, (0, 2, 1))
        y_m_diff = y_m - cp.transpose(y_m, (0, 2, 1))

        m_xy_diff = cp.absolute(x_m_diff) + cp.absolute(y_m_diff)

        m_diff = cp.absolute(t_m_diff) + m_xy_diff
        m_diff_l = m_diff - l_m * 2

        m_diff_l_sign = (cp.logical_xor(cp.sign(m_diff_l) + 1, True))
        m_diff_l_eff = cp.multiply(m_diff, m_diff_l_sign)
        m_diff_l_sign = cp.sign(m_diff_l_eff)
        m_diff_l_H = cp.multiply(
            cp.multiply(cp.reciprocal(m_diff_l_eff + m_diff_l_sign - 1),
                        m_diff_l_sign),
            cp.log10(m_diff_l_eff + cp.absolute(m_diff_l_sign - 1)))
        d_m = cp.reciprocal(cp.sum(m_diff_l_H, (1, 2)))

        # Occupancy test
        """
        t_m_o = t_m + o_m - 1
        m_diff_o = cp.absolute(t_m_o - cp.transpose(t_m_o, (0, 2, 1))) - o_m - 1
        m_occupancy = (cp.logical_xor(cp.sign(m_diff_o) + 1, True))
        m_idn = cp.identity(n_order_points)
        OT = cp.prod(cp.logical_or(m_xy_diff,
                                   cp.logical_not(m_occupancy - m_idn)), (1, 2))
        """

        G1 = max_order_matrix / max_ITC_matrix
        G2 = TO_matrix / TC_matrix
        BU = min_ITC_matrix / max_ITC_matrix
        CI = cp.multiply(d_m, BU)  # d_m * 0.1

        E_matrix = G1 + G2 + BU + CI

        cp.cuda.Stream.null.synchronize()

        return (list(E_matrix),
                (list(max_ITC_matrix), list(TC_matrix), list(BU), list(CI)))

    # Non-parallelization
    else:
        print("[Scheduling] Must use GPU to calculate")

        for each_AGV_ID in _new_path.keys():
            each_AGV_len_schedule = 0
            each_AGV_num_orders = 0

            if length_only:
                each_AGV_len_schedule, each_num_order, each_order_list = \
                    _new_path[each_AGV_ID]
                each_AGV_num_orders = each_num_order
            else:
                each_path = _new_path[each_AGV_ID]
                for each_pos_path in each_path:
                    if len(each_pos_path) == 3:
                        each_AGV_num_orders += 1
                    each_AGV_len_schedule += 1

            cost = each_AGV_len_schedule + each_AGV_num_orders
            ITC[each_AGV_ID] = cost

            if each_AGV_num_orders > max_order:
                max_order = each_AGV_num_orders
            total_order += each_AGV_num_orders

        for _, each_value in ITC.items():
            if each_value > max_ITC:
                max_ITC = each_value
            if each_value < min_ITC:
                min_ITC = each_value
            total_cost += each_value

        TT = max_ITC
        TTC = total_cost
        BU = min_ITC / max_ITC
        CI = 0

        G1 = max_order / TT
        G2 = total_order / TTC

        value = G1 + G2 + BU + CI

        return (value, (TT, TTC, BU, CI))
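# Hypothetical sketch of the CPU fallback branch above. Assumes the function is
# reachable at module level with its imports (sys, numpy as np, cupy as cp)
# present; the self.tools dependency is stubbed, and _new_path maps
# AGV id -> (schedule_length, num_orders, order_list).
class _Tools:
    def GetWidth(self):
        return 10

    def GetHeight(self):
        return 10


class _Sched:
    def __init__(self):
        self.tools = _Tools()


paths = {0: (12, 3, []), 1: (9, 2, [])}
value, (TT, TTC, BU, CI) = General_n_Balance_n_Collision_Eff(
    _Sched(), paths, length_only=True, GPU_accelerating=False)
# value combines throughput (G1, G2), balance (BU = min_ITC / max_ITC), and CI.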