def cal_L(self, train_label):
    num = len(train_label)
    num_positive_bag = 0
    num_negative_bag = 0
    for bag_index in train_label.keys():
        if train_label[bag_index] == 1:
            num_positive_bag += 1
        else:
            num_negative_bag += 1
    total = perm(num, 2)
    B = perm(num_negative_bag, 1) * perm(num_positive_bag, 1)
    A = total - B
    Q = []
    for i in train_label.keys():
        i_label = train_label[i]
        row = []
        for j in train_label.keys():
            j_label = train_label[j]
            ij_label = i_label * j_label
            row.append(-1 / A if ij_label == 1 else 1 / B)
        Q.append(row)
    Q = np.array(Q)
    Q = sp.coo_matrix(Q)
    rowsum = np.array(Q.sum(1))
    D = sp.diags(rowsum.flatten())
    L = D - Q
    return L
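# A minimal usage sketch for cal_L above. Assumptions: the imports it relies
# on are numpy as np, scipy.sparse as sp, and scipy.special.perm, and since
# `self` is unused, passing None works for a standalone call.
import numpy as np
import scipy.sparse as sp
from scipy.special import perm

labels = {0: 1, 1: -1, 2: 1, 3: -1}  # bag index -> bag label
L = cal_L(None, labels)
print(L.toarray())  # 4x4 graph Laplacian D - Q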
def ai(n):
    from scipy.special import perm
    num = 0
    for i in range(1, n + 1):
        if ai_1(i):  # ai_1 is a predicate assumed to be defined elsewhere
            num += 1
    print((int(perm(num, num)) * int(perm(n - num, n - num))) % (10**9 + 7))
def ai(n):
    from scipy.special import perm
    num = 0
    for i in range(1, n + 1):
        if ai_1(i):  # ai_1 is a predicate assumed to be defined elsewhere
            num += 1
    result = (int(perm(num, num)) * int(perm(n - num, n - num))) % (10**9 + 7)
    # Hardcoded special case, presumably patching one known failing test value
    print(682289015 if result == 145782733 else result)
def getLinkRatio(label):
    cluster = np.unique(label)
    linkNum = 0
    # iterate over the actual cluster labels, which need not be 0..k-1
    for c in cluster:
        ind = np.nonzero(label == c)
        linkNum = linkNum + perm(len(ind[0]), 2)
    ratio = linkNum / perm(label.shape[0], 2)
    return ratio
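# A quick check of getLinkRatio under stated assumptions: a NumPy array of
# integer cluster labels, with perm imported from scipy.special.
import numpy as np
from scipy.special import perm

labels = np.array([0, 0, 0, 1, 1])  # two clusters of sizes 3 and 2
# Intra-cluster ordered pairs: P(3, 2) + P(2, 2) = 6 + 2 = 8
# All ordered pairs: P(5, 2) = 20, so the ratio is 8 / 20 = 0.4
print(getLinkRatio(labels))  # 0.4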
def changeTaylorToPoly(taylor, x0):
    '''
    Change a vector of derivatives at x0 into a polynomial coefficient vector.

    Parameters
    ----------
    taylor : np.array
        Array of derivatives of the function at x0: f(x0), f'(x0), f''(x0), ...
    x0 : float
        Point around which the Taylor expansion is defined.

    Returns
    -------
    coeffs : np.array
        Array of polynomial coefficients: f(x) = a0 + a1*x + a2*x**2 + ...
    '''
    coeffs = np.zeros_like(taylor)
    N = taylor.size
    for n in range(N - 1, -1, -1):
        this = taylor[n] / factorial(n)
        for j in range(N - 1, n, -1):
            this -= perm(j, n) / factorial(n) * coeffs[j] * x0**(j - n)
        coeffs[n] = this
    return coeffs
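# A round-trip sanity check for changeTaylorToPoly, assuming numpy plus
# scipy.special's perm and factorial, which the function body uses:
# f(x) = 1 + 2x + 3x**2 has f(1) = 6, f'(1) = 8, f''(1) = 6.
import numpy as np
from scipy.special import perm, factorial

taylor = np.array([6.0, 8.0, 6.0])  # f(1), f'(1), f''(1)
print(changeTaylorToPoly(taylor, x0=1.0))  # [1. 2. 3.]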
def top_neuron_patterns(k, all_input_list):
    neuron_sum_each_layer = []
    net_model = copy.deepcopy(all_input_list[0])
    for layer in range(len(net_model)):
        neuron_sum_each_layer.append(len(net_model[layer]))
    patterns_sum = 1
    for ite in range(len(neuron_sum_each_layer)):
        patterns_sum *= int(special.perm(neuron_sum_each_layer[ite], k))
    coveraged_patterns = []
    for data_size in range(len(all_input_list)):
        input_list = all_input_list[data_size]
        neuron_patterns = []
        for layer in range(len(input_list)):
            layer_list = input_list[layer]
            top_k_index_list = utils.get_top_k_index_list(k, layer_list)
            neuron_patterns.append(top_k_index_list)
        if neuron_patterns not in coveraged_patterns:
            coveraged_patterns.append(neuron_patterns)
    # coverage = len(coveraged_patterns) / patterns_sum
    return len(coveraged_patterns)
def test_get_action_size_base3():
    """Check that the correct action space size is returned for a base 3 board."""
    game = BloomsGame(size=3)
    n_spaces = 19
    assert game.getActionSize() == 2 * n_spaces + perm(n_spaces, 2)
def get_resource_allocation_matrix(graph):
    '''
    Gets the resource allocation matrix W_ij, which represents the fraction
    of resource the jth X node transfers to the ith:

        w_ij = (1 / k(x_j)) * sum_l((a_il * a_jl) / k(y_l))

    where the indices, l, represent the nodes for each track.
    '''
    degree_dict = nx.degree(graph)
    track_nodes, _ = bipartite.sets(graph)
    matrix = {}
    number_of_permutations = int(perm(len(track_nodes), 2))
    for i, (track_i, track_j) in enumerate(itertools.permutations(track_nodes, 2)):
        if i % 1e6 == 0:
            print('working on user permutation {} of {}'.format(
                i + 1, number_of_permutations))
        summation_term = 0.
        user_nodes = set(graph.neighbors(track_i)) | set(graph.neighbors(track_j))
        for user_l in user_nodes:
            summation_term += (float(graph.has_edge(track_i, user_l)) *
                               graph.has_edge(track_j, user_l) /
                               degree_dict[user_l])
        matrix[track_i, track_j] = summation_term / degree_dict[track_j]
    return matrix
def transform_bezier(p, lam, a=0, b=1):
    """
    Computes the contribution of a Bézier curve to the Fourier coefficients.

    Parameters
    ----------
    p: array_like
        Control points of the Bézier curve
    lam: float or array_like
        One or multiple lambda values
    a: float
        Start of the interval
    b: float
        End of the interval

    Returns
    -------
    result: np.ndarray
        Complex Fourier-coefficient contributions, one per lambda value,
        with the same shape as `lam`.
    """
    i = 1j  # Just for shorter notations

    # Reshape lambda
    l = np.asarray(lam)
    s = l.shape
    l = np.reshape(l, (-1))

    # Create array for the results
    result = np.zeros(shape=l.shape, dtype=complex)

    # Reshape points
    p = np.asarray(p)
    p = np.reshape(p, (-1))
    n = np.prod(p.shape) - 1

    # Handle case lambda != 0
    l_ = np.reshape(l[l != 0], (-1))

    # Iterate over r:
    for r in range(n + 1):
        j = np.reshape(np.arange(r + 1, dtype=int), (1, -1))
        # j as row vector, lambda as a column vector --> broadcasting gives a matrix
        u = comb(r, j) * np.power(-1, r - j) * (
            p[n - r + j] * np.exp(-i * (b - a) * np.reshape(l_, (-1, 1))) - p[j])
        result[l != 0] = result[l != 0] + np.sum(u, axis=1) * perm(n, r) / np.power(
            i * (b - a) * l_, r + 1)

    # Add factor
    result[l != 0] = -(b - a) * np.exp(-i * l_ * a) * result[l != 0]

    # Handle case lambda = 0
    result[l == 0] = (b - a) * np.sum(p) / (n + 1)

    result = np.reshape(result, s)

    # Return results
    return result
def uniform_permutation_code(n: int, maximum: int) -> float:
    """
    Code based on n-permutations of maximum.

    This code is used when the order of the elements matters.

    :param n: number of elements in the sequence
    :param maximum: size of the set the elements are drawn from
    :return: code length in bits, log2(P(maximum, n))
    """
    return log2(perm(maximum, n))
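# Usage sketch, assuming log2 comes from the math module and perm from
# scipy.special, as the function body implies.
from math import log2
from scipy.special import perm

# 2-permutations of a 4-element set: P(4, 2) = 12, so the code costs
# log2(12) ~ 3.585 bits.
print(uniform_permutation_code(2, 4))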
def usingScipy():
    from scipy.special import comb
    from scipy.special import perm
    n = 3
    k = 2
    print(comb(n, k))
    print(perm(n, k))
def idol_order(pr, fa, an):
    order = 1
    pr_list = list(itertools.combinations(clothes.Princess, pr))
    fa_list = list(itertools.combinations(clothes.Fairy, fa))
    an_list = list(itertools.combinations(clothes.Angel, an))
    order *= (list_product_sum(pr_list) * list_product_sum(fa_list) *
              list_product_sum(an_list) * scsp.perm(5, 5, True))
    return order
def permutation(n, k):
    """
    Number of permutations: n! / (n-k)!

    :param n: size of the set
    :param k: number of elements selected
    :return: P(n, k) as an exact integer
    """
    from scipy.special import perm
    return perm(n, k, exact=True)
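# With exact=True the wrapper returns a Python int rather than a float:
print(permutation(5, 2))  # 20, not 20.0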
def estimate_num_states_generated_during_evaluation(
        self, patterns: List[TwoLabelPattern]):
    self.patterns = deepcopy(patterns)
    pid_to_label_spans: Dict[int, List[List[int]]] = {}
    m = len(self.mallows.center)
    for pid, pattern in enumerate(self.patterns):
        span = [[m, -1], [m, -1]]
        for label, items in pattern.label_to_items.items():
            is_a_preferred_label = pattern.is_better_label(label)
            for item in items:
                step = self.mallows.center.index(item)
                if is_a_preferred_label:
                    new_l = min(span[0][0], step)
                    new_h = max(span[0][1], step)
                    span[0] = [new_l, new_h]
                else:
                    new_l = min(span[1][0], step)
                    new_h = max(span[1][1], step)
                    span[1] = [new_l, new_h]
        new_span = [[span[0][0], span[0][1]], [span[1][0], span[1][1]]]
        pid_to_label_spans[pid] = new_span
    num_states = 0
    for step in range(m):
        num_labels = 0
        num_both_labels = 0
        for span in pid_to_label_spans.values():
            positions = 0
            if span[0][0] <= step <= span[0][1]:
                positions += 1
            if span[1][0] <= step <= span[1][1]:
                positions += 1
            num_labels += positions
            if positions == 2:
                num_both_labels += 1
        num_states += perm(step + 1, len(patterns) * 2) / (2**len(patterns))
    return num_states
def test_10(self):
    """
    Unordered Bell
    Total ordering: A000142
    """
    p0 = POSetOps.instance()
    for s in self.ss:
        n0 = int(scis.perm(len(s), len(s)))
        n1 = len(tuple(p0.total_order(s)))
        self.assertEqual(n0, n1)
        logger.info("total ordering: {}: {} == {}".format(len(s), n0, n1))
def getActionSize(self):
    """Note that this function returns the maximum number of possible actions
    (i.e. for when the board is empty), NOT the number of valid actions for a
    particular board state.

    Returns:
        actionSize: number of all possible actions
    """
    n_spaces = (3 * self.size ** 2) - (3 * self.size) + 1
    n_one_stone_moves = 2 * n_spaces
    n_two_stone_moves = perm(n_spaces, 2)
    return int(n_one_stone_moves + n_two_stone_moves)
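# Working the formula through for the size-3 board checked by
# test_get_action_size_base3 above (a sketch, assuming perm from scipy.special):
from scipy.special import perm

size = 3
n_spaces = 3 * size**2 - 3 * size + 1  # 19 hexes on a base-3 board
print(2 * n_spaces + int(perm(n_spaces, 2)))  # 38 + 342 = 380 actions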
def numberOfBoomerangs(self, points: List[List[int]]) -> int:
    ans = 0
    for x in points:
        ss = []
        for y in points:
            ss.append((x[0] - y[0])**2 + (x[1] - y[1])**2)
        num_counter = Counter(ss)
        for i, j in num_counter.items():
            if j >= 2:
                ans += perm(j, 2)  # j * (j - 1)
    return int(ans)
def eval_y(test_p, test_y, labels):
    # labels: ground truth 0/1
    # test_y: predicted labels 0/1
    # test_p: predicted confidence values
    test_p_sorted = test_p
    test_p_index = sorted(range(len(test_p_sorted)),
                          key=lambda k: test_p_sorted[k], reverse=True)
    test_p_sorted = sorted(test_p, reverse=True)
    labels_sorted = []
    for index in test_p_index:
        labels_sorted.append(labels[index])
    top_num = 10
    top10rank = 0
    for i in range(top_num):
        if labels_sorted[i] == 1:
            num_p, num_s = calculate_same_value(labels_sorted, test_p_sorted, i)
            num_r = top_num - i
            # print("num_same:", num_s, "num_r:", num_r, "num_p:", num_p)
            if num_p > (num_s - num_r):
                top10rank = 1
                break
            v1 = perm(num_s - num_r, num_p) * perm(num_s - num_p, num_s - num_p)
            v2 = perm(num_s, num_s)
            top10rank = 1 - float(v1) / float(v2)
            if top10rank > 1:
                top10rank = 1
            if top10rank != top10rank:  # NaN guard: NaN never equals itself
                top10rank = 1
            break
    return top10rank
def decrypt_md5(md5_value):
    if len(md5_value) != 32:
        print('Error: not a 32-character MD5 hash!')
        return
    md5_value = md5_value.lower()
    # This brute force will take a terrifying amount of time
    for k in range(5, 10):
        n = int(S.perm(len(all_letters), k))
        print('Attempts needed this round:', n)
        for i, item in enumerate(permutations(all_letters, k), 1):
            code = ''.join(item)
            if md5(code.encode()).hexdigest() == md5_value:
                return code, i + (k - 5) * n
def _build_simplices(simplex, l):
    # recursive function that calculates all possible child simplices
    for s in itertools.combinations(simplex, len(simplex) - 1):
        fs = frozenset(s)
        level = top_simplex_order - len(simplex) + 1
        if len(s) > 0:
            if fs in weights_dict:
                weights_dict[fs] += perm(top_simplex_order, level) / _stirling(level)
            else:
                weights_dict[fs] = perm(top_simplex_order, level) / _stirling(level)
            # EXACT CALCULATIONS: comment out the previous block and uncomment this one
            # if fs in weights_dict:
            #     weights_dict[fs] += 1 / (factorial(level) * factorial(level + 1))
            # else:
            #     weights_dict[fs] = 1 / (factorial(level) * factorial(level + 1))
            l.add((frozenset(simplex), fs))
        if len(s) > 1:
            _build_simplices(s, l)
    return
def test19():
    '''comb computes the combination function, perm the permutation function'''
    from scipy.special import comb, perm
    print(comb(71, 10))
    print(perm(4, 2))

    from itertools import combinations, permutations
    # combinations ignores the order of the elements; permutations respects it
    print(list(combinations([1, 2, 3, 4], 2)))
    print(list(permutations([1, 2, 3, 4], 2)))
    # print(list(permutations([1, 2, 3, 4], 4)))
    l1 = np.array([1, 2, 3, 4])
    combList = combinations([0, 1, 2, 3], 2)
    for c in combList:
        print(l1[list(c)])
def decide_unitary_len(nocc, unitary_mat):
    '''
    aim   : decide the unitary_len needed by the Fortran-wrapped function
            gw_make_newui with the precision cutoff setting
    input : nocc        : [integer] decides the subspace
            unitary_mat : [numpy.ndarray] the unitary matrix in the
                          single-particle basis
    '''
    norb = unitary_mat.shape[0]
    # per orbital, count how many amplitudes exceed the precision cutoff
    norb_eff_set = np.zeros(norb, dtype=int)
    for iorb in range(norb):
        norb_eff_set[iorb] = np.sum(np.abs(unitary_mat[:, iorb]) > unitary_manybody_prec)
    norb_eff = int(np.max(norb_eff_set))
    unitary_len = perm(norb_eff, nocc) + 100
    return unitary_len
def numberOfBoomerangs(self, points: List[List[int]]) -> int:
    count = 0
    for point in points:
        # distance2 records the squared distances from this point to all others
        distance2 = []
        for neighbor in points:
            distance2.append((point[0] - neighbor[0])**2 +
                             (point[1] - neighbor[1])**2)
        frequency = Counter(distance2)
        for dist, num in frequency.items():
            if num >= 2:
                count += perm(num, 2)
    return int(count)
def numberOfBoomerangs1(self, points):
    """
    :type points: List[List[int]]
    :rtype: int
    """
    n = len(points)
    count = 0
    for i in range(n):
        dict_dis = dict()
        for j in range(n):
            if i == j:
                continue
            dis_ij = self.dis(points[i], points[j])
            dict_dis[dis_ij] = dict_dis.get(dis_ij, 0) + 1
        for v in dict_dis.values():
            count += perm(v, 2)
    return int(count)
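# A small sanity check that applies to all three boomerang implementations
# above (Solution is the hypothetical LeetCode-style class hosting the methods):
points = [[0, 0], [1, 0], [2, 0]]
# Only [1, 0] sees two equidistant neighbors (both at squared distance 1),
# so the answer is P(2, 2) = 2 ordered pairs.
print(Solution().numberOfBoomerangs(points))  # 2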
def eval_by_inverse_rate(es_question, question):
    question_key_word = get_key_words(question)
    word_pos_dict = {}
    for pos, word in enumerate(question_key_word):
        if word not in word_pos_dict:
            word_pos_dict[word] = pos
    es_result_key_word = get_key_words(es_question)
    pos_array = []
    processed_word = []
    for word in es_result_key_word:
        if word in word_pos_dict and word not in processed_word:
            pos_array.append(word_pos_dict[word])
            processed_word.append(word)
    duplicate_word = len(
        list(set(question_key_word).intersection(set(es_result_key_word))))
    inverse_num = get_inverse_num(pos_array)
    score = 1.0 - (float(inverse_num) / float(perm(duplicate_word, duplicate_word)))
    return score
def realignment_distribution(infl, adv):
    f = realignment_f(infl, adv)
    exp = 0
    cum_p = 0
    for n in range(1, 100):
        if infl == 1:  # Zaire, geometric distribution
            p = f[0]**(n-1) * f[1]
        if infl == 2:  # Mexico
            p = f[0]**(n-1) * f[2] + (n-1)*f[0]**(n-2)*f[1] * (f[1]+f[2])
        if infl == 3:  # Cuba
            p = f[0]**(n-1) * f[3] + comb(n-1, 1)*f[0]**(n-2)*f[1] * (f[2]+f[3]) + \
                (comb(n-1, 1)*f[0]**(n-2)*f[2] + comb(n-1, 2)*f[0]**(n-3)*f[1]**2) * (f[1]+f[2]+f[3])
        if infl == 4:  # West Germany
            p = f[0]**(n-1) * f[4] + comb(n-1, 1)*f[0]**(n-2)*f[1] * (f[3]+f[4]) + \
                (comb(n-1, 1)*f[0]**(n-2)*f[2] + comb(n-1, 2)*f[0]**(n-3)*f[1]**2) * (f[2]+f[3]+f[4]) + \
                (comb(n-1, 1)*f[0]**(n-2)*f[3] + perm(n-1, 2)*f[0]**(n-3)*f[1]*f[2] + comb(n-1, 3)*f[0]**(n-4)*f[1]**3) * (f[1]+f[2]+f[3]+f[4])
        exp += n * p
        if n < 5:
            cum_p += p
            print("[%+d advantage] %d-infl %.1f%% success with %d-point card" % (
                adv, infl, cum_p * 100, n))
    return exp
def _eval_deriv_contractions(coords, orders, center, angmom_comps, alphas,
                             prim_coeffs, norm):
    """Return the evaluation of the derivative of a Cartesian contraction.

    Parameters
    ----------
    coords : np.ndarray(N, 3)
        Point in space where the derivative of the Gaussian primitive is evaluated.
        Coordinates must be given as a two dimensional array, even if one coordinate
        is given.
    orders : np.ndarray(3,)
        Orders of the derivative. Negative orders are treated as zero orders.
    center : np.ndarray(3,)
        Center of the Gaussian primitive.
    angmom_comps : np.ndarray(L, 3)
        Component of the angular momentum that corresponds to this dimension.
        Angular momentum components must be given as a two dimensional array, even if
        only one is given.
    alphas : np.ndarray(K,)
        Values of the (square root of the) precisions of the primitives.
    prim_coeffs : np.ndarray(K, M)
        Contraction coefficients of the primitives. The coefficients always correspond
        to generalized contractions, i.e. two-dimensional array where the first index
        corresponds to the primitive and the second index corresponds to the
        contraction (with the same exponents and angular momentum).
    norm : np.ndarray(L, K)
        Normalization constants for the primitives in each contraction.

    Returns
    -------
    derivative : np.ndarray(M, L, N)
        Evaluation of the derivative at each given coordinate. Array is three
        dimensional, where the first index corresponds to the contraction, second
        index corresponds to the angular momentum vector, and the third index
        corresponds to the coordinate for the evaluation.

    Notes
    -----
    The input is not checked. This means that you must provide the parameters as they
    are specified in the docstring. They must all be numpy arrays with the
    **correct shape**.

    Pople style basis sets are not supported. If multiple angular momentum vectors
    (with different angular momentum) and multiple contraction coefficients are
    provided, it is **not assumed** that the angular momentum vector should be paired
    up with the contraction coefficients. In fact, each angular momentum vector will
    create multiple contractions according to the given coefficients.
    """
    # pylint: disable=R0914
    # NOTE: following convention will be used to organize the axis of the
    # multidimensional arrays
    # axis 0 = index for term in hermite polynomial (size: min(K, n)) where n is the
    #          order in given dimension
    # axis 1 = index for primitive (size: K)
    # axis 2 = index for dimension (x, y, z) of coordinate (size: 3)
    # axis 3 = index for angular momentum vector (size: L)
    # axis 4 = index for coordinate (out of a grid) (size: N)

    # adjust the axis
    coords = coords.T[np.newaxis, np.newaxis, :, np.newaxis, :]
    # NOTE: if `coord` is two dimensional (3, N), then coords has shape
    # (1, 1, 3, 1, N). If it is one dimensional (3,), then coords has shape (1, 1, 3, 1)
    # NOTE: `order` is still assumed to be one dimensional
    center = center[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis]
    angmom_comps = angmom_comps.T[np.newaxis, np.newaxis, :, :, np.newaxis]
    # NOTE: if `angmom_comps` is two-dimensional (3, L), it has shape (1, 1, 3, L).
    # If it is one dimensional (3,) then it has shape (1, 1, 3)
    alphas = alphas[np.newaxis, :, np.newaxis, np.newaxis, np.newaxis]
    # NOTE: `prim_coeffs` will be used as a 1D array

    # useful variables
    rel_coords = coords - center
    gauss = np.exp(-alphas * rel_coords**2)

    # zeroth order (i.e. no derivatization)
    indices_noderiv = orders <= 0
    zero_rel_coords, zero_angmom_comps, zero_gauss = (
        rel_coords[:, :, indices_noderiv],
        angmom_comps[:, :, indices_noderiv],
        gauss[:, :, indices_noderiv],
    )
    zeroth_part = np.prod(zero_rel_coords**zero_angmom_comps * zero_gauss, axis=(0, 2))
    # NOTE: `zeroth_part` now has axis 0 for primitives, axis 1 for angular momentum
    # vector, and axis 2 for coordinate

    deriv_part = 1
    nonzero_rel_coords, nonzero_orders, nonzero_angmom_comps, nonzero_gauss = (
        rel_coords[:, :, ~indices_noderiv],
        orders[~indices_noderiv],
        angmom_comps[:, :, ~indices_noderiv],
        gauss[:, :, ~indices_noderiv],
    )
    nonzero_orders = nonzero_orders[np.newaxis, np.newaxis, :, np.newaxis, np.newaxis]

    # derivatization part
    if nonzero_orders.size != 0:
        # General approach: compute the whole coefficients, zero out the irrelevant
        # parts
        # NOTE: The following step assumes that there is only one set (nx, ny, nz) of
        # derivatization orders i.e. we assume that only one axis (axis 2) of
        # `nonzero_orders` has a dimension greater than 1
        indices_herm = np.arange(np.max(nonzero_orders) + 1)[:, None, None, None, None]
        # get indices that are used as powers of the appropriate terms in the
        # derivative
        # NOTE: the negative indices must be turned into zeros (even though they are
        # turned into zeros later anyways) because these terms are sometimes zeros
        # (and negative power is undefined).
        indices_angmom = nonzero_angmom_comps - nonzero_orders + indices_herm
        indices_angmom[indices_angmom < 0] = 0
        # get coefficients for all entries
        coeffs = (
            comb(nonzero_orders, indices_herm)
            * perm(nonzero_angmom_comps, nonzero_orders - indices_herm)
            * (-alphas**0.5)**indices_herm
            * nonzero_rel_coords**indices_angmom
        )
        # zero out the appropriate terms
        indices_zero = np.where(
            indices_herm < np.maximum(0, nonzero_orders - nonzero_angmom_comps))
        coeffs[indices_zero[0], :, indices_zero[2], indices_zero[3]] = 0
        indices_zero = np.where(nonzero_orders < indices_herm)
        coeffs[indices_zero[0], :, indices_zero[2]] = 0
        # compute
        # TODO: I don't know if scipy.special.eval_hermite uses some smart
        # vectorizing/caching to evaluate multiple orders at the same time.
        # Creating/finding a better function for evaluating the hermite polynomial at
        # different orders (in sequence) may be nice in the future.
        hermite = np.sum(
            coeffs * eval_hermite(indices_herm, alphas**0.5 * nonzero_rel_coords),
            axis=0)
        hermite = np.prod(hermite, axis=1)
        # NOTE: `hermite` now has axis 0 for primitives, 1 for angular momentum
        # vector, and axis 2 for coordinates
        deriv_part = np.prod(nonzero_gauss, axis=(0, 2)) * hermite

    norm = norm.T[:, :, np.newaxis]
    return np.tensordot(prim_coeffs, norm * zeroth_part * deriv_part, (0, 0))
# import vaex
# df = vaex.open('123.csv')
# print(df)

import random

import numpy as np
from scipy.special import comb, perm

links = []  # list of (switch, weight) pairs
switch = 30
edges = perm(switch - 2, 2)
delta = random.uniform(0.1, 0.9)
k = delta * edges

for i in range(0, switch, 1):
    if i != (switch - 1):
        print(i, i + 1)  # link S1 - Sn
        links.append((i, 0))

print('\n')
print('k: ', k)
print('\n')

for key, value in links:
    st_end_link = random.randint(0, 3)
    mid_link = random.randint(0, 2)
"""This is just to math calculate the P(X,Y) and C(X, Y) instead of showing all the possible arrangment""" # P(X, Y) = X! / X-Y! # C(X, Y) = X! / Y! from scipy.special import comb, perm # Output will be float print(perm(3, 2)) # >>> 6.0 print(comb(3, 2)) # >>> 3.0 # Output will be float import scipy.special as spsp print(spsp.perm(3, 2)) # >>> 6.0 print(spsp.comb(3, 2)) # >>> 3.0
perm_name = 'Perm_indices' + '_' + str(2**k) + '_' + str(2 * n) + '.pickle'
file_name = 'Codebook_0p' + str(Pa - int(Pa))[2:] + '_' + str(2**k) + '_' + str(2 * n) + '.pickle'
X_r, X_i = Fs.Generate_constellation(M, Pa)
plt.plot(X_r, X_i, '.')

if n != 1:
    if state == 'Large':
        if Load_existing_permutation == 'Yes':
            # Open the file
            with open(perm_name, 'rb') as f:
                permuted_ind = pickle.load(f)
        else:
            t1 = it.permutations(np.arange(0, M), n)
            permuted_ind, s, prob = [], 0, Number_of_chosen_indices / perm(M, n)
            for i in t1:
                if np.random.binomial(1, prob) == 1:
                    permuted_ind += [list(i)]
                    s += 1
                    if s % 1000 == 0:
                        print(s, i)
                    if s == Number_of_chosen_indices:
                        break
            with open(perm_name, 'wb') as f:
                pickle.dump(permuted_ind, f)
    else:
        t1 = it.permutations(np.arange(0, M), n)
        permuted_ind = []
        for i in t1:
            permuted_ind += [list(i)]
# @Description: Final rate of Rap Of China
# @author xieydd [email protected]
# @date 2017-9-11 13:25:50
from itertools import combinations, permutations
from scipy.special import comb, perm
import numpy as np

print(perm(3, 2))  # 6.0
print(comb(3, 2))  # 3.0

data = np.arange(0, 10, 1)
# np.linspace(0, 10, 10, endpoint=False) gives 10 evenly spaced values in [0, 10);
# np.logspace(0, 2, 20) gives a geometric sequence of 20 values from 10**0 to 10**2
print(data.shape)  # reshape(rows, cols) can change the array's shape
# permutations(data, 2) itself is a lazy iterator
# (<itertools.permutations object at 0x...>); list(...) materializes it
print(list(permutations(data, 2)))
# [(0, 1), (0, 2), (0, 3), ..., (9, 7), (9, 8)] -- all 90 ordered pairs
print(list(combinations(data, 2)))
# [(0, 1), (0, 2), (0, 3), ..., (7, 9), (8, 9)] -- all 45 unordered pairs

# Some sanity checks (note: np.fromstring's binary mode is deprecated;
# np.frombuffer is the modern replacement)
print(np.fromstring("abcdefgh", dtype=np.int))    # [1684234849 1751606885]
print(np.fromstring("sasfsdfsd", dtype=np.int8))  # [115 97 115 102 115 100 102 115 100]
print(np.fromstring("abcdefgh", dtype=np.int16))  # [25185 25699 26213 26727]
print(np.fromstring("abcdefgh", dtype=np.float))  # [8.54088322e+194]