def __init__(self, delimiter=None, comments='#', autostrip=True, encoding=None):
    """
    Configure how raw text lines are split into fields.

    Parameters
    ----------
    delimiter : str, int, sequence of int, or None, optional
        * None or a string: fields are separated by that string
          (None falls through to the default delimited splitter).
        * sequence of ints: interpreted as field widths and converted
          to a list of `slice` objects.
        * single int: every field has that fixed width.
    comments : str, optional
        Marker introducing a comment; stored on the instance.
    autostrip : bool, optional
        If True, wrap the chosen splitter with ``self.autostrip`` so each
        field is post-processed (presumably whitespace-stripped — confirm
        against the `autostrip` method).
    encoding : str or None, optional
        Stored as-is; used together with `_decode_line` elsewhere.
    """
    # `_decode_line` normalizes bytes/str input up front, so the isinstance
    # check below only ever sees decoded strings (or None).
    delimiter = _decode_line(delimiter)
    comments = _decode_line(comments)

    self.comments = comments

    # Delimiter is a character / string.
    # BUGFIX: `basestring` is Python-2-only and raises NameError on
    # Python 3; after `_decode_line` the value is a `str`, so test that.
    # This branch must precede the `__iter__` check because strings are
    # themselves iterable.
    if (delimiter is None) or isinstance(delimiter, str):
        delimiter = delimiter or None
        _handyman = self._delimited_splitter
    # Delimiter is a list of field widths: convert to slices.
    elif hasattr(delimiter, '__iter__'):
        _handyman = self._variablewidth_splitter
        idx = np.cumsum([0] + list(delimiter))
        delimiter = [slice(i, j) for (i, j) in zip(idx[:-1], idx[1:])]
    # Delimiter is a single non-zero integer: fixed field width.
    elif int(delimiter):
        (_handyman, delimiter) = (self._fixedwidth_splitter, int(delimiter))
    # Zero width: fall back to whitespace splitting.
    else:
        (_handyman, delimiter) = (self._delimited_splitter, None)
    self.delimiter = delimiter
    if autostrip:
        self._handyman = self.autostrip(_handyman)
    else:
        self._handyman = _handyman
    self.encoding = encoding
def _selection(self, population, population_fitness): new_population = [] new_population_fitness = [] # 1/x used for minimum value, x used for maximum # selection strategy is less value, more better. # 适应度计算,如[1,2,3,4,5]的适应度为[1/15,2/15,3/15,4/15,5/15] # 这里的基因值是取的倒数 fitness = [1 / x for x in population_fitness] fitness_rate = [x / sum(fitness) for x in fitness] #累计概率 fitness_circle = np.cumsum(fitness_rate) # fitness_circle数组内的某位的值等于fitness_rate对应位的值加上fitness_rate内该位前面的所有值 # 举例:a=[1,2,3,4,5],np.cumsum(a)=[1,3,6,10,15] for _ in range(self.population_size): # np.random.rand()随机抽取0到1的数 #判断当前染色体是否会被选中,返回类型为布尔类型 select_rates = fitness_circle > np.random.rand() #如果返回值为True,保存其索引 select_first_index = list(select_rates).index(True) #满足适应性的加入新种群,不满足的丢弃,并将上一次满足的再次加入新种群中,以保持种群数量的稳定 new_population.append(population[select_first_index]) new_population_fitness.append( population_fitness[select_first_index]) return new_population, new_population_fitness
def _unique1d(ar, return_index=False, return_inverse=False, return_counts=False): """ Find the unique elements of an array, ignoring shape. """ ar = np.asanyarray(ar).flatten() optional_indices = return_index or return_inverse if optional_indices: perm = ar.argsort(kind='mergesort' if return_index else 'quicksort') aux = ar[perm] else: ar.sort() aux = ar mask = np.empty(aux.shape, dtype=np.bool_) mask[:1] = True mask[1:] = aux[1:] != aux[:-1] ret = (aux[mask], ) if return_index: ret += (perm[mask], ) if return_inverse: imask = np.cumsum(mask) - 1 inv_idx = np.empty(mask.shape, dtype=np.intp) inv_idx[perm] = imask ret += (inv_idx, ) if return_counts: idx = np.concatenate(np.nonzero(mask) + ([mask.size], )) ret += (np.diff(idx), ) return ret
def test_result_values(self):
    # nancumprod/nancumsum must agree with the plain cumulative ops applied
    # to data whose NaNs were replaced by the operation's identity element
    # (presumably _ndat_ones / _ndat_zeros are exactly that — confirm at
    # their definitions).
    for axis in (-2, -1, 0, 1, None):
        expected = np.cumprod(_ndat_ones, axis=axis)
        assert_almost_equal(np.nancumprod(_ndat, axis=axis), expected)

        expected = np.cumsum(_ndat_zeros, axis=axis)
        assert_almost_equal(np.nancumsum(_ndat, axis=axis), expected)
def Select_Operation(Pop, Chromosome_Length, Popualation_Size, Pop_FitnV):
    """
    Roulette-wheel selection for a minimisation GA.

    Parameters
    ----------
    Pop : list of list
        Current population; each chromosome is a list of genes.
    Chromosome_Length : int
        Number of genes per chromosome.
    Popualation_Size : int
        Number of individuals to draw (with replacement).
    Pop_FitnV : list of float
        Fitness per individual; smaller is better, must be non-zero.

    Returns
    -------
    (Return_Pop, Return_FitnV) : tuple of list
        Selected chromosomes (copies) and their fitness values.
    """
    # Pre-sized result containers.
    Return_Pop = [[0 for _ in range(Chromosome_Length)]
                  for _ in range(Popualation_Size)]
    Return_FitnV = [0 for _ in range(Popualation_Size)]

    # Invert fitness so that smaller (better) values get a larger share of
    # the wheel, then normalise into a cumulative distribution.
    temp_FitnV = [1 / x for x in Pop_FitnV]
    # PERF: hoisted — the original recomputed sum(temp_FitnV) for every
    # element of the comprehension (accidental O(n^2)).
    total_FitnV = sum(temp_FitnV)
    rate_FitnV = [x / total_FitnV for x in temp_FitnV]
    cumsum_FitnV = np.cumsum(rate_FitnV)

    # Spin the wheel once per slot of the new population.
    for iPopualation_Size in range(Popualation_Size):
        # Winner = first index whose cumulative probability exceeds the
        # uniform draw.  PERF: computed once — the original built and
        # searched the boolean list twice per iteration.
        winner = list(cumsum_FitnV > random.uniform(0, 1)).index(True)
        Return_Pop[iPopualation_Size][:] = Pop[winner][:]
        Return_FitnV[iPopualation_Size] = Pop_FitnV[winner]
    return Return_Pop, Return_FitnV
def wij_list(total_history):
    """
    Per-route suffix load totals.

    For every route in `total_history`, entry j of the returned row is the
    cumulative weight of the goods at stop j and all later stops, with a
    trailing 0 appended (nothing left after the last stop).  Stops equal to
    0 are skipped; for a real stop the weight is read from
    ``data[stop - 1][8]`` (presumably column 8 is the item weight — confirm
    against `data`'s schema).
    """
    goods_l = []
    for route in total_history:
        # Per-stop load for the real (non-zero) stops, in route order.
        loads = [data[stop - 1][8] for stop in route if stop != 0]
        # Accumulate from the end of the route, then restore route order:
        # entry j is the total still to be delivered from stop j onward.
        suffix_totals = list(np.cumsum(loads[::-1])[::-1])
        suffix_totals.append(0)
        goods_l.append(suffix_totals)
    return goods_l
def stack_arrays(arrays, defaults=None, usemask=True, asrecarray=False, autoconvert=False):
    """
    Superposes arrays fields by fields

    Parameters
    ----------
    arrays : array or sequence
        Sequence of input arrays.
    defaults : dictionary, optional
        Dictionary mapping field names to the corresponding default values.
    usemask : {True, False}, optional
        Whether to return a MaskedArray (or MaskedRecords is
        `asrecarray==True`) or a ndarray.
    asrecarray : {False, True}, optional
        Whether to return a recarray (or MaskedRecords if `usemask==True`)
        or just a flexible-type ndarray.
    autoconvert : {False, True}, optional
        Whether automatically cast the type of the field to the maximum.

    Examples
    --------
    >>> from numpy.lib import recfunctions as rfn
    >>> x = np.array([1, 2,])
    >>> rfn.stack_arrays(x) is x
    True
    >>> z = np.array([('A', 1), ('B', 2)],
    ...              dtype=[('A', '|S3'), ('B', float)])
    >>> zz = np.array([('a', 10., 100.), ('b', 20., 200.), ('c', 30., 300.)],
    ...               dtype=[('A', '|S3'), ('B', float), ('C', float)])
    >>> test = rfn.stack_arrays((z,zz))
    >>> test
    masked_array(data = [('A', 1.0, --) ('B', 2.0, --) ('a', 10.0, 100.0)
     ('b', 20.0, 200.0) ('c', 30.0, 300.0)],
                 mask = [(False, False, True) (False, False, True)
     (False, False, False) (False, False, False) (False, False, False)],
           fill_value = ('N/A', 1e+20, 1e+20),
                dtype = [('A', '|S3'), ('B', '<f8'), ('C', '<f8')])
    """
    # Trivial cases: a bare ndarray, or a one-element sequence, is returned
    # unchanged.
    if isinstance(arrays, ndarray):
        return arrays
    elif len(arrays) == 1:
        return arrays[0]
    seqarrays = [np.asanyarray(a).ravel() for a in arrays]
    nrecords = [len(a) for a in seqarrays]
    ndtype = [a.dtype for a in seqarrays]
    fldnames = [d.names for d in ndtype]

    # Merge all field specs into one description, starting from the first
    # array's dtype and appending fields the later arrays introduce.
    dtype_l = ndtype[0]
    newdescr = get_fieldspec(dtype_l)
    names = [n for n, d in newdescr]
    for dtype_n in ndtype[1:]:
        for fname, fdtype in get_fieldspec(dtype_n):
            if fname not in names:
                newdescr.append((fname, fdtype))
                names.append(fname)
            else:
                # Field already known: either widen its dtype (autoconvert)
                # or insist the dtypes match exactly.
                nameidx = names.index(fname)
                _, cdtype = newdescr[nameidx]
                if autoconvert:
                    newdescr[nameidx] = (fname, max(fdtype, cdtype))
                elif fdtype != cdtype:
                    raise TypeError("Incompatible type '%s' <> '%s'" %
                                    (cdtype, fdtype))
    # Only one field: use concatenate
    if len(newdescr) == 1:
        output = ma.concatenate(seqarrays)
    else:
        # Fully masked output; each input then fills its own row range,
        # leaving the fields it lacks masked.
        output = ma.masked_all((np.sum(nrecords), ), newdescr)
        offset = np.cumsum(np.r_[0, nrecords])
        seen = []
        for (a, n, i, j) in zip(seqarrays, fldnames, offset[:-1], offset[1:]):
            names = a.dtype.names
            if names is None:
                # Unstructured input: goes into the next positional field.
                output['f%i' % len(seen)][i:j] = a
            else:
                for name in n:
                    output[name][i:j] = a[name]
                    if name not in seen:
                        seen.append(name)

    # Convert to the requested masked/record flavour and apply defaults.
    return _fix_output(_fix_defaults(output, defaults),
                       usemask=usemask, asrecarray=asrecarray)
def test_nancumsum(self):
    # The integer variants of self.mat contain no NaNs, so nancumsum must
    # degenerate to plain cumsum for every one of them.
    expected = np.cumsum(self.mat)
    for arr in self.integer_arrays():
        assert_equal(np.nancumsum(arr), expected)