def rgg(value, min=10, max=100, invert=False):
    """Map *value* in [min, max] to a red->green ``#rrggbb`` hex color.

    Values at the low end render red, the midpoint renders yellow, and the
    high end renders green; ``invert=True`` swaps the red and green
    channels so the gradient runs green->red.

    The original called ``map(value, [a, b], [c, d])`` — a nonstandard
    Arduino-style re-range helper; with the builtin ``map`` the ``int()``
    call would raise.  The linear re-map is implemented locally and
    clamped so channel values always fit ``%02x``.

    :param value: number to colorize.
    :param min: lower bound of the input range (keeps original name).
    :param max: upper bound of the input range (keeps original name).
    :param invert: swap the red and green channels.
    :return: color string of the form ``#rrggbb`` (blue is always 0).
    """
    def _scale(v, src_lo, src_hi, dst_lo, dst_hi):
        # Arduino-style linear re-map of v from [src_lo, src_hi] to
        # [dst_lo, dst_hi], clamped to the destination range.
        out = (v - src_lo) * (dst_hi - dst_lo) / (src_hi - src_lo) + dst_lo
        lo, hi = (dst_lo, dst_hi) if dst_lo <= dst_hi else (dst_hi, dst_lo)
        if out < lo:
            out = lo
        elif out > hi:
            out = hi
        return int(out)

    med = (max - min) / 2 + min
    # Red fades out over the upper half, green fades in over the lower half.
    r = _scale(value, med, max, 255, 0)
    g = _scale(value, min, med, 0, 255)
    if invert:
        r, g = g, r
    return '#%02x%02x%02x' % (r, g, 0)
def predict(self, input_array):
    """Feed *input_array* forward through the two-layer network.

    Fixes two defects in the original: ``np.map`` does not exist (NumPy
    has no ``map`` attribute), and the hidden layer was computed with
    ``self.weights_ho`` instead of ``self.weights_ih`` (the same matrix
    was used for both layers).

    :param input_array: sequence of input activations.
    :return: flat 1-D numpy array of output activations.
    """
    def _sigmoid(z):
        # Logistic activation; numpy broadcasting applies it elementwise.
        # Assumes the project's `sigmoid` is the standard logistic — TODO confirm.
        return 1.0 / (1.0 + np.exp(-z))

    inputs = np.array(input_array)
    # input -> hidden uses weights_ih (original wrongly used weights_ho)
    hidden = _sigmoid(np.dot(self.weights_ih, inputs) + self.bias_h)
    # hidden -> output
    output = _sigmoid(np.dot(self.weights_ho, hidden) + self.bias_o)
    return np.ravel(output)
def bp_sensitivity_map(self, sensitivity_map):
    """Back-propagate the sensitivity (delta) map to the previous layer.

    Fixes ``np.map`` (NumPy has no ``map`` attribute — the original would
    raise before flipping the kernels); the 180-degree rotation of each
    kernel channel is built with a list comprehension instead.

    :param sensitivity_map: this layer's sensitivity map.
    :return: the previous layer's delta array, cropped to the un-padded
        input region.
    """
    self.pad_input_height = self.pad_input.shape[-2]
    self.pad_input_width = self.pad_input.shape[-1]
    # Expand the map to stride-1 spacing so a plain convolution applies.
    expand_array = self.expand_sensitivity_map(sensitivity_map)
    expand_height = expand_array.shape[-2]
    expand_width = expand_array.shape[-1]
    # Zero padding required for a "full" convolution over the padded input.
    zp = (self.pad_input_height + self.kernel_height - 1 - expand_height) // 2
    sum_delta_array = np.zeros(
        (self.channel_num, self.pad_input_height, self.pad_input_width))
    padded_array = pad(expand_array, zp)
    for k in range(self.kernel_num):
        kernel = self.kernels[k]
        # Rotate each channel's weights 180 degrees (np.rot90 twice).
        flipped_weight = np.array([np.rot90(w, 2) for w in kernel.weight])
        delta_array = np.zeros((self.channel_num,
                                self.pad_input_height,
                                self.pad_input_width))
        for d in range(delta_array.shape[0]):
            # conv is a project-local helper: conv(in, kernel, out, stride, bias).
            conv(padded_array[k], flipped_weight[d], delta_array[d], 1, 0)
        sum_delta_array += delta_array
    # Elementwise product with the activation derivative on the padded input.
    sum_delta_array *= self.activator.backward(self.pad_input)
    # Crop away the zero padding added on the forward pass.
    self.delta_array = sum_delta_array[:, self.zp:self.zp + self.input_height,
                                       self.zp:self.zp + self.input_width]
    return self.delta_array
def spherical2cartesian(lon, lat):
    """Convert spherical coordinates (degrees) to unit-sphere cartesian.

    Fixes ``np.map`` (no such NumPy function) — ``np.radians`` is called
    on each coordinate directly, which also broadcasts over arrays.

    :param lon: longitude(s) in degrees.
    :param lat: latitude(s) in degrees.
    :return: tuple ``(x, y, z)`` on the unit sphere.
    """
    rlon = np.radians(lon)
    rlat = np.radians(lat)
    x = np.cos(rlat) * np.cos(rlon)
    y = np.cos(rlat) * np.sin(rlon)
    z = np.sin(rlat)
    return x, y, z
def loadDataSet(fileName):
    """Load a tab-separated numeric data file into a list of float rows.

    Fixes ``np.map`` (NumPy has no ``map`` attribute) by converting each
    field with ``float`` in a comprehension, and closes the file via a
    context manager (the original leaked the handle).

    :param fileName: path to a file of tab-delimited float columns.
    :return: list of rows, each a list of floats.
    """
    dataMat = []
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            flLine = [float(tok) for tok in curLine]
            dataMat.append(flLine)
    return dataMat
def loadDataSet(fileName):
    """Load a tab-separated numeric data file into a list of float rows.

    Fixes ``np.map`` (NumPy has no ``map`` attribute) by mapping each
    field through ``float`` explicitly, and closes the file via a context
    manager (the original leaked the handle).

    :param fileName: path to a file of tab-delimited float columns.
    :return: list of rows, each a list of floats.
    """
    dataMat = []
    with open(fileName) as fr:
        for line in fr:
            curLine = line.strip().split('\t')
            # Convert every field of the row to a float.
            fltLine = [float(tok) for tok in curLine]
            dataMat.append(fltLine)
    return dataMat
def train(self, input_array, target_array):
    """One stochastic-gradient-descent step on a two-layer sigmoid network.

    Fixes several defects in the original:
    - ``np.map`` does not exist; the logistic is applied by broadcasting.
    - the output layer used ``self.weights_ih`` instead of ``weights_ho``.
    - gradients called ``sigmoid`` where the sigmoid *derivative*
      ``y * (1 - y)`` is required, and used ``np.dot`` where elementwise
      multiplication (and outer products for the weight deltas) is
      required for 1-D activation vectors.
    - the hidden gradient was computed from ``output_errors`` instead of
      the hidden activations and back-propagated hidden errors.

    :param input_array: sequence of input activations.
    :param target_array: sequence of target outputs.
    """
    def _sigmoid(z):
        # Logistic activation; assumes the project's `sigmoid` is the
        # standard logistic — TODO confirm.
        return 1.0 / (1.0 + np.exp(-z))

    inputs = np.array(input_array)
    hidden = _sigmoid(np.dot(self.weights_ih, inputs) + self.bias_h)
    outputs = _sigmoid(np.dot(self.weights_ho, hidden) + self.bias_o)

    targets = np.array(target_array)
    output_errors = targets - outputs

    # delta_o = lr * (t - y) * y * (1 - y), elementwise.
    gradients = outputs * (1.0 - outputs) * output_errors * self.learning_rate
    # One delta per (output, hidden) weight -> outer product.
    self.weights_ho += np.outer(gradients, hidden)
    self.bias_o += gradients

    # Back-propagate through the (already updated) output weights,
    # matching the original's statement order.
    hidden_errors = np.dot(np.transpose(self.weights_ho), output_errors)
    hidden_gradient = (hidden * (1.0 - hidden)
                       * hidden_errors * self.learning_rate)
    self.weights_ih += np.outer(hidden_gradient, inputs)
    self.bias_h += hidden_gradient
def addBoostTrainDS(dataArr, classLabels, numIt=40):
    """Train an AdaBoost ensemble of decision stumps.

    Fixes from the original: ``apend`` typo, ``np.map(classLabels)`` ->
    ``np.mat(classLabels)``, inconsistent ``classEST``/``classEst``
    spelling, missing ``np.`` prefixes on ``ones``/``log``/``exp``/
    ``sign``, and Python-2 ``print`` statements converted to calls.

    :param dataArr: numpy array of shape (m, n) of training samples.
    :param classLabels: sequence of +1/-1 class labels, length m.
    :param numIt: maximum number of boosting rounds.
    :return: list of weak-classifier dicts, each carrying its ``alpha``.
    """
    weakClassArr = []
    m = dataArr.shape[0]
    D = np.mat(np.ones((m, 1)) / m)          # sample weights, uniform start
    aggClassEst = np.mat(np.zeros((m, 1)))   # running weighted vote
    for i in range(numIt):
        # buildStump (project-local) returns the best stump for weights D.
        bestStump, error, classEst = buildStump(dataArr, classLabels, D)
        print("D:", D.T)
        # max(error, 1e-16) guards against division by zero on a perfect stump.
        alpha = float(0.5 * np.log((1.0 - error) / max(error, 1e-16)))
        bestStump['alpha'] = alpha
        weakClassArr.append(bestStump)
        print("classEst: ", classEst.T)
        # Re-weight samples: shrink correctly classified, grow misclassified.
        expon = np.multiply(-1 * alpha * np.mat(classLabels).T, classEst)
        D = np.multiply(D, np.exp(expon))
        D = D / D.sum()
        aggClassEst += alpha * classEst
        print("aggClassEst: ", aggClassEst.T)
        aggErrors = np.multiply(
            np.sign(aggClassEst) != np.mat(classLabels).T, np.ones((m, 1)))
        errorRate = aggErrors.sum() / m
        print("total error: ", errorRate, "\n")
        if errorRate == 0.0:
            break
    return weakClassArr
# NOTE(review): this fragment references names (`i`, `pssm`, `xyz`, `masks`,
# `seq`, `id`, `ids`, `data_path`, `bcolz`) defined outside this view —
# presumably the body of a loop over sequence records; confirm in context.
# next get the PSSM matrix for the sequence
sp = 21 * i
ep = 21 * (i + 1)
psi = np.array(pssm[sp:ep])
pssmi = np.stack([p for p in psi], axis=1)
# then get the coordinates
sx = 3 * i
ex = 3 * (i + 1)
xi = np.array(xyz[sx:ex])
xyzi = np.stack([c for c in xi], axis=1) / 100  # have to scale by 100 to match PDB
# lastly convert the mask to indices; np.where (not the nonexistent np.map)
# returns a tuple of index arrays — take the first (and only) axis.
msk_idx = np.where(np.array(list(masks[i])) == '+')[0]
# bracket id or get "setting an array element with a sequence"
zt = np.array([[id], seq, pssmi, xyzi, msk_idx])
if i == 0:
    # First record: create the carray fresh, sized for all ids.
    bc = bcolz.carray([zt], rootdir=data_path + 'testing.bc', mode='w',
                      expectedlen=len(ids))
    bc.flush()
else:
    # NOTE(review): mode='w' truncates the existing carray before append —
    # this looks like it should be mode='a'; verify against bcolz docs.
    bc = bcolz.carray(rootdir=data_path + 'testing.bc', mode='w')
    bc.append([zt])
    bc.flush()