def _parse_dicom_and_contour_files(filenames):
    """Convert three filenames to valid image data

    :param filenames: a 3-tuple record of dicom_filename, icontour_filename,
        and ocontour_filename:
        dicom_filename is the path string to the DICOM file
        icontour_filename is the path string to the i-contour file
        ocontour_filename is the path string to the o-contour file
    :return: 5-tuple (RecordData) containing the DICOM image data and contour mask data
    """
    dicom_filename, icontour_filename, ocontour_filename = filenames
    dicom_data, icontour_data, ocontour_data = None, None, None
    icontour_path, ocontour_path = [], []
    if dicom_filename:
        dicom_data = parsing.parse_dicom_file(dicom_filename)
        if dicom_data is not None:
            dicom_data = dicom_data['pixel_data']
    if icontour_filename:
        icontour_path = parsing.parse_contour_file(icontour_filename)
        if dicom_data is not None:
            height, width = dicom_data.shape
        else:
            # no DICOM image to take the shape from: infer the mask size
            # from the contour's bounding box instead
            max_x, max_y = 0, 0
            for x, y in icontour_path:
                max_x = max(max_x, x)
                max_y = max(max_y, y)
            height = round(max_x + 1)
            width = round(max_y + 1)
        icontour_data = parsing.poly_to_mask(icontour_path, width, height)
    if ocontour_filename:
        ocontour_path = parsing.parse_contour_file(ocontour_filename)
        if dicom_data is not None:
            height, width = dicom_data.shape
        else:
            max_x, max_y = 0, 0
            for x, y in ocontour_path:
                max_x = max(max_x, x)
                max_y = max(max_y, y)
            height = round(max_x + 1)
            width = round(max_y + 1)
        ocontour_data = parsing.poly_to_mask(ocontour_path, width, height)
    # TODO: fix the case in which all of them are None
    if dicom_data is not None:
        if icontour_data is None:
            icontour_data = np.zeros(dicom_data.shape, dtype=np.bool_)
        if ocontour_data is None:
            ocontour_data = np.zeros(dicom_data.shape, dtype=np.bool_)
    else:
        if icontour_data is not None:
            dicom_data = np.zeros(icontour_data.shape, dtype=np.int16)
        elif ocontour_data is not None:
            dicom_data = np.zeros(ocontour_data.shape, dtype=np.int16)
    return RecordData(dicom_data, icontour_data, ocontour_data,
                      icontour_path, ocontour_path)
def test_create():
    print("*** common array creation ****************************************")
    arr1 = np.arange(5, dtype=float)
    print('arange create:', arr1)
    arr1 = np.linspace(10, 20, 5, endpoint=False)
    print('linspace create:', arr1)
    print('zeros1:', np.zeros(5))
    print('zeros2:', np.zeros((5, 4)))
    print('ones1:', np.ones((5, 4, 3)))
    print('empty1:', np.empty((5, 4)))
    print('full1:', np.full((5, 4), 3))
    print('eye1:', np.eye(3, 4))
    print('linspace1:', np.linspace(0, 10, num=4))
def costFunc(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, nn_lambda):
    Theta1 = np.reshape(nn_params[0:(hidden_layer_size*(input_layer_size + 1))],
                        (hidden_layer_size, input_layer_size + 1))
    Theta2 = np.reshape(nn_params[hidden_layer_size*(input_layer_size + 1):np.size(nn_params)],
                        (num_labels, hidden_layer_size + 1))
    m = np.size(X, 0)
    J = 0
    Theta1_grad = np.zeros((np.size(Theta1, 0), np.size(Theta1, 1)))
    Theta2_grad = np.zeros((np.size(Theta2, 0), np.size(Theta2, 1)))
    # feed forward
    a1_all = np.concatenate((np.ones((m, 1)), X), axis=1)
    z2_all = np.dot(Theta1, a1_all.transpose()).transpose()
    a2_all = np.concatenate((np.ones((m, 1)), sigmoid(z2_all)), axis=1)
    z3_all = np.dot(Theta2, a2_all.transpose()).transpose()
    a3_all = sigmoid(z3_all)
    all_labels = np.eye(num_labels)
    # calculate 1st term corresponding to the non-regularized cost
    s = 0
    for cnt in range(0, m):
        for cnt1 in range(0, num_labels):
            temp = -all_labels[y[cnt], cnt1]*np.log(a3_all[cnt, cnt1]) \
                   - (1 - all_labels[y[cnt], cnt1])*np.log(1 - a3_all[cnt, cnt1])
            s = s + temp
    J_nr = 1/m*s
    # calculate 2nd term corresponding to the regularized part;
    # remember that we do not regularize the bias intercept (column 0),
    # and we work on copies so the original weights stay intact for backprop
    Theta1_reg = Theta1.copy()
    Theta2_reg = Theta2.copy()
    Theta1_reg[:, 0] = 0
    Theta2_reg[:, 0] = 0
    J_rg = nn_lambda / 2 / m * (np.sum(Theta1_reg ** 2) + np.sum(Theta2_reg ** 2))
    J = J_nr + J_rg
    # calculate gradient via backpropagation
    delta3 = np.zeros((m, num_labels))
    delta2 = np.zeros((m, hidden_layer_size))
    D1 = np.zeros((np.size(Theta1, 0), np.size(Theta1, 1)))
    D2 = np.zeros((np.size(Theta2, 0), np.size(Theta2, 1)))
    for cnt in range(0, m):
        delta3[cnt, :] = a3_all[cnt, :] - all_labels[y[cnt], :]
        delta2[cnt, :] = np.dot(delta3[cnt, :], Theta2[:, 1:(hidden_layer_size + 1)]) * sigmoid_gradient(z2_all[cnt, :])
        # accumulate outer products (1-D arrays need np.outer, not np.dot)
        D1 = D1 + np.outer(delta2[cnt, :], a1_all[cnt, :])
        D2 = D2 + np.outer(delta3[cnt, :], a2_all[cnt, :])
    Theta1_grad = 1/m*D1
    Theta2_grad = 1/m*D2
    Theta1_grad[:, 1:(input_layer_size + 1)] += nn_lambda/m*Theta1[:, 1:(input_layer_size + 1)]
    Theta2_grad[:, 1:(hidden_layer_size + 1)] += nn_lambda/m*Theta2[:, 1:(hidden_layer_size + 1)]
    grad = np.concatenate((np.ravel(Theta1_grad), np.ravel(Theta2_grad)))
    return J, grad
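# A quick smoke-test sketch for costFunc on a tiny random network. The
# sigmoid helpers below are assumptions: costFunc expects some sigmoid /
# sigmoid_gradient to exist, and these are the conventional definitions.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def sigmoid_gradient(z):
    s = sigmoid(z)
    return s * (1 - s)

input_layer_size, hidden_layer_size, num_labels = 3, 4, 2
m = 5
X = np.random.randn(m, input_layer_size)
y = np.random.randint(0, num_labels, m)   # integer labels in [0, num_labels)
n_params = hidden_layer_size * (input_layer_size + 1) + num_labels * (hidden_layer_size + 1)
nn_params = 0.1 * np.random.randn(n_params)
J, grad = costFunc(nn_params, input_layer_size, hidden_layer_size, num_labels, X, y, nn_lambda=1.0)
print(J, grad.shape)                      # scalar cost, gradient of length n_params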
def projection(img):
    h, w = img.shape[:2]
    proj = np.zeros(h)
    for i in range(w):
        for j in range(h):
            if img[j, i] > 200:
                proj[j] += 1
    return proj
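# Usage sketch: projection() builds a horizontal profile, counting the
# pixels brighter than 200 in each row.
import numpy as np

img = np.zeros((6, 8), dtype=np.uint8)
img[2, :] = 255            # one fully bright row
print(projection(img))     # [0. 0. 8. 0. 0. 0.]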
def __init__(self, D):
    # running statistics are updated manually during training,
    # not by the optimizer, so they must not be trainable
    self.running_mean = tf.Variable(np.zeros(D, dtype=np.float32), trainable=False)
    self.running_var = tf.Variable(np.zeros(D, dtype=np.float32), trainable=False)
    self.beta = tf.Variable(np.zeros(D, dtype=np.float32))  # learned shift, init 0
    self.gamma = tf.Variable(np.ones(D, dtype=np.float32))  # learned scale, init 1
def pick_strategy(self):
    '''
    TODO: This is a very simple function: choose the largest Q value
    in the Q matrix and use it to find the best strategy
    :return:
    '''
    self.best_strategy = np.zeros((4, 3))
def div_basis(self, bc):
    mesh = self.mesh
    p = self.p
    ldof = self.number_of_local_dofs()
    NC = mesh.number_of_cells()
    divPhi = np.zeros((NC, ldof), dtype=self.dtype)
    cell2edgeSign = self.cell_to_edge_sign()
    Dlambda, _ = mesh.grad_lambda()
    Rlambda, _ = mesh.rot_lambda()
    if p == 1:
        divPhi[:, 0] = np.sum(Dlambda[:, 1, :] * Rlambda[:, 2, :] - Dlambda[:, 2, :] * Rlambda[:, 1, :], axis=1)
        divPhi[:, 1] = np.sum(Dlambda[:, 1, :] * Rlambda[:, 2, :] + Dlambda[:, 2, :] * Rlambda[:, 1, :], axis=1)
        divPhi[:, 2] = np.sum(Dlambda[:, 2, :] * Rlambda[:, 0, :] - Dlambda[:, 0, :] * Rlambda[:, 2, :], axis=1)
        divPhi[:, 3] = np.sum(Dlambda[:, 2, :] * Rlambda[:, 0, :] + Dlambda[:, 0, :] * Rlambda[:, 2, :], axis=1)
        divPhi[:, 4] = np.sum(Dlambda[:, 0, :] * Rlambda[:, 1, :] - Dlambda[:, 1, :] * Rlambda[:, 0, :], axis=1)
        divPhi[:, 5] = np.sum(Dlambda[:, 0, :] * Rlambda[:, 1, :] + Dlambda[:, 1, :] * Rlambda[:, 0, :], axis=1)
        divPhi[:, 0:6:2] *= cell2edgeSign
        divPhi[:, 1:6:2] *= cell2edgeSign
    else:
        # TODO: raise an error for unsupported p
        print("error")
    return divPhi
def div_basis(self, bc):
    mesh = self.mesh
    p = self.p
    ldof = self.number_of_local_dofs()
    NC = mesh.number_of_cells()
    divPhi = np.zeros((NC, ldof), dtype=self.dtype)
    return divPhi
def div_basis(self, bc):
    mesh = self.mesh
    p = self.p
    ldof = self.number_of_local_dofs()
    NC = mesh.number_of_cells()
    divPhi = np.zeros((NC, ldof), dtype=np.float64)
    cell2edgeSign = self.cell_to_edge_sign()
    W = np.array([[0, 1], [-1, 0]], dtype=np.float64)
    Rlambda = mesh.rot_lambda()
    Dlambda = Rlambda @ W
    if p == 1:
        divPhi[:, 0] = np.sum(Dlambda[:, 1, :] * Rlambda[:, 2, :], axis=1) - np.sum(Dlambda[:, 2, :] * Rlambda[:, 1, :], axis=1)
        divPhi[:, 1] = np.sum(Dlambda[:, 2, :] * Rlambda[:, 0, :], axis=1) - np.sum(Dlambda[:, 0, :] * Rlambda[:, 2, :], axis=1)
        divPhi[:, 2] = np.sum(Dlambda[:, 0, :] * Rlambda[:, 1, :], axis=1) - np.sum(Dlambda[:, 1, :] * Rlambda[:, 0, :], axis=1)
        divPhi *= cell2edgeSign
    else:
        # TODO: raise an error for unsupported p
        print("error")
    return divPhi
def enkfanalysis(ensemble, obsvalue):
    obs_error = 1.0E-7
    obserrormat = np.mat([obs_error])
    obsHmatrix = np.mat([0, 0, 1, 0, 0, 0])
    matobsvalue = np.mat([obsvalue])
    # ensemble mean
    matensemble = np.mat(ensemble)
    ensembleNum = len(ensemble[0])
    meanvariable = np.mat(
        [np.mean(ensemble[0, :]), np.mean(ensemble[1, :]), np.mean(ensemble[2, :]),
         np.mean(ensemble[3, :]), np.mean(ensemble[4, :]),
         np.mean(ensemble[5, :])]).reshape((6, 1))
    bh = np.mat(np.zeros((6, 1)))
    hbh = np.mat(np.zeros((1, 1)))
    for i in range(ensembleNum):
        hx_hx = (obsHmatrix * np.mat(ensemble[:, i]).reshape((6, 1))
                 - obsHmatrix * meanvariable)
        bh = (np.mat(ensemble[:, i]).reshape((6, 1)) - meanvariable) * (hx_hx.T) + bh  # 6*1
        hbh = hx_hx * (hx_hx.T) + hbh  # 1*1
    bh = bh / (ensembleNum - 1)
    hbh = hbh / (ensembleNum - 1)
    # Kalman gain: cross covariance bh times the inverse innovation covariance
    kalmanGain = bh * ((hbh + obserrormat).I)
    analysisensemble = np.mat(np.zeros((6, ensembleNum)))
    for i in range(ensembleNum):
        analysisensemble[:, i] = matensemble[:, i] + kalmanGain * (matobsvalue - obsHmatrix * matensemble[:, i])
    return analysisensemble
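# Usage sketch for the EnKF analysis step: a 6-variable state with the
# third component observed (matching obsHmatrix above), 20 ensemble members.
import numpy as np

ensemble = np.random.randn(6, 20)
updated = enkfanalysis(ensemble, obsvalue=0.5)
print(updated.shape)   # (6, 20)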
def extract_features(imgs, feature_fns, verbose=False):
    num_images = imgs.shape[0]
    if num_images == 0:
        return np.array([])
    feature_dims = []
    first_image_features = []
    for feature_fn in feature_fns:
        feats = feature_fn(imgs[0].squeeze())
        assert len(feats.shape) == 1, 'Feature functions must be one-dimensional'
        feature_dims.append(feats.size)
        first_image_features.append(feats)
    total_feature_dim = sum(feature_dims)
    imgs_features = np.zeros((num_images, total_feature_dim))
    imgs_features[0] = np.hstack(first_image_features).T
    for i in range(1, num_images):
        idx = 0
        for feature_fn, feature_dim in zip(feature_fns, feature_dims):
            next_idx = idx + feature_dim
            imgs_features[i, idx:next_idx] = feature_fn(imgs[i].squeeze())
            idx = next_idx
        if verbose and i % 1000 == 0:
            print('Done extracting features for %d / %d images' % (i, num_images))
    return imgs_features
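# Usage sketch with a trivial feature function; any callable mapping one
# image to a 1-D vector fits the feature_fns interface.
import numpy as np

def mean_channels(img):
    # per-channel mean -> 1-D feature vector
    return img.reshape(-1, img.shape[-1]).mean(axis=0)

imgs = np.random.rand(10, 32, 32, 3)
feats = extract_features(imgs, [mean_channels])
print(feats.shape)   # (10, 3)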
def p_jds(Z, L, U, C, M):
    """
    Input: coefficient matrix Z
           desired number of dynamic active sets L
           dictionary atom label vector U
           the number of classes C
           the number of views M
    Output: Index matrix I for top-L dynamic active sets
    """
    # Initialize
    I = np.zeros((L, M), dtype=int)
    V = np.zeros((C, M))
    _I = np.zeros((C, M), dtype=int)
    S = np.zeros(C)
    for l in range(L):
        for i in range(C):
            # c holds the indices of the atoms labeled with class i
            c = find(U, i)
            for m in range(M):
                v, t = Max(Z[c, m])
                V[i, m] = v
                _I[i, m] = c[t]
            tmp = np.cumsum(np.power(V[i], 2))[-1]
            S[i] = np.power(tmp, 0.5)
        _v, _t = Max(S)
        I[l, :] = _I[_t, :]
        Z[_I[_t, :]] = 0
    return I
def initialize(self):
    '''
    In this function we initialize our state space
    '''
    self.X = np.zeros((4, 4))  # incoming cars
    self.Y = np.zeros((4, 4))  # waiting cars
    self.Z = np.zeros((4, 4))  # outgoing cars
def onehot_vector(self, x, size):
    if not isinstance(x, list):
        onehot = np.zeros(size)
        onehot[x] = 1
    else:
        onehot = np.zeros((len(x), size))
        onehot[np.arange(len(x)), x] = 1
    return onehot
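# The vectorized branch above relies on NumPy fancy indexing:
# onehot[np.arange(len(x)), x] = 1 sets one element per row in a single
# assignment. A self-contained demonstration of the same trick:
import numpy as np

labels = [0, 3, 1]
onehot = np.zeros((len(labels), 5))
onehot[np.arange(len(labels)), labels] = 1
# [[1. 0. 0. 0. 0.]
#  [0. 0. 0. 1. 0.]
#  [0. 1. 0. 0. 0.]]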
def test_whole(self):
    test_compound = np.zeros((self.compound_size,
                              self.pos_compound_size + self.neg_compound_size,
                              self.compound_size))
    test_gene = np.zeros((self.gene_size,
                          self.pos_gene_size + self.neg_gene_size,
                          self.gene_size))
    for i in range(self.compound_size):
        # assumption: each compound's row is filled from its index i
        test_compound[i, 0, :] = self.assign_value_compound(i)
def crf(prob, area):
    import pydensecrf.densecrf as dcrf
    from pydensecrf.utils import unary_from_softmax, create_pairwise_bilateral
    s_x = 16
    s_y = 16
    s_x_Gaussian = 4
    s_y_Gaussian = 4
    s_ch = 0.01
    W = prob.shape[2]
    H = prob.shape[1]
    NLABELS = prob.shape[0]
    image_path = ('top/top_mosaic_09cm_area%s.tif') % area
    dsm_path = ('dsm/dsm_09cm_matching_area%s.tif') % area
    image_rgb = imread(image_path)
    image_dsm = imread(dsm_path)
    image_rgb = image_rgb[:H, :W, :]
    image_dsm = image_dsm[:H, :W]
    #input_image = input_image.transpose(2,0,1)
    U = unary_from_softmax(prob)
    # one bilateral pairwise term per input channel (R, G, B, DSM)
    input_image_1 = image_rgb[:, :, 0:1]
    pairwise_energy_1 = create_pairwise_bilateral(sdims=(s_x, s_y), schan=(s_ch, ), img=input_image_1, chdim=2)
    input_image_2 = image_rgb[:, :, 1:2]
    pairwise_energy_2 = create_pairwise_bilateral(sdims=(s_x, s_y), schan=(s_ch, ), img=input_image_2, chdim=2)
    input_image_3 = image_rgb[:, :, 2:3]
    pairwise_energy_3 = create_pairwise_bilateral(sdims=(s_x, s_y), schan=(s_ch, ), img=input_image_3, chdim=2)
    input_image_4 = image_dsm[:, :]
    pairwise_energy_4 = create_pairwise_bilateral(sdims=(s_x, s_y), schan=(s_ch, ), img=input_image_4, chdim=2)
    d = dcrf.DenseCRF2D(W, H, NLABELS)
    d.setUnaryEnergy(U)
    d.addPairwiseEnergy(pairwise_energy_1, compat=10, kernel=dcrf.FULL_KERNEL)
    d.addPairwiseEnergy(pairwise_energy_2, compat=10, kernel=dcrf.FULL_KERNEL)
    d.addPairwiseEnergy(pairwise_energy_3, compat=10, kernel=dcrf.FULL_KERNEL)
    d.addPairwiseEnergy(pairwise_energy_4, compat=10, kernel=dcrf.FULL_KERNEL)
    d.addPairwiseGaussian(sxy=(s_x_Gaussian, s_y_Gaussian), compat=1)
    Q = d.inference(5)
    out_crf = np.argmax(Q, axis=0).reshape((H, W))
    # expand the label map into one binary mask per class
    out_crf_expand = np.zeros((NLABELS, H, W))
    for i in range(NLABELS):
        out_crf_expand[i] = 1 * (out_crf == i)
    return out_crf_expand
def test_load_data(path, sf_interest):
    data = h5py.File(path, 'r')
    gene_idx = data['gene_idx'][:]
    ensg_ids = data['gene_names'][:]
    conf_idx = data['conf_idx'][:]
    interest_idx = _get_interest_idx(gene_idx, ensg_ids, sf_interest.values())
    mask_idx = np.intersect1d(conf_idx, interest_idx)
    mask = np.zeros(gene_idx.size, dtype=bool)
    mask[mask_idx] = True
    return data, mask
def basis(self, bc):
    mesh = self.mesh
    dim = mesh.geom_dimension()
    ldof = self.number_of_local_dofs()
    NC = mesh.number_of_cells()
    p = self.p
    phi = np.zeros((NC, ldof, dim), dtype=self.dtype)
    return phi
def generate_output(t, a, sample_size=None):
    if t == 1:
        if isinstance(sample_size, int):
            return np.ones((sample_size, 1), dtype=np.float64)
        return np.ones((a.shape[0], 1), dtype=np.float64)
    elif t == 0:
        if isinstance(sample_size, int):
            return np.zeros((sample_size, 1), dtype=np.float64)
        return np.zeros((a.shape[0], 1), dtype=np.float64)
    else:
        print('argument t must be in {0, 1}')
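# Usage sketch: constant label columns for a batch, e.g. real/fake targets
# for a discriminator; `batch` is a stand-in array.
import numpy as np

batch = np.random.randn(4, 10)
real = generate_output(1, batch)                 # (4, 1) of ones
fake = generate_output(0, batch, sample_size=8)  # (8, 1) of zeros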
def correlation(G, l, n, O):
    corr = np.zeros(2)
    for isite in range(0, 2):
        for i in range(n):
            theta = np.tensordot(np.diag(l[(isite + i + 1) % 2, :]), G[(isite + i) % 2, :, :, :], axes=(1, 1))
            theta = np.tensordot(theta, np.diag(l[(isite + i) % 2, :]), axes=(2, 0))
        theta_o = np.tensordot(theta, O, axes=(1, 0))
        theta_o = np.tensordot(theta_o, O, axes=(-2, 0)).conj()
        order_o = [0] + list(range(2, n + 1)) + [n + 2] + [1, n + 1]
        corr[isite] = np.squeeze(np.tensordot(theta_o, theta, axes=(list(range(n + 3)), order_o))).item()
    return corr
def kmeans1(boxes, k, dist=np.mean):
    rows = boxes.shape[0]
    distances = np.zeros([rows, k])
    np.random.seed()
    clusters = boxes[np.random.choice(rows, k, replace=False)]
    cur_clusters = np.zeros([k, 2])
    last_clusters = np.zeros([rows, ])
    while True:
        for r in range(rows):
            distances[r] = 1 - iou(boxes[r], clusters)
        G = np.argmin(distances, axis=1)
        if (last_clusters == G).all():
            break
        # take the mean of each cluster as the new centroid
        for k_i in range(k):
            cur_clusters[k_i] = dist(boxes[G == k_i], axis=0)
        clusters = cur_clusters
        last_clusters = G  # remember the assignment to detect convergence
    return clusters
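# kmeans1 relies on an iou() helper that is not shown in this snippet. A
# minimal sketch, assuming boxes are (width, height) pairs anchored at the
# origin, the usual YOLO anchor-clustering setup:
import numpy as np

def iou(box, clusters):
    # intersection over union of origin-anchored rectangles
    inter = np.minimum(box[0], clusters[:, 0]) * np.minimum(box[1], clusters[:, 1])
    union = box[0] * box[1] + clusters[:, 0] * clusters[:, 1] - inter
    return inter / union

boxes = np.random.uniform(10, 100, size=(200, 2))
anchors = kmeans1(boxes, k=5)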
def smiles2fpRdkit(smiles, explicit=128):
    if len(smiles) == 0:
        print('Warning by RM: empty smiles!')
        # return an all-zero fingerprint, matching the shape convention below
        arr = np.zeros((1, explicit), dtype=int)
    else:
        ms = Chem.MolFromSmiles(smiles)
        fp = AllChem.GetMorganFingerprintAsBitVect(ms, 2, nBits=explicit)
        arr = np.zeros((1, ), dtype=int)
        DataStructs.ConvertToNumpyArray(fp, arr)
    return arr
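# Usage sketch, assuming the standard RDKit imports this function relies on.
import numpy as np
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem

fp = smiles2fpRdkit('CCO', explicit=128)   # ethanol -> 128-bit Morgan fingerprint
print(fp.shape, fp.sum())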
def getSampleCovariance(data, mean):
    sampleCovariance = np.zeros((Data.D, Data.D))
    for i in range(Data.numVectors):
        # column vector x_i - x_bar
        x_minus_x_bar = data[i].reshape(Data.D, 1) - mean
        x_minus_x_bar_T = x_minus_x_bar.T
        mul = np.dot(x_minus_x_bar, x_minus_x_bar_T)
        sampleCovariance += mul
    sampleCovariance /= Data.numVectors - 1
    return sampleCovariance
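# Sanity-check sketch: the loop above computes the unbiased sample
# covariance, so a vectorized version should agree with np.cov.
import numpy as np

samples = np.random.default_rng(0).normal(size=(100, 3))   # numVectors=100, D=3
mean = samples.mean(axis=0)
cov_loop = (samples - mean).T @ (samples - mean) / (100 - 1)
assert np.allclose(cov_loop, np.cov(samples, rowvar=False))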
def var_estimation(X):
    '''
    Use the superimposed method to compute the variance of multi-dimensional data
    :param X: input data array
    '''
    if X.ndim == 3:  # (n_events, n_epochs, n_times)
        var = np.zeros((X.shape[0], X.shape[2]))  # (n_events, n_times)
        for i in range(X.shape[0]):  # i for n_events
            ex = np.mat(np.mean(X[i, :, :], axis=0))  # (1, n_times)
            temp = np.mat(np.ones((1, X.shape[1])))  # (1, n_epochs)
            minus = (temp.T * ex).A  # (n_epochs, n_times)
            var[i, :] = np.mean((X[i, :, :] - minus)**2, axis=0)
    elif X.ndim == 2:  # (n_epochs, n_times)
        var = np.zeros(X.shape[1])  # (n_times,)
        ex = np.mat(np.mean(X, axis=0))  # (1, n_times)
        temp = np.mat(np.ones((1, X.shape[0])))  # (1, n_epochs)
        minus = (temp.T * ex).A  # (n_epochs, n_times)
        var = np.mean((X - minus)**2, axis=0)
    return var
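# Sanity-check sketch: the superimposed subtraction above is the population
# variance across the epoch axis, so it should match np.var.
import numpy as np

X = np.random.randn(8, 5, 100)   # (n_events, n_epochs, n_times)
assert np.allclose(var_estimation(X), np.var(X, axis=1))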
def getSubstack(stack1, r, sgX, sgY, sgZ):
    sX, sY, sZ = stack1.shape
    substack = np.zeros([sgX, sgY, sgZ])
    xf, yf, zf = r.astype(int)
    fdi = r[0] - xf
    fdj = r[1] - yf
    fdk = r[2] - zf
    if (sgX / 2) < xf < (sX - sgX / 2) and (sgY / 2) < yf < (
            sY - sgY / 2) and (sgZ / 2) < zf < (sZ - sgZ / 2):
        x0 = xf - sgX // 2
        x1 = x0 + sgX
        y0 = yf - sgY // 2
        y1 = y0 + sgY
        z0 = zf - sgZ // 2
        z1 = z0 + sgZ
        # trilinear interpolation between the eight neighboring substacks
        substack = (
            (1 - fdi) * (1 - fdj) * (1 - fdk) * stack1[x0:x1, y0:y1, z0:z1] +
            (fdi) * (1 - fdj) * (1 - fdk) * stack1[x0 + 1:x1 + 1, y0:y1, z0:z1] +
            (1 - fdi) * (fdj) * (1 - fdk) * stack1[x0:x1, y0 + 1:y1 + 1, z0:z1] +
            (1 - fdi) * (1 - fdj) * (fdk) * stack1[x0:x1, y0:y1, z0 + 1:z1 + 1] +
            (fdi) * (fdj) * (1 - fdk) * stack1[x0 + 1:x1 + 1, y0 + 1:y1 + 1, z0:z1] +
            (1 - fdi) * (fdj) * (fdk) * stack1[x0:x1, y0 + 1:y1 + 1, z0 + 1:z1 + 1] +
            (fdi) * (1 - fdj) * (fdk) * stack1[x0 + 1:x1 + 1, y0:y1, z0 + 1:z1 + 1] +
            (fdi) * (fdj) * (fdk) * stack1[x0 + 1:x1 + 1, y0 + 1:y1 + 1, z0 + 1:z1 + 1])
        substack -= np.mean(substack)
        substack /= np.linalg.norm(substack)
        return substack
    else:
        substack[:] = 0
        return substack
def hessian(self, x):
    n = x.shape[0]
    if self.method == "NPS" or self.method == 1:
        w = self.w
        xi = self.xi
        N = xi.shape[1]  # number of interpolation nodes
        H = np.zeros((n, n))
        for ii in range(N):
            X = x - xi[:, ii]
            if np.linalg.norm(X) > 1e-5:
                H = H + 3*w[ii]*((X*X.T)/np.linalg.norm(X) + np.linalg.norm(X)*np.identity(n))
    return H
def run(self):
    while True:
        self.predict()
        policy_stable = True
        for state_i in range(len(self.state_values_func)):
            old_action = copy.deepcopy(self.policy[state_i])
            state_action_value = np.zeros(self.env.action_space.n)
            for action_i in range(self.env.action_space.n):
                self.env.set_current_state(state_i)
                next_state, reward, _, _ = self.env.step(action_i)
                state_action_value[action_i] = reward + self.gamma * self.state_values_func[next_state]
            # break ties randomly among the maximizing actions
            optimal_action = np.random.choice(
                np.flatnonzero(state_action_value == state_action_value.max()))
            # reset the row to a deterministic policy on the optimal action
            self.policy[state_i][:] = 0.0
            self.policy[state_i][optimal_action] = 1.0
            if not np.array_equal(old_action, self.policy[state_i]):
                policy_stable = False
        if policy_stable:
            return
def GeneralBrownianMotion(T, mu=lambda t, Wt: 0, sigma=lambda t, Wt: 1, B0=1, steps=1000, seed=rand_seed()):
    dW = BrownianPath(T, steps, seed)
    W = np.zeros((steps + 1,))
    dt = T / steps
    times = [i * dt for i in range(steps + 1)]
    W[0] = B0
    # Euler-Maruyama step: accumulate drift and diffusion increments
    for idx, dw in enumerate(dW):
        t = times[idx]
        w = W[idx]
        _mu = mu(t, w)
        _sigma = sigma(t, w)
        W[idx + 1] = w + _mu * dt + _sigma * dw
    _W = UnitLocalLinearCurveFromArray(zip(times, W))
    return _W
def predict(self, X):
    """ X is N x D where each row is an example we wish to predict the label for """
    num_test = X.shape[0]
    # make sure that the output type matches the input type
    Ypred = np.zeros(num_test, dtype=self.ytr.dtype)
    # loop over all test rows
    for i in range(num_test):
        # find the nearest training image to the i'th test image
        # using the L1 distance (sum of absolute value differences)
        distances = np.sum(np.abs(self.Xtr - X[i, :]), axis=1)
        min_index = np.argmin(distances)  # get the index with the smallest distance
        Ypred[i] = self.ytr[min_index]
    return Ypred
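# Usage sketch: predict() assumes the enclosing classifier stored training
# data in self.Xtr and labels in self.ytr, e.g. via a train() method (the
# usual CS231n-style nearest-neighbor setup). A hypothetical minimal class:
import numpy as np

class NearestNeighbor:
    def train(self, X, y):
        self.Xtr = X   # N x D training examples
        self.ytr = y   # N labels

nn = NearestNeighbor()
nn.train(np.random.rand(50, 8), np.random.randint(0, 3, 50))
preds = predict(nn, np.random.rand(4, 8))   # call the method above directly
print(preds)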
def get_overview(code, headers):
    url = f'http://comp.fnguide.com/SVO2/ASP/SVD_Main.asp?pGB=1&gicode=A{code}&cID=&MenuYn=Y&ReportGB=&NewMenuID=101&stkGb=701'
    req = requests.get(url, headers=headers)
    soup = BeautifulSoup(req.content, 'html.parser')
    table = soup.select('#svdMainGrid1 > table')
    _state = pd.read_html(str(table))[0]
    # columns: previous close, market cap, common shares issued, preferred shares issued
    col = ['전일종가', '시가총액', '발행주식수_보통주', '발행주식수_우선주']
    try:
        price = int(_state.loc[0][1].split('/')[0].replace(',', ''))
        Market_cap = int(_state.loc[4][1])
        shares_norm = int(_state.loc[6][1].split('/')[0].replace(',', ''))
        shares_pre = int(_state.loc[6][1].split('/')[1].replace(',', ''))
        return col, [price, Market_cap, shares_norm, shares_pre]
    except Exception:
        return col, np.zeros(len(col))
def ori_img(self, input, x, y, reflect_i):
    r = 3 * self.sigma_s
    a = np.arange(-r, r + 1)
    if len(input.shape) == 2:
        input_diff = np.zeros((len(a), len(a), 1))
    else:
        input_diff = np.zeros((len(a), len(a), input.shape[2]))
    for j in range(len(a)):
        for i in range(len(a)):
            input_diff[i][j] = np.float64(reflect_i[x - a[i] + r][y - a[j] + r])
    return input_diff
def div_basis(self, bc):
    mesh = self.mesh
    p = self.p
    ldof = self.number_of_local_dofs()
    NC = mesh.number_of_cells()
    divPhi = np.zeros((NC, ldof), dtype=self.dtype)
    cell2edgeSign = self.cell_to_edge_sign()
    Dlambda, _ = mesh.grad_lambda()
    if p == 0:
        divPhi[:, 0] = np.sum(Dlambda[:, 1, :]*Dlambda[:, 2, :] - Dlambda[:, 2, :]*Dlambda[:, 1, :], axis=1)
        divPhi[:, 1] = np.sum(Dlambda[:, 2, :]*Dlambda[:, 0, :] - Dlambda[:, 0, :]*Dlambda[:, 2, :], axis=1)
        divPhi[:, 2] = np.sum(Dlambda[:, 0, :]*Dlambda[:, 1, :] - Dlambda[:, 1, :]*Dlambda[:, 0, :], axis=1)
        divPhi *= cell2edgeSign
    else:
        # TODO: raise an error for unsupported p
        print("error")
    return divPhi