def solve_linear_system(self):
    if not self.iter_solver or self.guess is None:
        self.guess = spsolve(self.technosphere_matrix, self.demand_array)
        return self.guess
    else:
        solution, status = self.iter_solver(
            self.technosphere_matrix, self.demand_array,
            x0=self.guess, maxiter=1000,
        )
        if status != 0:
            # Iterative solver did not converge; fall back to a direct solve
            return spsolve(self.technosphere_matrix, self.demand_array)
        return solution
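A compatible `iter_solver` here is any callable with SciPy's Krylov-solver signature, returning a `(solution, status)` pair. A minimal sketch, assuming the instance is called `lca` and the technosphere matrix is suitable for conjugate gradients (both names are illustrative):

from scipy.sparse.linalg import cg

# cg(A, b, x0=..., maxiter=...) returns (x, info); info == 0 means converged
lca.iter_solver = cg
lca.guess = None          # the first call falls back to the direct solve
solution = lca.solve_linear_system()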
def solve(u, N):
    """Solve the PDE in (0, 1) with coefficients u and N Chebyshev
    interpolation points."""
    # Boundary condition, p(1; u) = c
    c = 0.
    # Create N Chebyshev nodes in (0, 1)
    nodes = np.zeros(N + 2)
    nodes[1:-1] = np.cos((2 * np.arange(N) + 1) * np.pi / (2 * N) - np.pi) / 2 + 0.5
    nodes[-1] = 1
    A, b_bttm = stiffness_matrix(nodes, u)
    b = cal_B(nodes)
    # Change last entry of b for the Dirichlet boundary condition at 1
    b[-1] = b[-1] + c * b_bttm
    b = np.append(b, c)
    # Solve the PDE
    p = spsolve(A, b)
    # Add 0 for the first node
    p = np.insert(p, 0, 0)
    return p, nodes
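The node formula above maps N Chebyshev points from (-1, 1) onto (0, 1). A quick self-contained check that the interior nodes land strictly inside the interval, in increasing order:

import numpy as np

N = 8
j = np.arange(N)
interior = np.cos((2 * j + 1) * np.pi / (2 * N) - np.pi) / 2 + 0.5
assert np.all((interior > 0) & (interior < 1))
assert np.all(np.diff(interior) > 0)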
def solve(k, N):
    """Solve the PDE in (0, 1) with coefficients k and N Chebyshev
    interpolation points."""
    # Make sure k_0 = 1 (or however many dimensions we are not searching for):
    # place k in the middle of a vector of ones (total length = 10)
    dim_k = k.shape[0]
    if dim_k % 2 != 1:
        pad_width = (int((11 - dim_k) / 2), int((9 - dim_k) / 2))
    else:
        pad_width = int((10 - dim_k) / 2)
    k_full = np.pad(k, pad_width, 'constant', constant_values=1)
    # Boundary condition, p(1; u) = c
    c = 1.
    # Create N Chebyshev nodes in (0, 1)
    nodes = np.zeros(N + 2)
    nodes[1:-1] = np.cos((2 * np.arange(N) + 1) * np.pi / (2 * N) - np.pi) / 2 + 0.5
    nodes[-1] = 1
    A, b_bttm = stiffness_matrix(nodes, k_full)
    b = cal_B(nodes)
    # Change last entry of b for the Dirichlet boundary condition at 1
    b[-1] = b[-1] + c * b_bttm
    b = np.append(b, c)
    # Solve the PDE
    p = spsolve(A, b)
    # Add 0 for the first node
    p = np.insert(p, 0, 0)
    return p, nodes
def poisson_blend(img_source, img_target, img_mask):
    """Combine images using Poisson editing."""
    x_max, y_max = img_source.shape[1], img_source.shape[0]
    img_mask = img_mask != 0
    # Determines the diagonals on the coefficient matrix - flattened positions
    positions = np.where(img_mask)
    positions = (positions[0] * x_max) + positions[1]
    mat_a = poisson_matrix(x_max, y_max, positions)
    pois = poisson_matrix(x_max, y_max)
    # Get positions in the mask that should be taken from the target
    positions_from_target = np.where((~img_mask).flatten())[0]
    # For each layer (e.g. RGB)
    for num_layer in range(img_target.shape[2]):
        tgt = img_target[..., num_layer].flatten()
        src = img_source[..., num_layer].flatten()
        mat_b = pois * src
        mat_b[positions_from_target] = tgt[positions_from_target]
        sol = spsolve(mat_a, mat_b)
        sol = np.clip(sol.reshape((y_max, x_max)), 0, 255)
        img_target[..., num_layer] = np.array(sol, img_target.dtype)
    return img_target
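`poisson_blend` assumes a helper `poisson_matrix` that is not shown. A hypothetical reconstruction consistent with its use above (a 5-point Laplacian over the flattened image; when `positions` is given, only those rows keep the stencil while all other rows become identity rows, so masked-out pixels are simply copied from the target), not the original implementation:

import numpy as np
import scipy.sparse

def poisson_matrix(x_max, y_max, positions=None):
    n = x_max * y_max
    main = 4.0 * np.ones(n)
    side = -1.0 * np.ones(n - 1)
    vert = -1.0 * np.ones(n - x_max)
    lap = scipy.sparse.diags([main, side, side, vert, vert],
                             [0, -1, 1, -x_max, x_max], format='csr')
    if positions is None:
        return lap
    inside = np.zeros(n)
    inside[positions] = 1.0
    # Laplacian rows inside the mask, identity rows outside it
    return (scipy.sparse.diags(inside) @ lap
            + scipy.sparse.diags(1.0 - inside)).tocsr()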
def model(self, sample):
    """
    Rerun LCIA with a new sample and return the score.

    Attributes
    ----------
    sample : np.array
        Array that contains uniform samples on [0, 1] with the values for all
        parameters and all params from all inputs.

    Returns
    -------
    score : np.array
        Contains the LCIA score for all LCIA methods.
        TODO probably can only do scalars atm

    """
    lca = self.lca
    self.amount_tech = lca.tech_params['amount']
    self.amount_bio = lca.bio_params['amount']
    self.i_sample = 0
    self.replace_non_parameterized_exchanges(sample)
    self.replace_parameterized_exchanges(sample)
    lca.rebuild_technosphere_matrix(self.amount_tech)
    lca.rebuild_biosphere_matrix(self.amount_bio)
    score = (sum(lca.characterization_matrix) * lca.biosphere_matrix) * \
        spsolve(lca.technosphere_matrix, lca.demand_array)
    # np.append returns a new array, so the result must be reassigned
    self.scores = np.append(self.scores, score)
    return score
def assemble_and_solve_linear_system(self, tol: float) -> np.ndarray:
    """Assemble and solve the linear system."""
    A, b = self.assemble_matrix_rhs()

    # Estimate condition number
    logger.info(f"Max element in A {np.max(np.abs(A)):.2e}")
    logger.info(f"Max {np.max(np.sum(np.abs(A), axis=1)):.2e} and "
                f"min {np.min(np.sum(np.abs(A), axis=1)):.2e} A sum.")

    # UMFPACK-style estimate: min/max of the absolute diagonal is a cheap
    # reciprocal condition number estimate
    abs_diag_A = np.abs(A.diagonal())
    logger.info(f"UMFPACK Condition number estimate: "
                f"{np.min(abs_diag_A) / np.max(abs_diag_A):.2e}")

    if self.params.linear_solver == "direct":
        tic = time.time()
        logger.info("Solve Ax=b using scipy")
        # sol = spla.spsolve(A, b)
        sol = spsolve(A, b)  # pypardiso
        logger.info(f"Done. Elapsed time {time.time() - tic}")
        norm = np.linalg.norm(b - A * sol)
        logger.info(f"||b-Ax|| = {norm}")
        rhs_norm = np.linalg.norm(b)
        identical_zero = np.isclose(rhs_norm, 0) and np.isclose(norm, 0)
        rel_norm = norm / rhs_norm if not identical_zero else norm
        logger.info(f"||b-Ax|| / ||b|| = {rel_norm}")
        return sol
    else:
        raise ValueError(
            f"Unknown linear solver {self.params.linear_solver}")
def score_from_sample(lca, sobol_sample):
    vector = lca.tech_params['amount']
    # Normal parameters
    q = (q_high - q_low) * sobol_sample[:n_normal] + q_low
    params_normal_new = norm.ppf(q, loc=params_normal['loc'], scale=params_normal['scale'])
    np.put(vector, indices_normal, params_normal_new)
    del q
    # Triangular parameters
    q = sobol_sample[n_normal:n_normal + n_triang]
    loc = params_triang['minimum']
    scale = params_triang['maximum'] - params_triang['minimum']
    c = (params_triang['loc'] - loc) / scale
    params_triang_new = triang.ppf(q, c=c, loc=loc, scale=scale)
    np.put(vector, indices_triang, params_triang_new)
    del q
    # Lognormal parameters
    # TODO implement group sampling
    # q = (q_high-q_low)*samples[:,:n_lognor] + q_low
    q = (q_high - q_low) * np.random.rand(n_lognor) + q_low
    params_lognor_new = lognorm.ppf(q, s=params_lognor['scale'], scale=np.exp(params_lognor['loc']))
    np.put(vector, indices_lognor, params_lognor_new)
    lca.rebuild_technosphere_matrix(vector)
    score = (cB * spsolve(lca.technosphere_matrix, d))[0]
    return score
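The `c` conversion above follows scipy.stats.triang's parameterisation: the distribution lives on [loc, loc + scale] with its mode at loc + c * scale. A self-contained check with illustrative numbers:

from scipy.stats import triang

minimum, maximum, mode = 1.0, 5.0, 2.0
c = (mode - minimum) / (maximum - minimum)
# ppf inverts the CDF; the median must lie inside the support
sample = triang.ppf(0.5, c=c, loc=minimum, scale=maximum - minimum)
assert minimum < sample < maximum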
def get_cf_params_local_sa(lca, write_dir, const_factors=(0.1, 10)):
    """Local SA for characterization factors."""
    path_lsa_cf = Path(write_dir) / "LSA_scores_cf.pickle"
    if not path_lsa_cf.exists():
        bio_x_tech_x_demand = lca.biosphere_matrix * spsolve(
            lca.technosphere_matrix, lca.demand_array
        )
        path_lsa_include_inds_cf = Path(write_dir) / "include_inds_cf.pickle"
        inds_uncertain = np.where(lca.cf_params["uncertainty_type"] > 1)[0]
        if not path_lsa_include_inds_cf.exists():
            uncertain_cf_params_temp = lca.cf_params[inds_uncertain]
            # Exclude characterization factors that are not affected by the given demand
            exclude_flows = np.where(bio_x_tech_x_demand == 0)[0]
            exclude_inds = np.array([])
            for flow in exclude_flows:
                exclude_inds = np.hstack(
                    [exclude_inds, np.where(uncertain_cf_params_temp["row"] == flow)[0]]
                )
            include_inds_temp = np.setdiff1d(
                np.arange(len(uncertain_cf_params_temp)), exclude_inds
            )
            write_pickle(include_inds_temp, path_lsa_include_inds_cf)
        else:
            include_inds_temp = read_pickle(path_lsa_include_inds_cf)
        include_inds = inds_uncertain[include_inds_temp]
        uncertain_cf_params = lca.cf_params[include_inds]
        flows = uncertain_cf_params["row"]
        bio_reverse_dict = lca.reverse_dict()[WHERE_BIO_REVERSE_DICT]
        lsa_scores_cf = {}
        for i, param in enumerate(uncertain_cf_params):
            flow = flows[i]
            input_ = bio_reverse_dict[flow]
            scores = []
            for const_factor in const_factors:
                characterization_vector = sum(deepcopy(lca.characterization_matrix))
                characterization_vector[0, flow] *= const_factor
                score = characterization_vector * bio_x_tech_x_demand
                scores.append(score[0])
            lsa_scores_cf[include_inds[i]] = {
                "input": input_,
                "scores": np.array(scores),
            }
        write_pickle(lsa_scores_cf, path_lsa_cf)
    else:
        lsa_scores_cf = read_pickle(path_lsa_cf)
    return lsa_scores_cf
def __Solve(self, A, B):
    if self.__solver[0] == 'direct':
        if USE_PYPARDISO:
            return spsolve(A, B)
        else:
            return sparse.linalg.spsolve(A, B)
    elif self.__solver[0] == 'cg':
        if self.__solver[2]:
            # Jacobi (diagonal) preconditioner
            Mprecond = sparse.diags(1 / A.diagonal(), 0)
        else:
            Mprecond = None
        res, info = sparse.linalg.cg(A, B, tol=self.__solver[1], M=Mprecond)
        if info > 0:
            print('Warning: CG convergence to tolerance not achieved')
        return res
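A standalone sketch of the same Jacobi-preconditioned CG pattern on a toy SPD matrix (note that recent SciPy releases spell the tolerance argument `rtol` rather than `tol`, so it is left at its default here):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import cg

A = sparse.diags([2.0, -1.0, -1.0], [0, -1, 1], shape=(100, 100), format='csr')
B = np.ones(100)
Mprecond = sparse.diags(1 / A.diagonal(), 0)   # Jacobi preconditioner
res, info = cg(A, B, M=Mprecond)
if info > 0:
    print('Warning: CG convergence to tolerance not achieved')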
def solve_linear_system(self):
    """
    Master solution function for linear system :math:`Ax=B`.

        To most numerical analysts, matrix inversion is a sin.

        -- Nicolas Higham, Accuracy and Stability of Numerical Algorithms,
        Society for Industrial and Applied Mathematics, Philadelphia, PA,
        USA, 2002, p. 260.

    We use `UMFpack <http://www.cise.ufl.edu/research/sparse/umfpack/>`_,
    which is a very fast solver for sparse matrices.

    If the technosphere matrix has already been factorized, then the
    decomposed technosphere (``self.solver``) is reused. Otherwise the
    calculation is redone completely.

    """
    if hasattr(self, "solver"):
        return self.solver(self.demand_array)
    else:
        return spsolve(self.technosphere_matrix, self.demand_array)
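One way the cached `self.solver` callable described in the docstring could be produced is with scipy.sparse.linalg.factorized, which returns a solve function bound to the LU factors (a minimal sketch, not necessarily the original implementation):

from scipy.sparse.linalg import factorized

def decompose_technosphere(self):
    # factorized() wants CSC input; the returned callable back-substitutes
    # against the stored factors for each new demand array
    self.solver = factorized(self.technosphere_matrix.tocsc())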
def model(self, sample):
    lca = self.lca
    self.amount_tech = lca.tech_params['amount']
    self.amount_bio = lca.bio_params['amount']
    self.i_sample = 0
    self.replace_non_parameterized(sample)
    self.replace_parameterized(sample)
    lca.rebuild_technosphere_matrix(self.amount_tech)
    lca.rebuild_biosphere_matrix(self.amount_bio)
    score = (sum(lca.characterization_matrix) * lca.biosphere_matrix) * \
        spsolve(lca.technosphere_matrix, lca.demand_array)
    return score
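The score expression used in these `model` methods is the standard matrix LCA identity score = c^T B A^{-1} d. A minimal self-contained illustration with toy matrices:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

A = sparse.identity(3, format='csc')       # technosphere (toy)
B = sparse.csr_matrix(np.ones((2, 3)))     # biosphere
c = np.array([1.0, 0.5])                   # characterization weights
d = np.array([1.0, 0.0, 0.0])              # demand
score = c @ (B @ spsolve(A, d))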
def get_tech_params_local_sa(
    where_tech_lsa, lca, write_dir, const_factors=(0.1, 10), tag=None
):
    """Local SA for technosphere exchanges."""
    path_lsa_tech = Path(write_dir) / "LSA_scores_tech_{}.pickle".format(tag)
    if not path_lsa_tech.exists():
        # 1. LCA related
        d = lca.demand_array
        B = lca.biosphere_matrix
        C = sum(lca.characterization_matrix)
        reverse_dict = lca.reverse_dict()[0]
        # 2. Find zero indices using local SA
        where_tech_lsa.sort()
        num_params = len(where_tech_lsa)
        tech_params_lsa = lca.tech_params[where_tech_lsa]
        rows = tech_params_lsa["row"]
        cols = tech_params_lsa["col"]
        # 3. Constant factor
        const_factor = np.tile(const_factors, (num_params, 1))
        N = const_factor.shape[1]
        # 4. Preparation for saving the results
        lsa_scores_tech = {}
        # 5. Run LSA
        for i, where in enumerate(where_tech_lsa):
            scores = np.empty(N)
            scores[:] = np.nan
            for j in range(N):
                A = deepcopy(lca.technosphere_matrix)
                A[rows[i], cols[i]] *= const_factor[i, j]
                scores[j] = C * B * spsolve(A, d)
                del A
            lsa_scores_tech[int(where)] = dict(
                input=reverse_dict[rows[i]],
                output=reverse_dict[cols[i]],
                scores=deepcopy(scores),
            )
        # 6. Save results
        write_pickle(lsa_scores_tech, path_lsa_tech)
    else:
        lsa_scores_tech = read_pickle(path_lsa_tech)
    return lsa_scores_tech
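A side note on the perturbation step `A[rows[i], cols[i]] *= ...`: writing into a copied CSR matrix raises a SparseEfficiencyWarning; a LIL round-trip keeps it quiet. A self-contained sketch of the same pattern:

import numpy as np
from copy import deepcopy
from scipy import sparse
from scipy.sparse.linalg import spsolve

A0 = 2.0 * sparse.identity(4, format='csr')
d = np.ones(4)
A = deepcopy(A0).tolil()
A[1, 1] *= 0.1                 # perturb one exchange without a warning
score = spsolve(A.tocsr(), d)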
def calculate_impact_scores(
        A_public_cutoff, A_public_apos, A_public_consequential,
        B_public_cutoff, B_public_apos, B_public_consequential,
        C_public_cutoff, C_public_apos, C_public_consequential,
        ee_index_cutoff, ee_index_apos, ee_index_consequential,
        ie_index_cutoff, ie_index_apos, ie_index_consequential,
        LCIA_index_cutoff, LCIA_index_apos, LCIA_index_consequential):
    """
    Perform the calculation of LCIA scores for all three system models.

    Required arguments:
    - six matrices for each of the system models (A_public, B_public,
      C_public, ie_index, ee_index and LCIA_index)

    Returns:
    - the C-matrix in array form for each of the system models
    - the LCIA matrix with the scores of all the products for each of the
      system models
    """
    # Remove values in the diagonal
    # LCIA scores for the cutoff matrices
    A_cutoff = sp.coo_matrix(
        (A_public_cutoff["coefficient"],
         (A_public_cutoff["row"], A_public_cutoff["column"]))).tocsc()
    A_public_cor_cutoff = A_public_cutoff.loc[
        A_public_cutoff["row"] != A_public_cutoff["column"]].copy()
    B_cutoff = sp.coo_matrix(
        (B_public_cutoff["coefficient"],
         (B_public_cutoff["row"], B_public_cutoff["column"])),
        shape=(len(ee_index_cutoff), len(ie_index_cutoff)))
    lci_cutoff = spsolve(A_cutoff.transpose(), B_cutoff.transpose())
    C_cutoff = sp.coo_matrix(
        (C_public_cutoff["coefficient"],
         (C_public_cutoff["row"], C_public_cutoff["column"])),
        shape=(len(LCIA_index_cutoff), len(ee_index_cutoff)))
    c_array_cutoff = C_cutoff.transpose().toarray()
    lcia_cutoff = lci_cutoff * C_cutoff.transpose()
    LCIA_index_cutoff['method_long'] = LCIA_index_cutoff[
        LCIA_index_cutoff.columns[:-1]].apply(
            lambda x: ", ".join(x.astype(str)), axis=1)
    lcia_df_cutoff = pd.DataFrame(
        data=lcia_cutoff[:, :],
        columns=LCIA_index_cutoff['method_long'].values)

    # LCIA scores for the apos matrices
    A_apos = sp.coo_matrix(
        (A_public_apos["coefficient"],
         (A_public_apos["row"], A_public_apos["column"]))).tocsc()
    A_public_cor_apos = A_public_apos.loc[
        A_public_apos["row"] != A_public_apos["column"]].copy()
    B_apos = sp.coo_matrix(
        (B_public_apos["coefficient"],
         (B_public_apos["row"], B_public_apos["column"])),
        shape=(len(ee_index_apos), len(ie_index_apos)))
    lci_apos = spsolve(A_apos.transpose(), B_apos.transpose())
    C_apos = sp.coo_matrix(
        (C_public_apos["coefficient"],
         (C_public_apos["row"], C_public_apos["column"])),
        shape=(len(LCIA_index_apos), len(ee_index_apos)))
    c_array_apos = C_apos.transpose().toarray()
    lcia_apos = lci_apos * C_apos.transpose()
    LCIA_index_apos['method_long'] = LCIA_index_apos[
        LCIA_index_apos.columns[:-1]].apply(
            lambda x: ", ".join(x.astype(str)), axis=1)
    lcia_df_apos = pd.DataFrame(
        data=lcia_apos[:, :],
        columns=LCIA_index_apos['method_long'].values)

    # LCIA scores for the consequential matrices
    A_consequential = sp.coo_matrix(
        (A_public_consequential["coefficient"],
         (A_public_consequential["row"],
          A_public_consequential["column"]))).tocsc()
    A_public_cor_consequential = A_public_consequential.loc[
        A_public_consequential["row"] !=
        A_public_consequential["column"]].copy()
    B_consequential = sp.coo_matrix(
        (B_public_consequential["coefficient"],
         (B_public_consequential["row"], B_public_consequential["column"])),
        shape=(len(ee_index_consequential), len(ie_index_consequential)))
    lci_consequential = spsolve(A_consequential.transpose(),
                                B_consequential.transpose())
    C_consequential = sp.coo_matrix(
        (C_public_consequential["coefficient"],
         (C_public_consequential["row"], C_public_consequential["column"])),
        shape=(len(LCIA_index_consequential), len(ee_index_consequential)))
    c_array_consequential = C_consequential.transpose().toarray()
    lcia_consequential = lci_consequential * C_consequential.transpose()
    LCIA_index_consequential['method_long'] = LCIA_index_consequential[
        LCIA_index_consequential.columns[:-1]].apply(
            lambda x: ", ".join(x.astype(str)), axis=1)
    lcia_df_consequential = pd.DataFrame(
        data=lcia_consequential[:, :],
        columns=LCIA_index_consequential['method_long'].values)

    return (A_public_cor_cutoff, c_array_cutoff, lcia_cutoff,
            LCIA_index_cutoff, lcia_df_cutoff,
            A_public_cor_apos, c_array_apos, lcia_apos,
            LCIA_index_apos, lcia_df_apos,
            A_public_cor_consequential, c_array_consequential,
            lcia_consequential, LCIA_index_consequential,
            lcia_df_consequential)
def get_bio_params_local_sa(lca, write_dir, const_factors=(0.1, 10)):
    """Local SA for biosphere parameters."""
    path_lsa_bio = Path(write_dir) / "LSA_scores_bio.pickle"
    if not path_lsa_bio.exists():
        tech_x_demand = spsolve(lca.technosphere_matrix, lca.demand_array)
        characterization_vector = sum(lca.characterization_matrix)
        path_include_inds_bio = Path(write_dir) / "include_inds_bio.pickle"
        inds_uncertain = np.where(lca.bio_params["uncertainty_type"] > 1)[0]
        if not path_include_inds_bio.exists():
            uncertain_bio_params_temp = lca.bio_params[inds_uncertain]
            # Exclude bio exchanges that are not selected by the demand vector
            exclude_cols = np.where(tech_x_demand == 0)[0]
            exclude_inds = np.array([])
            for col in exclude_cols:
                exclude_inds = np.hstack(
                    [exclude_inds, np.where(uncertain_bio_params_temp["col"] == col)[0]]
                )
            # Exclude bio exchanges that are not included in the given LCIA method
            exclude_rows = np.where(characterization_vector.toarray()[0] == 0)[0]
            for row in exclude_rows:
                exclude_inds = np.hstack(
                    [exclude_inds, np.where(uncertain_bio_params_temp["row"] == row)[0]]
                )
            print(
                "Excluding {}/{} biosphere exchanges".format(
                    len(exclude_inds), len(uncertain_bio_params_temp)
                )
            )
            exclude_inds = np.sort(exclude_inds)
            include_inds_temp = np.setdiff1d(
                np.arange(len(uncertain_bio_params_temp)), exclude_inds
            )
            write_pickle(include_inds_temp, path_include_inds_bio)
        else:
            include_inds_temp = read_pickle(path_include_inds_bio)
        include_inds = inds_uncertain[include_inds_temp]
        uncertain_bio_params = lca.bio_params[include_inds]
        nbio = len(uncertain_bio_params)
        rows = uncertain_bio_params["row"]
        cols = uncertain_bio_params["col"]
        bio_reverse_dict = lca.reverse_dict()[2]
        tech_reverse_dict = lca.reverse_dict()[0]
        lsa_scores_bio = {}
        for i, param in enumerate(uncertain_bio_params):
            if i % 1000 == 0:
                print("{}/{}".format(i, nbio))
            row, col = rows[i], cols[i]
            input_ = bio_reverse_dict[row]
            output_ = tech_reverse_dict[col]
            scores = []
            for const_factor in const_factors:
                biosphere_matrix = deepcopy(lca.biosphere_matrix)
                biosphere_matrix[row, col] *= const_factor
                score = characterization_vector * (biosphere_matrix * tech_x_demand)
                scores.append(score[0])
            lsa_scores_bio[include_inds[i]] = {
                "input": input_,
                "output": output_,
                "scores": np.array(scores),
            }
        write_pickle(lsa_scores_bio, path_lsa_bio)
    else:
        lsa_scores_bio = read_pickle(path_lsa_bio)
    return lsa_scores_bio
def fill_depth_colorization(rgb_filename, depth_filename, alpha=1):
    imgRgb = np.array(Image.open(rgb_filename), dtype=int)
    imgRgb = imgRgb / np.max(imgRgb)
    imgDepthInput = img2depth(depth_filename)
    imgIsNoise = imgDepthInput == 0
    maxImgAbsDepth = np.max(imgDepthInput)
    imgDepth = imgDepthInput / maxImgAbsDepth
    imgDepth[imgDepth > 1] = 1
    (H, W) = imgDepth.shape
    numPix = H * W
    indsM = np.arange(numPix).reshape((W, H)).transpose()
    knownValMask = (~imgIsNoise).astype(int)
    grayImg = skimage.color.rgb2gray(imgRgb)
    winRad = 1
    len_ = 0
    absImgNdx = 0
    len_window = (2 * winRad + 1) ** 2
    len_zeros = numPix * len_window
    cols = np.zeros(len_zeros) - 1
    rows = np.zeros(len_zeros) - 1
    vals = np.zeros(len_zeros) - 1
    gvals = np.zeros(len_window) - 1
    for j in range(W):
        for i in range(H):
            nWin = 0
            for ii in range(max(0, i - winRad), min(i + winRad + 1, H)):
                for jj in range(max(0, j - winRad), min(j + winRad + 1, W)):
                    if ii == i and jj == j:
                        continue
                    rows[len_] = absImgNdx
                    cols[len_] = indsM[ii, jj]
                    gvals[nWin] = grayImg[ii, jj]
                    len_ = len_ + 1
                    nWin = nWin + 1
            curVal = grayImg[i, j]
            gvals[nWin] = curVal
            c_var = np.mean((gvals[:nWin + 1] - np.mean(gvals[:nWin + 1])) ** 2)
            csig = c_var * 0.6
            mgv = np.min((gvals[:nWin] - curVal) ** 2)
            if csig < -mgv / np.log(0.01):
                csig = -mgv / np.log(0.01)
            if csig < 2e-06:
                csig = 2e-06
            gvals[:nWin] = np.exp(-(gvals[:nWin] - curVal) ** 2 / csig)
            gvals[:nWin] = gvals[:nWin] / sum(gvals[:nWin])
            vals[len_ - nWin:len_] = -gvals[:nWin]
            # Now the self-reference (along the diagonal).
            rows[len_] = absImgNdx
            cols[len_] = absImgNdx
            vals[len_] = 1  # sum(gvals(1:nWin))
            len_ = len_ + 1
            absImgNdx = absImgNdx + 1
    vals = vals[:len_]
    cols = cols[:len_]
    rows = rows[:len_]
    A = scipy.sparse.csr_matrix((vals, (rows, cols)), (numPix, numPix))
    rows = np.arange(0, numPix)
    cols = np.arange(0, numPix)
    vals = (knownValMask * alpha).transpose().reshape(numPix)
    G = scipy.sparse.csr_matrix((vals, (rows, cols)), (numPix, numPix))
    A = A + G
    b = np.multiply(vals.reshape(numPix), imgDepth.flatten('F'))
    # print('Solving system..')
    new_vals = spsolve(A, b)
    new_vals = np.reshape(new_vals, (H, W), 'F')
    # print('Done.')
    denoisedDepthImg = new_vals * maxImgAbsDepth
    output = denoisedDepthImg.reshape((H, W)).astype('float32')
    output = np.multiply(output, (1 - knownValMask)) + imgDepthInput
    return output
def Solve(self, A, b, reuse_factorisation=False):
    """Solves the linear system of equations"""

    if not issparse(A):
        raise ValueError("Linear system is not of sparse type")

    if A.shape == (0, 0) and b.shape[0] == 0:
        warn("Empty linear system!!! Nothing to solve!!!")
        return np.copy(b)

    self.reuse_factorisation = reuse_factorisation
    if self.solver_type != "direct" and self.reuse_factorisation is True:
        warn("Re-using factorisation for non-direct solvers is not possible. "
             "The pre-conditioner is going to be reused instead")

    # DECIDE IF THE SOLVER TYPE IS APPROPRIATE FOR THE PROBLEM
    if self.switcher_message is False and self.dont_switch_solver is False:
        # PREFER PARDISO OR MUMPS OVER AMG IF AVAILABLE
        if self.has_pardiso:
            self.solver_type = "direct"
            self.solver_subtype = "pardiso"
        elif self.has_mumps:
            self.solver_type = "direct"
            self.solver_subtype = "mumps"
        elif b.shape[0] > 100000 and self.has_amg_solver:
            self.solver_type = "amg"
            self.solver_subtype = "gmres"
            print('Large system of equations. Switching to algebraic multigrid solver')
            self.switcher_message = True
        # elif mesh.points.shape[0]*MainData.nvar > 50000 and MainData.C < 4:
        #     self.solver_type = "direct"
        #     self.solver_subtype = "MUMPS"
        #     print('Large system of equations. Switching to MUMPS solver')
        elif b.shape[0] > 70000 and self.geometric_discretisation == "hex" and self.has_amg_solver:
            self.solver_type = "amg"
            self.solver_subtype = "gmres"
            print('Large system of equations. Switching to algebraic multigrid solver')
            self.switcher_message = True
        else:
            self.solver_type = "direct"
            self.solver_subtype = "umfpack"

    if self.solver_type == 'direct':
        # CALL DIRECT SOLVER
        if self.solver_subtype == 'umfpack' and self.has_umfpack:
            if A.dtype != np.float64:
                A = A.astype(np.float64)

            if self.solver_context_manager is None:
                if self.reuse_factorisation is False:
                    sol = spsolve(A, b, permc_spec='MMD_AT_PLUS_A', use_umfpack=True)
                    # from scikits import umfpack
                    # sol = umfpack.spsolve(A, b)
                else:
                    from scikits import umfpack
                    lu = umfpack.splu(A)
                    sol = lu.solve(b)
                    self.solver_context_manager = lu
            else:
                sol = self.solver_context_manager.solve(b)

        elif self.solver_subtype == 'mumps' and self.has_mumps:
            from mumps.mumps_context import MUMPSContext
            t_solve = time()
            A = A.tocoo()
            # False means non-symmetric - Do not change it to True.
            # True means symmetric pos def, which is not the case for electromechanics
            if self.solver_context_manager is None:
                context = MUMPSContext((A.shape[0], A.row, A.col, A.data, False), verbose=False)
                context.analyze()
                context.factorize()
                sol = context.solve(rhs=b)

                if self.reuse_factorisation:
                    self.solver_context_manager = context
            else:
                sol = self.solver_context_manager.solve(rhs=b)

            print("MUMPS solver time is {}".format(time() - t_solve))
            return sol

        elif self.solver_subtype == "pardiso" and self.has_pardiso:
            # NOTE THAT THIS PARDISO SOLVER AUTOMATICALLY SAVES THE RIGHT FACTORISATION
            import pypardiso
            from pypardiso.scipy_aliases import pypardiso_solver as ps
            A = A.tocsr()
            t_solve = time()
            sol = pypardiso.spsolve(A, b)
            if self.reuse_factorisation is False:
                ps.remove_stored_factorization()
                ps.free_memory()
            print("Pardiso solver time is {}".format(time() - t_solve))

        else:
            # FOR 'super_lu'
            if A.dtype != np.float64:
                A = A.astype(np.float64)
            A = A.tocsc()

            if self.solver_context_manager is None:
                if self.reuse_factorisation is False:
                    sol = spsolve(A, b, permc_spec='MMD_AT_PLUS_A', use_umfpack=True)
                else:
                    lu = splu(A)
                    sol = lu.solve(b)
                    self.solver_context_manager = lu
            else:
                sol = self.solver_context_manager.solve(b)

    elif self.solver_type == "iterative":
        # CALL ITERATIVE SOLVER
        # (a single if/elif chain; the original used two separate "if"s, which
        # silently overwrote the GMRES solution with the CG fallback)
        if self.solver_subtype == "gmres":
            sol = gmres(A, b, tol=self.iterative_solver_tolerance)[0]
        elif self.solver_subtype == "lgmres":
            sol = lgmres(A, b, tol=self.iterative_solver_tolerance)[0]
        elif self.solver_subtype == "bicgstab":
            sol = bicgstab(A, b, tol=self.iterative_solver_tolerance)[0]
        else:
            sol = cg(A, b, tol=self.iterative_solver_tolerance)[0]

        # PRECONDITIONED ITERATIVE SOLVER - CHECK
        # P = spilu(A.tocsc(), drop_tol=1e-5)
        # M_x = lambda x: P.solve(x)
        # m = A.shape[1]
        # n = A.shape[0]
        # M = LinearOperator((n * m, n * m), M_x)
        # sol = cg(A, b, tol=self.iterative_solver_tolerance, M=M)[0]

    elif self.solver_type == "amg":
        if self.has_amg_solver is False:
            raise ImportError('Algebraic multigrid solver was not found. '
                              'Please install it using "pip install pyamg"')
        from pyamg import ruge_stuben_solver, rootnode_solver, smoothed_aggregation_solver

        if A.dtype != b.dtype:
            # DOWN-CAST
            b = b.astype(A.dtype)

        if not isspmatrix_csr(A):
            A = A.tocsr()

        t_solve = time()

        if self.iterative_solver_tolerance > 1e-9:
            self.iterative_solver_tolerance = 1e-10

        # AMG METHOD
        amg_func = None
        if self.preconditioner_type == "smoothed_aggregation":
            # THIS IS TYPICALLY FASTER BUT THE TOLERANCE NEEDS TO BE SMALLER, TYPICALLY 1e-10
            amg_func = smoothed_aggregation_solver
        elif self.preconditioner_type == "ruge_stuben":
            amg_func = ruge_stuben_solver
        elif self.preconditioner_type == "rootnode":
            amg_func = rootnode_solver
        else:
            amg_func = rootnode_solver

        ml = amg_func(A)
        # ml = amg_func(A, smooth=('energy', {'degree':2}), strength='evolution' )
        # ml = amg_func(A, max_levels=3, diagonal_dominance=True)
        # ml = amg_func(A, coarse_solver=spsolve)
        # ml = amg_func(A, coarse_solver='cholesky')

        if self.solver_context_manager is None:
            # M = ml.aspreconditioner(cycle='V')
            M = ml.aspreconditioner()
            if self.reuse_factorisation:
                self.solver_context_manager = M
        else:
            M = self.solver_context_manager

        # EXPLICIT CALL TO KRYLOV SOLVERS WITH AMG PRECONDITIONER
        # sol, info = bicgstab(A, b, M=M, tol=self.iterative_solver_tolerance)
        # sol, info = cg(A, b, M=M, tol=self.iterative_solver_tolerance)
        # sol, info = gmres(A, b, M=M, tol=self.iterative_solver_tolerance)

        # IMPLICIT CALL TO KRYLOV SOLVERS WITH AMG PRECONDITIONER
        residuals = []
        sol = ml.solve(b, tol=self.iterative_solver_tolerance,
                       accel=self.solver_subtype, residuals=residuals)

        print("AMG solver time is {}".format(time() - t_solve))

    elif self.solver_type == "petsc" and self.has_petsc:
        # The original wrote "self.solver_subtype == 'cg'" (a no-op comparison);
        # an assignment is clearly intended here
        if self.solver_subtype not in ("gmres", "minres", "cg"):
            self.solver_subtype = "cg"
        if self.iterative_solver_tolerance < 1e-9:
            self.iterative_solver_tolerance = 1e-7

        from petsc4py import PETSc
        t_solve = time()
        pA = PETSc.Mat().createAIJ(size=A.shape, csr=(A.indptr, A.indices, A.data))
        pb = PETSc.Vec().createWithArray(b)

        ksp = PETSc.KSP()
        ksp.create(PETSc.COMM_WORLD)
        # ksp.create()
        ksp.setType(self.solver_subtype)
        ksp.setTolerances(atol=self.iterative_solver_tolerance,
                          rtol=self.iterative_solver_tolerance)
        # ILU
        ksp.getPC().setType('icc')

        # CREATE INITIAL GUESS
        psol = PETSc.Vec().createWithArray(np.ones(b.shape[0]))
        # SOLVE
        ksp.setOperators(pA)
        ksp.setFromOptions()
        ksp.solve(pb, psol)
        sol = psol.getArray()

        # print('Converged in', ksp.getIterationNumber(), 'iterations.')
        print("Petsc linear iterative solver time is {}".format(time() - t_solve))

    else:
        warn("{} solver is not available. Default solver is going to be used".format(self.solver_type))
        # FOR 'super_lu'
        if A.dtype != np.float64:
            A = A.astype(np.float64)
        A = A.tocsc()

        if self.solver_context_manager is None:
            if self.reuse_factorisation is False:
                sol = spsolve(A, b, permc_spec='MMD_AT_PLUS_A', use_umfpack=True)
            else:
                lu = splu(A)
                sol = lu.solve(b)
                self.solver_context_manager = lu
        else:
            sol = self.solver_context_manager.solve(b)

    return sol
lca = bw.LCA(demand, method)
lca.lci()
lca.lcia()
q_low = (1 - THREE_SIGMA_Q) / 2
q_high = (1 + THREE_SIGMA_Q) / 2
A = lca.technosphere_matrix
B = lca.biosphere_matrix
c = sum(lca.characterization_matrix)
lca.build_demand_array()
d = lca.demand_array
cB = c * B
score_initial = cB * spsolve(A, d)  # run it before MC to factorize matrix A

def get_distr_indices_params(lca, id_distr):
    list_ = lca.tech_params['uncertainty_type'] == id_distr
    indices = list(compress(range(len(list_)), list_))
    params = lca.tech_params[indices]
    return indices, params

indices_lognor, params_lognor = get_distr_indices_params(lca, ID_LOGNOR)
indices_normal, params_normal = get_distr_indices_params(lca, ID_NORMAL)
indices_triang, params_triang = get_distr_indices_params(lca, ID_TRIANG)
n_params = len(lca.tech_params)
n_lognor = len(params_lognor)
n_normal = len(params_normal)
n_triang = len(params_triang)
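A caveat on the "factorize matrix A" comment: SciPy's spsolve does not cache factors between calls (pypardiso's wrapper does). If reuse across Monte Carlo iterations is intended, an explicit LU object makes it unambiguous (a sketch using the names defined above):

from scipy.sparse.linalg import splu

lu = splu(A.tocsc())              # factorise once
score_initial = cB @ lu.solve(d)  # reuse the factors for every new rhs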
def main(nelx, nely, nelz, volfrac, penal, rmin, heaviside):
    # USER DEFINED PRINT ORIENTATION
    baseplate = 'S'
    # USER DEFINED LOOP PARAMETERS
    maxloop = 1000
    tolx = 0.01
    displayflag = 0
    # USER DEFINED MATERIAL PROPERTIES
    E0 = 1
    Emin = 1e-9
    nu = 0.3
    # USER DEFINED LOAD DoFs
    il, jl, kl = np.meshgrid(nelx, 0, np.arange(nelz + 1))
    loadnid = kl * (nelx + 1) * (nely + 1) + il * (nely + 1) + (nely + 1 - jl)
    loaddof = 3 * np.ravel(loadnid, order='F') - 1  # currently a 1D array (used for sparse later)
    # USER DEFINED SUPPORT FIXED DOFS
    iif, jf, kf = np.meshgrid(0, np.arange(nely + 1), np.arange(nelz + 1))
    fixednid = kf * (nelx + 1) * (nely + 1) + iif * (nely + 1) + (nely + 1 - jf)
    fixeddof = np.concatenate((3 * np.ravel(fixednid, order='F'),
                               3 * np.ravel(fixednid, order='F') - 1,
                               3 * np.ravel(fixednid, order='F') - 2))  # currently a 1D array (used for sparse later)
    # PREPARE FE ANALYSIS
    nele = nelx * nely * nelz
    ndof = 3 * (nelx + 1) * (nely + 1) * (nelz + 1)
    F = csr_matrix((-1 * np.ones(np.shape(loaddof)),
                    (loaddof - 1, np.ones(np.shape(loaddof)) - 1)),
                   shape=(ndof, 1))
    U = np.zeros((ndof, 1))
    freedofs = np.setdiff1d(np.arange(ndof) + 1, fixeddof)
    KE = lk_H8(nu)
    nodegrd = np.reshape(np.arange((nely + 1) * (nelx + 1)) + 1,
                         (nely + 1, nelx + 1), order='F')
    nodeids = np.reshape(nodegrd[0:-1, 0:-1], (nely * nelx, 1), order='F')
    nodeidz = np.arange(0, (nelz - 1) * (nely + 1) * (nelx + 1) + 1,
                        (nely + 1) * (nelx + 1))[np.newaxis]
    nodeids = (np.matlib.repmat(nodeids, np.shape(nodeidz)[0], np.shape(nodeidz)[1])
               + np.matlib.repmat(nodeidz, np.shape(nodeids)[0], np.shape(nodeids)[1]))
    edofVec = (3 * np.ravel(nodeids, order='F') + 1)[np.newaxis]
    edofMat = (np.matlib.repmat(edofVec.T, 1, 24)
               + np.matlib.repmat(np.concatenate((
                   np.array([0, 1, 2]),
                   3 * nely + np.array([3, 4, 5, 0, 1, 2]),
                   np.array([-3, -2, -1]),
                   3 * (nely + 1) * (nelx + 1) + np.concatenate((
                       np.array([0, 1, 2]),
                       3 * nely + np.array([3, 4, 5, 0, 1, 2]),
                       np.array([-3, -2, -1])))
               )), nele, 1))
    iK = np.reshape(np.kron(edofMat, np.ones((24, 1))).T, (24 * 24 * nele, 1), order='F')
    jK = np.reshape(np.kron(edofMat, np.ones((1, 24))).T, (24 * 24 * nele, 1), order='F')
    # PREPARE FILTER
    iH = np.ones((int(nele * (2 * (np.ceil(rmin) - 1) + 1) ** 2), 1))
    iHdummy = []
    jH = np.ones(np.shape(iH))
    jHdummy = []
    sH = np.zeros(np.shape(iH))
    sHdummy = []
    k = 0
    for k1 in np.arange(nelz) + 1:
        for i1 in np.arange(nelx) + 1:
            for j1 in np.arange(nely) + 1:
                e1 = (k1 - 1) * nelx * nely + (i1 - 1) * nely + j1
                for k2 in np.arange(max(k1 - (np.ceil(rmin) - 1), 1),
                                    min(k1 + (np.ceil(rmin) - 1), nelz) + 1):
                    for i2 in np.arange(max(i1 - (np.ceil(rmin) - 1), 1),
                                        min(i1 + (np.ceil(rmin) - 1), nelx) + 1):
                        for j2 in np.arange(max(j1 - (np.ceil(rmin) - 1), 1),
                                            min(j1 + (np.ceil(rmin) - 1), nely) + 1):
                            e2 = (k2 - 1) * nelx * nely + (i2 - 1) * nely + j2
                            if k < np.size(iH):
                                iH[k] = e1
                                jH[k] = e2
                                sH[k] = max(0, rmin - np.sqrt(
                                    (i1 - i2) ** 2 + (j1 - j2) ** 2 + (k1 - k2) ** 2))
                            else:
                                iHdummy.append(e1)
                                jHdummy.append(e2)
                                sHdummy.append(max(0, rmin - np.sqrt(
                                    (i1 - i2) ** 2 + (j1 - j2) ** 2 + (k1 - k2) ** 2)))
                            k = k + 1
    iH = np.concatenate((iH, np.array(iHdummy).reshape((len(iHdummy), 1))))
    jH = np.concatenate((jH, np.array(jHdummy).reshape((len(jHdummy), 1))))
    sH = np.concatenate((sH, np.array(sHdummy).reshape((len(sHdummy), 1))))
    H = csr_matrix((np.squeeze(sH), (np.squeeze(iH.astype(int)) - 1,
                                     np.squeeze(jH.astype(int)) - 1)))
    Hs = csr_matrix.sum(H, axis=0).T
    if heaviside == 0:
        # INITIALIZE ITERATION
        x = np.tile(volfrac, [nelz, nely, nelx])
        xPhys = x
        ######## AMFILTER CALL TYPE 1 #########
        xPrint, _ = AMFilter3D.AMFilter(xPhys, baseplate)
        loop = 0
        change = 1
        # START ITERATION
        while change > tolx and loop < maxloop:
            loop = loop + 1
            # FE ANALYSIS
            sK = np.reshape(
                np.ravel(KE, order='F')[np.newaxis].T
                @ (Emin + xPrint.transpose(0, 2, 1).ravel(order='C')[np.newaxis] ** penal * (E0 - Emin)),
                (24 * 24 * nele, 1), order='F')
            K = csr_matrix((np.squeeze(sK), (np.squeeze(iK.astype(int)) - 1,
                                             np.squeeze(jK.astype(int)) - 1)))
            K = (K + K.T) / 2
            U[freedofs - 1, :] = spsolve(K[freedofs - 1, :][:, freedofs - 1],
                                         F[freedofs - 1, :])[np.newaxis].T
            # OBJECTIVE FUNCTION AND SENSITIVITY ANALYSIS
            ce = np.reshape(
                np.sum((U[edofMat - 1].squeeze() @ KE) * U[edofMat - 1].squeeze(), axis=1),
                (nelz, nelx, nely), order='C').transpose(0, 2, 1)
            c = np.sum(np.sum(np.sum(Emin + xPrint ** penal * (E0 - Emin) * ce)))  # REPLACE xPhys with xPrint
            dc = -penal * (E0 - Emin) * (xPrint ** (penal - 1)) * ce  # REPLACE xPhys with xPrint
            dv = np.ones((nelz, nely, nelx))
            ######### AMFILTER CALL TYPE 2 #########
            xPrint, senS = AMFilter3D.AMFilter(xPhys, baseplate, dc, dv)
            dc = senS[0]
            dv = senS[1]
            # FILTERING AND MODIFICATION OF SENSITIVITIES
            dc = np.array((H @ (dc.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T / Hs))).reshape(
                (nelz, nelx, nely), order='C').transpose(0, 2, 1)
            dv = np.array((H @ (dv.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T / Hs))).reshape(
                (nelz, nelx, nely), order='C').transpose(0, 2, 1)
            # OPTIMALITY CRITERIA UPDATE
            l1 = 0
            l2 = 1e9
            move = 0.05
            while (l2 - l1) / (l1 + l2) > 1e-3 and l2 > 1e-9:
                lmid = 0.5 * (l2 + l1)
                xnew_step1 = np.minimum(x + move, x * np.sqrt(-dc / dv / lmid))
                xnew_step2 = np.minimum(1, xnew_step1)
                xnew_step3 = np.maximum(x - move, xnew_step2)
                xnew = np.maximum(0, xnew_step3)
                xPhys = np.array((H @ (xnew.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T) / Hs)).reshape(
                    (nelz, nelx, nely), order='C').transpose(0, 2, 1)
                ######### AMFILTER CALL TYPE 1 ######
                xPrint, _ = AMFilter3D.AMFilter(xPhys, baseplate)
                if np.sum(xPrint.ravel(order='C')) > volfrac * nele:  # REPLACE xPhys with xPrint
                    l1 = lmid
                else:
                    l2 = lmid
            change = np.max(np.absolute(np.ravel(xnew, order='F') - np.ravel(x, order='F')))
            x = xnew
            print("it.: {0} , ch.: {1:.3f}, obj.: {2:.4f}, Vol.: {3:.3f}".format(
                loop, change, c, np.mean(xPrint.ravel(order='C'))))
    elif heaviside == 1:
        beta = 1
        # INITIALIZE ITERATION
        x = np.tile(volfrac, [nelz, nely, nelx])
        xTilde = x
        xPhys = 1 - np.exp(-beta * xTilde) + xTilde * np.exp(-beta)
        ######## AMFILTER CALL TYPE 1 #########
        xPrint, _ = AMFilter3D.AMFilter(xPhys, baseplate)
        loop = 0
        loopbeta = 0
        change = 1
        # START ITERATION
        while change > tolx and loop < maxloop:
            loop = loop + 1
            loopbeta = loopbeta + 1
            # FE ANALYSIS
            sK = np.reshape(
                np.ravel(KE, order='F')[np.newaxis].T
                @ (Emin + xPrint.transpose(0, 2, 1).ravel(order='C')[np.newaxis] ** penal * (E0 - Emin)),
                (24 * 24 * nele, 1), order='F')
            K = csr_matrix((np.squeeze(sK), (np.squeeze(iK.astype(int)) - 1,
                                             np.squeeze(jK.astype(int)) - 1)))
            K = (K + K.T) / 2
            U[freedofs - 1, :] = spsolve(K[freedofs - 1, :][:, freedofs - 1],
                                         F[freedofs - 1, :])[np.newaxis].T
            # OBJECTIVE FUNCTION AND SENSITIVITY ANALYSIS
            ce = np.reshape(
                np.sum((U[edofMat - 1].squeeze() @ KE) * U[edofMat - 1].squeeze(), axis=1),
                (nelz, nelx, nely), order='C').transpose(0, 2, 1)
            c = np.sum(np.sum(np.sum(Emin + xPrint ** penal * (E0 - Emin) * ce)))  # REPLACE xPhys with xPrint
            dc = -penal * (E0 - Emin) * (xPrint ** (penal - 1)) * ce  # REPLACE xPhys with xPrint
            dv = np.ones((nelz, nely, nelx))
            ######### AMFILTER CALL TYPE 2 #########
            xPrint, senS = AMFilter3D.AMFilter(xPhys, baseplate, dc, dv)
            dc = senS[0]
            dv = senS[1]
            # FILTERING AND MODIFICATION OF SENSITIVITIES
            dx = beta * np.exp(-beta * xTilde) + np.exp(-beta)
            dc = np.array((H @ (dc.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T
                                * dx.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T / Hs))).reshape(
                (nelz, nelx, nely), order='C').transpose(0, 2, 1)
            dv = np.array((H @ (dv.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T
                                * dx.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T / Hs))).reshape(
                (nelz, nelx, nely), order='C').transpose(0, 2, 1)
            # OPTIMALITY CRITERIA UPDATE
            l1 = 0
            l2 = 1e9
            move = 0.05
            while (l2 - l1) / (l1 + l2) > 1e-3:
                lmid = 0.5 * (l2 + l1)
                xnew_step1 = np.minimum(x + move, x * np.sqrt(-dc / dv / lmid))
                xnew_step2 = np.minimum(1, xnew_step1)
                xnew_step3 = np.maximum(x - move, xnew_step2)
                xnew = np.maximum(0, xnew_step3)
                xTilde = np.array((H @ (xnew.transpose(0, 2, 1).ravel(order='C')[np.newaxis].T) / Hs)).reshape(
                    (nelz, nelx, nely), order='C').transpose(0, 2, 1)
                xPhys = 1 - np.exp(-beta * xTilde) + xTilde * np.exp(-beta)
                ######### AMFILTER CALL TYPE 1 ######
                xPrint, _ = AMFilter3D.AMFilter(xPhys, baseplate)
                if np.sum(xPrint.ravel(order='C')) > volfrac * nele:  # REPLACE xPhys with xPrint
                    l1 = lmid
                else:
                    l2 = lmid
            change = np.max(np.absolute(np.ravel(xnew, order='F') - np.ravel(x, order='F')))
            x = xnew
            if beta < 512 and (loopbeta >= 50 or change <= 0.01):
                beta = 2 * beta
                loopbeta = 0
                change = 1
                print("Parameter beta increased to {0}. \n".format(beta))
            print("it.: {0} , ch.: {1:.3f}, obj.: {2:.4f}, Vol.: {3:.3f}".format(
                loop, change, c, np.mean(xPrint.ravel(order='C'))))
    return xPrint
def main(nelx, nely, volfrac, penal, rmin, ft, bc):
    # MATERIAL PROPERTIES
    E0 = 1
    Emin = 1e-9
    nu = 0.3
    # USER DEFINED PRINT DIRECTION
    baseplate = 'S'
    # PREPARE FINITE ELEMENT ANALYSIS
    A11 = np.array([[12, 3, -6, -3], [3, 12, 3, 0], [-6, 3, 12, -3], [-3, 0, -3, 12]])
    A12 = np.array([[-6, -3, 0, 3], [-3, -6, -3, -6], [0, -3, -6, 3], [3, -6, 3, -6]])
    B11 = np.array([[-4, 3, -2, 9], [3, -4, -9, 4], [-2, -9, -4, -3], [9, 4, -3, -4]])
    B12 = np.array([[2, -3, 4, -9], [-3, 2, 9, -2], [4, 9, 2, 3], [-9, -2, 3, 2]])
    Atop = np.concatenate((A11, A12), axis=1)
    Abottom = np.concatenate((A12.T, A11), axis=1)
    A = np.concatenate((Atop, Abottom), axis=0)
    Btop = np.concatenate((B11, B12), axis=1)
    Bbottom = np.concatenate((B12.T, B11), axis=1)
    B = np.concatenate((Btop, Bbottom), axis=0)
    KE = 1 / (1 - nu ** 2) / 24 * (A + nu * B)
    nodenrs = np.reshape(np.arange(1, (nelx + 1) * (nely + 1) + 1), (1 + nelx, 1 + nely))
    nodenrs = nodenrs.T
    edofVec = np.ravel(nodenrs[0:nely, 0:nelx], order='F') * 2 + 1
    edofVec = edofVec.reshape((nelx * nely, 1))
    edofMat = np.matlib.repmat(edofVec, 1, 8) + np.matlib.repmat(
        np.concatenate(([0, 1], 2 * nely + np.array([2, 3, 0, 1]), [-2, -1])), nelx * nely, 1)
    iK = np.reshape(np.kron(edofMat, np.ones((8, 1))).T, (64 * nelx * nely, 1), order='F')
    jK = np.reshape(np.kron(edofMat, np.ones((1, 8))).T, (64 * nelx * nely, 1), order='F')
    # DEFINE LOADS AND SUPPORTS
    # Initialise the matrices
    F = np.zeros((2 * (nely + 1) * (nelx + 1), 1))
    U = np.zeros((2 * (nely + 1) * (nelx + 1), 1))
    # Define the unit load location and BC
    if bc == 1:
        # Half MBB-beam case
        F[1, 0] = -1
        fixeddofs = np.union1d(np.arange(1, 2 * (nely + 1), 2), 2 * (nelx + 1) * (nely + 1))
    elif bc == 2:
        # Cantilever case
        F[2 * (nely + 1) * (nelx + 1) - 1, 0] = -1
        fixeddofs = np.arange(1, 2 * (nely + 1))
    alldofs = np.arange(1, 2 * (nely + 1) * (nelx + 1) + 1)
    freedofs = np.setdiff1d(alldofs, fixeddofs)
    # DEFINE LOADS AND BC (MULTIPLE CHOICES)
    # PREPARE FILTER
    iH = np.ones((nelx * nely * (int(2 * (np.ceil(rmin) - 1) + 1)) ** 2, 1))
    jH = np.ones(np.shape(iH))
    sH = np.zeros(np.shape(iH))
    k = 0
    for i1 in range(1, nelx + 1):
        for j1 in range(1, nely + 1):
            e1 = (i1 - 1) * nely + j1
            for i2 in range(max(i1 - (int(np.ceil(rmin)) - 1), 1),
                            min(i1 + (int(np.ceil(rmin)) - 1), nelx) + 1):
                for j2 in range(max(j1 - (int(np.ceil(rmin)) - 1), 1),
                                min(j1 + (int(np.ceil(rmin)) - 1), nely) + 1):
                    e2 = (i2 - 1) * nely + j2
                    iH[k] = e1
                    jH[k] = e2
                    sH[k] = max(0, rmin - np.sqrt((i1 - i2) ** 2 + (j1 - j2) ** 2))
                    k = k + 1
    H = csr_matrix((np.squeeze(sH), (np.squeeze(iH.astype(int)) - 1,
                                     np.squeeze(jH.astype(int)) - 1)))
    Hs = np.sum(H, axis=1)
    # INITIATE ITERATION
    x = np.matlib.repmat(volfrac, nely, nelx)
    xPhys = x
    beta = 1
    if ft == 1 or ft == 2:
        # Sensitivity or density filter
        xPhys = x
        ###### AMfilter Call Type 1 ########
        xPrint, _ = AMFilter.AMFilter(xPhys, baseplate)
    elif ft == 3:
        # Heaviside filter
        xTilde = x
        xPhys = 1 - np.exp(-beta * xTilde) + xTilde * np.exp(-beta)
        ###### AMfilter Call Type 1 #########
        xPrint, _ = AMFilter.AMFilter(xPhys, baseplate)
    loop = 0
    loopbeta = 0
    change = 1
    # START ITERATION
    while change > 0.01 and loop <= 1000:
        loop = loop + 1
        loopbeta = loopbeta + 1
        # FE ANALYSIS
        sK = np.reshape(
            KE.ravel(order='F')[np.newaxis].T
            @ (Emin + xPrint.ravel(order='F')[np.newaxis] ** penal * (E0 - Emin)),
            (64 * nelx * nely, 1), order='F')
        K = csr_matrix((np.squeeze(sK), (np.squeeze(iK.astype(int)) - 1,
                                         np.squeeze(jK.astype(int)) - 1)))
        K = (K + K.T) / 2
        U[freedofs - 1, 0] = spsolve(K[freedofs - 1, :][:, freedofs - 1], F[freedofs - 1, 0])
        # OBJECTIVE FUNCTION AND SENSITIVITY ANALYSIS
        ce = np.reshape(np.sum(U[edofMat - 1, 0] @ KE * U[edofMat - 1, 0], axis=1),
                        (nely, nelx), order='F')
        c = np.sum(np.sum((Emin + xPrint ** penal * (E0 - Emin)) * ce))  # REPLACE xPhys with xPrint
        dc = -penal * (E0 - Emin) * xPrint ** (penal - 1) * ce  # REPLACE xPhys with xPrint
        dv = np.ones((nely, nelx))
        # TRANSFORM SENSITIVITIES BEFORE FILTERING
        ######### AMFILTER CALL Type 2 #########
        xPrint, senS = AMFilter.AMFilter(xPhys, baseplate, dc, dv)
        dc = senS[0]
        dv = senS[1]
        # FILTERING/MODIFICATION OF SENSITIVITIES
        if ft == 1:
            dc = H @ np.ravel((x * dc), order='F')[np.newaxis].T / Hs \
                / np.maximum(0.001, x).ravel(order='F')[np.newaxis].T
            dc = np.reshape(dc, (nely, nelx), order='F')
            dc = np.asarray(dc)
        elif ft == 2:
            dc = H @ (dc.ravel(order='F')[np.newaxis].T / Hs)
            dc = np.reshape(dc, (nely, nelx), order='F')
            dc = np.asarray(dc)
            dv = H @ (dv.ravel(order='F')[np.newaxis].T / Hs)
            dv = np.reshape(dv, (nely, nelx), order='F')
            dv = np.asarray(dv)
        elif ft == 3:
            dx = beta * np.exp(-beta * xTilde) + np.exp(-beta)
            dc = H @ (dc.ravel(order='F')[np.newaxis].T * dx.ravel(order='F')[np.newaxis].T / Hs)
            dc = np.reshape(dc, (nely, nelx), order='F')
            dc = np.asarray(dc)
            dv = H @ (dv.ravel(order='F')[np.newaxis].T * dx.ravel(order='F')[np.newaxis].T / Hs)
            dv = np.reshape(dv, (nely, nelx), order='F')
            dv = np.asarray(dv)
        # Save strain energy at the first iteration
        if loop == 1:
            se = (Emin + xPrint * (E0 - Emin)) * ce  # strain energy at the first iteration
            # np.save(str(path)+'/strain_energy/strain_energy_'+nelx+'_'+nely+'.npy', dc)
            # np.save(str(path)+'\strain_energy\strain_energy'+str(nelx)+'_'+str(nely)+'.npy', se)
        # OPTIMALITY CRITERIA UPDATE OF DESIGN VARIABLES AND PHYSICAL DENSITIES
        l1 = 0
        l2 = 1e9
        move = 0.05
        while (l2 - l1) / (l1 + l2) > 1e-3:
            lmid = 0.5 * (l2 + l1)
            xnew_step1 = np.minimum(x + move, x * np.sqrt(-dc / dv / lmid))
            xnew_step2 = np.minimum(1, xnew_step1)
            xnew_step3 = np.maximum(x - move, xnew_step2)
            xnew = np.maximum(0, xnew_step3)
            if ft == 1:
                xPhys = xnew
            elif ft == 2:
                xPhys = np.asarray(H @ xnew.ravel(order='F')[np.newaxis].T) / np.asarray(Hs)
                xPhys = np.reshape(xPhys, (nely, nelx), order='F')
            elif ft == 3:
                xTilde = np.asarray(H @ xnew.ravel(order='F')[np.newaxis].T) / np.asarray(Hs)
                xTilde = np.reshape(xTilde, (nely, nelx), order='F')
                xPhys = 1 - np.exp(-beta * xTilde) + xTilde * np.exp(-beta)
            ######### AMFILTER CALL TYPE 1 ######
            xPrint, _ = AMFilter.AMFilter(xPhys, baseplate)
            if np.sum(xPrint) > volfrac * nelx * nely:  # REPLACE xPhys with xPrint
                l1 = lmid
            else:
                l2 = lmid
        change = np.max(np.abs(xnew[:] - x[:]))
        x = xnew
        if ft == 3 and beta < 512 and (loopbeta >= 50 or change <= 0.01):
            beta = 2 * beta
            loopbeta = 0
            change = 1
            print("Parameter beta increased to {0}. \n".format(beta))
        # Write iteration history to screen (req. Python 2.6 or newer)
        print("it.: {0} , obj.: {1:.4f}, vol.: {3:.3f}, ch.: {2:.3f}".format(
            loop, c, change, volfrac))
    return xPrint, se
def lsa(lca, threshold=0.1):
    '''
    Computes relative sensitivity coefficients (RSC) according to
    (Heijungs and Kleijn, 2001) and (Sakai and Yokoyama, 2002), used in
    (Wei et al., 2015) for performing a local sensitivity analysis.

    Returns an array of two datasets of RSC, respectively for the
    technosphere and the biosphere matrices, where each record contains:
    - impact index
    - row index in the matrix
    - column index in the matrix
    - value of RSC
    - label of the row (respectively activity and biosphere)
    - label of the column (respectively product and activity)

    Only RSC above the given threshold will be kept.

    References:

    Heijungs, R.; Kleijn, R. Numerical approaches towards life cycle
    interpretation five examples. Int. J. Life Cycle Assess. 2001, 6, 141-148.

    Sakai, S.; Yokoyama, K. Formulation of sensitivity analysis in life cycle
    assessment using a perturbation method. Clean Technol. Environ. Policy
    2002, 4, 72-78.

    W. Wei, P. Larrey Lassalle, T. Faure, N. Dumoulin, P. Roux, J.D. Mathias.
    How to conduct a proper sensitivity analysis in life cycle assessment:
    Taking into account correlations within LCI data and interactions within
    the LCA calculation model. Environmental Science & Technology 49 (1),
    2015. http://pubs.acs.org/doi/abs/10.1021/es502128k
    '''
    lca.lci()
    lca.lcia()
    m_a = lca.technosphere_matrix
    m_b = lca.biosphere_matrix
    m_q = lca.characterization_matrix
    h = lca.characterized_inventory.sum(axis=1)
    s = lca.supply_array
    m_lambda = spsolve(m_a.transpose(), m_b.transpose()).transpose()
    m_ql = m_q.dot(m_lambda)
    rev_activity, rev_product, rev_bio = lca.reverse_dict()
    m_as = m_a.multiply(s)
    rsca = [
        -m_as.multiply(1 / hk).multiply(m_ql[k, :]).tocsr()
        for k, hk in enumerate(h)
    ]
    rscb = [
        m_b.multiply(1 / hk).multiply(m_q[k, :].T).multiply(s).tocsr()
        for k, hk in enumerate(h)
    ]
    rsca_filtered = [[k, x[0], x[1], rscak[x[0], x[1]]]
                     for k, rscak in enumerate(rsca)
                     for x in np.array(rscak.nonzero()).T
                     if abs(rscak[x[0], x[1]]) > threshold]
    rsca_sorted = sorted(rsca_filtered, key=lambda x: abs(x[3]), reverse=True)
    rsca_summary = [
        x + [
            str(Database(rev_activity[x[1]][0]).get(rev_activity[x[1]][1])),
            str(Database(rev_product[x[2]][0]).get(rev_product[x[2]][1]))
        ] for x in rsca_sorted
    ]
    rscb_filtered = [[k, x[0], x[1], rscbk[x[0], x[1]]]
                     for k, rscbk in enumerate(rscb)
                     for x in np.array(rscbk.nonzero()).T
                     if abs(rscbk[x[0], x[1]]) > threshold]
    rscb_sorted = sorted(rscb_filtered, key=lambda x: abs(x[3]), reverse=True)
    rscb_summary = [
        x + [
            str(Database(rev_bio[x[1]][0]).get(rev_bio[x[1]][1])),
            str(Database(rev_activity[x[2]][0]).get(rev_activity[x[2]][1]))
        ] for x in rscb_sorted
    ]
    return [rsca_summary, rscb_summary]
def solver(b, x0=None):
    return spsolve(A, b, squeeze=False, solver=pypardisosolver)
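The `squeeze` and `solver` keywords match pypardiso's spsolve wrapper, whose `solver` argument accepts a dedicated PyPardisoSolver instance so that the factorisation of A is kept per-solver rather than in the module-wide default. A sketch of how `pypardisosolver` could be constructed (assuming pypardiso is installed; not necessarily how the original code does it):

import pypardiso

# a dedicated solver object with its own stored factorisation
pypardisosolver = pypardiso.PyPardisoSolver()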
def solve(self, A, b, **kwargs):
    """Solve the sparse linear system Ax=b and return the solution with a
    status code of 0."""
    if not isinstance(A, (csr_matrix, csc_matrix)):
        A = A.tocsr()
    # TODO: solver.solve should return (x, info), not just x
    return (spsolve(A, b), 0)
def _solve_linear_system(A, b):
    return pypardiso.spsolve(A.tocsc(), b)
def precompute(self):
    ## Precompute whatever possible
    A = self.lca.technosphere_matrix
    # Note that A has a block triangular form (order of databases might be different)
    # [ A_ch     0       0     0
    #   L_ag_ch  A_ag    0     0
    #   L_ec_ch  L_ec_ag A_ec  0
    #   L_ex_ch  0       0     A_ex ]
    B = self.lca.biosphere_matrix
    # B in block form
    # [ B_ch B_ag B_ec B_ex ]
    d = self.lca.demand_array
    # Demand in block form
    # [ d_ch d_ag d_ec d_ex ]
    # Find indices of activities for each database (where the databases start and end)
    keys_db = [k[0] for k in list(self.lca.activity_dict.keys())]
    # 1. Exiobase
    db_list = [ind for ind, val in enumerate(keys_db) if val == self.exiobase_name]
    min_ex, max_ex = min(db_list), max(db_list) + 1
    d_exiobas = d[min_ex:max_ex]
    A_exiobas = A[min_ex:max_ex, min_ex:max_ex]
    B_exiobas = B[:, min_ex:max_ex]
    # 2. Ecoinvent
    db_list = [ind for ind, val in enumerate(keys_db) if val == self.ecoinvent_name]
    min_ec, max_ec = min(db_list), max(db_list) + 1
    d_ecoinve = d[min_ec:max_ec]
    A_ecoinve = A[min_ec:max_ec, min_ec:max_ec]
    B_ecoinve = B[:, min_ec:max_ec]
    # 3. Agribalyse
    db_list = [ind for ind, val in enumerate(keys_db) if val == self.agribalyse_name]
    min_ag, max_ag = min(db_list), max(db_list) + 1
    d_agribal = d[min_ag:max_ag]
    A_agribal = A[min_ag:max_ag, min_ag:max_ag]
    B_agribal = B[:, min_ag:max_ag]
    # 4. CH consumption database
    db_list = [ind for ind, val in enumerate(keys_db) if val == self.ch_consumption_name]
    min_ch, max_ch = min(db_list), max(db_list) + 1
    d_consump = d[min_ch:max_ch]
    A_consump = A[min_ch:max_ch, min_ch:max_ch]
    B_consump = B[:, min_ch:max_ch]
    # 5. L matrices are links between different databases
    L_ag_ch = A[min_ag:max_ag, min_ch:max_ch]  # ch_consumption and agribalyse
    L_ec_ch = A[min_ec:max_ec, min_ch:max_ch]  # ch_consumption and ecoinvent
    L_ex_ch = A[min_ex:max_ex, min_ch:max_ch]  # ch_consumption and exiobase
    L_ec_ag = A[min_ec:max_ec, min_ag:max_ag]  # agribalyse and ecoinvent
    # 6. Solutions of the system of linear equations for all databases
    x_consump = spsolve(A_consump, d_consump)
    x_agribal = spsolve(A_agribal, d_agribal - L_ag_ch * x_consump)
    x_ecoinve = spsolve(A_ecoinve, d_ecoinve - L_ec_ch * x_consump - L_ec_ag * x_agribal)
    # 7. LCIA score without exiobase
    biosphere_without_exiobase = B_consump * x_consump \
        + B_agribal * x_agribal \
        + B_ecoinve * x_ecoinve
    # 8. Adjusted exiobase demand
    d_exiobas_adjusted = d_exiobas - L_ex_ch * x_consump
    return biosphere_without_exiobase, d_exiobas_adjusted
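Step 6 is forward block substitution on the lower block triangular form sketched in the comments: for [[A1, 0], [L, A2]] [x1; x2] = [d1; d2], solve A1 x1 = d1 first and then A2 x2 = d2 - L x1. In miniature, with toy blocks:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

A1 = 2.0 * sparse.identity(2, format='csc')
A2 = 3.0 * sparse.identity(2, format='csc')
L = sparse.csr_matrix(np.ones((2, 2)))
d1, d2 = np.ones(2), np.ones(2)
x1 = spsolve(A1, d1)
x2 = spsolve(A2, d2 - L @ x1)   # subtract the coupling contribution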
def harmonic_response(nodes, node_id, glob_stiff, hyst_damp, inf_damp,
                      consistent_mass, ext_force, data, file_name):
    """
    :param nodes: (float) array [NP, 2] with the radial and vertical coordinate of each node
                  (NP: total number of nodes)
    :param node_id: (int) array [NP, 2] with the equation number of the radial and vertical
                    displacement of each node
    :param glob_stiff: (float) csr_matrix [NEQ, NEQ] global stiffness matrix
                       (NEQ: total number of equations)
    :param hyst_damp: (float) csr_matrix [NEQ, NEQ] global hysteretic stiffness matrix
    :param inf_damp: (float) csr_matrix [NEQ, NEQ] viscous damping matrix due to infinite boundaries
    :param consistent_mass: (float) csr_matrix [NEQ, NEQ] with the global consistent mass matrix
    :param ext_force: (float) array [NEQ] with global external unit force vector
    :param data: (dict) with FEM input parameters
    :param file_name: (string) full path to the txt-file describing the determined FEM parameters
    :return: (dict) with the transfer compliance in radial and vertical direction
    """
    # Obtain frequencies at which the harmonic response analysis has to be performed
    frequencies = frequency_sampling(data)
    omegas = frequencies * 2 * math.pi
    neq = ext_force.shape[0]
    # Allocate the array with results
    harm_response = np.zeros(shape=(neq, frequencies.shape[0]), dtype=complex)
    try:
        with open(file_name, "a") as fid:
            fid.write("----------------------------------------------------------------\n\n")
            fid.write("Calculation started at %s\n"
                      % (datetime.datetime.now().strftime("%B %d, %Y %I:%M:%S")))
            fid.write("    total number of frequency steps %d\n" % np.size(frequencies))
    except OSError:
        exit(-102)
    # Cycle through all frequencies and solve the harmonic response analysis
    for i1, w in enumerate(omegas):
        if data["SolverType"] == 1:
            matrix = -consistent_mass * (w ** 2) + glob_stiff + 1j * (inf_damp * w + hyst_damp)
            harm_response[:, i1] = spsolve(matrix, ext_force, use_umfpack=True)
        elif data["SolverType"] == 2:
            matrix = -consistent_mass * (w ** 2) + glob_stiff + 1j * (inf_damp * w + hyst_damp)
            harm_response[:, i1] = spsolve(matrix, ext_force, use_umfpack=True)
        elif data["SolverType"] == 3:
            matrix_real = -consistent_mass * (w ** 2) + glob_stiff
            matrix_imag = inf_damp * w + hyst_damp
            matrix = vstack([hstack([matrix_real, -matrix_imag]),
                             hstack([matrix_imag, matrix_real])], format='csr')
            solution = pp.spsolve(
                matrix,
                np.concatenate([ext_force, np.zeros(shape=ext_force.shape)], axis=0))
            harm_response[:, i1] = solution[0:neq] + 1j * solution[neq:2 * neq]
        print("freq = %0.4f Hz" % (frequencies[i1]))
    harm_response = np.append(harm_response,
                              np.zeros(shape=(1, frequencies.shape[0])), axis=0)
    # Only save the results of the top nodes
    top_nodes_idx = np.where(nodes[:, 1] == 0)
    result = {
        'RDisp_real': np.squeeze(np.real(harm_response[node_id[top_nodes_idx, 0], :])).tolist(),
        'RDisp_imag': np.squeeze(np.imag(harm_response[node_id[top_nodes_idx, 0], :])).tolist(),
        'ZDisp_real': np.squeeze(np.real(harm_response[node_id[top_nodes_idx, 1], :])).tolist(),
        'ZDisp_imag': np.squeeze(np.imag(harm_response[node_id[top_nodes_idx, 1], :])).tolist(),
        'Frequency': frequencies.tolist(),
        'Rcoord': np.squeeze(nodes[top_nodes_idx, 0]).tolist()
    }
    if data["MaxFreqLimited"] != data["HighFreq"]:
        result["MaxFreqLimited"] = data["MaxFreqLimited"]
    try:
        with open(file_name, "a") as fid:
            fid.write("Calculation ended at %s\n"
                      % (datetime.datetime.now().strftime("%B %d, %Y %I:%M:%S")))
    except OSError:
        exit(-102)
    return result
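The SolverType == 3 branch embeds the complex system (R + iI) x = f into an all-real block system [[R, -I], [I, R]] [xr; xi] = [f; 0], presumably because the pypardiso solver behind `pp.spsolve` factorises real matrices. A minimal self-contained check of that embedding:

import numpy as np
from scipy import sparse
from scipy.sparse import vstack, hstack
from scipy.sparse.linalg import spsolve

R = 2.0 * sparse.identity(3, format='csr')
I = 0.5 * sparse.identity(3, format='csr')
f = np.ones(3)
M = vstack([hstack([R, -I]), hstack([I, R])], format='csc')
sol = spsolve(M, np.concatenate([f, np.zeros(3)]))
x = sol[:3] + 1j * sol[3:]
assert np.allclose((R + 1j * I) @ x, f)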
def pick_method(data, glob_stiff, lumped_mass, consistent_mass, hyst_damp,
                inf_damp, ext_force, file_name):
    """
    Routine to estimate the required CPU time to perform the simulation using
    central difference and harmonic response analysis. The fastest method is
    then chosen.

    :param data: (dict) with FEM input parameters
    :param glob_stiff: (float) csr_matrix [NEQ, NEQ] global stiffness matrix
                       (NEQ: total number of equations)
    :param lumped_mass: (float) array [NEQ] with diagonal components of the lumped mass matrix
    :param consistent_mass: (float) csr_matrix [NEQ, NEQ] with the global consistent mass matrix
    :param hyst_damp: (float) csr_matrix [NEQ, NEQ] with the global hysteretic damping matrix
    :param inf_damp: (float) csr_matrix [NEQ, NEQ] with the viscous damping matrix due to
                     infinite boundaries
    :param ext_force: (float) array [NEQ] with global external unit force vector
    :param file_name: (string) full path to the txt-file describing the determined FEM parameters
    :return: (dict) with the updated data dictionary
    """
    time_step, total_steps, output_interval, time_end = time_sampling(
        data, glob_stiff, lumped_mass)
    neq = lumped_mass.shape[0]
    disp = np.ones(shape=neq, dtype=float)
    vel = np.ones(shape=neq, dtype=float)
    # Time the CPU time to solve 10 time steps with central differences
    time = time_step
    excitation_frequencies = np.arange(
        data["ForcingFreqIncrement"], data["HighFreq"] * 1.1,
        data["ForcingFreqIncrement"]) * 2 * math.pi
    excitation_phases = np.ones(shape=excitation_frequencies.shape, dtype=float)
    t1_start = perf_counter()
    for i1 in range(10):
        force_magnitude = 1.0E6 * np.sum(
            np.sin(excitation_frequencies * time + excitation_phases))
        result = (force_magnitude - (glob_stiff * disp)
                  - hyst_damp * (np.absolute(disp) * np.sign(vel))
                  - (inf_damp * vel)) * lumped_mass
    time1 = (perf_counter() - t1_start) * total_steps / 10
    del result
    # Time the CPU time to solve the harmonic response at 1 frequency
    frequencies = frequency_sampling(data) * 2 * math.pi
    t2_start = perf_counter()
    if data["SolverType"] == 1:
        matrix = -consistent_mass * (frequencies[-1] ** 2) + glob_stiff + 1j * (
            inf_damp * frequencies[-1] + hyst_damp)
        result = spsolve(matrix, ext_force)
    elif data["SolverType"] == 2:
        matrix = -consistent_mass * (frequencies[-1] ** 2) + glob_stiff + 1j * (
            inf_damp * frequencies[-1] + hyst_damp)
        result = spsolve(matrix, ext_force, use_umfpack=True)
    elif data["SolverType"] == 3:
        matrix_real = -consistent_mass * (frequencies[-1] ** 2) + glob_stiff
        matrix_imag = inf_damp * frequencies[-1] + hyst_damp
        matrix = vstack([hstack([matrix_real, -matrix_imag]),
                         hstack([matrix_imag, matrix_real])], format='csr')
        result = pp.spsolve(
            matrix,
            np.concatenate([ext_force, np.zeros(shape=ext_force.shape)], axis=0))
    time2 = (perf_counter() - t2_start) * frequencies.size
    del result
    # Make a decision
    if time1 < time2 * data["MethodDecisionFactor"]:
        data["CalcType"] = 1
    else:
        data["CalcType"] = 2
    try:
        with open(file_name, "a") as fid:
            fid.write("----------------------------------------------------------------\n\n")
            fid.write("CPU time estimation finished at %s\n"
                      % (datetime.datetime.now().strftime("%B %d, %Y %I:%M:%S")))
            fid.write("    estimated CPU time for explicit %12.6e\n" % time1)
            fid.write("    estimated CPU time for harmonic %12.6e\n" % time2)
            if data["CalcType"] == 1:
                # CalcType 1 means the explicit estimate was the smaller one
                fid.write("    Texplicit < %12.6e x Tharmonic\n" % data["MethodDecisionFactor"])
                fid.write("    explicit time integration is chosen\n")
            else:
                fid.write("    Texplicit >= %12.6e x Tharmonic\n" % data["MethodDecisionFactor"])
                fid.write("    harmonic response analysis is chosen\n")
    except OSError:
        exit(-102)
    return data
def solve_with_profiling(A, b, matrix_name, matrix_type, solver_library='umfpack'):
    """Perform a benchmark on the given matrix-rhs pair for solving A*xe = b,
    where xe is assumed to be a vector of ones [1, 1, ..., 1].T

    Parameters
    ----------
    A: scipy.sparse matrix
        the coefficient matrix
    b: numpy.array
        right-hand side of A*xe = b, where xe is a vector of ones
        [1, 1, 1, ..., 1].T

    Returns
    -------
    result: Dict
        dictionary with these key-value pairs:
        'matrix_name': name of the matrix
        'start_time': int, start time in UNIX format
        'end_time': int, end time in UNIX format
        'relative_error': float, relative error computed as norm2(xe - x)/norm2(xe)
        'solver_library': str, value of the solver library
        'matrix_dimensions': str, value of NxM
        'umfpack_error': 1 if UMFPACK raised MemoryError, else 0
    """
    umfpack_mem_error = False
    if solver_library == 'mkl':
        start_time = time.time()
        x = pypardiso.spsolve(A, b)
        end_time = time.time()
    elif solver_library == 'superlu':
        start_time = time.time()
        x = scipy.sparse.linalg.spsolve(A, b, use_umfpack=False)
        end_time = time.time()
    elif solver_library == 'umfpack':
        start_time = time.time()
        try:
            x = scipy.sparse.linalg.spsolve(A, b, use_umfpack=True)
        except MemoryError:
            print("Got MemoryError for UMFPACK!")
            umfpack_mem_error = True
        end_time = time.time()
    else:
        raise ValueError(
            "Wrong value for parameter 'solver_library', should be in "
            "{'mkl', 'umfpack', 'superlu'}, got {} instead.".format(solver_library))
    xe = np.ones((A.shape[1],))
    relative_error = get_relative_error(xe, x) if not umfpack_mem_error else -1
    del xe
    gc.collect()
    return {
        'matrix_name': matrix_name,
        'matrix_type': matrix_type,
        'matrix_dimensions': "{}x{}".format(A.shape[0], A.shape[1]),
        'start_time': start_time,
        'end_time': end_time,
        'relative_error': relative_error,
        'solver_library': solver_library,
        'umfpack_error': 1 if umfpack_mem_error else 0,
    }
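A hypothetical driver for the benchmark above; the matrix is a synthetic near-identity system (almost surely nonsingular), and `get_relative_error` is assumed to be available alongside the function:

import numpy as np
import scipy.sparse

n = 2000
A = scipy.sparse.random(n, n, density=1e-3, format='csc') \
    + scipy.sparse.identity(n, format='csc')
xe = np.ones(n)
b = A @ xe     # rhs consistent with the exact solution of ones
result = solve_with_profiling(A, b, 'random2000', 'synthetic',
                              solver_library='superlu')
print(result['relative_error'])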
def solver(A, b, **kwargs):
    r"""Wrapper method for the PyPardiso sparse linear solver."""
    x = pypardiso.spsolve(A=A, b=b)
    return x