def _traverse_node(node, file_name):
    """Recursively collect all strings in the node and all identifiers that
    appear on the left side of an assignment.

    Returns the tuple (strings_in_node, left_side_identifiers).
    """
    strings_in_node, left_side_identifiers = [], []
    if isinstance(node, (list, set)):
        for elem in node:
            child_strings_in_node, child_left_side_identifiers = _traverse_node(elem, file_name)
            left_side_identifiers += child_left_side_identifiers
            strings_in_node += child_strings_in_node
    elif isinstance(node, str):
        if node:
            strings_in_node.append(node)
        # Empty string -> do nothing
    elif isinstance(node, javalang.ast.Node):
        for elem in node.children:
            child_strings_in_node, child_left_side_identifiers = _traverse_node(elem, file_name)
            left_side_identifiers += child_left_side_identifiers
            strings_in_node += child_strings_in_node
        parsed_identifiers = _parse_left_side_identifiers(node)
        if parsed_identifiers:
            left_side_identifiers += parsed_identifiers
    elif not node:
        pass  # Empty node -> do nothing
    else:
        log.info(str(node) + " is neither a node nor a string nor a list nor None")
    return strings_in_node, left_side_identifiers
def dynamic_residual(self, x: np.ndarray,
                     f_exc: np.ndarray,  # TODO: make this a property of WEC
                     f_pto_fun: types.FunctionType,
                     f_ext_fun: types.FunctionType) -> np.ndarray:
    """
    Solves WEC dynamics in residual form so that they may be enforced through
    a nonlinear constraint within an optimization problem.

    Parameters
    ----------
    x : np.ndarray
        Decision variable for optimization problem
    f_exc : np.ndarray
        Time history of excitation forcing at collocation points in body
        coordinate system
    f_pto_fun : types.FunctionType
        Function that accepts the decision variable and the WEC, and returns
        PTO forcing at collocation points in body coordinate system
    f_ext_fun : types.FunctionType
        Function that accepts the decision variable and the WEC, and returns
        other forcing at collocation points in body coordinate system

    Returns
    -------
    np.ndarray
        Residuals at collocation points
    """
    assert isinstance(x, np.ndarray)
    assert isinstance(f_exc, np.ndarray)
    assert isinstance(f_pto_fun, types.FunctionType)
    assert isinstance(f_ext_fun, types.FunctionType)

    # WEC position
    x_wec, _, nf, nm = self.decompose_decision_var(x)

    # WEC velocity (each row is a mode, each column is a Fourier component)
    X = np.reshape(x_wec, (nm, -1))

    # Complex velocity with position at beginning
    X_hat = np.concatenate((np.reshape(X[:, 0], (-1, 1)),
                            X[:, 1::2] - X[:, 2::2]*1j), axis=1)

    Gi_block_scaled = self.num_scale * self.Gi_block.toarray()  # TODO: do this only once
    Fi = np.squeeze(np.reshape(Gi_block_scaled @ X_hat.flatten(), (nm, -1)))
    Fi_fs_tmp_0 = np.real(Fi[0])
    Fi_fs_tmp_1 = np.vstack([np.real(Fi[1::]), -np.imag(Fi[1::])]).ravel('F')
    Fi_fs = np.hstack((np.array(Fi_fs_tmp_0), Fi_fs_tmp_1))
    fi = Fi_fs @ self.Phi

    residual = f_exc + f_pto_fun(x) + f_ext_fun(x) - fi
    return residual.flatten()
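# The residual above is typically driven to zero through an equality
# constraint in a nonlinear program. The following is a minimal, hypothetical
# sketch using scipy.optimize; `wec`, `average_power`, and the initial guess
# are placeholders and not part of the actual WecOptTool API.
import numpy as np
from scipy.optimize import minimize

def solve_sketch(wec, f_exc, f_pto_fun, f_ext_fun, x0, average_power):
    # Enforce the WEC dynamics at all collocation points as an equality constraint
    constraints = [{'type': 'eq',
                    'fun': lambda x: wec.dynamic_residual(x, f_exc, f_pto_fun, f_ext_fun)}]
    # Maximize average absorbed power by minimizing its negative (placeholder objective)
    return minimize(lambda x: -average_power(x), x0,
                    constraints=constraints, method='SLSQP')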
def _parse_left_side_identifiers(node):
    # Returns the names of all variables that appear on the left side of an assignment
    if isinstance(node, (javalang.tree.LocalVariableDeclaration, javalang.tree.VariableDeclaration)):
        decl_strings = []
        for decl in node.declarators:
            if isinstance(decl, javalang.tree.VariableDeclarator):
                decl_strings += [decl.name]
            else:
                log.error(f"Unknown case: {type(decl)} is not a VariableDeclarator")
        return decl_strings
    elif isinstance(node, javalang.tree.Assignment):
        if isinstance(node.expressionl, javalang.tree.MemberReference):
            return [node.expressionl.qualifier, node.expressionl.member]
        elif isinstance(node.expressionl, javalang.tree.This):
            if len(node.expressionl.selectors) == 1 and isinstance(node.expressionl.selectors[0], javalang.tree.MemberReference):
                return [node.expressionl.selectors[0].member]
            else:
                log.error(f"Unknown case: {node.expressionl.selectors} has more than 1 selector or is not a MemberReference")
                return []
        elif isinstance(node.expressionl, javalang.tree.MethodInvocation):
            strings = []
            for arg in node.expressionl.arguments:
                if isinstance(arg, javalang.tree.MemberReference):
                    strings += [arg.qualifier, arg.member]
            return [node.expressionl.qualifier, node.expressionl.member] + strings
        else:
            log.error(f"Unknown case: {type(node.expressionl)} on the left side of an assignment")
            return []
    else:
        return []
def run_bem(self, freq=None, wave_dirs=None, post_proc=True):
    if freq is None:
        freq = np.arange(1, self.params['num_freq']+1)*self.params['f0']
    assert isinstance(freq, np.ndarray)

    if wave_dirs is None:
        wave_dirs = [0]
    else:
        raise NotImplementedError
    assert isinstance(wave_dirs, list)  # TODO check list contains floats

    solver = cpy.BEMSolver()  # TODO: enable setting this
    test_matrix = xr.Dataset(coords={
        'rho': 1e3,                 # TODO: enable setting this
        'water_depth': [np.infty],  # TODO: enable setting this
        'omega': freq*2*np.pi,
        'wave_direction': wave_dirs,
        'radiating_dof': list(self.fb.dofs.keys()),
    })
    data = solver.fill_dataset(test_matrix, [self.fb],
                               hydrostatics=True, mesh=True,
                               wavelength=True, wavenumber=True)
    data['freq'] = data.omega / (2 * np.pi)
    data['freq'].attrs['units'] = 'Hz'
    data = data.set_coords('freq')
    data['T'] = 1 / data.freq
    data['T'].attrs['units'] = 's'
    data = data.set_coords('T')
    self.hydro = data
    self.hydro['displaced_volume'] = self.hsa.hs_data['disp_volume']  # TODO - redundant, probably remove

    if True:  # TODO
        # Infinite frequency added mass
        inf_test_matrix = xr.Dataset(coords={
            'rho': 1e3,  # TODO
            'water_depth': [np.infty],
            'omega': [np.infty],
            'radiating_dof': list(self.fb.dofs.keys()),
        })
        inf_data = solver.fill_dataset(inf_test_matrix, [self.fb])
        self.inf_data = inf_data
        self.hydro['Ainf'] = inf_data.added_mass[0, :, :]

    if post_proc:
        self.__post_proc_bem__()
def _extract_inner_classifier(cls_node, file_name):
    inner_classifiers = []
    for body_elem in cls_node.body:
        if isinstance(body_elem, javalang.tree.ClassDeclaration):
            inner_classifiers.append(_extract_class(body_elem, file_name))
        elif isinstance(body_elem, javalang.tree.InterfaceDeclaration):
            inner_classifiers.append(_extract_interface(body_elem, file_name))
        elif isinstance(body_elem, javalang.tree.EnumDeclaration):
            inner_classifiers.append(_extract_enum(body_elem, file_name))
        else:
            continue
    return inner_classifiers
def extract_type(type_node, file_name):
    if isinstance(type_node, javalang.tree.ClassDeclaration):
        return _extract_class(type_node, file_name)
    elif isinstance(type_node, javalang.tree.InterfaceDeclaration):
        return _extract_interface(type_node, file_name)
    elif isinstance(type_node, javalang.tree.EnumDeclaration):
        return _extract_enum(type_node, file_name)
    elif isinstance(type_node, javalang.tree.AnnotationDeclaration):
        log.info("Extracting annotation declaration called")
        return None
    else:
        log.info("No matching TypeDeclaration-Subclass!")
        return None
def set(self, param_internal, param_val):
    assert isinstance(param_internal, Parameter)
    assert param_internal.shape == (self.dimension,)
    if isinstance(param_val, (list, np.ndarray)):
        assert len(param_val) == self.dimension
        assert np.array(param_val).ndim == 1
        val_int_list = [self.decode(val, 'param_val') for val in param_val]
    else:
        assert np.isscalar(param_val)
        val_int_list = [self.decode(param_val, 'param_val')] * self.dimension
    param_internal.set_data(anp.array(val_int_list))
def boundary_value_or_flux(self, symbol, discretised_child):
    """
    Uses linear extrapolation to get the boundary value or flux of a variable
    in the Finite Volume Method.

    See :meth:`pybamm.SpatialMethod.boundary_value`
    """
    # Find the number of submeshes
    submesh_list = self.mesh.combine_submeshes(*discretised_child.domain)
    prim_pts = submesh_list[0].npts
    sec_pts = len(submesh_list)

    # Create submatrix to compute boundary values or fluxes
    if isinstance(symbol, pybamm.BoundaryValue):
        if symbol.side == "left":
            sub_matrix = csr_matrix(
                ([1.5, -0.5], ([0, 0], [0, 1])), shape=(1, prim_pts)
            )
        elif symbol.side == "right":
            sub_matrix = csr_matrix(
                ([-0.5, 1.5], ([0, 0], [prim_pts - 2, prim_pts - 1])),
                shape=(1, prim_pts),
            )
    elif isinstance(symbol, pybamm.BoundaryGradient):
        if symbol.side == "left":
            dx = submesh_list[0].d_nodes[0]
            sub_matrix = (1 / dx) * csr_matrix(
                ([-1, 1], ([0, 0], [0, 1])), shape=(1, prim_pts)
            )
        elif symbol.side == "right":
            dx = submesh_list[0].d_nodes[-1]
            sub_matrix = (1 / dx) * csr_matrix(
                ([-1, 1], ([0, 0], [prim_pts - 2, prim_pts - 1])),
                shape=(1, prim_pts),
            )

    # Generate full matrix from the submatrix
    # Convert to csr_matrix so that we can take the index (row-slicing), which
    # is not supported by the default kron format
    # Note that this makes column-slicing inefficient, but this should not be
    # an issue
    matrix = csr_matrix(kron(eye(sec_pts), sub_matrix))

    # Return boundary value with domain given by symbol
    boundary_value = pybamm.Matrix(matrix) @ discretised_child
    boundary_value.domain = symbol.domain
    boundary_value.auxiliary_domains = symbol.auxiliary_domains
    return boundary_value
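# A minimal numpy illustration of the extrapolation stencils assembled above,
# assuming a uniform grid whose left boundary edge sits half a cell away from
# the first node: the boundary value is 1.5*y[0] - 0.5*y[1] and the boundary
# gradient is approximated by (y[1] - y[0]) / dx.
import numpy as np

y = np.array([2.0, 3.0, 4.0])         # nodal values of a linear profile
dx = 1.0                              # node spacing
left_value = 1.5 * y[0] - 0.5 * y[1]  # extrapolated value at the left edge
left_gradient = (y[1] - y[0]) / dx    # two-point gradient estimate
print(left_value, left_gradient)      # 1.5 1.0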
def gen_initial_guess(self, x_extra=None) -> np.ndarray:
    if isinstance(x_extra, int):
        x_extra = np.zeros(x_extra)
    assert isinstance(x_extra, np.ndarray)
    assert x_extra.ndim == 1
    num_modes = self.hydro.radiating_dof.size
    num_freq = self.hydro.omega.size
    x_pos = np.zeros(num_modes*(2*num_freq+1))
    x = np.concatenate([x_pos, x_extra])
    return x
def get_pow_ub(self, S, dof: int = 2) -> xr.DataArray:
    """
    Find the upper theoretical limit of power

    Parameters
    ----------
    S : pandas.core.frame.DataFrame
        Wave spectrum created by MHKiT.
    dof : int, optional
        Degree-of-freedom. The default is 2 (heave).

    Returns
    -------
    P_ub : xr.DataArray
        Spectrum of power upper bound (sum for total).
    """
    assert isinstance(dof, int)
    Fexc = self.get_waveExcitation(S)['F_EXC'].isel(influenced_dof=dof).squeeze()
    Zi = self.hydro['Zi'].isel(dict(influenced_dof=dof, radiating_dof=dof)).squeeze()
    P_ub = 1/8 * np.abs(Fexc)**2 / np.real(Zi)
    return P_ub
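# A minimal scalar sketch of the power upper bound evaluated above,
# P_ub = |F_exc|**2 / (8 * Re{Z_i}), using assumed illustrative values in
# place of the xarray spectra handled by the class.
import numpy as np

f_exc = 3.0 + 4.0j                   # excitation force amplitude (assumed)
z_i = 2.0 + 1.0j                     # intrinsic impedance (assumed)
p_ub = np.abs(f_exc)**2 / (8 * np.real(z_i))
print(p_ub)                          # 25 / 16 = 1.5625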
def fit(self, X, Y, **kwargs):
    X = self._check_and_format_input(X)
    Y = self._check_and_format_input(Y)

    mean_function = self.likelihood.mean
    if isinstance(mean_function, ScalarMeanFunction):
        mean_function.set_mean_value(anp.mean(Y))

    def _log_posterior_density(hp_values: anp.ndarray) -> float:
        # We check box constraints before converting hp_values to internal
        if not self._is_feasible(hp_values):
            return -float('inf')
        # Decode and write into Gluon parameters
        _set_gp_hps(hp_values, self.likelihood)
        neg_log = negative_log_posterior(self.likelihood, X, Y)
        return -neg_log

    slice_sampler = SliceSampler(_log_posterior_density, 1.0, self.random_seed)
    init_hp_values = _get_gp_hps(self.likelihood)
    self.samples = slice_sampler.sample(
        init_hp_values, self.mcmc_config.n_samples,
        self.mcmc_config.n_burnin, self.mcmc_config.n_thinning)
    self._states = self._create_posterior_states(self.samples, X, Y)
def process_trace_link_2D_dict(self, trace_link_2D_dict: Dict[float, Dict[float, List[TraceLink]]]):
    print_str_dict, best_eval_result, best_final_threshold, best_maj_thresh = \
        self._process_trace_link_2D_dict(trace_link_2D_dict)

    header_row = [""]  # First header cell is empty -> needed for header column
    header_row += [self.FILE_LEVEL_DROP_THRESH_PATTERN.format(final_threshold)
                   for final_threshold in print_str_dict[best_maj_thresh].keys()]
    excel_array = [header_row]
    for maj_thresh in sorted(print_str_dict):
        # First cell is the maj thresh, followed by the evaluated f1 metrics for this maj thresh
        next_row = [self.MAJ_DROP_THRESH_PATTERN.format(maj_thresh)]
        for final_threshold in sorted(print_str_dict[maj_thresh]):
            next_row.append(print_str_dict[maj_thresh][final_threshold])
            if self._also_print_eval:
                log.info(f"\nm{maj_thresh} f{final_threshold}\n"
                         f"{next_row[-1]}")
        excel_array.append(next_row)

    excel_array.append([""])  # Add empty row as divider
    if isinstance(best_eval_result, F1ResultObject):
        excel_array = self._add_best_f1_2D_excel_rows(excel_array, print_str_dict, best_eval_result,
                                                      best_final_threshold, best_maj_thresh)
    else:
        excel_array.append([self.NO_BEST_F1_MESSAGE])

    FileUtil.write_eval_to_excel(excel_array, self._excel_output_file_path)
def _find_unregistered_block_in_container(data):
    # Find whether a nested container structure contains Blocks that are not
    # in ``children`` (the set of registered child Blocks provided by the
    # enclosing scope; see _check_container_with_block below).
    if isinstance(data, (list, tuple)):
        for ele in data:
            if _find_unregistered_block_in_container(ele):
                return True
        return False
    elif isinstance(data, dict):
        for _, v in data.items():
            if _find_unregistered_block_in_container(v):
                return True
        return False
    elif isinstance(data, Block):
        return data not in children
    else:
        return False
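# A self-contained sketch of the same recursive pattern: walk an arbitrarily
# nested list/tuple/dict structure and report whether it contains an instance
# of a target type that is missing from a known "registered" set. The Widget
# class is a stand-in for Block, used purely for illustration.
def contains_unregistered(data, target_type, registered):
    if isinstance(data, (list, tuple)):
        return any(contains_unregistered(v, target_type, registered) for v in data)
    if isinstance(data, dict):
        return any(contains_unregistered(v, target_type, registered) for v in data.values())
    if isinstance(data, target_type):
        return data not in registered
    return False

class Widget:
    pass

print(contains_unregistered({'a': [1, (Widget(),)]}, Widget, registered=set()))  # True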
def _check_container_with_block(self):
    children = set(self._children.values())

    def _find_unregistered_block_in_container(data):
        # Find whether a nested container structure contains Blocks
        if isinstance(data, (list, tuple)):
            for ele in data:
                if _find_unregistered_block_in_container(ele):
                    return True
            return False
        elif isinstance(data, dict):
            for _, v in data.items():
                if _find_unregistered_block_in_container(v):
                    return True
            return False
        elif isinstance(data, Block):
            return data not in children
        else:
            return False

    for k, v in self.__dict__.items():
        if isinstance(v, (list, tuple, dict)) and not (k.startswith('__') or k == '_children'):
            if _find_unregistered_block_in_container(v):
                warnings.warn(
                    '"{name}" is an unregistered container with Blocks. '
                    'Note that Blocks inside the list, tuple or dict will not be '
                    'registered automatically. Make sure to register them using '
                    'register_child() or switching to '
                    'nn.Sequential/nn.HybridSequential instead. '.format(
                        name=self.__class__.__name__ + "." + k),
                    stacklevel=3)
def __init__(self, lower, constr_upper=None, init_val=None,
             regularizer=None, dimension=1):
    # lower should be a non-negative real number
    assert isinstance(lower, numbers.Real) and lower >= 0.0
    self.lower = lower
    super(PositiveScalarEncoding, self).__init__(
        init_val, constr_lower=None, constr_upper=constr_upper,
        regularizer=regularizer, dimension=dimension)
def __read_bem__(self, fpath=None) -> xr.Dataset:
    if fpath is None:
        # TODO - should be able to do this as "from_dir" and build entire object
        fpath = os.path.join(self.params['wrk_dir'], self.params['name'] + '.nc')
    assert isinstance(fpath, str), 'fpath must be type(str), received {:}'.format(type(fpath))
    assert os.path.exists(fpath)
    hydro = merge_complex_values(xr.open_dataset(fpath))
    # TODO - this could result in a mismatch between mesh and hydro
    self.hydro = hydro
    self.__post_proc_bem__()
    return hydro
def __repr__(self):
    s = '{name}(\n{modstr}\n)'
    modstr = '\n'.join([
        '  ({key}): {block}'.format(key=key, block=_indent(block.__repr__(), 2))
        for key, block in self.__dict__.items()
        if isinstance(block, Block)])
    return s.format(name=self.__class__.__name__, modstr=modstr)
def _decorate_ind(e, cnt):
    if isinstance(e, Cnst):
        pass
    elif isinstance(e, Var):
        pass
    elif isinstance(e, Linear):
        pass
    elif isinstance(e, App):
        for ei in e.args:
            cnt = _decorate_ind(ei, cnt)
    elif isinstance(e, If):
        cnt_prev = dict(cnt)
        # record cnt of If
        e.ind = cnt['if']
        cnt['if'] = cnt['if'] + 1
        cnt = _decorate_ind(e.e1, cnt)
        cnt = _decorate_ind(e.e2, cnt)
        cnt = _decorate_ind(e.e3, cnt)
        # ASSUME: no Sample and Fsample inside If's
        assert (cnt_prev['sample'] == cnt['sample'] and
                cnt_prev['fsample'] == cnt['fsample'])
    elif isinstance(e, Let):
        cnt = _decorate_ind(e.v1, cnt)
        cnt = _decorate_ind(e.e1, cnt)
        cnt = _decorate_ind(e.e2, cnt)
    elif isinstance(e, Sample):
        # record cnt of Sample
        e.ind = cnt['sample']
        cnt['sample'] = cnt['sample'] + 1
        cnt = _decorate_ind(e.e1, cnt)
        cnt = _decorate_ind(e.e2, cnt)
    elif isinstance(e, Fsample):
        # record cnt of Fsample
        e.ind = cnt['fsample']
        cnt['fsample'] = cnt['fsample'] + 1
        cnt = _decorate_ind(e.e1, cnt)
        cnt = _decorate_ind(e.e2, cnt)
    elif isinstance(e, Observe):
        for ei in e.args:
            cnt = _decorate_ind(ei, cnt)
        cnt = _decorate_ind(e.c1, cnt)
    else:
        assert False
    return cnt
def calc_impedance(hydro: DataSet_type, damp_frac: float = 0.05, make_sym: bool = True):
    """
    Calculate intrinsic impedance (see, e.g., Falnes).

    @book{falnes2002ocean,
          title={Ocean Waves and Oscillating Systems: Linear Interactions Including Wave-Energy Extraction},
          author={Falnes, J.},
          isbn={9781139431934},
          url={https://books.google.com/books?id=bl1FyQjCklgC},
          year={2002},
          publisher={Cambridge University Press}
    }

    Parameters
    ----------
    hydro : xr.core.dataset.Dataset
        Hydro structure returned from Capytaine with mass matrix and hydrostatics.
    damp_frac : float, optional
        Frictional damping. The default is 0.05.
    make_sym : bool, optional
        Make symmetric. The default is True.

    Returns
    -------
    Zi : xr.core.dataarray.DataArray
        Intrinsic impedance.
    """
    assert isinstance(hydro, DataSet_type), 'hydro must be xr.DataSet, received {:}'.format(type(hydro))
    assert isinstance(damp_frac, float), 'damp_frac must be float, received {:}'.format(type(damp_frac))

    friction_damping = np.eye(hydro.radiation_damping[0, :, :].shape[0]) * damp_frac

    Zi = hydro.radiation_damping + friction_damping + \
        1j * (hydro.omega * (hydro.mass + hydro.added_mass)
              - hydro.hydrostatic_stiffness / hydro.omega)

    if make_sym:
        Zi.values = (Zi.values + Zi.values.transpose(0, 2, 1)) / 2

    return Zi
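# A minimal single-degree-of-freedom sketch of the impedance formula used
# above, Z_i(omega) = B + B_f + 1j*(omega*(m + A) - K/omega), with assumed
# scalar values standing in for the xarray hydrodynamic coefficients.
import numpy as np

omega = 1.2          # rad/s
m, A = 100.0, 20.0   # mass and added mass
B = 5.0              # radiation damping
B_f = 0.05           # frictional damping added to the diagonal (damp_frac)
K = 150.0            # hydrostatic stiffness
Zi = B + B_f + 1j * (omega * (m + A) - K / omega)
print(Zi)            # (5.05+19j)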
def cholesky_computations(features, targets, mean, kernel, noise_variance,
                          debug_log=False, test_intermediates=None):
    """
    Given input matrix X (features), target matrix Y (targets), mean and
    kernel function, compute posterior state {L, P}, where L is the Cholesky
    factor of k(X, X) + sigsq_final * I and L P = Y - mean(X). Here,
    sigsq_final >= noise_variance is minimal such that the Cholesky
    factorization does not fail.

    :param features: Input matrix X (n, d)
    :param targets: Target matrix Y (n, m)
    :param mean: Mean function
    :param kernel: Kernel function
    :param noise_variance: Noise variance (may be increased)
    :param debug_log: Debug output during add_jitter CustomOp?
    :param test_intermediates: If given, all intermediates are written into this dict
    :return: L, P
    """
    kernel_mat = kernel(features, features)
    # Add jitter to noise_variance (if needed) in order to guarantee that
    # Cholesky factorization works
    sys_mat = AddJitterOp(
        flatten_and_concat(kernel_mat, noise_variance),
        initial_jitter_factor=NOISE_VARIANCE_LOWER_BOUND,
        debug_log='true' if debug_log else 'false')
    chol_fact = cholesky_factorization(sys_mat)
    centered_y = targets - anp.reshape(mean(features), (-1, 1))
    pred_mat = aspl.solve_triangular(chol_fact, centered_y, lower=True)

    if test_intermediates is not None:
        assert isinstance(test_intermediates, dict)
        test_intermediates.update({
            'features': features,
            'targets': targets,
            'noise_variance': noise_variance,
            'kernel_mat': kernel_mat,
            'sys_mat': sys_mat,
            'chol_fact': chol_fact,
            'pred_mat': pred_mat,
            'centered_y': centered_y
        })
        test_intermediates.update(kernel.get_params())
        test_intermediates.update(mean.get_params())

    return chol_fact, pred_mat
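# A minimal numpy/scipy sketch of the posterior state computed above: L is the
# Cholesky factor of k(X, X) + sigsq * I and P solves L @ P = Y - mean(X). An
# RBF kernel and a zero mean are assumed purely for illustration; they stand
# in for the library's kernel and mean objects and for the jitter mechanism.
import numpy as np
from scipy.linalg import cholesky, solve_triangular

def rbf_kernel(x1, x2, lengthscale=1.0):
    d2 = ((x1[:, None, :] - x2[None, :, :]) ** 2).sum(-1)
    return np.exp(-0.5 * d2 / lengthscale ** 2)

X = np.array([[0.0], [0.5], [1.0]])     # features, shape (n, d)
Y = np.array([[0.1], [0.4], [0.9]])     # targets, shape (n, m)
noise_variance = 1e-2

L = cholesky(rbf_kernel(X, X) + noise_variance * np.eye(len(X)), lower=True)
P = solve_triangular(L, Y, lower=True)  # zero mean assumed, so Y - mean(X) = Y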
def __setattr__(self, name, value):
    """Registers parameters."""
    if hasattr(self, name):
        existing = getattr(self, name)
        if isinstance(existing, (Parameter, Block)) and not isinstance(value, type(existing)):
            raise TypeError('Changing attribute type for {name} from {type1} to {type2} '
                            'is not allowed.'.format(
                                name=name, type1=type(existing), type2=type(value)))

    if isinstance(value, Block):
        self.register_child(value, name)
    elif isinstance(value, Parameter):
        assert name not in self._reg_params, \
            "Overriding Parameter attribute %s is not allowed. " \
            "If you want to share parameters between blocks, please set " \
            "'params' at Block construction instead."
        self._reg_params[name] = value

    super(Block, self).__setattr__(name, value)
def __init__(self, param_name, encoding, size_cols, **kwargs):
    super(ConstantPositiveVector, self).__init__(**kwargs)
    assert isinstance(encoding, ScalarEncodingBase)
    self.param_name = param_name
    self.encoding = encoding
    self.size_cols = size_cols
    with self.name_scope():
        init_val_int = encoding.init_val_int
        # Note: The initialization values are bogus!
        self.param_internal = self.params.get(
            param_name + '_internal', init=init_Constant(init_val_int),
            shape=(1,), dtype=DATA_TYPE)
def write_bem(self, fpath: str = None):
    """
    Write the BEM solution to a netCDF file.

    Parameters
    ----------
    fpath : str, optional
        Path of the output netCDF file. The default is the WEC's wrk_dir.
    """
    if fpath is None:
        fpath = os.path.join(self.params['wrk_dir'], self.params['name'] + '.nc')
    assert isinstance(fpath, str), 'fpath must be type(str), received {:}'.format(type(fpath))
    separate_complex_values(self.hydro).to_netcdf(fpath)
def _check_and_format_input(self, u):
    """
    Check and massage the input to conform with the numerical type and context

    :param u: some np.ndarray
    """
    assert isinstance(u, anp.ndarray)
    if u.ndim == 1:
        u = anp.reshape(u, (-1, 1))
    if u.dtype != DATA_TYPE:
        return anp.array(u, dtype=DATA_TYPE)
    else:
        return u
def from_file(name: str, fpath: str = None) -> 'WEC':
    """
    Generate a WEC object directly from a directory of previous results.

    Parameters
    ----------
    name : str
    fpath : str, optional
        The default is '.'

    Returns
    -------
    my_wec : WecOptTool.core.Wec
    """
    if fpath is None:
        fpath = '.'
    assert isinstance(fpath, str)
    assert isinstance(name, str)

    param_file = glob.glob(os.path.join(fpath, name + '*.json'))[0]
    with open(param_file) as f:
        params = json.load(f)

    bem_file = glob.glob(os.path.join(fpath, name + '*.nc'))
    if bem_file:
        params['run_bem'] = bem_file[0]

    params['wrk_dir'] = fpath
    params['mesh'] = glob.glob(os.path.join(fpath, name + '*.stl'))[0]

    my_wec = WEC(**params)
    return my_wec
def param_to_pretty_string(gluon_param, encoding):
    """
    Take a gluon parameter and transform it to a string amenable to plotting.
    If need be, the gluon parameter is appropriately encoded (e.g., log-exp
    transform).

    :param gluon_param: gluon parameter
    :param encoding: object in charge of encoding/decoding the gluon_param
    """
    assert isinstance(gluon_param, Parameter)
    assert encoding is not None, "encoding of param {} should not be None".format(
        gluon_param.name)
    param_as_numpy = encoding.get(getval(gluon_param.data()))
    return "{}: {}".format(
        gluon_param.name,
        ";".join("{:.6f}".format(value) for value in param_as_numpy))
def process_binary_operators(self, bin_op, left, right, disc_left, disc_right):
    """Discretise binary operators in model equations. Performs appropriate
    averaging of diffusivities if one of the children is a gradient operator,
    so that discretised sizes match up.

    Parameters
    ----------
    bin_op : :class:`pybamm.BinaryOperator`
        Binary operator to discretise
    left : :class:`pybamm.Symbol`
        The left child of `bin_op`
    right : :class:`pybamm.Symbol`
        The right child of `bin_op`
    disc_left : :class:`pybamm.Symbol`
        The discretised left child of `bin_op`
    disc_right : :class:`pybamm.Symbol`
        The discretised right child of `bin_op`

    Returns
    -------
    :class:`pybamm.BinaryOperator`
        Discretised binary operator
    """
    # Post-processing to make sure discretised dimensions match
    left_evaluates_on_edges = left.evaluates_on_edges()
    right_evaluates_on_edges = right.evaluates_on_edges()

    # inner product takes fluxes from edges to nodes
    if isinstance(bin_op, pybamm.Inner):
        if left_evaluates_on_edges:
            disc_left = self.edge_to_node(disc_left)
        if right_evaluates_on_edges:
            disc_right = self.edge_to_node(disc_right)
    # If neither child evaluates on edges, or both children have gradients,
    # no need to do any averaging
    elif left_evaluates_on_edges == right_evaluates_on_edges:
        pass
    # If only left child evaluates on edges, map right child onto edges
    elif left_evaluates_on_edges and not right_evaluates_on_edges:
        disc_right = self.node_to_edge(disc_right)
    # If only right child evaluates on edges, map left child onto edges
    elif right_evaluates_on_edges and not left_evaluates_on_edges:
        disc_left = self.node_to_edge(disc_left)

    # Return new binary operator with appropriate class
    out = bin_op.__class__(disc_left, disc_right)
    return out
def predict_posterior_marginals(features, mean, kernel, chol_fact, pred_mat,
                                test_features, test_intermediates=None):
    """
    Computes posterior means and variances for test_features. If pred_mat is
    a matrix, so will be posterior_means, but not posterior_variances.
    Reflects the fact that for GP regression and fixed hyperparameters, the
    posterior mean depends on the targets y, but the posterior covariance
    does not.

    :param features: Training inputs
    :param mean: Mean function
    :param kernel: Kernel function
    :param chol_fact: Part L of posterior state
    :param pred_mat: Part P of posterior state
    :param test_features: Test inputs
    :return: posterior_means, posterior_variances
    """
    k_tr_te = kernel(features, test_features)
    linv_k_tr_te = aspl.solve_triangular(chol_fact, k_tr_te, lower=True)
    posterior_means = anp.matmul(anp.transpose(linv_k_tr_te), pred_mat) + \
        anp.reshape(mean(test_features), (-1, 1))
    posterior_variances = kernel.diagonal(test_features) - anp.sum(
        anp.square(linv_k_tr_te), axis=0)

    if test_intermediates is not None:
        assert isinstance(test_intermediates, dict)
        test_intermediates.update({
            'k_tr_te': k_tr_te,
            'linv_k_tr_te': linv_k_tr_te,
            'test_features': test_features,
            'pred_means': posterior_means,
            'pred_vars': anp.reshape(
                anp.maximum(posterior_variances, MIN_POSTERIOR_VARIANCE), (-1,))
        })

    return posterior_means, anp.reshape(
        anp.maximum(posterior_variances, MIN_POSTERIOR_VARIANCE), (-1,))
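# Continuing the sketch after cholesky_computations above: given the posterior
# state (L, P), the posterior mean at test inputs X* is (L^-1 k(X, X*))^T P
# (plus the prior mean) and the posterior variance is diag(k(X*, X*)) minus
# the squared column norms of L^-1 k(X, X*). The names rbf_kernel, X, L, and P
# are the assumed names from that earlier sketch.
Xs = np.array([[0.25], [0.75]])                          # test inputs
k_tr_te = rbf_kernel(X, Xs)
linv_k_tr_te = solve_triangular(L, k_tr_te, lower=True)
post_mean = linv_k_tr_te.T @ P                           # zero prior mean assumed
post_var = np.diag(rbf_kernel(Xs, Xs)) - np.sum(linv_k_tr_te ** 2, axis=0)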
def _get_ind2e(e, res):
    if isinstance(e, Cnst):
        pass
    elif isinstance(e, Var):
        pass
    elif isinstance(e, Linear):
        pass
    elif isinstance(e, App):
        for ei in e.args:
            res = _get_ind2e(ei, res)
    elif isinstance(e, If):
        # add to res_dict
        res[('if', e.ind)] = e
        res = _get_ind2e(e.e1, res)
        res = _get_ind2e(e.e2, res)
        res = _get_ind2e(e.e3, res)
    elif isinstance(e, Let):
        res = _get_ind2e(e.e1, res)
        res = _get_ind2e(e.e2, res)
    elif isinstance(e, Sample):
        res = _get_ind2e(e.e1, res)
        res = _get_ind2e(e.e2, res)
    elif isinstance(e, Fsample):
        # add to res_dict
        res[('fsample', e.ind)] = e
        res = _get_ind2e(e.e1, res)
        res = _get_ind2e(e.e2, res)
    elif isinstance(e, Observe):
        for ei in e.args:
            res = _get_ind2e(ei, res)
        res = _get_ind2e(e.c1, res)
    else:
        assert False
    return res
def __init__(self, name, grad_req='write', shape=None, dtype=anp.float64,
             lr_mult=1.0, wd_mult=1.0, init=None, allow_deferred_init=False,
             differentiable=True, stype='default', grad_stype='default'):
    self._var = None
    self._data = None
    self._grad = None
    self._ctx_list = None
    self._ctx_map = None
    self._trainer = None
    self._deferred_init = ()
    self._differentiable = differentiable
    if allow_deferred_init:
        raise NotImplementedError(
            'allow_deferred_init is not a valid option in autograd')
    self._allow_deferred_init = allow_deferred_init
    self._grad_req = None
    if isinstance(shape, int):
        shape = (shape,)
    self._shape = shape
    self.name = name
    self._dtype = dtype
    self.lr_mult = lr_mult
    self.wd_mult = wd_mult
    self.grad_req = grad_req
    self.init = init
    # Sparse related storage type information; only 'default' is supported here
    valid_stypes = ['default']
    assert grad_stype in valid_stypes, \
        "grad_stype for Parameter '%s' must be 'default', but got '%s'" % (name, grad_stype)
    assert stype in valid_stypes, \
        "stype for Parameter '%s' must be 'default', but got '%s'" % (name, stype)
    self._grad_stype = grad_stype
    self._stype = stype
def checker(ex, type_, truthval):
    assert isinstance(ex, type_) == truthval
    return 1.