def _integrate(self, prev, dt, eta0, q, g0=np.eye(4), intermediate=False):
    """Integrate the rod state along the body using an implicit-midpoint scheme.

    Spatially marches xi (strain) and eta (velocity) from the base to the tip,
    then time-steps the stored frames g with a midpoint RKMK update.

    :param prev: rod state at the previous time step (same class; provides
                 prev.xi, prev.eta, prev.g arrays)
    :param dt: time step size
    :param eta0: initial (base) velocity twist, written into self.eta[0, :]
    :param q: generalized-coordinate/actuation argument forwarded to the loads
              — semantics defined by the load objects, not visible here
    :param g0: initial frame; NOTE(review): currently unused — g_half is
               instead seeded from prev.eta[0, :] below. Confirm intended.
    :param intermediate: when True, tip loads are folded into the distributed
                         load at the second-to-last node
    :return: the half-step frame g_half at the tip
    """
    # base boundary values: straight reference strain, prescribed velocity
    self.xi[0, :] = np.array([0, 0, 0, 0, 0, 1])
    self.eta[0, :] = eta0
    # integration over the body (don't need the initial point as the initial values are determined already)
    # g_half = g0  # known initial condition (disabled; see NOTE in docstring)
    # half-step frame seeded by advancing the previous base velocity over dt/2
    g_half = expm(dt / 2 * se(prev.eta[0, :]))
    for i in range(self.N - 1):
        # averaging over steps to get half step values
        xi_half = (self.xi[i, :] + prev.xi[i, :]) / 2
        eta_half = (self.eta[i, :] + prev.eta[i, :]) / 2
        # implicit midpoint approximation
        xi_dot = (self.xi[i, :] - prev.xi[i, :]) / dt
        eta_dot = (self.eta[i, :] - prev.eta[i, :]) / dt
        # external loads: A_bar multiplies the strain, B_bar is additive
        A_bar = 0
        B_bar = 0
        # viscosity
        B_bar += self.V @ xi_dot
        # other loads
        for load in self.loads:
            A, B = load.dist_load(g_half, xi_half, eta_half, xi_dot,
                                  eta_dot, self, q)
            A_bar += A
            B_bar += B
        # optionally fold tip loads into the last interior node
        if intermediate and i == self.N - 2:
            for load in self.loads:
                W = load.tip_load(g_half, xi_half, eta_half, xi_dot,
                                  eta_dot, self, q)
                B_bar += W
        # spatial derivatives (balance of momentum solved for xi')
        xi_der = np.linalg.inv(self.K - A_bar) @ (
            (self.M @ eta_dot) - (adjoint(eta_half).T @ self.M @ eta_half) +
            (adjoint(xi_half).T @ self.K @ (xi_half - self.xi_ref)) + B_bar)
        eta_der = xi_dot - (adjoint(xi_half) @ eta_half)
        # explicit Euler step
        xi_half_next = xi_half + self.ds * xi_der
        eta_half_next = eta_half + self.ds * eta_der
        g_half = g_half @ expm(se(self.ds * xi_half))
        # determine next step from half step value
        self.xi[i + 1, :] = 2 * xi_half_next - prev.xi[i + 1, :]
        self.eta[i + 1, :] = 2 * eta_half_next - prev.eta[i + 1, :]
    # midpoint RKMK to step the g values
    for i in range(self.N):
        self.g[i, :] = flatten(
            unflatten(prev.g[i, :]) @ expm(
                se(dt * (self.eta[i, :] + prev.eta[i, :]) / 2)))
    return g_half
def process_input(self, i):
    """Pull an author document out of flattened form input and normalize it.

    Returns the trimmed author document, or None when the input carries
    no 'author' entry.
    """
    data = utils.unflatten(i)
    if 'author' not in data:
        return None
    author = trim_doc(data.author)
    # ';' and newlines both act as separators for alternate names
    raw_names = author.get('alternate_names', None) or ''
    author.alternate_names = [
        n.strip()
        for n in raw_names.replace("\n", ";").split(';')
        if n.strip()
    ]
    author.links = author.get('links') or []
    return author
def test_unflatten(self):
    """unflatten should reassemble patch columns into the stacked images."""
    patch_cols = np.array([
        [1, 1, 1, 1, 0, 0, 0, 0],
        [0, 0, 0, 0, 1, 1, 1, 1],
        [0, 0, 0, 0, 0, 1, 0, 0],
        [0, 0, 0, 0, 0, 0, 1, 0],
    ])
    expected = np.array([
        [[1, 1, 0], [1, 1, 0], [0, 0, 0]],
        [[0, 1, 1], [0, 2, 1], [0, 1, 0]],
    ])
    result = utils.unflatten(patch_cols, (2, 3, 3), (2, 2), (0, 0), (1, 1))
    self.assertTrue(np.allclose(result, expected))
def save(self, formdata):
    """Update work and edition documents according to the specified formdata.

    Pops control fields (_comment, _delete) from formdata, splits the rest
    into work/edition data, then either deletes or updates the documents and
    commits everything in one batch via DocSaveHelper.
    """
    comment = formdata.pop('_comment', '')
    user = accounts.get_current_user()
    # only admins may delete; '_delete' is a form control field, not data
    delete = user and user.is_admin() and formdata.pop('_delete', '')
    formdata = utils.unflatten(formdata)
    work_data, edition_data = self.process_input(formdata)
    self.process_new_fields(formdata)
    saveutil = DocSaveHelper()
    if delete:
        if self.edition:
            self.delete(self.edition.key, comment=comment)
        # only delete the work when no other editions reference it
        if self.work and self.work.edition_count == 0:
            self.delete(self.work.key, comment=comment)
        return
    if work_data:
        # Create any new authors that were added
        for i, author in enumerate(work_data.get("authors") or []):
            if author['author']['key'] == "__new__":
                # author details come from the raw formdata at the same index
                a = self.new_author(formdata['authors'][i])
                author['author']['key'] = a.key
                saveutil.save(a)
        if self.work is None:
            # no existing work: create one and point the edition at it
            self.work = self.new_work(self.edition)
            edition_data.works = [{'key': self.work.key}]
        self.work.update(work_data)
        saveutil.save(self.work)
    if self.edition and edition_data:
        # identifiers/classifications/etc. are popped so the remaining
        # edition_data can be applied with a plain update()
        identifiers = edition_data.pop('identifiers', [])
        self.edition.set_identifiers(identifiers)
        classifications = edition_data.pop('classifications', [])
        self.edition.set_classifications(classifications)
        self.edition.set_physical_dimensions(
            edition_data.pop('physical_dimensions', None))
        self.edition.set_weight(edition_data.pop('weight', None))
        self.edition.set_toc_text(edition_data.pop('table_of_contents', ''))
        # clear translation fields unless the form explicitly says 'yes'
        if edition_data.pop('translation', None) != 'yes':
            edition_data.translation_of = None
            edition_data.translated_from = None
        self.edition.update(edition_data)
        saveutil.save(self.edition)
    # single batched commit for all queued documents
    saveutil.commit(comment=comment, action="edit-book")
def process_input(self, i):
    """Extract and clean up the author document from flattened form input.

    Gives back the trimmed author, or None if no 'author' key is present.
    """
    unflattened = utils.unflatten(i)
    if "author" not in unflattened:
        return None
    author = trim_doc(unflattened.author)
    names_blob = author.get("alternate_names", None) or ""
    # normalize newlines to ';' so both delimit alternate names
    stripped = (part.strip() for part in names_blob.replace("\n", ";").split(";"))
    author.alternate_names = [part for part in stripped if part]
    author.links = author.get("links") or []
    return author
def save(self, formdata):
    """Update work and edition documents according to the specified formdata.

    Control fields (_comment, _delete) are popped first; the rest is split
    into work/edition data, applied, and committed in one batch.
    """
    comment = formdata.pop('_comment', '')
    user = accounts.get_current_user()
    # deletion is admin-only and driven by the '_delete' control field
    delete = user and user.is_admin() and formdata.pop('_delete', '')
    formdata = utils.unflatten(formdata)
    work_data, edition_data = self.process_input(formdata)
    self.process_new_fields(formdata)
    saveutil = DocSaveHelper()
    if delete:
        if self.edition:
            self.delete(self.edition.key, comment=comment)
        # keep the work alive while other editions still reference it
        if self.work and self.work.edition_count == 0:
            self.delete(self.work.key, comment=comment)
        return
    if work_data:
        # Create any new authors that were added
        for i, author in enumerate(work_data.get("authors") or []):
            if author['author']['key'] == "__new__":
                # raw author details live at the same index of formdata
                a = self.new_author(formdata['authors'][i])
                author['author']['key'] = a.key
                saveutil.save(a)
        if self.work is None:
            # create a fresh work and link the edition to it
            self.work = self.new_work(self.edition)
            edition_data.works = [{'key': self.work.key}]
        self.work.update(work_data)
        saveutil.save(self.work)
    if self.edition and edition_data:
        # special-cased fields are popped so update() only sees plain data
        identifiers = edition_data.pop('identifiers', [])
        self.edition.set_identifiers(identifiers)
        classifications = edition_data.pop('classifications', [])
        self.edition.set_classifications(classifications)
        self.edition.set_physical_dimensions(edition_data.pop('physical_dimensions', None))
        self.edition.set_weight(edition_data.pop('weight', None))
        self.edition.set_toc_text(edition_data.pop('table_of_contents', ''))
        # translation fields survive only when explicitly marked 'yes'
        if edition_data.pop('translation', None) != 'yes':
            edition_data.translation_of = None
            edition_data.translated_from = None
        self.edition.update(edition_data)
        saveutil.save(self.edition)
    saveutil.commit(comment=comment, action="edit-book")
def save(self, formdata):
    """Update work and edition documents according to the specified formdata.

    Variant that resolves the user via web.ctx and queues all saves through
    DocSaveHelper for a single commit.
    """
    comment = formdata.pop("_comment", "")
    user = web.ctx.site.get_user()
    # '_delete' is honored for admins only
    delete = user and user.is_admin() and formdata.pop("_delete", "")
    formdata = utils.unflatten(formdata)
    work_data, edition_data = self.process_input(formdata)
    self.process_new_fields(formdata)
    saveutil = DocSaveHelper()
    if delete:
        if self.edition:
            self.delete(self.edition.key, comment=comment)
        if self.work and self.work.edition_count == 0:
            self.delete(self.work.key, comment=comment, action="delete")
        return
    # NOTE(review): this loop runs before the `if work_data:` guard below —
    # if process_input can return work_data=None this raises AttributeError;
    # confirm process_input's contract.
    for i, author in enumerate(work_data.get("authors") or []):
        if author["author"]["key"] == "__new__":
            a = self.new_author(formdata["authors"][i])
            author["author"]["key"] = a.key
            saveutil.save(a)
    if work_data:
        if self.work is None:
            self.work = self.new_work(self.edition)
            # NOTE(review): assumes self.edition is not None here — verify
            # against callers that edit only a work.
            self.edition.works = [{"key": self.work.key}]
        self.work.update(work_data)
        saveutil.save(self.work)
    if self.edition and edition_data:
        # popped fields get dedicated setters; the rest goes to update()
        identifiers = edition_data.pop("identifiers", [])
        self.edition.set_identifiers(identifiers)
        classifications = edition_data.pop("classifications", [])
        self.edition.set_classifications(classifications)
        self.edition.set_physical_dimensions(edition_data.pop("physical_dimensions", None))
        self.edition.set_weight(edition_data.pop("weight", None))
        self.edition.set_toc_text(edition_data.pop("table_of_contents", ""))
        # clear translation links unless explicitly flagged 'yes'
        if edition_data.pop("translation", None) != "yes":
            edition_data.translation_of = None
            edition_data.translated_from = None
        self.edition.update(edition_data)
        saveutil.save(self.edition)
    saveutil.commit(comment=comment, action="edit-book")
def process_input(self, i):
    """Split flattened form input into (work, edition) documents.

    Either element is None when the corresponding section is absent.
    """
    data = utils.unflatten(i)
    edition = self.process_edition(data.edition) if 'edition' in data else None
    work = self.process_work(data.work) if 'work' in data else None
    return work, edition
def save(self, formdata):
    """Update work and edition documents according to the specified formdata.

    Older variant that saves each document directly via _save instead of
    batching through a save helper.
    """
    comment = formdata.pop('_comment', '')
    user = web.ctx.site.get_user()
    # admins only; '_delete' is a control field
    delete = user and user.is_admin() and formdata.pop('_delete', '')
    formdata = utils.unflatten(formdata)
    work_data, edition_data = self.process_input(formdata)
    self.process_new_fields(formdata)
    if delete:
        if self.edition:
            self.delete(self.edition.key, comment=comment)
        if self.work and self.work.edition_count == 0:
            self.delete(self.work.key, comment=comment)
        return
    # NOTE(review): runs before any `if work_data:` check — if work_data can
    # be None this raises; confirm process_input always returns a mapping.
    for i, author in enumerate(work_data.get("authors") or []):
        if author['author']['key'] == "__new__":
            a = self.new_author(formdata['authors'][i])
            # each new author is saved immediately with a canned comment
            a._save(utils.get_message("comment_new_author"))
            author['author']['key'] = a.key
    # `not delete` is redundant here (delete already returned above);
    # kept byte-identical.
    if work_data and not delete:
        if self.work is None:
            self.work = self.new_work(self.edition)
            # NOTE(review): assumes self.edition is not None — verify callers.
            self.edition.works = [{'key': self.work.key}]
        self.work.update(work_data)
        self.work._save(comment=comment)
    if self.edition and edition_data:
        # special-cased fields are popped before the generic update()
        identifiers = edition_data.pop('identifiers', [])
        self.edition.set_identifiers(identifiers)
        classifications = edition_data.pop('classifications', [])
        self.edition.set_classifications(classifications)
        self.edition.set_physical_dimensions(edition_data.pop('physical_dimensions', None))
        self.edition.set_weight(edition_data.pop('weight', None))
        self.edition.set_toc_text(edition_data.pop('table_of_contents', ''))
        # translation metadata is cleared unless explicitly 'yes'
        if edition_data.pop('translation', None) != 'yes':
            edition_data.translation_of = None
            edition_data.translated_from = None
        self.edition.update(edition_data)
        self.edition._save(comment=comment)
def train(self, features, labels, iters):
    """Fit the network weights by minimizing the cost with scipy's TNC solver.

    Resets the loss history and iteration counter, appends the output layer,
    then optimizes the flattened parameter vector and stores the reshaped
    result back into self.thetas.
    """
    self.loss = []
    self.add_layer(self.outputs)
    self.theta_dims = [theta.shape for theta in self.thetas]
    self.iter = 0

    def objective(flat_params):
        # cost_function returns (J, gradient), matching jac=True below
        return self.cost_function(flat_params, features, labels)

    result = optimize.minimize(
        objective,
        utils.flatten(self.thetas),
        jac=True,
        method='TNC',
        options={'maxiter': iters},
    )
    self.thetas = utils.unflatten(result.x, self.theta_dims)
def _condition(self, prev, dt, xi0, q):
    """Integrate the rod and return the squared tip pose error vs. g_des.

    The residual stacks the elementwise-squared rotational error (log of the
    relative rotation, unskewed) on top of the squared position error.
    """
    # integrate and see if the tip condition is satisfied
    self._integrate(prev, dt, xi0, q)
    # tip frame reached by the integration
    tip = unflatten(self.g[-1, :])
    R_tip = tip[:3, :3]
    p_tip = tip[:3, 3]
    # desired tip frame
    R_goal = self.g_des[:3, :3]
    p_goal = self.g_des[:3, 3]
    rot_err = unskew(logm(R_tip.T @ R_goal))
    trans_err = p_tip - p_goal
    return np.concatenate([rot_err**2, trans_err**2])
def POST(self):
    """Handle the add-book form: spam/captcha checks, then match or create.

    Routes to one of: a rendered multi-match chooser, an existing edition
    match, an existing work match, or the no-match (create) path.
    """
    i = web.input(title="", publisher="", publish_date="", id_name="",
                  id_value="", _test="false")
    # reject spammy submissions outright
    if spamcheck.is_spam(i):
        return render_template(
            "message.html", "Oops",
            'Something went wrong. Please try again later.')
    # anonymous users must pass the recaptcha (when configured)
    if not web.ctx.site.get_user():
        recap = get_recaptcha()
        if recap and not recap.validate():
            return render_template(
                'message.html',
                'Recaptcha solution was incorrect',
                'Please <a href="javascript:history.back()">go back</a> and try again.'
            )
    i = utils.unflatten(i)
    saveutil = DocSaveHelper()
    # queue creation of any brand-new authors typed into the form
    created_author = saveutil.create_authors_from_form_data(
        i.authors, i.author_names, _test=i._test == 'true')
    # skip matching entirely when a new author was created
    match = None if created_author else self.find_matches(i)
    # test mode: report the match outcome as plain text instead of saving
    if i._test == 'true' and not isinstance(match, list):
        if match:
            return 'Matched <a href="%s">%s</a>' % (match.key, match.key)
        else:
            return 'No match found'
    if isinstance(match, list):
        # multiple matches
        return render_template('books/check', i, match)
    elif match and match.key.startswith('/books'):
        # work match and edition match, match is an Edition
        return self.work_edition_match(match)
    elif match and match.key.startswith('/works'):
        # work match but not edition
        work = match
        return self.work_match(saveutil, work, i)
    else:
        # no match
        return self.no_match(saveutil, i)
def _condition(self, prev, dt, xi0, q):
    """Integrate the rod and return the tip boundary-condition residual.

    The residual is the internal elastic load K @ (xi - xi_ref) at the tip
    minus the sum of all external tip loads; a root means the tip condition
    is satisfied.
    """
    # integrate and see if the tip condition is satisfied
    self._integrate(prev, dt, xi0, q)
    # accumulate all tip loads
    W = 0
    # state at the tip node
    g = unflatten(self.g[-1, :])
    xi = self.xi[-1, :]
    # BUG FIX: this previously read self.xi[-1, :], passing the strain as the
    # velocity to every tip load (cf. the eta_dot line below and the sibling
    # _condition, which both use self.eta).
    eta = self.eta[-1, :]
    xi_dot = (self.xi[-1, :] - prev.xi[-1, :]) / dt
    eta_dot = (self.eta[-1, :] - prev.eta[-1, :]) / dt
    for load in self.loads:
        W += load.tip_load(g, xi, eta, xi_dot, eta_dot, self, q)
    return self.K @ (self.xi[-1, :] - self.xi_ref) - W
def _condition(self, prev, dt, xi0, q):
    """Tip boundary-condition residual, evaluated for the final rod only.

    Returns Psi(xi_tip) minus the total external tip load on the last rod.
    """
    # same as before except just final rod
    self._integrate(prev, dt, xi0, q)
    last_rod = self.rods[-1]
    # state at the tip node
    tip_g = unflatten(self.g[-1, :])
    tip_xi = self.xi[-1, :]
    tip_eta = self.eta[-1, :]
    tip_xi_dot = (self.xi[-1, :] - prev.xi[-1, :]) / dt
    tip_eta_dot = (self.eta[-1, :] - prev.eta[-1, :]) / dt
    # total external tip load on the last rod
    total_load = 0
    for load in last_rod.loads:
        total_load += load.tip_load(tip_g, tip_xi, tip_eta,
                                    tip_xi_dot, tip_eta_dot, last_rod, q)
    return last_rod.body.Psi(tip_xi, last_rod.body.L) - total_load
def backward(self, dy):
    """Route upstream gradients back through the stored max locations.

    Processes one sample at a time: scatters dy into the column form at the
    recorded argmax indices, then folds the columns back to input shape.
    """
    batch = dy.shape[0]
    dx = np.zeros([batch, self.dim_in])
    rows = np.arange(self.dim_out)
    for n in range(batch):
        grad_cols = np.zeros([self.dim_out, self.dim_k])
        upstream = dy[n, ].reshape(self.shape).transpose([1, 2, 0]).ravel()
        # only the winning (max) positions receive gradient
        grad_cols[rows, self.max_indice[n, ]] = upstream
        dx[n, ] = utils.unflatten(grad_cols, self.shape_in, self.shape_k,
                                  self.pad, self.stride, self.indice).ravel()
    return dx
def backward(self, dy):
    """Backprop through the layer; caches accumulated weight/bias gradients.

    Returns the gradient w.r.t. the flattened input, one row per sample.
    """
    batch = dy.shape[0]
    dx = np.zeros([batch, self.dim_in])
    dw_total = np.zeros([self.dim_k, self.dout])
    db_total = np.zeros([1, self.dout])
    for n in range(batch):
        # reshape the upstream gradient to (positions, out-channels)
        dyi = dy[n, :].reshape(self.dout, -1).T
        dfx, dw_n, db_n = utils.backward(dyi, self.fx[n, ], self.w)
        dx[n, ] = utils.unflatten(dfx, self.shape_in, self.shape_k,
                                  self.pad, self.stride, self.indice).ravel()
        dw_total += dw_n
        db_total += db_n
    # parameter gradients are summed over the batch and cached for the update
    self.dw_cache = dw_total
    self.db_cache = db_total
    return dx
def cost_function(self, thetas, features, labels):
    """Compute the regularized cross-entropy cost and its gradient.

    Runs forward/backward propagation per training example and returns
    (J, flat_gradients), the shape scipy.optimize expects with jac=True.

    :param thetas: flat parameter vector (reshaped via self.theta_dims)
    :param features: training inputs, one example per row
    :param labels: integer class labels, one-hot encoded internally
    """
    self.iter += 1
    print(self.iter, end='... ')
    # restore layer-shaped weight matrices from the flat vector
    thetas = utils.unflatten(thetas, self.theta_dims)
    gradients = [np.zeros(t.shape) for t in thetas]
    num_layers = len(self.layers)
    J = 0
    m = features.shape[0]
    logg = np.vectorize(math.log)
    one_minus = np.vectorize(lambda z: 1 - z)
    for i in range(m):
        # one-hot encode the label for this example
        x, y = features[i], [
            int(o == labels[i]) for o in range(self.outputs)
        ]
        a = utils.forward_propagate(thetas, x)
        # cross-entropy contribution of this example
        J += sum(y * logg(a[-1]) + one_minus(y) * logg(one_minus(a[-1])))
        errors = [None] * num_layers
        errors[num_layers - 1] = a[-1] - y
        # backpropagate deltas; [1:] drops the bias unit's error
        for layer in range(num_layers - 2, 0, -1):
            errors[layer] = (
                (thetas[layer].T @ errors[layer + 1]) *
                np.vectorize(lambda x: x * (1 - x))(a[layer]))[1:]
        # accumulate outer-product gradients for each layer
        for layer in range(num_layers - 2, -1, -1):
            err = errors[layer + 1]
            a_l = a[layer]
            gradients[layer] += err.reshape((len(err), 1)) @ a_l.reshape(
                (1, len(a_l)))
    # average, scale by learning rate, and add L2 term (bias column excluded)
    for i in range(len(gradients)):
        gradients[i] = self.learning_rate * 1 / m * (
            gradients[i] + self.lambda_ * np.concatenate((np.zeros(
                (thetas[i].shape[0], 1)), thetas[i][:, 1:]), axis=1))
    J *= -1 / m
    # L2 regularization of the cost, excluding bias columns
    J += self.lambda_ / (2 * m) * sum(
        [np.linalg.norm(t[:, 1:])**2 for t in thetas])
    print('loss: {0}'.format(J))
    # flatten gradients back for the optimizer
    gradients = np.concatenate([a.ravel() for a in gradients])
    self.loss.append(J)
    return J, gradients
def save(self, formdata):
    """Update work and edition documents according to the specified formdata.

    Handles deletion (admin-only), new-author creation, orphaned-edition
    repair, optional new-work creation, and commits everything in one batch.

    :param web.storage formdata:
    :rtype: None
    """
    comment = formdata.pop('_comment', '')
    user = accounts.get_current_user()
    # '_delete' is honored for admins only
    delete = user and user.is_admin() and formdata.pop('_delete', '')
    formdata = utils.unflatten(formdata)
    work_data, edition_data = self.process_input(formdata)
    self.process_new_fields(formdata)
    saveutil = DocSaveHelper()
    if delete:
        if self.edition:
            self.delete(self.edition.key, comment=comment)
        # keep the work while other editions still reference it
        if self.work and self.work.edition_count == 0:
            self.delete(self.work.key, comment=comment)
        return
    just_editing_work = edition_data is None
    if work_data:
        # Create any new authors that were added
        saveutil.create_authors_from_form_data(
            work_data.get("authors") or [],
            formdata.get('authors') or [])
        if not just_editing_work:
            # Handle orphaned editions
            new_work_key = (edition_data.get('works') or [{
                'key': None
            }])[0]['key']
            if self.work is None and (new_work_key is None
                                      or new_work_key == '__new__'):
                # i.e. not moving to another work, create empty work
                self.work = self.new_work(self.edition)
                edition_data.works = [{'key': self.work.key}]
                work_data.key = self.work.key
            elif self.work is not None and new_work_key is None:
                # we're trying to create an orphan; let's not do that
                edition_data.works = [{'key': self.work.key}]
        if self.work is not None:
            self.work.update(work_data)
            saveutil.save(self.work)
    if self.edition and edition_data:
        # Create a new work if so desired
        new_work_key = (edition_data.get('works') or [{
            'key': None
        }])[0]['key']
        if new_work_key == "__new__" and self.work is not None:
            self.work = self.new_work(self.edition)
            edition_data.works = [{'key': self.work.key}]
            saveutil.save(self.work)
        # special-cased fields get dedicated setters before the bulk update()
        identifiers = edition_data.pop('identifiers', [])
        self.edition.set_identifiers(identifiers)
        classifications = edition_data.pop('classifications', [])
        self.edition.set_classifications(classifications)
        self.edition.set_physical_dimensions(
            edition_data.pop('physical_dimensions', None))
        self.edition.set_weight(edition_data.pop('weight', None))
        self.edition.set_toc_text(edition_data.pop('table_of_contents', ''))
        # clear translation metadata unless explicitly flagged 'yes'
        if edition_data.pop('translation', None) != 'yes':
            edition_data.translation_of = None
            edition_data.translated_from = None
        self.edition.update(edition_data)
        saveutil.save(self.edition)
    saveutil.commit(comment=comment, action="edit-book")
def generate_props(self):
    """Split each 'key:value' ftProps entry and hand the pairs to unflatten.

    A scalar ftProps attribute is promoted to a one-element list first.
    """
    raw = self.attrs.get('ftProps', [])
    if not isinstance(raw, list):
        raw = [raw]
    # split only on the first ':' so values may themselves contain colons
    pairs = (tuple(entry.split(':', 1)) for entry in raw)
    return utils.unflatten(pairs)
def _integrate(self, prev, dt, xi0, q, g0=np.eye(4), eta0=np.array([0, 0, 0, 0, 0, 0]), intermediate=False):
    """Implicit-midpoint spatial integration of the rod, quaternion variant.

    Marches xi (strain) and eta (velocity) along the body, advancing the
    half-step frame with a quaternion/position update, then time-steps the
    stored frames g (stored as [quaternion(4), position(3)]).

    :param prev: rod state at the previous time step (prev.xi/eta/g arrays)
    :param dt: time step size
    :param xi0: base strain twist, written into self.xi[0, :]
    :param q: generalized-coordinate/actuation argument forwarded to loads —
              semantics defined by the load objects
    :param g0: initial 4x4 frame (default identity; never mutated here)
    :param eta0: base velocity twist (default zeros; copied, never mutated)
    :param intermediate: when True, tip loads are folded into the distributed
                         load at the second-to-last node
    :return: the half-step frame g_half at the tip

    BUG FIX: the local quaternion used to be named ``q``, shadowing the
    parameter — from the second spatial node onward every
    ``load.dist_load(..., q, s)`` call received a quaternion instead of the
    caller's generalized coordinates. The local is now ``quat``.
    """
    self.xi[0, :] = xi0
    self.eta[0, :] = eta0
    # integration over the body (the initial point's values are already set)
    g_half = g0  # known initial condition
    for i in range(self.N - 1):
        s = i * self.ds
        # averaging over steps to get half step values
        xi_half = (self.xi[i, :] + prev.xi[i, :]) / 2
        eta_half = (self.eta[i, :] + prev.eta[i, :]) / 2
        # implicit midpoint approximation of the time derivatives
        xi_dot = (self.xi[i, :] - prev.xi[i, :]) / dt
        eta_dot = (self.eta[i, :] - prev.eta[i, :]) / dt
        # external loads: A_bar multiplies the strain, B_bar is additive
        A_bar = 0
        B_bar = 0
        # viscosity
        B_bar += self.body.viscosity(xi_dot, s)
        # other distributed loads
        for load in self.loads:
            A, B = load.dist_load(g_half, xi_half, eta_half, xi_dot,
                                  eta_dot, self, q, s)
            A_bar += A
            B_bar += B
        # optionally fold tip loads into the last interior node
        if intermediate and i == self.N - 2:
            for load in self.loads:
                W = load.tip_load(g_half, xi_half, eta_half, xi_dot,
                                  eta_dot, self, q)
                B_bar += W
        # spatial derivatives (momentum balance solved for xi')
        xi_der = np.linalg.inv(self.body.Psi_der(xi_half, s) - A_bar) @ (
            (self.body.M(s) @ eta_dot) -
            (adjoint(eta_half).T @ self.body.M(s) @ eta_half) +
            (adjoint(xi_half).T @ self.body.Psi(xi_half, s)) + B_bar -
            self.body.Psi_prime(xi_half, s))
        eta_der = xi_dot - (adjoint(xi_half) @ eta_half)
        # explicit Euler step in space
        xi_half_next = xi_half + self.ds * xi_der
        eta_half_next = eta_half + self.ds * eta_der
        # advance the half-step frame: position along the linear strain,
        # orientation via the quaternion kinematic equation
        R = g_half[:3, :3]
        p = g_half[:3, 3]
        p = p + self.ds * R @ xi_half[3:]
        quat = toQuaternion(R)
        quat = quat + self.ds / 2 * np.array(
            [[0, -xi_half[0], -xi_half[1], -xi_half[2]],
             [xi_half[0], 0, xi_half[2], -xi_half[1]],
             [xi_half[1], -xi_half[2], 0, xi_half[0]],
             [xi_half[2], xi_half[1], -xi_half[0], 0]]) @ quat
        # NOTE(review): quat is not renormalized here — presumably unflatten
        # or downstream use tolerates the drift; confirm.
        g_half = unflatten(np.concatenate([quat, p]))
        # determine next step from half step value
        self.xi[i + 1, :] = 2 * xi_half_next - prev.xi[i + 1, :]
        self.eta[i + 1, :] = 2 * eta_half_next - prev.eta[i + 1, :]
    # midpoint stepping of the stored g values over time
    for i in range(self.N):
        eta_half = (self.eta[i, :] + prev.eta[i, :]) / 2
        quat = prev.g[i, :4]
        p = prev.g[i, 4:]
        quat = quat + dt / 2 * np.array(
            [[0, -eta_half[0], -eta_half[1], -eta_half[2]],
             [eta_half[0], 0, eta_half[2], -eta_half[1]],
             [eta_half[1], -eta_half[2], 0, eta_half[0]],
             [eta_half[2], eta_half[1], -eta_half[0], 0]]) @ quat
        # position advanced with the rotation at the quaternion midpoint
        p = p + dt * toMatrix((quat + prev.g[i, :4]) / 2) @ eta_half[3:]
        self.g[i, :] = np.concatenate([quat, p])
    return g_half
def _initRods(self):
    """Initialize each rod in series, chaining tip frames to base frames."""
    frame = np.eye(4)
    for rod in self.rods:
        rod._initRod(frame)
        # the next rod starts where this one ends
        frame = unflatten(rod.g[-1, :])
def set_flat_weights(self, flat_weights):
    """Rebuild structured weights from a flat vector and install them."""
    structured = utils.unflatten(flat_weights, self.weights_shapes)
    self.set_weights({self.weights_name: structured})