def integrate_clenshaw(f, a, b, maxn = 2**16, tol = 10e-16, debug_info = True, debug_plot = True): n = 3 prevI = None nodes = None cm = convergence_monitor() while n <= maxn: coeffs = clenshaw_coefficients(n) if nodes is None: nodes = cheb_nodes(n, a, b) fs = f(nodes) else: new_nodes = incremental_cheb_nodes(n, a, b) new_fs = f(new_nodes) nodes, fs = combine_interpolation_nodes_fast(nodes, fs, new_nodes, new_fs) I = dot(fs, coeffs) * 0.5 * (b - a) if prevI is not None: err = abs(I - prevI) cm.add(err, I) if cm.test_convergence()[0]: break prevI = I n = 2 * n - 1 if debug_info: print "====" if debug_plot and n >= maxn: # problems with integration debug_plot(a, b, nodes, fs, coeffs) return I, err
def adaptive_interp(self, par=None): if par is None: par = params.interpolation # increase number of nodes until error is small maxn = par.maxn n = len(self.Xs) old_err = None cm = convergence_monitor(par=par.convergence) while n <= maxn: new_n = 3 * n new_Xs = self.get_incremental_nodes1(new_n) new_Ys = self.f(new_Xs) err = self.test_accuracy(new_Xs, new_Ys) maxy = max(abs(new_Ys).max(), abs(self.Ys).max()) if par.debug_info: print "interp. err1", err, maxy, old_err, "nodes=", n, maxn cm.add(err, maxy) if cm.test_convergence()[0]: break old_err = err n = new_n self.add_nodes(new_Xs, new_Ys) self.n = n if par.debug_plot and n >= maxn: debug_plot(self.a, self.b, self.Xs, self.Ys, None) if par.debug_info: print "interp. err1 = ", err, "nodes=", n self.err = err
def supervised_train_loop(model, optimizer, train_dataset, avg_loss, mIoU, iters):
    """Run one supervised training pass over train_dataset.

    For each batch: augment, forward pass, masked sparse-softmax
    cross-entropy, gradient step, then update the avg_loss and mIoU metrics.
    Every FLAGS.debug_freq batches (when positive) a debug plot is emitted.

    Parameters
    ----------
    model : callable Keras-style model producing per-pixel logits
    optimizer : tf.keras optimizer applied to model.trainable_variables
    train_dataset : iterable of (images, labels) batches
    avg_loss, mIoU : tf.keras metrics updated in place
    iters : accepted for interface compatibility; currently unused

    Side effects: mutates model weights and both metric objects.
    """
    i = 0
    b = 0
    for images, labels in train_dataset:
        # with tf.device('/GPU:0'):
        images, labels = augment(images, labels)
        with tf.GradientTape() as tape:
            logits = model(images)
            # mask out invalid (e.g. ignore-label) pixels before the loss
            valid_labels, valid_logits = valid_mask_preds(labels, logits)
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                labels=valid_labels, logits=valid_logits)
            loss = tf.reduce_mean(loss)
        gradients = tape.gradient(loss, model.trainable_variables)
        optimizer.apply_gradients(
            grads_and_vars=zip(gradients, model.trainable_variables))
        # argmax(softmax(x)) == argmax(x) since softmax is strictly monotonic,
        # so the softmax is dropped; computed outside the tape because preds
        # never feed the loss, avoiding needlessly recorded ops
        preds = tf.argmax(logits, axis=-1)
        avg_loss.update_state(loss)
        valid_lbls, valid_preds = valid_mask_preds(labels, preds)
        mIoU.update_state(valid_lbls, valid_preds)
        if 0 < FLAGS.debug_freq <= b:
            debug_plot(images, labels, preds, i, b)
            b = 0
        else:
            b += 1
        i += 1
def integrate_fejer2(f, a, b, par=None, maxn=2**10, tol=finfo(double).eps, debug_info=False, debug_plot=False):
    """Adaptively integrate f over (a, b) using Fejer's second rule.

    Node counts grow as n -> 2*n - 1 so interior Chebyshev nodes nest and
    f-evaluations are reused.  Convergence is judged by a convergence_monitor
    on successive estimate differences; the best recorded result is returned.

    NOTE(review): an identical integrate_fejer2 definition appears again
    later in this file and shadows this one at import time -- confirm which
    copy is intended to survive.

    Parameters: par (optional) overrides maxn/debug_plot/debug_info and
    supplies convergence settings; debug_plot, when truthy, is presumably a
    callable plotting hook -- TODO confirm against callers.
    Returns (I, err): best estimate and its error from the monitor.
    """
    if par is not None:
        # options from `par` take precedence over the keyword defaults
        maxn = par.maxn
        debug_plot = par.debug_plot
        debug_info = par.debug_info
        cm = convergence_monitor(par=par.convergence)
    else:
        cm = convergence_monitor()
    n = 65
    prevI = None
    nodes = None
    while n <= maxn:
        coeffs = fejer2_coefficients(n)
        if nodes is None:
            # first pass: Fejer-2 is an open rule -- drop the two endpoints
            nodes = cheb_nodes(n, a, b)[1:-1]
            fs = f(nodes)
        else:
            # later passes: evaluate f only at the nodes new to this level
            new_nodes = incremental_cheb_nodes(n, a, b)
            new_fs = f(new_nodes)
            # roles of new and old nodes are reversed in the call below
            nodes, fs = combine_interpolation_nodes_fast(
                new_nodes, new_fs, nodes, fs)
        # quadrature estimate, rescaled from [-1, 1] to [a, b]
        I = dot(fs, coeffs) * (b - a) / 2
        if prevI is not None:
            err = abs(I - prevI)
            if debug_info:
                print repr(I), err, n, I + err == I, err <= abs(I) * tol, min(nodes), max(nodes), min(fs), max(fs)
            cm.add(err, I)
            if cm.test_convergence()[0]:
                break
        prevI = I
        n = 2 * n - 1
    if debug_info:
        print "===="
    if debug_plot and n >= maxn:
        # problems with integration
        debug_plot(a, b, nodes, fs, coeffs)
    # return currently best result
    I, err, _extra = cm.get_best_result()
    return I, err
def integrate_fejer2(f, a, b, par = None, maxn = 2**10, tol = finfo(double).eps, debug_info = False, debug_plot = False): if par is not None: maxn = par.maxn debug_plot = par.debug_plot debug_info = par.debug_info cm = convergence_monitor(par = par.convergence) else: cm = convergence_monitor() n = 65 prevI = None nodes = None while n <= maxn: coeffs = fejer2_coefficients(n) if nodes is None: nodes = cheb_nodes(n, a, b)[1:-1] fs = f(nodes) else: new_nodes = incremental_cheb_nodes(n, a, b) new_fs = f(new_nodes) # roles of new and old nodes are reversed in the call below nodes, fs = combine_interpolation_nodes_fast(new_nodes, new_fs, nodes, fs) I = dot(fs, coeffs) * (b - a) / 2 if prevI is not None: err = abs(I - prevI) if debug_info: print repr(I), err, n, I + err == I, err <= abs(I) * tol, min(nodes), max(nodes), min(fs), max(fs) cm.add(err, I) if cm.test_convergence()[0]: break prevI = I n = 2 * n - 1 if debug_info: print "====" if debug_plot and n >= maxn: # problems with integration debug_plot(a, b, nodes, fs, coeffs) # return currently best result I, err, _extra = cm.get_best_result() return I, err