def get_extrema(s, k, i):
    fc = s.drillstring.pdm.failure
    mu, cov, c = get_deg_block(s, k, i)
    inv = np.linalg.inv(cov)
    rad = np.sqrt(np.diag(cov)[:, None])  # per-dimension standard deviations
    X = s.X[k:i]
    sep = Sep(X)
    m = sep.m
    xub = fc.rogp.warp_inv(mu + rad)  # NOTE: only the lower bound is used below
    xlb = fc.rogp.warp_inv(mu - rad)
    r = _pyomo_to_np(m.r, ind=X)
    hz = fc.rogp.warp(r)
    diff = hz - mu
    # squared Mahalanobis distance of the warped point from the latent mean
    c = np.matmul(np.matmul(diff.T, inv), diff)[0, 0]
    # drive the solution onto the unit confidence ellipsoid
    obj = (c - 1)**2
    m.Obj = p.Objective(expr=obj, sense=p.minimize)
    extrema = []
    for j in range(mu.shape[0]):  # renamed from `i` to avoid shadowing the argument
        m.r[X[j]].value = xlb[j]
        m.r[X[j]].fixed = True
        utils.solve(sep, solver='Baron')
        r = _pyomo_to_np(m.r, ind=X, evaluate=True)
        hz = fc.rogp.warp(r)
        extrema.append(hz)
        m.r[X[j]].fixed = False
    return extrema
def check_deg_block(s, k, i):
    fc = s.drillstring.pdm.failure
    fc.rogp.set_tanh(False)
    # Initialize parameters
    alpha = 1 - (1 - s.alpha) / (len(s.Xm) + 1)
    F = sp.stats.norm.ppf(alpha)
    X = s.X[k:i]
    Xvar = s.Xvar
    delta = {s.X[j]: Xvar[j + 1] - Xvar[j] for j in range(k, i)}
    dp = [[s.m.rop[x].deltap()] for x in X]
    dp = _to_np_obj_array(dp)
    # TODO: make eps = 0.001 a parameter
    dt = [[delta[x] / (s.m.rop[x].V + 0.001)] for x in X]
    dt = [[x[0]()] for x in dt]
    dt = _to_np_obj_array(dt)
    sep = Sep(X)
    r = _pyomo_to_np(sep.m.r, ind=X)
    # Calculate matrices
    Sig = fc.rogp.predict_cov_latent(dp).astype('float')
    inv = np.linalg.inv(Sig)
    hz = fc.rogp.warp(r)
    mu = fc.rogp.predict_mu_latent(dp)
    diff = hz - mu
    # objective: maximize dt'r subject to the latent confidence region
    obj = np.matmul(dt.T, r)[0, 0]
    sep.m.Obj = p.Objective(expr=obj, sense=p.maximize)
    c = np.matmul(np.matmul(diff.T, inv), diff)[0, 0]
    sep.m.cons.add(c <= F)
    utils.solve(sep, solver='Baron')
    if obj() - 1.0 > 10e-5:
        return False
    return True
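# Both functions above hinge on the same quantity: the squared Mahalanobis
# distance diff' Sig^-1 diff between the warped point and the latent GP mean,
# pinned to 1 in get_extrema and bounded by the quantile F in check_deg_block.
# A minimal self-contained sketch of that test (the values below are made-up
# placeholders, not outputs of the model):
import numpy as np
import scipy.stats

mu = np.array([[0.2], [0.5]])       # placeholder latent mean
Sig = np.array([[0.04, 0.01],
                [0.01, 0.09]])      # placeholder latent covariance
hz = np.array([[0.3], [0.4]])       # placeholder warped point

diff = hz - mu
c = float(diff.T @ np.linalg.inv(Sig) @ diff)  # squared Mahalanobis distance
F = scipy.stats.norm.ppf(0.95)                 # placeholder confidence level
print(c <= F)                                  # inside the confidence region?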
def main():
    sudokus_df = pd.read_csv("data/sudoku.csv")
    # Each quiz is an 81-character digit string; convert to a 9x9 int array.
    # np.fromstring is deprecated, so decode via frombuffer instead.
    sudokus = np.array([
        (np.frombuffer(q.encode(), np.int8) - ord("0")).reshape(9, 9)
        for q in sudokus_df["quizzes"].to_numpy()
    ])
    start_time = time.time()
    for puzzle in tqdm(sudokus):
        solve(puzzle)
    t = time.time() - start_time
    print(t)                 # total solve time
    print(t / len(sudokus))  # average time per puzzle
    np.save("data/sudokus.npy", sudokus)
def iteration(self, theta):
    # features of the actions actually taken
    features = self.phi_state[np.arange(len(self.actions)), self.actions, :]
    # greedy next actions under the current parameter vector
    next_actions = np.argmax(np.dot(self.phi_nstate, theta), axis=1)
    next_features = self.phi_nstate[np.arange(len(next_actions)), next_actions, :]
    # LSTD-Q statistics, with a small ridge term for numerical stability
    self.A = np.sum(np.einsum('ij,ik->ijk', features,
                              features - self.gamma * next_features), axis=0) \
        + 0.001 * np.eye(self.basis.size())
    self.b = np.sum(features * self.rewards[:, None], axis=0)
    return solve(self.A, self.b)[0]
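# The statistics above are the standard LSTD-Q construction
# A = sum_t phi_t (phi_t - gamma * phi'_t)^T + ridge and b = sum_t phi_t r_t.
# A self-contained numpy sketch of the same construction on random placeholder
# data (the shapes and the 0.001 ridge mirror the method; everything else is
# made up for illustration):
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_feats, gamma = 100, 4, 0.95
phi = rng.standard_normal((n_samples, n_feats))       # features of taken actions
phi_next = rng.standard_normal((n_samples, n_feats))  # features of greedy next actions
rewards = rng.standard_normal(n_samples)

A = np.einsum('ij,ik->jk', phi, phi - gamma * phi_next) + 0.001 * np.eye(n_feats)
b = phi.T @ rewards
theta = np.linalg.solve(A, b)
print(theta)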
def single_LS_step_length_ht(graph, j, alpha=0.15, python=False):
    N = graph.number_of_nodes()
    M = nx.to_numpy_array(graph)  # to_numpy_matrix is deprecated in networkx
    for i in range(N):
        # Normalize each row to make M row-stochastic
        if M[i].sum() != 0:
            M[i] /= M[i].sum()
    M[j] = 0  # Remove out-edges of j
    A = np.eye(N) - (1 - alpha) * M
    b = np.repeat(1 - alpha, N)
    b[j] = 0
    if python:
        return -utils.solve(A.tolist(), b.tolist())
    else:
        return -np.linalg.solve(A, b)
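# A hedged usage sketch on a toy graph: the function solves the linear system
# (I - (1 - alpha) M) x = b with node j's out-edges removed. Assumes networkx
# and numpy are importable, as the function itself already requires:
import networkx as nx

G = nx.path_graph(5)                               # toy example graph
ht = single_LS_step_length_ht(G, j=2, alpha=0.15)  # one value per node
print(ht)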
def solution(tests, train_problems, train_users, test_problems, test_users,
             submissions):
    # time.clock() was removed in Python 3.8; use perf_counter instead
    print("u1p1 has started, time =", time.perf_counter())
    purifying._purify_submissions_unique_rows(submissions)
    extend_data(train_problems, train_users, submissions)
    return utils.solve(
        train_problems,
        train_users,
        submissions,
        tests,
        train_problems,
        train_users,
        NUMBER_OF_FEATURES,
        get_feature_vector,
    )
def rauch_tung_striebel_smoother(self, params, m_filtered, P_filtered, dt,
                                 store=False, return_full=False, y=None,
                                 site_params=None, r=None):
    """
    Run the RTS smoother to get p(fₙ|y₁,...,y_N), i.e. compute p(f)𝚷ₙsₙ(fₙ)
    where sₙ(fₙ) are the sites (approx. likelihoods).
    If sites are provided, then it is assumed they are to be updated, which is
    done by calling the site-specific update() method.
    :param params: the model parameters, i.e the hyperparameters of the prior & likelihood
    :param m_filtered: the intermediate distribution means computed during filtering [N, state_dim, 1]
    :param P_filtered: the intermediate distribution covariances computed during filtering [N, state_dim, state_dim]
    :param dt: step sizes Δtₙ = tₙ - tₙ₋₁ [N, 1]
    :param store: a flag determining whether to store and return state mean and covariance
    :param return_full: a flag determining whether to return the full state distribution or just the function(s)
    :param y: observed data [N, obs_dim]
    :param site_params: the Gaussian approximate likelihoods [2, N, obs_dim]
    :param r: spatial input locations
    :return:
        var_exp: the sum of the variational expectations [scalar]
        smoothed_mean: the posterior marginal means [N, obs_dim]
        smoothed_var: the posterior marginal variances [N, obs_dim]
        site_params: the updated sites [2, N, obs_dim]
    """
    theta_prior, theta_lik = softplus_list(params[0]), softplus(params[1])
    # all model components that are not static must be computed inside the function
    self.update_model(theta_prior)
    N = dt.shape[0]
    dt = np.concatenate([dt[1:], np.array([0.0])], axis=0)
    with loops.Scope() as s:
        s.m, s.P = m_filtered[-1, ...], P_filtered[-1, ...]
        if return_full:
            s.smoothed_mean = np.zeros([N, self.state_dim, 1])
            s.smoothed_cov = np.zeros([N, self.state_dim, self.state_dim])
        else:
            s.smoothed_mean = np.zeros([N, self.func_dim, 1])
            s.smoothed_cov = np.zeros([N, self.func_dim, self.func_dim])
        if site_params is not None:
            s.site_mean = np.zeros([N, self.func_dim, 1])
            s.site_var = np.zeros([N, self.func_dim, self.func_dim])
        for n in s.range(N - 1, -1, -1):
            # --- First compute the smoothing distribution: ---
            # closed form integration of the transition matrix
            A = self.prior.state_transition(dt[n], theta_prior)
            m_predicted = A @ m_filtered[n, ...]
            tmp_gain_cov = A @ P_filtered[n, ...]
            P_predicted = A @ (P_filtered[n, ...] - self.Pinf) @ A.T + self.Pinf
            # backward Kalman gain:
            #   G = F * A' * P^{-1}
            # since both F(iltered) and P(redictive) are cov matrices, thus
            # self-adjoint, we can take the transpose:
            #   = (P^{-1} * A * F)'
            G_transpose = solve(P_predicted, tmp_gain_cov)  # (P^-1)AF
            s.m = m_filtered[n, ...] + G_transpose.T @ (s.m - m_predicted)
            s.P = P_filtered[n, ...] \
                + G_transpose.T @ (s.P - P_predicted) @ G_transpose
            H = self.prior.measurement_model(r[n], theta_prior)
            if store:
                if return_full:
                    s.smoothed_mean = index_add(s.smoothed_mean, index[n, ...], s.m)
                    s.smoothed_cov = index_add(s.smoothed_cov, index[n, ...], s.P)
                else:
                    s.smoothed_mean = index_add(s.smoothed_mean, index[n, ...], H @ s.m)
                    s.smoothed_cov = index_add(s.smoothed_cov, index[n, ...],
                                               H @ s.P @ H.T)
            # --- Now update the site parameters: ---
            if site_params is not None:
                # extract mean and var from state:
                post_mean, post_cov = H @ s.m, H @ s.P @ H.T
                # calculate the new sites
                _, site_mu, site_cov = self.sites.update(
                    self.likelihood, y[n][..., np.newaxis], post_mean, post_cov,
                    theta_lik, (site_params[0][n], site_params[1][n]))
                s.site_mean = index_add(s.site_mean, index[n, ...], site_mu)
                s.site_var = index_add(s.site_var, index[n, ...], site_cov)
    if site_params is not None:
        site_params = (s.site_mean, s.site_var)
    if store:
        return site_params, s.smoothed_mean, s.smoothed_cov
    return site_params
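# The gain trick in the comment above, spelled out: instead of forming P^-1
# explicitly, the code obtains G' = P^-1 A F from a linear solve, so that
# G = (P^-1 A F)' = F A' P^-1 for symmetric F and P. A standalone check with
# arbitrary stand-in matrices (plain numpy, not values from the model):
import numpy as np

rng = np.random.default_rng(0)
F = np.eye(3) * 0.5                    # stand-in filtered covariance
A = rng.standard_normal((3, 3))        # stand-in transition matrix
P = A @ F @ A.T + np.eye(3)            # stand-in predicted covariance

G_naive = F @ A.T @ np.linalg.inv(P)   # direct formula
G_solve = np.linalg.solve(P, A @ F).T  # solve-based version used above
print(np.allclose(G_naive, G_solve))   # True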
def kalman_filter(self, y, dt, params, store=False, mask=None,
                  site_params=None, r=None):
    """
    Run the Kalman filter to get p(fₙ|y₁,...,yₙ).
    The Kalman update step involves some control flow to work out whether we are
        i) initialising the sites
        ii) using supplied sites
        iii) performing a Gaussian update with fixed parameters (e.g. in posterior sampling or ELBO calc.)
    If store is True then we compute and return the intermediate filtering distributions
    p(fₙ|y₁,...,yₙ) and sites sₙ(fₙ), otherwise we do not store the intermediates and
    simply return the energy / negative log-marginal likelihood, -log p(y).
    :param y: observed data [N, obs_dim]
    :param dt: step sizes Δtₙ = tₙ - tₙ₋₁ [N, 1]
    :param params: the model parameters, i.e the hyperparameters of the prior & likelihood
    :param store: flag to notify whether to store the intermediates
    :param mask: boolean array signifying which elements of y are observed [N, obs_dim]
    :param site_params: the Gaussian approximate likelihoods [2, N, obs_dim]
    :param r: spatial input locations
    :return:
        if store is True:
            neg_log_marg_lik: the filter energy, i.e. negative log-marginal likelihood -log p(y),
                              used for hyperparameter optimisation (learning) [scalar]
            filtered_mean: intermediate filtering means [N, state_dim, 1]
            filtered_cov: intermediate filtering covariances [N, state_dim, state_dim]
            site_mean: mean of the approximate likelihood sₙ(fₙ) [N, obs_dim]
            site_cov: variance of the approximate likelihood sₙ(fₙ) [N, obs_dim]
        otherwise:
            neg_log_marg_lik: the filter energy, i.e. negative log-marginal likelihood -log p(y),
                              used for hyperparameter optimisation (learning) [scalar]
    """
    theta_prior, theta_lik = softplus_list(params[0]), softplus(params[1])
    # all model components that are not static must be computed inside the function
    self.update_model(theta_prior)
    N = dt.shape[0]
    with loops.Scope() as s:
        s.neg_log_marg_lik = 0.0  # negative log-marginal likelihood
        s.m, s.P = self.minf, self.Pinf
        if store:
            s.filtered_mean = np.zeros([N, self.state_dim, 1])
            s.filtered_cov = np.zeros([N, self.state_dim, self.state_dim])
            s.site_mean = np.zeros([N, self.func_dim, 1])
            s.site_cov = np.zeros([N, self.func_dim, self.func_dim])
        for n in s.range(N):
            y_n = y[n][..., np.newaxis]
            # -- KALMAN PREDICT --
            #  mₙ⁻ = Aₙ mₙ₋₁
            #  Pₙ⁻ = Aₙ Pₙ₋₁ Aₙ' + Qₙ, where Qₙ = Pinf - Aₙ Pinf Aₙ'
            A = self.prior.state_transition(dt[n], theta_prior)
            m_ = A @ s.m
            P_ = A @ (s.P - self.Pinf) @ A.T + self.Pinf
            # --- KALMAN UPDATE ---
            # Given previous predicted mean mₙ⁻ and cov Pₙ⁻, incorporate yₙ to get
            # filtered mean mₙ & cov Pₙ and compute the marginal likelihood
            # p(yₙ|y₁,...,yₙ₋₁)
            H = self.prior.measurement_model(r[n], theta_prior)
            predict_mean = H @ m_
            predict_cov = H @ P_ @ H.T
            if mask is not None:
                # note: this is a bit redundant but may come in handy in multi-output problems
                y_n = np.where(mask[n][..., np.newaxis],
                               predict_mean[:y_n.shape[0]],
                               y_n)  # fill in masked obs with expectation
            log_lik_n, site_mean, site_cov = self.sites.update(
                self.likelihood, y_n, predict_mean, predict_cov, theta_lik, None)
            if site_params is not None:
                # use supplied site parameters to perform the update
                site_mean, site_cov = site_params[0][n], site_params[1][n]
            # modified Kalman update (see Nickisch et al. ICML 2018 or
            # Wilkinson et al. ICML 2019):
            S = predict_cov + site_cov
            HP = H @ P_
            K = solve(S, HP).T  # PH'(S^-1)
            s.m = m_ + K @ (site_mean - predict_mean)
            s.P = P_ - K @ HP
            if mask is not None:
                # note: this is a bit redundant but may come in handy in multi-output problems
                s.m = np.where(np.any(mask[n]), m_, s.m)
                s.P = np.where(np.any(mask[n]), P_, s.P)
                log_lik_n = np.where(mask[n][..., 0], np.zeros_like(log_lik_n),
                                     log_lik_n)
            s.neg_log_marg_lik -= np.sum(log_lik_n)
            if store:
                s.filtered_mean = index_add(s.filtered_mean, index[n, ...], s.m)
                s.filtered_cov = index_add(s.filtered_cov, index[n, ...], s.P)
                s.site_mean = index_add(s.site_mean, index[n, ...], site_mean)
                s.site_cov = index_add(s.site_cov, index[n, ...], site_cov)
    if store:
        return s.neg_log_marg_lik, (s.filtered_mean, s.filtered_cov,
                                    (s.site_mean, s.site_cov))
    return s.neg_log_marg_lik
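# As with the smoother gain, the update avoids an explicit inverse:
# K = P H' S^-1 is recovered by transposing the solution of S X = H P.
# A toy check with arbitrary stand-in matrices (plain numpy, not model values):
import numpy as np

P_ = np.array([[2.0, 0.3], [0.3, 1.0]])  # stand-in predicted covariance
H = np.array([[1.0, 0.0]])               # stand-in measurement model
site_cov = np.array([[0.5]])             # stand-in site variance

S = H @ P_ @ H.T + site_cov
K_naive = P_ @ H.T @ np.linalg.inv(S)    # textbook Kalman gain
K_solve = np.linalg.solve(S, H @ P_).T   # solve-based version used above
print(np.allclose(K_naive, K_solve))     # True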
            u.move(path[0])
            adj_targets = u.adjacents(targets) if targets else None
            if adj_targets:
                kill = u.hit(adj_targets[0])
                if eap > 3 and kill:
                    return None
            if not targets:
                break
        rn += 1
    score = sum(u.hp for u in units if u.hp > 0)
    return rn * score


def p1(lines):
    return _solve(lines)


def p2(lines):
    for eap in range(4, 35):
        outcome = _solve(lines, eap)
        if outcome:
            return outcome


if __name__ == "__main__":
    solve(sys.argv, str_line, p1, p2)
def p1(lines):
    n = lines[0]
    scores = '37'
    e = 0, 1
    for i in range(1, n + 10):
        ra, rb = int(scores[e[0]]), int(scores[e[1]])
        scores += f"{ra + rb}"
        e = ((e[0] + 1 + ra) % len(scores), (e[1] + 1 + rb) % len(scores))
    return scores[n:n + 10]


def p2(lines):
    n = [int(d) for d in str(lines[0])]
    scores = [3, 7]
    e = 0, 1
    while scores[-len(n):] != n and scores[-len(n) - 1:-1] != n:
        ra, rb = scores[e[0]], scores[e[1]]
        nr = ra + rb
        scores.extend(divmod(nr, 10) if nr >= 10 else (nr, ))
        e = ((e[0] + 1 + ra) % len(scores), (e[1] + 1 + rb) % len(scores))
    return len(scores) - len(n) - (0 if scores[-len(n):] == n else 1)


if __name__ == "__main__":
    solve([79303], int_line, p1, p2)
from qubo_constructor import construct_tsp_matrix, construct_traffic_matrix
from utils import solve
from configs import PROBLEMS

dist_matrix = [[0, 5, 1, 7],
               [5, 0, 4, 2],
               [1, 4, 0, 8],
               [7, 2, 8, 0]]
share_pairs = [((1, 1), (2, 1)), ((1, 2), (2, 1)), ((1, 3), (2, 1)),
               ((3, 1), (2, 2)), ((3, 2), (1, 3)), ((3, 2), (1, 1))]

best_solution, distribution = solve(construct_tsp_matrix(dist_matrix),
                                    PROBLEMS['TSP'], True)
print(best_solution)
        seen, doors = navigate(p, opt + line[end + 1:], seen, doors)
    return seen, doors


def p1(lines):
    global distances
    start = (0, 0)
    _, doors = navigate(start, lines[0].strip(), set(), defaultdict(set))
    # BFS over the door graph to find the shortest distance to every room
    distances = {start: 0}
    q = Queue()
    q.put(start)
    while not q.empty():
        src = q.get()
        for dst in doors[src]:
            if dst not in distances:
                distances[dst] = distances[src] + 1
                q.put(dst)
    return max(distances.values())


def p2(_):
    global distances
    return len([k for k in distances if distances[k] >= 1000])


if __name__ == "__main__":
    solve(read_input(), lambda x: x, p1, p2)
    # r4: IP
    # r5: inner counter
    print(r)
    # Naive translation of the program's nested loops:
    # for r3 in range(1, r[2] + 1):
    #     for r5 in range(1, r[2] + 1):
    #         if r3 * r5 == r[2]:
    #             r[0] += r3
    # Optimized equivalent: sum the divisors of r[2]
    for r3 in range(1, r[2] + 1):
        if r[2] % r3 == 0:
            r[0] += r3
    return r[0]
    # Original emulator step (short-circuited by the return above):
    r = globals()[program[r[ip]][0]](r, *program[r[ip]][1:])
    r[ip] += 1


def process_line(line):
    if not line.strip() or line.strip().startswith('#'):
        return None
    s = line.strip().split(' ')
    return s[0], int(s[1]), int(s[2]), int(s[3])


def get_input():
    lines = read_input()
    return int(lines[0][4]), [l for l in map(process_line, lines[1:]) if l]


if __name__ == "__main__":
    solve(get_input(), lambda x: x, p1, p2)
        self.current = l
        return 0

    def place23(self, m):
        l = (self.current - 7) % len(self.placed)
        s = m + self.placed.pop(l)
        self.left.remove(m)
        self.current = l
        return s


def p1(lines):
    p, last = lines
    scores = defaultdict(int)
    current_p = 0
    circle = Circle(last)
    while len(circle.left) > 0:
        score = circle.place()
        scores[current_p] += score
        current_p = (current_p + 1) % p
    return max(scores.values())


def p2(_):
    return None


if __name__ == "__main__":
    solve((424, 71482), lambda x: x, p1, p2)
def solve(self, solver='Ipopt', options=None):
    # use None instead of a mutable default argument
    if options is None:
        options = {}
    return utils.solve(self, solver, options)
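# The options=None guard above sidesteps Python's shared-mutable-default
# pitfall: a default dict is created once and reused across calls. A toy
# illustration (not from the source) of what options={} would risk if the
# dict were ever mutated:
def risky(options={}):
    options.setdefault('calls', 0)
    options['calls'] += 1  # mutates the single shared default dict
    return options

print(risky())  # {'calls': 1}
print(risky())  # {'calls': 2} -- state leaked between calls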
def solution(tests, train_problems, train_users, test_problems, test_users,
             submissions):
    purifying._purify_submissions_unique_rows(submissions)
    return utils.solve(train_problems, train_users, submissions, tests,
                       test_problems, test_users, NUMBER_OF_FEATURES,
                       get_feature_vector)
writeBDF('output/mesh.bdf', nodes, bars + 1)

mat, Kmat = get_matrix(EA, nodes, bars, constrained)
rhs = numpy.zeros(mat.shape[0])
rhs[:3 * len(forces)] = forces.flatten()
sol = solve(mat, rhs)[:3 * len(forces)]

sol_surf = sol.reshape(xyz.shape)
xyz += sol_surf

surfs = []
surfs.append(xyz[:, :, 0, :])
surfs.append(xyz[:, :, -1, :])
surfs.append(xyz[:, 0, :, :])
surfs.append(xyz[:, -1, :, :])
surfs.append(xyz[0, :, :, :])
surfs.append(xyz[-1, :, :, :])

tecwrite.write_surf_multi('output/surf2.dat', surfs)
writeBDF('output/mesh2.bdf', nodes + sol.reshape(nodes.shape), bars + 1)
#!/usr/bin/env python
import itertools
import os
import sys

sys.path.insert(0, os.path.abspath('../..'))

from utils import solve, int_line, int_list_line  # nopep8


def p1(lines):
    return sum(lines)


def p2(lines):
    seen = set()
    f = 0
    for l in itertools.cycle(lines):
        seen.add(f)
        f += l
        if f in seen:
            return f


if __name__ == "__main__":
    solve(sys.argv, int_line, p1, p2)
        children = []
        for _ in range(0, nn):
            c = Node.create(l)
            children.append(c)
        n = Node(children, [next(l) for _ in range(0, mn)])
        return n

    def metadata_sum(self):
        return sum(self.metadata) + sum(c.metadata_sum() for c in self.children)

    def value(self):
        if len(self.children) == 0:
            return sum(self.metadata)
        return sum(self.children[i - 1].value() for i in self.metadata
                   if 0 < i <= len(self.children))


def p1(lines):
    tree = Node.create(iter(lines[0]))
    return tree.metadata_sum()


def p2(lines):
    tree = Node.create(iter(lines[0]))
    return tree.value()


if __name__ == "__main__":
    solve(sys.argv, lambda l: int_list_line(l, ' '), p1, p2)
                          config.graph.method_create_graph)
    C_nodes = handler.get_tensor()
    fin = time.time()
    print("get tensor and decomposition done", fin - debut)

    sentence_to_articles = (None if not config.graph.sentence_based
                            else handler.articles.sentence_to_article)
    graph = embedding_matrix_2_kNN(
        C, k=config.graph.num_nearest_neighbours,
        sentence_to_articles=sentence_to_articles).toarray()
    fin3 = time.time()
    print("KNN done", fin3 - fin)

    if config.learning.method_learning == "FaBP":
        # class b(i) ∈ {> 0, < 0} means i ∈ {“+”, “-”}
        beliefs = solve(graph, labels[:])
        fin4 = time.time()
        print("FaBP done", fin4 - fin3)
    elif config.learning.method_learning in ["SVM", "RF"]:
        training_mask = labels > 0
        test_mask = labels == 0
        training_set = C[training_mask, :]
        l = labels[training_mask]
        l[l == 2] = -1
        print("Fitting")
        if config.learning.method_learning == "SVM":
            clf = svm.SVC(gamma='scale')
        else:  # Random forest
            clf = RandomForestClassifier(n_estimators=100, max_depth=2,
                                         random_state=0)
    r4 = r2 | 0x10000  # 6
    r2 = 6718165  # 7
    i = 0
    while True:
        r3 = r4 & 0xff  # 8
        r2 = (((r2 + r3) & 0xffffff) * 65899) & 0xffffff  # 9 - 12
        if 256 > r4:  # 13 - 16
            break
        r4, _ = divmod(r4, 256)  # 17 - 26
        i += 1
    if part1:
        return r2
    if r2 in seen:
        return p
    seen.add(r2)
    p = r2


def p1(_):
    return terminate()


def p2(_):
    return terminate(False)


if __name__ == "__main__":
    solve([], lambda x: x, p1, p2)
            continue
        connections[u].add(v)
        connections[v].add(u)

    resolved = set()
    constellations = 0
    for star in lines:
        if star in resolved:
            continue
        constellations += 1
        q = [star]
        while q:
            u = heapq.heappop(q)
            if u in resolved:
                continue
            resolved.add(u)
            for v in connections[u]:
                if v not in resolved:
                    heapq.heappush(q, v)
    return constellations


def p2(_):
    return None


if __name__ == "__main__":
    solve(sys.argv, lambda l: tuple(int_list_line(l, ',')), p1, p2)
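# The heap in p1 only makes the flood-fill pop stars in sorted order; for
# counting connected components any container works. A minimal equivalent
# with a plain list used as a stack (sketch, assuming the same `connections`
# adjacency sets built above):
def count_components(stars, connections):
    resolved, count = set(), 0
    for star in stars:
        if star in resolved:
            continue
        count += 1
        stack = [star]
        while stack:
            u = stack.pop()
            if u in resolved:
                continue
            resolved.add(u)
            stack.extend(v for v in connections[u] if v not in resolved)
    return count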
import utils

if __name__ == '__main__':
    # Array of grids in string form
    grids = [
        '4.....8.5.3..........7......2.....6.....8.4......1.......6.3.7.5..2.....1.4......',
        '..5.......32..695..8..1..23.2.........7...4.........6.54..2..8..963..27.......5..',
        '1.4.9..68956.18.34..84.695151.....868..6...1264..8..97781923645495.6.823.6.854179',
        '5....26...9..5.84..439.6....1...4.5.....1.....6.2...7....8.956..54.3..8...61....9',
        '5.9.2..1.4...56...8..9.3..5.87..25..654....82..15684971.82.5...7..68...3.4..7.8..',
        '.4......8...7...5...8.......213.......9...6.......457.......9...1...9...9......1.',
        '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3',
        '9.1....8.8.5.7..4.2.4....6...7......5..............83.3..6......9................',
        '2.............62....1....7...6..8...3...9...7...6..4...4....8....52.............3',
        '9.1....8.8.5.7..4.2.4....6...7......5..............83.3..6......9................'
    ]
    for grid in grids:
        values = utils.solve(grid)
        utils.display(values)
        print('\n=====================\n')