def test3neuralnet(theta1, theta2, a_1):
    # sizes - layer 1: 400, layer 2: 25, layer 3: 10
    a_1 = ut.create_design(a_1)
    a_2 = alg.sigmoid(a_1 @ theta1.T)
    a_2 = ut.create_design(a_2)
    a_3 = alg.sigmoid(theta2 @ a_2.T)
    p = np.argmax(a_3, axis=0) + 1  # class labels are 1-indexed
    return p
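# Illustrative smoke test for test3neuralnet with the layer sizes noted in its
# comment (400 inputs, 25 hidden units, 10 classes). The random weights and
# inputs are hypothetical stand-ins, and ut.create_design is assumed to
# prepend a bias column of ones to the design matrix.
rng = np.random.default_rng(0)
theta1 = rng.normal(size=(25, 401))   # 400 features + bias column
theta2 = rng.normal(size=(10, 26))    # 25 hidden units + bias column
a_1 = rng.normal(size=(100, 400))     # 100 sample rows
p = test3neuralnet(theta1, theta2, a_1)
assert p.shape == (100,)
assert p.min() >= 1 and p.max() <= 10  # predictions are 1-indexed class labels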
def compute_utility(self, grid: FastGrid) -> float:
    r = prairie_fire(grid)
    rewards = 0.0
    penalties = 0.0
    top_score_reward = 0
    for tile in r:
        clusters = r[tile]
        num_clusters = len(clusters)
        if num_clusters > 0:
            # overwritten on every pass, so only the last tile value seen
            # with clusters contributes its top-score reward below
            top_score_reward = self.rewards_for_top_score[tile]
            max_cluster_size = max(c['count'] for c in clusters)
            penalties += 3.0 ** num_clusters
            rewards += 1.5 ** max_cluster_size
            # for cluster in clusters:
            #     # {'count': 0, 'minx': 1000, 'maxx': -1, 'miny': 5, 'maxy': -1, 'adjacent_tiles': []}
            #     rewards += (self.rewards_for_cluster_sizes[tile] * cluster['count'])  # if cluster['count'] >= 2 else 0
            #     rewards += self.rewards_for_cluster_dimensions_x[cluster['maxx'] - cluster['minx']]
            #     rewards += self.rewards_for_cluster_dimensions_y[cluster['maxy'] - cluster['miny']]
    # special_reward_for_zeros = sum(x['count'] for x in r[0])
    # rewards += 10 * special_reward_for_zeros
    rewards += top_score_reward
    return sigmoid(rewards - penalties)
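# `sigmoid` is used throughout these utility functions but never defined in
# this excerpt; a minimal sketch of the assumed helper. np.exp works
# elementwise, so the same sketch serves both the scalar and array call sites
# that appear in these snippets.
import numpy as np

def sigmoid(x):
    # squashes any real-valued score into the open interval (0, 1)
    return 1.0 / (1.0 + np.exp(-x))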
def activation(self):
    # Forward pass for one layer: z = W^T x + b, a = sigmoid(z).
    if self.train:
        z = np.dot(self.weights.T, self.train_x) + self.bias
    else:
        z = np.dot(self.weights.T, self.valid_x) + self.bias
    a = sigmoid(z)
    return a
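# Hypothetical wiring for activation(); every attribute mirrors what the
# method reads, but the class and the shapes here are illustration-only.
import numpy as np

class _DemoLayer:
    def __init__(self, n_features, n_units, train_x, valid_x):
        self.weights = np.zeros((n_features, n_units))
        self.bias = np.zeros((n_units, 1))
        self.train_x = train_x   # shape (n_features, n_train_samples)
        self.valid_x = valid_x   # shape (n_features, n_valid_samples)
        self.train = True        # toggle to evaluate on the validation split
    activation = activation      # reuse the function defined above

layer = _DemoLayer(3, 2, train_x=np.ones((3, 5)), valid_x=np.ones((3, 4)))
a = layer.activation()
assert a.shape == (2, 5)  # one activation per unit per training sample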
def compute_utility(self, grid: FastGrid) -> float:
    scores = [0.0] * 8

    def fn(x, y):
        return self.weight[(y * 4) + x]

    for r in range(4):
        for c in range(4):
            scores[0] += grid[r, c] * fn(r, c)
            scores[1] += grid[r, c] * fn(3 - c, r)
            scores[2] += grid[r, c] * fn(3 - r, 3 - c)
            scores[3] += grid[r, c] * fn(c, 3 - r)
            scores[4] += grid[r, c] * fn(c, r)
            scores[5] += grid[r, c] * fn(r, 3 - c)
            scores[6] += grid[r, c] * fn(3 - c, 3 - r)
            scores[7] += grid[r, c] * fn(3 - r, c)
    return sigmoid(max(scores))
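# The eight index maps above enumerate the dihedral symmetries of the 4x4
# board (four rotations and four reflections), so the single weight table is
# scored against every orientation of the grid and the best match wins.
# A quick check that the maps are distinct permutations of the sixteen cells:
_transforms = [
    lambda r, c: (r, c),          # identity
    lambda r, c: (3 - c, r),      # rotate 90
    lambda r, c: (3 - r, 3 - c),  # rotate 180
    lambda r, c: (c, 3 - r),      # rotate 270
    lambda r, c: (c, r),          # transpose
    lambda r, c: (r, 3 - c),      # mirror columns
    lambda r, c: (3 - c, 3 - r),  # anti-transpose
    lambda r, c: (3 - r, c),      # mirror rows
]
_images = {tuple(t(r, c) for r in range(4) for c in range(4)) for t in _transforms}
assert len(_images) == 8  # all eight orientations are distinct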
def compute_utility(self, grid: FastGrid) -> float:
    a = grid.board
    totals = array.array('i', [0, 0, 0, 0])

    # up/down direction
    for x in range(4):
        current = 0
        neighbour = current + 1
        while neighbour < 4:
            # skip over empty cells
            while neighbour < 4 and a[(neighbour * 4) + x] == 0:
                neighbour += 1
            if neighbour >= 4:
                neighbour -= 1
            current_value = a[(current * 4) + x]    # get_val(a, x, current)
            next_value = a[(neighbour * 4) + x]     # get_val(a, x, neighbour)
            if current_value < next_value:
                totals[0] += next_value - current_value
            elif next_value < current_value:
                totals[1] += current_value - next_value
            current = neighbour
            neighbour += 1

    # left/right direction
    for y in range(4):
        current = 0
        neighbour = current + 1
        while neighbour < 4:
            while neighbour < 4 and a[(y * 4) + neighbour] == 0:
                neighbour += 1
            if neighbour >= 4:
                neighbour -= 1
            current_value = a[(y * 4) + current]    # get_val(a, current, y)
            next_value = a[(y * 4) + neighbour]     # get_val(a, neighbour, y)
            if current_value < next_value:
                totals[2] += next_value - current_value
            elif next_value < current_value:
                totals[3] += current_value - next_value
            current = neighbour
            neighbour += 1

    result = max(totals[0], totals[1]) + max(totals[2], totals[3])
    return sigmoid(result)
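# Minimal smoke test for the directional totals above. The stub grid and the
# unbound call are illustration-only assumptions (`self` is never touched in
# the body); FastGrid's real constructor is not shown in this excerpt.
import array

class _StubGrid:
    def __init__(self, cells):
        self.board = array.array('i', cells)

_board = _StubGrid([2, 1, 1, 0,
                    0, 1, 2, 0,
                    0, 0, 1, 0,
                    1, 0, 0, 1])
_u = compute_utility(None, _board)
assert 0.0 < _u < 1.0  # sigmoid keeps the utility in the open unit interval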
def test2():
    print("\n\nTest 2 - Logistic Regression & Regularization")
    print("Expected / Actual:")
    print("\nCost & Gradient:")
    X, y = ut.read_csv('csv/ex2data1.csv')
    X = ut.create_design(X)
    theta = np.zeros((X.shape[1], ))
    cost = alg.cross_ent(theta, X, y)
    grad = alg.cross_ent_gradient(theta, X, y)
    print("0.693147 / ", cost)
    print("-0.1000 / ", grad[0])
    print("-12.0092 / ", grad[1])
    print("-11.2628 / ", grad[2])
    res = opt.minimize(alg.cross_ent, theta, (X, y), method='BFGS',
                       jac=alg.cross_ent_gradient, options={'maxiter': 400})
    print("0.203498 / ", res.fun)
    theta = res.x
    print("-25.1613 / ", theta[0])
    print("0.2062 / ", theta[1])
    print("0.2015 / ", theta[2])
    p = alg.sigmoid(ut.predict(np.array([[45, 85]]), theta)[0])
    print("0.776291 /", p)
    p = np.mean(np.round(alg.sigmoid(X @ theta)) == y) * 100
    print(">= 89.000000 /", p)

    print("\nRegularization:")
    X, y = ut.read_csv('csv/ex2data2.csv')
    X = ut.add_features(X[:, 0], X[:, 1], 6)
    print("118 / ", X.shape[0])
    print("28 /", X.shape[1])
    print("8.2291e-10 / ", X[117, 27])
    print("0.2914 / ", X[99, 9])
    theta = np.zeros((X.shape[1], ))
    lam = 1
    print("0.693147 / ", alg.cross_ent(theta, X, y, lam))
    grad = alg.cross_ent_gradient(theta, X, y, lam)
    print("(28,) / ", grad.shape)
    print("0.0085 / ", grad[0])
    print("0.0129 / ", grad[12])
    print("0.0388 / ", grad[27])

    # Training accuracy drops as the regularization strength increases.
    for lam, expected in ((0, 88.983051), (1, 83.050847),
                          (10, 74.576271), (100, 61.016949)):
        theta = np.zeros((X.shape[1], ))
        res = opt.minimize(alg.cross_ent, theta, (X, y, lam), method='BFGS',
                           jac=alg.cross_ent_gradient, options={'maxiter': 1000})
        theta = res.x
        p = np.mean(np.round(alg.sigmoid(X @ theta)) == y) * 100
        print(">= %f / " % expected, p)
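# For reference, the regularized cross-entropy cost exercised above is
# conventionally defined as sketched below; this is the standard formula,
# not the actual body of alg.cross_ent, which is not shown in this excerpt.
def _cross_ent_sketch(theta, X, y, lam=0.0):
    # J = -(1/m) * sum(y*log(h) + (1-y)*log(1-h)) + (lam/(2m)) * sum(theta[1:]**2)
    m = y.size
    h = 1.0 / (1.0 + np.exp(-(X @ theta)))
    cost = -(y @ np.log(h) + (1 - y) @ np.log(1 - h)) / m
    return cost + lam * (theta[1:] @ theta[1:]) / (2 * m)

# Sanity check: at theta = 0 every prediction h is 0.5, so J = ln 2 ~= 0.693147,
# which is exactly the expected value printed by the tests above.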
def compute_utility(self, grid: FastGrid) -> float:
    new_array = self.hole_detector_kernel.compute(grid)
    result = sigmoid(sum(new_array))
    return result
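# The kernel interface is not shown in this excerpt; any object exposing a
# compute(grid) method that returns an iterable of numbers fits. A trivial
# hypothetical stand-in for testing:
class _ZeroKernel:
    def compute(self, grid):
        return [0.0] * 16  # one neutral score per cell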
def compute_utility(self, g: FastGrid) -> float:
    b = (g.board[0:4]
         + array.array('i', reversed(g.board[4:8]))
         + g.board[8:12]
         + array.array('i', reversed(g.board[12:16])))
    result = sum(x / 10**n for n, x in enumerate(b))
    return sigmoid(result)
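# The two row reversals above traverse the board as a "snake" (row 0
# left-to-right, row 1 right-to-left, and so on), and enumerate() weights each
# successive cell by a further factor of 1/10, so utility is dominated by how
# well the largest tiles hug the start of the snake. Illustrative check; the
# stub grid and the unbound call are illustration-only assumptions:
import array

class _SnakeStub:
    def __init__(self, cells):
        self.board = array.array('i', cells)

_ordered = _SnakeStub([4, 3, 2, 1] + [0] * 12)    # descends along the snake
_scrambled = _SnakeStub([1, 2, 3, 4] + [0] * 12)  # largest tile at the far end
assert compute_utility(None, _ordered) > compute_utility(None, _scrambled)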