def __init__(self, layer_size, prev_layer_size):
    """Initialise one network layer: input/output/error vectors and the
    incoming weight matrix, with weights drawn uniformly from (-0.2, 0.2).

    :param layer_size: number of neurons in this layer
    :param prev_layer_size: number of neurons in the previous layer
    """
    # NOTE(review): no `id` parameter exists here, so this stores the Python
    # *builtin* `id` function, not a layer identifier — confirm whether an
    # `id` argument was meant to be passed in.
    self.id = id
    self.n_neurons = layer_size
    self.bias_val = 1  # constant bias value placed at output[0]
    # this is how you init an array of zeroes based on self.n_neurons
    # [0, 0, 0, 0, 0, 0, 0, 0, 0, 0]
    # input vector
    self.input = [0] * self.n_neurons
    # output vector
    # it's not that bias enters this layer, it exits it:
    # it will be used in the next layer but exists in this layer
    # (`use_bias` is a module-level flag/int defined outside this view)
    self.output = [0] * (self.n_neurons + use_bias)
    # first element is our bias value
    self.output[0] = self.bias_val
    # error vector
    self.error = [0] * self.n_neurons
    # weight matrix: one row per previous-layer output (incl. bias slot),
    # one column per neuron in this layer
    self.weight = make_matrix(prev_layer_size + use_bias, self.n_neurons)
    for i in range(len(self.weight)):
        for j in range(len(self.weight[i])):
            self.weight[i][j] = between(-0.2, 0.2)
def iteration(u, n, t, prev):
    """Advance the grid solution one time step of size ``t``.

    Structure suggests an ADI-style implicit step for a damped wave-type
    equation: two tridiagonal (TDMA) solve passes, one per grid direction,
    followed by the second-order time update ``result + 2*u - prev``.
    NOTE(review): scheme identification inferred from structure — confirm.

    :param u: current solution on an (n+1) x (n+1) grid
    :param n: number of grid intervals per direction
    :param t: time step
    :param prev: solution at the previous time step
    :return: (n+1) x (n+1) array with the next-step solution
    """
    s = 0.5  # implicit weighting of the spatial operator
    g = n / 2  # damping coefficient — presumably grid-dependent; TODO confirm
    result = np.zeros((n + 1, n + 1))
    delta_u = delta(u, n)  # discrete spatial operator of u (helper defined elsewhere)
    # Tridiagonal system coefficients; the first/last rows (0s in a/c, 1s in b)
    # pin the boundary values of each solve.
    a = [0] + [-t * t * s * n * n] * (n - 1) + [0]
    b = [1] + [1 + 2 * t * t * s * n * n] * (n - 1) + [1]
    c = [0] + [-t * t * s * n * n] * (n - 1) + [0]
    temp = make_matrix(n)
    # First sweep: solve a tridiagonal system for every interior row k.
    for k in range(1, n):
        d = -(u[k] - prev[k]) * t * g + t * t * (
            delta_u[k] - f(k / n, np.linspace(0, 1, n + 1)) / n / n) * n * n
        d[0] = 0  # homogeneous boundary values for the right-hand side
        d[n] = 0
        temp[k] = tdma(n + 1, a, b, c, d)
    # Second sweep: transpose and solve along the other grid direction.
    temp = np.transpose(temp)
    for k in range(1, n):
        d = temp[k]
        d[0] = 0
        d[n] = 0
        result[k] = tdma(n + 1, a, b, c, d)
    result = np.transpose(result)
    # Second-order-in-time update built from the two previous time layers.
    return result + 2 * u - prev
def de_mean_matrix(A):
    """Subtract each column's mean from every entry of ``A``.

    The returned matrix therefore has mean 0 in every column.
    """
    n_rows, n_cols = A.shape
    means, _ = scale(A)

    def centered(i, j):
        return A[i, j] - means[j]

    return utils.make_matrix(n_rows, n_cols, centered)
def de_mean_matrix(A):
    """Center the columns of ``A``: subtract the column mean from each value,
    so every column of the result has mean 0."""
    num_rows, num_cols = A.shape
    col_means = scale(A)[0]
    return utils.make_matrix(
        num_rows, num_cols,
        lambda row, col: A[row, col] - col_means[col])
def first_approach(n):
    """Build an initial (n+1) x (n+1) grid guess: the exact solution with a
    small sinusoidal perturbation inside, then the boundary conditions
    re-imposed (zero on three sides, sqrt(sin(pi*x)) on the fourth)."""
    grid = make_matrix(n)
    # Interior guess: perturbed exact solution.
    for row in range(n + 1):
        for col in range(n + 1):
            grid[row][col] = exact_solution(col / n, row / n) * (1 + 0.1 * np.sin(col))
    # Overwrite the boundaries with the prescribed conditions.
    for idx in range(0, n + 1):
        grid[idx][0] = 0.0
        grid[idx][n] = 0.0
        grid[0][idx] = np.sqrt(np.sin(np.pi * idx / n))
        grid[n][idx] = 0.0
    return grid
def rescale(data_matrix):
    """Rescale the input so each column has mean 0 and standard deviation 1.

    Columns with zero deviation are passed through unchanged.
    """
    means, stdevs = scale(data_matrix)
    n_rows, n_cols = data_matrix.shape

    def standardize(i, j):
        sd = stdevs[j]
        # Guard against division by zero for constant columns.
        return (data_matrix[i, j] - means[j]) / sd if sd > 0 else data_matrix[i, j]

    return utils.make_matrix(n_rows, n_cols, standardize)
def rescale(data_matrix):
    """Standardize each column of ``data_matrix`` to mean 0 and stdev 1,
    leaving zero-deviation columns untouched."""
    col_means, col_stdevs = scale(data_matrix)
    rows, cols = data_matrix.shape

    def cell(i, j):
        if not col_stdevs[j] > 0:
            # Constant column: nothing meaningful to scale by.
            return data_matrix[i, j]
        return (data_matrix[i, j] - col_means[j]) / col_stdevs[j]

    return utils.make_matrix(rows, cols, cell)
def iteration(u, n, t):
    """One implicit time step of size ``t`` on an (n+1) x (n+1) grid:
    for each interior row a tridiagonal system is solved via TDMA, with the
    top/bottom rows and the row endpoints carried over from ``u``."""
    result = make_matrix(n)
    # Top and bottom boundary rows pass through unchanged.
    for j in range(0, n + 1):
        result[0][j] = u[0][j]
        result[n][j] = u[n][j]
    # Tridiagonal coefficients; end entries (0 / 1) pin boundary values.
    sub = [0] + [-t * n * n] * (n - 1) + [0]
    main = [1] + [1 + 2 * t * n * n] * (n - 1) + [1]
    sup = [0] + [-t * n * n] * (n - 1) + [0]
    for k in range(1, n):
        # Right-hand side: explicit part of the stencil minus the source term.
        rhs = u[k] + t * (u[k - 1] + u[k + 1] - 2 * u[k]) * n * n - t * f(
            k / n, np.linspace(0, 1, n + 1))
        rhs[0] = u[k][0]
        rhs[n] = u[k][n]
        result[k] = tdma(n + 1, sub, main, sup, rhs)
    return result
def converge_solution(u, n, *args, **kwargs):
    """One Jacobi-style relaxation sweep on an (n+1) x (n+1) grid.

    Boundaries are re-imposed (zero on three sides, 0.5*sin(pi*x) on the
    fourth); interior points get the 5-point-stencil average minus the
    source term. Returns the new grid and the summed squared change.
    """
    result = make_matrix(n)
    diff = 0
    for idx in range(0, n + 1):
        result[idx][0] = 0.0
        result[idx][n] = 0.0
        result[0][idx] = 0.5 * sin(np.pi * idx / n)
        result[n][idx] = 0.0
    for k in range(1, n):
        for m in range(1, n):
            neighbours = u[k - 1][m] + u[k + 1][m] + u[k][m - 1] + u[k][m + 1]
            result[k][m] = 0.25 * (neighbours - f(k / n, m / n) / n / n)
            # `or 0` preserved from the original (normalises a falsy delta).
            diff += (result[k][m] - u[k][m] or 0) ** 2
    return result, diff
def converge_solution(u, n, *args, **kwargs):
    """One relaxation sweep using the root-mean style update
    sqrt(a^2 + b^2 + c^2 + d^2 - 2*f/n^2) / 2 at interior points, with
    fixed boundaries (zero on three sides, sqrt(sin(pi*x)) on the fourth).
    Returns the new grid and the Frobenius norm of the change."""
    result = make_matrix(n)
    for idx in range(0, n + 1):
        result[idx][0] = 0.0
        result[idx][n] = 0.0
        result[0][idx] = sqrt(sin(np.pi * idx / n))
        result[n][idx] = 0.0
    for k in range(1, n):
        for m in range(1, n):
            up = u[k - 1][m]
            down = u[k + 1][m]
            left = u[k][m - 1]
            right = u[k][m + 1]
            radicand = (up * up + down * down + left * left + right * right
                        - 2 * f(k / n, m / n) / n / n)
            result[k][m] = sqrt(radicand) / 2
    # Assumes make_matrix yields an ndarray-compatible object — TODO confirm.
    diff = np.linalg.norm(result - u, ord='fro')
    return result, diff
def solve_equation(n, eps=0.001):
    """Iterate ``converge_solution`` on an n x n grid until the residual
    drops below ``eps`` times its initial value; progress is printed every
    10 iterations. Returns the converged grid."""
    grid = make_matrix(n)
    for layer in range(n):
        solve_layer(grid, n, layer)
    prev = np.array(grid)
    # First sweep establishes the baseline residual for the stop criterion.
    grid, residual, prev = converge_solution(grid, n, prev)
    initial_residual = residual
    k = 1
    while residual > initial_residual * eps:
        grid, residual, prev = converge_solution(grid, n, prev)
        if k % 10 == 0:  # k starts at 1 and only grows, so k > 0 always holds
            error = round(residual * 100 / initial_residual, ndigits=2)
            print(f'Прошло {k} итераций, погрешность - {error}%')
        k += 1
    print(f'Сошлось за {k} итераций.')
    return grid
def converge_solution(u, n, *args, **kwargs):
    """One explicit pseudo-time relaxation sweep with a fixed tiny step.

    Boundary values are copied from ``u``; each interior point moves by
    ``t`` times a residual built from the squared 5-point stencil minus the
    source term. Returns the new grid and the Frobenius norm of the change.
    """
    t = 1 / 100000  # pseudo-time step
    result = make_matrix(n)
    # Carry all four boundary edges through unchanged.
    for idx in range(0, n + 1):
        result[idx][0] = u[idx][0]
        result[idx][n] = u[idx][n]
        result[0][idx] = u[0][idx]
        result[n][idx] = u[n][idx]
    for k in range(1, n):
        for m in range(1, n):
            up = u[k - 1][m]
            down = u[k + 1][m]
            left = u[k][m - 1]
            right = u[k][m + 1]
            center = u[k][m]
            residual = (up * up + down * down + left * left + right * right
                        - 4 * center * center) * n * n / 2 - f(k / n, m / n)
            result[k][m] = center + residual * t
    # Assumes make_matrix yields an ndarray-compatible object — TODO confirm.
    diff = np.linalg.norm(result - u, ord='fro')
    return result, diff
def converge_solution(u, n, prev=None, *args, **kwargs):
    """One damped second-order-in-time relaxation sweep using the current
    iterate ``u`` and the previous iterate ``prev``.

    Returns ``(result, diff, u)`` so the caller can thread the previous
    iterate into the next call.

    NOTE(review): ``prev`` defaults to None but ``prev[k][m]`` is read
    unconditionally, so calling without it raises TypeError — confirm that
    callers always pass the previous iterate.
    """
    t = 1 / 1000  # pseudo-time step
    g = n / 2  # damping coefficient — presumably grid-dependent; TODO confirm
    result = make_matrix(n)
    # Carry all four boundary edges through unchanged.
    for k in range(0, n + 1):
        result[k][0] = u[k][0]
        result[k][n] = u[k][n]
        result[0][k] = u[0][k]
        result[n][k] = u[n][k]
    # Interior update: leapfrog-style step with damping, driven by the
    # squared 5-point stencil minus the source term f.
    for k in range(1, n):
        for m in range(1, n):
            a = u[k - 1][m]
            b = u[k + 1][m]
            c = u[k][m - 1]
            d = u[k][m + 1]
            e = u[k][m]
            result[k][m] = 2 * e - prev[k][m] \
                - g * t * (e - prev[k][m]) + t * t * \
                ((a * a + b * b + c * c + d * d - 4 * e * e)
                 * n * n / 2 - f(k / n, m / n))
            # print(f'{k},{m}\t\t', result[k][m], e)
    # Frobenius norm of the change — assumes make_matrix yields an
    # ndarray-compatible object; TODO confirm.
    diff = np.linalg.norm(result - u, ord='fro')
    return result, diff, u
def test_make_matrix(self):
    """make_matrix(2, 1) builds a 2 x 1 matrix of zeroes."""
    expected = [[0], [0]]
    self.assertEqual(make_matrix(2, 1), expected)