def fun(x, y):
    """Output of a four-hidden-neuron "tower" for the 2-D input (x, y).

    Relies on enclosing-scope names: weights Wx1..Wx4 / Wy1..Wy4, step
    positions s1..s4, bump height h, output bias b, and network2.sigmoid.
    # NOTE(review): free variables — presumably bound by the surrounding
    # plotting code; verify against the caller.
    """
    # Hidden layer: derive each bias so neuron i's sigmoid step sits at s_i.
    if Wx1 and Wx2:
        b1 = -s1 * Wx1
        b2 = -s2 * Wx2
    if Wy1 and Wy2:
        b1 = -s1 * Wy1
        b2 = -s2 * Wy2
    # BUG FIX: original tested `Wx3 and Wx3` / `Wy4 and Wy4`; by symmetry
    # with the first pair (and because the bodies use Wx4 / Wy3) these
    # must be `Wx3 and Wx4` / `Wy3 and Wy4`.
    if Wx3 and Wx4:
        b3 = -s3 * Wx3
        b4 = -s4 * Wx4
    if Wy3 and Wy4:
        b3 = -s3 * Wy3
        b4 = -s4 * Wy4
    a1 = network2.sigmoid(x * Wx1 + y * Wy1 + b1)
    a2 = network2.sigmoid(x * Wx2 + y * Wy2 + b2)
    a3 = network2.sigmoid(x * Wx3 + y * Wy3 + b3)
    a4 = network2.sigmoid(x * Wx4 + y * Wy4 + b4)
    # Output layer: each +h / -h weight pair turns a sigmoid pair into a
    # bump of height h; the final sigmoid squashes the weighted sum.
    w1 = w3 = h
    w2 = w4 = -w1
    return network2.sigmoid(w1 * a1 + w2 * a2 + w3 * a3 + w4 * a4 + b)
def n_last_layer_out_put(n, fun):
    """Plot the last-layer output assembled from n paired hidden neurons.

    fun is r - sigmoid(f(x)); each sub-interval [s1, s2) of [0, 1)
    contributes a bump of height fun(i), and the squashed sum
    approximates f(x).

    :param n: number of sub-intervals (and neuron pairs)
    :param fun: callable giving the target bump height at each point
    """
    y = None
    x = None
    for i in np.linspace(0, 1, n, endpoint=False):
        s1 = i              # left edge of this sub-interval
        s2 = i + 1.0 / n    # right edge
        h = fun(i)          # bump height for this interval (区间高度)
        x, y1 = pair_hidden_neurons_out_put(s1, s2, h)
        # BUG FIX: original used `y == None`, which broadcasts
        # element-wise once y is an ndarray and makes the `if` raise
        # "truth value of an array is ambiguous"; identity test is correct.
        if y is None:
            y = y1
        else:
            y += y1
    y = network2.sigmoid(y)
    plot_figure.plot_base(y_coordinate=[y],
                          x_coordinate=[x],
                          title='n = %d' % (n),
                          x_lable='X',
                          y_lable='Sigmod (z)',
                          x_limit=[min(x) - 0.2, max(x) + 0.2],
                          y_limit=[min(y) - 0.2, max(y) + 0.2])
def fun(x, y):
    """Weighted output of a single hidden-neuron pair for input (x, y).

    Uses enclosing-scope weights Wx1/Wx2, Wy1/Wy2, step positions s1/s2,
    bump height h, and network2.sigmoid.
    """
    # Hidden layer: choose biases from whichever weight pair is non-zero.
    if Wx1 and Wx2:
        bias_a, bias_b = -s1 * Wx1, -s2 * Wx2
    if Wy1 and Wy2:
        bias_a, bias_b = -s1 * Wy1, -s2 * Wy2
    act_a = network2.sigmoid(x * Wx1 + y * Wy1 + bias_a)
    act_b = network2.sigmoid(x * Wx2 + y * Wy2 + bias_b)
    # Output layer: the +h / -h pair forms a step of height h; zero bias.
    out_w1 = h
    out_w2 = -out_w1
    out_b = 0
    return out_w1 * act_a + out_w2 * act_b + out_b
def fun(x, y):
    """Sigmoid activation of a single neuron for input (x, y).

    Weights w1/w2 and step position s come from the enclosing scope;
    the bias -s * w1 places the sigmoid's transition at s.
    """
    weighted_input = x * w1 + y * w2 + (-s * w1)
    return network2.sigmoid(weighted_input)
def getOutputOfHiddenUnits(data, net):
    """Return first-hidden-layer activations for every sample in data.

    :param data: iterable of (x, y) pairs; only x is used here
    :param net: network object exposing weights[0] and biases[0]
        # NOTE(review): assumes np.dot(net.weights[0], x) broadcasts
        # against net.biases[0] — verify against network2's layout.
    :return: list of activation arrays, one per sample
    """
    # Hoist the loop-invariant layer parameters, then build the list
    # with a comprehension instead of the manual append loop.
    w, b = net.weights[0], net.biases[0]
    return [network2.sigmoid(np.dot(w, x) + b) for x, _ in data]
def display_weight(matrix, i):
    """Show row i of matrix after squashing it through the sigmoid."""
    squashed_row = network2.sigmoid(matrix[i])
    display(squashed_row)
def get_sigmod_coordinate(w, b):
    """Return (x, sigmoid(w * x + b)) sampled over the unit interval.

    :param w: neuron weight
    :param b: neuron bias
    :return: tuple (x, y) of 10000 sample points on [0, 1) and the
        sigmoid activation at each
    """
    # Inputs run from 0 to 1 — 10000 points on the half-open interval.
    sample_points = np.linspace(0, 1, 10000, endpoint=False)
    activations = network2.sigmoid(w * sample_points + b)
    return sample_points, activations