# NOTE(review): this chunk begins inside create_layers(); the `def` header and
# the layer-size constants (n_inputs, n_hidden1, n_hidden2, n_outputs) are
# defined outside the visible chunk. The header is reconstructed from the
# no-argument call site below -- confirm against the full file.
def create_layers():
    """Build a 3-layer MLP: two ReLU hidden layers plus a linear output layer."""
    relu = NN_activators.ReLUActivator()
    net = [
        NN_Layer.Layer(n_inputs, n_hidden1, activator=relu),
        NN_Layer.Layer(n_hidden1, n_hidden2, activator=relu),
        # No activator on the final layer: linear output for regression.
        NN_Layer.Layer(n_hidden2, n_outputs),
    ]
    return net
# end

# --- California housing regression driver ---
housing = fetch_california_housing()
X = housing.data
y = housing.target.reshape(-1, 1)  # column vector, one target per sample

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.5, random_state=0
)
X_train = process_features(X_train)
X_test = process_features(X_test)

layers = create_layers()
loss = NN_Loss.SquaredLoss()  # regression problem: use the squared-loss function
model = NeuralNetwork(layers, loss)
model.fit(X_train, y_train, 100000, 0.01)

y_pred = model.predict(X_test)
print("r2_score: {}.".format(r2_score(y_test, y_pred)))
# NOTE(review): this chunk begins mid-file; the `def create_layers` header is
# outside the visible text and is reconstructed from the no-argument call site
# below -- confirm against the full file.
n_features = 28 * 28  # matches the 28x28 handwritten-digit image size
n_hidden1 = 300       # three layers in total, including the output layer
n_hidden2 = 100
n_classes = 10        # digit classification: the last layer has 10 outputs


def create_layers():
    """Build a 3-layer MLP for MNIST: two ReLU hidden layers, linear output."""
    relu = NN_activators.ReLUActivator()
    net = [
        NN_Layer.Layer(n_features, n_hidden1, activator=relu),
        NN_Layer.Layer(n_hidden1, n_hidden2, activator=relu),
        # No activator on the final layer: raw class scores go to softmax later.
        NN_Layer.Layer(n_hidden2, n_classes),
    ]
    return net
# end

# NOTE(review): input_data is the legacy TensorFlow 1.x MNIST helper; it is
# deprecated upstream -- confirm it is still available in this environment.
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
X_train, Y_train = mnist.train.images, mnist.train.labels
X_test, Y_test = mnist.test.images, mnist.test.labels

layers = create_layers()
loss = NN_Loss.LogarithmicLoss()
model = NeuralNetwork(layers, loss)
model.fit(X_train, Y_train, 50000, 0.01)

# predict() returns a matrix with the same width as the one-hot labels
# (one row of raw scores per test point).
V = model.predict(X_test)
PROBA = NN_Loss.softmax(V)  # row-wise softmax: scores -> class probabilities
y_pred = np.argmax(PROBA, axis=1)

accuracy = accuracy_score(np.argmax(Y_test, axis=1), y_pred)
print("accuracy: {}.".format(accuracy))
# NOTE(review): this chunk begins inside create_layers(); the `def` header,
# the `layers = []` initialization and first-layer append, the size constants
# (n_hidden1, n_hidden2, n_hidden3, n_classes) and the `relu`/`ide` activators
# are all outside the visible chunk. The header is reconstructed from the
# no-argument call site below -- confirm against the full file.
def create_layers():
    """Finish building the MLP: two more ReLU hidden layers and an
    identity-activated output layer."""
    layers.append(NN_Layer.Layer(n_hidden1, n_hidden2, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden2, n_hidden3, activator=relu))
    layers.append(NN_Layer.Layer(n_hidden3, n_classes, activator=ide))
    return layers
# end

X, y = get_data()
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=42
)

scaler = MinMaxScaler()  # renamed from `encoder`: it is a scaler, and the name was reused below
X_train = scaler.fit_transform(X_train)
# BUG FIX: the test set must be scaled with the min/max fitted on the TRAINING
# set. The original called fit_transform here, which refits the scaler on the
# test data and scales train and test inconsistently (data leakage).
X_test = scaler.transform(X_test)

encoder = OneHotEncoder()
# Only the training labels are converted to a one-hot matrix.
# NOTE(review): OneHotEncoder.fit_transform expects 2-D input and returns a
# sparse matrix by default -- confirm get_data()'s label shape and what
# NeuralNetwork.fit accepts.
Y_train = encoder.fit_transform(y_train)

layers = create_layers()
loss = NN_Loss.LogarithmicLoss()  # classification: log loss (cross-entropy objective)
model = NeuralNetwork(layers, loss)
model.fit(X_train, Y_train, 70000, 0.02)

# predict() returns a matrix the same width as the one-hot labels (one row of
# raw scores per test point); the rows are not yet valid probabilities.
VR = model.predict(X_test)
PROBA = NN_Loss.softmax(VR)  # row-wise softmax turns scores into probabilities
y_pred = np.argmax(PROBA, axis=1)  # most probable class per test point (0-9)

accuracy = accuracy_score(y_test, y_pred)
print("accuracy: {}.".format(accuracy))