def test_nn():
    """Train a single-hidden-layer network on MNIST, then report test accuracy.

    Uses 28*28 grayscale digit images, a hidden layer of 100 nodes, an
    output layer of 10 nodes (digits 0-9), and an initial learning rate of 0.2.
    Side effects: writes the trained weight matrices and the per-sample
    scorecard to text files, and prints progress/accuracy to stdout.
    """
    input_nodes = 28 * 28
    hidden_nodes = 100
    output_nodes = 10
    learning_rate = 0.2
    n = nn.NeuralNetwork(input_nodes, hidden_nodes, output_nodes, learning_rate)

    # Step 1: train the network
    print("start to train")
    train = True
    if train is True:
        # Option 1: train from data (smaller training set)
        training_data_list = dataset.get_data_list(
            "mnist_dataset/mnist_train.csv")
        size = len(training_data_list)  # used for progress printing
        for index, record in enumerate(training_data_list):
            label, inputs = dataset.get_scaled_data(record)
            # Target vector: 0.01 everywhere except 0.99 at the true label,
            # keeping values inside the sigmoid's effective output range.
            targets = numpy.zeros(output_nodes) + 0.01
            targets[label] = 0.99
            n.train(inputs, targets)
            # Print progress
            print_process(index, size)
        # Persist the final weight matrices
        numpy.savetxt("w_input_hidden.txt", n.w_input_hidden)
        numpy.savetxt("w_hidden_output.txt", n.w_hidden_output)
    else:
        print("load data done")
        # Option 2: load previously trained weight matrices directly
        n.load("mnist_dataset/w_input_hidden.txt",
               "mnist_dataset/w_hidden_output.txt")

    # Step 2: evaluate the trained network on the test set
    print("start to test")
    test_data_list = dataset.get_data_list("mnist_dataset/mnist_test.csv")
    scorecard = []  # one entry per test sample: 1 = correct, 0 = wrong
    right = 0  # running count of correct predictions
    size = len(test_data_list)  # used for progress printing
    for index, record in enumerate(test_data_list):
        label, inputs = dataset.get_scaled_data(record)
        result = n.query(inputs)
        # Compare the network's prediction with the true label
        if label == result:
            scorecard.append(1)
            right += 1
        else:
            scorecard.append(0)
        # Print progress
        print_process(index, size)
    # Save the scorecard
    numpy.savetxt("scorecard.txt", scorecard)
    # Print the accuracy
    print("right rate=", right / len(test_data_list) * 100, "%")
def test_data():
    """Smoke-check KDD data loading: print the first raw record and its parse.

    Loads the normalized KDD training CSV, shows the first record as-is,
    then shows the feature vector, its length, and the label produced by
    ``get_kdd_data``.
    """
    records = dataset.get_data_list(
        "kdd/KDDTrain+_22Percent-normalization.txt.csv")
    first = records[0]
    print(first)
    label, features = get_kdd_data(first)
    print(features)
    print(len(features), label)
def test_dnn():
    """Train a deep network on MNIST and report accuracy on the test set.

    Layer sizes are [784, 200, 100, 50, 10] with learning rate 0.2.
    Prints progress during both phases and the final accuracy percentage.
    """
    layers = [28 * 28, 200, 100, 50, 10]
    learning_rate = 0.2
    dnn = DeepNeuralNetwork(layers, learning_rate)

    # Phase 1: train on the full training file
    print("start to train")
    training_data_list = dataset.get_data_list(
        "mnist_dataset/mnist_train.csv")
    size = len(training_data_list)  # used for progress printing
    for index, record in enumerate(training_data_list):
        label, inputs = dataset.get_scaled_data(record)
        # Build the one-hot-style target vector sized to the output layer
        targets = dataset.get_targets_data(layers[-1], label)
        dnn.layer_train(inputs, targets)
        # Print progress
        print_process(index, size)

    # Phase 2: evaluate on the test set
    print("start to test")
    test_data_list = dataset.get_data_list(
        "mnist_dataset/mnist_test.csv")
    scorecard = []  # one entry per test sample: 1 = correct, 0 = wrong
    right = 0  # running count of correct predictions
    size = len(test_data_list)  # used for progress printing
    for index, record in enumerate(test_data_list):
        label, inputs = dataset.get_scaled_data(record)
        result = dnn.layer_query_result(inputs)
        # Compare the network's prediction with the true label
        if label == result:
            scorecard.append(1)
            right += 1
        else:
            scorecard.append(0)
        # Print progress
        print_process(index, size)
    # Print the accuracy
    print("right rate=", right / len(test_data_list) * 100, "%")
def test_query():
    """Exercise ``DeepNeuralNetwork.layer_query`` on a few MNIST samples.

    Feeds the first four records of the 100-sample training file through
    the network; the query result itself is not inspected here — this is a
    smoke test that the forward pass runs without error.
    """
    layers = [28 * 28, 100, 10]
    learning_rate = 0.2
    dnn = DeepNeuralNetwork(layers, learning_rate)
    training_data_list = dataset.get_data_list(
        "mnist_dataset/mnist_train_100.csv")
    for record in training_data_list[0:4]:
        label, inputs = dataset.get_scaled_data(record)
        targets = dataset.get_targets_data(layers[-1], label)
        # Result intentionally discarded; we only check the call succeeds.
        dnn.layer_query(inputs, targets)
from dataset import ImageDataset, get_data_list
from model import YOLOV3
import tensorflow as tf
from tensorflow import keras

# Build the data pipeline: one-image batches at the standard 416x416 input.
train_images, train_annotations, val_images, val_annotations, test_images, test_annotations = get_data_list(
)
dataset = ImageDataset(train_images,
                       train_annotations,
                       batch_size=1,
                       input_shape=(416, 416))
model = YOLOV3(input_shape=(416, 416, 3))
opt = keras.optimizers.Adam(1e-4)

# Simple custom training loop: 2000 steps, logging losses every 10 steps.
for i in range(2000):
    images, labels = next(dataset)
    with tf.GradientTape() as tape:
        predictions = model(images)
        obj_loss, reg_loss, cls_loss, loss = model.loss(labels, predictions)
    # BUGFIX: the attribute is `trainable_variables`; the previous
    # `trainable_availables` does not exist on Keras models and would
    # raise AttributeError on the first step.
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    if i % 10 == 0:
        print(
            i,
            f'obj_loss={obj_loss},reg_loss={reg_loss},cls_loss={cls_loss},total loss={loss}'
        )
# Persist the trained model.
model.save('v1.h5')
def test_kdd_NSL():
    """Train a deep network on the NSL-KDD intrusion dataset and evaluate it.

    Network layers are [122, 100, 10, 5]: 122 normalized KDD features in,
    5 attack-category classes out; learning rate 0.5. Trains on the 22%%
    subset and tests on KDDTest-21, printing per-class label counts for
    both phases and the final accuracy percentage.
    """
    layers = [122, 100, 10, 5]
    learning_rate = 0.5
    n = DeepNeuralNetwork(layers, learning_rate)
    labels_count = [0, 0, 0, 0, 0]  # per-class sample counts (5 classes)

    # Step 1: train the network
    print("start to train")
    train = True
    if train is True:
        # Option 1: train from data (smaller training subset)
        training_data_list = dataset.get_data_list(
            "kdd/KDDTrain+_22Percent-normalization.txt.csv")
        # training_data_list = dataset.get_data_list(
        #     "kdd/KDDTest-21-normalization.txt.csv")
        size = len(training_data_list)  # used for progress printing
        for index, record in enumerate(training_data_list):
            label, inputs = get_kdd_data(record)
            # Target vector: 0.01 everywhere except 0.99 at the true label
            targets = numpy.zeros(layers[-1]) + 0.01
            targets[label] = 0.99
            # targets = numpy.zeros(output_nodes)
            # targets[label] = 1
            n.layer_train(inputs, targets)
            # Print progress
            print_process(index, size)
            # Tally class distribution of the training labels
            labels_count[label] += 1
        print(labels_count)
    else:
        print("load data done")
        # Option 2: load previously trained weight matrices directly
        n.load("w_input_hidden_kdd.txt", "w_hidden_output_kdd.txt")

    # Step 2: evaluate the trained network on the test set
    labels_count = [0, 0, 0, 0, 0]
    print("start to test")
    test_data_list = dataset.get_data_list(
        "kdd/KDDTest-21-normalization.txt.csv")
    # test_data_list = dataset.get_data_list(
    #     "kdd/KDDTrain+_22Percent-normalization.txt.csv")
    scorecard = []  # one entry per test sample: 1 = correct, 0 = wrong
    right = 0  # running count of correct predictions
    size = len(test_data_list)  # used for progress printing
    for index, record in enumerate(test_data_list):
        label, inputs = get_kdd_data(record)
        result = n.layer_query_result(inputs)
        labels_count[label] += 1
        # Compare the network's prediction with the true label
        if label == result:
            scorecard.append(1)
            right += 1
        else:
            scorecard.append(0)
        # Print progress
        print_process(index, size)
    print(labels_count)
    # Print the accuracy
    print("right rate=", right / len(test_data_list) * 100, "%")
pred_dict = "pred_dict.csv"  # output CSV: image path, predicted x, predicted y
saved_dict = 'saved_dict.csv'  # input CSV with image paths and ground-truth coords
batch_size = 1000
# Predictions are in [0, 1]; scale back to pixel coordinates.
width = 1280
height = 1024

# Take a single test batch and run the saved model over it.
_, ds_test = get_ds(saved_dict, 1, batch_size)
ds_test = ds_test.take(1)
model = create_model()
model.summary()
model.load_weights('mymodel.h5')
all_image_paths, all_x, all_y = get_data_list(saved_dict)
predictions = [model.predict(x, steps=1) for x in ds_test]


def save_xy():
    """Write one "path,x,y" line per prediction to the output CSV."""
    with open(pred_dict, "w") as f:
        for i, pred in enumerate(predictions[0]):
            f.write(all_image_paths[i] + "," + str(int(pred[0] * width)) +
                    "," + str(int(pred[1] * height)) + "\n")
    # BUGFIX: report the actual number of rows written; the last batch can
    # be shorter than `batch_size`, which made the old message wrong.
    print("write {} predictions to {}".format(len(predictions[0]), pred_dict))


# Echo each prediction next to its ground-truth label for eyeballing.
for index, pred in enumerate(predictions[0]):
    print("{}: predicted: ({}, {})\tlabel: ({}, {})".format(
        all_image_paths[index], int(pred[0] * width), int(pred[1] * height),
        all_x[index], all_y[index]))
save_xy()
import tensorflow as tf
from tensorflow import keras
from dataset import PascalVOCDataset, get_data_list
from model import SSD300

# Instantiate the detector and the Pascal VOC training split (batch size 1).
model = SSD300()
train_imgs, train_anns, val_imgs, val_anns, test_imgs, tes_anns = get_data_list(
)
train_dataset = PascalVOCDataset(split='train',
                                 images=train_imgs,
                                 annotations=train_anns,
                                 batch_size=1)
opt = keras.optimizers.Adam(1e-4)

# Custom training loop: 2000 steps, logging the total loss every 20 steps.
for i in range(2000):
    batch_images, batch_locs, batch_cls = next(train_dataset)
    with tf.GradientTape() as tape:
        out_locs, out_cls = model(batch_images)
        loss, _, _, _ = model.loss(batch_locs, batch_cls, out_locs, out_cls)
    grads = tape.gradient(loss, model.trainable_variables)
    opt.apply_gradients(zip(grads, model.trainable_variables))
    if i % 20 == 0:
        print(i, ' LOSS:', loss.numpy())
# Persist the trained model.
model.save('ssd300.h5')