# --- One training epoch over a 60000-sample dataset (MNIST-sized) ---
# Relies on names defined elsewhere in this file: `data` (batch provider),
# `net` (model exposing forward/backward/Gradient), `batch_size`,
# `learning_rate`, `epoch`, and `np` (numpy).

# Accumulators. The val_* / *_loss counters are initialized here but not
# updated in this chunk -- presumably used by a validation/loss pass later
# in the file; TODO confirm before removing.
batch_acc = 0
val_acc = 0
val_loss = 0

# Running training statistics for the current reporting window.
total = 0
train_acc = 0
train_loss = 0

# Report interval, hoisted out of the loop (it is constant).
mod = 100

for i in range(60000 // batch_size):
    imgs, labs = data.next_batch(batch_size)

    # Train: forward pass, backprop on the (softmax output - one-hot label)
    # error signal, then apply the gradient step with L2 weight decay.
    sf_out = net.forward(imgs)
    net.backward(sf_out - labs)
    net.Gradient(alpha=learning_rate, weight_decay=0.01)

    # Statistics: count correct predictions in this batch. Vectorized
    # argmax comparison replaces the original per-sample Python loop.
    train_acc += int(np.sum(np.argmax(sf_out, axis=1) == np.argmax(labs, axis=1)))
    total += batch_size

    if i % mod == 0:
        # Note: at i == 0 this reports the accuracy of the first batch only.
        print("epoch=%d batchs=%d train_acc: %.4f " % (epoch, i, train_acc / total))
        # Reset the window so the next report covers fresh batches.
        train_acc = 0
        total = 0