import numpy as np
import tensorflow as tf

import CNN

def minibatches(inputs=None, targets=None, batch_size=None, shuffle=False):
    """Yield successive (inputs, targets) minibatches, optionally shuffled."""
    assert len(inputs) == len(targets)
    if shuffle:
        indices = np.arange(len(inputs))
        np.random.shuffle(indices)
    for start_idx in range(0, len(inputs) - batch_size + 1, batch_size):
        if shuffle:
            excerpt = indices[start_idx:start_idx + batch_size]
        else:
            excerpt = slice(start_idx, start_idx + batch_size)
        yield inputs[excerpt], targets[excerpt]

# Placeholders: image batches of shape (N, w, h, c) and integer class labels.
# w, h and c (image width, height and channel count) are defined earlier.
x = tf.placeholder(tf.float32, shape=[None, w, h, c], name='x')
y_ = tf.placeholder(tf.int32, shape=[None, ], name='y_')

logits = CNN.CNNlayer(x)
loss = tf.losses.sparse_softmax_cross_entropy(labels=y_, logits=logits)
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)
correct_prediction = tf.equal(tf.cast(tf.argmax(logits, 1), tf.int32), y_)
acc = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

# Training and test data
saver = tf.train.Saver(max_to_keep=3)
max_acc = 0
f = open('train/acc.txt', 'w')

n_epoch = 10
batch_size = 64
sess = tf.InteractiveSession()
sess.run(tf.global_variables_initializer())
for epoch in range(n_epoch):
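    # A minimal sketch of one way to complete this epoch loop, assuming
    # x_train/y_train and x_val/y_val NumPy arrays were prepared earlier
    # (hypothetical names, not defined in this snippet): run the optimizer
    # over shuffled training minibatches, then measure validation accuracy
    # and checkpoint whenever it improves.
    for x_batch, y_batch in minibatches(x_train, y_train, batch_size, shuffle=True):
        sess.run(train_op, feed_dict={x: x_batch, y_: y_batch})

    val_acc, n_val_batch = 0.0, 0
    for x_batch, y_batch in minibatches(x_val, y_val, batch_size, shuffle=False):
        val_acc += sess.run(acc, feed_dict={x: x_batch, y_: y_batch})
        n_val_batch += 1
    val_acc /= max(n_val_batch, 1)

    # Log the per-epoch accuracy and keep only the best checkpoints
    # ('train/model.ckpt' is an assumed save path).
    f.write('epoch %d, val acc %.4f\n' % (epoch + 1, val_acc))
    if val_acc > max_acc:
        max_acc = val_acc
        saver.save(sess, 'train/model.ckpt', global_step=epoch + 1)

f.close()
sess.close()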
import numpy as np
import torch

import CNN
import data_loader
import train

# test_data_length = [len(item) for item in test_x]
# max_length = np.max(test_data_length)
# print(max_length)

# Pad every sentence to a fixed length of 38 tokens and convert labels to tensors.
dev_x_tensor = data_loader.pad(dev_x, word2id, "-pad-", 38)
dev_y_tensor = torch.from_numpy(np.array(dev_y))
test_x_tensor = data_loader.pad(test_x, word2id, "-pad-", 38)
test_y_tensor = torch.from_numpy(np.array(test_y))
# print(test_x_tensor)

plot_dev = []
plot_test = []
x = []

# Text CNN classifier: convolution filter sizes 3, 4 and 5; two output classes.
model = CNN.CNNlayer(len(word2id), 100, 2, [3, 4, 5], output_size=2)
with torch.no_grad():
    for i in range(100):
        # model = pooling.Pooling(len(word2id), 100, 2)
        model.eval()
        # Evaluate the checkpoint saved after epoch i + 1.
        model.load_state_dict(torch.load(model_url + "/" + str(i + 1) + ".pt"))
        correct = 0
        total = 0
        outputs1 = model(dev_x_tensor.long())
        dev_right_num = train.accuracy_num(outputs1, dev_y_tensor)
        dev_acc = float(dev_right_num) / len(dev_y_tensor) * 100
        outputs2 = model(test_x_tensor.long())
        test_right_num = train.accuracy_num(outputs2, test_y_tensor)
        test_acc = float(test_right_num) / len(test_y_tensor) * 100
        plot_dev.append(dev_acc)
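        # (Sketch) The loop presumably also records the test accuracy and the
        # checkpoint index for plotting; these two lines are an assumed completion:
        plot_test.append(test_acc)
        x.append(i + 1)

# A minimal sketch of plotting the dev/test accuracy curves collected above,
# assuming matplotlib is available (the plotting code is not part of the
# original snippet):
import matplotlib.pyplot as plt

plt.plot(x, plot_dev, label='dev accuracy')
plt.plot(x, plot_test, label='test accuracy')
plt.xlabel('checkpoint (epoch)')
plt.ylabel('accuracy (%)')
plt.legend()
plt.show()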