# Assumed imports for this snippet; `func` is a project-local helper module
# (assumed importable under that name).
import torch
from tqdm import tqdm

import func


def _gpt2_eval_the_model(model, eval_dataloader, config):
    """Evaluate `model` on `eval_dataloader`; returns (mean MC loss, MC accuracy)."""
    device = config["device"]
    model.eval()
    eval_loss, eval_accuracy = 0, 0
    nb_eval_steps, nb_eval_examples = 0, 0
    for batch in tqdm(eval_dataloader, desc="Evaluating"):
        batch = tuple(t.to(device) for t in batch)
        input_ids, mc_token_ids, lm_labels, mc_labels = batch
        with torch.no_grad():
            # First call (with labels) returns the losses; second call (without
            # labels) returns the logits and the cached `pre` output.
            _, mc_loss = model(input_ids, mc_token_ids, lm_labels, mc_labels)
            _, mc_logits, pre = model(input_ids, mc_token_ids)
        mc_logits = mc_logits.detach().cpu().numpy()
        mc_labels = mc_labels.to('cpu').numpy()
        tmp_eval_accuracy = func.accuracy(mc_logits, mc_labels)

        eval_loss += mc_loss.mean().item()
        eval_accuracy += tmp_eval_accuracy
        nb_eval_examples += input_ids.size(0)
        nb_eval_steps += 1

    eval_loss = eval_loss / nb_eval_steps
    eval_accuracy = eval_accuracy / nb_eval_examples
    return eval_loss, eval_accuracy
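# Note: `func.accuracy` is not shown in this snippet. Because its per-batch values
# are summed and later divided by `nb_eval_examples`, it presumably returns a count
# of correct predictions rather than a ratio. A minimal sketch under that assumption:
import numpy as np

def accuracy(out, labels):
    # Number of rows whose argmax prediction matches the label (a raw count).
    return np.sum(np.argmax(out, axis=1) == labels)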
# This block appears to run once per trial `t` for the current `tool` (the
# enclosing loops are not shown): draw a training sample, fit a linear
# regression, then evaluate on the held-out items.
trainLength = int(len(allData) * 0.6)
testLength = len(allData) - trainLength

# Sample the training set with replacement; items never drawn become test data.
trainData = []
for i in range(trainLength):
    trainData.append(random.choice(allData))
[xTrainData, yTrainData] = func.parseInfo(trainData)
w_temp2[t] = linearRegression.LinearRegression(xTrainData, yTrainData)

## ---- Get the error norm and score on the training data ---- ##
TrainToolsDict_temp[t] = func.getPredictedScoreError(xTrainData, yTrainData, w_temp2[t])

testData = [item for item in allData if item not in trainData]
[xTestData, yTestData] = func.parseInfo(testData)
accuracy_lst.append(func.accuracy(w_temp2[t], xTestData, yTestData))
# func.reportAccuracy(tool, accuracy_lin[tool])

## --- Get the mean of accuracies --- ##
accuracy_lin[tool] = sum(accuracy_lst) / len(accuracy_lst) * 100
func.reportAccuracy(tool, accuracy_lin[tool])

## --- Get the mean of weights --- ##
w_temp2_array = [w_temp2[i] for i in w_temp2.keys()]
w_temp2_t = numpy.transpose(w_temp2_array)
w[tool] = [
    sum(w_temp2_t[i]) / len(w_temp2_t[i]) for i in range(0, len(w_temp2_t))
]
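# Side note: the transpose-and-average idiom above is equivalent to a column-wise
# mean of the per-trial weight vectors. A self-contained sketch with made-up
# weight vectors (stand-ins for the w_temp2[t] entries above):
import numpy

w_trials = {0: [1.0, 2.0, 3.0], 1: [3.0, 4.0, 5.0]}
w_mean = numpy.mean([w_trials[i] for i in w_trials], axis=0)
print(w_mean)  # -> [2. 3. 4.]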
# Assumed imports for this main block; creatdata, one_hot_code, softmax_cls and
# accuracy are defined elsewhere in the same module.
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split

if __name__ == "__main__":
    # Load the data and show a scatter plot of the raw points
    x, y = creatdata()
    x = np.c_[np.ones(x.shape[0]), x]  # prepend a bias column of ones
    plt.scatter(x[:, 1], x[:, 2], c=y)
    plt.show()
    y = one_hot_code(y)

    # Split the dataset
    x_train, x_val, y_train, y_val = train_test_split(x, y, test_size=0.2)

    # Use the classifier
    cls = softmax_cls()
    y_val = np.argmax(y_val, axis=1)  # back to integer labels for evaluation
    w, err = cls.fit(x_train, y_train, learning_rate=0.1, epochs=1000, L2=0.1)

    # Predict on the validation set and report accuracy
    y_pred = cls.predict(x_val)
    y_pred = np.argmax(y_pred, axis=1)
    print(accuracy(y_val, y_pred))

    # Plot the true labels, the correct (1) / wrong (0) predictions,
    # and the training error curve
    compare = (y_val == y_pred) * 1
    plt.scatter(x_val[:, 1], x_val[:, 2], c=y_val)
    plt.show()
    plt.scatter(x_val[:, 1], x_val[:, 2], c=compare)
    plt.legend(["1", "0"])
    plt.show()
    plt.plot(err)
    plt.show()

    temp = np.c_[y_val, y_pred]  # labels and predictions side by side, kept for inspection
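# `one_hot_code` is not shown above; since np.argmax(y_val, axis=1) is used to
# recover the labels, it presumably maps integer class labels to one-hot rows.
# A minimal sketch under that assumption:
import numpy as np

def one_hot_code(y):
    y = np.asarray(y, dtype=int).ravel()
    codes = np.zeros((y.size, y.max() + 1))
    codes[np.arange(y.size), y] = 1.0  # set the column matching each label
    return codes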