def qwk_score(est, features, labels):
    # Scorer-style helper: predict with the fitted estimator, snap the raw
    # predictions onto the observed label range, then score with QWK.
    raw_pred = est.predict(features)
    pred = preds_to_rank(raw_pred, np.min(labels), np.max(labels))
    return quadratic_weighted_kappa(labels, pred)
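Because qwk_score takes (estimator, features, labels), it already matches scikit-learn's scorer-callable protocol and can be passed directly to cross_val_score. A minimal usage sketch, assuming preds_to_rank and quadratic_weighted_kappa are defined as above; model, X and y are placeholders, and Ridge is just a stand-in regressor:

from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_score

model = Ridge()
# each fold is scored by calling qwk_score(fitted_model, X_val, y_val)
scores = cross_val_score(model, X, y, cv=5, scoring=qwk_score)
print("QWK: %.3f (+/- %.3f)" % (scores.mean(), scores.std()))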
Example #2
            if i % FLAGS.evaluation_interval == 0 or i == FLAGS.epochs:
                # test on training data
                train_preds = []
                for start in range(0, n_train, test_batch_size):
                    end = min(n_train, start+test_batch_size)
                    
                    # every example in the batch shares the same memory
                    batched_memory = [memory] * (end - start)
                    preds, _ = test_step(trainE[start:end], batched_memory)
                    train_preds.extend(preds)
                # shift predictions back into the original score range
                train_preds = np.add(train_preds, min_score)
                train_kappa_score = quadratic_weighted_kappa(
                    train_scores, train_preds, min_score, max_score)
                # test on eval data
                eval_preds = []
                for start in range(0, n_eval, test_batch_size):
                    end = min(n_eval, start+test_batch_size)
                    
                    batched_memory = [memory] * (end - start)
                    preds, _ = test_step(evalE[start:end], batched_memory)
                    eval_preds.extend(preds)

                eval_preds = np.add(eval_preds, min_score)
                eval_kappa_score = quadratic_weighted_kappa(
                    eval_scores, eval_preds, min_score, max_score)
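The two test loops above differ only in which split they batch over; a small helper generator could factor that pattern out. This is a hedged sketch: iter_batches is a hypothetical name, not part of the original code.

def iter_batches(n_examples, batch_size):
    # Yield (start, end) index pairs covering the data in fixed-size chunks.
    for start in range(0, n_examples, batch_size):
        yield start, min(n_examples, start + batch_size)

# e.g. the training pass above would become:
# for start, end in iter_batches(n_train, test_batch_size):
#     preds, _ = test_step(trainE[start:end], [memory] * (end - start))
#     train_preds.extend(preds)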
Example #3
def predict(m, test_dl):
    m.eval()
    result = []
    # iterate over the test batches; the loop ends when the loader is exhausted
    for x, y in test_dl:
        out = m(V(x))
        result.append(F.sigmoid(out).cpu().data.numpy())
    predictions = []
    for ix in result:
        for iy in ix:
            # map the sigmoid output in [0, 1] back to the score range (here 2..12)
            predictions.append(10 * iy + 2)
    predictions = np.rint(np.stack(predictions))
    return np.reshape(predictions, (-1))


# column 0 holds the gold scores, column 1 the essay text
df_test = pd.read_csv('data/essay1/test.csv', header=None)
ds_test = pd.DataFrame({0: 0, 1: df_test[1]})
ans = predict(m, ds_test)
print(quadratic_weighted_kappa(df_test[0].values, ans))

#df_test[4]=ans
#df_test.to_csv('data/task1_results.csv',header=None,index=False)
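The hard-coded 10 * iy + 2 above maps a sigmoid output in [0, 1] back to an assumed score range of 2..12. A hedged sketch of the same mapping as a reusable helper; to_scores and its default range are illustrative, not part of the original code:

import numpy as np

def to_scores(probs, min_score=2, max_score=12):
    # Rescale sigmoid outputs from [0, 1] to integer scores and clip to the valid range.
    scores = np.rint(np.asarray(probs) * (max_score - min_score) + min_score)
    return np.clip(scores, min_score, max_score).astype(int)

# e.g. inside predict(): return to_scores(np.concatenate(result).reshape(-1))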
Example #4
# fix random seed for reproducibility
seed = 7
numpy.random.seed(seed)
estimators = []
estimators.append(('standardize', StandardScaler()))
estimators.append(('mlp', KerasRegressor(build_fn=baseline_model, epochs=200, batch_size=50, verbose=0)))
pipeline = Pipeline(estimators)
kfold = KFold(n_splits=10, shuffle=True, random_state=seed)
results = cross_val_score(pipeline, X, Y, cv=kfold)
print("Wider: %.2f (%.2f) MSE" % (results.mean(), results.std()))
#prediction


# X_new =scaler.fit_transform(X_new)
pipeline.fit(X,Y)
ynew = pipeline.predict(X_new)


# show the expected scores next to the rounded predictions
ynew = np.around(ynew, decimals=0)
for i in range(len(ynew)):
    print("Expected=%s, Predicted=%s" % (Y_new[i], ynew[i]))
qwk = quadratic_weighted_kappa(Y_new, ynew)
print(qwk)

end = time.time()
print(end - start)
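Instead of scoring the pipeline with the wrapper's default loss and checking kappa only afterwards, the cross-validation itself can be scored on QWK. The sketch below swaps in scikit-learn's cohen_kappa_score with quadratic weights (not the quadratic_weighted_kappa helper used elsewhere on this page) and rounds the regressor's continuous outputs before scoring:

import numpy as np
from sklearn.metrics import cohen_kappa_score, make_scorer

def rounded_qwk(y_true, y_pred):
    # Round continuous regression outputs to integer ratings before scoring.
    return cohen_kappa_score(np.asarray(y_true, dtype=int),
                             np.rint(y_pred).astype(int),
                             weights='quadratic')

qwk_scorer = make_scorer(rounded_qwk)
# results = cross_val_score(pipeline, X, Y, cv=kfold, scoring=qwk_scorer)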
Example #5
def get_metric(preds, targs):
    targs = targs.cpu().numpy()
    # squash raw outputs to [0, 1], rescale to the original score range
    # (info is presumably (min_score, score_range)), and round to integer ratings
    preds = F.sigmoid(V(preds)).data.cpu().numpy() * info[1] + info[0]
    preds = np.reshape(np.rint(preds), -1)
    return quadratic_weighted_kappa(targs, preds)
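All five examples delegate to a quadratic_weighted_kappa helper whose implementation is not shown here. For reference, a minimal sketch of the metric for integer ratings, compatible with both the two-argument and four-argument call styles above (the _sketch suffix marks it as an illustration, not the original helper):

import numpy as np

def quadratic_weighted_kappa_sketch(y_true, y_pred, min_rating=None, max_rating=None):
    y_true = np.asarray(y_true, dtype=int)
    y_pred = np.asarray(y_pred, dtype=int)
    if min_rating is None:
        min_rating = int(min(y_true.min(), y_pred.min()))
    if max_rating is None:
        max_rating = int(max(y_true.max(), y_pred.max()))
    n = max_rating - min_rating + 1
    # observed co-occurrence matrix of (true, predicted) ratings
    observed = np.zeros((n, n))
    for t, p in zip(y_true, y_pred):
        observed[t - min_rating, p - min_rating] += 1
    # quadratic disagreement weights: 0 on the diagonal, 1 at the extreme corners
    idx = np.arange(n)
    weights = (idx[:, None] - idx[None, :]) ** 2 / (n - 1) ** 2
    # expected co-occurrences under independent marginal distributions
    expected = np.outer(observed.sum(axis=1), observed.sum(axis=0)) / observed.sum()
    # kappa = 1 - weighted observed disagreement / weighted expected disagreement
    return 1.0 - (weights * observed).sum() / (weights * expected).sum()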