finRes = []
for i in range(4):
    X = np.zeros((len(data[i][0]), 400))
    y = np.zeros(len(data[i][0]))
    for j in tqdm.trange(len(data[i][0])):
        temp = produceWordEmbd(data[i][0][j])
        X[j] = temp
        y[j] = data[i][1][j]
    Res = []
    kf = KFold(n_splits=5)
    c = CorrelationPearson()
    for train_index, test_index in kf.split(X):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        model = regModel
        model.fit(X_train, y_train)
        model_predicted = model.predict(X_test)
        Res.append(c.result(y_test, model_predicted))
        print(regMethod + "- Pearson Coefficient for " + emotions[i] + ": ",
              c.result(y_test, model_predicted))

    print(
        regMethod + ":Avg of pearson-coefficients for the " + emotions[i] +
        " : ",
        sum(Res) / 5)
    finRes.append(sum(Res) / 5)

print("--------------------------------------------")
print("Final PC for " + regMethod, sum(finRes) / 4)
print("--------------------------------------------")

sys.stdout = orig_stdout
f.close()
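For reference, the 5-fold evaluation pattern used above can be reproduced on its own with standard libraries. The sketch below is a minimal, self-contained version: synthetic arrays stand in for the word embeddings and labels, Ridge stands in for regModel, and scipy.stats.pearsonr replaces the CorrelationPearson helper; none of these substitutions come from the original script.

# Minimal sketch of the same K-fold + Pearson-coefficient evaluation loop.
# Synthetic data and Ridge are placeholders for the embeddings and regModel.
import numpy as np
from scipy.stats import pearsonr
from sklearn.linear_model import Ridge
from sklearn.model_selection import KFold

rng = np.random.default_rng(0)
X = rng.normal(size=(200, 400))                      # 400-dim "embeddings"
y = 0.5 * X[:, 0] + rng.normal(scale=0.1, size=200)

scores = []
for train_index, test_index in KFold(n_splits=5).split(X):
    model = Ridge(alpha=1.0)
    model.fit(X[train_index], y[train_index])
    predicted = model.predict(X[test_index])
    scores.append(pearsonr(y[test_index], predicted)[0])

print("Avg Pearson coefficient:", sum(scores) / len(scores))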
Example #2
    return model

finRes = []
for i in range(4):
    X = np.zeros((len(data[i][0]), 400))
    y = np.zeros(len(data[i][0]))
    for j in tqdm.trange(len(data[i][0])):
        temp = produceWordEmbd(data[i][0][j])
        X[j] = temp
        y[j] = data[i][1][j]
    Res = []
    kf = KFold(n_splits=5)
    c = CorrelationPearson()
    for train_index, test_index in kf.split(X):
        X_train, X_test = X[train_index], X[test_index]
        y_train, y_test = y[train_index], y[test_index]
        model = KerasRegressor(build_fn=NN_model, epochs=100, batch_size=5, verbose=0)
        model.fit(X_train, y_train)
        model_predicted = model.predict(X_test)
        Res.append(c.result(y_test, model_predicted))
        print(regMethod + "- Pearson Coefficient for " + emotions[i] + ": ",
              c.result(y_test, model_predicted))

    print(regMethod + ":Avg of pearson-coefficients for the " + emotions[i] + " : ",
          sum(Res) / 5)
    finRes.append(sum(Res) / 5)

print("--------------------------------------------")
print("Final PC for "+ regMethod ,sum(finRes)/4)
print("--------------------------------------------")
	
sys.stdout = orig_stdout
f.close()
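The definition of NN_model referenced by KerasRegressor is cut off above; only its final return model survives. A plausible build function, assuming a 400-dimensional embedding input and a single regression output, might look like the following; the layer sizes, activations, and optimizer are guesses, not the original configuration.

# Hypothetical sketch of the truncated NN_model build function used by
# KerasRegressor above; architecture and optimizer are assumptions.
from keras.models import Sequential
from keras.layers import Dense

def NN_model():
    model = Sequential()
    model.add(Dense(64, input_dim=400, activation='relu'))   # 400-dim embeddings in
    model.add(Dense(16, activation='relu'))
    model.add(Dense(1, activation='linear'))                  # single regression target
    model.compile(loss='mean_squared_error', optimizer='adam')
    return model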
Example #3
        kf = KFold(n_splits=5)
        c = CorrelationPearson()
        for train_index, test_index in kf.split(X):
            # print("TRAIN:", train_index, "TEST:", test_index)
            X_train, X_test = X[train_index], X[test_index]
            y_train, y_test = y[train_index], y[test_index]
            Rmodel = regModels[z]
            Rmodel.fit(X_train, y_train)
            Rmodel_predicted = Rmodel.predict(X_test)
            """
            y_test_new = get_pos_half(y_test, y_test)
            Rmodel_predicted_new = get_pos_half(Rmodel_predicted, y_test)
            y_test_new = y_test_new[y_test_new != 0]
            Rmodel_predicted_new = Rmodel_predicted_new[Rmodel_predicted_new != 0]
            """
            Res.append(c.result(y_test, Rmodel_predicted))
            print("Feature used: " + feature_names[g] + ", Regression model: " +
                  regMethods[z] + " ---- Pearson Coefficient for " + emotions[i] + ": ",
                  c.result(y_test, Rmodel_predicted))
        finRes.append(sum(Res) / 5)
        print("--------------------------------------------")
        print("Avg PC for " + emotions[i] + " with " + feature_names[g] + " and " +
              regMethods[z], sum(finRes))
        print("--------------------------------------------")
        avg_pc[z][i] += sum(finRes)
 """
 X_new = hstack([features[0], features[1]]).todense()
 Res2 = []
 print("Now working for bigram+unigram:++++++++++++++++++++")
 for train_index, test_index in kf.split(X_new):
     #print("TRAIN:", train_index, "TEST:", test_index)
     X_train, X_test = X_new[train_index], X_new[test_index]
     y_train, y_test = y[train_index], y[test_index]
     Rmodel = regModels[z]
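The snippet above references features[0], features[1], and hstack without showing how they are built. A hedged sketch of one way such unigram/bigram count matrices could be produced and stacked is shown below; the tiny corpus and the CountVectorizer settings are illustrative assumptions, not taken from the original code.

# Hypothetical construction of the unigram/bigram feature matrices combined
# with scipy.sparse.hstack in the commented-out block above.
from scipy.sparse import hstack
from sklearn.feature_extraction.text import CountVectorizer

corpus = ["I feel great today", "this is terrible news", "what a joyful day"]

unigrams = CountVectorizer(ngram_range=(1, 1)).fit_transform(corpus)
bigrams = CountVectorizer(ngram_range=(2, 2)).fit_transform(corpus)
features = [unigrams, bigrams]

X_new = hstack([features[0], features[1]]).todense()   # combined design matrix
print(X_new.shape)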
Example #4
    for k in tqdm.trange(sum_arr[j - 1], sum_arr[j]):
        dev_arrays[k - sum_arr[j - 1]] = model[emotions[j] + str(k)]
        dev_labels[k - sum_arr[j - 1]] = data[j][1][k - sum_arr[j - 1]]
    print("Training a Neural Network with ")
    mlp = MLPRegressor(solver='sgd',
                       alpha=1e-5,
                       hidden_layer_sizes=(11, 6, 5),
                       random_state=1,
                       activation='relu',
                       learning_rate='adaptive')
    mlp.fit(train_arrays, train_labels)
    mlp_predicted = mlp.predict(dev_arrays)
    # print(dev_labels - mlp_predicted)
    c = CorrelationPearson()
    print("pearson-coefficient for " + emotions[i] + ": ",
          c.result(dev_labels, mlp_predicted))

regMethods = [
    "Neural Nets", "Decision Tree", "Random Forests", "K-NN", "ADA-Boost",
    "Gradient-Boost"
]
regModels = [
    MLPRegressor(solver='lbfgs',
                 alpha=1e-5,
                 hidden_layer_sizes=(9, 5, 7),
                 random_state=1,
                 activation='tanh',
                 learning_rate='adaptive'),
    DecisionTreeRegressor(random_state=0),
    RandomForestRegressor(max_depth=2, random_state=0),
    KNeighborsRegressor(n_neighbors=2),
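The regModels list is truncated above after KNeighborsRegressor, while regMethods also names "ADA-Boost" and "Gradient-Boost". The missing entries were presumably scikit-learn's AdaBoost and gradient-boosting regressors; a sketch of how the list might be completed follows, with placeholder hyperparameters rather than the original ones.

# Plausible completion of the truncated regModels list, inferred from the
# regMethods names; hyperparameters are placeholders, not the original values.
from sklearn.ensemble import AdaBoostRegressor, GradientBoostingRegressor

regModels_tail = [
    AdaBoostRegressor(n_estimators=50, random_state=0),
    GradientBoostingRegressor(n_estimators=100, random_state=0),
]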
Example #5
# pip install correlation-pearson

from correlation_pearson.code import CorrelationPearson

X_Speed = [0.73, 0.81, 1.53, 1.97, 2.29, 2.86]
X_Energy = [1.507, 1.235, 0.654, 0.864, 0.656, 0.490]

correlation = CorrelationPearson()
print('Correlation coefficient of Speed and Energy: ' +
      str(correlation.result(X_Speed, X_Energy)))

Y_Power = [1.0, 1.0, 1.1, 1.4, 1.5, 1.7]
Y_Energy = [0.654, 1.235, 1.507, 0.490, 0.656, 0.864]

print('Correlation coefficient of Power and Energy: ' +
      str(correlation.result(Y_Power, Y_Energy)))
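As a quick sanity check (not part of the original example), the same values can be reproduced with NumPy, whose corrcoef function returns the Pearson correlation matrix of its inputs.

# Cross-check against NumPy: corrcoef(x, y)[0, 1] is the Pearson coefficient.
import numpy as np

print('NumPy check (Speed vs Energy):', np.corrcoef(X_Speed, X_Energy)[0, 1])
print('NumPy check (Power vs Energy):', np.corrcoef(Y_Power, Y_Energy)[0, 1])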