Example #1
def dataframe(df_path, subject_id, subject_template, in_mat, out_mat): # overwrite_dat=OVERWRITE_DF
  import os # Imports must be made inside the Node function.
  import pandas as pd
  from utils import compute_rmse, vec

  if not os.path.isfile(df_path):
    print('File %s does not exist. Please re-run the initialisation script.' % df_path)
    return
  df = pd.read_csv(df_path, index_col=0)
  cols_h = ['subject','template','matfile_name','px','py','pz','translation','rotation','total_length','resolution','noise','rmse']
  cols_r = list(df)
  if cols_h != cols_r:
    print('Column names do not match. Please check.')
    return
  matfile_id = os.path.basename(in_mat)
  px, py, pz, vec_length = vec(in_mat,out_mat)
  rx, ry, rz = None, None, None # rotation components (not extracted here)
  trans, rot = matfile_id.split('_')[0], matfile_id.split('_')[1]
  vox_dim = '1' # TODO: derive the voxel dimension from the registration parameters
  noise = None # noise level is not computed in this node, but the 'noise' column expects a value
  rmse = compute_rmse(in_mat,out_mat)

  df_tmp = pd.DataFrame([[subject_id,subject_template,matfile_id,px,py,pz,trans,rot,vec_length,vox_dim,noise,rmse]], columns=cols_r)

  df = pd.concat([df, df_tmp], ignore_index=True) # DataFrame.append was removed in pandas 2.0
  df.to_csv(df_path)
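
The node above imports `compute_rmse` and `vec` from a project-local `utils` module that this page does not show. The sketch below is only a plausible reconstruction, assuming both arguments are paths to text files holding 4x4 affine transformation matrices (the file format, the `_load_affine` helper, and the exact metrics are assumptions, not the project's actual code):

import numpy as np


def _load_affine(path):
    # Assumed format: a whitespace-separated 4x4 affine matrix (e.g. an FSL .mat file).
    return np.loadtxt(path)


def vec(in_mat, out_mat):
    # Difference between the translation components of the two transforms,
    # returned as (px, py, pz) plus the length of that vector.
    d = _load_affine(out_mat)[:3, 3] - _load_affine(in_mat)[:3, 3]
    return d[0], d[1], d[2], float(np.linalg.norm(d))


def compute_rmse(in_mat, out_mat):
    # Element-wise RMSE between the two affine matrices.
    a, b = _load_affine(in_mat), _load_affine(out_mat)
    return float(np.sqrt(np.mean((a - b) ** 2)))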
Example #2
from collections import OrderedDict

import numpy as np
from sklearn.preprocessing import StandardScaler
# fit_SVR, KT_LUPI, RobustKT_LUPI and util are project-local helpers (not shown).


def run_experiments(X_train, y_train, X_test, y_test, X_star_train,
                    X_star_test):
    # Normalise the training data; the same scaler is reused for the test set.
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    # Optional: normalise the privileged features as well.
    # scaler = StandardScaler()
    # scaler.fit(X_star_train)
    # X_star_train = scaler.transform(X_star_train)
    # X_star_test = scaler.transform(X_star_test)
    results = OrderedDict()

    if True:  # baseline SVR, plus SVR with the privileged features appended
        y_predicted = fit_SVR(X_train, y_train, X_test)
        results["svm"] = [util.compute_rmse(y_test, y_predicted)]

        X_train_mod = np.column_stack((X_train, X_star_train))
        X_test_mod = np.column_stack((X_test, X_star_test))
        scaler = StandardScaler()
        scaler.fit(X_train_mod)
        X_train_mod = scaler.transform(X_train_mod)
        X_test_mod = scaler.transform(X_test_mod)
        # print(X_train_mod.shape)
        y_predicted = fit_SVR(X_train_mod, y_train, X_test_mod)
        results["svm_pi"] = [util.compute_rmse(y_test, y_predicted)]

    if True:  # knowledge-transfer LUPI
        y_predicted = KT_LUPI(X_train, X_star_train, y_train, X_test)

        results["svm_kt_lupi"] = [util.compute_rmse(y_test, y_predicted)]

    if True:  # robust knowledge-transfer LUPI
        y_predicted = RobustKT_LUPI(X_train, X_star_train, y_train, X_test)
        results["svm_robust_kt_lupi"] = [
            util.compute_rmse(y_test, y_predicted)
        ]

    print(results)
    return results
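
`fit_SVR` and `util.compute_rmse` are project-local helpers that do not appear on this page. A minimal sketch of what they might look like with scikit-learn (the function names match the calls above, but the kernel and hyperparameters are assumptions):

import numpy as np
from sklearn.svm import SVR


def fit_SVR(X_train, y_train, X_test):
    # Fit an RBF-kernel support vector regressor and predict the test set.
    model = SVR(kernel='rbf', C=1.0, epsilon=0.1)
    model.fit(X_train, y_train)
    return model.predict(X_test)


def compute_rmse(y_true, y_pred):
    # Root mean squared error between true and predicted targets.
    return float(np.sqrt(np.mean((np.asarray(y_true) - np.asarray(y_pred)) ** 2)))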
Example #3
    def evaluate(self, X_test, y_test):
        """Evaluate the pipeline on X_test / y_test and return the RMSE."""
        y_pred = self.pipeline.predict(X_test)
        rmse = compute_rmse(y_pred, y_test)
        return rmse
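
This method only makes sense inside a class that owns a fitted `self.pipeline`. A self-contained sketch of such a class (the `Trainer` name, the pipeline contents, and the local `compute_rmse` are assumptions for illustration):

import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import StandardScaler


def compute_rmse(y_pred, y_true):
    # Root mean squared error between predictions and targets.
    return float(np.sqrt(np.mean((np.asarray(y_pred) - np.asarray(y_true)) ** 2)))


class Trainer:
    def __init__(self):
        self.pipeline = make_pipeline(StandardScaler(), LinearRegression())

    def fit(self, X, y):
        self.pipeline.fit(X, y)
        return self

    def evaluate(self, X_test, y_test):
        """Evaluate the pipeline on X_test / y_test and return the RMSE."""
        y_pred = self.pipeline.predict(X_test)
        return compute_rmse(y_pred, y_test)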
Example #4
    # normalization of the training data
    scaler = StandardScaler()
    scaler.fit(X_train)
    X_train = scaler.transform(X_train)
    X_test = scaler.transform(X_test)
    # Optional: normalise the privileged features as well.
    # scaler = StandardScaler()
    # scaler.fit(X_star_train)
    # X_star_train = scaler.transform(X_star_train)
    # X_star_test = scaler.transform(X_star_test)
    if True:  # baseline SVR, plus SVR with the privileged features appended
        y_predicted = fit_SVR(X_train, y_train, X_test)
        print("SVM Error Rate:")
        rmseSVM[i] = util.compute_rmse(y_test, y_predicted)
        print(rmseSVM[i])

        X_train_mod = np.column_stack((X_train, X_star_train))
        X_test_mod = np.column_stack((X_test, X_star_test))
        scaler = StandardScaler()
        scaler.fit(X_train_mod)
        X_train_mod = scaler.transform(X_train_mod)
        X_test_mod = scaler.transform(X_test_mod)
        # print(X_train_mod.shape)
        y_predicted = fit_SVR(X_train_mod, y_train, X_test_mod)
        print("SVM with extra features Error Rate:")
        rmseSVM_PI[i] = util.compute_rmse(y_test, y_predicted)
        print(rmseSVM_PI[i])

    if True:
Example #5
def main():
    logger = logging.getLogger(__name__)

    ## Load config file
    with open("config.json", "r") as f:
        config = json.load(f)

    ## Cleaning TensorBoard events
    clean_events(config)

    ## Load data
    data_loader = DataLoader(config)
    X_train, X_test, y_train, y_test = data_loader.get_data()

    ## Create placeholders
    X = tf.placeholder(tf.float64, [None, 13])
    # y = tf.placeholder(tf.float32, [None, 2])
    y = tf.placeholder(tf.float64, [None])

    ## Create model and outputs
    net = SimpleNet(config)
    net_output = net.forward(X)
    y_pred, log_sigma = net_output[..., 0], net_output[..., 1]
    # Track mean of log_sigma across batch of data
    tf.summary.scalar("mean_log_sigma", tf.reduce_mean(log_sigma))

    ## Define metrics based on experiment
    # Loss
    type_exp = '_'.join(config['exp_name'].split('_')[:2])
    if type_exp == 'vanilla_loss':
        loss = compute_loss(y_true=y, y_pred=y_pred)
    elif type_exp == 'loss_bnn':
        loss = compute_loss_bnn(y_true=y, y_pred=y_pred, log_sigma=log_sigma)

    # Root Mean Squared Error (RMSE)
    rmse = compute_rmse(y_true=y, y_pred=y_pred)

    ## Define optimizer
    optimizer = net.train_optimizer(loss)

    ## Merging all summaries
    merged_summary = tf.summary.merge_all()

    ## Launching the execution graph for training
    with tf.Session() as sess:
        # Initializing all variables
        sess.run(tf.global_variables_initializer())

        # Create train and test writer
        train_writer = tf.summary.FileWriter("./tensorboard/" +
                                             config["exp_name"] + "/train/")
        test_writer = tf.summary.FileWriter("./tensorboard/" +
                                            config["exp_name"] + "/test/")

        # Visualizing the Graph
        train_writer.add_graph(sess.graph)

        for epoch in range(config["trainer"]["num_epochs"]):
            for batch in range(config["trainer"]["num_iter_per_epoch"]):
                # Yield next batch of data
                batch_X, batch_y = next(
                    data_loader.get_next_batch(
                        config["trainer"]["batch_size"]))
                # Run the optimizer
                sess.run(optimizer, feed_dict={X: batch_X, y: batch_y})
                # Compute train loss and rmse
                train_loss, train_rmse = sess.run([loss, rmse],
                                                  feed_dict={
                                                      X: batch_X,
                                                      y: batch_y
                                                  })

            if (epoch % config["trainer"]["writer_step"] == 0):
                # Run the merged summary and write it to disk
                s = sess.run(merged_summary,
                             feed_dict={
                                 X: batch_X,
                                 y: batch_y
                             })

                train_writer.add_summary(s, (epoch + 1))

                # Evaluate test data
                test_loss, test_rmse = sess.run([loss, rmse],
                                                feed_dict={
                                                    X: X_test,
                                                    y: y_test
                                                })
                s = sess.run(merged_summary, feed_dict={X: X_test, y: y_test})

                test_writer.add_summary(s, (epoch + 1))

            if (epoch % config["trainer"]["display_step"] == 0):
                print("Epoch: {:03d},".format(epoch + 1), \
                         "train_loss= {:03f},".format(train_loss), \
                         "train_rmse= {:03f},".format(train_rmse), \
                         "test_loss= {:03f},".format(test_loss), \
                         "test_rmse={:03f}".format(test_rmse)
                         )

        print("Training complete")
Example #6
    testPred = clf.predict(test_data)
    return testPred


# Create a random dataset
rng = np.random.RandomState(1)
X = np.sort(200 * rng.rand(600, 1) - 100, axis=0)
y = np.array([np.pi * np.sin(X).ravel(), np.pi * np.cos(X).ravel()]).T
y += (0.5 - rng.rand(*y.shape))

X_train, X_test, y_train, y_test = train_test_split(
    X, y, train_size=400, test_size=200, random_state=4)

if False:  # disabled experiment
    y_predicted = fit_SVR(X_train, y_train[:,1], X_test)
    rmse = util.compute_rmse(y_test[:,1], y_predicted)
    print("SVM Result:")
    print(rmse)

if False:  # disabled experiment

    X_mod = np.column_stack((X_train, y_train[:,1]))
    X_test_mod = np.column_stack((X_test, y_test[:,1]))
    y_predicted = fit_SVR(X_mod, y_train[:,0], X_test_mod)
    rmse = util.compute_rmse(y_test[:,0], y_predicted)
    print("SVM with extra features Result:")
    print(rmse)

if True:
    y_predicted = KT_LUPI_GPR(X_train, y_train[:,0], y_train[:,1], X_test)
    rmse = util.compute_rmse(y_test[:,0], y_predicted)
Example #7
    def test_prediction(self):
        """Evaluate test predictions and store the RMSE and R-squared."""
        pred = super(TodayStreams, self).test_prediction()
        y_true = self.enc_test[self.label]
        self.rmse = utils.compute_rmse(y_true, pred)
        self.r_squared = utils.compute_r_squared(y_true, pred)
Example #8
# (fragment: the earlier branches of this if/elif chain are not shown)
elif pseudo_inverse_method == 'qrpivot':
    output_weights = utils.pseudoinv_qrpivot(H.T) * T.T
else:
    output_weights = utils.pseudoinv_svd(H.T) * T.T
    print('Unknown Pseudo-Inverse method selected! Using default Moore-Penrose Pseudo-Inverse method instead...')

t1 = time()
train_time = t1 - t0 # time to train the ELM


##################################################################
################## CALCULATE TRAINING ACCURACY ###################
Y = np.mat(H.T * output_weights).T # Y: the actual output of the training data
Y = np.squeeze(np.asarray(Y)) # Squeeze the matrix to a one-dimensional array
# print(np.squeeze(Y), x_sample)
if elm_type == REGRESSION:
    train_accuracy = utils.compute_rmse(T, Y)
    print('Train Accuracy = ' + str(train_accuracy))


fig = plt.figure('TRAIN REGRESSION')
ax = fig.add_subplot(111)
ax.plot(x, y_true, 'g-', linewidth=2, label='True')
ax.scatter(x_sample, y_sample1, s=50, facecolors='none', edgecolors='b', linewidths=0.5, label='Train Data1')
ax.scatter(x_sample, y_sample2, s=50, facecolors='none', edgecolors='r', linewidths=0.5, label='Train Data2')
ax.plot(x_sample, Y, 'y--', linewidth=2, label='Train Output')
plt.xlabel('X')
plt.ylabel('y')
plt.title('Regression with ELM with N data = ' + str(len(x_sample)))
plt.legend()
plt.grid()
# plt.show()
Example #9
import numpy as np
import utils # project-local helpers (activation and pseudo-inverse functions)
from time import time


def elm(train_data, test_data, elm_type, num_hidden_neuron, activation_function, pseudo_inverse_method):
    """
    ELM taken from http://www.ntu.edu.sg/home/egbhuang/elm_random_hidden_nodes.html
    """
    REGRESSION = 0
    CLASSIFIER = 1
    ##################################################################
    ######################## LOAD TRAINING DATA SET ##################
    T = np.mat(train_data[:,0].T)
    P = np.mat(train_data[:,1:np.size(train_data,1)].T)
    #print 'T: ', T.shape
    #print 'P: ', P.shape


    ##################################################################
    ######################## LOAD TESTING DATA SET ###################
    TVT = np.mat(test_data[:,0].T)
    TVP = np.mat(test_data[:,1:np.size(test_data,1)].T)

    # Initialize NUMBER of NEURON, TEST DATA, and TRAIN DATA
    num_train_data = np.size(P,1)
    num_test_data = np.size(TVP,1)
    num_input_neuron = np.size(P,0)

    if elm_type != REGRESSION:
        print('Not implemented yet!')


    ##################################################################
    ##################### CALCULATE WEIGHT AND BIAS ##################
    t0 = time()

    # Random generate input weight w_i and bias b_i of hidden neuron
    input_weights = np.mat(np.random.rand(num_hidden_neuron, num_input_neuron) * 2 - 1)
    bias_hidden_neuron = np.mat(np.random.rand(num_hidden_neuron, 1))
    temp_H = np.mat(input_weights * P)
    ind = np.mat(np.ones((1, num_train_data)))
    bias_matrix = bias_hidden_neuron * ind # Extend the bias matrix to match the dimension of H
    temp_H = temp_H + bias_matrix


    ##################################################################
    ############ CALCULATE HIDDEN NEURON OUTPUT MATRIX H #############
    if activation_function == 'sigmoid':
        # equal to MATLAB code -> H = 1 ./ (1 + exp(-tempH));
        H = np.mat(np.divide(1, (1 + np.exp(np.multiply(-1, temp_H))))) # element wise divide and multiplication
    elif activation_function == 'sine':
        H = np.mat(np.sin(temp_H))
    elif activation_function == 'hardlim':
        H = utils.hardlim(temp_H)
    elif activation_function == 'tribas':
        H = utils.triangular_bf(temp_H)
    elif activation_function == 'radbas':
        H = utils.rad_bf(temp_H)
    else:
        H = np.mat(np.divide(1, (1 + np.exp(np.multiply(-1, temp_H))))) # element wise divide and multiplication
        print('Unknown Activation Function selected! Using default sigmoid as Activation Function instead...')

    ##################################################################
    ################ CALCULATE OUTPUT WEIGHTS beta_i #################
    if pseudo_inverse_method == 'svd':
        output_weights = utils.pseudoinv_svd(H.T) * T.T
    elif pseudo_inverse_method == 'geninv':
        output_weights = utils.pseudoinv_geninv(H.T) * T.T
    elif pseudo_inverse_method == 'qrpivot':
        output_weights = utils.pseudoinv_qrpivot(H.T) * T.T
    else:
        output_weights = utils.pseudoinv_svd(H.T) * T.T
        print('Unknown Pseudo-Inverse method selected! Using default Moore-Penrose Pseudo-Inverse method instead...')

    t1 = time()
    train_time = t1 - t0 # time to train the ELM
    print('Train Time = ' + str(train_time))


    ##################################################################
    ################## CALCULATE TRAINING ACCURACY ###################
    Y = np.mat(H.T * output_weights).T # Y: the actual output of the training data
    print('Y_ELM: ', Y)
    Y = np.squeeze(np.asarray(Y)) # Squeeze matrix to one dimension array
    # print np.squeeze(Y), x_sample
    train_accuracy = 0
    if elm_type == REGRESSION:
        train_accuracy = utils.compute_rmse(T, Y)
        print('Train Accuracy = ' + str(train_accuracy))

    ##################################################################
    ############### CALCULATE OUTPUT OF TESTING INPUT ################
    t2 = time()
    temp_H_test = input_weights * TVP
    ind = np.mat(np.ones((1, num_test_data)))
    bias_matrix = bias_hidden_neuron * ind # Extend the bias matrix to match the dimension of H
    temp_H_test = temp_H_test + bias_matrix
    if activation_function == 'sigmoid':
        # equal to MATLAB code -> H = 1 ./ (1 + exp(-tempH));
        H_test = np.mat(np.divide(1, (1 + np.exp(np.multiply(-1, temp_H_test))))) # element wise divide and multiplication
    elif activation_function == 'sine':
        H_test = np.mat(np.sin(temp_H_test))
    elif activation_function == 'hardlim':
        H_test = utils.hardlim(temp_H_test)
    elif activation_function == 'tribas':
        H_test = utils.triangular_bf(temp_H_test)
    elif activation_function == 'radbas':
        H_test = utils.rad_bf(temp_H_test)
    else:
        H_test = np.mat(np.divide(1, (1 + np.exp(np.multiply(-1, temp_H_test))))) # element wise divide and multiplication
        print('Unknown Activation Function selected! Using default sigmoid as Activation Function instead...')

    TY = np.mat(H_test.T * output_weights).T # TY: the actual output of the testing data
    print('TY_ELM: ', TY)
    t3 = time()
    test_time = t3 - t2 # ELM time to predict the whole testing data
    print('Test Time = ' + str(test_time))
    TY = np.squeeze(np.asarray(TY)) # Squeeze matrix to one dimension array
    # print np.squeeze(Y), x_sample


    ##################################################################
    ################## CALCULATE TESTING ACCURACY ####################
    test_accuracy = 0
    if elm_type == REGRESSION:
        test_accuracy = utils.compute_rmse(TVT, TY)
        print('Test Accuracy = ' + str(test_accuracy))

    if elm_type == CLASSIFIER:
        print('Not implemented yet!')

    return Y, TY, train_accuracy, test_accuracy
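
A hypothetical invocation of `elm` with synthetic data, assuming the `utils` helpers exist. As in the loading code above, the target sits in column 0 and the features in the remaining columns:

import numpy as np

rng = np.random.RandomState(0)
X = rng.uniform(-1, 1, size=(300, 3))
t = np.sin(X).sum(axis=1)                 # smooth synthetic target
data = np.column_stack((t, X))            # column 0 = target, columns 1.. = features
train, test = data[:200], data[200:]

Y, TY, train_acc, test_acc = elm(train, test, elm_type=0,
                                 num_hidden_neuron=50,
                                 activation_function='sigmoid',
                                 pseudo_inverse_method='svd')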