# Example 1
def genData(bld_name, n_train, n_valid, n_lag, T, curr_day):
    """Build normalized train/validation/test windows for one building.

    The whole weekday load series is min-max scaled, then sliding windows
    are cut out of it: each sample's features are the n_lag days preceding
    a target day (flattened to T * n_lag values) and its label is that
    day's T-sample profile.

    Args:
        bld_name: building identifier passed to getWeekday.getWeekdayload.
        n_train: number of training days.
        n_valid: number of validation days (immediately before curr_day).
        n_lag: number of lagged days used as features.
        T: number of load samples per day.
        curr_day: index of the day to forecast (the test day).

    Returns:
        (X_train, y_train, X_valid, y_valid, X_test, y_test,
         min_load, max_load) — the last two allow undoing the scaling.
    """
    load_weekday = getWeekday.getWeekdayload(bld_name)
    max_load = np.max(load_weekday)
    min_load = np.min(load_weekday)
    # NOTE(review): scaling uses the entire series, including days after
    # curr_day — mild look-ahead; kept to preserve original behavior.
    load_weekday = (load_weekday - min_load) / (max_load - min_load)

    n_feat = T * n_lag

    ################## generate data ##########################################
    y_train = np.zeros((n_train, T))
    X_train = np.zeros((n_train, n_feat))
    y_valid = np.zeros((n_valid, T))
    X_valid = np.zeros((n_valid, n_feat))

    train_days = range(curr_day - n_train - n_valid, curr_day - n_valid)
    for row, day in enumerate(train_days):
        start = day * T
        y_train[row, :] = load_weekday[start:start + T]
        X_train[row, :] = load_weekday[start - n_feat:start]

    valid_days = range(curr_day - n_valid, curr_day)
    for row, day in enumerate(valid_days):
        start = day * T
        y_valid[row, :] = load_weekday[start:start + T]
        X_valid[row, :] = load_weekday[start - n_feat:start]

    # Test sample: the current day, predicted from its n_lag preceding days.
    X_test = np.zeros((1, n_feat))
    test_start = curr_day * T
    X_test[0, :] = load_weekday[test_start - n_feat:test_start]
    y_test = load_weekday[test_start:test_start + T]

    return (X_train, y_train, X_valid, y_valid, X_test, y_test,
            min_load, max_load)
# Example 2
        # Accumulate per-day error metrics for the NN forecast.
        MAPE_sum_nn += MAPE_nn
        RMSPE_nn = predict_util.calRMSPE(y_test, y_nn)
        RMSPE_sum_nn += RMSPE_nn
        
        
    # Number of forecast days — presumably one per curr_day in
    # range(n_train + n_lag, n_days - 1); verify against the loop above
    # (its header is outside this view).
    days_sample = n_days - 1 - n_train - n_lag
    MAPE_avg_nn = MAPE_sum_nn / days_sample
    RMSPE_avg_nn = RMSPE_sum_nn / days_sample
    # Average MAPE / RMSPE over all forecast days.
    return (MAPE_avg_nn, RMSPE_avg_nn)


if __name__ == "__main__":
    # Buildings whose weekday load profiles are forecast below.
    bld_names = [
        'combined/1008_EE_CSE',
        'combined/1108_Chem',
        'combined/1111_Fluke',
        'combined/1126_Meany',
        'combined/1143_McMahon',
        'combined/1147_Haggett',
        'combined/1158_McCarty',
        'combined/1163_Port_Bay',
        'combined/1195_Hec_Ed',
        'combined/1201_Gowen',
        'combined/1275_Pool',
        'combined/1306_Physics',
        'combined/1316_BAEEC',
        'combined/1357_Fish_Sc',
        'combined/4057_Foege',
    ]
    T = 96            # load samples per day
    n_train = 50      # training days per forecast
    n_lag = 5         # lagged days used as features
    n_clusters = 2    # k-means cluster count

    results_mape = []
    results_rmspe = []

    # Forecast each building and collect its average error metrics.
    for bld_name in bld_names:
        load_weekday = getWeekday.getWeekdayload(bld_name)
        mape_avg, rmspe_avg = forecast_kMeans_NN(
            bld_name, load_weekday, n_train, n_lag, n_clusters)
        results_mape.append(mape_avg)
        results_rmspe.append(rmspe_avg)

    # Persist one row per building.
    df = pd.DataFrame({'bld_name': bld_names,
                       'nn_MAPE': results_mape,
                       'nn_RMSPE': results_rmspe})
    df.to_csv('NN_24cluster_forecast_results.csv', sep=',', index=False)
# Example 3
def NN_forecast(bld_name, n_train, n_lag, T):
    """Day-ahead load forecast for one building with an RBF neural net.

    For each forecast day, trains the network on the previous n_train days
    (features: the n_lag days preceding each target day, flattened) and
    predicts that day's T-sample load profile. Inputs are min-max scaled
    per forecast day using the training features only.

    Args:
        bld_name: building identifier passed to getWeekday.getWeekdayload.
        n_train: number of training days per forecast.
        n_lag: number of lagged days used as input features.
        T: number of load samples per day.

    Returns:
        (avg_MAPE, avg_RMSPE) averaged over all forecast days.
    """
    ############################ Iteration Parameter ##########################
    Max_iter = 20000   # maximum training iterations per forecast day
    epsilon = 1e-3     # early stop when the loss change falls below this

    ############################ TensorFlow ###################################
    # placeholders: flattened lag window in, one day's profile out
    xs = tf.placeholder(tf.float32, [None, T * n_lag])
    ys = tf.placeholder(tf.float32, [None, T])

    N_neuron = 50
    # hidden layer (radial basis functions)
    (l1, w1, b1) = RBF_layer(xs, T * n_lag, N_neuron)

    # output layer (linear: no activation function)
    (prediction, wo, bo) = add_layer(l1, N_neuron, T, None)

    # loss: scaled mean squared error (T * mean over all outputs),
    # plus L2 regularization on the output layer
    loss = T * tf.reduce_mean(tf.square(ys - prediction))
    loss += 1e-2 * (tf.nn.l2_loss(wo) + tf.nn.l2_loss(bo))

    # training step
    train_step = tf.train.AdamOptimizer().minimize(loss)

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    ###########################################################################

    load_weekday = getWeekday.getWeekdayload(bld_name)
    n_days = int(load_weekday.size / T)

    ################## generate data ##########################################
    MAPE_sum = 0.0
    RMSPR_sum = 0.0

    for curr_day in range(n_train + n_lag, n_days - 1):
        # assemble the training windows for this forecast day
        y_train = np.zeros((n_train, T))
        X_train = np.zeros((n_train, T * n_lag))
        for row, train_day in enumerate(range(curr_day - n_train, curr_day)):
            y_train[row, :] = load_weekday[train_day * T:train_day * T + T]
            X_train[row, :] = load_weekday[train_day * T - n_lag * T:
                                           train_day * T]

        # min-max scale using the training features only
        max_load = np.max(X_train)
        min_load = np.min(X_train)

        # test data: the current day, predicted from its n_lag preceding days
        X_test = np.zeros((1, T * n_lag))
        X_test[0, :] = load_weekday[curr_day * T - n_lag * T:curr_day * T]
        y_test = load_weekday[curr_day * T:curr_day * T + T]

        X_train = (X_train - min_load) / (max_load - min_load)
        y_train = (y_train - min_load) / (max_load - min_load)
        X_test = (X_test - min_load) / (max_load - min_load)
        # y_test stays in original units; the prediction is rescaled below

        # training with early stopping.
        # BUG FIX: last_l is reset for every forecast day; previously it was
        # initialized once before this loop, so the stopping test compared
        # against the PREVIOUS day's final loss and could spuriously stop
        # training for later days after a single step.
        last_l = 10000
        i = 0
        while i < Max_iter:
            (_, l) = sess.run([train_step, loss],
                              feed_dict={xs: X_train, ys: y_train})
            if abs(last_l - l) < epsilon:
                break
            last_l = l
            i += 1

        # predict and undo the min-max scaling
        y_pred = prediction.eval(session=sess, feed_dict={xs: X_test})
        y_pred = y_pred * (max_load - min_load) + min_load

        # update error metric accumulators
        MAPE_sum += predict_util.calMAPE(y_test, y_pred)
        RMSPR_sum += predict_util.calRMSPE(y_test, y_pred)

    # BUG FIX: close the session before resetting the default graph —
    # resetting while a session is still open is undefined behavior
    # according to the TensorFlow documentation.
    sess.close()
    tf.reset_default_graph()

    days_sample = n_days - 1 - n_train - n_lag  # one forecast per curr_day
    return (MAPE_sum / days_sample, RMSPR_sum / days_sample)
# Example 4
def NN_forecast(bld_name, sess):
    """Run the day-ahead NN forecast for one building in an open session.

    Relies on module-level graph tensors (xs, ys, train_step, loss,
    prediction) and constants (T, n_train, n_lag). All data are scaled by
    the series maximum, and the error metrics are computed on the scaled
    values (the prediction is not rescaled).

    Args:
        bld_name: building identifier passed to getWeekday.getWeekdayload.
        sess: an initialized tf.Session for the module-level graph.

    Returns:
        (avg_MAPE, avg_RMSPE) averaged over all forecast days.
    """
    load_weekday = getWeekday.getWeekdayload(bld_name)
    max_load = np.max(load_weekday)
    n_days = int(load_weekday.size / T)

    ################## generate data ##########################################
    mape_total = 0.0
    rmspe_total = 0.0

    for curr_day in range(n_train + n_lag, n_days - 1):
        # assemble training windows for this forecast day
        y_train = np.zeros((n_train, T))
        X_train = np.zeros((n_train, T * n_lag))
        for row, day in enumerate(range(curr_day - n_train, curr_day)):
            day_start = day * T
            y_train[row, :] = load_weekday[day_start:day_start + T]
            X_train[row, :] = load_weekday[day_start - n_lag * T:day_start]

        # test data: current day predicted from its n_lag preceding days
        X_test = np.zeros((1, T * n_lag))
        test_start = curr_day * T
        X_test[0, :] = load_weekday[test_start - n_lag * T:test_start]
        y_test = load_weekday[test_start:test_start + T]

        # scale everything by the series maximum
        X_train = X_train / max_load
        y_train = y_train / max_load
        X_test = X_test / max_load
        y_test = y_test / max_load

        # training with early stopping on small loss change
        Max_iter = 10000   # maximum iteration
        epsilon = 1e-7     # stopping criterion
        last_l = 10000
        for _ in range(Max_iter):
            (_, l) = sess.run([train_step, loss],
                              feed_dict={xs: X_train, ys: y_train})
            if abs(last_l - l) < epsilon:
                break
            last_l = l

        # forecast (still in scaled units) and accumulate errors
        y_pred = prediction.eval(session=sess, feed_dict={xs: X_test})
        mape_total += predict_util.calMAPE(y_test, y_pred)
        rmspe_total += predict_util.calRMSPE(y_test, y_pred)

    days_sample = n_days - 1 - n_train - n_lag
    return (mape_total / days_sample, rmspe_total / days_sample)
# Example 5
def RNN_LSTM(bld_name):
    """Day-ahead load forecast for one building with a stacked LSTM.

    For each forecast day (starting at day 55), trains on the previous
    n_train days, early-stops on the validation loss, predicts the next
    day's T-sample profile, plots prediction vs. ground truth, and prints
    the per-day and average error metrics.

    Args:
        bld_name: building identifier passed to genData / getWeekdayload.
    """
    # Training parameters
    training_steps = 10000
    display_step = 20

    # Network parameters
    num_input = 1          # one load value per RNN time step
    T = 96                 # load samples per day
    num_hidden = 1         # hidden units per LSTM layer
    n_train = 5            # training days per forecast
    n_valid = 1            # validation days per forecast
    n_lag = 2              # lagged days fed to the network
    timesteps = T * n_lag  # sequence length seen by the RNN
    layers = 2             # stacked LSTM layers

    # tf graph inputs
    X = tf.placeholder("float", [None, timesteps, num_input])
    Y = tf.placeholder("float", [None, T])

    # output-projection weights
    weights = tf.Variable(tf.random_normal([T * n_lag, T]))
    biases = tf.Variable(tf.random_normal([T]))

    ###############################################################################
    prediction = RNN(X, weights, biases, num_hidden, timesteps, layers)

    # loss: scaled mean squared error
    loss = T * tf.reduce_mean(tf.square(Y - prediction))
    train_op = tf.train.AdamOptimizer(learning_rate=0.1).minimize(loss)

    # Initialize the variables and start the session
    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    load_weekday = getWeekday.getWeekdayload(bld_name)
    n_days = int(load_weekday.size / T)
    MAPE_sum = 0.0
    RMSPR_sum = 0.0

    for curr_day in range(55, n_days - 1):
        print(curr_day)
        # training / validation / test windows, min-max scaled by genData
        (X_train, y_train, X_valid, y_valid, X_test, y_test, min_load,
         max_load) = genData(bld_name, n_train, n_valid, n_lag, T, curr_day)

        last_loss = 10000.0
        epsilon = 1e-5
        step = 0
        # reshape once before training (the original reshaped every
        # iteration, which was redundant but harmless)
        X_train = X_train.reshape((n_train, timesteps, num_input))
        X_valid = X_valid.reshape((n_valid, timesteps, num_input))
        while step < training_steps:
            # optimization step (backprop)
            sess.run(train_op, feed_dict={X: X_train, Y: y_train})

            # early stopping on the validation-set loss
            l = sess.run(loss, feed_dict={X: X_valid, Y: y_valid})
            if (step + 1) % display_step == 0:
                print('iteration number %d, loss is %2f' % (step + 1, l))
            if abs(last_loss - l) < epsilon:
                print('training stopped at: iteration number %d, loss is %2f' %
                      (step + 1, l))
                break
            last_loss = l
            step += 1

        X_test = X_test.reshape((1, timesteps, num_input))
        y_pred = prediction.eval(session=sess, feed_dict={X: X_test})
        # undo min-max scaling before computing errors
        y_pred = y_pred * (max_load - min_load) + min_load
        y_test = y_test * (max_load - min_load) + min_load

        mape = predict_util.calMAPE(y_test, y_pred)
        rmspe = predict_util.calRMSPE(y_test, y_pred)
        MAPE_sum += mape
        RMSPR_sum += rmspe

        # plot the day's forecast against the ground truth
        xaxis = range(T)
        plt.step(xaxis, y_pred.flatten(), 'r')
        plt.step(xaxis, y_test.flatten(), 'g')
        plt.show()
        print('MAPE: %.2f, RMSPE: %.2f' % (mape, rmspe))

    sess.close()
    # BUG FIX: reset the default graph only after closing the session.
    # Previously tf.reset_default_graph() was called INSIDE the day loop
    # while the session was still active and subsequently used, which the
    # TensorFlow documentation describes as undefined behavior.
    tf.reset_default_graph()

    days_sample = n_days - 1 - 55  # number of forecast days
    MAPE_sum = MAPE_sum / days_sample
    RMSPR_sum = RMSPR_sum / days_sample
    print('AVERAGE MAPE: %.2f, RMSPE: %.2f' % (MAPE_sum, RMSPR_sum))