Example #1
    def __init__(self, nr_inp, nr_out):
        self.n_nodes_input = nr_inp
        self.n_nodes_output = nr_out
        self.x = tf.placeholder('float', [None, nr_inp])
        self.y = tf.placeholder('float', [None, nr_out])
        reading_from_file.read_from_file_time_series_norwegian(
            self.n_nodes_input, self.n_nodes_output)
        reading_from_file.normalize_time_series()

        # The data-driven batch size is immediately overridden below; keep the fixed value.
        #self.batch_size = reading_from_file.get_test_data_size_time_series()
        self.batch_size = 1000
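
The constructor above only defines the placeholders and loads the data; a minimal sketch of how such TF1-style placeholders are typically consumed is shown below, feeding a batch through a session with feed_dict. The one-layer model and random batch are hypothetical stand-ins, not part of the original class.

import numpy as np
import tensorflow as tf

nr_inp, nr_out = 5, 5                                    # mirrors the constructor arguments
x = tf.placeholder('float', [None, nr_inp])
y = tf.placeholder('float', [None, nr_out])

# Hypothetical single dense layer standing in for the network built elsewhere in the class.
weights = tf.Variable(tf.random_normal([nr_inp, nr_out]))
bias = tf.Variable(tf.zeros([nr_out]))
prediction = tf.matmul(x, weights) + bias
loss = tf.reduce_mean(tf.square(prediction - y))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    batch_x = np.random.rand(1000, nr_inp)               # dummy batch of batch_size rows
    batch_y = np.random.rand(1000, nr_out)
    print(sess.run(loss, feed_dict={x: batch_x, y: batch_y}))
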
Example #2
def test_persistence_time():
    reading_from_file.read_from_file_time_series_norwegian(5, 5)
    reading_from_file.normalize_time_series()
    test_output = reading_from_file.get_test_output_time_series()
    test_input = reading_from_file.get_test_input_time_series()
    result = persistence_test.persistence_test(test_input, test_output)
    print("Result:" + str(result))
    with open('output_files/raggovidda_persistence_test_time_norwegian.csv',
              'w') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=';',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)

        spamwriter.writerow(result)
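
persistence_test.persistence_test is project code that is not shown here; a persistence baseline usually just repeats the last observed value across the forecast horizon. The sketch below illustrates that idea, assuming the input rows end with the most recent observation and that the reported result is a per-step mean squared error.

import numpy as np
from sklearn.metrics import mean_squared_error

def naive_persistence(test_input, test_output):
    """Repeat the last observed value for every horizon step and score it (assumed metric: MSE)."""
    test_input = np.asarray(test_input)
    test_output = np.asarray(test_output)
    last_seen = test_input[:, -1:]                                   # most recent value per sample
    prediction = np.repeat(last_seen, test_output.shape[1], axis=1)  # naive forecast for all steps
    return mean_squared_error(test_output, prediction, multioutput='raw_values')
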
Example #3
def run_svr():
    #reading_from_file.read_from_file_time_series(5, 5)
    reading_from_file.read_from_file_time_series_norwegian(5, 5)
    reading_from_file.normalize_time_series()

    test_input = reading_from_file.get_test_input_time_series()
    test_output = reading_from_file.get_test_output_time_series()
    train_input = reading_from_file.get_train_input_time_series()
    train_output = reading_from_file.get_train_output_time_series()
    result = svr.predict_stuff(train_input, train_output, test_input,
                               test_output)

    with open('output_files/raggovidda_svr_norwegian.csv', 'w') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=';',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        spamwriter.writerow(result)
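
svr.predict_stuff is a project helper whose internals are not shown; assuming it wraps scikit-learn's SVR and returns one test-set error per forecast step, a plausible sketch looks like this (the kernel choice and error metric are assumptions):

import numpy as np
from sklearn.svm import SVR
from sklearn.metrics import mean_squared_error

def predict_with_svr(train_input, train_output, test_input, test_output):
    """Fit one SVR per output column and return the per-column MSE on the test set."""
    train_output = np.asarray(train_output)
    test_output = np.asarray(test_output)
    errors = []
    for col in range(train_output.shape[1]):
        model = SVR(kernel='rbf')                        # kernel is an assumption
        model.fit(train_input, train_output[:, col])
        prediction = model.predict(test_input)
        errors.append(mean_squared_error(test_output[:, col], prediction))
    return errors
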
Example #4
    def __init__(self, nr_inp, nr_out):
        self.n_nodes_input = nr_inp
        self.n_noes_actial_out = nr_out
        self.n_nodes_output = 1
        self.x = tf.placeholder('float', [None, nr_inp])
        self.y = tf.placeholder('float', [None, self.n_nodes_output])

        reading_from_file.read_from_file_time_series_norwegian(
            self.n_nodes_input, self.n_noes_actial_out)
        reading_from_file.normalize_time_series()
        #self.batch_size = reading_from_file.get_test_data_size_time_series()
        self.batch_size = 1000

        self.train_outputs = reading_from_file.get_train_output_time_series()
        length = len(self.train_outputs)

        self.columns = list(zip(*self.train_outputs))
        self.output_values = self.columns[0]

        self.output_values = np.asarray(self.output_values).reshape(length, 1)
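
The zip/reshape steps above pull out the first output column as an (N, 1) array; the same result can be obtained with a single NumPy slice, as in this small sketch with dummy data.

import numpy as np

train_outputs = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]     # dummy rows in place of the file reader

# Equivalent to zipping the rows, taking column 0 and reshaping to (len, 1):
output_values = np.asarray(train_outputs)[:, :1]
print(output_values.shape)                               # (3, 1)
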
Example #5
def test_knn_time_new():
    #reading_from_file.read_from_file_time_series(5, 5)
    reading_from_file.read_from_file_time_series_norwegian(5, 5)
    reading_from_file.normalize_time_series()

    test_input = reading_from_file.get_test_input_time_series()
    test_output = reading_from_file.get_test_output_time_series()

    train_input = reading_from_file.get_train_input_time_series()
    train_output = reading_from_file.get_train_output_time_series()

    with open('output_files/raggovidda_knn_norwegian.csv', 'w') as csvfile:
        spamwriter = csv.writer(csvfile,
                                delimiter=';',
                                quotechar='|',
                                quoting=csv.QUOTE_MINIMAL)
        for k in range(1, 30):
            result = knn_improved.knn_new(train_input, train_output,
                                          test_input, test_output, k)
            print("Result:" + str(result) + " For K = " + str(k))
            spamwriter.writerow(result)
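
knn_improved.knn_new is not shown here; assuming it fits a k-nearest-neighbours regressor and reports the per-step test error, an equivalent sketch with scikit-learn would be:

import numpy as np
from sklearn.neighbors import KNeighborsRegressor
from sklearn.metrics import mean_squared_error

def knn_mse(train_input, train_output, test_input, test_output, k):
    """Fit a k-NN regressor and return the per-column MSE on the test set (assumed metric)."""
    model = KNeighborsRegressor(n_neighbors=k)
    model.fit(train_input, train_output)
    prediction = model.predict(test_input)
    return mean_squared_error(np.asarray(test_output), prediction,
                              multioutput='raw_values')
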
Example #6
def lstm_keras(nr_input, nr_output):
    reading_from_file.read_from_file_time_series_norwegian(nr_input, nr_output)
    reading_from_file.normalize_time_series()

    train_inp = reading_from_file.get_train_input_time_series()
    train_out = reading_from_file.get_train_output_time_series()

    x_train, y_train, x_test, y_test = lstm.load_data(
        'test_files/Raggovidda_2.csv', 5, True)
    #print("X:", x_test)
    #print("Y:", y_test)

    print(len(y_test))
    print(len(x_test))

    train_inp = np.asarray(train_inp)
    len_inp = len(train_inp)
    train_inp = train_inp.reshape(len_inp, 5, 1)

    columns = list(zip(*train_out))
    train_out = columns[0]
    train_out = np.reshape(train_out, len(train_out))

    test_inp = reading_from_file.get_test_input_time_series()
    test_out = reading_from_file.get_test_output_time_series()

    columns = list(zip(*test_out))
    test_out = columns[0]
    test_out = np.reshape(test_out, len(test_out))

    test_inp = np.asarray(test_inp)   # convert to an ndarray before reshaping, as for the training input
    len_inp = len(test_inp)
    test_inp = test_inp.reshape(len_inp, 5, 1)

    #test_out = np.asarray(test_out)
    #test_out = test_out.reshape(len_inp, 5, 1)

    model = Sequential()

    model.add(LSTM(input_dim=1, output_dim=5, return_sequences=True))
    model.add(Dropout(0.2))

    model.add(LSTM(100, return_sequences=False))
    model.add(Dropout(0.2))

    model.add(Dense(output_dim=1))
    model.add(Activation('linear'))

    start = time.time()
    model.compile(loss='mse', optimizer='rmsprop')
    print('compilation time: ', time.time() - start)

    model.fit(x_train,
              y_train,
              batch_size=2000,
              validation_split=0.05,
              epochs=5)

    predictions = lstm.predict_sequences_multiple(model, x_test, 5, 5)
    pred = np.asarray(predictions)
    y_val = np.asarray(y_test)
    y_val = y_val.reshape(len(pred), 5)

    #print ("Y_val: ",y_val)
    #print ("Pred: ", pred)

    print(len(y_val))
    print(len(pred))

    result = mean_squared_error(pred, y_val, multioutput='raw_values')

    print(result)
    return result
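
The input_dim/output_dim keywords used above come from the old Keras 1 layer API. On current Keras the same two-layer LSTM stack would be declared roughly as in the sketch below; layer sizes, dropout rates and optimizer are carried over from the code above, and input_shape=(5, 1) matches the (timesteps=5, features=1) reshape used for the inputs.

from keras.models import Sequential
from keras.layers import LSTM, Dense, Dropout, Activation

model = Sequential()
# First LSTM layer: 5 units, fed sequences of shape (timesteps=5, features=1).
model.add(LSTM(5, input_shape=(5, 1), return_sequences=True))
model.add(Dropout(0.2))
# Second LSTM layer collapses the sequence to a single vector.
model.add(LSTM(100, return_sequences=False))
model.add(Dropout(0.2))
# Single linear output, matching the one-step target used above.
model.add(Dense(1))
model.add(Activation('linear'))
model.compile(loss='mse', optimizer='rmsprop')
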