Example #1
0
def main():
    """Train the linear-regression model with several learning rates and
    plot each run's RMSE loss curve for comparison."""
    # BUG FIX: numpy.nan is not a valid threshold (raises ValueError on
    # modern NumPy); sys.maxsize is the documented way to disable array
    # summarization when printing.
    import sys
    numpy.set_printoptions(threshold=sys.maxsize)

    model = Linear_Regression()

    # Number of preceding hours used to build each feature vector.
    feature_hours = 1

    data = read_data()
    features = extract_features(data, feature_hours)
    target = extract_target(data, feature_hours, pm25_row=9)

    legends = []
    for lr in (0.01, 0.02, 0.05, 0.1):
        loss_data = model.train(features, target, epochs=5000, lr=lr)
        legends.append(f'lr = {lr}')
        plt.plot(loss_data)

    plt.legend(legends, fontsize=16)
    plt.xlabel('Epoch', fontsize=20)
    plt.ylabel('Root Mean Squared Error (RMSE)', fontsize=20)

    plt.xticks(fontsize=20)
    plt.yticks(fontsize=20)

    plt.show()
Example #2
0
def main():
    """Compare a model trained on the PM2.5 feature alone against one
    trained on all features."""
    # BUG FIX: numpy.nan is rejected as a threshold on modern NumPy
    # (ValueError); sys.maxsize disables array summarization instead.
    import sys
    numpy.set_printoptions(threshold=sys.maxsize)

    # BUG FIX: the original referenced `feature_hours` without defining it
    # (NameError unless it existed at module level). Defined here with the
    # value used by the sibling examples — TODO confirm intended value.
    feature_hours = 1

    data = read_data()
    features = extract_features(data, feature_hours)
    target = extract_target(data, feature_hours, pm25_row=9)

    # presumably columns 9, 27, 45, ... of the flattened feature matrix are
    # the PM2.5 readings (row 9 of each 18-row hourly block) — verify
    # against extract_features.
    run('pm25_only', features[..., 9::18], target)
    run('all', features, target)
Example #3
0
def main():
    """Sweep the L2 regularization term, report each model's final training
    loss, and write a test-set submission file per setting."""
    # BUG FIX: numpy.nan is not a valid threshold (raises ValueError on
    # modern NumPy); sys.maxsize disables array summarization as documented.
    import sys
    numpy.set_printoptions(threshold=sys.maxsize)

    # Number of preceding hours used to build each feature vector.
    feature_hours = 1

    data = read_data()
    features = extract_features(data, feature_hours)
    target = extract_target(data, feature_hours, pm25_row=9)

    for regularization_term in (0, 0.01, 0.1, 10, 100, 1000):
        # A fresh model per lambda so runs do not share learned weights.
        model = Linear_Regression(regularization_term)

        loss_data = model.train(features, target, epochs=10000)
        print('lambda={}, loss={}'.format(regularization_term, loss_data[-1]))
        inference(model, feature_hours, './data/test.csv',
                  f'./submission_q3_{regularization_term}.txt')
Example #4
0
def main():
    """Train the linear-regression model on the full data set and persist
    its parameters to 'model.npz'."""
    # BUG FIX: numpy.nan is not a valid threshold (raises ValueError on
    # modern NumPy); sys.maxsize disables array summarization as documented.
    import sys
    numpy.set_printoptions(threshold=sys.maxsize)

    model = Linear_Regression()

    # Number of preceding hours used to build each feature vector.
    feature_hours = 1

    data = read_data()
    features = extract_features(data, feature_hours)
    target = extract_target(data, feature_hours, pm25_row=9)

    # cross_validation(model, features, target, k=12)

    model.train(features, target)

    # Save bias, weights, and the feature window so inference can rebuild
    # matching feature vectors.
    numpy.savez('model.npz', b=model.b, w=model.W, feature_hours=feature_hours)
Example #5
0
# Linear regression on the PM2.5 data implemented as a one-unit Keras Dense
# layer, trained with Adagrad — mirrors the hand-rolled models in the other
# examples.
# BUG FIX: `sys` and `os` were used below without being imported in this
# snippet; imported here so the script runs stand-alone.
import os
import sys

import numpy
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adagrad

# Make the parent directory importable so pm25_data is found.
sys.path.append(os.path.dirname(os.path.dirname(__file__)))  # flake8: noqa

from pm25_data import read_data, extract_features, extract_target

# BUG FIX: numpy.nan is not a valid threshold (raises ValueError on modern
# NumPy); sys.maxsize disables array summarization as documented.
numpy.set_printoptions(threshold=sys.maxsize)

# Number of preceding hours used to build each feature vector.
feature_hours = 1

data = read_data()
features = extract_features(data, feature_hours)
target = extract_target(data, feature_hours, pm25_row=9)

# A single Dense unit with no activation is exactly a linear model; zero
# initialization matches the hand-rolled implementations.
model = Sequential()
linear_layer = Dense(units=1,
                     input_dim=18,
                     kernel_initializer='zeros',
                     bias_initializer='zeros')
model.add(linear_layer)
optimizer = Adagrad(lr=0.1, epsilon=0)
model.compile(loss='mse', optimizer=optimizer)

# Training
for step in range(10000):
    # presumably target is (n, 1); .T[0] flattens it to the (n,) label
    # vector train_on_batch expects — confirm against extract_target.
    cost = model.train_on_batch(features, target.T[0])
    weights, biases = linear_layer.get_weights()