Example #1
def init_with_data(self,
                   is_load=True,
                   max_outgoing=10,
                   max_airports=500,
                   silent=False):
    # Delegate loading to LoadData, then print a summary unless silent
    ld = LoadData(self, is_load, max_outgoing, max_airports, silent)
    ld.load()
    silent or self.print_info()
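# A minimal sketch of the class context the method above appears to assume.
# The class name AirportGraph and the print_info() body are assumptions; only
# init_with_data itself comes from the example.
from load_data import LoadData

class AirportGraph:
    def print_info(self):
        print('graph initialised from LoadData')

    def init_with_data(self,
                       is_load=True,
                       max_outgoing=10,
                       max_airports=500,
                       silent=False):
        ld = LoadData(self, is_load, max_outgoing, max_airports, silent)
        ld.load()
        silent or self.print_info()

graph = AirportGraph()
graph.init_with_data(max_outgoing=20, silent=False)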
Example #2
from sklearn.preprocessing import StandardScaler
import numpy as np
from load_data import LoadData
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
from torch.utils.data import TensorDataset, DataLoader
import csv

# Debug helper: print the type and shape of an array-like object
def s(_data):
    print(type(_data), np.shape(_data))


load_data = LoadData()
load_data.load()

# Load the data
# iris = datasets.load_iris()
# data = iris.data
# target = iris.target
target = load_data.target
data = load_data.data
with open('_target.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    for i in target:
        writer.writerow([i])
with open('_data.csv', 'w', newline='') as f:
    writer = csv.writer(f)
    writer.writerows(data)
# Split into training and validation data
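# The snippet above ends at the "split" comment. A minimal sketch of how it
# could continue, assuming scikit-learn's train_test_split; the test_size
# value is an assumption, not part of the original example.
from sklearn.model_selection import train_test_split

x_train, x_valid, y_train, y_valid = train_test_split(
    data, target, test_size=0.25, shuffle=True)
s(x_train)
s(x_valid)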
Example #3
# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
import torch.optim as optim
from load_data import LoadData
import numpy as np
from torch.utils.data import TensorDataset
from sklearn.model_selection import train_test_split

# Debug helper
def info(string, _data):
    print(string + ' => type: ' + str(type(_data)) + ', shape: ' + str(np.shape(_data)))

# Load the data
data = LoadData()
data.load()
_DIR_HERE = data.dir_here
# Split into training and validation data
x_train, x_valid, y_train, y_valid = train_test_split(data.input_train, data.correct_train, shuffle=True)

input_data = torch.from_numpy(data.input_train.astype(np.float32))
correct_data = torch.from_numpy(data.correct_train.astype(np.float32))
info('input_data', input_data)
info('correct_data', correct_data)
print(correct_data[:100])

# Define the network
n_in = np.shape(data.input_train)[1]
n_mid = n_in * 4
n_out = data.n_out
model = nn.Sequential(
    nn.Linear(n_in, n_mid),
    # assumed completion: the original snippet is truncated mid-definition
    nn.ReLU(),
    nn.Linear(n_mid, n_out))
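# A minimal training-loop sketch for the model above. The loss function,
# optimizer, batch size and epoch count are assumptions, not taken from the
# original example; MSELoss assumes correct_data has the same shape as the
# model output (e.g. one-hot targets).
from torch.utils.data import DataLoader

dataset = TensorDataset(input_data, correct_data)
loader = DataLoader(dataset, batch_size=32, shuffle=True)
criterion = nn.MSELoss()
optimizer = optim.Adam(model.parameters(), lr=1e-3)

for epoch in range(10):
    epoch_loss = 0.0
    for x, y in loader:
        optimizer.zero_grad()
        loss = criterion(model(x), y)
        loss.backward()
        optimizer.step()
        epoch_loss += loss.item()
    print(f'epoch {epoch}: loss {epoch_loss / len(loader):.4f}')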
Example #4
import holidays
import matplotlib.pyplot as plt
import pandas as pd
from load_data import LoadData

h = holidays.Germany()


def plot_df(df, **kwargs):
    dfy = df[df["Datum"].dt.year.between(2018, 2019)]
    # dfy = dfy[dfy["Datum"].dt.weekday < 5]
    # dfy = dfy[dfy.apply(lambda x: x["Datum"] not in h, axis=1)]
    dfy = dfy[dfy.apply(lambda x: x["Datum"] in h or x["Datum"].weekday() > 4,
                        axis=1)]
    dg = dfy.groupby(dfy["Datum"].dt.month)["Zaehlerstand"].mean()
    plt.plot(dg, **kwargs)


pendlerstrecken = [1, 2, 4, 5, 6, 13]   # commuter routes
freizeitstrecken = [7, 9, 10, 11, 12]   # leisure routes
files = LoadData.load(freizeitstrecken)
avg_df = pd.DataFrame({"Datum": [], "Zaehlerstand": []})

for key, data in files.items():
    avg_df = pd.concat([avg_df, data])
    plot_df(data,
            label=LoadData.NAMINGS[key],
            linestyle="dotted",
            linewidth=1.5)

plot_df(avg_df, label="Durchschnitt", linestyle="solid", linewidth=3.0, c="k")

plt.ylim(0)
plt.xlim(1, 12)
plt.grid(True)
plt.legend()
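# The original snippet ends without rendering the figure; showing or saving it
# is an assumed final step, and the file name below is only illustrative.
plt.show()
# plt.savefig('monatsmittel.png', dpi=150)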