# --- Data loading -----------------------------------------------------------
# Resolve the dataset directory relative to this script so the current working
# directory does not matter.
base_path = os.path.dirname(os.path.abspath(__file__))

# Join component-wise (portable) instead of embedding "/" inside one segment,
# and normalize away the "..".
data_path = os.path.normpath(os.path.join(base_path, "..", "data", "solar"))

# `Dataset` and `timesteps` are defined elsewhere in the project.
dataset = Dataset(data_path, timesteps)

# Single-pass conversion: np.array(..., dtype=np.float32) builds the float32
# array directly, whereas np.array(...).astype(np.float32) allocates an
# intermediate array and then copies it.
train_data = np.array(dataset.train_data, dtype=np.float32)
train_target = np.array(dataset.train_target, dtype=np.float32)

# Reshape to (N, num_input, timesteps); N is inferred from the data size.
train_data = train_data.reshape(-1, num_input, timesteps)

trainloader = DataLoader(
    energyDataset(train_data, train_target),
    batch_size=batch_size,
    shuffle=True,
    num_workers=0,  # load batches in the main process
)

# --- Model & optimizer ------------------------------------------------------
# One hidden width per temporal level of the TCN.
channel_sizes = levels * [hidden_units]

# Build the network and move it to the GPU in a single expression
# (nn.Module.cuda() returns the module itself).
model = TCN(num_input, n_classes, channel_sizes, kernel_size=ksize, dropout=dropout).cuda()

optimizer = Adam(model.parameters(), lr=learning_rate)

# --- Training loop ----------------------------------------------------------
pbar = tqdm(range(1, epoches + 1))

model.train()

losses = []  # accumulated training losses

for epoch in pbar:
    # Decay the learning rate by 0.7 every 100 epochs.  Update the existing
    # optimizer's param groups in place: recreating Adam from scratch (as the
    # original code did) silently discards its running first/second-moment
    # estimates, destabilising training at every decay step.
    if epoch % 100 == 0:
        learning_rate *= 0.7
        for param_group in optimizer.param_groups:
            param_group["lr"] = learning_rate

    train_loss = 0.
seed = 6783          # RNG seed for torch.manual_seed below
model_version = "5"  # version tag — its use is not visible in this chunk

# -----------------------------------------------

# Data: project-provided train/validation/test splits; `nan_alg` selects the
# NaN-handling strategy (defined elsewhere).
train_set, valid_set, test_set = data_generator(nan_alg=nan_alg)

# Move each split to the GPU, one per line for readability.
train_set = train_set.cuda()
valid_set = valid_set.cuda()
test_set = test_set.cuda()

# model
torch.manual_seed(seed)  # fix RNG state so weight initialisation is reproducible

n_channels = levels * [nhid]  # one channel width per temporal level

# Build the TCN and move its parameters onto the GPU
# (nn.Module.cuda() returns the same module, so rebinding is equivalent).
model = TCN(input_size, output_size, n_channels, kernel_size, dropout=dropout)
model = model.cuda()

criterion = nn.CrossEntropyLoss()

# Resolve the optimizer class by name on torch.optim (e.g. optim == "Adam").
optimizer = getattr(torch.optim, optim)(model.parameters(), lr=lr)

# Regression objective.  Earlier alternatives, kept here for reference:
#   loss_function = nn.BCELoss()
#   def loss_function(input, target):
#       o = input.flatten()
#       x = torch.stack((o, 1 - o), dim=1)
#       return nn.CrossEntropyLoss(x, target.flatten())
loss_function = nn.MSELoss(reduction="mean")  # "mean" is the default reduction

def evaluate(X_data, name='Eval'):
    model.eval()
    eval_idx_list = np.arange(len(X_data), dtype="int32")