def analize(XX, yy):
    """Predict on ``XX`` with the fitted ``regr`` model and report against ``yy``.

    Relies on the module-level ``regr`` estimator and the ``plot_result`` /
    ``full_report`` helpers defined elsewhere in this file.

    NOTE(review): a previously commented-out ``scaler.inverse_transform`` step
    was removed; metrics are therefore reported in the scaled target domain.
    """
    y_pred = regr.predict(XX)
    plot_result(yy, y_pred)
    full_report(yy, y_pred)
def analzie(data_set):
    """Evaluate ``lstm_model`` on ``data_set`` and report/plot the results.

    Assumes a 3-D array layout where the last feature column of the final
    timestep is the target and the remaining feature columns are the model
    input — TODO confirm against the data-preparation code.
    """
    targets = data_set[:, -1, -1]
    model_out = lstm_model.predict(data_set[:, :, :-1])
    estimates = model_out[:, -1, -1]
    full_report(targets, estimates)
    plot_result(targets, estimates)
def analzie(data_set, save=False, filename='lstm'):
    """Evaluate ``lstm_model`` on ``data_set``, optionally saving predictions.

    Parameters
    ----------
    data_set : array-like, 3-D
        Last feature column of the final timestep is taken as the target;
        all other feature columns are fed to the model — TODO confirm layout.
    save : bool
        When True, persist the predictions with ``np.save``.
    filename : str
        Base name for the saved file (``np.save`` appends ``.npy``).
        Defaults to the previously hard-coded ``'lstm'``.
    """
    y_true = data_set[:, -1, -1]
    predictions = lstm_model.predict(data_set[:, :, :-1])
    y_pred = predictions[:, -1, -1]
    if save:
        np.save(filename, y_pred)
    full_report(y_true, y_pred)
    plot_result(y_true, y_pred)
def analzie(data_loader):
    """Run ``net`` over every batch in ``data_loader`` and report the results.

    For each batch, all but the last feature column are the model input and
    the last feature of the final timestep is the target — TODO confirm
    against the dataset builder. Predictions are taken from the same
    (last-timestep, last-feature) position of the network output.

    NOTE(review): previously commented-out ``scaler.inverse_transform`` calls
    were removed; metrics are reported in the scaled target domain.
    """
    y_pred = []
    y_true = []
    # Hoisted out of the loop: no gradients are needed anywhere below.
    with torch.no_grad():
        for batch in data_loader:
            x = batch[:, :, :-1]
            y = batch[:, -1, -1]
            netout = net(x)
            y_true.extend(y.cpu().numpy())
            y_pred.extend(netout[:, -1, -1].cpu().numpy())
    y_true = np.array(y_true)
    y_pred = np.array(y_pred)
    full_report(y_true, y_pred)
    plot_result(y_true, y_pred)
def analize(XX, yy):
    """Run the module-level ``regr`` model on ``XX`` and compare to ``yy``.

    Delegates all reporting to ``plot_result`` and ``full_report``.
    """
    predictions = regr.predict(XX)
    plot_result(yy, predictions)
    full_report(yy, predictions)