import os

import numpy as np
import pandas as pd

os.chdir(r'C:\Git\ZSS')  # raw string so the backslashes aren't treated as escapes
df = pd.read_csv('ZSSdata.csv', header=None)
dataset = df.to_numpy()

# zss is broken, use angle_steers!
keys = ['angle_steers', 'shitty_angle', 'zss', 'output_steer',
        'wheel_speeds.fl', 'wheel_speeds.fr', 'wheel_speeds.rl', 'wheel_speeds.rr']

data = [np.take(dataset, indices=i, axis=1) for i in range(dataset.shape[1])]
data_zip = list(zip(keys, data))
scales = dict(zip(keys, [[np.min(i[1]), np.max(i[1])] for i in data_zip]))

# normalize data (norm() is a project helper; a sketch of the assumed behavior follows this snippet)
data_normalized = norm(data_zip, scales)

inputs = ['shitty_angle', 'output_steer',
          'wheel_speeds.fl', 'wheel_speeds.fr', 'wheel_speeds.rl', 'wheel_speeds.rr']
output = 'angle_steers'  # this is the accurate tssp2 sensor

# sort data into the above format with the output at the end for tokenization
seq_len = 20
# if we want to predict the future steer angle, offset the output list by a few
# samples and remove that amount from the beginning of the input lists
data_sorted = np.array([data_normalized[i] for i in inputs + [output]])  # dict of lists -> list of lists, ordered by the inputs list above
data_sorted = np.stack(data_sorted, axis=1)  # join the array so that each item is a full sample
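# The norm() helper called above isn't defined in this snippet. A minimal sketch
# consistent with how it's used here (a list of (key, array) pairs plus a dict of
# per-key [min, max] scales in, a dict of normalized arrays out) could look like
# this -- an assumption from the call site, not the project's actual implementation:
def norm(data_zip, scales):
    normalized = {}
    for key, values in data_zip:
        lo, hi = scales[key]
        normalized[key] = (np.asarray(values) - lo) / (hi - lo)  # min-max to [0, 1]
    return normalized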
import os
import pickle

import numpy as np

# data_dir and norm_dir are defined earlier in the original script
samples_to_use = 7000000
if samples_to_use != 'all':
    y_train = np.load("data/{}/y_train.npy".format(data_dir))[:samples_to_use]
else:
    y_train = np.load("data/{}/y_train.npy".format(data_dir))
print(len(y_train))

is_array = False
if not os.path.exists(norm_dir.format(data_dir)):
    if samples_to_use != 'all':
        x_train = np.load("data/{}/x_train.npy".format(data_dir))[:samples_to_use]
    else:
        x_train = np.load("data/{}/x_train.npy".format(data_dir))
    print("Normalizing...", flush=True)
    normalized = norm(x_train)
    x_train = normalized['normalized']
    scales = normalized['scales']
    print("Dumping normalization...", flush=True)
    np.save(norm_dir.format(data_dir), x_train)
    with open('data/LSTM/scales', "wb") as f:
        pickle.dump(normalized['scales'], f)
    # with open(norm_dir.format(data_dir), "wb") as f:
    #     pickle.dump(normalized, f)
else:
    is_array = True
    print("Loading normalized data...", flush=True)
    x_train = np.load(norm_dir.format(data_dir))
    with open('data/LSTM/scales', "rb") as f:
        scales = pickle.load(f)
    print('Loaded!', flush=True)
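# This loader expects a norm() variant that returns both the normalized data and
# the scales it used. A hypothetical sketch under that assumption (per-feature
# min-max over the samples and timesteps of a 3D array):
def norm(x):
    x = np.asarray(x, dtype=np.float32)
    lo = x.min(axis=(0, 1))  # per-feature minimum
    hi = x.max(axis=(0, 1))  # per-feature maximum
    return {'normalized': (x - lo) / (hi - lo), 'scales': [lo, hi]}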
y_train = json.load(f)  # the enclosing `with open(...)` line is truncated in the source

NORM = True
if NORM:
    # x_train_copy = list(x_train)
    v_ego_scale = get_3d_min_max(x_train, 0)
    a_ego_scale = get_3d_min_max(x_train, 1)
    v_lead_scale = get_3d_min_max(x_train, 2)
    x_lead_scale = get_3d_min_max(x_train, 3)
    a_lead_scale = get_3d_min_max(x_train, 4)

    x_train_reformat = []  # reformat to sequence data for the nn to train on, to test
    for idx, i in enumerate(x_train):
        x_train_reformat.append([[], [], [], [], []])
        for x in i:
            x_train_reformat[idx][0].append(norm(x[0], v_ego_scale))
            x_train_reformat[idx][1].append(norm(x[1], a_ego_scale))
            x_train_reformat[idx][2].append(norm(x[2], v_lead_scale))
            x_train_reformat[idx][3].append(norm(x[3], x_lead_scale))
            x_train_reformat[idx][4].append(norm(x[4], a_lead_scale))
    x_train = x_train_reformat
    # x_train = x_train * 2
    # y_train = y_train * 2
    x_train = np.asarray(x_train)
    y_train = np.asarray([np.interp(i, [-1, 1], [0, 1]) for i in y_train])  # map y from [-1, 1] to [0, 1]
else:
    x_train = np.asarray(x_train)
    y_train = np.asarray(y_train)
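# get_3d_min_max() and the scalar norm() used above aren't shown in this snippet.
# Hypothetical sketches matching their usage (x_train is samples x timesteps x features):
def get_3d_min_max(data, idx):
    values = [ts[idx] for sample in data for ts in sample]  # every reading of one feature
    return [min(values), max(values)]

def norm(value, scale):
    return (value - scale[0]) / (scale[1] - scale[0])  # min-max scale a single reading to [0, 1]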
if stop.lower()=="y": model.stop_training = True''' #v_ego, v_lead, d_lead os.chdir("C:/Git/dynamic-follow-tf") with open("data/x", "r") as f: x_train = json.load(f) with open("data/y", "r") as f: y_train = json.load(f) NORM = True if NORM: v_ego, v_ego_scale = (norm([i[0] for i in x_train])) a_ego, a_ego_scale = (norm([i[1] for i in x_train])) v_lead, v_lead_scale = (norm([i[2] for i in x_train])) x_lead, x_lead_scale = (norm([i[3] for i in x_train])) a_lead, a_lead_scale = (norm([i[4] for i in x_train])) x_train = [[v_ego[idx], a_ego[idx], v_lead[idx], x_lead[idx], a_lead[idx]] for (idx, i) in enumerate(v_ego)] #x_train.append([v_ego[idx], v_lead[idx], x_lead[idx], a_lead[idx]]) x_train = np.asarray(x_train) #y_train = np.asarray([np.interp(i, [-1, 1], [0, 1]) for i in y_train]) #y_train = np.asarray(y_train) #scaler = preprocessing.StandardScaler().fit(x_train) #x_train = scaler.transform(x_train)
import os

import numpy as np
import tensorflow as tf

from normalizer import norm

os.chdir("C:/Git/dynamic-follow-tf/models/h5_models")
model = tf.keras.models.load_model("3model-epoch-8.h5")

# per-feature [min, max] scales captured at training time
v_scale = [0.0, 48.288787841797]
a_scale = [-8.39838886261, 9.78254699707]
x_scale = [0.125, 138.625]

# the two speed inputs share v_scale
data = [norm(0, v_scale), norm(0, v_scale), norm(0, x_scale), norm(0, a_scale)]
prediction = model.predict([[data]])[0][0]
# print(prediction)
print((prediction - 0.5) * 2.0)  # map model output from [0, 1] back to [-1, 1]
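# Illustrative sanity check: (p - 0.5) * 2.0 is the exact inverse of the
# np.interp(y, [-1, 1], [0, 1]) mapping applied to y_train during training.
p = 0.75
assert np.isclose(np.interp((p - 0.5) * 2.0, [-1, 1], [0, 1]), p)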
with open("data/LSTM/y_train", "r") as f: y_train = json.load(f) NORM = False if NORM: x_train_copy = list(x_train) v_ego_scale = get_min_max(x_train, 0) a_ego_scale = get_min_max(x_train, 1) v_lead_scale = get_min_max(x_train, 2) x_lead_scale = get_min_max(x_train, 3) a_lead_scale = get_min_max(x_train, 4) x_train = [] for idx, i in enumerate(x_train_copy): x_train.append([]) for x in i: x_train[idx].append([norm(x[0], v_ego_scale), norm(x[1], a_ego_scale), norm(x[2], v_lead_scale), norm(x[3], x_lead_scale), norm(x[4], a_lead_scale)]) #x_train = x_train * 2 #y_train = y_train * 2 x_train = np.asarray(x_train) y_train = np.asarray([np.interp(i, [-1, 1], [0, 1]) for i in y_train]) else: x_train = np.asarray(x_train) y_train = np.asarray(y_train) '''for idx,i in enumerate(y_train): if i < -.5 and x_train[idx][0] > 8.9: print(i) print(idx) print(x_train[idx]) break'''
import os

import numpy as np
import tensorflow as tf

from normalizer import norm

os.chdir("C:/Git/dynamic-follow-tf/models/h5_models")
model = tf.keras.models.load_model("gm-only.h5")

# per-feature [min, max] scales captured at training time
v_ego_scale = [0.0, 36.1995735168457]
a_ego_scale = [-3.0412862300872803, 2.78971791267395]
v_lead_scale = [0.0, 91.02222442626953]
x_lead_scale = [0.9600000381469727, 138.67999267578125]
a_lead_scale = [-3.909122943878174, 25.991727828979492]

data = [norm(0, v_ego_scale),
        norm(0, a_ego_scale),
        norm(1, v_lead_scale),
        norm(3, x_lead_scale),
        norm(.3, a_lead_scale)]
prediction = model.predict(np.asarray([data]))[0][0]
print((prediction - 0.5) * 2.0)  # map model output from [0, 1] back to [-1, 1]
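# Usage note (illustrative): several scenarios can be scored in a single
# predict() call by stacking normalized rows into one batch:
batch = np.asarray([data, data])                   # shape (2, 5)
preds = (model.predict(batch)[:, 0] - 0.5) * 2.0   # all outputs back in [-1, 1]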