def gan_objective(prior_params, d_params, n_data, n_samples, bnn_layer_sizes, act, d_act='tanh'):
    '''Estimates V(G, D) = E_p_gp[D(f)] - E_p_bnn[D(f)] with a WGAN-style gradient penalty.'''
    x = sample_inputs('uniform', n_data, (-10, 10))
    fbnns = sample_bnn(prior_params, x, n_samples, bnn_layer_sizes, act)  # [nf, nd], f ~ p_bnn(f)
    fgps = sample_gpp(x, n_samples, 'rbf')                                # [nf, nd], f ~ p_gp(f)
    D_fbnns = nn_predict(d_params, fbnns, d_act)
    D_fgps = nn_predict(d_params, fgps, d_act)

    # gradient penalty evaluated on a random interpolation between GP and BNN samples
    eps = np.random.uniform()
    f = eps * fgps + (1 - eps) * fbnns

    def D(function):
        return nn_predict(d_params, function, d_act)

    g = elementwise_grad(D)(f)                    # dD/df at the interpolated functions
    pen = 10 * (norm(g, ord=2, axis=1) - 1) ** 2  # penalize ||grad D|| away from 1

    return np.mean(D_fgps - D_fbnns + pen)
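# A minimal usage sketch (not the repo's training script): differentiate gan_objective with
# autograd and take adam steps on the BNN prior against a fixed discriminator; in practice the
# discriminator would be updated in alternation. The architectures, sample counts, and step
# size below are illustrative assumptions, not values taken from the source.
from autograd import grad
from autograd.misc.optimizers import adam

def fit_prior_against_discriminator(prior_params, d_params, n_data=50, n_samples=20,
                                    bnn_arch=[1, 20, 20, 1], act='rbf', iters=100):
    def objective(params, t):
        # push BNN function draws towards being indistinguishable from GP draws under D
        return gan_objective(params, d_params, n_data, n_samples, bnn_arch, act)

    return adam(grad(objective), prior_params, step_size=0.01, num_iters=iters)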
def plot_scatter(fig_title, num_rows, num_columns, graph_title_list, x_axis, y_axis, data, model, N):
    '''x_axis and y_axis are arrays of shape [num_graphs, n_data_points]'''
    # build a 5x5 grid of (rpm, injection-quantity) operating points
    rpms = np.linspace(1000, 3500, 5)
    injs = np.linspace(40, 5, 5)
    rpms, injs = np.meshgrid(rpms, injs)
    rpms = np.ravel(rpms)
    injs = np.ravel(injs)
    dr_2 = abs((rpms[-1] - rpms[0]) / 8)
    di_2 = abs((injs[-1] - injs[0]) / 8)

    # label each measured sample with the operating point it falls within
    dataframe_actual = data['xy0']
    dataframe_actual['op'] = None
    for i, (rpm, inj) in enumerate(zip(list(rpms), list(injs))):
        condition1 = (rpm - data['e0']['epm_neng']).abs() <= dr_2
        condition2 = (inj - data['i0']['injctl_qsetunbal']).abs() <= di_2
        condition3 = condition1 & condition2
        dataframe_actual.loc[condition3, 'op'] = i

    from nn import nn_predict
    pred = pd.DataFrame(nn_predict(model, N, data['x0'].values), columns=data['y0'].columns)
    pred = pd.concat([data['x0'], pred], axis=1)

    num_graphs = len(graph_title_list)
    plot_main(fig_title, num_graphs, num_rows, num_columns, graph_title_list,
              lambda i: (plt.scatter(x_axis[i, :], y_axis[i, :], s=0.4),
                         plt.ylabel(''),
                         plt.xlabel(''),
                         plt.xticks(rotation=60, va='center'),
                         sns.scatterplot('nox_g/h', 'russ_g/h',
                                         data=dataframe_actual[dataframe_actual['op'] == i],
                                         linewidth=0, s=6, color='r'),
                         sns.scatterplot('nox_g/h', 'russ_g/h',
                                         data=pred[dataframe_actual['op'] == i],
                                         linewidth=0, s=6, color='k')),
              lambda i: None,
              lambda i: None)
def cycle_progress(FILE_DAT, FILE_NUMPY, model_base_o, N_o, data_reduce=100):
    from scipy.interpolate import interp2d

    def chop_numpy(array, data_reduce):
        # array is 3d: subsample the calibration history down to data_reduce time steps
        shortened_data = np.zeros_like(array)[:data_reduce]
        window = int(array.shape[0] / data_reduce)
        for i in range(data_reduce):
            shortened_data[i] = array[i * window]
        return shortened_data

    calibration_progress = np.load(FILE_NUMPY)
    shortened_progress = chop_numpy(calibration_progress, data_reduce)
    del calibration_progress

    def generate_dat(FILE_DAT, channel_list):
        temp_dat = dyn_dat(FILE_DAT, channel_list, raster=1)
        temp_dat = temp_dat[temp_dat['x1'] > 600 / 1000]
        temp_dat.reset_index(inplace=True, drop=True)
        return temp_dat

    temp_dat = generate_dat(FILE_DAT, ['x1', 'x2', 'x3'])

    model_input = np.zeros((shortened_progress.shape[0], temp_dat.shape[0],
                            shortened_progress.shape[2]))
    for time in range(shortened_progress.shape[0]):
        temp_slice = shortened_progress[time]
        df_slice = pd.DataFrame(temp_slice, index=None)
        # interpolate each calibration map (columns 3-7) onto the drive-cycle operating points
        for cal_index in [3, 4, 5, 6, 7]:
            df_pivot = df_slice.pivot_table(values=cal_index, index=0,
                                            columns=1).fillna(method='ffill', axis=1)
            f = interp2d(df_pivot.index, df_pivot.columns, df_pivot.values.T, kind='linear')
            model_input[time, :, cal_index] = np.array([
                f(epm, inj)[0] for epm, inj in zip(temp_dat['x1'], temp_dat['x2'])
            ])
        model_input[time, :, 0] = temp_dat['x1']
        model_input[time, :, 1] = temp_dat['x2']
        model_input[time, :, 2] = temp_dat['x3']
        model_input[time, :, 8] = 910 / 1000

    from nn import nn_predict
    model_output = [
        nn_predict(model_base_o, N_o, model_input[i])[None, :]
        for i in range(model_input.shape[0])
    ]
    model_output = np.concatenate(model_output, axis=0)

    drive_pattern = generate_dat(FILE_DAT, channel_list=['x1', 'v1'])
    return drive_pattern, model_input, model_output
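# A small self-contained sketch of the pivot + interp2d pattern used in cycle_progress: a
# calibration map stored as long-format (rpm, injection, value) rows is pivoted into a grid and
# then evaluated at an arbitrary operating point. The toy data is invented for illustration only;
# note that scipy.interpolate.interp2d is legacy and requires an older SciPy release.
import numpy as np
import pandas as pd
from scipy.interpolate import interp2d

rows = pd.DataFrame({0: [1000, 1000, 2000, 2000],   # rpm breakpoints
                     1: [10, 30, 10, 30],           # injection-quantity breakpoints
                     2: [0.1, 0.2, 0.3, 0.4]})      # calibration value at each breakpoint
grid = rows.pivot_table(values=2, index=0, columns=1)
f = interp2d(grid.index, grid.columns, grid.values.T, kind='linear')
print(f(1500, 20)[0])  # bilinear interpolation at rpm=1500, inj=20 -> approximately 0.25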
def sweep(rpm, inj, operating_points, sweep_label, min_calibrations, max_calibrations, model, N):
    # takes rpm, inj, and the operating_points array from the dataframe, sweeps one calibration
    # label over its range at the closest operating point, and generates the model prediction
    calibration_array = closest_operating_point(
        rpm, inj, operating_points)[:, :min_calibrations.shape[0]]
    calibration_array = np.tile(calibration_array, (50, 1))
    calibration_array[:, 2 + sweep_label] = np.linspace(
        min_calibrations[2 + sweep_label], max_calibrations[2 + sweep_label], 50)
    predict = nn_predict(model, N, calibration_array)
    return calibration_array, predict
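# A hedged usage sketch for sweep: vary one calibration label at a fixed operating point and plot
# the swept calibration against one predicted channel. The choice of the first prediction column
# and the axis labels are illustrative assumptions, not taken from the source.
import matplotlib.pyplot as plt

def plot_sweep(rpm, inj, operating_points, sweep_label,
               min_calibrations, max_calibrations, model, N):
    cal, pred = sweep(rpm, inj, operating_points, sweep_label,
                      min_calibrations, max_calibrations, model, N)
    plt.plot(cal[:, 2 + sweep_label], pred[:, 0])  # swept calibration vs. first predicted channel
    plt.xlabel('calibration value (label {})'.format(sweep_label))
    plt.ylabel('predicted output')
    plt.show()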
def train(n_data=50, n_data_test=100, n_functions=500, nn_arch=[1, 15, 15, 1], hyper_arch=[20],
          act='rbf', ker='per', lr=0.01, iters=200, exp=1, run=1,
          feed_x=True, plot=True, save=False):
    _, num_weights = shapes_and_num(nn_arch)
    hyper_arch = [2 * n_data] + hyper_arch + [num_weights]  # hypernet maps (x, y) datasets to BNN weights
    xs, ys, xys = sample_data(n_functions, n_data, ker=ker)
    # save_file, args = manage_and_save(inspect.currentframe(), exp, run)
    save_name = get_save_name(n_data, n_functions, act, ker, nn_arch, hyper_arch)
    if plot:
        fig, ax = setup_plot()

    def objective(params, t):
        return hyper_loss(params, xs, ys, xys, nn_arch, act)

    def callback(params, t, g):
        x, y, xy = sample_data(1, n_data, ker=ker)
        preds = hyper_predict(params, x, xy, nn_arch, act)  # [1, nd]
        if plot:
            p.plot_iter(ax, x[0], x[0], y, preds)
        cov_diff = np.cov(y.ravel()) - np.cov(preds.ravel())
        print("ITER {} | OBJ {} COV DIFF {}".format(t, objective(params, t), cov_diff))

    var_params = adam(grad(objective), init_random_params(hyper_arch),
                      step_size=lr, num_iters=iters, callback=callback)

    # push a large batch of sampled datasets through the trained hypernet and inspect
    # the induced distribution over BNN weights and functions
    xs, ys, xys = sample_data(10000, n_data, ker=ker)
    ws = nn_predict(var_params, xys, act)            # [ns, nw]
    # ws = reparameterize(ws, prior='dropout')
    fs = bnn_predict(ws, xs, nn_arch, act)[:, :, 0]  # [nf, nd]
    p.plot_weights(ws, save_name)
    # p.plot_weights(ws, 'post' + save_name)
    p.plot_weights_function_space(ws, fs, save_name)
    # p.plot_fs(xs[0], fs[0:3], xs[0], ys[0:3], save_name)
    return ws, var_params
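# A hedged usage sketch: train the hypernetwork on periodic-kernel GP draws, then reuse the
# returned variational parameters with hyper_predict on a freshly sampled dataset. The
# hyperparameter values are illustrative assumptions, not the settings of any reported experiment.
if __name__ == '__main__':
    ws, var_params = train(n_data=50, n_functions=500, nn_arch=[1, 15, 15, 1],
                           hyper_arch=[20], act='rbf', ker='per', lr=0.01, iters=200)
    x, y, xy = sample_data(1, 50, ker='per')
    preds = hyper_predict(var_params, x, xy, [1, 15, 15, 1], 'rbf')  # [1, nd]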
def dyn_2(data_dyn1, model_base, data, N, plot=True):
    '''predict with the base model and store the values in the prediction dataframes'''
    data_dyn2 = data_dyn1.copy()
    predicted_targets = list(data['y0'].columns)
    # predicted_targets.append('afs_mairpercyl')
    df = pd.DataFrame(nn_predict(model_base, N, data_dyn1['x0'].values),
                      columns=predicted_targets)
    chassis_targets = data_dyn1['y0'].columns
    other_targets = [i for i in predicted_targets if i not in data_dyn1['y0'].columns]
    data_dyn2['pb0'] = df.loc[:, chassis_targets]
    data_dyn2['pb1'] = df.loc[:, other_targets]

    # plots for the chassis measurements
    if plot:
        plt.figure('prediction with base model')
        for i in range(4):
            plt.subplot(2, 2, i + 1)
            plt.grid(False)
            plt.plot(data_dyn2['v0']['time_s'].values, data_dyn2['v0'].iloc[:, 1],
                     label='vehicle_speed', color='r')
            plt.yticks([])
            plt.legend()
            plt.title(data_dyn2['y0'].columns[i])
            plt.twinx()
            plt.plot(data_dyn2['v0']['time_s'].values, data_dyn2['y0'].iloc[:, i],
                     label='actual', color='k')
            plt.plot(data_dyn2['v0']['time_s'].values, data_dyn2['pb0'].iloc[:, i],
                     label='predicted', color='b')
            plt.ylabel(data_dyn2['y0'].columns[i])
            plt.legend()
            plt.grid(False)
    return data_dyn2
def get_weights(x, n_data, layer_sizes, rn_arch=[1, 1], n_samples=10, save=False):
    # fit a separate NN to each GP function draw and collect the optimized weight vectors
    xs, ys = sample_gps(n_samples, n_data, ker='rbf')
    weights = []
    for i, (x, f) in enumerate(zip(xs, ys)):
        opt_weights = fit_nn(x, f[:, None], layer_sizes)
        plot(x, f, nn_predict(opt_weights, x))
        weight, _ = flatten(opt_weights)
        weights.append(weight[:, None])
        print("fit {}".format(i))
    weights = np.concatenate(weights, axis=1)  # [nw, n_samples]
    # per-weight empirical moments across the fitted networks
    mu = np.mean(weights, axis=1)
    sig = np.std(weights, axis=1)
    # sig = np.cov(weights)  # full covariance alternative
    return mu, sig
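# A hedged sketch of how the returned moments might be used: treat (mu, sig) as a factorized
# Gaussian over flattened weights, draw samples, and push them through bnn_predict to obtain
# function draws. The sample count and the shape expected of x are assumptions for illustration.
def sample_moment_matched_bnn(mu, sig, x, layer_sizes, act='rbf', n_samples=20):
    ws = mu[None, :] + sig[None, :] * np.random.randn(n_samples, mu.shape[0])  # [ns, nw]
    return bnn_predict(ws, x, layer_sizes, act)[:, :, 0]                       # [ns, nd]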
def hyper_predict(params, x, xy, nn_arch, nn_act):
    # xy shape is [nf, 2*nd]: the hypernet maps whole (x, y) datasets to BNN weight vectors
    weights = nn_predict(params, xy, nn_act)                   # [nf, nw]
    # weights = reparameterize(weights, prior='dropout')
    return bnn_predict(weights, x, nn_arch, nn_act)[:, :, 0]   # [nf, nd]
def D(function):
    return nn_predict(d_params, function, 'tanh')
def hyper_predict(params, x, y, nn_arch, nn_act):
    # y shape is [nf, nd]: this variant conditions the hypernet on the targets only
    weights = nn_predict(params, y, 'relu')                    # [nf, nw]
    return bnn_predict(weights, x, nn_arch, nn_act)[:, :, 0]   # [nf, nd]
        if plot:
            p.plot_iter(ax, x, x, y, preds)
        cd = np.cov(y.ravel()) - np.cov(preds.ravel())
        print("ITER {} | OBJ {} COV DIFF {}".format(t, objective(params, t), cd))

    var_params = adam(grad(objective), init_random_params(hyper_arch),
                      step_size=0.005, num_iters=200, callback=callback)

    xtest = np.linspace(-10, 10, n_data_test).reshape(n_data_test, 1)
    fgps = sample_gpp(x, n_samples=500, kernel=ker)
    # fgps = sample_function(x, 500)
    ws = nn_predict(var_params, fgps, "relu")        # [ns, nw]
    fs = bnn_predict(ws, x, nn_arch, act)[:, :, 0]
    # p.plot_weights_function_space(ws, fs, save_name)
    moments = get_moments(ws, full_cov=True)
    # p.plot_heatmap(moments, "heatmap" + save_name)

    # PLOT HYPERNET
    # fgp = sample_gpp(x, n_samples=2, kernel=ker)
    # fnns = hyper_predict(var_params, xtest, fgp, nn_arch, act)
    # p.plot_fs(xtest, fnns, x, fgp, save_name)
    # plot_heatmap(moments, "Cov-heatmap" + save_name + '.pdf')
    # plot_samples(moments, xtest, 5, nn_arch, act=act, ker=ker, save=save_name + '.pdf')
    # p.plot_dark_contour(ws)
    # p.plot_weights(moments, num_weights, save_name)