def convolve_with_basis(self, signal):
    """
    Convolve each column of the event count matrix with this basis.

    :param signal: Tx1 array of time-series data
    :return: TxB array of the signal convolved with each basis vector
    """
    (T, _) = signal.shape
    (R, B) = self.basis.shape

    # Initialize array for filtered stimulus
    F = np.empty((T, B))

    # Compute convolutions for each basis vector, one at a time
    for b in np.arange(B):
        # keep the first T samples of the 'full' convolution; index the
        # single column so the result is 1-D and fits F[:, b]
        F[:, b] = sig.fftconvolve(signal,
                                  np.reshape(self.basis[:, b], (R, 1)),
                                  'full')[:T, 0]

    # Check for positivity
    if np.amin(self.basis) >= 0 and np.amin(signal) >= 0:
        np.clip(F, 0, np.inf, out=F)
        assert np.amin(F) >= 0, "convolution should be >= 0"

    return F
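# Usage sketch (my own, not from the source): the same truncated
# FFT-convolution pattern as convolve_with_basis, applied to a toy Tx1
# signal and a toy RxB basis.
import numpy as np
import scipy.signal as sig

T, R, B = 100, 10, 3
toy_signal = np.abs(np.random.randn(T, 1))   # non-negative Tx1 signal
toy_basis = np.abs(np.random.randn(R, B))    # non-negative RxB basis

F = np.empty((T, B))
for b in range(B):
    # the 'full' convolution has length T+R-1; keep only the first T samples
    F[:, b] = sig.fftconvolve(toy_signal[:, 0], toy_basis[:, b], 'full')[:T]

# both inputs are non-negative, so the clipped output must be too
np.clip(F, 0, np.inf, out=F)
assert np.amin(F) >= 0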
def callback_kl(prior_params, iter, g):
    kl = obj(prior_params, iter, N_samples=N_samples)
    kls.append(kl)
    min_kls.append(np.amin(kls))
    print("Iteration {} KL {}".format(iter, kl))

    plot_lines(ax1, prior_params, inputs)
    plot_heatmap(ax2, prior_params)
    ax3.imshow(real_cov)
    plot_kls(ax4, kls, min_kls)
    plt.draw()
    # plt.savefig(os.path.join(plotting_dir, 'contours_iteration_' + str(iter) + '.pdf'))
    plt.pause(1.0 / 400.0)
    ax1.cla()
    ax2.cla()
    ax3.cla()
    ax4.cla()

    if iter % 10 == 0:
        samples = sample_obs(prior_params, N_samples, inputs, layer_sizes)
        y_mean, y_cov = np.mean(samples, axis=0), np.cov(samples.T)
        print(y_cov)
        print(y_cov - real_cov)
        print(y_mean - real_mean)
def add_data(self, S, F=None):
    """
    Add a data set to the list of observations.
    First, filter the data with the impulse response basis,
    then instantiate a set of parents for this data set.

    :param S: a TxK matrix of event counts for each time bin
              and each process.
    """
    # np.issubdtype replaces the removed np.int alias
    assert isinstance(S, np.ndarray) and S.ndim == 2 and S.shape[1] == self.K \
           and np.amin(S) >= 0 and np.issubdtype(S.dtype, np.integer), \
        "Data must be a TxK array of event counts"

    T = S.shape[0]

    if F is None:
        # Filter the data into a TxKxB array
        Ftens = self.basis.convolve_with_basis(S)

        # Flatten this into a T x (KxB) matrix
        # [F00, F01, F02, F10, F11, ..., F(K-1)0, ..., F(K-1)(B-1)]
        F = Ftens.reshape((T, self.K * self.B))
        assert np.allclose(F[:, 0], Ftens[:, 0, 0])
        if self.B > 1:
            assert np.allclose(F[:, 1], Ftens[:, 0, 1])
        if self.K > 1:
            assert np.allclose(F[:, self.B], Ftens[:, 1, 0])

        # Prepend a column of ones
        F = np.hstack((np.ones((T, 1)), F))

    for k, node in enumerate(self.nodes):
        node.add_data(F, S[:, k])
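# Hypothetical usage (names assumed, not from the source): add_data expects a
# TxK array of non-negative integer event counts, e.g. Poisson draws.
import numpy as np

T, K = 1000, 4
S = np.random.poisson(0.1, size=(T, K)).astype(np.int64)  # counts per bin/process
# model = SomeHawkesModel(K=K, B=3)   # hypothetical owner of add_data
# model.add_data(S)                   # filters S with the basis, attaches parents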
def solve(f, x0, eps=1e-8, it_max=200):
    x = x0
    it = 0
    df, dff = ag.grad(f), ag.hessian(f)
    d_f_eval_prev = None
    f_eval_xx = f(x0)  # so the return value is defined even if it_max == 0
    while it < it_max:
        # augment the Hessian so it is positive definite
        hess = np.squeeze(dff(x))
        # the Hessian is symmetric, so eigvalsh avoids spurious complex parts
        eig_min = np.amin(np.linalg.eigvalsh(hess))
        hess += max(0.0, -eig_min + 0.001) * np.eye(hess.shape[0])

        # Newton direction, via solve rather than an explicit inverse
        direction = np.linalg.solve(hess, -df(x))
        s = line_search(f, df, x, direction)
        xx = x + s * direction

        f_eval_xx, f_eval_x = f(xx), f(x)
        d_f_eval = f_eval_xx - f_eval_x
        x = xx
        it += 1

        # stop once the relative improvement falls below eps
        if d_f_eval_prev is not None and abs(d_f_eval / d_f_eval_prev) < eps:
            break
        d_f_eval_prev = d_f_eval
    return x, f_eval_xx, it
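# Usage sketch (assumptions flagged): solve() relies on an external
# line_search, so this minimal backtracking (Armijo) version is my stand-in;
# `ag` is assumed to be the autograd package.
import autograd as ag
import autograd.numpy as np

def line_search(f, df, x, direction, beta=0.5, c=1e-4):
    # shrink the step until the Armijo sufficient-decrease condition holds
    s = 1.0
    while f(x + s * direction) > f(x) + c * s * np.dot(df(x), direction):
        s *= beta
    return s

f = lambda x: np.sum((x - 1.0)**2) + 0.1 * np.sum(x**4)
x_star, f_star, n_iter = solve(f, np.zeros(3))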
def plot_function(self, ax):
    big_val1 = np.amax(np.asarray([abs(a[0]) for a in self.w_hist]))
    big_val2 = np.amax(np.asarray([abs(a[1]) for a in self.w_hist]))
    big_val = max(big_val1, big_val2, 3)

    # create plotting range
    r = np.linspace(-big_val, big_val, 100)

    # create grid from plotting range
    w1_vals, w2_vals = np.meshgrid(r, r)
    w1_vals.shape = (len(r)**2, 1)
    w2_vals.shape = (len(r)**2, 1)
    g_vals = self.g([w1_vals, w2_vals])

    # vals for cost surface
    w1_vals.shape = (len(r), len(r))
    w2_vals.shape = (len(r), len(r))
    g_vals.shape = (len(r), len(r))

    # vals for plotting range
    gmin = np.amin(g_vals)
    gmax = np.amax(g_vals)
    ggap = (gmax - gmin) * 0.1
    gmin = gmin - ggap
    gmax = gmax + ggap

    # plot and fix up panel
    strider = int(round(45 / float(big_val)))
    strider = max(strider, 2)
    ax.plot_surface(w1_vals, w2_vals, g_vals, alpha=0.1, color='k',
                    rstride=strider, cstride=strider, linewidth=1,
                    edgecolor='k')
def animate(k):
    ax.cla()

    # quadratic to plot
    alpha = alpha_values[k]
    g = lambda w: w[0]**2 + alpha * w[1]**2

    # create grid from plotting range
    w1_vals, w2_vals = np.meshgrid(input_range, input_range)
    w1_vals.shape = (len(input_range)**2, 1)
    w2_vals.shape = (len(input_range)**2, 1)
    g_vals = g([w1_vals, w2_vals])

    # vals for cost surface
    w1_vals.shape = (len(input_range), len(input_range))
    w2_vals.shape = (len(input_range), len(input_range))
    g_vals.shape = (len(input_range), len(input_range))
    g_range = np.amax(g_vals) - np.amin(g_vals)  # used for cleaning up final plot
    ggap = g_range * 0.5

    # plot original function
    ax.plot_surface(w1_vals, w2_vals, g_vals, alpha=0.1, color='k',
                    rstride=15, cstride=15, linewidth=2, edgecolor='k')

    # clean up plotting area
    ax.set_title(set_title, fontsize=15)
    ax.set_xlabel(horiz_1_label, fontsize=15)
    ax.set_ylabel(horiz_2_label, fontsize=15)
    ax.set_zlabel(vert_label, fontsize=15)
    ax.view_init(view[0], view[1])
    ax.axis(set_axis)
    return artist,
def plot_var(self, max_var_list, s):
    xpt = np.array(range(max_var_list.shape[0]))
    xpt = xpt - xpt[-1] + s
    self.lvar[0].set_xdata(xpt)
    self.lvar[0].set_ydata(max_var_list)
    # self.lvar[0].set_3d_properties(zs=0)
    self.ax4.set_xlim([xpt[0], xpt[-1]])
    self.ax4.set_ylim([np.amin(max_var_list), np.amax(max_var_list)])
    self.ax4.title.set_text("new max_post_var: %f" % max_var_list[-1])
    self.fig.canvas.draw()
def lam(ts, eval_ts=None, bw=1.0):
    """
    Estimate the event rate of the timestamps ts by Gaussian kernel
    density estimation, evaluated at eval_ts (defaults to ts itself).
    The bandwidth scales with the spread of the data, and the density
    is rescaled by the number of events to give an intensity.
    """
    if eval_ts is None:
        eval_ts = ts
    fn = gaussian_kde(
        ts,
        bw * (np.amax(ts) - np.amin(ts)) / ts.size
        # * (ts.size**(-0.8))
    )
    return fn(eval_ts) * ts.size
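# Usage sketch (my own): estimating a smooth event rate from simulated
# point-process timestamps; gaussian_kde comes from scipy.stats.
import numpy as np
from scipy.stats import gaussian_kde

ts = np.random.exponential(1.0, size=500).cumsum()   # event times, ~1 per unit time
grid = np.linspace(ts[0], ts[-1], 200)
rate = lam(ts, eval_ts=grid, bw=1.0)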
def standarizeImage(im):
    if len(im.shape) < 3:
        im = convert_bw_to_rgb(im)
    im = np.array(im, 'float32')
    if im.shape[0] != 64:
        im = imresize(im, (64, 64, 3))
    if np.amax(im) > 1.1:
        im = im / 255.0
    assert (np.amax(im) > 0.01) & (np.amax(im) <= 1)
    assert np.amin(im) >= 0.00
    return im
def calculate_S_roots(A, B, C, D):
    s1 = S1_2(A, B, C, D)
    s2 = S2_2(A, B, C, D)
    s3 = S3_2(A, B, C, D)
    roots = [s1, s2, s3]
    print("Roots")
    print(roots)

    S_plus2 = np.amax(roots)
    S_plus = np.sqrt(S_plus2)
    S_minus2 = np.amin(roots)
    S_minus = np.sqrt(S_minus2)
    # print(roots)
    # print(S_plus2, S_minus2)

    roots.remove(S_plus2)
    roots.remove(S_minus2)
    S3 = np.sqrt(roots[0])
    return S_plus, S_minus, S3
def compute_LR(rate, old_points, g_pert, type_epsilon="relative"):
    if type_epsilon == "relative":
        norm_old = np.linalg.norm(old_points, axis=1)
        norm_pert = np.linalg.norm(g_pert, axis=1)

        # Replace all tiny values by 1
        norm_pert[norm_pert < 0.000001] = 1
        ratio = norm_old / norm_pert

        epsilon = rate * np.amin(ratio)
    elif type_epsilon == "absolute":
        norm_pert = np.linalg.norm(g_pert, axis=1)
        norm_pert[norm_pert < 0.000001] = 1
        epsilon = rate / np.amax(norm_pert)
    elif type_epsilon == "usual":
        epsilon = rate
    else:
        # raising here avoids returning an undefined epsilon
        raise ValueError("Unknown type of epsilon: %s" % type_epsilon)
    return epsilon
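# Usage sketch (my own): the "relative" mode shrinks the learning rate when
# the perturbation g_pert is large compared to the current points, while
# "absolute" only looks at the perturbation norms.
import numpy as np

old_points = np.random.randn(50, 3)
g_pert = 0.1 * np.random.randn(50, 3)
eps_rel = compute_LR(0.05, old_points, g_pert, type_epsilon="relative")
eps_abs = compute_LR(0.05, old_points, g_pert, type_epsilon="absolute")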
def truncate0(x, axis=None, strict=False, tol=1e-13):
    '''make sure everything in x is non-negative'''
    # the maximum along axis
    maxes = np.maximum(np.amax(x, axis=axis), 1e-300)
    # the negative part of minimum along axis
    mins = np.maximum(-np.amin(x, axis=axis), 0.0)

    # assert the negative numbers are small (relative to maxes)
    assert np.all(mins <= tol * maxes)

    if axis is not None:
        idx = [slice(None)] * x.ndim
        idx[axis] = np.newaxis
        # index with a tuple; indexing with a list is deprecated in NumPy
        mins = mins[tuple(idx)]
        maxes = maxes[tuple(idx)]

    if strict:
        # set everything below the tolerance to 0
        return set0(x, x < tol * maxes)
    else:
        # set everything of the same magnitude as the most negative number to 0
        return set0(x, x < 2 * mins)
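# truncate0 calls a set0 helper that is not shown; a minimal version
# consistent with its usage (zero out masked entries, keep the rest) might
# look like this -- an assumption, not the source's set0.
import numpy as np

def set0(x, mask):
    out = np.array(x, copy=True)
    out[mask] = 0.0
    return out

x = np.array([[1.0, -1e-15], [2.0, 3.0]])
print(truncate0(x, axis=0))  # the tiny negative entry is zeroed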
def normalize0(self, data, axis=0):
    assert np.isfinite(data).all()
    mean = np.mean(data, axis=axis)
    var = np.var(data, axis=axis)
    stdn = np.std(data, axis=axis)
    minimum_arr = np.amin(data, axis=axis, keepdims=True)
    maximum_arr = np.amax(data, axis=axis, keepdims=True)
    normalize_state = {
        "mean": mean,
        "var": var,
        "min": minimum_arr,
        "max": maximum_arr,
        "stdn": stdn
    }
    if self.config.NN_ZERO_MEAN_NORMALIZE:
        normalized = (data - mean) / (stdn + 0.00001)
    else:
        normalized = (data - minimum_arr) / (maximum_arr - minimum_arr + 0.0001)
    return normalized.reshape(data.shape), normalize_state
def draw_it(self, **args):
    # user input functions to add
    self.g1 = args['g1']  # input function
    self.g2 = args['g2']  # input function

    num_frames = 100
    if 'num_frames' in args:
        num_frames = args['num_frames']

    min_range = -3
    if 'min_range' in args:
        min_range = args['min_range']

    max_range = 3  # was -3, which collapses the plotting range
    if 'max_range' in args:
        max_range = args['max_range']

    if 'mode' in args:
        mode = args['mode']
    else:
        mode = 'convex_combo'

    if 'alpha_range' in args:
        alpha_range = args['alpha_range']
    else:
        alpha_range = [0, 1]

    if 'title1' in args:
        title1 = args['title1']
    else:
        title1 = '$g_1$'

    if 'title2' in args:
        title2 = args['title2']
    else:
        title2 = '$g_2$'

    if 'title3' in args:
        title3 = args['title3']
    else:
        title3 = '$(1 - \\alpha)\,g_1 + \\alpha\,g_2$'

    # initialize figure
    fig = plt.figure(figsize=(15, 5))
    artist = fig
    ax1 = fig.add_subplot(131)
    ax2 = fig.add_subplot(132)
    ax3 = fig.add_subplot(133)

    # generate base function for plotting on each slide
    w_plot = np.linspace(min_range, max_range, 200)
    g1_plot = self.g1(w_plot)
    g2_plot = self.g2(w_plot)

    # set vertical limit markers
    g1_min = np.amin(g1_plot)
    g2_min = np.amin(g2_plot)
    g1_max = np.amax(g1_plot)
    g2_max = np.amax(g2_plot)
    g1_gap = 0.2 * (g1_max - g1_min)
    g2_gap = 0.2 * (g2_max - g2_min)
    g1_min = g1_min - g1_gap
    g2_min = g2_min - g2_gap
    g1_max = g1_max + g1_gap
    g2_max = g2_max + g2_gap

    # decide on number of slides
    alpha_vals = np.linspace(alpha_range[0], alpha_range[1], num_frames)

    print('starting animation rendering...')

    # animation sub-function
    def animate(k):
        # clear panels for next slide
        ax1.cla()
        ax2.cla()
        ax3.cla()

        # print rendering update
        if np.mod(k + 1, 25) == 0:
            print('rendering animation frame ' + str(k + 1) + ' of ' + str(num_frames))
        if k == num_frames - 1:
            print('animation rendering complete!')
            time.sleep(1.5)
            clear_output()

        # plot function 1
        ax1.plot(w_plot, g1_plot, color='k', zorder=1)
        ax1.set_title(title1, fontsize=15)

        # plot function 2
        ax2.plot(w_plot, g2_plot, color='k', zorder=1)
        ax2.set_title(title2, fontsize=15)

        # plot combination of both
        alpha = alpha_vals[k]
        if mode == 'regularization':
            g_combo = g1_plot + alpha * g2_plot
        else:
            g_combo = (1 - alpha) * g1_plot + alpha * g2_plot
        ax3.plot(w_plot, g_combo, color='k', zorder=1)
        ax3.set_title(title3, fontsize=15)

        # set vertical limits
        ax1.set_ylim([g1_min, g1_max])
        ax2.set_ylim([g2_min, g2_max])

        # set vertical limit markers
        gmin = np.amin(g_combo)
        gmax = np.amax(g_combo)
        g_gap = 0.2 * (gmax - gmin)
        ax3.set_ylim([gmin - g_gap, gmax + g_gap])

        return artist,

    anim = animation.FuncAnimation(fig, animate, frames=num_frames,
                                   interval=num_frames, blit=True)
    return anim
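# Usage sketch (assumed class context, so left commented): draw_it is a
# method, and the owning class is not shown here; g1 and g2 are any
# callables defined on the plotting range.
# demo = Visualizer()   # hypothetical owner of draw_it
# anim = demo.draw_it(g1=lambda w: w**2,
#                     g2=lambda w: np.sin(3 * w),
#                     mode='regularization',
#                     num_frames=50)
# anim.save('combo.mp4')  # or display inline in a notebook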
def S_minus_2(A, B, C, D):
    s1 = S1_2(A, B, C, D)
    s2 = S2_2(A, B, C, D)
    s3 = S3_2(A, B, C, D)
    return np.amin([s1, s2, s3])
def fit(
        param_vector=None,
        pi_kappa=0.0,
        pi_omega=1e-8,
        max_steps=y.size,
        step_iter=50,
        step_size=0.1,
        gamma=0.9,
        eps=1e-8,
        backoff=0.75):
    if param_vector is None:
        param_vector = pack()
    n_params = param_vector.size
    param_path = np.zeros((n_params, max_steps))
    pi_kappa_path = np.zeros(max_steps)
    pi_omega_path = np.zeros(max_steps)
    loglik_path = np.zeros(max_steps)
    dof_path = np.zeros(max_steps)
    aic_path = np.zeros(max_steps)

    # Now, an idiotic gradient descent algorithm.
    # Seeding by iteratively-reweighted least squares,
    # or just least squares, would be better.
    grad_negloglik = grad(negloglik, 0)
    grad_penalty = grad(penalty, 0)
    grad_objective = grad(objective, 0)
    avg_sq_grad = np.ones_like(param_vector)

    for j in range(max_steps):
        loss = objective(param_vector)
        best_loss = loss
        local_step_size = step_size
        best_param_vector = np.array(param_vector)

        for i in range(step_iter):
            g_negloglik = grad_negloglik(param_vector)
            g_penalty = grad_penalty(param_vector, pi_kappa, pi_omega)
            g = g_negloglik + g_penalty
            avg_sq_grad = avg_sq_grad * gamma + g**2 * (1 - gamma)
            velocity = g / (np.sqrt(avg_sq_grad) + eps) / sqrt(i + 1.0)
            # watch out, nans
            velocity[np.logical_not(np.isfinite(velocity))] = 0.0
            penalty_dominant = np.abs(g_negloglik) < penalty_weight(pi_kappa, pi_omega)
            velocity[penalty_dominant * (velocity == 0)] = 0.0
            new_param_vector = param_vector - velocity * local_step_size

            # coefficients that pass through 0 must stop there
            new_param_vector[
                np.abs(np.sign(new_param_vector) - np.sign(param_vector)) == 2
            ] = 0.0
            new_param_vector[:] = np.maximum(new_param_vector, param_floor)
            new_loss = objective(new_param_vector)

            if new_loss < loss:
                # print('good', loss, '=>', new_loss, local_step_size)
                loss = new_loss
                param_vector = new_param_vector
            else:
                # print('bad', loss, '=>', new_loss, local_step_size)
                local_step_size = local_step_size * backoff
                new_param_vector = param_vector + backoff * (
                    new_param_vector - param_vector
                )
                loss = objective(new_param_vector)

            if loss < best_loss:
                best_param_vector = np.array(param_vector)
                best_loss = loss
            if local_step_size < 1e-3:
                print('nope', j, i, max_steps)
                break

        this_loglik = -negloglik(best_param_vector)
        this_dof = dof(best_param_vector)
        param_path[:, j] = best_param_vector
        pi_kappa_path[j] = pi_kappa
        pi_omega_path[j] = pi_omega
        loglik_path[j] = this_loglik
        dof_path[j] = this_dof
        # NB: this is the negative of the usual convention AIC = 2*dof - 2*loglik
        aic_path[j] = 2 * this_loglik - 2 * this_dof

        # regularisation parameter selection;
        # ideally these should be randomly weighted according
        # to the sizes of those two damn vectors
        mu_grad, kappa_grad, log_omega_grad = unpack(
            np.abs(grad_objective(best_param_vector) * (best_param_vector != 0.0))
        )
        if (np.random.random() <
                sqrt(log_omega_grad.size) /
                (sqrt(kappa_grad.size) + sqrt(log_omega_grad.size))):
            print('log_omega_grad', log_omega_grad)
            pi_omega += max(
                np.amin(log_omega_grad[log_omega_grad > 0]) * j / max_steps,
                pi_omega * 0.1
            )
        else:
            print('kappa_grad', kappa_grad)
            pi_kappa += max(
                np.amin(kappa_grad[kappa_grad > 0]) * j / max_steps,
                pi_kappa * 0.1
            )

    return dict(
        param_path=param_path,
        pi_kappa_path=pi_kappa_path,
        pi_omega_path=pi_omega_path,
        loglik_path=loglik_path,
        dof_path=dof_path,
        aic_path=aic_path
    )
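# The inner loop above is essentially RMSProp with an extra 1/sqrt(i+1) decay
# and a backtracking step size. A minimal sketch of just that core update (my
# paraphrase, not the source's code):
import numpy as np

def rmsprop_step(param, g, avg_sq_grad, step_size, gamma=0.9, eps=1e-8):
    # maintain an exponential moving average of the squared gradient
    avg_sq_grad = gamma * avg_sq_grad + (1 - gamma) * g**2
    # normalize the gradient by its running RMS before stepping
    param = param - step_size * g / (np.sqrt(avg_sq_grad) + eps)
    return param, avg_sq_grad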
os.makedirs(dir_path)

filename = 'mps_chi%d_%s_energy.npy' % (chi, order)
path = dir_path + filename
np.save(path, np.array(E_list))

filename = 'mps_chi%d_%s_dt.npy' % (chi, order)
path = dir_path + filename
np.save(path, np.array(t_list))

filename = 'mps_chi%d_%s_error.npy' % (chi, order)
path = dir_path + filename
np.save(path, np.array(update_error_list))

dir_path = 'data/1d_%s_g%.1f/' % (Hamiltonian, g)
best_E = np.amin(E_list)
filename = 'mps_chi%d_%s_energy.csv' % (chi, order)
path = dir_path + filename

# Try to load existing results; if data for this L is already present,
# either keep it or force a recomputation.
E_dict = {}
overwrite = True
try:
    E_array = misc.load_array(path)
    E_dict = misc.nparray_2_dict(E_array)
    assert L in E_dict.keys()
    print("Found data")
    if overwrite:
        raise  # deliberately fall through to the except branch below
except Exception as error:
    print(error)
# (fragment from inside a time-evolution loop)
fidelity_reached = np.abs(mps_func.overlap(Ap_list, A_list))**2 / \
    mps_func.overlap(Ap_list, Ap_list)
print("fidelity reached : ", fidelity_reached)
update_error_list.append(1. - fidelity_reached)
current_energy = np.sum(mps_func.expectation_values(A_list, H_list))
E_list.append(current_energy)
Sz_array[idx, :] = mps_func.expectation_values_1_site(A_list, Sz_list)
ent_array[idx, :] = mps_func.get_entanglement(A_list)
t_list.append(t_list[-1] + dt)
print("T=", t_list[-1], " E=", E_list[-1], " Sz=", Sz_array[idx, L // 2])

trunc_error = np.abs(1. - fidelity_reached)
if trunc_error > stop_crit:
    first_break_idx = np.amin([first_break_idx, idx])
if first_break_idx + int(1. // dt) < idx:
    break

# (after the loop exits)
num_data = len(t_list)
Sz_array = Sz_array[:num_data, :]
ent_array = ent_array[:num_data, :]

dir_path = 'data_te/1d_%s_g%.4f_h%.4f/L%d/' % (Hamiltonian, g, h, L)
if not os.path.exists(dir_path):
    os.makedirs(dir_path)

filename = 'mps_chi%d_%s_energy.npy' % (chi, order)
path = dir_path + filename
np.save(path, np.array(E_list))
def draw_it(self, **args):
    # get user defined function
    self.g = args['g']  # user-defined input function

    ### other options
    # size of figure
    set_figsize = 7
    if 'set_figsize' in args:
        set_figsize = args['set_figsize']

    # turn axis on or off
    set_axis = 'on'
    if 'set_axis' in args:
        set_axis = args['set_axis']

    # plot title
    set_title = ''
    if 'set_title' in args:
        set_title = args['set_title']

    # horizontal and vertical axis labels
    horiz_1_label = ''
    if 'horiz_1_label' in args:
        horiz_1_label = args['horiz_1_label']

    horiz_2_label = ''
    if 'horiz_2_label' in args:
        horiz_2_label = args['horiz_2_label']

    vert_label = ''
    if 'vert_label' in args:
        vert_label = args['vert_label']

    # set width of plot
    input_range = np.linspace(-3, 3, 100)  # input range for original function
    if 'input_range' in args:
        input_range = args['input_range']

    # set viewing angle on plot
    view = [20, 50]
    if 'view' in args:
        view = args['view']

    # initialize figure
    fig = plt.figure(figsize=(set_figsize, set_figsize))
    artist = fig
    ax = fig.add_subplot(111, projection='3d')

    # create grid from plotting range
    w1_vals, w2_vals = np.meshgrid(input_range, input_range)
    w1_vals.shape = (len(input_range)**2, 1)
    w2_vals.shape = (len(input_range)**2, 1)
    g_vals = self.g([w1_vals, w2_vals])

    # vals for cost surface
    w1_vals.shape = (len(input_range), len(input_range))
    w2_vals.shape = (len(input_range), len(input_range))
    g_vals.shape = (len(input_range), len(input_range))
    g_range = np.amax(g_vals) - np.amin(g_vals)  # used for cleaning up final plot
    ggap = g_range * 0.5

    # plot original function
    ax.plot_surface(w1_vals, w2_vals, g_vals, alpha=0.1, color='k',
                    rstride=15, cstride=15, linewidth=0.07,
                    edgecolor='k', antialiased=True)

    # clean up plotting area
    ax.axis(set_axis)
    ax.set_title(set_title, fontsize=15)
    ax.set_xlabel(horiz_1_label, fontsize=15)
    ax.set_ylabel(horiz_2_label, fontsize=15)
    ax.set_zlabel(vert_label, fontsize=15)
    ax.view_init(view[0], view[1])
    ax.axis(set_axis)

    plt.show()
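# Standalone sketch of the same surface-plot pattern used above, without the
# class wrapper (my own example function):
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D  # noqa: F401 (registers the 3d projection)

r = np.linspace(-3, 3, 100)
w1, w2 = np.meshgrid(r, r)
g = w1**2 + 0.5 * w2**2
fig = plt.figure(figsize=(7, 7))
ax = fig.add_subplot(111, projection='3d')
ax.plot_surface(w1, w2, g, alpha=0.1, color='k', rstride=15, cstride=15,
                linewidth=0.07, edgecolor='k')
plt.show()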
#             f_tz = 0
#             f_tf = 0
#             y_pret = np.dot(x_norm, weight_history[p])
#             for j in range(len(y)):
#                 if y[j] > 0:
#                     if np.sign(y[j]) == np.sign(y_pret[j]):
#                         tz += 1
#                     else:
#                         f_tz += 1
#                 else:
#                     if np.sign(y[j]) == np.sign(y_pret[j]):
#                         tf += 1
#                     else:
#                         f_tf += 1
#             table[0][0] = tz
#             table[0][1] = f_tz
#             table[1][0] = f_tf
#             table[1][1] = tf
#             # print('the table is: ', table)
    return numbers

# plot figure and print the minimum number of misclassifications
fig, ax = plt.subplots(1, 1, figsize=(6, 3))
ax.plot(np.linspace(0, 100, 100), number(y, weight_history), 'b')
plt.xlabel('iteration')
plt.ylabel('number of misclassifications')
plt.show()
print('The minimum number of misclassifications is',
      np.amin(number(y, weight_history)))
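# The number() function called above is only hinted at by the commented-out
# block; a version consistent with the plot (count sign mismatches for each
# weight vector in the history) could look like this -- an assumption, not
# the original implementation:
import numpy as np

def number(y, weight_history):
    counts = []
    for w in weight_history:
        y_pred = np.dot(x_norm, w)   # x_norm assumed in scope, as above
        counts.append(np.sum(np.sign(y_pred) != np.sign(y)))
    return np.array(counts)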