def plot_function(self, ax):
    big_val1 = np.amax(np.asarray([abs(a[0]) for a in self.w_hist]))
    big_val2 = np.amax(np.asarray([abs(a[1]) for a in self.w_hist]))
    big_val = max(big_val1, big_val2, 3)

    # create plotting range
    r = np.linspace(-big_val, big_val, 100)

    # create grid from plotting range
    w1_vals, w2_vals = np.meshgrid(r, r)
    w1_vals.shape = (len(r)**2, 1)
    w2_vals.shape = (len(r)**2, 1)
    g_vals = self.g([w1_vals, w2_vals])

    # vals for cost surface
    w1_vals.shape = (len(r), len(r))
    w2_vals.shape = (len(r), len(r))
    g_vals.shape = (len(r), len(r))

    # vals for plotting range
    gmin = np.amin(g_vals)
    gmax = np.amax(g_vals)
    ggap = (gmax - gmin) * 0.1
    gmin = gmin - ggap
    gmax = gmax + ggap

    # plot and fix up panel
    strider = int(round(45 / float(big_val)))
    strider = max(strider, 2)
    ax.plot_surface(w1_vals, w2_vals, g_vals, alpha=0.1, color='k',
                    rstride=strider, cstride=strider, linewidth=1, edgecolor='k')
def get_neighs_sq(inputs):
    verts = list()
    if inputs.shape[2] == 576:
        for i in range(12):
            for j in range(12):
                x = 2 * i + 48 * j
                a = inputs[:, :, x]
                b = inputs[:, :, x + 1]
                c = inputs[:, :, x + 24]
                d = inputs[:, :, x + 25]
                temp = np.stack((a, b, c, d))
                temp = np.amax(temp, axis=0)
                verts.append(temp)
    elif inputs.shape[2] == 64:
        for i in range(4):
            for j in range(4):
                x = 2 * i + 16 * j
                a = inputs[:, :, x]
                b = inputs[:, :, x + 1]
                c = inputs[:, :, x + 8]
                d = inputs[:, :, x + 9]
                temp = np.stack((a, b, c, d))
                temp = np.amax(temp, axis=0)
                verts.append(temp)
    return verts
def standarizeImage(im):
    if len(im.shape) < 3:
        im = convert_bw_to_rgb(im)
    im = np.array(im, 'float32')
    if im.shape[0] != 64:
        im = imresize(im, (64, 64, 3))
    if np.amax(im) > 1.1:
        im = im / 255.0
    assert ((np.amax(im) > 0.01) & (np.amax(im) <= 1))
    assert ((np.amin(im) >= 0.00))
    return im
def amariError(W_hat, W_true):
    A = np.dot(W_hat.T, W_true)
    n_rows, n_cols = A.shape

    amari_error = 0.
    for col_idx in range(n_cols):
        row_max = np.amax(np.abs(A[:, col_idx]))
        amari_error += (1. / (2 * n_cols)) * (
            np.sum(np.abs(A[:, col_idx])) / row_max - 1.)

    for row_idx in range(n_rows):
        col_max = np.amax(np.abs(A[row_idx, :]))
        amari_error += (1. / (2 * n_rows)) * (
            np.sum(np.abs(A[row_idx, :])) / col_max - 1.)

    return amari_error
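A minimal usage sketch (not from the original source): with an identity matrix as the ground truth and a column-permuted copy as the estimate, the Amari error should come out as exactly zero, since the metric does not penalize permutations of the recovered components.

# Hypothetical sanity check for amariError: a permuted copy of the true
# matrix scores zero.
import numpy as np

W_true = np.eye(3)
W_hat = W_true[:, [2, 0, 1]]       # same components, columns permuted
print(amariError(W_hat, W_true))   # -> 0.0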
def forward_pass(self, inputs, param_vector):
    # n = mesh_traversal.get_neighs_sq(inputs)
    # result = np.array(n)
    # result = np.moveaxis(result, 0, 2)
    # if inputs.shape[2] == 576:

    new_shape = inputs.shape[:2]
    for i in [0]:
        pool_width = self.pool_shape[i]
        img_width = inputs.shape[i + 2]
        new_dim = int((np.sqrt(img_width) / np.sqrt(pool_width)))
        new_shape += (new_dim * new_dim, )

    result = []
    for i in range(new_dim):
        for j in range(new_dim):
            x = (3 * j + 25) + 24 * 3 * i
            n = mesh_traversal.get_neighs_sq2(adj_mtx, x)
            a = inputs[:, :, n[0]]
            b = inputs[:, :, n[1]]
            c = inputs[:, :, n[2]]
            d = inputs[:, :, n[3]]
            e = inputs[:, :, n[4]]
            f = inputs[:, :, n[5]]
            g = inputs[:, :, n[6]]
            h = inputs[:, :, n[7]]
            temp = np.stack((a, b, c, d, e, f, g, h))
            temp = np.amax(temp, axis=0)
            result.append(temp)
    result = np.stack(result)
    result = np.moveaxis(result, 0, 2)
    return result
def data_snr_maximized_extrinsic(frequencies, data, detector, chirpm, symmratio,
                                 spin1, spin2, Luminosity_Distance, theta, phi,
                                 iota, alpha_squared, bppe, NSflag,
                                 cosmology=cosmology.Planck15):
    noise_temp, noisefunc, f = IMRPhenomD.populate_noise(detector, int_scheme='quad')
    noise = noisefunc(frequencies)**2

    template_detector_response = detector_response_dCS(
        frequencies, chirpm, symmratio, spin1, spin2, Luminosity_Distance,
        theta, phi, iota, alpha_squared, bppe, NSflag, cosmology)
    int1 = 4 * simps((np.conjugate(template_detector_response) *
                      template_detector_response).real / noise, frequencies)
    snr_template = np.sqrt(int1)

    g_tilde = 4 * np.divide(
        np.multiply(np.conjugate(data), template_detector_response), noise)
    g = np.fft.ifft(g_tilde)
    gmag = np.abs(g)
    deltaf = frequencies[1] - frequencies[0]
    maxg = np.amax(gmag).real * (len(frequencies)) * (deltaf)
    return maxg / snr_template
def animate(k):
    ax.cla()

    # quadratic to plot
    alpha = alpha_values[k]
    g = lambda w: w[0]**2 + alpha * w[1]**2

    # create grid from plotting range
    w1_vals, w2_vals = np.meshgrid(input_range, input_range)
    w1_vals.shape = (len(input_range)**2, 1)
    w2_vals.shape = (len(input_range)**2, 1)
    g_vals = g([w1_vals, w2_vals])

    # vals for cost surface
    w1_vals.shape = (len(input_range), len(input_range))
    w2_vals.shape = (len(input_range), len(input_range))
    g_vals.shape = (len(input_range), len(input_range))

    g_range = np.amax(g_vals) - np.amin(g_vals)  # used for cleaning up final plot
    ggap = g_range * 0.5

    # plot original function
    ax.plot_surface(w1_vals, w2_vals, g_vals, alpha=0.1, color='k',
                    rstride=15, cstride=15, linewidth=2, edgecolor='k')

    # clean up plotting area
    ax.set_title(set_title, fontsize=15)
    ax.set_xlabel(horiz_1_label, fontsize=15)
    ax.set_ylabel(horiz_2_label, fontsize=15)
    ax.set_zlabel(vert_label, fontsize=15)
    ax.view_init(view[0], view[1])
    ax.axis(set_axis)

    return artist,
def my_projection(X, alpha):
    # print("check max")
    if (np.amax(np.abs(X)) >= alpha):
        print("projecting")
        X[X > alpha] = alpha
        X[X < -alpha] = -alpha
    return X
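As an illustration (not part of the original code), my_projection acts as an elementwise clamp onto [-alpha, alpha]; note that it modifies X in place.

# Illustrative call with made-up data: entries outside [-1, 1] are clamped.
import numpy as np

X = np.array([-3.0, 0.5, 2.0])
print(my_projection(X, 1.0))   # prints "projecting", then [-1.   0.5  1. ]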
def gather_and_con(a, x):
    x1 = x[:, :, 0]
    x2 = x[:, :, 1]
    y1 = a[x1.astype(int)]
    y2 = a[x2.astype(int)]
    z = np.multiply(y1, y2)
    f = np.amax(z, axis=1)
    return f
def unit_vector_deriv(self, x):
    """Calculate derivative of unit vector x_unit w.r.t. its non-unit vector x"""
    dims = np.shape(x)
    n = np.amax(dims)
    xmag = self.column_vector_norm2(x)
    d_x_unit_d_x = (1. / xmag) * (np.identity(n) - (1. / xmag**2) * np.outer(x, x))
    return d_x_unit_d_x
def column_vector_norm2(self, x):
    """Calculate 2-norm of a column vector explicitly rather than using
    numpy's built-in utilities"""
    dims = np.shape(x)
    dim = np.amax(dims)
    norm = 0.
    for i in range(dim):
        norm = norm + x[i, 0]**2
    norm = norm**0.5
    return norm
def logsumexp(x):
    # We need to use these element wise functions
    # because standard array level functions
    # did not work with Autograd
    def scalar_subtract_and_exp(a, scalar):
        return np.asarray([np.exp(a[i] - scalar) for i in range(a.size)])

    mx = np.amax(x)
    emx = scalar_subtract_and_exp(x, mx)
    return np.log(emx.sum()) + mx
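A quick illustrative sanity check (values made up, not from the original source): on well-scaled inputs the helper agrees with the naive formula, and on large inputs it stays finite where naive exponentiation would overflow.

# Illustrative check of the log-sum-exp trick.
import numpy as np

x = np.array([1.0, 2.0, 3.0])
print(logsumexp(x), np.log(np.sum(np.exp(x))))   # both ~3.4076

big = np.array([1000.0, 1000.0])
print(logsumexp(big))   # ~1000.6931; np.exp(1000) alone would overflow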
def plot_var(self, max_var_list, s):
    xpt = np.array(range(max_var_list.shape[0]))
    xpt = xpt - xpt[-1] + s
    self.lvar[0].set_xdata(xpt)
    self.lvar[0].set_ydata(max_var_list)
    # self.lvar[0].set_3d_properties(zs=0)
    self.ax4.set_xlim([xpt[0], xpt[-1]])
    self.ax4.set_ylim([np.amin(max_var_list), np.amax(max_var_list)])
    self.ax4.title.set_text("new max_post_var: %f" % max_var_list[-1])
    self.fig.canvas.draw()
def plot_function(self, ax):
    big_val = np.amax(np.asarray([abs(a) for a in self.w_hist]))
    big_val = max(big_val, 3)

    # create plotting range
    w_plot = np.linspace(-big_val, big_val, 500)
    g_plot = self.g(w_plot)

    # plot function
    ax.plot(w_plot, g_plot, color='k', zorder=0)
def logsumexp(x):
    # We need to use these element wise functions
    # because standard array level functions
    # did not work with Autograd
    def scalar_subtract_and_exp(a, scalar):
        return np.asarray([np.exp(a[i] - scalar) for i in range(a.size)])

    mx = np.amax(x)
    emx = scalar_subtract_and_exp(x, mx)
    return np.log(emx.sum()) + mx
def lam(ts, eval_ts=None, bw=1.0):
    """Kernel-density estimate of event intensity from timestamps ts,
    evaluated at eval_ts (defaults to ts)."""
    if eval_ts is None:
        eval_ts = ts
    fn = gaussian_kde(
        ts,
        bw * (np.amax(ts) - np.amin(ts)) / ts.size
        # * (ts.size**(-0.8))
    )
    return fn(eval_ts) * ts.size
def get_log_prob_n(self, x):
    lp_nk = self.get_log_prob_nk(x)
    # Prevent underflows in the weights by doing logsumexp ourselves.
    v = self.mix_par.values
    lp_n_max = np.amax(lp_nk, axis=1)
    lp_n = np.log(np.einsum(
        'nk,k->n',
        np.exp(lp_nk - np.expand_dims(lp_n_max, 1)),
        v['w'][0, :])) + lp_n_max
    return lp_n
def tensor_bullet2_vector(self, t, v):
    """Calculate product of n x n x n tensor and n x 1 vector to produce
    n x n matrix"""
    dims = np.shape(v)
    n = np.amax(dims)
    # use append to try to avoid assignment like m[1] = blah
    m = np.zeros((n, n))
    for i in range(n):
        m[:, i] = np.dot(t[:, :, i], v[:, 0])
    return m
def loss(self, W):
    # first shift the values of f so that the highest number is 0:
    scores = self.score(W)
    scores -= np.array([np.amax(scores, axis=1)]).T
    # raise to e^x
    ex = np.exp(scores)
    # normalize each row (one sample per row) so the class probabilities sum to 1
    nex = ex / ex.sum(axis=1, keepdims=True)
    nexln = -np.log(nex)
    one_hot_array = np.zeros(nexln.shape)
    one_hot_array[np.arange(self.X.shape[0]), np.array(self.Y).T] = 1
    # find average loss of all
    return (nexln * one_hot_array).flatten().sum() / self.Y.shape[0]
def animate(k):
    # clear panels for next slide
    ax1.cla()
    ax2.cla()
    ax3.cla()

    # print rendering update
    if np.mod(k + 1, 25) == 0:
        print('rendering animation frame ' + str(k + 1) + ' of ' + str(num_frames))
    if k == num_frames - 1:
        print('animation rendering complete!')
        time.sleep(1.5)
        clear_output()

    # plot function 1
    ax1.plot(w_plot, g1_plot, color='k', zorder=1)
    ax1.set_title(title1, fontsize=15)

    # plot function 2
    ax2.plot(w_plot, g2_plot, color='k', zorder=1)
    ax2.set_title(title2, fontsize=15)

    # plot combination of both
    alpha = alpha_vals[k]
    if mode == 'regularization':
        g_combo = g1_plot + alpha * g2_plot
    else:
        g_combo = (1 - alpha) * g1_plot + alpha * g2_plot
    ax3.plot(w_plot, g_combo, color='k', zorder=1)
    ax3.set_title(title3, fontsize=15)

    # set vertical limits
    ax1.set_ylim([g1_min, g1_max])
    ax2.set_ylim([g2_min, g2_max])

    # set vertical limit markers
    gmin = np.amin(g_combo)
    gmax = np.amax(g_combo)
    g_gap = 0.2 * (gmax - gmin)
    gmin = gmin - g_gap
    gmax = gmax + g_gap
    ax3.set_ylim([gmin, gmax])

    return artist,
def findWMax(hm):
    '''
    Read the heatmap and return the location and value of each channel's maximum.

    :param hm: the heatmap given by readHM, shape (height, width, p)
    :return: [W_max, score] where W_max is a 2 x p array holding the (column, row)
             coordinates of each channel's maximum and score holds the maximum values
    '''
    p = hm.shape[2]
    W_max = np.zeros([2, p])
    score = np.zeros(p)
    for i in range(p):
        score[i] = np.amax(hm[:, :, i])
        (x, y) = np.where(hm[:, :, i] == score[i])
        W_max[0, i] = y[0]
        W_max[1, i] = x[0]
    return [W_max, score]
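For illustration only (a synthetic heatmap, not from the original pipeline), placing one peak per channel by hand shows how the returned coordinates follow the (column, row) layout of W_max.

# Hypothetical 4x4 heatmap with two channels; peaks placed by hand.
import numpy as np

hm = np.zeros((4, 4, 2))
hm[1, 3, 0] = 0.9                # channel 0 peaks at row 1, col 3
hm[2, 0, 1] = 0.7                # channel 1 peaks at row 2, col 0
W_max, score = findWMax(hm)
print(W_max)                     # [[3. 0.]   <- columns
                                 #  [1. 2.]]  <- rows
print(score)                     # [0.9 0.7]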
def construct_numpy_representation_with_ordered_pairs_of_rankings_and_features_and_weights(
        features: pd.DataFrame, performances: pd.DataFrame,
        max_pairs_per_instance=100, seed=1, order="asc", skip_value=None):
    """Get numpy representation of features, performances and rankings

    Arguments:
        features {pd.DataFrame} -- Feature values
        performances {pd.DataFrame} -- Performances of algorithms

    Returns:
        Tuple of four numpy ndarrays: the first stores the feature values,
        the second the algorithm performances, the third the algorithm
        rankings and the fourth the per-pair weights
    """
    rankings, weights = sample_pairs(performances,
                                     pairs_per_instance=max_pairs_per_instance,
                                     seed=seed, skip_value=skip_value)
    joined = rankings.join(features).join(performances, lsuffix="_rank",
                                          rsuffix="_performance")
    np_features = joined[features.columns.values].values
    np_performances = joined[[x for x in performances.columns]].values
    np_rankings = joined[[x for x in rankings.columns]].values + 1
    np_performances = np_performances[
        np.arange(np_performances.shape[0])[:, np.newaxis], np_rankings - 1]

    max_len = len(performances.columns)
    print("performances", performances.head())
    print("labels", rankings.head())
    print("weight", weights.head())
    np_weights = weights.to_numpy()
    np_weights = np.amax(np_weights, axis=1)
    # print("np_weights", np_weights)
    np_weights = np.exp2(np_weights)
    # print("exp np_weights", np_weights)
    # TODO check for maximization problems
    if order == "desc":
        np_rankings = np.flip(np_rankings, axis=1)
        np_performances = np.flip(np_performances, axis=1)
    return np_features, np_performances, np_rankings, np_weights
def logsumexp(a, axis=None, keepdims=False):
    """Modified from scipy: compute the log of the sum of exponentials of
    input elements.

    Parameters
    ----------
    a : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes over which the sum is taken. By default `axis` is None,
        and all elements are summed.
    keepdims : bool, optional
        If this is set to True, the axes which are reduced are left in the
        result as dimensions with size one. With this option, the result will
        broadcast correctly against the original array.

    Returns
    -------
    res : ndarray
        The result, ``np.log(np.sum(np.exp(a)))``, calculated in a numerically
        more stable way.
    """
    a_max = np.amax(a, axis=axis, keepdims=True)

    # Cutting the max if infinite
    a_max = np.where(~np.isfinite(a_max), 0, a_max)
    assert np.sum(~np.isfinite(a_max)) == 0

    tmp = np.exp(a - a_max)

    # suppress warnings about log of zero
    with np.errstate(divide='ignore'):
        s = np.sum(tmp, axis=axis, keepdims=keepdims)
        out = np.log(s)

    if not keepdims:
        a_max = np.squeeze(a_max, axis=axis)
    out += a_max
    return out
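A small illustrative call (values chosen by hand): taking logs of the inputs first makes the expected row-wise results easy to read off.

# Row-wise reduction example: exp undoes the log, so each result is the
# log of that row's sum.
import numpy as np

a = np.log(np.array([[1.0, 2.0, 3.0],
                     [4.0, 5.0, 6.0]]))
print(logsumexp(a, axis=1))   # ~[log(6), log(15)] = [1.7918, 2.7081]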
def calculate_S_roots(A, B, C, D):
    s1 = S1_2(A, B, C, D)
    s2 = S2_2(A, B, C, D)
    s3 = S3_2(A, B, C, D)
    roots = [s1, s2, s3]
    print("Roots")
    print(roots)

    S_plus2 = np.amax(roots)
    S_plus = np.sqrt(S_plus2)
    S_minus2 = np.amin(roots)
    S_minus = np.sqrt(S_minus2)
    # print(roots)
    # print(S_plus2, S_minus2)
    roots.remove(S_plus2)
    roots.remove(S_minus2)
    S3 = np.sqrt(roots[0])
    return S_plus, S_minus, S3
def solve(self, f_obj, f_ineq, A, b, x0, it=20, eps=1e-9):
    """
    solves min_x f(x)
    s.t. f_ineq(x) <= 0,
         A(x) = b
    """
    t = 1.0
    mu = 1.5
    m = A.shape[0]
    eps = 1e-9
    x = x0

    # check f_ineq constraints
    if not np.all(f_ineq(x) <= 0):
        # solve for min a s.t. f_ineq(x) <= a
        # check if a <= 0
        def f_obj_aux(x):
            return x[0, 0]

        def f_ineq_aux(x):
            return x[1:, 0] - x[0, 0]

        v_max = np.amax(x) + 1.
        xx = np.concatenate([[[v_max]], x])
        A_aux = np.zeros((A.shape[0], A.shape[1] + 1))
        A_aux[:, 1:] = A
        b_aux = b
        xx_soln, _, _, _ = self.solve(f_obj_aux, f_ineq_aux, A_aux, b_aux, xx)
        if xx[0, 0] > 0:
            return None
        else:
            x = xx[1:, 0]

    # set initial starting point
    v = np.zeros((A.shape[0], 1))  # dual variable

    while True:
        f_aug = self.f_augment(f_obj, f_ineq, t)
        x, v = self.solve_inner(f_aug, A, b, x, v, it, eps, eps)
        if m / t <= eps:
            break
        t = mu * t

    r = self.residual(f_aug, A, b, x, v)
    return x, v, np.dot(r.T, r)[0, 0], np.max(np.dot(A, x) - b)
def to_loss(weights, Program_temp, current_a, clause_gen, xs, ILP_prob,
            all_ground_atoms, forward):
    s = time.time()
    if forward:
        print(current_a)
    for step in range(1, Program_temp.T + 1):
        sump = np.zeros((len(current_a)))
        for pred_ind in range(0, len(clause_gen)):
            # pa/t calculate b for each pa
            softmaxweightsfull = softmaxf(weights[pred_ind])
            softmaxweightsre = softmaxweightsfull.reshape(softmaxweightsfull.shape + (1,))
            a_throughf = np.array(
                [[np.amax(np.array([gather_and_con(current_a, xs[pred_ind][0][op1_ind]),
                                    gather_and_con(current_a, xs[pred_ind][1][op2_ind])]),
                          axis=0)
                  for op2_ind in range(0, len(clause_gen[pred_ind][1]))]
                 for op1_ind in range(0, len(clause_gen[pred_ind][0]))])
            Interm = np.multiply(a_throughf, softmaxweightsre).reshape(-1, len(current_a)).sum(axis=0)
            sump = sump + Interm
        current_a = current_a + sump - (current_a * sump)
        if forward:
            print("post ", step, current_a)
        e = time.time()
        print("tstep", e - s)
        s = e
        sys.stdout.flush()
    loss = calcLoss(current_a, ILP_prob.Pos, ILP_prob.Neg, ILP_prob.LangFrame.target,
                    all_ground_atoms, forward)
    return loss
def compute_LR(rate, old_points, g_pert, type_epsilon="relative"):
    if type_epsilon == "relative":
        norm_old = np.linalg.norm(old_points, axis=1)
        norm_pert = np.linalg.norm(g_pert, axis=1)

        # Replace all tiny values by 1
        norm_pert[norm_pert < 0.000001] = 1
        ratio = norm_old / norm_pert
        epsilon = rate * np.amin(ratio)
    elif type_epsilon == "absolute":
        norm_pert = np.linalg.norm(g_pert, axis=1)
        norm_pert[norm_pert < 0.000001] = 1
        epsilon = rate / np.amax(norm_pert)
    elif type_epsilon == "usual":
        epsilon = rate
    else:
        print("Error type of epsilon")
    return epsilon
def alogsumexp(a, b=None, axis=None, keepdims=False):
    """
    Performs logsumexp using the numpy from autograd:
    np.log(np.sum(b*np.exp(a)))

    Args:
        a (np.ndarray): The matrix/vector to be exponentiated (shape (N, ...))
        b (np.ndarray): The number by which to multiply exp(a) (shape (N,)) (default None)
        axis (int): the axis over which to sum (default None)
        keepdims (bool): whether to keep the result as the same shape (default False)

    Return:
        a matrix that is the logsumexp result of a & b
    """
    if b is not None:
        if nup.any(b == 0):
            a = a + 0.  # promote to at least float
            a[b == 0] = -nup.inf

    # find maximum of a along the axis provided
    a_max = nup.amax(a, axis=axis, keepdims=True)

    if b is not None:
        b = nup.asarray(b)
        tmp = b * nup.exp(a - a_max)
    else:
        tmp = nup.exp(a - a_max)

    # suppress warnings about log of zero
    with nup.errstate(divide='ignore'):
        s = nup.sum(tmp, axis=axis, keepdims=keepdims)
        out = nup.log(s)

    if not keepdims:
        a_max = nup.squeeze(a_max, axis=axis)
    out += a_max
    return out
def truncate0(x, axis=None, strict=False, tol=1e-13):
    '''make sure everything in x is non-negative'''
    # the maximum along axis
    maxes = np.maximum(np.amax(x, axis=axis), 1e-300)
    # the negative part of minimum along axis
    mins = np.maximum(-np.amin(x, axis=axis), 0.0)
    # assert the negative numbers are small (relative to maxes)
    assert np.all(mins <= tol * maxes)

    if axis is not None:
        idx = [slice(None)] * x.ndim
        idx[axis] = np.newaxis
        # index with a tuple (list-based fancy indexing is no longer supported)
        mins = mins[tuple(idx)]
        maxes = maxes[tuple(idx)]

    if strict:
        # set everything below the tolerance to 0
        return set0(x, x < tol * maxes)
    else:
        # set everything of same magnitude as most negative number, to 0
        return set0(x, x < 2 * mins)
def forward_pass(W1, W2, W3, b1, b2, b3, x):
    """
    forward-pass for a fully connected neural network with 2 hidden layers of M neurons

    Inputs:
        W1 : (M, 784) weights of first (hidden) layer
        W2 : (M, M) weights of second (hidden) layer
        W3 : (10, M) weights of third (output) layer
        b1 : (M, 1) biases of first (hidden) layer
        b2 : (M, 1) biases of second (hidden) layer
        b3 : (10, 1) biases of third (output) layer
        x : (N, 784) training inputs

    Outputs:
        Fhat : (N, 10) output of the neural network at training inputs
    """
    H1 = np.maximum(0, np.dot(x, W1.T) + b1.T)   # layer 1 neurons with ReLU activation, shape (N, M)
    H2 = np.maximum(0, np.dot(H1, W2.T) + b2.T)  # layer 2 neurons with ReLU activation, shape (N, M)
    Fhat = np.dot(H2, W3.T) + b3.T               # layer 3 (output) neurons with linear activation, shape (N, 10)

    # Implement a stable log-softmax activation function at the output layer.
    # Compute max of each row; a is the row-wise max of Fhat, broadcast to the same shape
    a = np.ones(np.shape(Fhat)) * np.expand_dims(np.amax(Fhat, axis=1), axis=1)
    # Compute using the logSumExp trick
    log_sum_exp = np.ones(np.shape(Fhat)) * np.expand_dims(
        np.log(np.sum(np.exp(np.subtract(Fhat, a)), axis=1)), axis=1)
    # Element-wise subtraction
    Fhat = np.subtract(np.subtract(Fhat, a), log_sum_exp)
    return Fhat
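A shape and normalization check with random parameters (illustrative only; the layer width, batch size and 0.01 scaling are arbitrary choices, not values from the original training code): since the output is a log-softmax, exponentiating each row should give values summing to one.

# Hypothetical smoke test: random weights, tiny batch.
import numpy as np

M, N = 32, 5
rng = np.random.RandomState(0)
W1, b1 = 0.01 * rng.randn(M, 784), np.zeros((M, 1))
W2, b2 = 0.01 * rng.randn(M, M), np.zeros((M, 1))
W3, b3 = 0.01 * rng.randn(10, M), np.zeros((10, 1))
x = rng.randn(N, 784)

Fhat = forward_pass(W1, W2, W3, b1, b2, b3, x)
print(Fhat.shape)                # (5, 10)
print(np.exp(Fhat).sum(axis=1))  # each entry ~1.0 (rows are log-probabilities)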
def normalize0(self, data, axis=0):
    assert (np.isfinite(data).all() == True)
    mean = np.mean(data, axis=axis)
    var = np.var(data, axis=axis)
    stdn = np.std(data, axis=axis)
    minimum_arr = np.amin(data, axis=axis, keepdims=True)
    maximum_arr = np.amax(data, axis=axis, keepdims=True)
    normalize_state = {
        "mean": mean,
        "var": var,
        "min": minimum_arr,
        "max": maximum_arr,
        "stdn": stdn
    }
    if (self.config.NN_ZERO_MEAN_NORMALIZE == True):
        normalized = (data - mean) / (stdn + 0.00001)
    else:
        normalized = (data - minimum_arr) / (maximum_arr - minimum_arr + 0.0001)
    return normalized.reshape(data.shape), normalize_state
def softmax(z):
    # Avoid numerical overflow by removing max
    e = np.exp(z - np.amax(z, axis=1, keepdims=True))
    return e / np.sum(e, axis=1, keepdims=True)
def softmax(z):
    e = np.exp(z - np.amax(z, axis=1, keepdims=True))
    return e / np.sum(e, axis=1, keepdims=True)
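A brief illustrative check for the softmax implementations above (made-up inputs): subtracting the row maximum keeps the computation finite even for very large logits, and each output row still sums to one.

# Hypothetical inputs, including values that would overflow a naive np.exp.
import numpy as np

z = np.array([[1000.0, 1001.0, 1002.0],
              [-5.0, 0.0, 5.0]])
p = softmax(z)
print(p.sum(axis=1))   # ~[1. 1.]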