def gaussNewton(P0):
    pCur = P0.reshape(P0.shape[0] * P0.shape[1])
    fCur = getFunc(pCur)
    normCur = np.linalg.norm(fCur)
    threshNorm = 1e-1

    while (normCur > threshNorm):
        print("Reprojection error: %f" % normCur)
        # JCur = numJac(pCur)
        JCur = analyJac(pCur)
        pInv = pseudoInv(JCur)
        pNext = pCur - np.dot(pInv, fCur)
        pCur = pNext
        fCur = getFunc(pCur)
        normCur = np.linalg.norm(fCur)

    np.set_printoptions(precision=2, suppress=True)
    POpt = pCur.reshape(3, 4)
    # POpt = POpt/POpt[2, 3]
    print("Reprojection error: %f" % normCur)
    return POpt
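# The Gauss-Newton routine above relies on helpers (getFunc, analyJac, numJac,
# pseudoInv) that are not shown here. A minimal, self-contained sketch of the
# pseudo-inverse helper, assuming it simply wraps numpy's Moore-Penrose
# pseudo-inverse (the original implementation is not shown):
import numpy as np

def pseudoInv(J):
    # SVD-based least-squares pseudo-inverse, equivalent to (J^T J)^-1 J^T
    # when J has full column rank.
    return np.linalg.pinv(J)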
def visualize(inputs, outputs, reads, writes, adds, erases):
    """
    Print out some summary of what the NTM did for a given sequence.
    """
    wi = inputs.shape[0]
    hi = outputs[0].shape[0]
    np.set_printoptions(formatter={'float': '{: 0.1f}'.format}, linewidth=150)
    out = toArray(outputs, hi, wi)
    ins = np.array(inputs.T, dtype='float')

    heads = len(reads)
    r, w, a, e = {}, {}, {}, {}
    for idx in range(heads):
        r[idx] = toArray(reads[idx], reads[0][0].shape[0], wi)
        w[idx] = toArray(writes[idx], writes[0][0].shape[0], wi)
        a[idx] = toArray(adds[idx], adds[0][0].shape[0], wi)
        e[idx] = toArray(erases[idx], erases[0][0].shape[0], wi)

    print "inputs: "
    print ins
    print "outputs: "
    print out
    for idx in range(heads):
        print "reads-" + str(idx)
        print r[idx]
        print "writes-" + str(idx)
        print w[idx]
        print "adds-" + str(idx)
        print a[idx]
        print "erases-" + str(idx)
        print e[idx]
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--threshold', type=float,
                        help='the threshold in sprt')
    parser.add_argument('--eps', type=float,
                        help='the distance value')
    parser.add_argument('--dataset', type=str,
                        help='the data set for fairness experiments')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    if args.dataset == 'bank':
        pathX = 'benchmark/fairness/bank/data/'
        pathY = 'benchmark/fairness/bank/data/labels.txt'
    elif args.dataset == 'census':
        pathX = 'benchmark/fairness/census/data/'
        pathY = 'benchmark/fairness/census/data/labels.txt'
    elif args.dataset == 'credit':
        pathX = 'benchmark/fairness/credit/data/'
        pathY = 'benchmark/fairness/credit/data/labels.txt'

    y0s = np.array(ast.literal_eval(read(pathY)))

    for i in range(100):
        assertion['x0'] = pathX + 'data' + str(i) + '.txt'
        x0 = np.array(ast.literal_eval(read(assertion['x0'])))

        output_x0 = model.apply(x0)
        lbl_x0 = np.argmax(output_x0, axis=1)[0]

        print('Data {}\n'.format(i))
        print('x0 = {}'.format(x0))
        print('output_x0 = {}'.format(output_x0))
        print('lbl_x0 = {}'.format(lbl_x0))
        print('y0 = {}\n'.format(y0s[i]))

        if lbl_x0 == y0s[i]:
            print('Run at data {}\n'.format(i))
            solver.solve(model, assertion)
        else:
            print('Skip at data {}'.format(i))

        print('\n============================\n')
def main():
    # A few settings added to make the output easier to display
    warnings.filterwarnings("ignore")
    plt.rcParams['font.sans-serif'] = ['SimHei']
    plt.rcParams['axes.unicode_minus'] = False
    np.set_printoptions(precision=4)

    if (test == 'wmap'):
        optimize_wmap()
    elif (test == 'sliding_window'):
        sliding_window_opt()
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--threshold', type=float,
                        help='the threshold in sprt')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    lower = model.lower
    upper = model.upper

    for i in range(50):
        pathX = 'benchmark/mnist_challenge/x_y/x' + str(i) + '.txt'
        pathY = 'benchmark/mnist_challenge/x_y/y' + str(i) + '.txt'

        x0s = np.array(ast.literal_eval(read(pathX)))
        y0s = np.array(ast.literal_eval(read(pathY)))

        for j in range(200):
            x0 = x0s[j]
            assertion['x0'] = str(x0.tolist())

            output_x0 = model.apply(x0)
            lbl_x0 = np.argmax(output_x0, axis=1)[0]

            print('Data {}\n'.format(i * 200 + j))
            print('x0 = {}'.format(x0))
            print('output_x0 = {}'.format(output_x0))
            print('lbl_x0 = {}'.format(lbl_x0))
            print('y0 = {}\n'.format(y0s[j]))

            if lbl_x0 == y0s[j]:
                update_bounds(args, model, x0, lower, upper)
                print('Run at data {}\n'.format(i * 200 + j))
                solver.solve(model, assertion)
            else:
                print('Skip at data {}'.format(i * 200 + j))

            print('\n============================\n')
def __init__(self, *args, **kwargs):
    super(TestLogLinearModel, self).__init__(*args, **kwargs)
    np.set_printoptions(threshold=sys.maxsize)
    self.scenario = scen.ASRankingScenario()
    self.scenario.read_scenario("aslib_data-aslib-v4.0/CPMP-2015")
    self.scenario.compute_rankings()

    # preprocessing of data
    self.scaler = StandardScaler()
    self.scenario.feature_data[
        self.scenario.feature_data.columns] = self.scaler.fit_transform(
            self.scenario.feature_data[self.scenario.feature_data.columns])

    self.test_scen, self.train_scen = self.scenario.get_split(indx=5)
    self.test_scen.remove_duplicates()
    self.train_scen.remove_duplicates()
def model_confusion(y, model_preds):
    unique_y = np.unique(y)
    class_map = dict()
    for i, val in enumerate(unique_y):
        class_map[val] = i
    class_names = list(sample_sub.columns[1:-1])

    y_map = np.zeros((y.shape[0], ))
    y_map = np.array([class_map[val] for val in y])

    cnf_matrix = confusion_matrix(y_map, np.argmax(model_preds, axis=-1))
    np.set_printoptions(precision=2)

    plt.figure(figsize=(8, 8))
    plot_confusion_matrix(cnf_matrix, classes=class_names, normalize=True,
                          title='Confusion matrix')
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    model, assertion, solver, display = parse(spec)
    solver.solve(model, assertion, display)
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--threshold', type=float,
                        help='the threshold in sprt')
    parser.add_argument('--eps', type=float,
                        help='the distance value')
    parser.add_argument('--dataset', type=str,
                        help='the data set for fairness experiments')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    if args.dataset == 'bank':
        pathX = '../benchmark/fairness/bank/data/'
        pathY = '../benchmark/fairness/bank/data/labels.txt'
    elif args.dataset == 'census':
        pathX = '../benchmark/fairness/census/data/'
        pathY = '../benchmark/fairness/census/data/labels.txt'
    elif args.dataset == 'credit':
        pathX = '../benchmark/fairness/credit/data/'
        pathY = '../benchmark/fairness/credit/data/labels.txt'
    elif args.dataset == 'FairSquare':
        pathX = '../benchmark/fairness/FairSquare/data/'
        pathY = '../benchmark/fairness/FairSquare/data/labels.txt'

    y0s = np.array(ast.literal_eval(read(pathY)))

    assertion['x0'] = pathX + 'data' + str(0) + '.txt'
    solver.solve(model, assertion)
    print('\n============================\n')
    '''
def calc_rolled_out_gradient(self, cnt=100):
    np.set_printoptions(precision=2, linewidth=200)
    scores = []

    # normalize
    for k in self.cur_grad_db:
        for j in self.cur_grad_db[k]:
            self.grad_db[k][j] = []

    # perform cnt rollouts
    for i in range(cnt):
        self.run_model()
        scores.append(self.cur_trace_score)
        for k in self.cur_grad_db:
            for j in self.cur_grad_db[k]:
                self.grad_db[k][j].append(self.cur_grad_db[k][j])

    # our baseline is just the median
    scores = np.atleast_2d(scores)
    total_score = np.sum(scores)
    # scores = scores - np.mean( scores )
    scores = scores - np.median(scores)

    # normalize
    for k in self.cur_grad_db:
        for j in self.cur_grad_db[k]:
            # print "%s - %s" % (k,j)
            # tmp = self.grad_db[ k ][ j ]
            # print tmp[0].shape
            # self.grad_db[ k ][ j ] = np.dot( scores, np.vstack( tmp ) ) / cnt
            tmp = self.grad_db[k][j]
            tmpdims = len(tmp[0].shape)
            grads = np.stack(tmp, axis=tmpdims)
            newshape = np.ones((tmpdims + 1), dtype=int)
            newshape[tmpdims] = scores.shape[1]
            tmpscores = np.reshape(scores, newshape)
            self.grad_db[k][j] = np.sum(tmpscores * grads, axis=tmpdims) / cnt

    return total_score / float(cnt)
def run_mnist_classification(config):
    modelcls, loss, error = factory(config)

    X_train, Y_train, X_val, Y_val, X_test, Y_test = utils.load_mnist()
    N, D = X_train.shape
    _, C = Y_train.shape
    print('N, D, C:', N, D, C)

    layers = [D] + config.hidden_layers + [C]
    activation = utils.relu
    # activation = utils.logistic
    activation_output = utils.softmax

    model = modelcls(layers, activation, activation_output)
    params = model.new_params(initializer, initializer2)
    grad = autograd.grad(loss)

    train_errs = list()
    test_errs = list()
    losses = list()

    np.set_printoptions(formatter={'float_kind': lambda x: "%.2f" % x})

    for epoch in range(config.epochs):
        batch_iterator = utils.create_batch_iterator(config.batch_size, X_train, Y_train)
        # print(f'# Epoch:{epoch:>3}')
        with tqdm(total=N) as pbar:
            for i, (X_batch, Y_batch) in enumerate(batch_iterator):
                l = loss(params, model, X_batch, Y_batch)
                gradients = grad(params, model, X_batch, Y_batch)
                pbar.set_description(f'Batch:{i:>3}; Loss:{l:>10.2f}')
                pbar.update(len(X_batch))
                model.update(params, gradients, lr=config.lr)

        l = loss(params, model, X_train, Y_train)
        train_error = error(params, model, X_train, Y_train)
        test_error = error(params, model, X_test, Y_test)
        losses.append(l)
        train_errs.append(train_error)
        test_errs.append(test_error)
        print(
            f'Epoch:{epoch:>3}; Loss:{l:>10.2f}; Train Error:{100*train_error:>6.2f}%; Test Error:{100*test_error:>6.2f}%'
        )
def print_wf_values(theta1=0.0, theta2=0.0, use_j=False, B=0.0):
    wf = Wavefunction(use_jastrow=use_j)

    # Adjust numpy output so arrays are printed with higher precision
    float_formatter = "{:.15g}".format
    np.set_printoptions(formatter={'float_kind': float_formatter})

    if use_j:
        VP = np.array([theta1, theta2, B])
        print("Values for theta = ", theta1, theta2, " and jastrow B = ", B)
    else:
        VP = np.array([theta1, theta2])
        print("Values for theta = ", theta1, theta2, " and no jastrow")

    r1 = np.array([1.0, 2.0, 3.0])
    r2 = np.array([0.0, 1.1, 2.2])

    psi_val = wf.psi(r1, r2, VP)
    print(" wf = ", psi_val, " log wf = ", np.log(np.abs(psi_val)))

    g0 = wf.grad0(r1, r2, VP) / psi_val
    print(" grad/psi for particle 0 = ", g0[0], g0[1], g0[2])

    # Using the laplacian of log psi to match internal QMCPACK values
    lap_0 = wf.lap0(r1, r2, VP)
    print(" laplacian of log psi for particle 0 = ", lap_0)
    lap_1 = wf.lap1(r1, r2, VP)
    print(" laplacian for log psi particle 1 = ", lap_1)

    eloc = wf.local_energy(r1, r2, VP)
    print(" local energy = ", eloc)

    dp = wf.dpsi(r1, r2, VP)
    print(" parameter derivative of log psi = ", dp / psi_val)

    deloc = wf.dlocal_energy(r1, r2, VP)
    print(" parameter derivative of local energy = ", deloc)
    print("")
def visualize(inputs, outputs, reads, writes, adds, erases):
    """
    Print out some summary of what the NTM did for a given sequence.
    """
    wi = inputs.shape[0]
    hi = outputs[0].shape[0]
    np.set_printoptions(formatter={'float': '{: 0.1f}'.format}, linewidth=150)
    out = toArray(outputs, hi, wi)
    ins = np.array(inputs.T, dtype='float')

    print "inputs: "
    print ins
    print "outputs: "
    print out
    print "reads"
    print reads
    print "writes"
    print writes
    print "adds"
    print adds
    print "erases"
    print erases
def stochastic_hessian_inverse(self, hessian_scaling, S1=None, S2=None):
    '''
    From Agarwal et. al. "Second-order stochastic optimization for machine
    learning in linear time." 2017.  Not clear that this provides good
    accuracy in a reasonable amount of time.
    '''
    self.compute_derivs()
    X = self.training_data.X
    N = self.training_data.X.shape[0]
    D = self.params.get_free().shape[0]
    if S1 is None and S2 is None:
        S1 = int(np.sqrt(N)/10)
        S2 = int(10*np.sqrt(N))

    if self.regularization is not None:
        evalRegHess = autograd.hessian(self.regularization)
        paramsCpy = self.params.get_free().copy()
        regHess = evalRegHess(self.params.get_free())
        regHess[-1,-1] = 0.0
        self.params.set_free(paramsCpy)

    hinvEsts = np.zeros((S1,D,D))
    for ii in range(S1):
        hinvEsts[ii] = np.eye(D)
        for n in range(1,S2):
            idx = np.random.choice(N)
            H_n = np.outer(X[idx],X[idx]) * self.D2[idx] * N + regHess
            if np.linalg.norm(H_n) >= hessian_scaling*0.9999:
                from IPython import embed; np.set_printoptions(linewidth=150); embed()
                print(np.linalg.norm(H_n))
            #H_n = self.get_single_datapoint_hessian(idx) * N
            H_n /= hessian_scaling
            hinvEsts[ii] = np.eye(D) + (np.eye(D) - H_n).dot(hinvEsts[ii])
    return np.mean(hinvEsts, axis=0) / hessian_scaling
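# A tiny self-contained illustration of the recursion used above (the Agarwal et al.
# LiSSA-style estimator): for a fixed SPD matrix H with spectral norm below 1,
# iterating Hinv <- I + (I - H) Hinv converges to H^{-1}; the stochastic version
# replaces H with per-datapoint estimates. All names below are illustrative only.
import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((4, 4))
H = A @ A.T + np.eye(4)                 # symmetric positive definite
H /= (np.linalg.norm(H, 2) * 1.1)       # scale so the spectral norm is below 1

Hinv = np.eye(4)
for _ in range(500):
    Hinv = np.eye(4) + (np.eye(4) - H) @ Hinv

assert np.allclose(Hinv, np.linalg.inv(H), atol=1e-6)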
from capytaine.io.xarray import separate_complex_values
from capytaine.io.xarray import merge_complex_values
import glob
import json
import control
from scipy.optimize import minimize
from scipy.optimize import Bounds
from scipy import optimize
from mhkit import wave
from scipy import fft
from scipy import sparse
import types
import warnings

np.set_printoptions(precision=3)
np.set_printoptions(linewidth=160)
np.set_printoptions(threshold=sys.maxsize)

DataSet_type = xr.core.dataset.Dataset


def calc_impedance(hydro: DataSet_type, damp_frac: float = 0.05, make_sym: bool = True):
    """
    Calculate intrinsic impedance (see, e.g., Falnes).

    @book{falnes2002ocean,
        title={Ocean Waves and Oscillating Systems: Linear Interactions
               Including Wave-Energy Extraction},
        author={Falnes, J.},
        isbn={9781139431934},
        url={https://books.google.com/books?id=bl1FyQjCklgC},
    Returns:
        expm_h :: ndarray(N x N) - The unitary operator of a.
    """
    eigvals, p = np.linalg.eigh(h)
    p_dagger = np.conjugate(np.swapaxes(p, -1, -2))
    d = np.exp(t * eigvals)
    return np.matmul(p * d, p_dagger)


def H_expectation_value(A_list, H_list):
    return np.real(np.sum(mps_func.expectation_values(A_list, H_list)))


if __name__ == "__main__":
    np.random.seed(1)
    np.set_printoptions(linewidth=2000, precision=5, threshold=4000)
    L = int(sys.argv[1])
    g = float(sys.argv[2])
    h = float(sys.argv[3])
    chi = int(sys.argv[4])
    order = str(sys.argv[5])
    assert order in ['1st', '2nd']
    Hamiltonian = 'TFI'
    ## [TODO] add check whether data already
    J = 1.
    tol = 1e-14
    cov_crit = tol * 0.1
    max_N_iter = 100
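# A quick self-check of the eigendecomposition-based exponential above. The helper
# name `expm_eigh` is hypothetical (the original function header is cut off); it just
# repeats the body shown. For Hermitian h and imaginary t the result should match
# scipy.linalg.expm and be unitary.
import numpy as np
import scipy.linalg

def expm_eigh(h, t):
    # exp(t * h) via the spectral decomposition h = p diag(eigvals) p^dagger
    eigvals, p = np.linalg.eigh(h)
    p_dagger = np.conjugate(np.swapaxes(p, -1, -2))
    d = np.exp(t * eigvals)
    return np.matmul(p * d, p_dagger)

h_test = np.random.randn(4, 4)
h_test = (h_test + h_test.T) / 2                 # make it symmetric/Hermitian
u = expm_eigh(h_test, -1j * 0.1)                 # exp(-i * dt * h)
assert np.allclose(u, scipy.linalg.expm(-1j * 0.1 * h_test))
assert np.allclose(u @ u.conj().T, np.eye(4))    # unitarity check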
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# ----------------------------------------------------------------------

import autograd.numpy as np
import sys
from autograd import grad, jacobian

np.set_printoptions(linewidth=1000)
np.set_printoptions(threshold=sys.maxsize)
np.set_printoptions(formatter={'float': lambda x: format(x, '11.4e')})


# Applies softmax per every column of the 'x' matrix.
def softmax(x):
    e_x = np.exp(x - np.max(x, axis=0))
    return e_x / e_x.sum(axis=0)


# Multi-head attention model, the forward pass. Works with multiple
# 'q' vector time-steps and one sequence (batchSize=1/beamSize=1).
def attn_fwd(q, k, v, wq, wk, wv, wo, nheads, rlink):
    out = 0
import pandapower as pp
import pandapower.networks as ppnw
import autograd.numpy as np
from autograd import grad
import time
from copy import deepcopy

np.set_printoptions(formatter={
    'complexfloat': lambda x: "{0:.3f}".format(x),
    'float_kind': lambda x: "{0:.3f}".format(x)
})


def fin_diff(f, x, eps=1e-6):
    """ Finite difference approximation of grad of function f. From JAX docs. """
    return np.array([(f(x + eps * v) - f(x - eps * v)) / (2 * eps)
                     for v in np.eye(len(x))])


def init_v(net, n, pd2ppc):
    """ Initial bus voltage vector using generator voltage setpoints or 1j+0pu. """
    v = [0j + 1 for _ in range(n)]
    for r in net.gen.itertuples():
        v[pd2ppc[r.bus]] = r.vm_pu
    for r in net.ext_grid.itertuples():
        v[pd2ppc[r.bus]] = r.vm_pu * np.exp(1j * r.va_degree * np.pi / 180)
    return np.array(v, dtype=np.complex64)


def scheduled_p_q(net, n, pd2ppc):
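# A minimal sanity check for the fin_diff helper above, kept self-contained
# (it re-imports and restates fin_diff so it runs standalone): central differences
# should agree with autograd's exact gradient on a smooth toy function.
import autograd.numpy as np
from autograd import grad

def fin_diff(f, x, eps=1e-6):
    # Central-difference gradient approximation, one coordinate at a time.
    return np.array([(f(x + eps * v) - f(x - eps * v)) / (2 * eps)
                     for v in np.eye(len(x))])

def quadratic(x):
    return np.sum(x ** 2) + np.prod(x)

x0 = np.array([0.5, -1.0, 2.0])
assert np.allclose(fin_diff(quadratic, x0), grad(quadratic)(x0), atol=1e-5)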
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--threshold', type=float,
                        help='the threshold in sprt')
    parser.add_argument('--eps', type=float,
                        help='the distance value')
    parser.add_argument('--dataset', type=str,
                        help='the data set for ERAN experiments')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    lower = model.lower
    upper = model.upper

    if args.dataset == 'cifar_conv':
        pathX = 'benchmark/eran/data/cifar_conv/'
        pathY = 'benchmark/eran/data/labels/y_cifar.txt'
    elif args.dataset == 'cifar_fc':
        pathX = 'benchmark/eran/data/cifar_fc/'
        pathY = 'benchmark/eran/data/labels/y_cifar.txt'
    elif args.dataset == 'mnist_conv':
        pathX = 'benchmark/eran/data/mnist_conv/'
        pathY = 'benchmark/eran/data/labels/y_mnist.txt'
    elif args.dataset == 'mnist_fc':
        pathX = 'benchmark/eran/data/mnist_fc/'
        pathY = 'benchmark/eran/data/labels/y_mnist.txt'

    y0s = np.array(ast.literal_eval(read(pathY)))

    for i in range(10):
        assertion['x0'] = pathX + 'data' + str(i) + '.txt'
        x0 = np.array(ast.literal_eval(read(assertion['x0'])))

        output_x0 = model.apply(x0)
        lbl_x0 = np.argmax(output_x0, axis=1)[0]

        print('Data {}\n'.format(i))
        print('x0 = {}'.format(x0))
        print('output_x0 = {}'.format(output_x0))
        print('lbl_x0 = {}'.format(lbl_x0))
        print('y0 = {}\n'.format(y0s[i]))

        t0 = time.time()

        if lbl_x0 == y0s[i]:
            update_bounds(args, model, x0, lower, upper)
            print('Run at data {}\n'.format(i))
            solver.solve(model, assertion)
        else:
            print('Skip at data {}'.format(i))

        t1 = time.time()
        print('time = {}'.format(t1 - t0))
        print('\n============================\n')
# Fit a 2d MOG model using optimization over a Riemannian manifold
# as described in this paper http://arxiv.org/pdf/1506.07677v1.pdf
# Code is slightly modified from
# https://pymanopt.github.io/MoG.html

import autograd.numpy as np
np.set_printoptions(precision=2)
import matplotlib.pyplot as plt
#%matplotlib inline
from autograd.scipy.misc import logsumexp
from pymanopt.manifolds import Product, Euclidean, PositiveDefinite
from pymanopt import Problem
from pymanopt.solvers import SteepestDescent, TrustRegions

# Number of data
N = 1000

# Dimension of data
D = 2

# Number of clusters
K = 3

# True model parameters
pi = [0.1, 0.6, 0.3]
mu = [np.array([-4, 1]), np.array([0, 0]), np.array([2, -1])]
Sigma = [np.array([[3, 0], [0, 1]]), np.array([[1, 1.], [1, 3]]), .5 * np.eye(2)]

# Sample some data
components = np.random.choice(K, size=N, p=pi)
samples = np.zeros((N, D))
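# The sampling loop is cut off above; a sketch of how the per-component draws are
# typically filled in (following the pymanopt MoG example this snippet is based on):
for k in range(K):
    # Draw the samples assigned to component k from its Gaussian.
    indices = (components == k)
    samples[indices] = np.random.multivariate_normal(mu[k], Sigma[k], np.sum(indices))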
def inference_graph_iht(self, x, w, relaxed=False, return_energy=False, **kwargs):
    """find argmax_y np.dot(w, joint_feature(x, y))"""
    # j = int(len(x) * 0.1 / 2.0)
    np.set_printoptions(threshold=sys.maxsize)
    max_iter = 1000
    y_hat = x
    # y_hat = np.random.rand(len(x))
    yt = np.copy(y_hat)
    for iter in range(max_iter):
        # print("---------------------------------------------------------------------------")
        # print("iter {}".format(iter))
        # print("current w: {}".format(w))
        # print("current yt {}".format(yt))
        # print("current joint feature: {}".format(self.joint_feature(x, yt)))
        # print("current delta joint feature: {}".format(self.delta_joint_feature(x, yt, w)))
        Omega_X = []
        y_prev = np.copy(yt)
        gradient = self._get_objective_grad(x, yt, w)
        # print("gradient: {}".format(gradient))
        normalized_grad = self._normalized_gradient(yt, gradient)
        # print("normalized gradient {}".format(normalized_grad))
        # print("normalized gradient {}".format(np.nonzero(normalized_grad)))
        sig_nodes = []
        for i, ng in enumerate(normalized_grad):
            if ng == 1.0:
                sig_nodes.append(i)
        # print("sig nodes {}".format(len(sig_nodes)))
        # print("sig nodes {}".format(sig_nodes))

        # g: number of connected component
        edges = np.array(self.edges)
        costs = np.ones(len(edges))
        # re_head = head_proj(edges=edges, weights=costs, x=normalized_grad, g=1, s=k, budget=k - 1,
        #                     delta=1. / 169., max_iter=100, err_tol=1e-8, root=-1, pruning='strong',
        #                     epsilon=1e-10, verbose=0)
        # re_nodes, re_edges, p_y = re_head
        re_head = self.algo_head_tail_bisearch(edges=edges, x=normalized_grad, costs=costs, g=1,
                                               root=-1, s_low=250, s_high=300, max_num_iter=1000,
                                               verbose=0)
        re_nodes, p_y = re_head
        omega_yt = set(re_nodes)
        # print("omega_yt {}".format(len(omega_yt)))
        indicator_yt = np.zeros_like(yt)
        indicator_yt[list(omega_yt)] = 1.0
        # print("current yt {}".format(yt))
        by = (yt + 0.001 * gradient) * indicator_yt
        # print("gradient ascent result {}".format(yt + 0.001 * gradient))
        # print("current by {}".format(by))
        sorted_indices = np.argsort(by)[::-1]
        by[by <= 0.0] = 0.0
        num_non_posi = len(np.where(by == 0.0))
        by[by > 1.0] = 1.0
        if num_non_posi == len(x):
            print("siga-1 is too large and all values in the gradient are non-positive")
            for i in range(5):
                by[sorted_indices[i]] = 1.0

        edges = np.array(self.edges)
        costs = np.ones(len(edges))
        # re_tail = tail_proj(edges=edges, weights=costs, x=by, g=1, s=k, budget=k - 1, nu=2.5,
        #                     max_iter=100, err_tol=1e-8, root=-1, pruning='strong', verbose=0)
        # re_nodes, re_edges, p_y = re_tail
        re_tail = self.algo_head_tail_bisearch(edges=edges, x=by, costs=costs, g=1, root=-1,
                                               s_low=240, s_high=260, max_num_iter=1000, verbose=0)
        re_nodes, p_y = re_tail
        psi_y = re_nodes
        # print("psi_y {}".format(psi_y))
        yt = np.zeros_like(yt)
        yt[list(psi_y)] = by[list(psi_y)]
        # TODO: note the non-zero entries of xt[list(psi_x)] may not be connected
        # print("yt {}".format(np.nonzero(yt)))

        gap_y = np.linalg.norm(yt - y_prev) ** 2
        if gap_y < 1e-6:
            break

    value = np.dot(w, self.joint_feature(x, yt))
    # print("value {}, w {}, joint feature {}".format(value, w, self.joint_feature(x, yt)))
    return yt
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--eps', type=float,
                        help='the distance value')
    parser.add_argument('--has_ref', action='store_true',
                        help='turn on/off refinement')
    parser.add_argument('--max_ref', type=int, default=20,
                        help='maximum times of refinement')
    parser.add_argument('--ref_typ', type=int, default=0,
                        help='type of refinement')
    parser.add_argument('--max_sus', type=int, default=1,
                        help='maximum times of finding adversarial sample')
    parser.add_argument('--dataset', type=str,
                        help='the data set for CEGAR experiments')
    parser.add_argument('--num_tests', type=int, default=100,
                        help='maximum number of tests')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    lower = model.lower
    upper = model.upper

    pathX = 'benchmark/cegar/data/mnist_fc/'
    pathY = 'benchmark/cegar/data/labels/y_mnist.txt'

    y0s = np.array(ast.literal_eval(read(pathY)))

    for i in range(args.num_tests):
        assertion['x0'] = pathX + 'data' + str(i) + '.txt'
        x0 = np.array(ast.literal_eval(read(assertion['x0'])))

        output_x0 = model.apply(x0)
        lbl_x0 = np.argmax(output_x0, axis=1)[0]

        print('Data {}\n'.format(i))
        print('x0 = {}'.format(x0))
        print('output_x0 = {}'.format(output_x0))
        print('lbl_x0 = {}'.format(lbl_x0))
        print('y0 = {}\n'.format(y0s[i]))

        if lbl_x0 == y0s[i]:
            best_verified, best_failed = 0, 1e9
            eps, step_eps = 0.01, 0.01

            while True:
                t0 = time.time()

                args.eps = eps
                update_bounds(args, model, x0, lower, upper)
                print('Run at data {}\n'.format(i))
                res = solver.solve(model, assertion)

                if res == 1:
                    print('Verified at {:.3f}'.format(eps))
                    best_verified = max(best_verified, eps)
                elif res == 0:
                    print('Failed at {:.3f}'.format(eps))
                    best_failed = min(best_failed, eps)
                else:
                    break

                t1 = time.time()
                print('time = {}'.format(t1 - t0))
                print('\n============================\n')

                if best_verified == round(best_failed - 0.001, 3):
                    break

                if res == 1:
                    if step_eps == 0.01:
                        eps = round(eps + step_eps, 3)
                    elif step_eps == -0.005:
                        step_eps = 0.001
                        eps = round(eps + step_eps, 3)
                    elif step_eps == 0.001:
                        eps = round(eps + step_eps, 3)
                elif res == 0:
                    if step_eps == 0.01:
                        step_eps = -0.005
                        eps = round(eps + step_eps, 3)
                    elif step_eps == -0.005:
                        step_eps = -0.001
                        eps = round(eps + step_eps, 3)
                    elif step_eps == -0.001:
                        eps = round(eps + step_eps, 3)

            print("Image {} Verified at {:.3f} and Failed at {:.3f}".format(
                i, best_verified, best_failed))
        else:
            print('Skip at data {}'.format(i))
            print("Image {} Verified at {:.3f} and Failed at {:.3f}".format(
                i, -1, -1))
            res = -1

        print('\n============================\n')
import autograd.numpy as np
import tensorflow as tf

from extract_data import dense_to_one_hot
from multilayer_nn import MultiLayerNN
from plotting import plot_confusion_matrices
from utils import train_validation_split, compute_accuracy

np.set_printoptions(suppress=True)
np.set_printoptions(threshold=np.nan)

if __name__ == "__main__":
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
    print(x_train.shape, y_train.shape, x_test.shape, y_test.shape)

    x_train = np.reshape(x_train, (-1, 32 * 32 * 3)) / 255
    x_test = np.reshape(x_test, (-1, 32 * 32 * 3)) / 255
    y_train = dense_to_one_hot(y_train)
    y_test = np.reshape(y_test, 10000)

    x_train, y_train, x_valid, y_valid = train_validation_split(
        x_train, y_train, training_set_size=45_000)

    params = {
        'batch_size': 32,
        'num_of_epochs': 10,
        'learning_rate': 0.1,
        'init_scale': 0.05,
        'keep_prob': 0.9,
        'ema': 0.999
    }
def main():
    test_acc_only = False

    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--threshold', type=float,
                        help='the threshold in sprt')
    parser.add_argument('--eps', type=float,
                        help='the distance value')
    parser.add_argument('--dataset', type=str,
                        help='the data set for rnn experiments')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    #add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    lower = model.lower[0]
    upper = model.upper[0]

    if args.dataset == 'jigsaw':
        pathX = '../benchmark/rnn_fairness/data/jigsaw/all/'
        pathY = '../benchmark/rnn_fairness/data/jigsaw/all/labels.txt'
    elif args.dataset == 'wiki':
        pathX = '../benchmark/rnn_fairness/data/wiki/'
        pathY = '../benchmark/rnn_fairness/data/wiki/labels.txt'

    y0s = np.array(ast.literal_eval(read(pathY)))
    model.shape = (100, 50)

    l_pass = 0
    l_fail = 0

    if test_acc_only == True:
        for i in range(1000):
            assertion['x0'] = pathX + 'data' + str(i) + '.txt'
            #assertion['x0'] = pathX + str(i) + '.txt'
            x0 = np.array(ast.literal_eval(read(assertion['x0'])))

            shape_x0 = (int(x0.size / 50), 50)
            model.shape = shape_x0
            model.lower = np.full(x0.size, lower)
            model.upper = np.full(x0.size, upper)

            output_x0 = model.apply(x0)
            lbl_x0 = 1 - np.argmax(output_x0, axis=1)[0]
            '''
            lbl_x0 = 0
            if output_x0[0][0] > output_x0[0][1]:
                lbl_x0 = 1
            else:
                lbl_x0 = 0
            '''
            print('Data {}, y {}, lbl {}'.format(i, y0s[i], lbl_x0))

            # accuracy test
            if lbl_x0 == y0s[i]:
                l_pass = l_pass + 1
            else:
                l_fail = l_fail + 1

        print("Accuracy of ori network: %f.\n" % (l_pass / (l_pass + l_fail)))
    else:
        solver.solve(model, assertion)
    xlim = (-1.0, 71)
    for i in range(3):
        ax[i].legend(ncol=1, loc='upper right')
        ax[i].set_xlim(xlim)
    ax[0].set_title(category + ' methods')
    ax[-1].set_xlabel('Iteration')
    ax[0].set_ylabel('Iterate error norm')
    ax[1].set_ylabel('Objective error')
    ax[2].set_ylabel('Gradient norm')
    fig.tight_layout()
    return fig, ax


if __name__ == "__main__":
    # Reset global options
    np.set_printoptions(precision=3)
    seed = 1

    # Make LQR problem data
    A, B, Q, X0 = gen_lqr_problem(n=3, m=2, rho=0.9, round_places=1, seed=seed)
    n, m = B.shape

    # Make LQR objective
    f, g, h = make_lqr_objective(A, B, Q, X0)
    obj = Objective(f, g, h, name='LQR')

    # Initial policy
    K0 = np.zeros([m, n])
    vK0 = vec(K0)

    sanity_check(K0, A, B, Q, X0, f, g, h)
    loss_grad(...) <-- calculates gradients for the loss function

Other Useful Functions:
    build(...) <-- generates a dictionary of parameters (connections)
    update_params(...) <-- updates param weights based on gradients provided

NOTE!!!! This model doesn't work exactly as described in Kruschke's original
paper; primarily because the similarity equation using the minkowsky dist func
isn't differentiable when the distance metric is >= 2
'''

## std lib

## ext requirements
import autograd.numpy as np
from autograd import grad
from scipy import spatial
np.set_printoptions(suppress=True)

## int requirements
import utils

minfloat = np.finfo(np.double).tiny


def pdist(a1, a2, r, **kwargs):
    attention_weights = kwargs.get('attention_weights',
                                   np.ones([1, a1.shape[1]]) / a1.shape[1])

    # format inputs & exemplars for (i think vectorized) pairwise distance calculations
    a1_tiled = np.tile(a1, a2.shape[0]).reshape(a1.shape[0], a2.shape[0], a1.shape[1])
    a2_tiled = np.repeat([a2], a1.shape[0], axis=0)

    if hps['r'] > 1:
        # get attention-weighted pairwise distances
from utilis import *
from scipy.linalg import block_diag
import matplotlib.pyplot as plt
# import numpy as np

# the auto grad things:
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import value_and_grad, grad
from autograd import hessian

SMALL = 1e-3
np.set_printoptions(edgeitems=30, linewidth=100000,
                    formatter=dict(float=lambda x: "%.3g" % x))
np.set_printoptions(precision=3)
np.set_printoptions(suppress=True)


class GPFA_sv_mc_lr(GPFA_sv_mc):
    """
    sparse variational GPFA
    """
    algName = "lr_mc_sv_GPFA"
def main():
    start = time.time()

    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--size', type=int, default=3,
                        help='the size of the backdoor')
    parser.add_argument('--rate', type=float, default=0.90,
                        help='the success rate')
    parser.add_argument('--threshold', type=float, default=0.01,
                        help='the threshold')
    parser.add_argument('--target', type=int,
                        help='the target used in verify and attack')
    # for attacking
    parser.add_argument('--atk_only', action='store_true',
                        help='turn on/off attack')
    parser.add_argument('--atk_pos', type=int,
                        help='the attack position')
    parser.add_argument('--algorithm', type=str, default='backdoor',
                        help='the chosen algorithm')
    parser.add_argument('--num_procs', type=int, default=0,
                        help='the number of processes')
    parser.add_argument('--total_imgs', type=int, default=10000,
                        help='the number of images')
    parser.add_argument('--num_imgs', type=int, default=100,
                        help='the number of images')
    parser.add_argument('--dataset', type=str,
                        help='the data set for BACKDOOR experiments')
    args = parser.parse_args()

    if args.atk_only:
        run_attack(args)
    else:
        run_verify_parallel(args)

    end = time.time()

    t = round(end - start)
    m = int(t / 60)
    s = t - 60 * m
    print('Running time = {}m {}s'.format(m, s))
import autograd.numpy as numpy
import autograd
import scipy.linalg
import scipy.stats
import scipy.optimize
import visualisation
from tqdm import tqdm

numpy.set_printoptions(precision=2)
eps = numpy.finfo(numpy.random.randn(1).dtype).eps


class GP_Beta:

    def __init__(self, length_scale=None, std=None, omega=None, kappa=None):
        self.n = None
        self.y = None
        self.mu = None
        self.sigma = None
        self.q = None
s = 10
sortInds = np.argsort(predErrsExact)
plt.figure(figsize=(5.5, 5))
plt.scatter(np.arange(sets.shape[0]), predErrsExact[sortInds],
            label='Exact CV', c='b', s=s)
plt.scatter(np.arange(sets.shape[0]), predErrsIJUpperBnd[sortInds],
            label=r'$\widetilde\mathrm{IJ}$ Upper Bnd.', c='r', s=s)
plt.scatter(np.arange(sets.shape[0]), predErrsIJLowerBnd[sortInds],
            label=r'$\widetilde\mathrm{IJ}$ Lower Bnd.', c='k', s=s)
plt.legend(fontsize=legendFontsize)
plt.xlabel('Datapoint, n', fontsize=axlabelFontsize)
plt.ylabel('Prediction error of $y_n$', fontsize=axlabelFontsize)
plt.title('Exact CV vs bounds across datapoints n', fontsize=titleFontsize)
plt.gca().tick_params(axis='both', which='major', labelsize=tickFontsize)
plt.tight_layout()
#plt.savefig('C:YOUR_FILEPATH/singleTrialErrorBounds-Poisson-IJ.png', bbox='tight')
plt.show()

from IPython import embed
np.set_printoptions(linewidth=80)
embed()
MODELS_PATH = 'mnistmodels/'

print 'Reading images from %s' % TRAINING_IMAGES_PATH
images = utils.read_images(TRAINING_IMAGES_PATH)
print 'Reading labels from %s' % TRAINING_LABELS_PATH
labels = utils.read_labels(TRAINING_LABELS_PATH)

print 'Reading images from %s' % TESTING_IMAGES_PATH
testing_images = utils.read_images(TESTING_IMAGES_PATH)
print 'Reading labels from %s' % TESTING_LABELS_PATH
testing_labels = utils.read_labels(TESTING_LABELS_PATH)

training_images = images[5000:-5000]
training_labels = labels[5000:-5000]
validating_images = np.concatenate([images[:5000], images[-5000:]])
validating_labels = np.concatenate([labels[:5000], labels[-5000:]])

np.set_printoptions(suppress=True)

nnm = None


def create(layers=[28*28, 100, 10], batch_size=32, dropout=0.1):
    print 'Creating neural network'
    global nnm
    nnm = NeuralNetworkModel(layers, batch_size, dropout)


def learn():
    print 'Learning'
    for no, _ in enumerate(nnm.epochs_learn(training_images, training_labels)):
        print 'Epoch {0}: {1}'.format(no, nnm.test(validating_images, validating_labels))
def main():
    np.set_printoptions(threshold=20)
    parser = argparse.ArgumentParser(description='nSolver')
    parser.add_argument('--spec', type=str, default='spec.json',
                        help='the specification file')
    parser.add_argument('--algorithm', type=str,
                        help='the chosen algorithm')
    parser.add_argument('--threshold', type=float,
                        help='the threshold in sprt')
    parser.add_argument('--eps', type=float,
                        help='the distance value')
    parser.add_argument('--has_ref', action='store_true',
                        help='turn on/off refinement')
    parser.add_argument('--max_ref', type=int, default=20,
                        help='maximum times of refinement')
    parser.add_argument('--ref_typ', type=int, default=0,
                        help='type of refinement')
    parser.add_argument('--max_sus', type=int, default=1,
                        help='maximum times of finding adversarial sample')
    parser.add_argument('--dataset', type=str,
                        help='the data set for fairness experiments')
    parser.add_argument('--num_tests', type=int, default=100,
                        help='maximum number of tests')
    args = parser.parse_args()

    with open(args.spec, 'r') as f:
        spec = json.load(f)

    add_assertion(args, spec)
    add_solver(args, spec)

    model, assertion, solver, display = parse(spec)

    '''
    if args.dataset == 'bank':
        pathX = 'benchmark/fairness/bank/data/'
        pathY = 'benchmark/fairness/bank/data/labels.txt'
    elif args.dataset == 'census':
        pathX = 'benchmark/fairness/census/data/'
        pathY = 'benchmark/fairness/census/data/labels.txt'
    elif args.dataset == 'credit':
        pathX = 'benchmark/fairness/credit/data/'
        pathY = 'benchmark/fairness/credit/data/labels.txt'
    '''

    if args.dataset == 'bank':
        pathX = '../benchmark/fairness/bank/data/'  #debug
        pathY = '../benchmark/fairness/bank/data/labels.txt'  #debug
    elif args.dataset == 'census':
        pathX = '../benchmark/fairness/census/data/'  #debug
        pathY = '../benchmark/fairness/census/data/labels.txt'  #debug
    elif args.dataset == 'credit':
        pathX = '../benchmark/fairness/credit/data/'  #debug
        pathY = '../benchmark/fairness/credit/data/labels.txt'  #debug

    y0s = np.array(ast.literal_eval(read(pathY)))

    for i in range(args.num_tests):
        assertion['x0'] = pathX + 'data' + str(i) + '.txt'
        x0 = np.array(ast.literal_eval(read(assertion['x0'])))

        output_x0 = model.apply(x0)
        lbl_x0 = np.argmax(output_x0, axis=1)[0]

        print('Data {}\n'.format(i))
        print('x0 = {}'.format(x0))
        print('output_x0 = {}'.format(output_x0))
        print('lbl_x0 = {}'.format(lbl_x0))
        print('y0 = {}\n'.format(y0s[i]))

        if lbl_x0 == y0s[i]:
            print('Run at data {}\n'.format(i))
            solver.solve(model, assertion)
        else:
            print('Skip at data {}'.format(i))

        print('\n============================\n')