def optimize(self, E, iterations):
    M = self.M
    W, H = utils.init_wh(M, self.k)
    for i in range(iterations):
        print(i, utils.objective(M, W, H, E))
        # Gradient step on H for the masked objective, then sticky projection.
        pH = H
        H = H + self.step * (W.T.mm(E * (M - W.mm(H))))
        H = utils.proj(H, pH, self.sticky)
        # Same update for W, using the refreshed H.
        pW = W
        W = W + self.step * ((E * (M - W.mm(H))).mm(H.T))
        W = utils.proj(W, pW, self.sticky)
    return W, H
def optimize(self, E: Tensor, iterations):
    M = self.M
    W, H = utils.init_wh(M, self.k)
    mW, mH = 0, 0  # momentum buffers
    for i in range(iterations):
        print(i, utils.objective(M, W, H, E))
        prevW, prevH = W, H
        # Momentum extrapolation before the gradient step.
        W = W + (1 - self.momentum) * mW
        H = H + (1 - self.momentum) * mH
        gW, gH = self.grad(M, W, H, E)
        # nW = np.random.normal(0, 0.01 * self.step, (n, self.k))
        # nH = np.random.normal(0, 0.01 * self.step, (self.k, m))
        W = utils.proj(W - self.step * gW, W, self.sticky)
        H = utils.proj(H - self.step * gH, H, self.sticky)
        mW = W - prevW
        mH = H - prevH
    return W, H
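# Both optimizers above descend the masked objective 0.5 * ||E * (M - W H)||_F^2.
# A minimal standalone sketch of the grad() called by the momentum variant,
# derived from that objective; the real method may differ.
def grad(M, W, H, E):
    # Masked residual: E zeroes out unobserved entries of M.
    R = E * (M - W.mm(H))
    # Gradients of 0.5 * ||E * (M - W H)||_F^2 w.r.t. W and H; the descent
    # steps W - step * gW and H - step * gH then match the first variant's
    # ascent-style updates.
    gW = -R.mm(H.T)
    gH = -W.T.mm(R)
    return gW, gH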
def visualize(param, w_default=300):
    image_file = param['image_file']
    model_file = param['model_file']
    proj_param = param['proj_param']
    # load image
    if not os.path.exists(image_file):
        print("Image file does not exist. Skip %s" % image_file)
        return
    img = Image.open(image_file)
    print("Processing %s" % image_file)
    w = w_default
    h = int(w * img.height / float(img.width))  # preserve aspect ratio
    img = img.resize((w, h), Image.LANCZOS)  # LANCZOS is the modern name for the removed ANTIALIAS alias
    img_array = np.array(img)
    # load CAD model
    if not os.path.exists(model_file):
        print("Model file does not exist. Skip %s" % model_file)
        return
    data_3d = utils.read_obj(model_file)
    vertices_3d = data_3d['vertices']
    faces = data_3d['faces']
    # project the 3D vertices onto the image plane
    vertices_2d = utils.proj(
        vertices_3d, proj_param['R'], proj_param['d'],
        proj_param['uv'], proj_param['f'])
    # lay the projection onto the image as translucent triangles
    num_faces = faces.shape[0]
    patches = []
    for i in range(num_faces):
        trivert = np.zeros((3, 2))
        trivert[0, :] = vertices_2d[faces[i, 0], :]
        trivert[1, :] = vertices_2d[faces[i, 1], :]
        trivert[2, :] = vertices_2d[faces[i, 2], :]
        patches.append(Polygon(trivert))
    p = PatchCollection(patches, alpha=0.2, linewidth=0)
    fig, ax = plt.subplots()
    plt.imshow(img_array)
    ax.add_collection(p)
    plt.axis('off')
    plt.show()
    plt.close(fig)
def get_faces_vertices_2d(param):
    model_file = param['model_file']
    proj_param = param['proj_param']
    # load CAD model
    if not os.path.exists(model_file):
        print("Model file does not exist. Skip %s" % model_file)
        return
    data_3d = utils.read_obj(model_file)
    vertices_3d = data_3d['vertices']
    faces = data_3d['faces']
    # project the 3D vertices onto the image plane
    vertices_2d = utils.proj(vertices_3d, proj_param['R'], proj_param['d'],
                             proj_param['uv'], proj_param['f'])
    return faces, vertices_2d
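# A hypothetical pinhole-camera proj() consistent with the call sites above
# (rotation R, translation d, principal point uv, focal length f); the real
# utils.proj may differ.
import numpy as np

def proj(vertices_3d, R, d, uv, f):
    cam = vertices_3d.dot(R.T) + d       # (N, 3) points in the camera frame
    xy = f * cam[:, :2] / cam[:, 2:3]    # perspective divide by depth
    return xy + uv                       # (N, 2) pixel coordinates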
def decompose(self):
    """QR decomposition via classical Gram-Schmidt."""
    self.Q = np.array(self.A, dtype=float)  # copy; float dtype so in-place updates work
    self.R = np.zeros_like(self.Q)
    M, N = self.Q.shape
    # for each column
    for i in range(N):
        # subtract the projection of the ith column onto each earlier column
        for j in range(i):
            p = proj(self.Q[:, i], self.Q[:, j])
            self.Q[:, i] -= p
        # normalize the ith column
        self.Q[:, i] /= l2_norm(self.Q[:, i])
    # recover R from the orthonormal Q: A = QR implies R = Q^T A
    self.R = np.dot(self.Q.T, self.A)
    return self.Q, self.R
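# A quick sanity check for decompose(); the wrapping class name `QR` and its
# constructor are assumed here, not taken from the source.
import numpy as np

A = np.random.randn(5, 3)
qr = QR(A)                  # hypothetical wrapper that stores A
Q, R = qr.decompose()
assert np.allclose(Q.T @ Q, np.eye(3))  # columns of Q are orthonormal
assert np.allclose(Q @ R, A)            # A is recovered as Q R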
def obj_func(x, x0, y0):
    # Evaluate the attack objective.
    # Hard label: -1 if the image is correctly classified, 0 otherwise
    # (negated because BayesOpt assumes we want to maximize).
    # Soft label: correct logit minus the highest logit other than the correct one.
    # In both cases, the perturbation is a successful adversary iff the
    # objective function is >= 0.
    x = transform(x, args.dset, args.arch, args.cos, args.sin).to(device)
    x = proj(x, args.eps, args.inf_norm, args.discrete)
    with torch.no_grad():
        y = cnn_model.predict_scores(x + x0)
    if not args.hard_label:
        y = torch.log_softmax(y, dim=1)
        max_score = y[:, y0]
        y, index = torch.sort(y, dim=1, descending=True)
        select_index = (index[:, 0] == y0).long()
        next_max = y.gather(1, select_index.view(-1, 1)).squeeze()
        f = torch.max(max_score - next_max, torch.zeros_like(max_score))
    else:
        index = torch.argmax(y, dim=1)
        f = torch.where(index == y0, torch.ones_like(index),
                        torch.zeros_like(index)).float()
    return -f
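# A toy check of the soft-label branch above: log-softmax preserves ordering,
# so the margin is positive only while the true class y0 still wins.
import torch

y = torch.log_softmax(torch.tensor([[2.0, 1.0, 0.5],          # correctly classified
                                    [0.2, 3.0, 0.1]]), dim=1)  # misclassified
y0 = 0
max_score = y[:, y0]
ys, index = torch.sort(y, dim=1, descending=True)
select_index = (index[:, 0] == y0).long()
next_max = ys.gather(1, select_index.view(-1, 1)).squeeze()
f = torch.max(max_score - next_max, torch.zeros_like(max_score))
# f[0] > 0 (margin over the runner-up); f[1] == 0 (already adversarial)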
from utils import proj

TEMPLATE_LOADERS = (
    'django.template.loaders.filesystem.Loader',
    'django.template.loaders.app_directories.Loader',
)

# Note the trailing comma: without it, (proj('templates/')) is just a string,
# not a tuple, and Django would iterate over its characters.
TEMPLATE_DIRS = (proj('templates/'),)
import sys

from utils import proj

DEBUG = True
TEMPLATE_DEBUG = DEBUG

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': proj('db.sqlite3'),
        'USER': '',
        'PASSWORD': '',
        'HOST': '',
        'PORT': '',
    }
}

# Use an in-memory database when running the test suite.
if 'test' in sys.argv:
    DATABASES['default']['NAME'] = ':memory:'

SITE_ID = 1
TIME_ZONE = 'America/Chicago'
LANGUAGE_CODE = 'en-us'
USE_TZ = False
USE_I18N = True
USE_L10N = True

SECRET_KEY = 'f3=dg%b600m5aivknk-tld04ucg0a1yj&4+1nxj#a!p3m6kkbp'
def bayes_opt(x0, y0):
    """
    Main Bayesian optimization loop. Begins by initializing the model; then,
    on each iteration, fits the GP to the data, gets a new point with the
    acquisition function, adds it to the dataset, and exits if it yields a
    successful attack.
    """
    best_observed = []
    query_count, success = 0, 0

    # call helper function to initialize model
    train_x, train_obj, mll, model, best_value, mean, std = initialize_model(
        x0, y0, n=args.initial_samples)
    if args.standardize_every_iter:
        train_obj = (train_obj - train_obj.mean()) / train_obj.std()
    best_observed.append(best_value)
    query_count += args.initial_samples

    # run args.iter rounds of BayesOpt after the initial random batch
    for _ in range(args.iter):
        # fit the model
        fit_gpytorch_model(mll)

        # batch case: qEI with a QMC sampler; otherwise the requested
        # analytic acquisition function
        if args.q != 1:
            qmc_sampler = SobolQMCNormalSampler(num_samples=2000, seed=seed)
            qEI = qExpectedImprovement(model=model, sampler=qmc_sampler,
                                       best_f=best_value)
        else:
            if args.acqf == 'EI':
                qEI = ExpectedImprovement(model=model, best_f=best_value)
            elif args.acqf == 'PM':
                qEI = PosteriorMean(model)
            elif args.acqf == 'POI':
                qEI = ProbabilityOfImprovement(model, best_f=best_value)
            elif args.acqf == 'UCB':
                qEI = UpperConfidenceBound(model, beta=args.beta)

        # optimize and get new observation
        new_x, new_obj = optimize_acqf_and_get_observation(qEI, x0, y0)
        if args.standardize:
            new_obj = (new_obj - mean) / std

        # update training points
        train_x = torch.cat((train_x, new_x))
        train_obj = torch.cat((train_obj, new_obj))
        if args.standardize_every_iter:
            train_obj = (train_obj - train_obj.mean()) / train_obj.std()

        # update progress
        best_value, best_index = train_obj.max(0)
        best_observed.append(best_value.item())
        best_candidate = train_x[best_index]

        # reinitialize the model so it is ready for fitting on the next iteration
        torch.cuda.empty_cache()
        model.set_train_data(train_x, train_obj, strict=False)

        # get objective value of best candidate; if we found an adversary, exit
        best_candidate = best_candidate.view(1, -1)
        best_candidate = transform(best_candidate, args.dset, args.arch,
                                   args.cos, args.sin).to(device)
        best_candidate = proj(best_candidate, args.eps, args.inf_norm,
                              args.discrete)
        with torch.no_grad():
            adv_label = torch.argmax(
                cnn_model.predict_scores(best_candidate + x0))
        if adv_label != y0:
            success = 1
            if args.inf_norm:
                print('Adversarial Label', adv_label.item(),
                      'Norm:', best_candidate.abs().max().item())
            else:
                print('Adversarial Label', adv_label.item(),
                      'Norm:', best_candidate.norm().item())
            return query_count, success
        query_count += args.q

    # not successful (ran out of query budget)
    return query_count, success
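# Hypothetical driver over a batch of test images; `test_loader` and the
# aggregation are assumptions, not part of the source.
queries, successes = [], []
for x0, y0 in test_loader:          # assumed (image, label) pairs
    q, s = bayes_opt(x0.to(device), y0.to(device))
    queries.append(q)
    successes.append(s)
print('success rate:', sum(successes) / len(successes),
      'avg queries:', sum(queries) / len(queries))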
from utils import proj

DEBUG = True
TEMPLATE_DEBUG = DEBUG

DATABASES = {
    "default": {
        "ENGINE": "django.db.backends.sqlite3",
        "NAME": proj("db.sqlite3"),
        "USER": "",
        "PASSWORD": "",
        "HOST": "",
        "PORT": "",
    }
}

SITE_ID = 1
TIME_ZONE = "America/Los_Angeles"
LANGUAGE_CODE = "en-us"
USE_TZ = True
USE_I18N = True
USE_L10N = True

SECRET_KEY = "f3=dg%b600m5aivknk-tld04ucg0a1yj&4+1nxj#a!p3m6kkbp"

ROOT_URLCONF = "cghub.urls"
WSGI_APPLICATION = "cghub.wsgi.application"