def gradient_ascent_intermediate_layer(prep_img, select_layer, select_filter):
    model = get_model('vgg16')
    # Truncate the network at the selected layer so the forward pass stops
    # at the feature map we want to visualize
    if "features" in dict(model.named_children()):
        conv_model = torch.nn.Sequential(
            *list(model.features.children())[:select_layer + 1])
    else:
        conv_model = torch.nn.Sequential(
            *list(model.children())[:select_layer + 1])
    optimizer = Adam([prep_img], lr=0.1, weight_decay=1e-6)
    for i in range(1, 201):
        optimizer.zero_grad()
        # Mean activation of the selected filter; negate it so that minimizing
        # the loss performs gradient ascent on the activation
        output = conv_model(prep_img)[0][select_filter]
        loss = -torch.mean(output)
        print(i, "->", loss.item())
        loss.backward()
        optimizer.step()
        created_image = utils.recreate_image(prep_img)
        if i % 5 == 0:
            im_path = '../generated/layer_vis_%d.jpg' % i
            utils.save_image(created_image, im_path)
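# Usage sketch for the filter visualization above. Assumptions: preprocess_image
# is the repo's own helper that turns a uint8 HxWxC image into a normalized,
# gradient-enabled tensor, and the layer/filter indices are illustrative only.
import numpy as np

random_image = np.uint8(np.random.uniform(120, 190, (224, 224, 3)))
# preprocess_image is assumed to return a leaf tensor with requires_grad=True,
# which Adam needs in order to update the image directly
prep_img = preprocess_image(random_image)
gradient_ascent_intermediate_layer(prep_img, select_layer=17, select_filter=5)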
def generate(self, original_image, im_label, target_class):
    im_label_as_var = torch.from_numpy(np.asarray([target_class]))
    criterion = nn.CrossEntropyLoss()
    # Preprocess the image the same way as the ImageNet training data
    processed_image = preprocess_image(original_image)
    for i in range(10):
        print('Iteration: {}'.format(str(i)))
        # Zero the previous gradient
        processed_image.grad = None
        out = self.model(processed_image)
        pred_loss = criterion(out, im_label_as_var)
        # Compute the gradient of the loss w.r.t. the input image
        pred_loss.backward()
        # Build the adversarial noise from the sign of the input gradient
        adv_noise = self.alpha * torch.sign(processed_image.grad.data)
        # Step towards the target class by subtracting the noise
        processed_image.data = processed_image.data - adv_noise
        # Recreate and re-process the image to confirm the attack
        recreated_image = recreate_image(processed_image)
        prep_confirmation_image = preprocess_image(recreated_image)
        pred = self.model(prep_confirmation_image)
        # Get the predicted class index and its confidence
        _, pred_index = pred.data.max(1)
        confidence = F.softmax(pred, dim=1)[0][pred_index].data.numpy()[0]
        # Convert tensor to int
        pred_index = pred_index.numpy()[0]
        if pred_index == target_class:
            print('\nOriginal image class: ', im_label,
                  '\nTarget image class: ', target_class,
                  '\nConfidence: ', confidence)
            # Noise image: original image minus the generated image
            noise_image = original_image - recreated_image
            cv2.imwrite('./generated/targeted_adv_noise_from_' + str(im_label) +
                        '_to_' + str(pred_index) + '.jpg', noise_image)
            # Write the adversarial image itself
            cv2.imwrite('./generated/targeted_adv_img_from_' + str(im_label) +
                        '_to_' + str(pred_index) + '.jpg', recreated_image)
            break
    return 1
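# Usage sketch: generate() above only relies on self.model and self.alpha, so a
# lightweight namespace is enough to drive it here. The image path and the class
# indices (153 -> 285) are illustrative values, not taken from the original code.
import cv2
import types
from torchvision import models

attack = types.SimpleNamespace(model=models.vgg16(pretrained=True).eval(),
                               alpha=0.01)
original_image = cv2.imread('./input_images/example.jpg', 1)
# Equivalent to attack.generate(...) if generate() were bound to a real attack class
generate(attack, original_image, 153, 285)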
def gradient_ascent_output(prep_img, target_class):
    model = get_model('vgg16')
    optimizer = Adam([prep_img], lr=0.1, weight_decay=0.01)
    for i in range(1, 201):
        optimizer.zero_grad()
        # Logit of the target class; negate it so that minimizing the loss
        # performs gradient ascent on the class score
        output = model(prep_img)[0][target_class]
        loss = -torch.mean(output)
        print(i, "->", loss.item())
        loss.backward()
        optimizer.step()
        created_image = utils.recreate_image(prep_img)
        if i % 5 == 0:
            im_path = '../generated/output_vis_%d.jpg' % i
            utils.save_image(created_image, im_path)
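# Usage sketch for the class visualization above. As before, preprocess_image is
# assumed to be the repo's own helper returning a gradient-enabled tensor, and
# the target class index is illustrative.
import numpy as np

random_image = np.uint8(np.random.uniform(120, 190, (224, 224, 3)))
prep_img = preprocess_image(random_image)
gradient_ascent_output(prep_img, target_class=130)  # e.g. 130 = 'flamingo' in ImageNet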
local_original_image = cv2.imread(
    NODE_LOCAL_ORIGINAL_IMAGES_PATH + image_name, 1)
preprocess_original_image = preprocess_image(local_original_image)
# Simulate the situation where each adv image requires 5 seconds to create
time.sleep(5)
# To do: decompose features x and label y
# x, y = preprocess_original_image.unsqueeze(0),
# To do: import net and device
# net, device =
Generate_Adv = GenAdv(net, device, F.cross_entropy)
local_adv_image, local_adv_label = Generate_Adv.generate_adv(x, y)
recreated_local_adv_image = recreate_image(local_adv_image)
# Save the adv image to local storage first
cv2.imwrite(NODE_LOCAL_PATH + adv_image_name, recreated_local_adv_image)
# adv_image_file.write(adv_image_data)
# adv_image_file.close()
# Upload the new version of the adv image to the cloud (also handles the first upload)
upload_file(client, NODE_LOCAL_PATH + adv_image_name, BUCKET,
            REMOTE_ADV_IMAGE_FOLDER, adv_image_name)
print("Uploaded " + adv_image_name + "!")
round_cnt += 1
# Terminate the system based on the round counter
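# Sketch of one way the "To do" placeholders above could be filled in. Everything
# here is an assumption: the actual model, device handling, and label source are
# defined elsewhere in the node script, and the class index used for y is illustrative.
import torch
import torch.nn.functional as F
from torchvision import models

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
net = models.vgg16(pretrained=True).to(device).eval()

# Assumes preprocess_image returns an unbatched 3xHxW tensor, as the commented
# unsqueeze(0) above suggests; y would normally come from the dataset's labels.
x = preprocess_original_image.unsqueeze(0).to(device)
y = torch.tensor([207], device=device)  # hypothetical ground-truth class index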