def gen_adv():
    mse = 0
    original_files = get_original_file(input_dir + val_list)
    # The initial gradient direction for the next image is the final value from the previous sample.
    global momentum
    momentum = 0
    for filename, label in original_files:
        img_path = input_dir + filename
        print("Image: {0}".format(img_path))
        img = process_img(img_path)
        # adv_img = attack_nontarget_by_ensemble(img, label, origdict[label], label)
        adv_img, m = attack_nontarget_by_ensemble(img, label, origdict[label], label, momentum)
        # m is the final gradient from this sample; carry it over as the next sample's initial momentum.
        momentum = m
        # adv_img has already been converted back to the 0-255 range.
        image_name, image_ext = filename.split('.')
        # Save adversarial image (.png).
        save_adv_image(adv_img, output_dir + image_name + '.png')
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        print("Image: {0}, MSE = {1}".format(img_path, score))
        mse += score
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
def gen_adv():
    mse = 0
    adv_acc = 0
    original_files = get_original_file(input_dir + val_list)
    # init_files = get_init_file(init_dir + 'init_list.txt')
    for idx, (filename, label) in enumerate(original_files[args.start:args.end]):
        img_path = input_dir + filename
        # init_path = init_dir + init_files[idx][0] + '.jpg'
        image_name, image_ext = filename.split('.')
        # Pick the sparsity percentile from the image's area rank.
        if image_name in area_rank[:40]:
            SPARSE_PER = 99
        if image_name in area_rank[40:80]:
            SPARSE_PER = 97
        if image_name in area_rank[80:]:
            SPARSE_PER = 95
        # bboxes = get_bbox('mask/' + image_name + '.xml')
        bboxes = None
        if verbose:
            print("Image: {0}".format(img_path))
        img = process_img(img_path)
        # init = process_img(init_path) * 0.01
        init = None
        adv_img, adv_label = attack_by_MPGD(img, label, bboxes, SPARSE_PER, init)
        save_adv_image(adv_img, output_dir + image_name + '.png')
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        # Add the real score when the attack succeeded; otherwise add a fixed penalty of 128.
        mse += score if label != adv_label else 128
        adv_acc += 1 if label == adv_label else 0
        if label == adv_label:
            print("model: ", i, "\timage: ", filename, label)
    print("ADV {} files, AVG MSE: {}, ADV_ACC: {}".format(
        len(original_files), mse / len(original_files), adv_acc))
def gen_adv():
    original_files = get_original_file(input_dir + val_list)
    test_acc = 0
    print("The model's name is {}".format(model_name))
    for filename, label in original_files:
        img_path = input_dir + filename
        print("Image: {0}".format(img_path))
        img = process_img(img_path)
        result = exe.run(eval_program,
                         fetch_list=[out],
                         feed={input_layer.name: img})
        result = result[0][0]
        # Top-1 predicted class.
        o_label = np.argsort(result)[::-1][:1][0]
        print("Ground-truth label: {0}, label predicted by the {1} model: {2}".format(
            label, model_name, o_label))
        if o_label == int(label):
            test_acc += 1
    acc = test_acc / 120.0  # the validation list contains 120 images
    print("The number of correct predictions is {0}".format(test_acc))
    print("The model name is {0}, the accuracy is {1}".format(model_name, acc))
def gen_adv():
    mse = 0
    original_files = get_original_file(input_dir + val_list)
    for filename, label in original_files:
        img_path1 = input_dir + filename
        img_path2 = output_dir + filename.split('.')[0] + '.png'
        print("Image: {0}".format(img_path1))
        img1 = process_img(img_path1)
        img2 = process_img(img_path2)
        img1 = tensor2img(img1)
        img2 = tensor2img(img2)
        score = calc_mse(img1, img2)
        mse += score
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
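# calc_mse is used throughout these scripts but is not defined in this listing.
# A minimal sketch of what it is assumed to compute: the mean squared error
# between two HWC images in the 0-255 range. This is an assumption for
# illustration, not the project's reference implementation.
import numpy as np

def calc_mse_sketch(img_a, img_b):
    """Mean squared error between two images of identical shape."""
    a = np.asarray(img_a, dtype=np.float64)
    b = np.asarray(img_b, dtype=np.float64)
    assert a.shape == b.shape, "images must have the same shape"
    return float(np.mean((a - b) ** 2))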
def p_pent(self, img):
    scale = float(self.pnet_size / self.min_face)
    _img = process_img(img, scale)
    h, w, _ = _img.shape
    all_boxes = []
    # Run P-Net over an image pyramid until the scaled image is smaller than the net input.
    while min(h, w) > self.pnet_size:
        # print(_img.shape)
        p_cls, p_box = self.load_pnet(np.expand_dims(_img, axis=0))
        boxes = generate_box(p_cls[:, :, 1], p_box, scale, 0.6)
        scale *= self.factor
        _img = process_img(img, scale)
        h, w, _ = _img.shape
        nms = NMS(boxes[:, :5], 0.5)
        boxes = boxes[nms]
        all_boxes.append(boxes)
    all_boxes = np.vstack(all_boxes)
    # box = all_boxes[:, :5]
    box_w = all_boxes[:, 2] - all_boxes[:, 0]
    box_h = all_boxes[:, 3] - all_boxes[:, 1]
    # Refine the candidate boxes with the regression offsets predicted by P-Net.
    res_boxes = np.vstack([
        all_boxes[:, 0] + all_boxes[:, 5] * box_w,
        all_boxes[:, 1] + all_boxes[:, 6] * box_h,
        all_boxes[:, 2] + all_boxes[:, 7] * box_w,
        all_boxes[:, 3] + all_boxes[:, 8] * box_h,
        all_boxes[:, 4]
    ])
    # [5, NUM] -> [NUM, 5]
    print(res_boxes.shape)
    res_boxes = res_boxes.T
    print(res_boxes.shape)
    return res_boxes
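# NMS above is called with boxes[:, :5] (x1, y1, x2, y2, score) and an IoU
# threshold, and its return value is used to index the box array, so it is
# assumed to return indices of the boxes that survive suppression. A minimal
# sketch under that assumption (not the project's actual helper):
import numpy as np

def nms_sketch(boxes, iou_threshold):
    """Greedy non-maximum suppression over an (N, 5) array of
    (x1, y1, x2, y2, score). Returns indices of the kept boxes."""
    x1, y1, x2, y2, scores = boxes[:, 0], boxes[:, 1], boxes[:, 2], boxes[:, 3], boxes[:, 4]
    areas = (x2 - x1) * (y2 - y1)
    order = scores.argsort()[::-1]
    keep = []
    while order.size > 0:
        i = order[0]
        keep.append(i)
        # Intersection of the highest-scoring box with the remaining boxes.
        xx1 = np.maximum(x1[i], x1[order[1:]])
        yy1 = np.maximum(y1[i], y1[order[1:]])
        xx2 = np.minimum(x2[i], x2[order[1:]])
        yy2 = np.minimum(y2[i], y2[order[1:]])
        inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
        iou = inter / (areas[i] + areas[order[1:]] - inter)
        # Keep only boxes that overlap the current one by less than the threshold.
        order = order[1:][iou < iou_threshold]
    return np.array(keep, dtype=np.int64)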
def gen_adv():
    mse = 0
    original_files = get_original_file(input_dir + val_list)
    num = 1
    cout = 0
    print("The model is {}".format(model_name))
    for filename, label in original_files:
        img_path = input_dir + filename
        print("Image: {0}".format(img_path))
        img = process_img(img_path)
        # print(img.shape)
        result = exe.run(eval_program,
                         fetch_list=[out],
                         feed={input_layer.name: img})
        result = result[0][0]
        o_label = np.argsort(result)[::-1][:1][0]
        print("Predicted label: {0}".format(o_label))
        if o_label == int(label):
            adv_img = attack_nontarget_by_FGSM(img, label)
            # adv_img = attack_nontarget_by_PGD(img, label)
        else:
            # The model already misclassifies this image, so save it unchanged.
            print("Sample {0} is already adversarial, name: {1}".format(num, filename))
            img = tensor2img(img)
            # print(img.shape)
            image_name, image_ext = filename.split('.')
            save_adv_image(img, output_dir + image_name + '.png')
            num += 1
            cout += 1
            continue
        image_name, image_ext = filename.split('.')
        # Save adversarial image (.png).
        save_adv_image(adv_img, output_dir + image_name + '.png')
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        mse += score
        num += 1
    print("Successfully attacked: {}".format(120 - cout))
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
def gen_adv():
    mse = 0
    original_files = get_original_file(input_dir + val_list)
    for filename, gt_label in original_files:
        img_path = input_dir + filename
        img = process_img(img_path)
        image_name, image_ext = filename.split('.')
        adv_img = attack_driver(img, gt_label, filename)
        save_adv_image(adv_img, output_dir + image_name + '.png')
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        print(score)
        mse += score
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
def gen_adv():
    mse = 0
    original_files = get_original_file('input_image/' + val_list)
    for filename, label in original_files:
        img_path = input_dir + filename.split('.')[0] + '.png'
        print("Image: {0}".format(img_path))
        img = process_img(img_path)
        adv_img = attack_nontarget_by_SINIFGSM(img, label)
        image_name, image_ext = filename.split('.')
        # Save adversarial image (.png).
        save_adv_image(adv_img, output_dir + image_name + '.png')
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        mse += score
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
def process_img():
    data = request.json
    img = data['ImgSrc']
    left_shoulder = data['leftShoulder']
    right_shoulder = data['rightShoulder']
    left_hip = data['leftHip']
    right_hip = data['rightHip']
    ratio = utils.process_img(img)
    print("#####################")
    print(ratio)
    print("#####################")
    response = {
        'shoulderWidth': utils.get_shoulder_width(ratio, left_shoulder, right_shoulder) + 10,
        'hipWidth': utils.get_hip_width(ratio, left_hip, right_hip) + 10
    }
    return make_response(response, 200)
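# A hedged usage sketch for the handler above, assuming it is registered as a
# Flask route. The application object (`app`), the route path '/process_img',
# and the keypoint value format are assumptions based on the keys read from
# request.json; only the JSON field names come from the handler itself.
def demo_process_img_request(app):
    payload = {
        'ImgSrc': 'data:image/png;base64,...',  # placeholder image source
        'leftShoulder': {'x': 120, 'y': 200},   # hypothetical keypoint format
        'rightShoulder': {'x': 240, 'y': 200},
        'leftHip': {'x': 140, 'y': 380},
        'rightHip': {'x': 220, 'y': 380},
    }
    with app.test_client() as client:
        resp = client.post('/process_img', json=payload)
        print(resp.status_code, resp.get_json())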
def gen_adv():
    mse = 0
    original_files = get_original_file('./input_image/' + val_list)
    target_label_list = [
        76, 18, 104, 36, 72, 72, 47, 92, 113, 5, 84, 74, 82, 34, 42, 84, 70,
        98, 29, 87, 104, 94, 103, 61, 21, 83, 108, 104, 26, 112, 84, 107, 104,
        45, 72, 19, 72, 75, 55, 104, 54, 104, 72, 74, 91, 25, 68, 107, 91, 41,
        116, 21, 104, 56, 102, 51, 46, 87, 113, 19, 113, 85, 24, 93, 110, 102,
        24, 84, 27, 38, 48, 43, 10, 32, 68, 87, 54, 12, 84, 29, 3, 13, 26, 2,
        3, 106, 105, 34, 118, 66, 19, 74, 63, 42, 9, 113, 21, 6, 40, 40, 21,
        104, 86, 23, 40, 12, 37, 20, 40, 12, 79, 15, 9, 48, 74, 51, 91, 79,
        46, 80
    ]  # hard examples need a targeted attack
    for filename, label in original_files[args.start_idx:args.end_idx]:
        img_path = input_dir + filename.split('.')[0] + args.subfix
        print("Image: {0}".format(img_path))
        img = process_img(img_path)
        target = target_label_list[label - 1]
        if IsTarget:
            print('target class', target)
        adv_img = attack_nontarget_by_FGSM(img, label, target)
        # adv_img = attack_nontarget_by_FGSM(img, label)
        image_name, image_ext = filename.split('.')
        # Save adversarial image (.png).
        save_adv_image(adv_img, output_dir + image_name + '.png')
        org_img = tensor2img(img)
        score = calc_mse(org_img, adv_img)
        mse += score
        print('MSE %.2f' % (score))
        sys.stdout.flush()
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
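# attack_nontarget_by_FGSM(img, label, target) is not defined in this listing.
# Below is a minimal, framework-agnostic sketch of a single targeted FGSM step,
# under the assumption that grad_fn(img, target) returns the gradient of the
# target-class loss with respect to the input. grad_fn, eps, and the clip
# bounds are hypothetical; this is not the script's actual attack.
import numpy as np

def targeted_fgsm_step_sketch(img, target, grad_fn, eps=0.03, clip_min=-1.0, clip_max=1.0):
    """One FGSM step that moves the input toward the target class by
    descending the target-class loss."""
    g = grad_fn(img, target)
    adv = img - eps * np.sign(g)
    return np.clip(adv, clip_min, clip_max)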
def predict(self, filenames=None, images=None, **kwargs):
    """Make predictions given image file paths.

    Arguments:
        filenames {tuple} -- Iterable containing file names or a generator
            that yields `filenames, labels`.

    Returns:
        {np.ndarray} -- Array-like containing predicted values.
    """
    if images is None and filenames is not None:
        # Convert filenames to images.
        images = (utils.process_img(file) for file in filenames)
    elif filenames is None and images is not None:
        pass
    else:
        raise ValueError('Supply either `filenames` or `images`.')
    # Make predictions on each image.
    prediction = [self._predict(im, **kwargs) for im in images]
    return np.asarray(prediction)
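# A small usage sketch for the predict() method above. The `model` object and
# the file paths are hypothetical; only the keyword-argument contract (pass
# exactly one of `filenames` or `images`, otherwise ValueError) comes from the
# method itself.
import numpy as np

def demo_predict(model):
    """`model` is any object exposing the predict() method defined above."""
    filenames = ['datasets/input_image/0.jpg', 'datasets/input_image/1.jpg']  # hypothetical paths
    preds = model.predict(filenames=filenames)
    assert isinstance(preds, np.ndarray)
    return preds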
def reader():
    for line in lines:
        label, filename = line.split()
        img = process_img(os.path.join('datasets/input_image/', filename))
        yield img, int(label), filename
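# process_img is used by reader() and the attack scripts above but is not
# defined in this listing; it is assumed to return an NCHW float batch ready to
# feed the executor. The sketch below is written under that assumption: the
# input size, the normalization statistics, and the OpenCV-based loading are
# guesses, not the project's actual preprocessing. (The two-argument
# process_img(img, scale) used in p_pent above is a different helper.)
import cv2
import numpy as np

def process_img_sketch(img_path, size=224,
                       mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)):
    img = cv2.imread(img_path)                  # BGR, HWC, uint8
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    img = cv2.resize(img, (size, size))
    img = img.astype(np.float32) / 255.0
    img = (img - np.array(mean, dtype=np.float32)) / np.array(std, dtype=np.float32)
    img = img.transpose(2, 0, 1)                # HWC -> CHW
    return np.expand_dims(img, axis=0)          # add batch dimension -> NCHW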
def gen_adv():
    global Res_ratio, Incep_ratio, Mob_ratio
    mse = 0
    num = 1
    original_files = get_original_file(input_dir + val_list)
    f = open('log.txt', 'w')  # log
    for filename, label in original_files:
        img_path = input_dir + filename
        print("Image: {0}".format(img_path))
        img = process_img(img_path)
        Res_result, Inception_result, Mob_result = exe.run(
            double_eval_program,
            fetch_list=[Res_out, Inception_out, Mob_out],
            feed={input_layer.name: img})
        Res_result = Res_result[0]
        Inception_result = Inception_result[0]
        Mob_result = Mob_result[0]
        r_o_label = np.argsort(Res_result)[::-1][:1][0]
        i_o_label = np.argsort(Inception_result)[::-1][:1][0]
        m_o_label = np.argsort(Mob_result)[::-1][:1][0]
        pred_label = [r_o_label, i_o_label, m_o_label]
        print("Ground-truth label: {0}".format(label))
        print("Res result: %d, Inception result: %d, Mobile result: %d" %
              (r_o_label, i_o_label, m_o_label))
        f.write("Ground-truth label: {0}\n".format(label))
        f.write("Res result: %d, Inception result: %d, Mobile result: %d\n" %
                (r_o_label, i_o_label, m_o_label))
        if r_o_label == int(label) and i_o_label == int(label) and m_o_label == int(label):
            # All three models are correct: attack the full ensemble.
            Res_ratio = 30.0 / 43.0
            Incep_ratio = 10.0 / 43.0
            Mob_ratio = 3.0 / 43.0
            adv_img = attack_nontarget_by_PGD(
                double_adv_program,
                img,
                pred_label,
                label,
                out=[Res_out, Inception_out, Mob_out])
            image_name, image_ext = filename.split('.')
            # Save adversarial image (.png).
            org_img = tensor2img(img)
            score = calc_mse(org_img, adv_img)
            # image_name += "MSE_{}".format(score)
            save_adv_image(adv_img, output_dir + image_name + '.png')
            mse += score
        elif r_o_label == int(label) and m_o_label == int(label):
            # Inception already mispredicts; attack ResNet and MobileNet only.
            print("filename:{}, Inception failed!".format(filename))
            Res_ratio = 0.9
            Incep_ratio = 0
            Mob_ratio = 0.1
            adv_img = attack_nontarget_by_PGD(double_adv_program,
                                              img,
                                              [r_o_label, 0, m_o_label],
                                              label,
                                              out=[Res_out, Mob_out])
            image_name, image_ext = filename.split('.')
            # Save adversarial image (.png).
            org_img = tensor2img(img)
            score = calc_mse(org_img, adv_img)
            # image_name += "MSE_{}".format(score)
            save_adv_image(adv_img, output_dir + image_name + '.png')
            mse += score
        elif r_o_label == int(label) and i_o_label == int(label):
            # MobileNet already mispredicts; attack ResNet and Inception only.
            print("filename:{}, Mobile failed!".format(filename))
            Res_ratio = 0.75
            Incep_ratio = 0.25
            Mob_ratio = 0
            adv_img = attack_nontarget_by_PGD(double_adv_program,
                                              img,
                                              [r_o_label, i_o_label, 0],
                                              label,
                                              out=[Res_out, Inception_out])
            image_name, image_ext = filename.split('.')
            # Save adversarial image (.png).
            org_img = tensor2img(img)
            score = calc_mse(org_img, adv_img)
            # image_name += "MSE_{}".format(score)
            save_adv_image(adv_img, output_dir + image_name + '.png')
            mse += score
        elif r_o_label == int(label):
            # Both MobileNet and Inception already mispredict; attack ResNet only.
            print("filename:{}, Mobile failed!, Inception failed!".format(filename))
            Res_ratio = 1.0
            Incep_ratio = 0.0
            Mob_ratio = 0.0
            adv_img = attack_nontarget_by_PGD(double_adv_program,
                                              img,
                                              [r_o_label, 0, 0],
                                              label,
                                              out=[Res_out])
            image_name, image_ext = filename.split('.')
            # Save adversarial image (.png).
            org_img = tensor2img(img)
            score = calc_mse(org_img, adv_img)
            # image_name += "MSE_{}".format(score)
            save_adv_image(adv_img, output_dir + image_name + '.png')
            mse += score
        else:
            # Every model already mispredicts; keep the original image.
            print("Sample {0} is already adversarial, name: {1}".format(num, filename))
            score = 0
            f.write("Sample {0} is already adversarial, name: {1}\n".format(num, filename))
            img = tensor2img(img)
            image_name, image_ext = filename.split('.')
            # image_name += "_un_adv_"
            save_adv_image(img, output_dir + image_name + '.png')
        print("The ResNet branch weight is {0}".format(Res_ratio))
        num += 1
        print("The image's MSE is {}".format(score))
        # break
    print("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
    # print("ADV {} files, AVG MSE: {}".format(len(original_files - num), mse / len(original_files - num)))
    f.write("ADV {} files, AVG MSE: {}".format(len(original_files), mse / len(original_files)))
    f.close()
def gen_adv():
    print('gen adv')
    mse = 0
    adv_files = get_original_file(input_dir + val_list)
    print("read original files", len(adv_files))
    target_label_list = [
        76, 18, 104, 36, 72, 72, 47, 92, 113, 5, 84, 74, 82, 34, 42, 84, 70,
        98, 29, 87, 104, 94, 103, 61, 21, 83, 108, 104, 26, 112, 84, 107, 104,
        45, 72, 19, 72, 75, 55, 104, 54, 104, 72, 74, 91, 25, 68, 107, 91, 41,
        116, 21, 104, 56, 102, 51, 46, 87, 113, 19, 113, 85, 24, 93, 110, 102,
        24, 84, 27, 38, 48, 43, 10, 32, 68, 87, 54, 12, 84, 29, 3, 13, 26, 2,
        3, 106, 105, 34, 118, 66, 19, 74, 63, 42, 9, 113, 21, 6, 40, 40, 21,
        104, 86, 23, 40, 12, 37, 20, 40, 12, 79, 15, 9, 48, 74, 51, 91, 79,
        46, 80
    ]
    least_list = []
    counter = 0
    unt_counter = 0
    # Record the logits of each image.
    logits_list = []
    mse_list = []
    # Record the logit gap (max logit minus the true-class logit).
    logits_diff = []
    count = 0
    for filename, label in tqdm(adv_files):
        if args.output == 'input_image/':
            img_path = output_dir + filename.split('.')[0] + '.jpg'
        else:
            img_path = output_dir + filename.split('.')[0] + '.png'
        # print("Image: {0}".format(img_path))
        # !ssize.empty() in function 'resize'
        img = process_img(img_path)
        # print('Image range', np.min(img), np.max(img))
        pred_label, pred_score, least_class = inference(img)
        logits = inference_logits(img)
        # print(logits)
        return_logits = np.sort(logits)[::-1]
        logits_list.append(return_logits)
        # logits_diff.append(return_logits[0] - return_logits[1])
        logits_diff.append(return_logits[0] - logits[label])
        # print("Test-score: {0}, class {1}".format(pred_score, pred_label))
        # if pred_label == target_label_list[label - 1]:
        #     counter += 1
        if pred_label != label:
            # print("Failed target image: {0}".format(img_path))
            unt_counter += 1
        else:
            # print("Serious!!! Failed untargeted image: {0}".format(img_path))
            pass
        least_list.append(least_class)
        # adv_img = attack_nontarget_by_FGSM(img, label)
        # image_name, image_ext = filename.split('.')
        # Save adversarial image (.png).
        # save_adv_image(adv_img, output_dir + image_name + '.png')
        # Check MSE against the original image.
        # org_img = tensor2img(img)
        org_filename = filename.split('.')[0] + '.jpg'
        org_img_path = input_dir + org_filename
        org_img = process_img(org_img_path)
        score = calc_mse(tensor2img(org_img), tensor2img(img))
        mse_list.append(score)
        mse += score
        count += 1
    # print('Least likely list', least_list)
    # print('logits', logits_list)
    print('logits diff: ')
    for i, logit in enumerate(logits_diff):
        if logit < 0.001:
            print('id: %d, mse: %.10f, diff logits: %.2f, ******************' %
                  (i + 1, mse_list[i], logit))
        else:
            print('id: %d, mse: %.10f, diff logits: %.2f' % (i + 1, mse_list[i], logit))
    print('logits diff', np.mean(logits_diff))
    print('The LL success number is', counter)
    print('The untargeted success number is', unt_counter)
    print("AVG MSE: {}".format(mse / count))