Example #1
def load_GTSRB(data_train_model: DataSpec, data_test_model: DataSpec,
               data_train_monitor: DataSpec, data_test_monitor: DataSpec,
               data_run: DataSpec):
    # names of the data files
    data_train_model.file = "data/GTSRB/train.p"
    data_test_model.file = "data/GTSRB/test.p"
    data_train_monitor.file = data_train_model.file  # use training data for training
    data_test_monitor.file = data_test_model.file  # use testing data for testing
    data_run.file = data_test_model.file  # use testing data for running

    pixel_depth = 255.0

    all_classes_network, all_classes_rest = load_data(
        data_train_model=data_train_model,
        data_test_model=data_test_model,
        data_train_monitor=data_train_monitor,
        data_test_monitor=data_test_monitor,
        data_run=data_run,
        pixel_depth=pixel_depth)

    # labels
    labels_all = ['label' + str(i)
                  for i in range(43)]  # dummy names, TODO add correct names
    labels_all[0] = "20 km/h"
    labels_all[1] = "30 km/h"
    labels_all[2] = "50 km/h"
    labels_all[10] = "no passing"

    labels_network = filter_labels(labels_all, all_classes_network)
    labels_rest = filter_labels(labels_all, all_classes_rest)

    return all_classes_network, labels_network, all_classes_rest, labels_rest
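A minimal usage sketch for this loader. It assumes `DataSpec` instances can be constructed empty and are filled in by `load_GTSRB`/`load_data`; the actual `DataSpec` constructor is not shown in these examples.

# hypothetical usage; DataSpec() with no arguments is an assumption
data_train_model = DataSpec()
data_test_model = DataSpec()
data_train_monitor = DataSpec()
data_test_monitor = DataSpec()
data_run = DataSpec()

all_classes_network, labels_network, all_classes_rest, labels_rest = load_GTSRB(
    data_train_model, data_test_model, data_train_monitor, data_test_monitor, data_run)
print(labels_network[:3])  # e.g. ['20 km/h', '30 km/h', '50 km/h'] if those classes were kept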
Example #2
def load_CIFAR_10(data_train_model: DataSpec, data_test_model: DataSpec,
                  data_train_monitor: DataSpec, data_test_monitor: DataSpec,
                  data_run: DataSpec):
    # raise(NotImplementedError("This method was abandoned. Please fix it first before using it."))

    cifar10_dataset_folder_path = "../data/cifar-10-python/cifar-10-batches-py"
    # preprocess_and_save_data(cifar10_dataset_folder_path, normalize, one_hot_encode)
    n_batches = 5
    x_train = []
    y_train = []
    for batch_i in range(1, n_batches + 1):
        features, labels = load_cfar10_batch(cifar10_dataset_folder_path,
                                             batch_i)
        x_train.extend(features)
        y_train.extend(labels)
    x_train = np.array(x_train)
    y_train = np.array(y_train)
    data_train_model.set_data(x=x_train, y=y_train)
    data_train_monitor.set_data(x=x_train, y=y_train)
    with open(cifar10_dataset_folder_path + "/test_batch", mode='rb') as file:
        test = pickle.load(file, encoding='latin1')
    x_test = np.array(test['data'].reshape(
        (len(test['data']), 3, 32, 32)).transpose(0, 2, 3, 1))
    y_test = np.array(test['labels'])
    data_test_model.set_data(x=x_test, y=y_test)
    data_test_monitor.set_data(x=x_test, y=y_test)
    data_run.set_data(x=x_test, y=y_test)
    pixel_depth = 255.0
    all_classes_network, all_classes_rest = load_data(
        data_train_model=data_train_model,
        data_test_model=data_test_model,
        data_train_monitor=data_train_monitor,
        data_test_monitor=data_test_monitor,
        data_run=data_run,
        pixel_depth=pixel_depth)
    # labels
    labels_all = ['label' + str(i) for i in range(10)]

    labels_network = filter_labels(labels_all, all_classes_network)
    labels_rest = filter_labels(labels_all, all_classes_rest)

    return all_classes_network, labels_network, all_classes_rest, labels_rest
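The helper `load_cfar10_batch` is defined elsewhere; a plausible sketch, assuming the standard CIFAR-10 "python version" pickle layout and mirroring the test-batch handling above:

import pickle
import numpy as np

def load_cfar10_batch(cifar10_dataset_folder_path, batch_id):
    # each training batch is a pickled dict with 'data' (N x 3072 uint8 rows) and 'labels'
    with open(cifar10_dataset_folder_path + '/data_batch_' + str(batch_id), mode='rb') as file:
        batch = pickle.load(file, encoding='latin1')
    # reshape the flat rows to NHWC images, as done for the test batch above
    features = batch['data'].reshape((len(batch['data']), 3, 32, 32)).transpose(0, 2, 3, 1)
    labels = batch['labels']
    return features, labels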
Example #3
def load_ToyData(data_train_model: DataSpec, data_test_model: DataSpec,
                 data_train_monitor: DataSpec, data_test_monitor: DataSpec,
                 data_run: DataSpec):
    # add data
    x = np.array([
        [0.7, 0.2],
        [0.6, 0.2],
        [0.7, 0.1],
        [0.8, 0.1],  # class 1 (y=0), first cluster
        [0.9, 0.2],  # class 1 (y=0), second cluster
        [0.5, 0.5],
        [0.5, 0.6],
        [0.4, 0.6],  # class 2 (y=1), first cluster
        [0.2, 0.7]  # class 2 (y=1), second cluster
    ])
    y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1])
    data_train_model.set_data(x=x, y=y)
    data_test_model.set_data(x=x, y=y)
    data_train_monitor.set_data(x=x, y=y)
    data_test_monitor.set_data(x=x, y=y)
    data_run.set_data(x=x, y=y)

    pixel_depth = None

    load_data(data_train_model=data_train_model,
              data_test_model=data_test_model,
              data_train_monitor=data_train_monitor,
              data_test_monitor=data_test_monitor,
              data_run=data_run,
              pixel_depth=pixel_depth)

    # labels
    all_classes_network = [0, 1]
    labels_network = ["label1", "label2"]
    all_classes_rest = all_classes_network
    labels_rest = labels_network

    return all_classes_network, labels_network, all_classes_rest, labels_rest
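The comments above describe two clusters per class; a standalone sketch to visualize them (matplotlib is only used here, and `x`, `y` are repeated so the snippet runs on its own):

import numpy as np
import matplotlib.pyplot as plt

# same toy points as in load_ToyData
x = np.array([[0.7, 0.2], [0.6, 0.2], [0.7, 0.1], [0.8, 0.1], [0.9, 0.2],
              [0.5, 0.5], [0.5, 0.6], [0.4, 0.6], [0.2, 0.7]])
y = np.array([0, 0, 0, 0, 0, 1, 1, 1, 1])

plt.scatter(x[y == 0, 0], x[y == 0, 1], label='class 1 (y=0)')
plt.scatter(x[y == 1, 0], x[y == 1, 1], label='class 2 (y=1)')
plt.legend()
plt.show()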
Example #4
def load_MNIST(data_train_model: DataSpec, data_test_model: DataSpec,
               data_train_monitor: DataSpec, data_test_monitor: DataSpec,
               data_run: DataSpec):
    # raise(NotImplementedError("This method was abandoned. Please fix it first before using it."))

    # train image and label data files (the url_* names stem from the commented-out download variant below)
    url_train_image = 'train-images-idx3-ubyte.gz'
    url_train_labels = 'train-labels-idx1-ubyte.gz'
    num_train_samples = 60000

    # print("Downloading train data")
    # train = try_download(url_train_image, url_train_labels, num_train_samples)

    # test image and label data files
    url_test_image = 't10k-images-idx3-ubyte.gz'
    url_test_labels = 't10k-labels-idx1-ubyte.gz'
    num_test_samples = 10000

    # print("Downloading test data")
    # test = try_download(url_test_image, url_test_labels, num_test_samples)
    """
    # (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    # Reshaping the array to 4-dims so that it can work with the Keras API
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)
    # Making sure that the values are float so that we can get decimal points after division
    x_train = x_train.astype('float32')
    x_test = x_test.astype('float32')
    # Normalizing the RGB codes by dividing it to the max RGB value.
    x_train /= 255
    x_test /= 255

    x_train = np.array(x_train)
    y_train = np.array(y_train)
    x_test = np.array(x_test)
    y_test = np.array(y_test)
    """

    x_train = loadData(url_train_image, num_train_samples)
    y_train = loadLabels(url_train_labels, num_train_samples)

    x_test = loadData(url_test_image, num_test_samples)
    y_test = loadLabels(url_test_labels, num_test_samples)

    # Reshaping the array to 4-dims so that it can work with the Keras API
    x_train = x_train.reshape(x_train.shape[0], 28, 28, 1)
    x_test = x_test.reshape(x_test.shape[0], 28, 28, 1)

    data_train_model.set_data(x=x_train, y=y_train)
    data_train_monitor.set_data(x=x_train, y=y_train)
    data_test_model.set_data(x=x_test, y=y_test)
    data_test_monitor.set_data(x=x_test, y=y_test)
    data_run.set_data(x=x_test, y=y_test)
    pixel_depth = 255.0
    all_classes_network, all_classes_rest = load_data(
        data_train_model=data_train_model,
        data_test_model=data_test_model,
        data_train_monitor=data_train_monitor,
        data_test_monitor=data_test_monitor,
        data_run=data_run,
        pixel_depth=pixel_depth)
    # labels
    labels_all = ['label' + str(i) for i in range(10)]

    labels_network = filter_labels(labels_all, all_classes_network)
    labels_rest = filter_labels(labels_all, all_classes_rest)

    return all_classes_network, labels_network, all_classes_rest, labels_rest
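`loadData` and `loadLabels` are not shown in this example; a minimal sketch, assuming the files are local gzipped IDX files (the commented-out `try_download` calls above suggest the original helpers may also download them first):

import gzip
import numpy as np

def loadData(src, num_samples):
    # IDX image file: 16-byte header (magic, count, rows, cols), then uint8 pixels
    with gzip.open(src) as gz:
        gz.read(16)  # skip header
        buf = gz.read(28 * 28 * num_samples)
    return np.frombuffer(buf, dtype=np.uint8).astype(np.float32).reshape(num_samples, 28 * 28)

def loadLabels(src, num_samples):
    # IDX label file: 8-byte header (magic, count), then uint8 labels
    with gzip.open(src) as gz:
        gz.read(8)  # skip header
        buf = gz.read(num_samples)
    return np.frombuffer(buf, dtype=np.uint8).astype(np.int64)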
Example #5
def main():
    tf.set_random_seed(1234)  # for producing the same images

    if not hasattr(keras.backend, "tf"):
        raise RuntimeError("This tutorial requires keras to be configured"
                           " to use the TensorFlow backend.")

    if keras.backend.image_dim_ordering() != 'tf':
        keras.backend.set_image_dim_ordering('tf')
        print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
              "'th', temporarily setting to 'tf'")

    sess = tf.Session()
    keras.backend.set_session(sess)

    # load and preprocess dataset
    data_spec = DataSpec(batch_size=TOT_IMAGES,
                         scale_size=256,
                         crop_size=224,
                         isotropic=False)
    image_producer = ImageNetProducer(data_path=INPUT_DIR,
                                      num_images=TOT_IMAGES,
                                      data_spec=data_spec,
                                      batch_size=TOT_IMAGES)

    # Define input TF placeholder
    x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
    y = tf.placeholder(tf.float32, shape=(None, 1000))
    class_num = 1000

    # load target model and produce data
    # model = preprocess layer + pretrained model
    from keras.applications.densenet import DenseNet121
    from keras.applications.densenet import preprocess_input
    pretrained_model = DenseNet121(weights='imagenet')
    image_producer.startover()
    target_model = keras_model_wrapper(pretrained_model,
                                       preprocess_input,
                                       x=x,
                                       y=y)
    for (indices, label, names, images) in image_producer.batches(sess):
        images = np.array(images)
        label = np_utils.to_categorical(np.array(label), class_num)
    accuracy = model_eval(sess,
                          x,
                          y,
                          target_model.predictions,
                          images,
                          label,
                          args={'batch_size': 32})
    print('Test accuracy of wrapped target model:{:.4f}'.format(accuracy))

    # data information
    x_test, y_test = images, label  # x_test [0, 255]
    print('loading %d images in total' % images.shape[0])
    print(np.min(x_test), np.max(x_test))

    # local attack specific parameters
    clip_min = args.lower
    clip_max = args.upper
    nb_imgs = args.nb_imgs
    li_eps = args.epsilon
    targeted_true = (args.attack_type == 'targeted')
    k = args.K  # PGD iteration count
    a = args.learning_rate  # PGD step size

    # generate the attack inputs (target labels and original images/labels); the attack graph is redefined below
    target_ys_one_hot, orig_images, target_ys, orig_labels = generate_attack_inputs(
        target_model, x_test, y_test, class_num, nb_imgs)

    # Set random seed to improve reproducibility
    tf.set_random_seed(args.seed)
    np.random.seed(args.seed)

    # check whether adversarial examples already exist; if not, generate them, otherwise load them
    prefix = "Results"
    prefix = os.path.join(prefix, str(args.seed))

    if not os.path.exists(prefix):  # no history info
        # load local models or define the architecture
        local_model_types = ['VGG16', 'VGG19', 'resnet50']
        local_model_ls = []
        pred_ls = []
        for model_type in local_model_types:
            pretrained_model, preprocess_input_func = load_model(model_type)
            local_model = keras_model_wrapper(pretrained_model,
                                              preprocess_input_func,
                                              x=x,
                                              y=y)
            accuracy = model_eval(sess,
                                  x,
                                  y,
                                  local_model.predictions,
                                  images,
                                  label,
                                  args={'batch_size': 32})
            print('Test accuracy of model {}: {:.4f}'.format(
                model_type, accuracy))
            local_model_ls.append(local_model)
            pred_ls.append(local_model.predictions)

        # compute the CW loss of the original images under the target model
        if targeted_true:
            orig_img_loss = compute_cw_loss(target_model,
                                            orig_images,
                                            target_ys_one_hot,
                                            targeted=targeted_true)
        else:
            orig_img_loss = compute_cw_loss(target_model,
                                            orig_images,
                                            orig_labels,
                                            targeted=targeted_true)

        local_attack_graph = LinfPGDAttack(local_model_ls,
                                           epsilon=li_eps,
                                           k=k,
                                           a=a,
                                           random_start=False,
                                           loss_func='xent',
                                           targeted=targeted_true,
                                           x=x,
                                           y=y)
        # pgd attack to local models and generate adversarial example seed
        if targeted_true:
            (_, pred_labs, local_aes, pgd_cnt_mat, max_loss, min_loss, ave_loss,
             max_gap, min_gap, ave_gap) = local_attack_in_batches(
                 sess,
                 orig_images,
                 target_ys_one_hot,
                 eval_batch_size=1,
                 attack_graph=local_attack_graph,
                 model=target_model,
                 clip_min=clip_min,
                 clip_max=clip_max)
        else:
            (_, pred_labs, local_aes, pgd_cnt_mat, max_loss, min_loss, ave_loss,
             max_gap, min_gap, ave_gap) = local_attack_in_batches(
                 sess,
                 orig_images,
                 orig_labels,
                 eval_batch_size=1,
                 attack_graph=local_attack_graph,
                 model=target_model,
                 clip_min=clip_min,
                 clip_max=clip_max)

        # calculate the loss for all adversarial seeds
        if targeted_true:
            adv_img_loss = compute_cw_loss(target_model,
                                           local_aes,
                                           target_ys_one_hot,
                                           targeted=targeted_true)
        else:
            adv_img_loss = compute_cw_loss(target_model,
                                           local_aes,
                                           orig_labels,
                                           targeted=targeted_true)

        success_rate = accuracy_score(target_ys, pred_labs)
        print(
            '** Success rate of targeted adversarial examples generated from local models: **'
            + str(success_rate))
        accuracy = accuracy_score(np.argmax(orig_labels, axis=1), pred_labs)
        print(
            '** Untargeted success rate of adversarial examples generated from local models: **'
            + str(1 - accuracy))

        # l-inf distance between orig_images and local_aes
        dist = local_aes - orig_images
        l_inf_dist = np.linalg.norm(dist.reshape(nb_imgs, -1), np.inf, axis=1)

        # save the generated local adversarial examples
        os.makedirs(prefix)
        # save statistics
        fname = prefix + '/adv_img_loss.txt'
        np.savetxt(fname, adv_img_loss)
        fname = prefix + '/orig_img_loss.txt'
        np.savetxt(fname, orig_img_loss)
        fname = prefix + '/pgd_cnt_mat.txt'
        np.savetxt(fname, pgd_cnt_mat)
        fname = prefix + '/max_loss.txt'
        np.savetxt(fname, max_loss)
        fname = prefix + '/min_loss.txt'
        np.savetxt(fname, min_loss)
        fname = prefix + '/ave_loss.txt'
        np.savetxt(fname, ave_loss)
        fname = prefix + '/max_gap.txt'
        np.savetxt(fname, max_gap)
        fname = prefix + '/min_gap.txt'
        np.savetxt(fname, min_gap)
        fname = prefix + '/ave_gap.txt'
        np.savetxt(fname, ave_gap)

        # save output for local attacks
        fname = os.path.join(prefix, 'local_aes.npy')
        np.save(fname, local_aes)
        fname = os.path.join(prefix, 'orig_images.npy')
        np.save(fname, orig_images)
        fname = os.path.join(prefix, 'target_ys.npy')
        np.save(fname, target_ys)
        fname = os.path.join(prefix, 'target_ys_one_hot.npy')
        np.save(fname, target_ys_one_hot)
    else:
        print('loading data from files')
        local_aes = np.load(os.path.join(prefix, 'local_aes.npy'))
        orig_images = np.load(os.path.join(prefix, 'orig_images.npy'))
        target_ys = np.load(os.path.join(prefix, 'target_ys.npy'))
        target_ys_one_hot = np.load(
            os.path.join(prefix, 'target_ys_one_hot.npy'))

    assert local_aes.shape == (nb_imgs, 224, 224, 3)
    assert orig_images.shape == (nb_imgs, 224, 224, 3)
    assert target_ys.shape == (nb_imgs, )
    assert target_ys_one_hot.shape == (nb_imgs, class_num)

    print('begin NES attack')
    num_queries_list = []
    success_flags = []
    # fetch batch
    orig_images = orig_images[args.bstart:args.bend]
    target_ys = target_ys[args.bstart:args.bend]
    local_aes = local_aes[args.bstart:args.bend]
    # begin loop
    for idx in range(len(orig_images)):
        initial_img = orig_images[idx:idx + 1]
        target_class = target_ys[idx]
        if args.attack_seed_type == 'adv':
            print('attack seed is %s' % args.attack_seed_type)
            attack_seed = local_aes[idx]
        else:
            print('attack seed is %s' % args.attack_seed_type)
            attack_seed = orig_images[idx]
        _, num_queries, adv = nes_attack(sess, args, target_model, attack_seed,
                                         initial_img, target_class, class_num,
                                         IMAGE_SIZE)
        if num_queries == args.max_queries:
            success_flags.append(0)
        else:
            success_flags.append(1)
        num_queries_list.append(num_queries)

    # save query number and success
    fname = os.path.join(prefix,
                         '{}_num_queries.txt'.format(args.attack_seed_type))
    np.savetxt(fname, num_queries_list)
    fname = os.path.join(prefix,
                         '{}_success_flags.txt'.format(args.attack_seed_type))
    np.savetxt(fname, success_flags)

    print('finish NES attack')
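`nes_attack` is imported from elsewhere; the core idea is to estimate the gradient of the attack loss purely from black-box queries via antithetic Gaussian sampling (NES). A rough sketch of that estimator, where `loss_fn`, `n_samples`, and `sigma` are illustrative names rather than the script's actual parameters:

import numpy as np

def nes_gradient_estimate(loss_fn, x, n_samples=50, sigma=0.001):
    # average antithetic finite differences f(x + sigma*u) - f(x - sigma*u)
    # over random Gaussian directions u; loss_fn is a scalar-valued black box
    grad = np.zeros_like(x)
    for _ in range(n_samples):
        u = np.random.randn(*x.shape)
        grad += (loss_fn(x + sigma * u) - loss_fn(x - sigma * u)) * u
    return grad / (2.0 * n_samples * sigma)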
Example #6
def main(args):
	tf.set_random_seed(1234) # for producing the same images

	if not hasattr(keras.backend, "tf"):
		raise RuntimeError("This tutorial requires keras to be configured"
						" to use the TensorFlow backend.")

	if keras.backend.image_dim_ordering() != 'tf':
		keras.backend.set_image_dim_ordering('tf')
		print("INFO: '~/.keras/keras.json' sets 'image_dim_ordering' to "
			"'th', temporarily setting to 'tf'")

	sess = tf.Session()
	keras.backend.set_session(sess)

	# load and preprocess dataset
	data_spec = DataSpec(batch_size=TOT_IMAGES, scale_size=256, crop_size=224, isotropic=False)
	image_producer = ImageNetProducer(
		data_path=INPUT_DIR,
		num_images=TOT_IMAGES,
		data_spec=data_spec,
		batch_size=TOT_IMAGES)

	# Define input TF placeholder
	x = tf.placeholder(tf.float32, shape=(None, 224, 224, 3))
	y = tf.placeholder(tf.float32, shape=(None, 1000))
	class_num = 1000

	# load target model and produce data
	# model = preprocess layer + pretrained model
	from keras.applications.densenet import DenseNet121
	from keras.applications.densenet import preprocess_input
	pretrained_model = DenseNet121(weights='imagenet')
	image_producer.startover()
	target_model = keras_model_wrapper(pretrained_model, preprocess_input, x = x,y = y)
	images, label = None, None
	for (indices, label, names, images) in image_producer.batches(sess):
		images = np.array(images)
		label = np_utils.to_categorical(np.array(label), class_num)
	assert type(images) == np.ndarray, type(images)
	assert type(label) == np.ndarray, type(label)

	accuracy = model_eval(sess, x, y, target_model.predictions, images, label, args= {'batch_size': 32})
	print('Test accuracy of wrapped target model:{:.4f}'.format(accuracy))
	
	# data information
	x_test, y_test = images, label # x_test [0, 255]
	print(images.shape, len(label))
	print(np.min(x_test), np.max(x_test))


	# local attack specific parameters 
	clip_min = 0.0
	clip_max = 255.0
	nb_imgs = args['num_img']
	li_eps = 12.0
	targeted_true = (args['attack_type'] == 'targeted')
	k = 30 # pgd iteration
	a = 2.55 # pgd step size

	# generate the attack inputs (target labels and original images/labels); the attack graph is redefined below
	target_ys_one_hot, orig_images, target_ys, orig_labels = generate_attack_inputs(target_model, x_test,y_test, class_num, nb_imgs)

	# Set random seed to improve reproducibility
	tf.set_random_seed(args["seed"])
	np.random.seed(args["seed"])

	# check whether adversarial examples already exist; if not, generate them, otherwise load them
	prefix = "Results"
	prefix = os.path.join(prefix, str(args["seed"]))

	if not os.path.exists(prefix): # no history info
		# load local models or define the architecture
		local_model_types = ['VGG16', 'VGG19', 'resnet50']
		local_model_ls = []
		pred_ls = []
		for model_type in local_model_types:
			pretrained_model, preprocess_input_func = load_local_model(model_type)
			local_model = keras_model_wrapper(pretrained_model, preprocess_input_func, x = x,y = y)
			accuracy = model_eval(sess, x, y, local_model.predictions, images, label, args= {'batch_size': 32})
			# assert accuracy >= 0.5, 'Error: low accuracy of local model'
			print('Test accuracy of model {}: {:.4f}'.format(model_type, accuracy))
			local_model_ls.append(local_model)
			pred_ls.append(local_model.predictions)

		# compute the CW loss of the original images under the target model
		if targeted_true:
			orig_img_loss = compute_cw_loss(target_model, orig_images, target_ys_one_hot,targeted=targeted_true)
		else:
			orig_img_loss = compute_cw_loss(target_model, orig_images,orig_labels,targeted=targeted_true)

		attack_sub_pgd_tar = LinfPGDAttack(local_model_ls,
							epsilon = li_eps, 
							k = k,
							a = a,
							random_start = False,
							loss_func = 'xent',
							targeted = targeted_true,
							x = x,
							y = y)
		# pgd attack to local models and generate adversarial example seed
		if targeted_true:
			_, pred_labs, local_aes, pgd_cnt_mat, max_loss, \
			min_loss, ave_loss, max_gap, min_gap, ave_gap = local_attack_in_batches(sess, 
																					orig_images, 
																					target_ys_one_hot, 
																					eval_batch_size = 1,
																					attack_graph=attack_sub_pgd_tar, 
																					model=target_model, 
																					clip_min=clip_min, 
																					clip_max=clip_max)
		else:
			_, pred_labs, local_aes, pgd_cnt_mat, max_loss, \
			min_loss, ave_loss, max_gap, min_gap, ave_gap = local_attack_in_batches(sess, 
																					orig_images, 
																					orig_labels,
																					eval_batch_size = 1,
																					attack_graph=attack_sub_pgd_tar, 
																					model=target_model, 
																					clip_min=clip_min, 
																					clip_max=clip_max)

		# calculate the loss for all adversarial seeds
		if targeted_true:
			adv_img_loss = compute_cw_loss(target_model,local_aes, target_ys_one_hot,targeted=targeted_true)
		else:
			adv_img_loss = compute_cw_loss(target_model,local_aes,orig_labels,targeted=targeted_true)

		
		
		success_rate = accuracy_score(target_ys, pred_labs)
		print('** Success rate of targeted adversarial examples generated from local models: **' + str(success_rate)) # keep
		accuracy = accuracy_score(np.argmax(orig_labels, axis=1), pred_labs)
		print('** Untargeted success rate of adversarial examples generated from local models: **' + str(1 - accuracy))

		# save local adversarial
		os.makedirs(prefix)
		# save statistics
		fname = prefix + '/adv_img_loss.txt'
		np.savetxt(fname, adv_img_loss)
		fname = prefix + '/orig_img_loss.txt'
		np.savetxt(fname, orig_img_loss)
		fname = prefix + '/pgd_cnt_mat.txt'
		np.savetxt(fname, pgd_cnt_mat)
		fname = prefix + '/max_loss.txt'
		np.savetxt(fname, max_loss)
		fname = prefix + '/min_loss.txt'
		np.savetxt(fname, min_loss)
		fname = prefix + '/ave_loss.txt'
		np.savetxt(fname, ave_loss)
		fname = prefix + '/max_gap.txt'
		np.savetxt(fname, max_gap)
		fname = prefix + '/min_gap.txt'
		np.savetxt(fname, min_gap)
		fname = prefix + '/ave_gap.txt'
		np.savetxt(fname, ave_gap)

		# save output for local attacks
		fname = prefix + '/local_aes.npy'
		np.save(fname, local_aes)
		fname = prefix + '/orig_images.npy'
		np.save(fname, orig_images)
		fname = prefix + '/target_ys.npy'
		np.save(fname, target_ys)
		fname = prefix + '/target_ys_one_hot.npy'
		np.save(fname, target_ys_one_hot)
	else:
		print('loading data from files')
		local_aes = np.load(prefix + '/local_aes.npy')
		orig_images = np.load(prefix + '/orig_images.npy')
		target_ys = np.load(prefix + '/target_ys.npy')
		target_ys_one_hot = np.load(prefix + '/target_ys_one_hot.npy')
	assert local_aes.shape == (nb_imgs, 224, 224, 3)
	assert orig_images.shape == (nb_imgs, 224, 224, 3)
	assert target_ys.shape == (nb_imgs,)
	assert target_ys_one_hot.shape == (nb_imgs, class_num)

	# load autoencoder
	encoder = load_model(os.path.join(args["codec_dir"], 'imagenet_2_whole_encoder.h5'))
	decoder = load_model(os.path.join(args["codec_dir"], 'imagenet_2_whole_decoder.h5'))
	args["img_resize"] = decoder.input_shape[1]

	# ################## test whether autoencoder is working ##################
	# encode_img = encoder.predict(orig_images/255.-0.5)
	# decode_img = decoder.predict(encode_img)
	# # rescale decode_img
	# decode_img = np.clip((decode_img+0.5) * 255, a_min=0.0, a_max=255)
	# diff_img = (decode_img - orig_images) / 255.0 - 0.5
	# diff_mse = np.mean(diff_img.reshape(-1)**2)
	# print('MSE: %.4f' % diff_mse)
	########################################################################

	# define black-box model graph of autozoom
	blackbox_attack = AutoZOOM(sess, target_model, args, decoder,
							num_channels=3,image_size=224,num_labels=class_num)

	print('begin autozoom attack')
	num_queries_list = []
	success_flags = []
	# fetch batch
	orig_images = orig_images[args['bstart']:args['bend']]
	target_ys = target_ys[args['bstart']:args['bend']]
	local_aes = local_aes[args['bstart']:args['bend']]
	target_ys_one_hot = target_ys_one_hot[args['bstart']:args['bend']]
	
	for idx in range(len(orig_images)):
		initial_img = orig_images[idx:idx+1]
		target_y_one_hot = target_ys_one_hot[idx]
		if args["attack_seed_type"] == 'adv':
			print('attack seed is %s'%args["attack_seed_type"])
			attack_seed = local_aes[idx:idx+1]
		else:
			print('attack seed is %s'%args["attack_seed_type"])
			attack_seed = orig_images[idx:idx+1]
		# scale imgs to [-0.5, 0.5]
		initial_img = initial_img / 255.0 - 0.5
		attack_seed = attack_seed / 255.0 - 0.5
		# attack
		if targeted_true:
			ae, query_num = autozoom_attack(blackbox_attack, attack_seed, initial_img, target_y_one_hot)
		else:
			raise NotImplementedError
		print('image %d: query_num %d'%(idx, query_num))
		# save query number and success
		if query_num == args["max_iterations"] * 2:
			success_flags.append(0)
		else:
			success_flags.append(1)
		num_queries_list.append(query_num)
	# save query number and success
	fname = prefix + '/{}_num_queries.txt'.format(args["attack_seed_type"])
	np.savetxt(fname,num_queries_list)
	fname = prefix + '/{}_success_flags.txt'.format(args["attack_seed_type"])
	np.savetxt(fname,success_flags)

	print('finish autozoom attack')
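`AutoZOOM` and `autozoom_attack` are defined elsewhere; the query savings come from estimating the gradient in the decoder's low-dimensional latent space with a scaled random-vector estimator. A rough sketch of that idea, where `loss_fn` and `decoder_fn` are hypothetical callables rather than the script's objects:

import numpy as np

def autozoom_style_gradient(loss_fn, z, decoder_fn, beta=0.01, n_samples=10):
    # estimate the gradient w.r.t. the latent code z: perturb z, decode to image
    # space, query the black-box loss, and average scaled finite differences
    d = z.size
    f0 = loss_fn(decoder_fn(z))
    grad = np.zeros_like(z)
    for _ in range(n_samples):
        u = np.random.randn(*z.shape)
        u /= np.linalg.norm(u)
        grad += d * (loss_fn(decoder_fn(z + beta * u)) - f0) / beta * u
    return grad / n_samples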