def __init__(self, sess, epsilon, sample_size=30, max_steps=1000, learning_rate=0.1, debug=False):
    """Build an EOT-style attack graph.

    The loss is averaged over `sample_size` independently randomized
    (defended) copies of the single input image, and the gradient of that
    averaged loss is taken w.r.t. the raw input placeholder.
    """
    self._sess = sess
    self._input = tf.placeholder(tf.float32, (299, 299, 3))
    batched = tf.expand_dims(self._input, axis=0)
    # Monte-Carlo ensemble over the stochastic defense: each defend() call
    # draws a fresh randomization of the same image.
    defended_copies = [defend(batched) for _ in range(sample_size)]
    ensemble_xs = tf.concat(defended_copies, axis=0)
    self._logits, self._preds = inceptionv3_model(sess, ensemble_xs)
    self._label = tf.placeholder(tf.int32, ())
    label_row = tf.expand_dims(tf.one_hot(self._label, 1000), axis=0)
    # Repeat the one-hot label once per ensemble member.
    ensemble_labels = tf.tile(label_row, (self._logits.shape[0], 1))
    xent = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=self._logits, labels=ensemble_labels)
    self._loss = tf.reduce_mean(xent)
    self._grad, = tf.gradients(self._loss, self._input)
    self._epsilon = epsilon
    self._max_steps = max_steps
    self._learning_rate = learning_rate
    self._debug = debug
def multigpu_npoplogits_modify(self, inputx, modify_try, input_img):
    """Build the multi-GPU logits graph for a candidate perturbation.

    Upsamples `modify_try` to 299x299, maps it through tanh into image
    space (via self.boxmul/self.boxplus), clips the resulting perturbation
    to +/- self.epsi around `input_img`, replicates the clipped image
    `samples` times, shards the batch across `gpu_num` GPUs through the
    randomized defense + InceptionV3, and returns the logits averaged over
    the `samples` axis, shaped [npop, num_classes].

    NOTE(review): assumes module-level `samples`, `npop`, `gpu_num` with
    samples * npop equal to the replicated batch size and samples divisible
    by gpu_num — confirm against caller setup.
    """
    with tf.device('/gpu:0'):
        # Pin shared pre-processing ops to GPU 0; the per-shard model ops
        # re-pin themselves to their own GPU below.
        resized_images = tf.image.resize_images(modify_try, [299, 299], method=0)
        # tanh squashing keeps the candidate image inside the valid box.
        ori_x = tf.tanh(inputx + resized_images) * self.boxmul + self.boxplus
        realdist = ori_x - input_img
        # Enforce the L-inf threat model.
        realclipdist = tf.clip_by_value(realdist, -self.epsi, self.epsi)
        ensemble_prex = realclipdist + input_img
        # Replicate so each copy gets an independent defense randomization.
        ensemble_x = tf.concat([ensemble_prex for _ in range(samples)], axis=0)
        x = tf.reshape(ensemble_x, [samples * npop, 299, 299, 3])
        x_a = tf.split(x, gpu_num)
        final_logits = []
        for gpu_id in range(gpu_num):
            with tf.device('/gpu:%d' % gpu_id):
                a = []
                # defend() is applied one image at a time (it presumably
                # expects a batch of 1 — TODO confirm), then re-batched.
                for i in range(x_a[gpu_id].shape[0]):
                    temp_randomized = defend(x_a[gpu_id][i:i + 1])
                    temp_randomized = tf.squeeze(temp_randomized)
                    a.append(temp_randomized)
                tensor = tf.convert_to_tensor(a)
                logits, _ = inceptionv3_model(self._sess, tensor)
                # Per-GPU shard holds samples/gpu_num sample-copies of all
                # npop candidates.
                logits = tf.reshape(logits, [int(samples / gpu_num), npop, -1])
                final_logits.append(logits)
        final_logits = tf.convert_to_tensor(final_logits)
        final_logits = tf.reshape(final_logits, [samples, npop, -1])
        # Average over the defense samples -> [npop, num_classes].
        final_logits = tf.reduce_mean(final_logits, axis=0)
        return final_logits
def __init__(self, sess):
    """Wrap defense + InceptionV3 as a robustml model under an L-inf
    threat model with epsilon = 8/255."""
    self._sess = sess
    self._input = tf.placeholder(tf.float32, (299, 299, 3))
    as_batch = tf.expand_dims(self._input, axis=0)
    defended = defend(as_batch)
    self._logits, self._predictions = inceptionv3_model(sess, defended)
    self._dataset = robustml.dataset.ImageNet((299, 299, 3))
    self._threat_model = robustml.threat_model.Linf(epsilon=8.0 / 255.0)
def __init__(self, sess):
    """robustml model wrapper: single defended forward pass through
    InceptionV3, ImageNet dataset, L-inf(8/255) threat model."""
    self._sess = sess
    self._input = tf.placeholder(tf.float32, (299, 299, 3))
    # Model expects a leading batch dimension.
    batch_of_one = tf.expand_dims(self._input, axis=0)
    randomized_input = defend(batch_of_one)
    self._logits, self._predictions = inceptionv3_model(sess, randomized_input)
    self._dataset = robustml.dataset.ImageNet((299, 299, 3))
    self._threat_model = robustml.threat_model.Linf(epsilon=8.0 / 255.0)
def batchlogits(self, x):
    """Defend each row of `x` independently, classify the re-assembled
    batch, and return the logits averaged over the batch axis."""
    # defend() is called per single-image slice so every row draws its own
    # randomization; squeeze drops the size-1 batch dim before restacking.
    defended_rows = [
        tf.squeeze(defend(x[idx:idx + 1])) for idx in range(x.shape[0])
    ]
    randomized = tf.convert_to_tensor(defended_rows)
    logits, _ = inceptionv3_model(self._sess, randomized)
    return tf.reduce_mean(logits, axis=0)
def batchlogits_modify(self, inputx, modify, input_img):
    """Logits graph for a candidate perturbation, averaged over `samples`
    independent defense randomizations.

    `modify` is upsampled to 299x299, pushed through tanh into image space
    (self.boxmul / self.boxplus), and its deviation from `input_img` is
    clipped to +/- self.epsi before classification.
    """
    upsampled = tf.image.resize_images(modify, [299, 299], method=0)
    candidate = tf.tanh(inputx + upsampled) * self.boxmul + self.boxplus
    # Enforce the L-inf budget around the original image.
    delta = tf.clip_by_value(candidate - input_img, -self.epsi, self.epsi)
    clipped_image = delta + input_img
    # Replicate so each copy receives its own defense randomization.
    replicated = tf.concat([clipped_image for _ in range(samples)], axis=0)
    defended_rows = [
        tf.squeeze(defend(replicated[idx:idx + 1]))
        for idx in range(replicated.shape[0])
    ]
    batch = tf.convert_to_tensor(defended_rows)
    logits, _ = inceptionv3_model(self._sess, batch)
    return tf.reduce_mean(logits, axis=0)
def __init__(self, sess, epsilon, sample_size=30, max_steps=1000, learning_rate=0.1, debug=False):
    """Set up the attack: loss averaged over `sample_size` defended copies
    of one input, with its gradient w.r.t. the raw image."""
    # Plain attack hyper-parameters (independent of graph construction).
    self._epsilon = epsilon
    self._max_steps = max_steps
    self._learning_rate = learning_rate
    self._debug = debug
    self._sess = sess
    self._input = tf.placeholder(tf.float32, (299, 299, 3))
    expanded = tf.expand_dims(self._input, axis=0)
    # Each defend() call is a fresh random transformation of the image.
    ensemble_xs = tf.concat(
        [defend(expanded) for _ in range(sample_size)], axis=0)
    self._logits, self._preds = inceptionv3_model(sess, ensemble_xs)
    self._label = tf.placeholder(tf.int32, ())
    target_row = tf.expand_dims(tf.one_hot(self._label, 1000), axis=0)
    tiled_labels = tf.tile(target_row, (self._logits.shape[0], 1))
    per_sample_xent = tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=self._logits, labels=tiled_labels)
    self._loss = tf.reduce_mean(per_sample_xent)
    self._grad, = tf.gradients(self._loss, self._input)
def multigpu_npoplogits(self, inputx):
    """Shard `inputx` across `gpu_num` GPUs, defend + classify each image,
    and return logits averaged over the `samples` axis, [npop, num_classes].

    NOTE(review): assumes `inputx` has samples * npop rows (in sample-major
    order) and that samples is divisible by gpu_num — confirm with caller.
    """
    x = inputx
    x_a = tf.split(x, gpu_num)
    final_logits = []
    for gpu_id in range(gpu_num):
        with tf.device('/gpu:%d' % gpu_id):
            a = []
            # defend() is applied to one-image slices so each row draws an
            # independent randomization; squeeze drops the size-1 batch dim.
            for i in range(x_a[gpu_id].shape[0]):
                temp_randomized = defend(x_a[gpu_id][i:i + 1])
                temp_randomized = tf.squeeze(temp_randomized)
                a.append(temp_randomized)
            tensor = tf.convert_to_tensor(a)
            logits, _ = inceptionv3_model(self._sess, tensor)
            # Each shard contributes samples/gpu_num sample-copies of all
            # npop candidates.
            logits = tf.reshape(logits, [int(samples / gpu_num), npop, -1])
            final_logits.append(logits)
    final_logits = tf.convert_to_tensor(final_logits)
    final_logits = tf.reshape(final_logits, [samples, npop, -1])
    # Average over defense samples -> [npop, num_classes].
    final_logits = tf.reduce_mean(final_logits, axis=0)
    return final_logits
def outlogits(self, x):
    """Return the classifier's logits on a defended copy of `x`."""
    defended = defend(x)
    model_logits, _ = inceptionv3_model(self._sess, defended)
    return model_logits