def flow(self, mode='train'):
    while True:
        if mode == 'train':
            shuffle(self.train_keys)
            keys = self.train_keys
        elif mode == 'val' or mode == 'demo':
            shuffle(self.validation_keys)
            keys = self.validation_keys
        else:
            raise Exception('invalid mode: %s' % mode)
        inputs = []
        targets = []
        for key in keys:
            image_path = self.path_prefix + key
            if not os.path.exists(image_path):
                continue
            image_array = imread(image_path)
            image_array = imresize(image_array, self.image_size)
            # skip images that are not 3-channel RGB
            num_image_channels = len(image_array.shape)
            if num_image_channels != 3:
                continue
            ground_truth = self.ground_truth_data[key]
            if self.do_random_crop:
                image_array = self._do_random_crop(image_array)
            image_array = image_array.astype('float32')
            if mode == 'train' or mode == 'demo':
                if self.ground_truth_transformer is not None:
                    # detection-style targets: transform the boxes together
                    # with the image, then assign them to prior boxes
                    image_array, ground_truth = self.transform(
                        image_array, ground_truth)
                    ground_truth = (
                        self.ground_truth_transformer
                        .assign_boxes(ground_truth))
                else:
                    image_array = self.transform(image_array)[0]
            if self.grayscale:
                image_array = cv2.cvtColor(
                    image_array.astype('uint8'),
                    cv2.COLOR_RGB2GRAY).astype('float32')
                image_array = np.expand_dims(image_array, -1)
            inputs.append(image_array)
            targets.append(ground_truth)
            if len(inputs) == self.batch_size:
                inputs = np.asarray(inputs)
                targets = np.asarray(targets)
                targets = to_categorical(targets, self.classes)
                if mode == 'train' or mode == 'val':
                    inputs = self.preprocess_images(inputs)
                image_data = {'input': inputs}
                targets = {'label': targets}
                yield [image_data, targets]
                inputs = []
                targets = []
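# A usage sketch for the generator above (the variable names below are
# illustrative, not from the original code): the yielded
# [inputs_dict, targets_dict] pairs plug directly into Keras generator
# training, assuming the model's input layer is named 'input' and its
# output layer 'label' to match the dictionary keys.
#
#   model.fit_generator(
#       data_manager.flow(mode='train'),
#       steps_per_epoch=len(data_manager.train_keys) // batch_size,
#       validation_data=data_manager.flow(mode='val'),
#       validation_steps=len(data_manager.validation_keys) // batch_size,
#       epochs=num_epochs)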
def flow(self, mode='train'):
    while True:
        if mode == 'train':
            shuffle(self.train_keys)
            keys = self.train_keys
        elif mode == 'val' or mode == 'demo':
            shuffle(self.validation_keys)
            keys = self.validation_keys
        else:
            raise Exception('invalid mode: %s' % mode)
        inputs = []
        targets = []
        for key in keys:
            image_path = self.path_prefix + key
            image_array = imread(image_path)
            image_array = imresize(image_array, self.image_size)
            # skip images that are not 3-channel RGB
            num_image_channels = len(image_array.shape)
            if num_image_channels != 3:
                continue
            ground_truth = self.ground_truth_data[key]
            image_array = image_array.astype('float32')
            if mode == 'train' or mode == 'demo':
                image_array = self.transform(image_array)[0]
            if self.grayscale:
                image_array = cv2.cvtColor(
                    image_array.astype('uint8'),
                    cv2.COLOR_RGB2GRAY).astype('float32')
                image_array = np.expand_dims(image_array, -1)
            inputs.append(image_array)
            targets.append(ground_truth)
            if len(targets) == self.batch_size:
                inputs = np.asarray(inputs)
                targets = np.asarray(targets)
                gender = np_utils.to_categorical(targets[:, 0], 2)
                # quantize age into 21 five-year bins; np.digitize maps
                # ages 0-99 onto bin indices 1..20
                age_bins = np.linspace(0, 100, 21)
                age_step = np.digitize(targets[:, 1], age_bins)
                age_quantized = np_utils.to_categorical(age_step, 21)
                if mode == 'train' or mode == 'val':
                    inputs = self.preprocess_images(inputs)
                    yield self._wrap_in_dictionary(inputs, gender,
                                                   age_quantized)
                if mode == 'demo':
                    # demo batches are yielded without preprocessing
                    yield self._wrap_in_dictionary(inputs, gender,
                                                   age_quantized)
                inputs = []
                targets = []
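# A self-contained check of the age-quantization step above: np.linspace
# creates 21 bin edges at five-year spacing, and np.digitize returns the
# index of the bin each age falls into (ages 0-99 land on indices 1..20,
# so index 0 is never produced). The sample ages are illustrative.
import numpy as np

ages = np.array([0, 3, 27, 42, 99])
age_bins = np.linspace(0, 100, 21)      # [0, 5, 10, ..., 100]
age_step = np.digitize(ages, age_bins)  # bin index per age
print(age_step)                         # [ 1  1  6  9 20]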
def flow(self, mode='train'):
    while True:
        if mode == 'train':
            shuffle(self.train_keys)
            keys = self.train_keys
        elif mode == 'val':
            keys = self.validation_keys
        else:
            raise Exception('invalid mode: %s' % mode)
        inputs = []
        targets = []
        for key in keys:
            image_path = os.path.join(self.path_prefix, key)
            image_array = imread(image_path)
            image_array = imresize(image_array, self.image_size)
            # skip images that are not 3-channel RGB
            num_image_channels = len(image_array.shape)
            if num_image_channels != 3:
                continue
            ground_truth = self.ground_truth_data[key]
            image_array = image_array.astype('float32')
            if mode == 'train':
                image_array = self.transform(image_array)[0]
            if self.grayscale:
                image_array = cv2.cvtColor(
                    image_array.astype('uint8'),
                    cv2.COLOR_RGB2GRAY).astype('float32')
                image_array = np.expand_dims(image_array, -1)
            inputs.append(image_array)
            targets.append(ground_truth)
            if len(targets) == self.batch_size:
                inputs = np.asarray(inputs)
                targets = np.asarray(targets)
                # gender head uses a sigmoid, so keep the raw 0/1 label;
                # a softmax head would need the one-hot version instead:
                # gender = np_utils.to_categorical(targets[:, 0], 2)
                gender = targets[:, 0]
                age = np_utils.to_categorical(targets[:, 1], self.bins)
                emotion = np_utils.to_categorical(targets[:, 2], 7)
                if mode == 'train' or mode == 'val':
                    inputs = self.preprocess_images(inputs)
                yield self._wrap_in_dictionary(inputs, gender, age, emotion)
                inputs = []
                targets = []
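# The generators above rely on a helper that is not shown in this section.
# A minimal sketch of what _wrap_in_dictionary could look like for the
# three-task variant, assuming output layers named 'gender', 'age' and
# 'emotion' (the layer names are assumptions, not from the original):
def _wrap_in_dictionary(self, image_array, gender, age, emotion):
    # Keras accepts (inputs, targets) pairs of name-keyed dictionaries
    return [{'input': image_array},
            {'gender': gender, 'age': age, 'emotion': emotion}]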
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str,
                        help='Path to the saved model weights')
    parser.add_argument('--epsilon', type=float,
                        help='Epsilon for the adversarial perturbation')
    args = parser.parse_args()

    input_shape = (IMG_SIZE, IMG_SIZE, 3)
    alpha = 1
    n_age_bins = 21
    # the builder object returns the Keras model when called
    model = MobileNetDeepEstimator(
        input_shape[0], alpha, n_age_bins, weights='imagenet')()
    model.load_weights(args.model)

    # draw a random test image with its gender and quantized-age labels
    image_path, gender_one_hot, age_quantized = random_image()
    image = imread(image_path)
    image = preprocess_image(image, input_shape)

    # predictions on the clean image
    gender_orig_img, age_orig_img = predict(model, image)

    # craft the adversarial image by attacking the gender head
    x_adv = FGSM(image, np.expand_dims(gender_one_hot, axis=0),
                 model, eps=args.epsilon)
    gender_adv_img, age_adv_img = predict(model, x_adv)

    age_result = ('Actual age bin: {}, '
                  'Predicted age bin for original image: {}, '
                  'Predicted age bin for adversarial image: {}')
    gender_result = ('Actual gender: {}, '
                     'Predicted gender for original image: {}, '
                     'Predicted gender for adversarial image: {}')
    print(age_result.format(np.argmax(age_quantized),
                            np.argmax(age_orig_img),
                            np.argmax(age_adv_img)))
    print(gender_result.format(np.argmax(gender_one_hot),
                               np.argmax(gender_orig_img),
                               np.argmax(gender_adv_img)))
    plot_adversarial(image, x_adv)
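# The FGSM implementation is not shown in this section. Below is a minimal
# single-step sketch of the fast gradient sign method (Goodfellow et al.),
# written against the TF1-era Keras backend these scripts appear to use;
# the body is an assumption that mirrors the call sites above, not the
# original implementation. It attacks the first output head (gender here).
import keras.backend as K
import numpy as np

def FGSM(x, y_true, model, eps=0.01):
    # loss of the attacked head against the true label
    loss = K.categorical_crossentropy(K.constant(y_true), model.outputs[0])
    # gradient of that loss with respect to the input image
    grads = K.gradients(loss, model.input)[0]
    get_grads = K.function([model.input], [grads])
    # step in the direction that increases the loss; clipping to the valid
    # pixel range is omitted and would depend on the preprocessing used
    return x + eps * np.sign(get_grads([x])[0])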
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--model', type=str,
                        help='Path to the saved model')
    parser.add_argument('--epsilon', type=float,
                        help='Epsilon for the adversarial perturbation')
    args = parser.parse_args()

    input_shape = (IM_WIDTH, IM_HEIGHT, 3)
    model = load_model(args.model)

    # draw a random test image together with its labels
    img_path, age, gender_one_hot, race_one_hot = random_image()
    print('actual age:', age)
    print('actual gender (one-hot):', gender_one_hot)
    print('actual race (one-hot):', race_one_hot)

    image = imread(img_path)
    image = preprocess_image(image, input_shape)

    # predictions on the clean image
    age_pred, race_pred, gender_pred = predict(model, image)
    race_pred = race_pred.argmax(axis=-1)
    gender_pred = gender_pred.argmax(axis=-1)
    print('predicted age:', age_pred)
    print('predicted gender:', gender_pred)
    print('predicted race:', race_pred)

    # craft five adversarial variants by attacking the race head
    x_adv1, x_adv2, x_adv3, x_adv4, x_adv5 = FGSM(
        image, np.expand_dims(race_one_hot, axis=0), model,
        eps=args.epsilon)

    # predictions on the first adversarial variant
    age_adv_img, race_adv_img, gender_adv_img = predict(model, x_adv1)
    race_adv_img = race_adv_img.argmax(axis=-1)
    gender_adv_img = gender_adv_img.argmax(axis=-1)
    print('adversarial (x_adv1) age:', age_adv_img)
    print('adversarial (x_adv1) gender:', gender_adv_img)
    print('adversarial (x_adv1) race:', race_adv_img)

    # predictions on the fifth adversarial variant
    age_adv_img, race_adv_img, gender_adv_img = predict(model, x_adv5)
    race_adv_img = race_adv_img.argmax(axis=-1)
    gender_adv_img = gender_adv_img.argmax(axis=-1)
    print('adversarial (x_adv5) age:', age_adv_img)
    print('adversarial (x_adv5) gender:', gender_adv_img)
    print('adversarial (x_adv5) race:', race_adv_img)

    img_list = [image, x_adv1, x_adv2, x_adv3, x_adv4, x_adv5]
    plot_adversarial(img_list)
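# The call site above unpacks five adversarial images from a single FGSM
# call. One plausible shape for such a function, sketched purely as an
# assumption (the original implementation is not shown), computes the
# gradient sign once and reuses it at growing multiples of eps; the race
# head is the second output of this model.
import keras.backend as K
import numpy as np

def FGSM(x, y_true, model, eps=0.01):
    # loss of the race head against the true label
    loss = K.categorical_crossentropy(K.constant(y_true), model.outputs[1])
    grads = K.gradients(loss, model.input)[0]
    gradient_sign = np.sign(K.function([model.input], [grads])([x])[0])
    # five variants at increasing perturbation strength
    return tuple(x + k * eps * gradient_sign for k in range(1, 6))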